aic7xxx_core.c

/*
 * Core routines and tables shareable across OS platforms.
 *
 * Copyright (c) 1994-2002 Justin T. Gibbs.
 * Copyright (c) 2000-2002 Adaptec Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $
 */
#include "aic7xxx_osm.h"
#include "aic7xxx_inline.h"
#include "aicasm/aicasm_insformat.h"

/***************************** Lookup Tables **********************************/
static const char *const ahc_chip_names[] = {
	"NONE",
	"aic7770",
	"aic7850",
	"aic7855",
	"aic7859",
	"aic7860",
	"aic7870",
	"aic7880",
	"aic7895",
	"aic7895C",
	"aic7890/91",
	"aic7896/97",
	"aic7892",
	"aic7899"
};

/*
 * Hardware error codes.
 */
struct ahc_hard_error_entry {
	uint8_t errno;
	const char *errmesg;
};

static const struct ahc_hard_error_entry ahc_hard_errors[] = {
	{ ILLHADDR,	"Illegal Host Access" },
	{ ILLSADDR,	"Illegal Sequencer Address referenced" },
	{ ILLOPCODE,	"Illegal Opcode in sequencer program" },
	{ SQPARERR,	"Sequencer Parity Error" },
	{ DPARERR,	"Data-path Parity Error" },
	{ MPARERR,	"Scratch or SCB Memory Parity Error" },
	{ PCIERRSTAT,	"PCI Error detected" },
	{ CIOPARERR,	"CIOBUS Parity Error" },
};
static const u_int num_errors = ARRAY_SIZE(ahc_hard_errors);

static const struct ahc_phase_table_entry ahc_phase_table[] =
{
	{ P_DATAOUT,	NOP,			"in Data-out phase"	},
	{ P_DATAIN,	INITIATOR_ERROR,	"in Data-in phase"	},
	{ P_DATAOUT_DT,	NOP,			"in DT Data-out phase"	},
	{ P_DATAIN_DT,	INITIATOR_ERROR,	"in DT Data-in phase"	},
	{ P_COMMAND,	NOP,			"in Command phase"	},
	{ P_MESGOUT,	NOP,			"in Message-out phase"	},
	{ P_STATUS,	INITIATOR_ERROR,	"in Status phase"	},
	{ P_MESGIN,	MSG_PARITY_ERROR,	"in Message-in phase"	},
	{ P_BUSFREE,	NOP,			"while idle"		},
	{ 0,		NOP,			"in unknown phase"	}
};
/*
 * In most cases we only wish to iterate over real phases, so
 * exclude the last element from the count.
 */
static const u_int num_phases = ARRAY_SIZE(ahc_phase_table) - 1;

/*
 * Valid SCSIRATE values. (p. 3-17)
 * Provides a mapping of transfer periods in ns to the proper value to
 * stick in the scsixfer reg.
 */
static const struct ahc_syncrate ahc_syncrates[] =
{
	/* ultra2  fast/ultra  period  rate */
	{ 0x42,	0x000,	 9,	"80.0" },
	{ 0x03,	0x000,	10,	"40.0" },
	{ 0x04,	0x000,	11,	"33.0" },
	{ 0x05,	0x100,	12,	"20.0" },
	{ 0x06,	0x110,	15,	"16.0" },
	{ 0x07,	0x120,	18,	"13.4" },
	{ 0x08,	0x000,	25,	"10.0" },
	{ 0x19,	0x010,	31,	"8.0"  },
	{ 0x1a,	0x020,	37,	"6.67" },
	{ 0x1b,	0x030,	43,	"5.7"  },
	{ 0x1c,	0x040,	50,	"5.0"  },
	{ 0x00,	0x050,	56,	"4.4"  },
	{ 0x00,	0x060,	62,	"4.0"  },
	{ 0x00,	0x070,	68,	"3.6"  },
	{ 0x00,	0x000,	 0,	NULL   }
};
/* Our Sequencer Program */
#include "aic7xxx_seq.h"

/**************************** Function Declarations ***************************/
static void ahc_force_renegotiation(struct ahc_softc *ahc,
		struct ahc_devinfo *devinfo);
static struct ahc_tmode_tstate*
	ahc_alloc_tstate(struct ahc_softc *ahc,
		u_int scsi_id, char channel);
#ifdef AHC_TARGET_MODE
static void ahc_free_tstate(struct ahc_softc *ahc,
		u_int scsi_id, char channel, int force);
#endif
static const struct ahc_syncrate*
	ahc_devlimited_syncrate(struct ahc_softc *ahc,
		struct ahc_initiator_tinfo *,
		u_int *period,
		u_int *ppr_options,
		role_t role);
static void ahc_update_pending_scbs(struct ahc_softc *ahc);
static void ahc_fetch_devinfo(struct ahc_softc *ahc,
		struct ahc_devinfo *devinfo);
static void ahc_scb_devinfo(struct ahc_softc *ahc,
		struct ahc_devinfo *devinfo,
		struct scb *scb);
static void ahc_assert_atn(struct ahc_softc *ahc);
static void ahc_setup_initiator_msgout(struct ahc_softc *ahc,
		struct ahc_devinfo *devinfo,
		struct scb *scb);
static void ahc_build_transfer_msg(struct ahc_softc *ahc,
		struct ahc_devinfo *devinfo);
static void ahc_construct_sdtr(struct ahc_softc *ahc,
		struct ahc_devinfo *devinfo,
		u_int period, u_int offset);
static void ahc_construct_wdtr(struct ahc_softc *ahc,
		struct ahc_devinfo *devinfo,
		u_int bus_width);
static void ahc_construct_ppr(struct ahc_softc *ahc,
		struct ahc_devinfo *devinfo,
		u_int period, u_int offset,
		u_int bus_width, u_int ppr_options);
static void ahc_clear_msg_state(struct ahc_softc *ahc);
static void ahc_handle_proto_violation(struct ahc_softc *ahc);
static void ahc_handle_message_phase(struct ahc_softc *ahc);
typedef enum {
	AHCMSG_1B,
	AHCMSG_2B,
	AHCMSG_EXT
} ahc_msgtype;
static int ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type,
		u_int msgval, int full);
static int ahc_parse_msg(struct ahc_softc *ahc,
		struct ahc_devinfo *devinfo);
static int ahc_handle_msg_reject(struct ahc_softc *ahc,
		struct ahc_devinfo *devinfo);
static void ahc_handle_ign_wide_residue(struct ahc_softc *ahc,
		struct ahc_devinfo *devinfo);
static void ahc_reinitialize_dataptrs(struct ahc_softc *ahc);
static void ahc_handle_devreset(struct ahc_softc *ahc,
		struct ahc_devinfo *devinfo,
		cam_status status, char *message,
		int verbose_level);
#ifdef AHC_TARGET_MODE
static void ahc_setup_target_msgin(struct ahc_softc *ahc,
		struct ahc_devinfo *devinfo,
		struct scb *scb);
#endif

static bus_dmamap_callback_t ahc_dmamap_cb;
static void ahc_build_free_scb_list(struct ahc_softc *ahc);
static int ahc_init_scbdata(struct ahc_softc *ahc);
static void ahc_fini_scbdata(struct ahc_softc *ahc);
static void ahc_qinfifo_requeue(struct ahc_softc *ahc,
		struct scb *prev_scb,
		struct scb *scb);
static int ahc_qinfifo_count(struct ahc_softc *ahc);
static u_int ahc_rem_scb_from_disc_list(struct ahc_softc *ahc,
		u_int prev, u_int scbptr);
static void ahc_add_curscb_to_free_list(struct ahc_softc *ahc);
static u_int ahc_rem_wscb(struct ahc_softc *ahc,
		u_int scbpos, u_int prev);
static void ahc_reset_current_bus(struct ahc_softc *ahc);
#ifdef AHC_DUMP_SEQ
static void ahc_dumpseq(struct ahc_softc *ahc);
#endif
static int ahc_loadseq(struct ahc_softc *ahc);
static int ahc_check_patch(struct ahc_softc *ahc,
		const struct patch **start_patch,
		u_int start_instr, u_int *skip_addr);
static void ahc_download_instr(struct ahc_softc *ahc,
		u_int instrptr, uint8_t *dconsts);
#ifdef AHC_TARGET_MODE
static void ahc_queue_lstate_event(struct ahc_softc *ahc,
		struct ahc_tmode_lstate *lstate,
		u_int initiator_id,
		u_int event_type,
		u_int event_arg);
static void ahc_update_scsiid(struct ahc_softc *ahc,
		u_int targid_mask);
static int ahc_handle_target_cmd(struct ahc_softc *ahc,
		struct target_cmd *cmd);
#endif

static u_int ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl);
static void ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl);
static void ahc_busy_tcl(struct ahc_softc *ahc,
		u_int tcl, u_int busyid);

/************************** SCB and SCB queue management **********************/
static void ahc_run_untagged_queues(struct ahc_softc *ahc);
static void ahc_run_untagged_queue(struct ahc_softc *ahc,
		struct scb_tailq *queue);

/****************************** Initialization ********************************/
static void ahc_alloc_scbs(struct ahc_softc *ahc);
static void ahc_shutdown(void *arg);

/*************************** Interrupt Services *******************************/
static void ahc_clear_intstat(struct ahc_softc *ahc);
static void ahc_run_qoutfifo(struct ahc_softc *ahc);
#ifdef AHC_TARGET_MODE
static void ahc_run_tqinfifo(struct ahc_softc *ahc, int paused);
#endif
static void ahc_handle_brkadrint(struct ahc_softc *ahc);
static void ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat);
static void ahc_handle_scsiint(struct ahc_softc *ahc,
		u_int intstat);
static void ahc_clear_critical_section(struct ahc_softc *ahc);

/***************************** Error Recovery *********************************/
static void ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb);
static int ahc_abort_scbs(struct ahc_softc *ahc, int target,
		char channel, int lun, u_int tag,
		role_t role, uint32_t status);
static void ahc_calc_residual(struct ahc_softc *ahc,
		struct scb *scb);

/*********************** Untagged Transaction Routines ************************/
static inline void ahc_freeze_untagged_queues(struct ahc_softc *ahc);
static inline void ahc_release_untagged_queues(struct ahc_softc *ahc);
/*
 * Block our completion routine from starting the next untagged
 * transaction for this target or target lun.
 */
static inline void
ahc_freeze_untagged_queues(struct ahc_softc *ahc)
{
	if ((ahc->flags & AHC_SCB_BTT) == 0)
		ahc->untagged_queue_lock++;
}

/*
 * Allow the next untagged transaction for this target or target lun
 * to be executed.  We use a counting semaphore to allow the lock
 * to be acquired recursively.  Once the count drops to zero, the
 * transaction queues will be run.
 */
static inline void
ahc_release_untagged_queues(struct ahc_softc *ahc)
{
	if ((ahc->flags & AHC_SCB_BTT) == 0) {
		ahc->untagged_queue_lock--;
		if (ahc->untagged_queue_lock == 0)
			ahc_run_untagged_queues(ahc);
	}
}
/************************* Sequencer Execution Control ************************/
/*
 * Work around any chip bugs related to halting sequencer execution.
 * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
 * reading a register that will set this signal and deassert it.
 * Without this workaround, if the chip is paused by an interrupt or
 * manual pause while accessing SCB RAM, accesses to certain registers
 * will hang the system (infinite PCI retries).
 */
static void
ahc_pause_bug_fix(struct ahc_softc *ahc)
{
	if ((ahc->features & AHC_ULTRA2) != 0)
		(void)ahc_inb(ahc, CCSCBCTL);
}

/*
 * Determine whether the sequencer has halted code execution.
 * Returns non-zero status if the sequencer is stopped.
 */
int
ahc_is_paused(struct ahc_softc *ahc)
{
	return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
}

/*
 * Request that the sequencer stop and wait, indefinitely, for it
 * to stop.  The sequencer will only acknowledge that it is paused
 * once it has reached an instruction boundary and PAUSEDIS is
 * cleared in the SEQCTL register.  The sequencer may use PAUSEDIS
 * for critical sections.
 */
void
ahc_pause(struct ahc_softc *ahc)
{
	ahc_outb(ahc, HCNTRL, ahc->pause);

	/*
	 * Since the sequencer can disable pausing in a critical section, we
	 * must loop until it actually stops.
	 */
	while (ahc_is_paused(ahc) == 0)
		;

	ahc_pause_bug_fix(ahc);
}

/*
 * Allow the sequencer to continue program execution.
 * We check here to ensure that no additional interrupt
 * sources that would cause the sequencer to halt have been
 * asserted.  If, for example, a SCSI bus reset is detected
 * while we are fielding a different, pausing, interrupt type,
 * we don't want to release the sequencer before going back
 * into our interrupt handler and dealing with this new
 * condition.
 */
void
ahc_unpause(struct ahc_softc *ahc)
{
	if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
		ahc_outb(ahc, HCNTRL, ahc->unpause);
}
/************************** Memory mapping routines ***************************/
static struct ahc_dma_seg *
ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
{
	int sg_index;

	sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
	/* sg_list_phys points to entry 1, not 0 */
	sg_index++;

	return (&scb->sg_list[sg_index]);
}

static uint32_t
ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
{
	int sg_index;

	/* sg_list_phys points to entry 1, not 0 */
	sg_index = sg - &scb->sg_list[1];

	return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
}

static uint32_t
ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
{
	return (ahc->scb_data->hscb_busaddr
		+ (sizeof(struct hardware_scb) * index));
}

static void
ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op)
{
	ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat,
			ahc->scb_data->hscb_dmamap,
			/*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb),
			/*len*/sizeof(*scb->hscb), op);
}

void
ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op)
{
	if (scb->sg_count == 0)
		return;

	ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap,
			/*offset*/(scb->sg_list - scb->sg_map->sg_vaddr)
				* sizeof(struct ahc_dma_seg),
			/*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op);
}

#ifdef AHC_TARGET_MODE
static uint32_t
ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index)
{
	return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo);
}
#endif
/*********************** Miscellaneous Support Functions ***********************/
/*
 * Determine whether the sequencer reported a residual
 * for this SCB/transaction.
 */
static void
ahc_update_residual(struct ahc_softc *ahc, struct scb *scb)
{
	uint32_t sgptr;

	sgptr = ahc_le32toh(scb->hscb->sgptr);
	if ((sgptr & SG_RESID_VALID) != 0)
		ahc_calc_residual(ahc, scb);
}

/*
 * Return pointers to the transfer negotiation information
 * for the specified our_id/remote_id pair.
 */
struct ahc_initiator_tinfo *
ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
		    u_int remote_id, struct ahc_tmode_tstate **tstate)
{
	/*
	 * Transfer data structures are stored from the perspective
	 * of the target role.  Since the parameters for a connection
	 * in the initiator role to a given target are the same as
	 * when the roles are reversed, we pretend we are the target.
	 */
	if (channel == 'B')
		our_id += 8;
	*tstate = ahc->enabled_targets[our_id];
	return (&(*tstate)->transinfo[remote_id]);
}
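
/*
 * Multi-byte register accessors.  The chip is addressed one byte at a
 * time through ahc_inb()/ahc_outb(); the wider accessors below simply
 * assemble or split values across consecutive byte-wide ports.
 */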
uint16_t
ahc_inw(struct ahc_softc *ahc, u_int port)
{
	uint16_t r = ahc_inb(ahc, port+1) << 8;
	return r | ahc_inb(ahc, port);
}

void
ahc_outw(struct ahc_softc *ahc, u_int port, u_int value)
{
	ahc_outb(ahc, port, value & 0xFF);
	ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
}

uint32_t
ahc_inl(struct ahc_softc *ahc, u_int port)
{
	return ((ahc_inb(ahc, port))
	      | (ahc_inb(ahc, port+1) << 8)
	      | (ahc_inb(ahc, port+2) << 16)
	      | (ahc_inb(ahc, port+3) << 24));
}

void
ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value)
{
	ahc_outb(ahc, port, (value) & 0xFF);
	ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF);
	ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF);
	ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF);
}

uint64_t
ahc_inq(struct ahc_softc *ahc, u_int port)
{
	return ((ahc_inb(ahc, port))
	      | (ahc_inb(ahc, port+1) << 8)
	      | (ahc_inb(ahc, port+2) << 16)
	      | (((uint64_t)ahc_inb(ahc, port+3)) << 24)
	      | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
	      | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
	      | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
	      | (((uint64_t)ahc_inb(ahc, port+7)) << 56));
}

void
ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value)
{
	ahc_outb(ahc, port, value & 0xFF);
	ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
	ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
	ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
	ahc_outb(ahc, port+4, (value >> 32) & 0xFF);
	ahc_outb(ahc, port+5, (value >> 40) & 0xFF);
	ahc_outb(ahc, port+6, (value >> 48) & 0xFF);
	ahc_outb(ahc, port+7, (value >> 56) & 0xFF);
}
/*
 * Get a free scb.  If there are none, see if we can allocate a new SCB.
 */
struct scb *
ahc_get_scb(struct ahc_softc *ahc)
{
	struct scb *scb;

	if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) {
		ahc_alloc_scbs(ahc);
		scb = SLIST_FIRST(&ahc->scb_data->free_scbs);
		if (scb == NULL)
			return (NULL);
	}
	SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
	return (scb);
}

/*
 * Return an SCB resource to the free list.
 */
void
ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *hscb;

	hscb = scb->hscb;
	/* Clean up for the next user */
	ahc->scb_data->scbindex[hscb->tag] = NULL;
	scb->flags = SCB_FREE;
	hscb->control = 0;

	SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);

	/* Notify the OSM that a resource is now available. */
	ahc_platform_scb_free(ahc, scb);
}

struct scb *
ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
{
	struct scb* scb;

	scb = ahc->scb_data->scbindex[tag];

	if (scb != NULL)
		ahc_sync_scb(ahc, scb,
			     BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	return (scb);
}
static void
ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
{
	struct hardware_scb *q_hscb;
	u_int saved_tag;

	/*
	 * Our queuing method is a bit tricky.  The card
	 * knows in advance which HSCB to download, and we
	 * can't disappoint it.  To achieve this, the next
	 * SCB to download is saved off in ahc->next_queued_scb.
	 * When we are called to queue "an arbitrary scb",
	 * we copy the contents of the incoming HSCB to the one
	 * the sequencer knows about, swap HSCB pointers and
	 * finally assign the SCB to the tag indexed location
	 * in the scb_array.  This makes sure that we can still
	 * locate the correct SCB by SCB_TAG.
	 */
	q_hscb = ahc->next_queued_scb->hscb;
	saved_tag = q_hscb->tag;
	memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
	if ((scb->flags & SCB_CDB32_PTR) != 0) {
		q_hscb->shared_data.cdb_ptr =
		    ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
			      + offsetof(struct hardware_scb, cdb32));
	}
	q_hscb->tag = saved_tag;
	q_hscb->next = scb->hscb->tag;

	/* Now swap HSCB pointers. */
	ahc->next_queued_scb->hscb = scb->hscb;
	scb->hscb = q_hscb;

	/* Now define the mapping from tag to SCB in the scbindex */
	ahc->scb_data->scbindex[scb->hscb->tag] = scb;
}

/*
 * Tell the sequencer about a new transaction to execute.
 */
void
ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
{
	ahc_swap_with_next_hscb(ahc, scb);

	if (scb->hscb->tag == SCB_LIST_NULL
	 || scb->hscb->next == SCB_LIST_NULL)
		panic("Attempt to queue invalid SCB tag %x:%x\n",
		      scb->hscb->tag, scb->hscb->next);

	/*
	 * Setup data "oddness".
	 */
	scb->hscb->lun &= LID;
	if (ahc_get_transfer_length(scb) & 0x1)
		scb->hscb->lun |= SCB_XFERLEN_ODD;

	/*
	 * Keep a history of SCBs we've downloaded in the qinfifo.
	 */
	ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;

	/*
	 * Make sure our data is consistent from the
	 * perspective of the adapter.
	 */
	ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Tell the adapter about the newly queued SCB */
	if ((ahc->features & AHC_QUEUE_REGS) != 0) {
		ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
	} else {
		if ((ahc->features & AHC_AUTOPAUSE) == 0)
			ahc_pause(ahc);
		ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
		if ((ahc->features & AHC_AUTOPAUSE) == 0)
			ahc_unpause(ahc);
	}
}
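
/*
 * Each SCB has a dedicated sense buffer.  Look it up (and compute its
 * bus address) from the SCB's index within scb_data->scbarray.
 */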
struct scsi_sense_data *
ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
{
	int offset;

	offset = scb - ahc->scb_data->scbarray;
	return (&ahc->scb_data->sense[offset]);
}

static uint32_t
ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
{
	int offset;

	offset = scb - ahc->scb_data->scbarray;
	return (ahc->scb_data->sense_busaddr
		+ (offset * sizeof(struct scsi_sense_data)));
}

/************************** Interrupt Processing ******************************/
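/*
 * Keep the host's view of the 256-byte qoutfifo region of the shared
 * data area coherent with the controller's DMA writes.
 */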
static void
ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
{
	ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
			/*offset*/0, /*len*/256, op);
}

static void
ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
{
#ifdef AHC_TARGET_MODE
	if ((ahc->flags & AHC_TARGETROLE) != 0) {
		ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
				ahc->shared_data_dmamap,
				ahc_targetcmd_offset(ahc, 0),
				sizeof(struct target_cmd) * AHC_TMODE_CMDS,
				op);
	}
#endif
}
/*
 * See if the firmware has posted any completed commands
 * into our in-core command complete fifos.
 */
#define AHC_RUN_QOUTFIFO 0x1
#define AHC_RUN_TQINFIFO 0x2
static u_int
ahc_check_cmdcmpltqueues(struct ahc_softc *ahc)
{
	u_int retval;

	retval = 0;
	ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
			/*offset*/ahc->qoutfifonext, /*len*/1,
			BUS_DMASYNC_POSTREAD);
	if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL)
		retval |= AHC_RUN_QOUTFIFO;
#ifdef AHC_TARGET_MODE
	if ((ahc->flags & AHC_TARGETROLE) != 0
	 && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) {
		ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
				ahc->shared_data_dmamap,
				ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
				/*len*/sizeof(struct target_cmd),
				BUS_DMASYNC_POSTREAD);
		if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0)
			retval |= AHC_RUN_TQINFIFO;
	}
#endif
	return (retval);
}
  648. /*
  649. * Catch an interrupt from the adapter
  650. */
  651. int
  652. ahc_intr(struct ahc_softc *ahc)
  653. {
  654. u_int intstat;
  655. if ((ahc->pause & INTEN) == 0) {
  656. /*
  657. * Our interrupt is not enabled on the chip
  658. * and may be disabled for re-entrancy reasons,
  659. * so just return. This is likely just a shared
  660. * interrupt.
  661. */
  662. return (0);
  663. }
  664. /*
  665. * Instead of directly reading the interrupt status register,
  666. * infer the cause of the interrupt by checking our in-core
  667. * completion queues. This avoids a costly PCI bus read in
  668. * most cases.
  669. */
  670. if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
  671. && (ahc_check_cmdcmpltqueues(ahc) != 0))
  672. intstat = CMDCMPLT;
  673. else {
  674. intstat = ahc_inb(ahc, INTSTAT);
  675. }
  676. if ((intstat & INT_PEND) == 0) {
  677. #if AHC_PCI_CONFIG > 0
  678. if (ahc->unsolicited_ints > 500) {
  679. ahc->unsolicited_ints = 0;
  680. if ((ahc->chip & AHC_PCI) != 0
  681. && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
  682. ahc->bus_intr(ahc);
  683. }
  684. #endif
  685. ahc->unsolicited_ints++;
  686. return (0);
  687. }
  688. ahc->unsolicited_ints = 0;
  689. if (intstat & CMDCMPLT) {
  690. ahc_outb(ahc, CLRINT, CLRCMDINT);
  691. /*
  692. * Ensure that the chip sees that we've cleared
  693. * this interrupt before we walk the output fifo.
  694. * Otherwise, we may, due to posted bus writes,
  695. * clear the interrupt after we finish the scan,
  696. * and after the sequencer has added new entries
  697. * and asserted the interrupt again.
  698. */
  699. ahc_flush_device_writes(ahc);
  700. ahc_run_qoutfifo(ahc);
  701. #ifdef AHC_TARGET_MODE
  702. if ((ahc->flags & AHC_TARGETROLE) != 0)
  703. ahc_run_tqinfifo(ahc, /*paused*/FALSE);
  704. #endif
  705. }
  706. /*
  707. * Handle statuses that may invalidate our cached
  708. * copy of INTSTAT separately.
  709. */
  710. if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
  711. /* Hot eject. Do nothing */
  712. } else if (intstat & BRKADRINT) {
  713. ahc_handle_brkadrint(ahc);
  714. } else if ((intstat & (SEQINT|SCSIINT)) != 0) {
  715. ahc_pause_bug_fix(ahc);
  716. if ((intstat & SEQINT) != 0)
  717. ahc_handle_seqint(ahc, intstat);
  718. if ((intstat & SCSIINT) != 0)
  719. ahc_handle_scsiint(ahc, intstat);
  720. }
  721. return (1);
  722. }
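/*
 * Note the ordering above: command completions are always reaped first,
 * then the exceptional statuses (BRKADRINT, SEQINT, SCSIINT) are handled,
 * while an INTSTAT of 0xFF on removable controllers is taken to mean the
 * card was hot ejected and is simply ignored.
 */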
  723. /************************* Sequencer Execution Control ************************/
  724. /*
  725. * Restart the sequencer program from address zero
  726. */
  727. static void
  728. ahc_restart(struct ahc_softc *ahc)
  729. {
  730. uint8_t sblkctl;
  731. ahc_pause(ahc);
  732. /* No more pending messages. */
  733. ahc_clear_msg_state(ahc);
  734. ahc_outb(ahc, SCSISIGO, 0); /* De-assert BSY */
  735. ahc_outb(ahc, MSG_OUT, NOP); /* No message to send */
  736. ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
  737. ahc_outb(ahc, LASTPHASE, P_BUSFREE);
  738. ahc_outb(ahc, SAVED_SCSIID, 0xFF);
  739. ahc_outb(ahc, SAVED_LUN, 0xFF);
  740. /*
  741. * Ensure that the sequencer's idea of TQINPOS
  742. * matches our own. The sequencer increments TQINPOS
743. * only after it sees a DMA complete, and a reset could
744. * occur before the increment, leaving the kernel believing
745. * a command has arrived that the sequencer knows nothing about.
  746. */
  747. ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
  748. /* Always allow reselection */
  749. ahc_outb(ahc, SCSISEQ,
  750. ahc_inb(ahc, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
  751. if ((ahc->features & AHC_CMD_CHAN) != 0) {
  752. /* Ensure that no DMA operations are in progress */
  753. ahc_outb(ahc, CCSCBCNT, 0);
  754. ahc_outb(ahc, CCSGCTL, 0);
  755. ahc_outb(ahc, CCSCBCTL, 0);
  756. }
  757. /*
  758. * If we were in the process of DMA'ing SCB data into
  759. * an SCB, replace that SCB on the free list. This prevents
  760. * an SCB leak.
  761. */
  762. if ((ahc_inb(ahc, SEQ_FLAGS2) & SCB_DMA) != 0) {
  763. ahc_add_curscb_to_free_list(ahc);
  764. ahc_outb(ahc, SEQ_FLAGS2,
  765. ahc_inb(ahc, SEQ_FLAGS2) & ~SCB_DMA);
  766. }
  767. /*
  768. * Clear any pending sequencer interrupt. It is no
  769. * longer relevant since we're resetting the Program
  770. * Counter.
  771. */
  772. ahc_outb(ahc, CLRINT, CLRSEQINT);
  773. ahc_outb(ahc, MWI_RESIDUAL, 0);
  774. ahc_outb(ahc, SEQCTL, ahc->seqctl);
  775. ahc_outb(ahc, SEQADDR0, 0);
  776. ahc_outb(ahc, SEQADDR1, 0);
  777. /*
  778. * Take the LED out of diagnostic mode on PM resume, too
  779. */
  780. sblkctl = ahc_inb(ahc, SBLKCTL);
  781. ahc_outb(ahc, SBLKCTL, (sblkctl & ~(DIAGLEDEN|DIAGLEDON)));
  782. ahc_unpause(ahc);
  783. }
  784. /************************* Input/Output Queues ********************************/
  785. static void
  786. ahc_run_qoutfifo(struct ahc_softc *ahc)
  787. {
  788. struct scb *scb;
  789. u_int scb_index;
  790. ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
  791. while (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL) {
  792. scb_index = ahc->qoutfifo[ahc->qoutfifonext];
  793. if ((ahc->qoutfifonext & 0x03) == 0x03) {
  794. u_int modnext;
  795. /*
  796. * Clear 32bits of QOUTFIFO at a time
  797. * so that we don't clobber an incoming
  798. * byte DMA to the array on architectures
  799. * that only support 32bit load and store
  800. * operations.
  801. */
  802. modnext = ahc->qoutfifonext & ~0x3;
  803. *((uint32_t *)(&ahc->qoutfifo[modnext])) = 0xFFFFFFFFUL;
  804. ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
  805. ahc->shared_data_dmamap,
  806. /*offset*/modnext, /*len*/4,
  807. BUS_DMASYNC_PREREAD);
  808. }
  809. ahc->qoutfifonext++;
  810. scb = ahc_lookup_scb(ahc, scb_index);
  811. if (scb == NULL) {
  812. printk("%s: WARNING no command for scb %d "
  813. "(cmdcmplt)\nQOUTPOS = %d\n",
  814. ahc_name(ahc), scb_index,
  815. (ahc->qoutfifonext - 1) & 0xFF);
  816. continue;
  817. }
  818. /*
  819. * Save off the residual
  820. * if there is one.
  821. */
  822. ahc_update_residual(ahc, scb);
  823. ahc_done(ahc, scb);
  824. }
  825. }
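/*
 * The qoutfifo is a byte array of completed SCB tags written by the
 * sequencer, with SCB_LIST_NULL marking empty slots; that is why
 * consumed entries are refilled with 0xFFFFFFFF a dword at a time
 * above, and why the stale-tag warning masks the index with 0xFF.
 */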
  826. static void
  827. ahc_run_untagged_queues(struct ahc_softc *ahc)
  828. {
  829. int i;
  830. for (i = 0; i < 16; i++)
  831. ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
  832. }
  833. static void
  834. ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
  835. {
  836. struct scb *scb;
  837. if (ahc->untagged_queue_lock != 0)
  838. return;
  839. if ((scb = TAILQ_FIRST(queue)) != NULL
  840. && (scb->flags & SCB_ACTIVE) == 0) {
  841. scb->flags |= SCB_ACTIVE;
  842. ahc_queue_scb(ahc, scb);
  843. }
  844. }
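/*
 * Only the head of each per-target untagged queue is started, and only
 * when no untagged command for that target is already active; untagged
 * transactions must be serialized per target since they cannot be told
 * apart once the target reconnects.
 */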
  845. /************************* Interrupt Handling *********************************/
  846. static void
  847. ahc_handle_brkadrint(struct ahc_softc *ahc)
  848. {
  849. /*
  850. * We upset the sequencer :-(
  851. * Lookup the error message
  852. */
  853. int i;
  854. int error;
  855. error = ahc_inb(ahc, ERROR);
  856. for (i = 0; error != 1 && i < num_errors; i++)
  857. error >>= 1;
  858. printk("%s: brkadrint, %s at seqaddr = 0x%x\n",
  859. ahc_name(ahc), ahc_hard_errors[i].errmesg,
  860. ahc_inb(ahc, SEQADDR0) |
  861. (ahc_inb(ahc, SEQADDR1) << 8));
  862. ahc_dump_card_state(ahc);
  863. /* Tell everyone that this HBA is no longer available */
  864. ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, ALL_CHANNELS,
  865. CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
  866. CAM_NO_HBA);
  867. /* Disable all interrupt sources by resetting the controller */
  868. ahc_shutdown(ahc);
  869. }
  870. static void
  871. ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
  872. {
  873. struct scb *scb;
  874. struct ahc_devinfo devinfo;
  875. ahc_fetch_devinfo(ahc, &devinfo);
  876. /*
  877. * Clear the upper byte that holds SEQINT status
  878. * codes and clear the SEQINT bit. We will unpause
  879. * the sequencer, if appropriate, after servicing
  880. * the request.
  881. */
  882. ahc_outb(ahc, CLRINT, CLRSEQINT);
  883. switch (intstat & SEQINT_MASK) {
  884. case BAD_STATUS:
  885. {
  886. u_int scb_index;
  887. struct hardware_scb *hscb;
  888. /*
  889. * Set the default return value to 0 (don't
  890. * send sense). The sense code will change
  891. * this if needed.
  892. */
  893. ahc_outb(ahc, RETURN_1, 0);
  894. /*
  895. * The sequencer will notify us when a command
  896. * has an error that would be of interest to
  897. * the kernel. This allows us to leave the sequencer
  898. * running in the common case of command completes
  899. * without error. The sequencer will already have
  900. * dma'd the SCB back up to us, so we can reference
  901. * the in kernel copy directly.
  902. */
  903. scb_index = ahc_inb(ahc, SCB_TAG);
  904. scb = ahc_lookup_scb(ahc, scb_index);
  905. if (scb == NULL) {
  906. ahc_print_devinfo(ahc, &devinfo);
  907. printk("ahc_intr - referenced scb "
  908. "not valid during seqint 0x%x scb(%d)\n",
  909. intstat, scb_index);
  910. ahc_dump_card_state(ahc);
  911. panic("for safety");
  912. goto unpause;
  913. }
  914. hscb = scb->hscb;
  915. /* Don't want to clobber the original sense code */
  916. if ((scb->flags & SCB_SENSE) != 0) {
  917. /*
  918. * Clear the SCB_SENSE Flag and have
  919. * the sequencer do a normal command
  920. * complete.
  921. */
  922. scb->flags &= ~SCB_SENSE;
  923. ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
  924. break;
  925. }
  926. ahc_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
  927. /* Freeze the queue until the client sees the error. */
  928. ahc_freeze_devq(ahc, scb);
  929. ahc_freeze_scb(scb);
  930. ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status);
  931. switch (hscb->shared_data.status.scsi_status) {
  932. case SAM_STAT_GOOD:
  933. printk("%s: Interrupted for status of 0???\n",
  934. ahc_name(ahc));
  935. break;
  936. case SAM_STAT_COMMAND_TERMINATED:
  937. case SAM_STAT_CHECK_CONDITION:
  938. {
  939. struct ahc_dma_seg *sg;
  940. struct scsi_sense *sc;
  941. struct ahc_initiator_tinfo *targ_info;
  942. struct ahc_tmode_tstate *tstate;
  943. struct ahc_transinfo *tinfo;
  944. #ifdef AHC_DEBUG
  945. if (ahc_debug & AHC_SHOW_SENSE) {
  946. ahc_print_path(ahc, scb);
  947. printk("SCB %d: requests Check Status\n",
  948. scb->hscb->tag);
  949. }
  950. #endif
  951. if (ahc_perform_autosense(scb) == 0)
  952. break;
  953. targ_info = ahc_fetch_transinfo(ahc,
  954. devinfo.channel,
  955. devinfo.our_scsiid,
  956. devinfo.target,
  957. &tstate);
  958. tinfo = &targ_info->curr;
  959. sg = scb->sg_list;
  960. sc = (struct scsi_sense *)(&hscb->shared_data.cdb);
  961. /*
  962. * Save off the residual if there is one.
  963. */
  964. ahc_update_residual(ahc, scb);
  965. #ifdef AHC_DEBUG
  966. if (ahc_debug & AHC_SHOW_SENSE) {
  967. ahc_print_path(ahc, scb);
  968. printk("Sending Sense\n");
  969. }
  970. #endif
  971. sg->addr = ahc_get_sense_bufaddr(ahc, scb);
  972. sg->len = ahc_get_sense_bufsize(ahc, scb);
  973. sg->len |= AHC_DMA_LAST_SEG;
  974. /* Fixup byte order */
  975. sg->addr = ahc_htole32(sg->addr);
  976. sg->len = ahc_htole32(sg->len);
  977. sc->opcode = REQUEST_SENSE;
  978. sc->byte2 = 0;
  979. if (tinfo->protocol_version <= SCSI_REV_2
  980. && SCB_GET_LUN(scb) < 8)
  981. sc->byte2 = SCB_GET_LUN(scb) << 5;
  982. sc->unused[0] = 0;
  983. sc->unused[1] = 0;
  984. sc->length = sg->len;
  985. sc->control = 0;
  986. /*
  987. * We can't allow the target to disconnect.
  988. * This will be an untagged transaction and
  989. * having the target disconnect will make this
990. * transaction indistinguishable from outstanding
  991. * tagged transactions.
  992. */
  993. hscb->control = 0;
  994. /*
  995. * This request sense could be because the
996. * device lost power or in some other
  997. * way has lost our transfer negotiations.
  998. * Renegotiate if appropriate. Unit attention
  999. * errors will be reported before any data
  1000. * phases occur.
  1001. */
  1002. if (ahc_get_residual(scb)
  1003. == ahc_get_transfer_length(scb)) {
  1004. ahc_update_neg_request(ahc, &devinfo,
  1005. tstate, targ_info,
  1006. AHC_NEG_IF_NON_ASYNC);
  1007. }
  1008. if (tstate->auto_negotiate & devinfo.target_mask) {
  1009. hscb->control |= MK_MESSAGE;
  1010. scb->flags &= ~SCB_NEGOTIATE;
  1011. scb->flags |= SCB_AUTO_NEGOTIATE;
  1012. }
  1013. hscb->cdb_len = sizeof(*sc);
  1014. hscb->dataptr = sg->addr;
  1015. hscb->datacnt = sg->len;
  1016. hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
  1017. hscb->sgptr = ahc_htole32(hscb->sgptr);
  1018. scb->sg_count = 1;
  1019. scb->flags |= SCB_SENSE;
  1020. ahc_qinfifo_requeue_tail(ahc, scb);
  1021. ahc_outb(ahc, RETURN_1, SEND_SENSE);
  1022. /*
  1023. * Ensure we have enough time to actually
  1024. * retrieve the sense.
  1025. */
  1026. ahc_scb_timer_reset(scb, 5 * 1000000);
  1027. break;
  1028. }
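/*
 * At this point the failing SCB has been rebuilt in place as a
 * single-segment REQUEST SENSE aimed at its per-SCB sense buffer,
 * requeued at the tail of the qinfifo, and RETURN_1 set to SEND_SENSE
 * so the sequencer starts it instead of completing the original
 * command.
 */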
  1029. default:
  1030. break;
  1031. }
  1032. break;
  1033. }
  1034. case NO_MATCH:
  1035. {
  1036. /* Ensure we don't leave the selection hardware on */
  1037. ahc_outb(ahc, SCSISEQ,
  1038. ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
  1039. printk("%s:%c:%d: no active SCB for reconnecting "
  1040. "target - issuing BUS DEVICE RESET\n",
  1041. ahc_name(ahc), devinfo.channel, devinfo.target);
  1042. printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
  1043. "ARG_1 == 0x%x ACCUM = 0x%x\n",
  1044. ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
  1045. ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
  1046. printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
  1047. "SINDEX == 0x%x\n",
  1048. ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
  1049. ahc_index_busy_tcl(ahc,
  1050. BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
  1051. ahc_inb(ahc, SAVED_LUN))),
  1052. ahc_inb(ahc, SINDEX));
  1053. printk("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
  1054. "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
  1055. ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
  1056. ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
  1057. ahc_inb(ahc, SCB_CONTROL));
  1058. printk("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
  1059. ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
  1060. printk("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));
  1061. printk("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL));
  1062. ahc_dump_card_state(ahc);
  1063. ahc->msgout_buf[0] = TARGET_RESET;
  1064. ahc->msgout_len = 1;
  1065. ahc->msgout_index = 0;
  1066. ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
  1067. ahc_outb(ahc, MSG_OUT, HOST_MSG);
  1068. ahc_assert_atn(ahc);
  1069. break;
  1070. }
  1071. case SEND_REJECT:
  1072. {
  1073. u_int rejbyte = ahc_inb(ahc, ACCUM);
  1074. printk("%s:%c:%d: Warning - unknown message received from "
  1075. "target (0x%x). Rejecting\n",
  1076. ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
  1077. break;
  1078. }
  1079. case PROTO_VIOLATION:
  1080. {
  1081. ahc_handle_proto_violation(ahc);
  1082. break;
  1083. }
  1084. case IGN_WIDE_RES:
  1085. ahc_handle_ign_wide_residue(ahc, &devinfo);
  1086. break;
  1087. case PDATA_REINIT:
  1088. ahc_reinitialize_dataptrs(ahc);
  1089. break;
  1090. case BAD_PHASE:
  1091. {
  1092. u_int lastphase;
  1093. lastphase = ahc_inb(ahc, LASTPHASE);
  1094. printk("%s:%c:%d: unknown scsi bus phase %x, "
  1095. "lastphase = 0x%x. Attempting to continue\n",
  1096. ahc_name(ahc), devinfo.channel, devinfo.target,
  1097. lastphase, ahc_inb(ahc, SCSISIGI));
  1098. break;
  1099. }
  1100. case MISSED_BUSFREE:
  1101. {
  1102. u_int lastphase;
  1103. lastphase = ahc_inb(ahc, LASTPHASE);
  1104. printk("%s:%c:%d: Missed busfree. "
  1105. "Lastphase = 0x%x, Curphase = 0x%x\n",
  1106. ahc_name(ahc), devinfo.channel, devinfo.target,
  1107. lastphase, ahc_inb(ahc, SCSISIGI));
  1108. ahc_restart(ahc);
  1109. return;
  1110. }
  1111. case HOST_MSG_LOOP:
  1112. {
  1113. /*
  1114. * The sequencer has encountered a message phase
  1115. * that requires host assistance for completion.
  1116. * While handling the message phase(s), we will be
  1117. * notified by the sequencer after each byte is
  1118. * transferred so we can track bus phase changes.
  1119. *
  1120. * If this is the first time we've seen a HOST_MSG_LOOP
  1121. * interrupt, initialize the state of the host message
  1122. * loop.
  1123. */
  1124. if (ahc->msg_type == MSG_TYPE_NONE) {
  1125. struct scb *scb;
  1126. u_int scb_index;
  1127. u_int bus_phase;
  1128. bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
  1129. if (bus_phase != P_MESGIN
  1130. && bus_phase != P_MESGOUT) {
  1131. printk("ahc_intr: HOST_MSG_LOOP bad "
  1132. "phase 0x%x\n",
  1133. bus_phase);
  1134. /*
  1135. * Probably transitioned to bus free before
  1136. * we got here. Just punt the message.
  1137. */
  1138. ahc_clear_intstat(ahc);
  1139. ahc_restart(ahc);
  1140. return;
  1141. }
  1142. scb_index = ahc_inb(ahc, SCB_TAG);
  1143. scb = ahc_lookup_scb(ahc, scb_index);
  1144. if (devinfo.role == ROLE_INITIATOR) {
  1145. if (bus_phase == P_MESGOUT) {
  1146. if (scb == NULL)
  1147. panic("HOST_MSG_LOOP with "
  1148. "invalid SCB %x\n",
  1149. scb_index);
  1150. ahc_setup_initiator_msgout(ahc,
  1151. &devinfo,
  1152. scb);
  1153. } else {
  1154. ahc->msg_type =
  1155. MSG_TYPE_INITIATOR_MSGIN;
  1156. ahc->msgin_index = 0;
  1157. }
  1158. }
  1159. #ifdef AHC_TARGET_MODE
  1160. else {
  1161. if (bus_phase == P_MESGOUT) {
  1162. ahc->msg_type =
  1163. MSG_TYPE_TARGET_MSGOUT;
  1164. ahc->msgin_index = 0;
  1165. } else
  1166. ahc_setup_target_msgin(ahc,
  1167. &devinfo,
  1168. scb);
  1169. }
  1170. #endif
  1171. }
  1172. ahc_handle_message_phase(ahc);
  1173. break;
  1174. }
  1175. case PERR_DETECTED:
  1176. {
  1177. /*
  1178. * If we've cleared the parity error interrupt
  1179. * but the sequencer still believes that SCSIPERR
  1180. * is true, it must be that the parity error is
  1181. * for the currently presented byte on the bus,
  1182. * and we are not in a phase (data-in) where we will
  1183. * eventually ack this byte. Ack the byte and
  1184. * throw it away in the hope that the target will
  1185. * take us to message out to deliver the appropriate
  1186. * error message.
  1187. */
  1188. if ((intstat & SCSIINT) == 0
  1189. && (ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0) {
  1190. if ((ahc->features & AHC_DT) == 0) {
  1191. u_int curphase;
  1192. /*
  1193. * The hardware will only let you ack bytes
  1194. * if the expected phase in SCSISIGO matches
  1195. * the current phase. Make sure this is
  1196. * currently the case.
  1197. */
  1198. curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
  1199. ahc_outb(ahc, LASTPHASE, curphase);
  1200. ahc_outb(ahc, SCSISIGO, curphase);
  1201. }
  1202. if ((ahc_inb(ahc, SCSISIGI) & (CDI|MSGI)) == 0) {
  1203. int wait;
  1204. /*
  1205. * In a data phase. Faster to bitbucket
  1206. * the data than to individually ack each
  1207. * byte. This is also the only strategy
  1208. * that will work with AUTOACK enabled.
  1209. */
  1210. ahc_outb(ahc, SXFRCTL1,
  1211. ahc_inb(ahc, SXFRCTL1) | BITBUCKET);
  1212. wait = 5000;
  1213. while (--wait != 0) {
  1214. if ((ahc_inb(ahc, SCSISIGI)
  1215. & (CDI|MSGI)) != 0)
  1216. break;
  1217. ahc_delay(100);
  1218. }
  1219. ahc_outb(ahc, SXFRCTL1,
  1220. ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
  1221. if (wait == 0) {
  1222. struct scb *scb;
  1223. u_int scb_index;
  1224. ahc_print_devinfo(ahc, &devinfo);
  1225. printk("Unable to clear parity error. "
  1226. "Resetting bus.\n");
  1227. scb_index = ahc_inb(ahc, SCB_TAG);
  1228. scb = ahc_lookup_scb(ahc, scb_index);
  1229. if (scb != NULL)
  1230. ahc_set_transaction_status(scb,
  1231. CAM_UNCOR_PARITY);
  1232. ahc_reset_channel(ahc, devinfo.channel,
  1233. /*init reset*/TRUE);
  1234. }
  1235. } else {
  1236. ahc_inb(ahc, SCSIDATL);
  1237. }
  1238. }
  1239. break;
  1240. }
  1241. case DATA_OVERRUN:
  1242. {
  1243. /*
  1244. * When the sequencer detects an overrun, it
  1245. * places the controller in "BITBUCKET" mode
  1246. * and allows the target to complete its transfer.
  1247. * Unfortunately, none of the counters get updated
  1248. * when the controller is in this mode, so we have
  1249. * no way of knowing how large the overrun was.
  1250. */
  1251. u_int scbindex = ahc_inb(ahc, SCB_TAG);
  1252. u_int lastphase = ahc_inb(ahc, LASTPHASE);
  1253. u_int i;
  1254. scb = ahc_lookup_scb(ahc, scbindex);
  1255. for (i = 0; i < num_phases; i++) {
  1256. if (lastphase == ahc_phase_table[i].phase)
  1257. break;
  1258. }
  1259. ahc_print_path(ahc, scb);
  1260. printk("data overrun detected %s."
  1261. " Tag == 0x%x.\n",
  1262. ahc_phase_table[i].phasemsg,
  1263. scb->hscb->tag);
  1264. ahc_print_path(ahc, scb);
  1265. printk("%s seen Data Phase. Length = %ld. NumSGs = %d.\n",
  1266. ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
  1267. ahc_get_transfer_length(scb), scb->sg_count);
  1268. if (scb->sg_count > 0) {
  1269. for (i = 0; i < scb->sg_count; i++) {
  1270. printk("sg[%d] - Addr 0x%x%x : Length %d\n",
  1271. i,
  1272. (ahc_le32toh(scb->sg_list[i].len) >> 24
  1273. & SG_HIGH_ADDR_BITS),
  1274. ahc_le32toh(scb->sg_list[i].addr),
  1275. ahc_le32toh(scb->sg_list[i].len)
  1276. & AHC_SG_LEN_MASK);
  1277. }
  1278. }
  1279. /*
  1280. * Set this and it will take effect when the
  1281. * target does a command complete.
  1282. */
  1283. ahc_freeze_devq(ahc, scb);
  1284. if ((scb->flags & SCB_SENSE) == 0) {
  1285. ahc_set_transaction_status(scb, CAM_DATA_RUN_ERR);
  1286. } else {
  1287. scb->flags &= ~SCB_SENSE;
  1288. ahc_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
  1289. }
  1290. ahc_freeze_scb(scb);
  1291. if ((ahc->features & AHC_ULTRA2) != 0) {
  1292. /*
  1293. * Clear the channel in case we return
  1294. * to data phase later.
  1295. */
  1296. ahc_outb(ahc, SXFRCTL0,
  1297. ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
  1298. ahc_outb(ahc, SXFRCTL0,
  1299. ahc_inb(ahc, SXFRCTL0) | CLRSTCNT|CLRCHN);
  1300. }
  1301. if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
  1302. u_int dscommand1;
  1303. /* Ensure HHADDR is 0 for future DMA operations. */
  1304. dscommand1 = ahc_inb(ahc, DSCOMMAND1);
  1305. ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
  1306. ahc_outb(ahc, HADDR, 0);
  1307. ahc_outb(ahc, DSCOMMAND1, dscommand1);
  1308. }
  1309. break;
  1310. }
  1311. case MKMSG_FAILED:
  1312. {
  1313. u_int scbindex;
  1314. printk("%s:%c:%d:%d: Attempt to issue message failed\n",
  1315. ahc_name(ahc), devinfo.channel, devinfo.target,
  1316. devinfo.lun);
  1317. scbindex = ahc_inb(ahc, SCB_TAG);
  1318. scb = ahc_lookup_scb(ahc, scbindex);
  1319. if (scb != NULL
  1320. && (scb->flags & SCB_RECOVERY_SCB) != 0)
  1321. /*
  1322. * Ensure that we didn't put a second instance of this
  1323. * SCB into the QINFIFO.
  1324. */
  1325. ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
  1326. SCB_GET_CHANNEL(ahc, scb),
  1327. SCB_GET_LUN(scb), scb->hscb->tag,
  1328. ROLE_INITIATOR, /*status*/0,
  1329. SEARCH_REMOVE);
  1330. break;
  1331. }
  1332. case NO_FREE_SCB:
  1333. {
  1334. printk("%s: No free or disconnected SCBs\n", ahc_name(ahc));
  1335. ahc_dump_card_state(ahc);
  1336. panic("for safety");
  1337. break;
  1338. }
  1339. case SCB_MISMATCH:
  1340. {
  1341. u_int scbptr;
  1342. scbptr = ahc_inb(ahc, SCBPTR);
  1343. printk("Bogus TAG after DMA. SCBPTR %d, tag %d, our tag %d\n",
  1344. scbptr, ahc_inb(ahc, ARG_1),
  1345. ahc->scb_data->hscbs[scbptr].tag);
  1346. ahc_dump_card_state(ahc);
  1347. panic("for safety");
  1348. break;
  1349. }
  1350. case OUT_OF_RANGE:
  1351. {
  1352. printk("%s: BTT calculation out of range\n", ahc_name(ahc));
  1353. printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
  1354. "ARG_1 == 0x%x ACCUM = 0x%x\n",
  1355. ahc_inb(ahc, SAVED_SCSIID), ahc_inb(ahc, SAVED_LUN),
  1356. ahc_inb(ahc, ARG_1), ahc_inb(ahc, ACCUM));
  1357. printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
  1358. "SINDEX == 0x%x\n, A == 0x%x\n",
  1359. ahc_inb(ahc, SEQ_FLAGS), ahc_inb(ahc, SCBPTR),
  1360. ahc_index_busy_tcl(ahc,
  1361. BUILD_TCL(ahc_inb(ahc, SAVED_SCSIID),
  1362. ahc_inb(ahc, SAVED_LUN))),
  1363. ahc_inb(ahc, SINDEX),
  1364. ahc_inb(ahc, ACCUM));
  1365. printk("SCSIID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
  1366. "SCB_TAG == 0x%x, SCB_CONTROL == 0x%x\n",
  1367. ahc_inb(ahc, SCSIID), ahc_inb(ahc, SCB_SCSIID),
  1368. ahc_inb(ahc, SCB_LUN), ahc_inb(ahc, SCB_TAG),
  1369. ahc_inb(ahc, SCB_CONTROL));
  1370. printk("SCSIBUSL == 0x%x, SCSISIGI == 0x%x\n",
  1371. ahc_inb(ahc, SCSIBUSL), ahc_inb(ahc, SCSISIGI));
  1372. ahc_dump_card_state(ahc);
  1373. panic("for safety");
  1374. break;
  1375. }
  1376. default:
  1377. printk("ahc_intr: seqint, "
  1378. "intstat == 0x%x, scsisigi = 0x%x\n",
  1379. intstat, ahc_inb(ahc, SCSISIGI));
  1380. break;
  1381. }
  1382. unpause:
  1383. /*
  1384. * The sequencer is paused immediately on
  1385. * a SEQINT, so we should restart it when
  1386. * we're done.
  1387. */
  1388. ahc_unpause(ahc);
  1389. }
  1390. static void
  1391. ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
  1392. {
  1393. u_int scb_index;
  1394. u_int status0;
  1395. u_int status;
  1396. struct scb *scb;
  1397. char cur_channel;
  1398. char intr_channel;
  1399. if ((ahc->features & AHC_TWIN) != 0
  1400. && ((ahc_inb(ahc, SBLKCTL) & SELBUSB) != 0))
  1401. cur_channel = 'B';
  1402. else
  1403. cur_channel = 'A';
  1404. intr_channel = cur_channel;
  1405. if ((ahc->features & AHC_ULTRA2) != 0)
  1406. status0 = ahc_inb(ahc, SSTAT0) & IOERR;
  1407. else
  1408. status0 = 0;
  1409. status = ahc_inb(ahc, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
  1410. if (status == 0 && status0 == 0) {
  1411. if ((ahc->features & AHC_TWIN) != 0) {
  1412. /* Try the other channel */
  1413. ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
  1414. status = ahc_inb(ahc, SSTAT1)
  1415. & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
  1416. intr_channel = (cur_channel == 'A') ? 'B' : 'A';
  1417. }
  1418. if (status == 0) {
  1419. printk("%s: Spurious SCSI interrupt\n", ahc_name(ahc));
  1420. ahc_outb(ahc, CLRINT, CLRSCSIINT);
  1421. ahc_unpause(ahc);
  1422. return;
  1423. }
  1424. }
  1425. /* Make sure the sequencer is in a safe location. */
  1426. ahc_clear_critical_section(ahc);
  1427. scb_index = ahc_inb(ahc, SCB_TAG);
  1428. scb = ahc_lookup_scb(ahc, scb_index);
  1429. if (scb != NULL
  1430. && (ahc_inb(ahc, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
  1431. scb = NULL;
  1432. if ((ahc->features & AHC_ULTRA2) != 0
  1433. && (status0 & IOERR) != 0) {
  1434. int now_lvd;
  1435. now_lvd = ahc_inb(ahc, SBLKCTL) & ENAB40;
  1436. printk("%s: Transceiver State Has Changed to %s mode\n",
  1437. ahc_name(ahc), now_lvd ? "LVD" : "SE");
  1438. ahc_outb(ahc, CLRSINT0, CLRIOERR);
  1439. /*
  1440. * When transitioning to SE mode, the reset line
  1441. * glitches, triggering an arbitration bug in some
  1442. * Ultra2 controllers. This bug is cleared when we
  1443. * assert the reset line. Since a reset glitch has
  1444. * already occurred with this transition and a
  1445. * transceiver state change is handled just like
  1446. * a bus reset anyway, asserting the reset line
  1447. * ourselves is safe.
  1448. */
  1449. ahc_reset_channel(ahc, intr_channel,
  1450. /*Initiate Reset*/now_lvd == 0);
  1451. } else if ((status & SCSIRSTI) != 0) {
  1452. printk("%s: Someone reset channel %c\n",
  1453. ahc_name(ahc), intr_channel);
  1454. if (intr_channel != cur_channel)
  1455. ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
  1456. ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE);
  1457. } else if ((status & SCSIPERR) != 0) {
  1458. /*
  1459. * Determine the bus phase and queue an appropriate message.
  1460. * SCSIPERR is latched true as soon as a parity error
  1461. * occurs. If the sequencer acked the transfer that
  1462. * caused the parity error and the currently presented
  1463. * transfer on the bus has correct parity, SCSIPERR will
  1464. * be cleared by CLRSCSIPERR. Use this to determine if
  1465. * we should look at the last phase the sequencer recorded,
  1466. * or the current phase presented on the bus.
  1467. */
  1468. struct ahc_devinfo devinfo;
  1469. u_int mesg_out;
  1470. u_int curphase;
  1471. u_int errorphase;
  1472. u_int lastphase;
  1473. u_int scsirate;
  1474. u_int i;
  1475. u_int sstat2;
  1476. int silent;
  1477. lastphase = ahc_inb(ahc, LASTPHASE);
  1478. curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
  1479. sstat2 = ahc_inb(ahc, SSTAT2);
  1480. ahc_outb(ahc, CLRSINT1, CLRSCSIPERR);
  1481. /*
  1482. * For all phases save DATA, the sequencer won't
  1483. * automatically ack a byte that has a parity error
  1484. * in it. So the only way that the current phase
  1485. * could be 'data-in' is if the parity error is for
  1486. * an already acked byte in the data phase. During
  1487. * synchronous data-in transfers, we may actually
  1488. * ack bytes before latching the current phase in
  1489. * LASTPHASE, leading to the discrepancy between
  1490. * curphase and lastphase.
  1491. */
  1492. if ((ahc_inb(ahc, SSTAT1) & SCSIPERR) != 0
  1493. || curphase == P_DATAIN || curphase == P_DATAIN_DT)
  1494. errorphase = curphase;
  1495. else
  1496. errorphase = lastphase;
  1497. for (i = 0; i < num_phases; i++) {
  1498. if (errorphase == ahc_phase_table[i].phase)
  1499. break;
  1500. }
  1501. mesg_out = ahc_phase_table[i].mesg_out;
  1502. silent = FALSE;
  1503. if (scb != NULL) {
  1504. if (SCB_IS_SILENT(scb))
  1505. silent = TRUE;
  1506. else
  1507. ahc_print_path(ahc, scb);
  1508. scb->flags |= SCB_TRANSMISSION_ERROR;
  1509. } else
  1510. printk("%s:%c:%d: ", ahc_name(ahc), intr_channel,
  1511. SCSIID_TARGET(ahc, ahc_inb(ahc, SAVED_SCSIID)));
  1512. scsirate = ahc_inb(ahc, SCSIRATE);
  1513. if (silent == FALSE) {
  1514. printk("parity error detected %s. "
  1515. "SEQADDR(0x%x) SCSIRATE(0x%x)\n",
  1516. ahc_phase_table[i].phasemsg,
  1517. ahc_inw(ahc, SEQADDR0),
  1518. scsirate);
  1519. if ((ahc->features & AHC_DT) != 0) {
  1520. if ((sstat2 & CRCVALERR) != 0)
  1521. printk("\tCRC Value Mismatch\n");
  1522. if ((sstat2 & CRCENDERR) != 0)
  1523. printk("\tNo terminal CRC packet "
  1524. "received\n");
  1525. if ((sstat2 & CRCREQERR) != 0)
  1526. printk("\tIllegal CRC packet "
  1527. "request\n");
  1528. if ((sstat2 & DUAL_EDGE_ERR) != 0)
  1529. printk("\tUnexpected %sDT Data Phase\n",
  1530. (scsirate & SINGLE_EDGE)
  1531. ? "" : "non-");
  1532. }
  1533. }
  1534. if ((ahc->features & AHC_DT) != 0
  1535. && (sstat2 & DUAL_EDGE_ERR) != 0) {
  1536. /*
  1537. * This error applies regardless of
  1538. * data direction, so ignore the value
  1539. * in the phase table.
  1540. */
  1541. mesg_out = INITIATOR_ERROR;
  1542. }
  1543. /*
  1544. * We've set the hardware to assert ATN if we
  1545. * get a parity error on "in" phases, so all we
  1546. * need to do is stuff the message buffer with
  1547. * the appropriate message. "In" phases have set
  1548. * mesg_out to something other than MSG_NOP.
  1549. */
  1550. if (mesg_out != NOP) {
  1551. if (ahc->msg_type != MSG_TYPE_NONE)
  1552. ahc->send_msg_perror = TRUE;
  1553. else
  1554. ahc_outb(ahc, MSG_OUT, mesg_out);
  1555. }
  1556. /*
  1557. * Force a renegotiation with this target just in
  1558. * case we are out of sync for some external reason
  1559. * unknown (or unreported) by the target.
  1560. */
  1561. ahc_fetch_devinfo(ahc, &devinfo);
  1562. ahc_force_renegotiation(ahc, &devinfo);
  1563. ahc_outb(ahc, CLRINT, CLRSCSIINT);
  1564. ahc_unpause(ahc);
  1565. } else if ((status & SELTO) != 0) {
  1566. u_int scbptr;
  1567. /* Stop the selection */
  1568. ahc_outb(ahc, SCSISEQ, 0);
  1569. /* No more pending messages */
  1570. ahc_clear_msg_state(ahc);
  1571. /* Clear interrupt state */
  1572. ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
  1573. ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);
  1574. /*
  1575. * Although the driver does not care about the
  1576. * 'Selection in Progress' status bit, the busy
  1577. * LED does. SELINGO is only cleared by a successful
1578. * selection, so we must manually clear it to ensure
1579. * the LED turns off just in case no future successful
  1580. * selections occur (e.g. no devices on the bus).
  1581. */
  1582. ahc_outb(ahc, CLRSINT0, CLRSELINGO);
  1583. scbptr = ahc_inb(ahc, WAITING_SCBH);
  1584. ahc_outb(ahc, SCBPTR, scbptr);
  1585. scb_index = ahc_inb(ahc, SCB_TAG);
  1586. scb = ahc_lookup_scb(ahc, scb_index);
  1587. if (scb == NULL) {
  1588. printk("%s: ahc_intr - referenced scb not "
  1589. "valid during SELTO scb(%d, %d)\n",
  1590. ahc_name(ahc), scbptr, scb_index);
  1591. ahc_dump_card_state(ahc);
  1592. } else {
  1593. struct ahc_devinfo devinfo;
  1594. #ifdef AHC_DEBUG
  1595. if ((ahc_debug & AHC_SHOW_SELTO) != 0) {
  1596. ahc_print_path(ahc, scb);
  1597. printk("Saw Selection Timeout for SCB 0x%x\n",
  1598. scb_index);
  1599. }
  1600. #endif
  1601. ahc_scb_devinfo(ahc, &devinfo, scb);
  1602. ahc_set_transaction_status(scb, CAM_SEL_TIMEOUT);
  1603. ahc_freeze_devq(ahc, scb);
  1604. /*
  1605. * Cancel any pending transactions on the device
  1606. * now that it seems to be missing. This will
  1607. * also revert us to async/narrow transfers until
  1608. * we can renegotiate with the device.
  1609. */
  1610. ahc_handle_devreset(ahc, &devinfo,
  1611. CAM_SEL_TIMEOUT,
  1612. "Selection Timeout",
  1613. /*verbose_level*/1);
  1614. }
  1615. ahc_outb(ahc, CLRINT, CLRSCSIINT);
  1616. ahc_restart(ahc);
  1617. } else if ((status & BUSFREE) != 0
  1618. && (ahc_inb(ahc, SIMODE1) & ENBUSFREE) != 0) {
  1619. struct ahc_devinfo devinfo;
  1620. u_int lastphase;
  1621. u_int saved_scsiid;
  1622. u_int saved_lun;
  1623. u_int target;
  1624. u_int initiator_role_id;
  1625. char channel;
  1626. int printerror;
  1627. /*
  1628. * Clear our selection hardware as soon as possible.
  1629. * We may have an entry in the waiting Q for this target,
  1630. * that is affected by this busfree and we don't want to
  1631. * go about selecting the target while we handle the event.
  1632. */
  1633. ahc_outb(ahc, SCSISEQ,
  1634. ahc_inb(ahc, SCSISEQ) & (ENSELI|ENRSELI|ENAUTOATNP));
  1635. /*
  1636. * Disable busfree interrupts and clear the busfree
  1637. * interrupt status. We do this here so that several
  1638. * bus transactions occur prior to clearing the SCSIINT
  1639. * latch. It can take a bit for the clearing to take effect.
  1640. */
  1641. ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENBUSFREE);
  1642. ahc_outb(ahc, CLRSINT1, CLRBUSFREE|CLRSCSIPERR);
  1643. /*
  1644. * Look at what phase we were last in.
1645. * If it was message out, chances are pretty good
  1646. * that the busfree was in response to one of
  1647. * our abort requests.
  1648. */
  1649. lastphase = ahc_inb(ahc, LASTPHASE);
  1650. saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
  1651. saved_lun = ahc_inb(ahc, SAVED_LUN);
  1652. target = SCSIID_TARGET(ahc, saved_scsiid);
  1653. initiator_role_id = SCSIID_OUR_ID(saved_scsiid);
  1654. channel = SCSIID_CHANNEL(ahc, saved_scsiid);
  1655. ahc_compile_devinfo(&devinfo, initiator_role_id,
  1656. target, saved_lun, channel, ROLE_INITIATOR);
  1657. printerror = 1;
  1658. if (lastphase == P_MESGOUT) {
  1659. u_int tag;
  1660. tag = SCB_LIST_NULL;
  1661. if (ahc_sent_msg(ahc, AHCMSG_1B, ABORT_TASK, TRUE)
  1662. || ahc_sent_msg(ahc, AHCMSG_1B, ABORT_TASK_SET, TRUE)) {
  1663. if (ahc->msgout_buf[ahc->msgout_index - 1]
  1664. == ABORT_TASK)
  1665. tag = scb->hscb->tag;
  1666. ahc_print_path(ahc, scb);
  1667. printk("SCB %d - Abort%s Completed.\n",
  1668. scb->hscb->tag, tag == SCB_LIST_NULL ?
  1669. "" : " Tag");
  1670. ahc_abort_scbs(ahc, target, channel,
  1671. saved_lun, tag,
  1672. ROLE_INITIATOR,
  1673. CAM_REQ_ABORTED);
  1674. printerror = 0;
  1675. } else if (ahc_sent_msg(ahc, AHCMSG_1B,
  1676. TARGET_RESET, TRUE)) {
  1677. ahc_compile_devinfo(&devinfo,
  1678. initiator_role_id,
  1679. target,
  1680. CAM_LUN_WILDCARD,
  1681. channel,
  1682. ROLE_INITIATOR);
  1683. ahc_handle_devreset(ahc, &devinfo,
  1684. CAM_BDR_SENT,
  1685. "Bus Device Reset",
  1686. /*verbose_level*/0);
  1687. printerror = 0;
  1688. } else if (ahc_sent_msg(ahc, AHCMSG_EXT,
  1689. EXTENDED_PPR, FALSE)) {
  1690. struct ahc_initiator_tinfo *tinfo;
  1691. struct ahc_tmode_tstate *tstate;
  1692. /*
  1693. * PPR Rejected. Try non-ppr negotiation
  1694. * and retry command.
  1695. */
  1696. tinfo = ahc_fetch_transinfo(ahc,
  1697. devinfo.channel,
  1698. devinfo.our_scsiid,
  1699. devinfo.target,
  1700. &tstate);
  1701. tinfo->curr.transport_version = 2;
  1702. tinfo->goal.transport_version = 2;
  1703. tinfo->goal.ppr_options = 0;
  1704. ahc_qinfifo_requeue_tail(ahc, scb);
  1705. printerror = 0;
  1706. } else if (ahc_sent_msg(ahc, AHCMSG_EXT,
  1707. EXTENDED_WDTR, FALSE)) {
  1708. /*
  1709. * Negotiation Rejected. Go-narrow and
  1710. * retry command.
  1711. */
  1712. ahc_set_width(ahc, &devinfo,
  1713. MSG_EXT_WDTR_BUS_8_BIT,
  1714. AHC_TRANS_CUR|AHC_TRANS_GOAL,
  1715. /*paused*/TRUE);
  1716. ahc_qinfifo_requeue_tail(ahc, scb);
  1717. printerror = 0;
  1718. } else if (ahc_sent_msg(ahc, AHCMSG_EXT,
  1719. EXTENDED_SDTR, FALSE)) {
  1720. /*
  1721. * Negotiation Rejected. Go-async and
  1722. * retry command.
  1723. */
  1724. ahc_set_syncrate(ahc, &devinfo,
  1725. /*syncrate*/NULL,
  1726. /*period*/0, /*offset*/0,
  1727. /*ppr_options*/0,
  1728. AHC_TRANS_CUR|AHC_TRANS_GOAL,
  1729. /*paused*/TRUE);
  1730. ahc_qinfifo_requeue_tail(ahc, scb);
  1731. printerror = 0;
  1732. }
  1733. }
  1734. if (printerror != 0) {
  1735. u_int i;
  1736. if (scb != NULL) {
  1737. u_int tag;
  1738. if ((scb->hscb->control & TAG_ENB) != 0)
  1739. tag = scb->hscb->tag;
  1740. else
  1741. tag = SCB_LIST_NULL;
  1742. ahc_print_path(ahc, scb);
  1743. ahc_abort_scbs(ahc, target, channel,
  1744. SCB_GET_LUN(scb), tag,
  1745. ROLE_INITIATOR,
  1746. CAM_UNEXP_BUSFREE);
  1747. } else {
  1748. /*
  1749. * We had not fully identified this connection,
  1750. * so we cannot abort anything.
  1751. */
  1752. printk("%s: ", ahc_name(ahc));
  1753. }
  1754. for (i = 0; i < num_phases; i++) {
  1755. if (lastphase == ahc_phase_table[i].phase)
  1756. break;
  1757. }
  1758. if (lastphase != P_BUSFREE) {
  1759. /*
  1760. * Renegotiate with this device at the
  1761. * next opportunity just in case this busfree
  1762. * is due to a negotiation mismatch with the
  1763. * device.
  1764. */
  1765. ahc_force_renegotiation(ahc, &devinfo);
  1766. }
  1767. printk("Unexpected busfree %s\n"
  1768. "SEQADDR == 0x%x\n",
  1769. ahc_phase_table[i].phasemsg,
  1770. ahc_inb(ahc, SEQADDR0)
  1771. | (ahc_inb(ahc, SEQADDR1) << 8));
  1772. }
  1773. ahc_outb(ahc, CLRINT, CLRSCSIINT);
  1774. ahc_restart(ahc);
  1775. } else {
  1776. printk("%s: Missing case in ahc_handle_scsiint. status = %x\n",
  1777. ahc_name(ahc), status);
  1778. ahc_outb(ahc, CLRINT, CLRSCSIINT);
  1779. }
  1780. }
  1781. /*
  1782. * Force renegotiation to occur the next time we initiate
  1783. * a command to the current device.
  1784. */
  1785. static void
  1786. ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
  1787. {
  1788. struct ahc_initiator_tinfo *targ_info;
  1789. struct ahc_tmode_tstate *tstate;
  1790. targ_info = ahc_fetch_transinfo(ahc,
  1791. devinfo->channel,
  1792. devinfo->our_scsiid,
  1793. devinfo->target,
  1794. &tstate);
  1795. ahc_update_neg_request(ahc, devinfo, tstate,
  1796. targ_info, AHC_NEG_IF_NON_ASYNC);
  1797. }
  1798. #define AHC_MAX_STEPS 2000
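/*
 * If the sequencer is paused inside one of the firmware's critical
 * sections, single-step it out with the pausing interrupt sources
 * masked, giving up (and panicking) after AHC_MAX_STEPS steps.
 */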
  1799. static void
  1800. ahc_clear_critical_section(struct ahc_softc *ahc)
  1801. {
  1802. int stepping;
  1803. int steps;
  1804. u_int simode0;
  1805. u_int simode1;
  1806. if (ahc->num_critical_sections == 0)
  1807. return;
  1808. stepping = FALSE;
  1809. steps = 0;
  1810. simode0 = 0;
  1811. simode1 = 0;
  1812. for (;;) {
  1813. struct cs *cs;
  1814. u_int seqaddr;
  1815. u_int i;
  1816. seqaddr = ahc_inb(ahc, SEQADDR0)
  1817. | (ahc_inb(ahc, SEQADDR1) << 8);
  1818. /*
  1819. * Seqaddr represents the next instruction to execute,
  1820. * so we are really executing the instruction just
  1821. * before it.
  1822. */
  1823. if (seqaddr != 0)
  1824. seqaddr -= 1;
  1825. cs = ahc->critical_sections;
  1826. for (i = 0; i < ahc->num_critical_sections; i++, cs++) {
  1827. if (cs->begin < seqaddr && cs->end >= seqaddr)
  1828. break;
  1829. }
  1830. if (i == ahc->num_critical_sections)
  1831. break;
  1832. if (steps > AHC_MAX_STEPS) {
  1833. printk("%s: Infinite loop in critical section\n",
  1834. ahc_name(ahc));
  1835. ahc_dump_card_state(ahc);
  1836. panic("critical section loop");
  1837. }
  1838. steps++;
  1839. if (stepping == FALSE) {
  1840. /*
  1841. * Disable all interrupt sources so that the
  1842. * sequencer will not be stuck by a pausing
  1843. * interrupt condition while we attempt to
  1844. * leave a critical section.
  1845. */
  1846. simode0 = ahc_inb(ahc, SIMODE0);
  1847. ahc_outb(ahc, SIMODE0, 0);
  1848. simode1 = ahc_inb(ahc, SIMODE1);
  1849. if ((ahc->features & AHC_DT) != 0)
  1850. /*
  1851. * On DT class controllers, we
  1852. * use the enhanced busfree logic.
  1853. * Unfortunately we cannot re-enable
  1854. * busfree detection within the
  1855. * current connection, so we must
  1856. * leave it on while single stepping.
  1857. */
  1858. ahc_outb(ahc, SIMODE1, simode1 & ENBUSFREE);
  1859. else
  1860. ahc_outb(ahc, SIMODE1, 0);
  1861. ahc_outb(ahc, CLRINT, CLRSCSIINT);
  1862. ahc_outb(ahc, SEQCTL, ahc->seqctl | STEP);
  1863. stepping = TRUE;
  1864. }
  1865. if ((ahc->features & AHC_DT) != 0) {
  1866. ahc_outb(ahc, CLRSINT1, CLRBUSFREE);
  1867. ahc_outb(ahc, CLRINT, CLRSCSIINT);
  1868. }
  1869. ahc_outb(ahc, HCNTRL, ahc->unpause);
  1870. while (!ahc_is_paused(ahc))
  1871. ahc_delay(200);
  1872. }
  1873. if (stepping) {
  1874. ahc_outb(ahc, SIMODE0, simode0);
  1875. ahc_outb(ahc, SIMODE1, simode1);
  1876. ahc_outb(ahc, SEQCTL, ahc->seqctl);
  1877. }
  1878. }
  1879. /*
  1880. * Clear any pending interrupt status.
  1881. */
  1882. static void
  1883. ahc_clear_intstat(struct ahc_softc *ahc)
  1884. {
  1885. /* Clear any interrupt conditions this may have caused */
  1886. ahc_outb(ahc, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
  1887. |CLRBUSFREE|CLRSCSIPERR|CLRPHASECHG|
  1888. CLRREQINIT);
  1889. ahc_flush_device_writes(ahc);
  1890. ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
  1891. ahc_flush_device_writes(ahc);
  1892. ahc_outb(ahc, CLRINT, CLRSCSIINT);
  1893. ahc_flush_device_writes(ahc);
  1894. }
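/*
 * Each clear is followed by an explicit flush, presumably to keep
 * posted bus writes from deferring the clears past whatever the caller
 * does next, much like the CMDCMPLT handling in ahc_intr() above.
 */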
  1895. /**************************** Debugging Routines ******************************/
  1896. #ifdef AHC_DEBUG
  1897. uint32_t ahc_debug = AHC_DEBUG_OPTS;
  1898. #endif
  1899. #if 0 /* unused */
  1900. static void
  1901. ahc_print_scb(struct scb *scb)
  1902. {
  1903. int i;
  1904. struct hardware_scb *hscb = scb->hscb;
  1905. printk("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n",
  1906. (void *)scb,
  1907. hscb->control,
  1908. hscb->scsiid,
  1909. hscb->lun,
  1910. hscb->cdb_len);
  1911. printk("Shared Data: ");
  1912. for (i = 0; i < sizeof(hscb->shared_data.cdb); i++)
  1913. printk("%#02x", hscb->shared_data.cdb[i]);
  1914. printk(" dataptr:%#x datacnt:%#x sgptr:%#x tag:%#x\n",
  1915. ahc_le32toh(hscb->dataptr),
  1916. ahc_le32toh(hscb->datacnt),
  1917. ahc_le32toh(hscb->sgptr),
  1918. hscb->tag);
  1919. if (scb->sg_count > 0) {
  1920. for (i = 0; i < scb->sg_count; i++) {
  1921. printk("sg[%d] - Addr 0x%x%x : Length %d\n",
  1922. i,
  1923. (ahc_le32toh(scb->sg_list[i].len) >> 24
  1924. & SG_HIGH_ADDR_BITS),
  1925. ahc_le32toh(scb->sg_list[i].addr),
  1926. ahc_le32toh(scb->sg_list[i].len));
  1927. }
  1928. }
  1929. }
  1930. #endif
  1931. /************************* Transfer Negotiation *******************************/
  1932. /*
  1933. * Allocate per target mode instance (ID we respond to as a target)
  1934. * transfer negotiation data structures.
  1935. */
  1936. static struct ahc_tmode_tstate *
  1937. ahc_alloc_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel)
  1938. {
  1939. struct ahc_tmode_tstate *master_tstate;
  1940. struct ahc_tmode_tstate *tstate;
  1941. int i;
  1942. master_tstate = ahc->enabled_targets[ahc->our_id];
  1943. if (channel == 'B') {
  1944. scsi_id += 8;
  1945. master_tstate = ahc->enabled_targets[ahc->our_id_b + 8];
  1946. }
  1947. if (ahc->enabled_targets[scsi_id] != NULL
  1948. && ahc->enabled_targets[scsi_id] != master_tstate)
  1949. panic("%s: ahc_alloc_tstate - Target already allocated",
  1950. ahc_name(ahc));
  1951. tstate = kmalloc(sizeof(*tstate), GFP_ATOMIC);
  1952. if (tstate == NULL)
  1953. return (NULL);
  1954. /*
  1955. * If we have allocated a master tstate, copy user settings from
  1956. * the master tstate (taken from SRAM or the EEPROM) for this
  1957. * channel, but reset our current and goal settings to async/narrow
  1958. * until an initiator talks to us.
  1959. */
  1960. if (master_tstate != NULL) {
  1961. memcpy(tstate, master_tstate, sizeof(*tstate));
  1962. memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns));
  1963. tstate->ultraenb = 0;
  1964. for (i = 0; i < AHC_NUM_TARGETS; i++) {
  1965. memset(&tstate->transinfo[i].curr, 0,
  1966. sizeof(tstate->transinfo[i].curr));
  1967. memset(&tstate->transinfo[i].goal, 0,
  1968. sizeof(tstate->transinfo[i].goal));
  1969. }
  1970. } else
  1971. memset(tstate, 0, sizeof(*tstate));
  1972. ahc->enabled_targets[scsi_id] = tstate;
  1973. return (tstate);
  1974. }
  1975. #ifdef AHC_TARGET_MODE
  1976. /*
  1977. * Free per target mode instance (ID we respond to as a target)
  1978. * transfer negotiation data structures.
  1979. */
  1980. static void
  1981. ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
  1982. {
  1983. struct ahc_tmode_tstate *tstate;
  1984. /*
  1985. * Don't clean up our "master" tstate.
  1986. * It has our default user settings.
  1987. */
  1988. if (((channel == 'B' && scsi_id == ahc->our_id_b)
  1989. || (channel == 'A' && scsi_id == ahc->our_id))
  1990. && force == FALSE)
  1991. return;
  1992. if (channel == 'B')
  1993. scsi_id += 8;
  1994. tstate = ahc->enabled_targets[scsi_id];
  1995. kfree(tstate);
  1996. ahc->enabled_targets[scsi_id] = NULL;
  1997. }
  1998. #endif
  1999. /*
  2000. * Called when we have an active connection to a target on the bus,
  2001. * this function finds the nearest syncrate to the input period limited
2002. * by the capabilities of the bus connectivity and the sync settings
2003. * for the target.
  2004. */
  2005. static const struct ahc_syncrate *
  2006. ahc_devlimited_syncrate(struct ahc_softc *ahc,
  2007. struct ahc_initiator_tinfo *tinfo,
  2008. u_int *period, u_int *ppr_options, role_t role)
  2009. {
  2010. struct ahc_transinfo *transinfo;
  2011. u_int maxsync;
  2012. if ((ahc->features & AHC_ULTRA2) != 0) {
  2013. if ((ahc_inb(ahc, SBLKCTL) & ENAB40) != 0
  2014. && (ahc_inb(ahc, SSTAT2) & EXP_ACTIVE) == 0) {
  2015. maxsync = AHC_SYNCRATE_DT;
  2016. } else {
  2017. maxsync = AHC_SYNCRATE_ULTRA;
  2018. /* Can't do DT on an SE bus */
  2019. *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
  2020. }
  2021. } else if ((ahc->features & AHC_ULTRA) != 0) {
  2022. maxsync = AHC_SYNCRATE_ULTRA;
  2023. } else {
  2024. maxsync = AHC_SYNCRATE_FAST;
  2025. }
  2026. /*
  2027. * Never allow a value higher than our current goal
  2028. * period otherwise we may allow a target initiated
  2029. * negotiation to go above the limit as set by the
  2030. * user. In the case of an initiator initiated
  2031. * sync negotiation, we limit based on the user
  2032. * setting. This allows the system to still accept
  2033. * incoming negotiations even if target initiated
  2034. * negotiation is not performed.
  2035. */
  2036. if (role == ROLE_TARGET)
  2037. transinfo = &tinfo->user;
  2038. else
  2039. transinfo = &tinfo->goal;
  2040. *ppr_options &= transinfo->ppr_options;
  2041. if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
  2042. maxsync = max(maxsync, (u_int)AHC_SYNCRATE_ULTRA2);
  2043. *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
  2044. }
  2045. if (transinfo->period == 0) {
  2046. *period = 0;
  2047. *ppr_options = 0;
  2048. return (NULL);
  2049. }
  2050. *period = max(*period, (u_int)transinfo->period);
  2051. return (ahc_find_syncrate(ahc, period, ppr_options, maxsync));
  2052. }
  2053. /*
  2054. * Look up the valid period to SCSIRATE conversion in our table.
  2055. * Return the period and offset that should be sent to the target
  2056. * if this was the beginning of an SDTR.
  2057. */
  2058. const struct ahc_syncrate *
  2059. ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
  2060. u_int *ppr_options, u_int maxsync)
  2061. {
  2062. const struct ahc_syncrate *syncrate;
  2063. if ((ahc->features & AHC_DT) == 0)
  2064. *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
  2065. /* Skip all DT only entries if DT is not available */
  2066. if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
  2067. && maxsync < AHC_SYNCRATE_ULTRA2)
  2068. maxsync = AHC_SYNCRATE_ULTRA2;
2069. /* Now set the maxsync based on the card capabilities;
2070. * DT is already done above. */
  2071. if ((ahc->features & (AHC_DT | AHC_ULTRA2)) == 0
  2072. && maxsync < AHC_SYNCRATE_ULTRA)
  2073. maxsync = AHC_SYNCRATE_ULTRA;
  2074. if ((ahc->features & (AHC_DT | AHC_ULTRA2 | AHC_ULTRA)) == 0
  2075. && maxsync < AHC_SYNCRATE_FAST)
  2076. maxsync = AHC_SYNCRATE_FAST;
  2077. for (syncrate = &ahc_syncrates[maxsync];
  2078. syncrate->rate != NULL;
  2079. syncrate++) {
  2080. /*
  2081. * The Ultra2 table doesn't go as low
  2082. * as for the Fast/Ultra cards.
  2083. */
  2084. if ((ahc->features & AHC_ULTRA2) != 0
  2085. && (syncrate->sxfr_u2 == 0))
  2086. break;
  2087. if (*period <= syncrate->period) {
  2088. /*
  2089. * When responding to a target that requests
  2090. * sync, the requested rate may fall between
  2091. * two rates that we can output, but still be
  2092. * a rate that we can receive. Because of this,
  2093. * we want to respond to the target with
  2094. * the same rate that it sent to us even
  2095. * if the period we use to send data to it
  2096. * is lower. Only lower the response period
  2097. * if we must.
  2098. */
  2099. if (syncrate == &ahc_syncrates[maxsync])
  2100. *period = syncrate->period;
  2101. /*
  2102. * At some speeds, we only support
  2103. * ST transfers.
  2104. */
  2105. if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
  2106. *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
  2107. break;
  2108. }
  2109. }
  2110. if ((*period == 0)
  2111. || (syncrate->rate == NULL)
  2112. || ((ahc->features & AHC_ULTRA2) != 0
  2113. && (syncrate->sxfr_u2 == 0))) {
  2114. /* Use asynchronous transfers. */
  2115. *period = 0;
  2116. syncrate = NULL;
  2117. *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
  2118. }
  2119. return (syncrate);
  2120. }
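/*
 * In effect the table walk rounds a requested period up (i.e. slows it
 * down) to the nearest rate this controller can generate, strips the DT
 * option whenever the chosen entry only supports ST transfers, and
 * falls back to async when nothing in the table fits.
 */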
  2121. /*
  2122. * Convert from an entry in our syncrate table to the SCSI equivalent
  2123. * sync "period" factor.
  2124. */
  2125. u_int
  2126. ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
  2127. {
  2128. const struct ahc_syncrate *syncrate;
  2129. if ((ahc->features & AHC_ULTRA2) != 0)
  2130. scsirate &= SXFR_ULTRA2;
  2131. else
  2132. scsirate &= SXFR;
  2133. /* now set maxsync based on card capabilities */
  2134. if ((ahc->features & AHC_DT) == 0 && maxsync < AHC_SYNCRATE_ULTRA2)
  2135. maxsync = AHC_SYNCRATE_ULTRA2;
  2136. if ((ahc->features & (AHC_DT | AHC_ULTRA2)) == 0
  2137. && maxsync < AHC_SYNCRATE_ULTRA)
  2138. maxsync = AHC_SYNCRATE_ULTRA;
  2139. if ((ahc->features & (AHC_DT | AHC_ULTRA2 | AHC_ULTRA)) == 0
  2140. && maxsync < AHC_SYNCRATE_FAST)
  2141. maxsync = AHC_SYNCRATE_FAST;
  2142. syncrate = &ahc_syncrates[maxsync];
  2143. while (syncrate->rate != NULL) {
  2144. if ((ahc->features & AHC_ULTRA2) != 0) {
  2145. if (syncrate->sxfr_u2 == 0)
  2146. break;
  2147. else if (scsirate == (syncrate->sxfr_u2 & SXFR_ULTRA2))
  2148. return (syncrate->period);
  2149. } else if (scsirate == (syncrate->sxfr & SXFR)) {
  2150. return (syncrate->period);
  2151. }
  2152. syncrate++;
  2153. }
  2154. return (0); /* async */
  2155. }
  2156. /*
  2157. * Truncate the given synchronous offset to a value the
  2158. * current adapter type and syncrate are capable of.
  2159. */
  2160. static void
  2161. ahc_validate_offset(struct ahc_softc *ahc,
  2162. struct ahc_initiator_tinfo *tinfo,
  2163. const struct ahc_syncrate *syncrate,
  2164. u_int *offset, int wide, role_t role)
  2165. {
  2166. u_int maxoffset;
  2167. /* Limit offset to what we can do */
  2168. if (syncrate == NULL) {
  2169. maxoffset = 0;
  2170. } else if ((ahc->features & AHC_ULTRA2) != 0) {
  2171. maxoffset = MAX_OFFSET_ULTRA2;
  2172. } else {
  2173. if (wide)
  2174. maxoffset = MAX_OFFSET_16BIT;
  2175. else
  2176. maxoffset = MAX_OFFSET_8BIT;
  2177. }
  2178. *offset = min(*offset, maxoffset);
  2179. if (tinfo != NULL) {
  2180. if (role == ROLE_TARGET)
  2181. *offset = min(*offset, (u_int)tinfo->user.offset);
  2182. else
  2183. *offset = min(*offset, (u_int)tinfo->goal.offset);
  2184. }
  2185. }
  2186. /*
  2187. * Truncate the given transfer width parameter to a value the
  2188. * current adapter type is capable of.
  2189. */
  2190. static void
  2191. ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
  2192. u_int *bus_width, role_t role)
  2193. {
  2194. switch (*bus_width) {
  2195. default:
  2196. if (ahc->features & AHC_WIDE) {
  2197. /* Respond Wide */
  2198. *bus_width = MSG_EXT_WDTR_BUS_16_BIT;
  2199. break;
  2200. }
  2201. fallthrough;
  2202. case MSG_EXT_WDTR_BUS_8_BIT:
  2203. *bus_width = MSG_EXT_WDTR_BUS_8_BIT;
  2204. break;
  2205. }
  2206. if (tinfo != NULL) {
  2207. if (role == ROLE_TARGET)
  2208. *bus_width = min((u_int)tinfo->user.width, *bus_width);
  2209. else
  2210. *bus_width = min((u_int)tinfo->goal.width, *bus_width);
  2211. }
  2212. }
  2213. /*
  2214. * Update the bitmask of targets for which the controller should
  2215. * negotiate with at the next convenient opportunity. This currently
  2216. * means the next time we send the initial identify messages for
  2217. * a new transaction.
  2218. */
  2219. int
  2220. ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
  2221. struct ahc_tmode_tstate *tstate,
  2222. struct ahc_initiator_tinfo *tinfo, ahc_neg_type neg_type)
  2223. {
  2224. u_int auto_negotiate_orig;
  2225. auto_negotiate_orig = tstate->auto_negotiate;
  2226. if (neg_type == AHC_NEG_ALWAYS) {
  2227. /*
  2228. * Force our "current" settings to be
  2229. * unknown so that unless a bus reset
  2230. * occurs the need to renegotiate is
  2231. * recorded persistently.
  2232. */
  2233. if ((ahc->features & AHC_WIDE) != 0)
  2234. tinfo->curr.width = AHC_WIDTH_UNKNOWN;
  2235. tinfo->curr.period = AHC_PERIOD_UNKNOWN;
  2236. tinfo->curr.offset = AHC_OFFSET_UNKNOWN;
  2237. }
  2238. if (tinfo->curr.period != tinfo->goal.period
  2239. || tinfo->curr.width != tinfo->goal.width
  2240. || tinfo->curr.offset != tinfo->goal.offset
  2241. || tinfo->curr.ppr_options != tinfo->goal.ppr_options
  2242. || (neg_type == AHC_NEG_IF_NON_ASYNC
  2243. && (tinfo->goal.offset != 0
  2244. || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT
  2245. || tinfo->goal.ppr_options != 0)))
  2246. tstate->auto_negotiate |= devinfo->target_mask;
  2247. else
  2248. tstate->auto_negotiate &= ~devinfo->target_mask;
  2249. return (auto_negotiate_orig != tstate->auto_negotiate);
  2250. }
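/*
 * Roughly: a target is flagged for (re)negotiation whenever its current
 * parameters differ from the goal, or, for AHC_NEG_IF_NON_ASYNC, whenever
 * the goal itself is anything other than async/narrow; the return value
 * reports whether the auto_negotiate bitmask actually changed.
 */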
  2251. /*
  2252. * Update the user/goal/curr tables of synchronous negotiation
  2253. * parameters as well as, in the case of a current or active update,
  2254. * any data structures on the host controller. In the case of an
  2255. * active update, the specified target is currently talking to us on
  2256. * the bus, so the transfer parameter update must take effect
  2257. * immediately.
  2258. */
  2259. void
  2260. ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
  2261. const struct ahc_syncrate *syncrate, u_int period,
  2262. u_int offset, u_int ppr_options, u_int type, int paused)
  2263. {
  2264. struct ahc_initiator_tinfo *tinfo;
  2265. struct ahc_tmode_tstate *tstate;
  2266. u_int old_period;
  2267. u_int old_offset;
  2268. u_int old_ppr;
  2269. int active;
  2270. int update_needed;
  2271. active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
  2272. update_needed = 0;
  2273. if (syncrate == NULL) {
  2274. period = 0;
  2275. offset = 0;
  2276. }
  2277. tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
  2278. devinfo->target, &tstate);
  2279. if ((type & AHC_TRANS_USER) != 0) {
  2280. tinfo->user.period = period;
  2281. tinfo->user.offset = offset;
  2282. tinfo->user.ppr_options = ppr_options;
  2283. }
  2284. if ((type & AHC_TRANS_GOAL) != 0) {
  2285. tinfo->goal.period = period;
  2286. tinfo->goal.offset = offset;
  2287. tinfo->goal.ppr_options = ppr_options;
  2288. }
  2289. old_period = tinfo->curr.period;
  2290. old_offset = tinfo->curr.offset;
  2291. old_ppr = tinfo->curr.ppr_options;
  2292. if ((type & AHC_TRANS_CUR) != 0
  2293. && (old_period != period
  2294. || old_offset != offset
  2295. || old_ppr != ppr_options)) {
  2296. u_int scsirate;
  2297. update_needed++;
  2298. scsirate = tinfo->scsirate;
  2299. if ((ahc->features & AHC_ULTRA2) != 0) {
  2300. scsirate &= ~(SXFR_ULTRA2|SINGLE_EDGE|ENABLE_CRC);
  2301. if (syncrate != NULL) {
  2302. scsirate |= syncrate->sxfr_u2;
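/*
 * Per SPI-3, DT (double transition) data phases are protected by
 * CRC rather than parity, so enable CRC whenever DT is negotiated.
 */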
  2303. if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0)
  2304. scsirate |= ENABLE_CRC;
  2305. else
  2306. scsirate |= SINGLE_EDGE;
  2307. }
  2308. } else {
  2309. scsirate &= ~(SXFR|SOFS);
  2310. /*
  2311. * Ensure Ultra mode is set properly for
  2312. * this target.
  2313. */
  2314. tstate->ultraenb &= ~devinfo->target_mask;
  2315. if (syncrate != NULL) {
  2316. if (syncrate->sxfr & ULTRA_SXFR) {
  2317. tstate->ultraenb |=
  2318. devinfo->target_mask;
  2319. }
  2320. scsirate |= syncrate->sxfr & SXFR;
  2321. scsirate |= offset & SOFS;
  2322. }
  2323. if (active) {
  2324. u_int sxfrctl0;
  2325. sxfrctl0 = ahc_inb(ahc, SXFRCTL0);
  2326. sxfrctl0 &= ~FAST20;
  2327. if (tstate->ultraenb & devinfo->target_mask)
  2328. sxfrctl0 |= FAST20;
  2329. ahc_outb(ahc, SXFRCTL0, sxfrctl0);
  2330. }
  2331. }
  2332. if (active) {
  2333. ahc_outb(ahc, SCSIRATE, scsirate);
  2334. if ((ahc->features & AHC_ULTRA2) != 0)
  2335. ahc_outb(ahc, SCSIOFFSET, offset);
  2336. }
  2337. tinfo->scsirate = scsirate;
  2338. tinfo->curr.period = period;
  2339. tinfo->curr.offset = offset;
  2340. tinfo->curr.ppr_options = ppr_options;
  2341. ahc_send_async(ahc, devinfo->channel, devinfo->target,
  2342. CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
  2343. if (bootverbose) {
  2344. if (offset != 0) {
  2345. printk("%s: target %d synchronous at %sMHz%s, "
  2346. "offset = 0x%x\n", ahc_name(ahc),
  2347. devinfo->target, syncrate->rate,
  2348. (ppr_options & MSG_EXT_PPR_DT_REQ)
  2349. ? " DT" : "", offset);
  2350. } else {
  2351. printk("%s: target %d using "
  2352. "asynchronous transfers\n",
  2353. ahc_name(ahc), devinfo->target);
  2354. }
  2355. }
  2356. }
  2357. update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
  2358. tinfo, AHC_NEG_TO_GOAL);
  2359. if (update_needed)
  2360. ahc_update_pending_scbs(ahc);
  2361. }
  2362. /*
  2363. * Update the user/goal/curr tables of wide negotiation
  2364. * parameters as well as, in the case of a current or active update,
  2365. * any data structures on the host controller. In the case of an
  2366. * active update, the specified target is currently talking to us on
  2367. * the bus, so the transfer parameter update must take effect
  2368. * immediately.
  2369. */
  2370. void
  2371. ahc_set_width(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
  2372. u_int width, u_int type, int paused)
  2373. {
  2374. struct ahc_initiator_tinfo *tinfo;
  2375. struct ahc_tmode_tstate *tstate;
  2376. u_int oldwidth;
  2377. int active;
  2378. int update_needed;
  2379. active = (type & AHC_TRANS_ACTIVE) == AHC_TRANS_ACTIVE;
  2380. update_needed = 0;
  2381. tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
  2382. devinfo->target, &tstate);
  2383. if ((type & AHC_TRANS_USER) != 0)
  2384. tinfo->user.width = width;
  2385. if ((type & AHC_TRANS_GOAL) != 0)
  2386. tinfo->goal.width = width;
  2387. oldwidth = tinfo->curr.width;
  2388. if ((type & AHC_TRANS_CUR) != 0 && oldwidth != width) {
  2389. u_int scsirate;
  2390. update_needed++;
  2391. scsirate = tinfo->scsirate;
  2392. scsirate &= ~WIDEXFER;
  2393. if (width == MSG_EXT_WDTR_BUS_16_BIT)
  2394. scsirate |= WIDEXFER;
  2395. tinfo->scsirate = scsirate;
  2396. if (active)
  2397. ahc_outb(ahc, SCSIRATE, scsirate);
  2398. tinfo->curr.width = width;
  2399. ahc_send_async(ahc, devinfo->channel, devinfo->target,
  2400. CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
  2401. if (bootverbose) {
  2402. printk("%s: target %d using %dbit transfers\n",
  2403. ahc_name(ahc), devinfo->target,
  2404. 8 * (0x01 << width));
  2405. }
  2406. }
  2407. update_needed += ahc_update_neg_request(ahc, devinfo, tstate,
  2408. tinfo, AHC_NEG_TO_GOAL);
  2409. if (update_needed)
  2410. ahc_update_pending_scbs(ahc);
  2411. }
  2412. /*
  2413. * Update the current state of tagged queuing for a given target.
  2414. */
  2415. static void
  2416. ahc_set_tags(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
  2417. struct ahc_devinfo *devinfo, ahc_queue_alg alg)
  2418. {
  2419. struct scsi_device *sdev = cmd->device;
  2420. ahc_platform_set_tags(ahc, sdev, devinfo, alg);
  2421. ahc_send_async(ahc, devinfo->channel, devinfo->target,
  2422. devinfo->lun, AC_TRANSFER_NEG);
  2423. }
  2424. /*
  2425. * When the transfer settings for a connection change, update any
  2426. * in-transit SCBs to contain the new data so the hardware will
  2427. * be set correctly during future (re)selections.
  2428. */
  2429. static void
  2430. ahc_update_pending_scbs(struct ahc_softc *ahc)
  2431. {
  2432. struct scb *pending_scb;
  2433. int pending_scb_count;
  2434. int i;
  2435. int paused;
  2436. u_int saved_scbptr;
  2437. /*
  2438. * Traverse the pending SCB list and ensure that all of the
  2439. * SCBs there have the proper settings.
  2440. */
  2441. pending_scb_count = 0;
  2442. LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
  2443. struct ahc_devinfo devinfo;
  2444. struct hardware_scb *pending_hscb;
  2445. struct ahc_initiator_tinfo *tinfo;
  2446. struct ahc_tmode_tstate *tstate;
  2447. ahc_scb_devinfo(ahc, &devinfo, pending_scb);
  2448. tinfo = ahc_fetch_transinfo(ahc, devinfo.channel,
  2449. devinfo.our_scsiid,
  2450. devinfo.target, &tstate);
  2451. pending_hscb = pending_scb->hscb;
  2452. pending_hscb->control &= ~ULTRAENB;
  2453. if ((tstate->ultraenb & devinfo.target_mask) != 0)
  2454. pending_hscb->control |= ULTRAENB;
  2455. pending_hscb->scsirate = tinfo->scsirate;
  2456. pending_hscb->scsioffset = tinfo->curr.offset;
  2457. if ((tstate->auto_negotiate & devinfo.target_mask) == 0
  2458. && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
  2459. pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
  2460. pending_hscb->control &= ~MK_MESSAGE;
  2461. }
  2462. ahc_sync_scb(ahc, pending_scb,
  2463. BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  2464. pending_scb_count++;
  2465. }
  2466. if (pending_scb_count == 0)
  2467. return;
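/*
 * Note whether the sequencer was already paused by our caller so
 * that we only unpause it below if the pause was our own.
 */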
  2468. if (ahc_is_paused(ahc)) {
  2469. paused = 1;
  2470. } else {
  2471. paused = 0;
  2472. ahc_pause(ahc);
  2473. }
  2474. saved_scbptr = ahc_inb(ahc, SCBPTR);
  2475. /* Ensure that the hscbs down on the card match the new information */
  2476. for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
  2477. struct hardware_scb *pending_hscb;
  2478. u_int control;
  2479. u_int scb_tag;
  2480. ahc_outb(ahc, SCBPTR, i);
  2481. scb_tag = ahc_inb(ahc, SCB_TAG);
  2482. pending_scb = ahc_lookup_scb(ahc, scb_tag);
  2483. if (pending_scb == NULL)
  2484. continue;
  2485. pending_hscb = pending_scb->hscb;
  2486. control = ahc_inb(ahc, SCB_CONTROL);
  2487. control &= ~(ULTRAENB|MK_MESSAGE);
  2488. control |= pending_hscb->control & (ULTRAENB|MK_MESSAGE);
  2489. ahc_outb(ahc, SCB_CONTROL, control);
  2490. ahc_outb(ahc, SCB_SCSIRATE, pending_hscb->scsirate);
  2491. ahc_outb(ahc, SCB_SCSIOFFSET, pending_hscb->scsioffset);
  2492. }
  2493. ahc_outb(ahc, SCBPTR, saved_scbptr);
  2494. if (paused == 0)
  2495. ahc_unpause(ahc);
  2496. }
  2497. /**************************** Pathing Information *****************************/
  2498. static void
  2499. ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
  2500. {
  2501. u_int saved_scsiid;
  2502. role_t role;
  2503. int our_id;
  2504. if (ahc_inb(ahc, SSTAT0) & TARGET)
  2505. role = ROLE_TARGET;
  2506. else
  2507. role = ROLE_INITIATOR;
  2508. if (role == ROLE_TARGET
  2509. && (ahc->features & AHC_MULTI_TID) != 0
  2510. && (ahc_inb(ahc, SEQ_FLAGS)
  2511. & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) {
  2512. /* We were selected, so pull our id from TARGIDIN */
  2513. our_id = ahc_inb(ahc, TARGIDIN) & OID;
  2514. } else if ((ahc->features & AHC_ULTRA2) != 0)
  2515. our_id = ahc_inb(ahc, SCSIID_ULTRA2) & OID;
  2516. else
  2517. our_id = ahc_inb(ahc, SCSIID) & OID;
  2518. saved_scsiid = ahc_inb(ahc, SAVED_SCSIID);
  2519. ahc_compile_devinfo(devinfo,
  2520. our_id,
  2521. SCSIID_TARGET(ahc, saved_scsiid),
  2522. ahc_inb(ahc, SAVED_LUN),
  2523. SCSIID_CHANNEL(ahc, saved_scsiid),
  2524. role);
  2525. }
  2526. static const struct ahc_phase_table_entry*
  2527. ahc_lookup_phase_entry(int phase)
  2528. {
  2529. const struct ahc_phase_table_entry *entry;
  2530. const struct ahc_phase_table_entry *last_entry;
  2531. /*
  2532. * num_phases doesn't include the default entry which
  2533. * will be returned if the phase doesn't match.
  2534. */
  2535. last_entry = &ahc_phase_table[num_phases];
  2536. for (entry = ahc_phase_table; entry < last_entry; entry++) {
  2537. if (phase == entry->phase)
  2538. break;
  2539. }
  2540. return (entry);
  2541. }
  2542. void
  2543. ahc_compile_devinfo(struct ahc_devinfo *devinfo, u_int our_id, u_int target,
  2544. u_int lun, char channel, role_t role)
  2545. {
  2546. devinfo->our_scsiid = our_id;
  2547. devinfo->target = target;
  2548. devinfo->lun = lun;
  2549. devinfo->target_offset = target;
  2550. devinfo->channel = channel;
  2551. devinfo->role = role;
  2552. if (channel == 'B')
  2553. devinfo->target_offset += 8;
  2554. devinfo->target_mask = (0x01 << devinfo->target_offset);
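/*
 * For example, target 3 on channel 'B' yields target_offset 11 and
 * target_mask 0x0800.
 */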
  2555. }
  2556. void
  2557. ahc_print_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
  2558. {
  2559. printk("%s:%c:%d:%d: ", ahc_name(ahc), devinfo->channel,
  2560. devinfo->target, devinfo->lun);
  2561. }
  2562. static void
  2563. ahc_scb_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
  2564. struct scb *scb)
  2565. {
  2566. role_t role;
  2567. int our_id;
  2568. our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
  2569. role = ROLE_INITIATOR;
  2570. if ((scb->flags & SCB_TARGET_SCB) != 0)
  2571. role = ROLE_TARGET;
  2572. ahc_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahc, scb),
  2573. SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahc, scb), role);
  2574. }
  2575. /************************ Message Phase Processing ****************************/
  2576. static void
  2577. ahc_assert_atn(struct ahc_softc *ahc)
  2578. {
  2579. u_int scsisigo;
  2580. scsisigo = ATNO;
  2581. if ((ahc->features & AHC_DT) == 0)
  2582. scsisigo |= ahc_inb(ahc, SCSISIGI);
  2583. ahc_outb(ahc, SCSISIGO, scsisigo);
  2584. }
  2585. /*
  2586. * When an initiator transaction with the MK_MESSAGE flag either reconnects
  2587. * or enters the initial message out phase, we are interrupted. Fill our
2588. outgoing message buffer with the appropriate message and begin handling
  2589. * the message phase(s) manually.
  2590. */
  2591. static void
  2592. ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
  2593. struct scb *scb)
  2594. {
  2595. /*
  2596. * To facilitate adding multiple messages together,
  2597. * each routine should increment the index and len
  2598. * variables instead of setting them explicitly.
  2599. */
  2600. ahc->msgout_index = 0;
  2601. ahc->msgout_len = 0;
  2602. if ((scb->flags & SCB_DEVICE_RESET) == 0
  2603. && ahc_inb(ahc, MSG_OUT) == MSG_IDENTIFYFLAG) {
  2604. u_int identify_msg;
  2605. identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
  2606. if ((scb->hscb->control & DISCENB) != 0)
  2607. identify_msg |= MSG_IDENTIFY_DISCFLAG;
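/*
 * IDENTIFY is a single byte: bit 7 marks it as an identify message,
 * bit 6 grants disconnect privilege, and the low order bits carry
 * the LUN.
 */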
  2608. ahc->msgout_buf[ahc->msgout_index++] = identify_msg;
  2609. ahc->msgout_len++;
  2610. if ((scb->hscb->control & TAG_ENB) != 0) {
  2611. ahc->msgout_buf[ahc->msgout_index++] =
  2612. scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
  2613. ahc->msgout_buf[ahc->msgout_index++] = scb->hscb->tag;
  2614. ahc->msgout_len += 2;
  2615. }
  2616. }
  2617. if (scb->flags & SCB_DEVICE_RESET) {
  2618. ahc->msgout_buf[ahc->msgout_index++] = TARGET_RESET;
  2619. ahc->msgout_len++;
  2620. ahc_print_path(ahc, scb);
  2621. printk("Bus Device Reset Message Sent\n");
  2622. /*
  2623. * Clear our selection hardware in advance of
  2624. * the busfree. We may have an entry in the waiting
  2625. * Q for this target, and we don't want to go about
  2626. * selecting while we handle the busfree and blow it
  2627. * away.
  2628. */
  2629. ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
  2630. } else if ((scb->flags & SCB_ABORT) != 0) {
  2631. if ((scb->hscb->control & TAG_ENB) != 0)
  2632. ahc->msgout_buf[ahc->msgout_index++] = ABORT_TASK;
  2633. else
  2634. ahc->msgout_buf[ahc->msgout_index++] = ABORT_TASK_SET;
  2635. ahc->msgout_len++;
  2636. ahc_print_path(ahc, scb);
  2637. printk("Abort%s Message Sent\n",
  2638. (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
  2639. /*
  2640. * Clear our selection hardware in advance of
  2641. * the busfree. We may have an entry in the waiting
  2642. * Q for this target, and we don't want to go about
  2643. * selecting while we handle the busfree and blow it
  2644. * away.
  2645. */
  2646. ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
  2647. } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
  2648. ahc_build_transfer_msg(ahc, devinfo);
  2649. } else {
  2650. printk("ahc_intr: AWAITING_MSG for an SCB that "
  2651. "does not have a waiting message\n");
  2652. printk("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
  2653. devinfo->target_mask);
  2654. panic("SCB = %d, SCB Control = %x, MSG_OUT = %x "
  2655. "SCB flags = %x", scb->hscb->tag, scb->hscb->control,
  2656. ahc_inb(ahc, MSG_OUT), scb->flags);
  2657. }
  2658. /*
  2659. * Clear the MK_MESSAGE flag from the SCB so we aren't
  2660. * asked to send this message again.
  2661. */
  2662. ahc_outb(ahc, SCB_CONTROL, ahc_inb(ahc, SCB_CONTROL) & ~MK_MESSAGE);
  2663. scb->hscb->control &= ~MK_MESSAGE;
  2664. ahc->msgout_index = 0;
  2665. ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
  2666. }
  2667. /*
  2668. * Build an appropriate transfer negotiation message for the
  2669. * currently active target.
  2670. */
  2671. static void
  2672. ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
  2673. {
  2674. /*
  2675. * We need to initiate transfer negotiations.
  2676. * If our current and goal settings are identical,
  2677. * we want to renegotiate due to a check condition.
  2678. */
  2679. struct ahc_initiator_tinfo *tinfo;
  2680. struct ahc_tmode_tstate *tstate;
  2681. const struct ahc_syncrate *rate;
  2682. int dowide;
  2683. int dosync;
  2684. int doppr;
  2685. u_int period;
  2686. u_int ppr_options;
  2687. u_int offset;
  2688. tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
  2689. devinfo->target, &tstate);
  2690. /*
  2691. * Filter our period based on the current connection.
  2692. * If we can't perform DT transfers on this segment (not in LVD
  2693. * mode for instance), then our decision to issue a PPR message
  2694. * may change.
  2695. */
  2696. period = tinfo->goal.period;
  2697. offset = tinfo->goal.offset;
  2698. ppr_options = tinfo->goal.ppr_options;
  2699. /* Target initiated PPR is not allowed in the SCSI spec */
  2700. if (devinfo->role == ROLE_TARGET)
  2701. ppr_options = 0;
  2702. rate = ahc_devlimited_syncrate(ahc, tinfo, &period,
  2703. &ppr_options, devinfo->role);
  2704. dowide = tinfo->curr.width != tinfo->goal.width;
  2705. dosync = tinfo->curr.offset != offset || tinfo->curr.period != period;
  2706. /*
  2707. * Only use PPR if we have options that need it, even if the device
  2708. * claims to support it. There might be an expander in the way
  2709. * that doesn't.
  2710. */
  2711. doppr = ppr_options != 0;
  2712. if (!dowide && !dosync && !doppr) {
  2713. dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
  2714. dosync = tinfo->goal.offset != 0;
  2715. }
  2716. if (!dowide && !dosync && !doppr) {
  2717. /*
  2718. * Force async with a WDTR message if we have a wide bus,
  2719. * or just issue an SDTR with a 0 offset.
  2720. */
  2721. if ((ahc->features & AHC_WIDE) != 0)
  2722. dowide = 1;
  2723. else
  2724. dosync = 1;
  2725. if (bootverbose) {
  2726. ahc_print_devinfo(ahc, devinfo);
  2727. printk("Ensuring async\n");
  2728. }
  2729. }
  2730. /* Target initiated PPR is not allowed in the SCSI spec */
  2731. if (devinfo->role == ROLE_TARGET)
  2732. doppr = 0;
  2733. /*
  2734. * Both the PPR message and SDTR message require the
  2735. * goal syncrate to be limited to what the target device
  2736. * is capable of handling (based on whether an LVD->SE
  2737. * expander is on the bus), so combine these two cases.
  2738. * Regardless, guarantee that if we are using WDTR and SDTR
  2739. * messages that WDTR comes first.
  2740. */
  2741. if (doppr || (dosync && !dowide)) {
  2742. offset = tinfo->goal.offset;
  2743. ahc_validate_offset(ahc, tinfo, rate, &offset,
  2744. doppr ? tinfo->goal.width
  2745. : tinfo->curr.width,
  2746. devinfo->role);
  2747. if (doppr) {
  2748. ahc_construct_ppr(ahc, devinfo, period, offset,
  2749. tinfo->goal.width, ppr_options);
  2750. } else {
  2751. ahc_construct_sdtr(ahc, devinfo, period, offset);
  2752. }
  2753. } else {
  2754. ahc_construct_wdtr(ahc, devinfo, tinfo->goal.width);
  2755. }
  2756. }
  2757. /*
  2758. * Build a synchronous negotiation message in our message
  2759. * buffer based on the input parameters.
  2760. */
  2761. static void
  2762. ahc_construct_sdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
  2763. u_int period, u_int offset)
  2764. {
  2765. if (offset == 0)
  2766. period = AHC_ASYNC_XFER_PERIOD;
  2767. ahc->msgout_index += spi_populate_sync_msg(
  2768. ahc->msgout_buf + ahc->msgout_index, period, offset);
  2769. ahc->msgout_len += 5;
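/*
 * The resulting SDTR is the standard five byte extended message:
 * EXTENDED_MESSAGE, length (3), EXTENDED_SDTR, period, offset.
 */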
  2770. if (bootverbose) {
  2771. printk("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
  2772. ahc_name(ahc), devinfo->channel, devinfo->target,
  2773. devinfo->lun, period, offset);
  2774. }
  2775. }
  2776. /*
  2777. * Build a wide negotiation message in our message
  2778. * buffer based on the input parameters.
  2779. */
  2780. static void
  2781. ahc_construct_wdtr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
  2782. u_int bus_width)
  2783. {
  2784. ahc->msgout_index += spi_populate_width_msg(
  2785. ahc->msgout_buf + ahc->msgout_index, bus_width);
  2786. ahc->msgout_len += 4;
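/*
 * The resulting WDTR is the standard four byte extended message:
 * EXTENDED_MESSAGE, length (2), EXTENDED_WDTR, bus_width.
 */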
  2787. if (bootverbose) {
  2788. printk("(%s:%c:%d:%d): Sending WDTR %x\n",
  2789. ahc_name(ahc), devinfo->channel, devinfo->target,
  2790. devinfo->lun, bus_width);
  2791. }
  2792. }
  2793. /*
  2794. * Build a parallel protocol request message in our message
  2795. * buffer based on the input parameters.
  2796. */
  2797. static void
  2798. ahc_construct_ppr(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
  2799. u_int period, u_int offset, u_int bus_width,
  2800. u_int ppr_options)
  2801. {
  2802. if (offset == 0)
  2803. period = AHC_ASYNC_XFER_PERIOD;
  2804. ahc->msgout_index += spi_populate_ppr_msg(
  2805. ahc->msgout_buf + ahc->msgout_index, period, offset,
  2806. bus_width, ppr_options);
  2807. ahc->msgout_len += 8;
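/*
 * The resulting PPR is the standard eight byte extended message:
 * EXTENDED_MESSAGE, length (6), EXTENDED_PPR, period, reserved,
 * offset, bus_width, ppr_options.
 */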
  2808. if (bootverbose) {
  2809. printk("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
  2810. "offset %x, ppr_options %x\n", ahc_name(ahc),
  2811. devinfo->channel, devinfo->target, devinfo->lun,
  2812. bus_width, period, offset, ppr_options);
  2813. }
  2814. }
  2815. /*
  2816. * Clear any active message state.
  2817. */
  2818. static void
  2819. ahc_clear_msg_state(struct ahc_softc *ahc)
  2820. {
  2821. ahc->msgout_len = 0;
  2822. ahc->msgin_index = 0;
  2823. ahc->msg_type = MSG_TYPE_NONE;
  2824. if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0) {
  2825. /*
  2826. * The target didn't care to respond to our
  2827. * message request, so clear ATN.
  2828. */
  2829. ahc_outb(ahc, CLRSINT1, CLRATNO);
  2830. }
  2831. ahc_outb(ahc, MSG_OUT, NOP);
  2832. ahc_outb(ahc, SEQ_FLAGS2,
  2833. ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
  2834. }
  2835. static void
  2836. ahc_handle_proto_violation(struct ahc_softc *ahc)
  2837. {
  2838. struct ahc_devinfo devinfo;
  2839. struct scb *scb;
  2840. u_int scbid;
  2841. u_int seq_flags;
  2842. u_int curphase;
  2843. u_int lastphase;
  2844. int found;
  2845. ahc_fetch_devinfo(ahc, &devinfo);
  2846. scbid = ahc_inb(ahc, SCB_TAG);
  2847. scb = ahc_lookup_scb(ahc, scbid);
  2848. seq_flags = ahc_inb(ahc, SEQ_FLAGS);
  2849. curphase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
  2850. lastphase = ahc_inb(ahc, LASTPHASE);
  2851. if ((seq_flags & NOT_IDENTIFIED) != 0) {
  2852. /*
  2853. * The reconnecting target either did not send an
  2854. * identify message, or did, but we didn't find an SCB
  2855. * to match.
  2856. */
  2857. ahc_print_devinfo(ahc, &devinfo);
  2858. printk("Target did not send an IDENTIFY message. "
  2859. "LASTPHASE = 0x%x.\n", lastphase);
  2860. scb = NULL;
  2861. } else if (scb == NULL) {
  2862. /*
  2863. * We don't seem to have an SCB active for this
  2864. * transaction. Print an error and reset the bus.
  2865. */
  2866. ahc_print_devinfo(ahc, &devinfo);
  2867. printk("No SCB found during protocol violation\n");
  2868. goto proto_violation_reset;
  2869. } else {
  2870. ahc_set_transaction_status(scb, CAM_SEQUENCE_FAIL);
  2871. if ((seq_flags & NO_CDB_SENT) != 0) {
  2872. ahc_print_path(ahc, scb);
  2873. printk("No or incomplete CDB sent to device.\n");
  2874. } else if ((ahc_inb(ahc, SCB_CONTROL) & STATUS_RCVD) == 0) {
  2875. /*
  2876. * The target never bothered to provide status to
  2877. * us prior to completing the command. Since we don't
  2878. * know the disposition of this command, we must attempt
  2879. * to abort it. Assert ATN and prepare to send an abort
  2880. * message.
  2881. */
  2882. ahc_print_path(ahc, scb);
  2883. printk("Completed command without status.\n");
  2884. } else {
  2885. ahc_print_path(ahc, scb);
  2886. printk("Unknown protocol violation.\n");
  2887. ahc_dump_card_state(ahc);
  2888. }
  2889. }
  2890. if ((lastphase & ~P_DATAIN_DT) == 0
  2891. || lastphase == P_COMMAND) {
  2892. proto_violation_reset:
  2893. /*
  2894. * Target either went directly to data/command
  2895. * phase or didn't respond to our ATN.
  2896. * The only safe thing to do is to blow
  2897. * it away with a bus reset.
  2898. */
  2899. found = ahc_reset_channel(ahc, 'A', TRUE);
  2900. printk("%s: Issued Channel %c Bus Reset. "
  2901. "%d SCBs aborted\n", ahc_name(ahc), 'A', found);
  2902. } else {
  2903. /*
  2904. * Leave the selection hardware off in case
  2905. * this abort attempt will affect yet to
  2906. * be sent commands.
  2907. */
  2908. ahc_outb(ahc, SCSISEQ,
  2909. ahc_inb(ahc, SCSISEQ) & ~ENSELO);
  2910. ahc_assert_atn(ahc);
  2911. ahc_outb(ahc, MSG_OUT, HOST_MSG);
  2912. if (scb == NULL) {
  2913. ahc_print_devinfo(ahc, &devinfo);
  2914. ahc->msgout_buf[0] = ABORT_TASK;
  2915. ahc->msgout_len = 1;
  2916. ahc->msgout_index = 0;
  2917. ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
  2918. } else {
  2919. ahc_print_path(ahc, scb);
  2920. scb->flags |= SCB_ABORT;
  2921. }
  2922. printk("Protocol violation %s. Attempting to abort.\n",
  2923. ahc_lookup_phase_entry(curphase)->phasemsg);
  2924. }
  2925. }
  2926. /*
  2927. * Manual message loop handler.
  2928. */
  2929. static void
  2930. ahc_handle_message_phase(struct ahc_softc *ahc)
  2931. {
  2932. struct ahc_devinfo devinfo;
  2933. u_int bus_phase;
  2934. int end_session;
  2935. ahc_fetch_devinfo(ahc, &devinfo);
  2936. end_session = FALSE;
  2937. bus_phase = ahc_inb(ahc, SCSISIGI) & PHASE_MASK;
  2938. reswitch:
  2939. switch (ahc->msg_type) {
  2940. case MSG_TYPE_INITIATOR_MSGOUT:
  2941. {
  2942. int lastbyte;
  2943. int phasemis;
  2944. int msgdone;
  2945. if (ahc->msgout_len == 0)
  2946. panic("HOST_MSG_LOOP interrupt with no active message");
  2947. #ifdef AHC_DEBUG
  2948. if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
  2949. ahc_print_devinfo(ahc, &devinfo);
  2950. printk("INITIATOR_MSG_OUT");
  2951. }
  2952. #endif
  2953. phasemis = bus_phase != P_MESGOUT;
  2954. if (phasemis) {
  2955. #ifdef AHC_DEBUG
  2956. if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
  2957. printk(" PHASEMIS %s\n",
  2958. ahc_lookup_phase_entry(bus_phase)
  2959. ->phasemsg);
  2960. }
  2961. #endif
  2962. if (bus_phase == P_MESGIN) {
  2963. /*
  2964. * Change gears and see if
2965. * this message is of interest to
  2966. * us or should be passed back to
  2967. * the sequencer.
  2968. */
  2969. ahc_outb(ahc, CLRSINT1, CLRATNO);
  2970. ahc->send_msg_perror = FALSE;
  2971. ahc->msg_type = MSG_TYPE_INITIATOR_MSGIN;
  2972. ahc->msgin_index = 0;
  2973. goto reswitch;
  2974. }
  2975. end_session = TRUE;
  2976. break;
  2977. }
  2978. if (ahc->send_msg_perror) {
  2979. ahc_outb(ahc, CLRSINT1, CLRATNO);
  2980. ahc_outb(ahc, CLRSINT1, CLRREQINIT);
  2981. #ifdef AHC_DEBUG
  2982. if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
  2983. printk(" byte 0x%x\n", ahc->send_msg_perror);
  2984. #endif
  2985. ahc_outb(ahc, SCSIDATL, MSG_PARITY_ERROR);
  2986. break;
  2987. }
  2988. msgdone = ahc->msgout_index == ahc->msgout_len;
  2989. if (msgdone) {
  2990. /*
  2991. * The target has requested a retry.
  2992. * Re-assert ATN, reset our message index to
  2993. * 0, and try again.
  2994. */
  2995. ahc->msgout_index = 0;
  2996. ahc_assert_atn(ahc);
  2997. }
  2998. lastbyte = ahc->msgout_index == (ahc->msgout_len - 1);
  2999. if (lastbyte) {
  3000. /* Last byte is signified by dropping ATN */
  3001. ahc_outb(ahc, CLRSINT1, CLRATNO);
  3002. }
  3003. /*
  3004. * Clear our interrupt status and present
  3005. * the next byte on the bus.
  3006. */
  3007. ahc_outb(ahc, CLRSINT1, CLRREQINIT);
  3008. #ifdef AHC_DEBUG
  3009. if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
  3010. printk(" byte 0x%x\n",
  3011. ahc->msgout_buf[ahc->msgout_index]);
  3012. #endif
  3013. ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
  3014. break;
  3015. }
  3016. case MSG_TYPE_INITIATOR_MSGIN:
  3017. {
  3018. int phasemis;
  3019. int message_done;
  3020. #ifdef AHC_DEBUG
  3021. if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
  3022. ahc_print_devinfo(ahc, &devinfo);
  3023. printk("INITIATOR_MSG_IN");
  3024. }
  3025. #endif
  3026. phasemis = bus_phase != P_MESGIN;
  3027. if (phasemis) {
  3028. #ifdef AHC_DEBUG
  3029. if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
  3030. printk(" PHASEMIS %s\n",
  3031. ahc_lookup_phase_entry(bus_phase)
  3032. ->phasemsg);
  3033. }
  3034. #endif
  3035. ahc->msgin_index = 0;
  3036. if (bus_phase == P_MESGOUT
  3037. && (ahc->send_msg_perror == TRUE
  3038. || (ahc->msgout_len != 0
  3039. && ahc->msgout_index == 0))) {
  3040. ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
  3041. goto reswitch;
  3042. }
  3043. end_session = TRUE;
  3044. break;
  3045. }
  3046. /* Pull the byte in without acking it */
  3047. ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIBUSL);
  3048. #ifdef AHC_DEBUG
  3049. if ((ahc_debug & AHC_SHOW_MESSAGES) != 0)
  3050. printk(" byte 0x%x\n",
  3051. ahc->msgin_buf[ahc->msgin_index]);
  3052. #endif
  3053. message_done = ahc_parse_msg(ahc, &devinfo);
  3054. if (message_done) {
  3055. /*
  3056. * Clear our incoming message buffer in case there
  3057. * is another message following this one.
  3058. */
  3059. ahc->msgin_index = 0;
  3060. /*
3061. * If this message elicited a response,
  3062. * assert ATN so the target takes us to the
  3063. * message out phase.
  3064. */
  3065. if (ahc->msgout_len != 0) {
  3066. #ifdef AHC_DEBUG
  3067. if ((ahc_debug & AHC_SHOW_MESSAGES) != 0) {
  3068. ahc_print_devinfo(ahc, &devinfo);
  3069. printk("Asserting ATN for response\n");
  3070. }
  3071. #endif
  3072. ahc_assert_atn(ahc);
  3073. }
  3074. } else
  3075. ahc->msgin_index++;
  3076. if (message_done == MSGLOOP_TERMINATED) {
  3077. end_session = TRUE;
  3078. } else {
  3079. /* Ack the byte */
  3080. ahc_outb(ahc, CLRSINT1, CLRREQINIT);
  3081. ahc_inb(ahc, SCSIDATL);
  3082. }
  3083. break;
  3084. }
  3085. case MSG_TYPE_TARGET_MSGIN:
  3086. {
  3087. int msgdone;
  3088. int msgout_request;
  3089. if (ahc->msgout_len == 0)
  3090. panic("Target MSGIN with no active message");
  3091. /*
  3092. * If we interrupted a mesgout session, the initiator
  3093. * will not know this until our first REQ. So, we
  3094. * only honor mesgout requests after we've sent our
  3095. * first byte.
  3096. */
  3097. if ((ahc_inb(ahc, SCSISIGI) & ATNI) != 0
  3098. && ahc->msgout_index > 0)
  3099. msgout_request = TRUE;
  3100. else
  3101. msgout_request = FALSE;
  3102. if (msgout_request) {
  3103. /*
  3104. * Change gears and see if
3105. * this message is of interest to
  3106. * us or should be passed back to
  3107. * the sequencer.
  3108. */
  3109. ahc->msg_type = MSG_TYPE_TARGET_MSGOUT;
  3110. ahc_outb(ahc, SCSISIGO, P_MESGOUT | BSYO);
  3111. ahc->msgin_index = 0;
  3112. /* Dummy read to REQ for first byte */
  3113. ahc_inb(ahc, SCSIDATL);
  3114. ahc_outb(ahc, SXFRCTL0,
  3115. ahc_inb(ahc, SXFRCTL0) | SPIOEN);
  3116. break;
  3117. }
  3118. msgdone = ahc->msgout_index == ahc->msgout_len;
  3119. if (msgdone) {
  3120. ahc_outb(ahc, SXFRCTL0,
  3121. ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
  3122. end_session = TRUE;
  3123. break;
  3124. }
  3125. /*
  3126. * Present the next byte on the bus.
  3127. */
  3128. ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) | SPIOEN);
  3129. ahc_outb(ahc, SCSIDATL, ahc->msgout_buf[ahc->msgout_index++]);
  3130. break;
  3131. }
  3132. case MSG_TYPE_TARGET_MSGOUT:
  3133. {
  3134. int lastbyte;
  3135. int msgdone;
  3136. /*
  3137. * The initiator signals that this is
  3138. * the last byte by dropping ATN.
  3139. */
  3140. lastbyte = (ahc_inb(ahc, SCSISIGI) & ATNI) == 0;
  3141. /*
  3142. * Read the latched byte, but turn off SPIOEN first
  3143. * so that we don't inadvertently cause a REQ for the
  3144. * next byte.
  3145. */
  3146. ahc_outb(ahc, SXFRCTL0, ahc_inb(ahc, SXFRCTL0) & ~SPIOEN);
  3147. ahc->msgin_buf[ahc->msgin_index] = ahc_inb(ahc, SCSIDATL);
  3148. msgdone = ahc_parse_msg(ahc, &devinfo);
  3149. if (msgdone == MSGLOOP_TERMINATED) {
  3150. /*
  3151. * The message is *really* done in that it caused
  3152. * us to go to bus free. The sequencer has already
  3153. * been reset at this point, so pull the ejection
  3154. * handle.
  3155. */
  3156. return;
  3157. }
  3158. ahc->msgin_index++;
  3159. /*
  3160. * XXX Read spec about initiator dropping ATN too soon
  3161. * and use msgdone to detect it.
  3162. */
  3163. if (msgdone == MSGLOOP_MSGCOMPLETE) {
  3164. ahc->msgin_index = 0;
  3165. /*
3166. * If this message elicited a response, transition
  3167. * to the Message in phase and send it.
  3168. */
  3169. if (ahc->msgout_len != 0) {
  3170. ahc_outb(ahc, SCSISIGO, P_MESGIN | BSYO);
  3171. ahc_outb(ahc, SXFRCTL0,
  3172. ahc_inb(ahc, SXFRCTL0) | SPIOEN);
  3173. ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
  3174. ahc->msgin_index = 0;
  3175. break;
  3176. }
  3177. }
  3178. if (lastbyte)
  3179. end_session = TRUE;
  3180. else {
  3181. /* Ask for the next byte. */
  3182. ahc_outb(ahc, SXFRCTL0,
  3183. ahc_inb(ahc, SXFRCTL0) | SPIOEN);
  3184. }
  3185. break;
  3186. }
  3187. default:
  3188. panic("Unknown REQINIT message type");
  3189. }
  3190. if (end_session) {
  3191. ahc_clear_msg_state(ahc);
  3192. ahc_outb(ahc, RETURN_1, EXIT_MSG_LOOP);
  3193. } else
  3194. ahc_outb(ahc, RETURN_1, CONT_MSG_LOOP);
  3195. }
  3196. /*
  3197. * See if we sent a particular extended message to the target.
  3198. * If "full" is true, return true only if the target saw the full
  3199. * message. If "full" is false, return true if the target saw at
  3200. * least the first byte of the message.
  3201. */
  3202. static int
  3203. ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
  3204. {
  3205. int found;
  3206. u_int index;
  3207. found = FALSE;
  3208. index = 0;
  3209. while (index < ahc->msgout_len) {
  3210. if (ahc->msgout_buf[index] == EXTENDED_MESSAGE) {
  3211. u_int end_index;
  3212. end_index = index + 1 + ahc->msgout_buf[index + 1];
  3213. if (ahc->msgout_buf[index+2] == msgval
  3214. && type == AHCMSG_EXT) {
  3215. if (full) {
  3216. if (ahc->msgout_index > end_index)
  3217. found = TRUE;
  3218. } else if (ahc->msgout_index > index)
  3219. found = TRUE;
  3220. }
  3221. index = end_index;
  3222. } else if (ahc->msgout_buf[index] >= SIMPLE_QUEUE_TAG
  3223. && ahc->msgout_buf[index] <= IGNORE_WIDE_RESIDUE) {
3224. /* Skip tag type and tag id or residue param */
  3225. index += 2;
  3226. } else {
  3227. /* Single byte message */
  3228. if (type == AHCMSG_1B
  3229. && ahc->msgout_buf[index] == msgval
  3230. && ahc->msgout_index > index)
  3231. found = TRUE;
  3232. index++;
  3233. }
  3234. if (found)
  3235. break;
  3236. }
  3237. return (found);
  3238. }
  3239. /*
  3240. * Wait for a complete incoming message, parse it, and respond accordingly.
  3241. */
  3242. static int
  3243. ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
  3244. {
  3245. struct ahc_initiator_tinfo *tinfo;
  3246. struct ahc_tmode_tstate *tstate;
  3247. int reject;
  3248. int done;
  3249. int response;
  3250. u_int targ_scsirate;
  3251. done = MSGLOOP_IN_PROG;
  3252. response = FALSE;
  3253. reject = FALSE;
  3254. tinfo = ahc_fetch_transinfo(ahc, devinfo->channel, devinfo->our_scsiid,
  3255. devinfo->target, &tstate);
  3256. targ_scsirate = tinfo->scsirate;
  3257. /*
  3258. * Parse as much of the message as is available,
  3259. * rejecting it if we don't support it. When
  3260. * the entire message is available and has been
  3261. * handled, return MSGLOOP_MSGCOMPLETE, indicating
  3262. * that we have parsed an entire message.
  3263. *
  3264. * In the case of extended messages, we accept the length
  3265. * byte outright and perform more checking once we know the
  3266. * extended message type.
  3267. */
  3268. switch (ahc->msgin_buf[0]) {
  3269. case DISCONNECT:
  3270. case SAVE_POINTERS:
  3271. case COMMAND_COMPLETE:
  3272. case RESTORE_POINTERS:
  3273. case IGNORE_WIDE_RESIDUE:
  3274. /*
  3275. * End our message loop as these are messages
  3276. * the sequencer handles on its own.
  3277. */
  3278. done = MSGLOOP_TERMINATED;
  3279. break;
  3280. case MESSAGE_REJECT:
  3281. response = ahc_handle_msg_reject(ahc, devinfo);
  3282. fallthrough;
  3283. case NOP:
  3284. done = MSGLOOP_MSGCOMPLETE;
  3285. break;
  3286. case EXTENDED_MESSAGE:
  3287. {
  3288. /* Wait for enough of the message to begin validation */
  3289. if (ahc->msgin_index < 2)
  3290. break;
  3291. switch (ahc->msgin_buf[2]) {
  3292. case EXTENDED_SDTR:
  3293. {
  3294. const struct ahc_syncrate *syncrate;
  3295. u_int period;
  3296. u_int ppr_options;
  3297. u_int offset;
  3298. u_int saved_offset;
  3299. if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
  3300. reject = TRUE;
  3301. break;
  3302. }
  3303. /*
  3304. * Wait until we have both args before validating
  3305. * and acting on this message.
  3306. *
  3307. * Add one to MSG_EXT_SDTR_LEN to account for
  3308. * the extended message preamble.
  3309. */
  3310. if (ahc->msgin_index < (MSG_EXT_SDTR_LEN + 1))
  3311. break;
  3312. period = ahc->msgin_buf[3];
  3313. ppr_options = 0;
  3314. saved_offset = offset = ahc->msgin_buf[4];
  3315. syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
  3316. &ppr_options,
  3317. devinfo->role);
  3318. ahc_validate_offset(ahc, tinfo, syncrate, &offset,
  3319. targ_scsirate & WIDEXFER,
  3320. devinfo->role);
  3321. if (bootverbose) {
  3322. printk("(%s:%c:%d:%d): Received "
  3323. "SDTR period %x, offset %x\n\t"
  3324. "Filtered to period %x, offset %x\n",
  3325. ahc_name(ahc), devinfo->channel,
  3326. devinfo->target, devinfo->lun,
  3327. ahc->msgin_buf[3], saved_offset,
  3328. period, offset);
  3329. }
  3330. ahc_set_syncrate(ahc, devinfo,
  3331. syncrate, period,
  3332. offset, ppr_options,
  3333. AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
  3334. /*paused*/TRUE);
  3335. /*
  3336. * See if we initiated Sync Negotiation
  3337. * and didn't have to fall down to async
  3338. * transfers.
  3339. */
  3340. if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_SDTR, TRUE)) {
  3341. /* We started it */
  3342. if (saved_offset != offset) {
  3343. /* Went too low - force async */
  3344. reject = TRUE;
  3345. }
  3346. } else {
  3347. /*
  3348. * Send our own SDTR in reply
  3349. */
  3350. if (bootverbose
  3351. && devinfo->role == ROLE_INITIATOR) {
  3352. printk("(%s:%c:%d:%d): Target "
  3353. "Initiated SDTR\n",
  3354. ahc_name(ahc), devinfo->channel,
  3355. devinfo->target, devinfo->lun);
  3356. }
  3357. ahc->msgout_index = 0;
  3358. ahc->msgout_len = 0;
  3359. ahc_construct_sdtr(ahc, devinfo,
  3360. period, offset);
  3361. ahc->msgout_index = 0;
  3362. response = TRUE;
  3363. }
  3364. done = MSGLOOP_MSGCOMPLETE;
  3365. break;
  3366. }
  3367. case EXTENDED_WDTR:
  3368. {
  3369. u_int bus_width;
  3370. u_int saved_width;
  3371. u_int sending_reply;
  3372. sending_reply = FALSE;
  3373. if (ahc->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
  3374. reject = TRUE;
  3375. break;
  3376. }
  3377. /*
  3378. * Wait until we have our arg before validating
  3379. * and acting on this message.
  3380. *
  3381. * Add one to MSG_EXT_WDTR_LEN to account for
  3382. * the extended message preamble.
  3383. */
  3384. if (ahc->msgin_index < (MSG_EXT_WDTR_LEN + 1))
  3385. break;
  3386. bus_width = ahc->msgin_buf[3];
  3387. saved_width = bus_width;
  3388. ahc_validate_width(ahc, tinfo, &bus_width,
  3389. devinfo->role);
  3390. if (bootverbose) {
  3391. printk("(%s:%c:%d:%d): Received WDTR "
  3392. "%x filtered to %x\n",
  3393. ahc_name(ahc), devinfo->channel,
  3394. devinfo->target, devinfo->lun,
  3395. saved_width, bus_width);
  3396. }
  3397. if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_WDTR, TRUE)) {
  3398. /*
  3399. * Don't send a WDTR back to the
  3400. * target, since we asked first.
  3401. * If the width went higher than our
  3402. * request, reject it.
  3403. */
  3404. if (saved_width > bus_width) {
  3405. reject = TRUE;
  3406. printk("(%s:%c:%d:%d): requested %dBit "
  3407. "transfers. Rejecting...\n",
  3408. ahc_name(ahc), devinfo->channel,
  3409. devinfo->target, devinfo->lun,
  3410. 8 * (0x01 << bus_width));
  3411. bus_width = 0;
  3412. }
  3413. } else {
  3414. /*
  3415. * Send our own WDTR in reply
  3416. */
  3417. if (bootverbose
  3418. && devinfo->role == ROLE_INITIATOR) {
  3419. printk("(%s:%c:%d:%d): Target "
  3420. "Initiated WDTR\n",
  3421. ahc_name(ahc), devinfo->channel,
  3422. devinfo->target, devinfo->lun);
  3423. }
  3424. ahc->msgout_index = 0;
  3425. ahc->msgout_len = 0;
  3426. ahc_construct_wdtr(ahc, devinfo, bus_width);
  3427. ahc->msgout_index = 0;
  3428. response = TRUE;
  3429. sending_reply = TRUE;
  3430. }
  3431. /*
  3432. * After a wide message, we are async, but
  3433. * some devices don't seem to honor this portion
  3434. * of the spec. Force a renegotiation of the
  3435. * sync component of our transfer agreement even
  3436. * if our goal is async. By updating our width
  3437. * after forcing the negotiation, we avoid
  3438. * renegotiating for width.
  3439. */
  3440. ahc_update_neg_request(ahc, devinfo, tstate,
  3441. tinfo, AHC_NEG_ALWAYS);
  3442. ahc_set_width(ahc, devinfo, bus_width,
  3443. AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
  3444. /*paused*/TRUE);
  3445. if (sending_reply == FALSE && reject == FALSE) {
  3446. /*
  3447. * We will always have an SDTR to send.
  3448. */
  3449. ahc->msgout_index = 0;
  3450. ahc->msgout_len = 0;
  3451. ahc_build_transfer_msg(ahc, devinfo);
  3452. ahc->msgout_index = 0;
  3453. response = TRUE;
  3454. }
  3455. done = MSGLOOP_MSGCOMPLETE;
  3456. break;
  3457. }
  3458. case EXTENDED_PPR:
  3459. {
  3460. const struct ahc_syncrate *syncrate;
  3461. u_int period;
  3462. u_int offset;
  3463. u_int bus_width;
  3464. u_int ppr_options;
  3465. u_int saved_width;
  3466. u_int saved_offset;
  3467. u_int saved_ppr_options;
  3468. if (ahc->msgin_buf[1] != MSG_EXT_PPR_LEN) {
  3469. reject = TRUE;
  3470. break;
  3471. }
  3472. /*
  3473. * Wait until we have all args before validating
  3474. * and acting on this message.
  3475. *
  3476. * Add one to MSG_EXT_PPR_LEN to account for
  3477. * the extended message preamble.
  3478. */
  3479. if (ahc->msgin_index < (MSG_EXT_PPR_LEN + 1))
  3480. break;
  3481. period = ahc->msgin_buf[3];
  3482. offset = ahc->msgin_buf[5];
  3483. bus_width = ahc->msgin_buf[6];
  3484. saved_width = bus_width;
  3485. ppr_options = ahc->msgin_buf[7];
  3486. /*
3487. * According to the spec, a DT-only
3488. * period factor (factor 9, the 12.5ns Ultra160
3489. * rate) with no DT option set implies async.
  3490. */
  3491. if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
  3492. && period == 9)
  3493. offset = 0;
  3494. saved_ppr_options = ppr_options;
  3495. saved_offset = offset;
  3496. /*
  3497. * Mask out any options we don't support
  3498. * on any controller. Transfer options are
  3499. * only available if we are negotiating wide.
  3500. */
  3501. ppr_options &= MSG_EXT_PPR_DT_REQ;
  3502. if (bus_width == 0)
  3503. ppr_options = 0;
  3504. ahc_validate_width(ahc, tinfo, &bus_width,
  3505. devinfo->role);
  3506. syncrate = ahc_devlimited_syncrate(ahc, tinfo, &period,
  3507. &ppr_options,
  3508. devinfo->role);
  3509. ahc_validate_offset(ahc, tinfo, syncrate,
  3510. &offset, bus_width,
  3511. devinfo->role);
  3512. if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_PPR, TRUE)) {
  3513. /*
  3514. * If we are unable to do any of the
  3515. * requested options (we went too low),
  3516. * then we'll have to reject the message.
  3517. */
  3518. if (saved_width > bus_width
  3519. || saved_offset != offset
  3520. || saved_ppr_options != ppr_options) {
  3521. reject = TRUE;
  3522. period = 0;
  3523. offset = 0;
  3524. bus_width = 0;
  3525. ppr_options = 0;
  3526. syncrate = NULL;
  3527. }
  3528. } else {
  3529. if (devinfo->role != ROLE_TARGET)
  3530. printk("(%s:%c:%d:%d): Target "
  3531. "Initiated PPR\n",
  3532. ahc_name(ahc), devinfo->channel,
  3533. devinfo->target, devinfo->lun);
  3534. else
  3535. printk("(%s:%c:%d:%d): Initiator "
  3536. "Initiated PPR\n",
  3537. ahc_name(ahc), devinfo->channel,
  3538. devinfo->target, devinfo->lun);
  3539. ahc->msgout_index = 0;
  3540. ahc->msgout_len = 0;
  3541. ahc_construct_ppr(ahc, devinfo, period, offset,
  3542. bus_width, ppr_options);
  3543. ahc->msgout_index = 0;
  3544. response = TRUE;
  3545. }
  3546. if (bootverbose) {
  3547. printk("(%s:%c:%d:%d): Received PPR width %x, "
3548. "period %x, offset %x, options %x\n"
  3549. "\tFiltered to width %x, period %x, "
  3550. "offset %x, options %x\n",
  3551. ahc_name(ahc), devinfo->channel,
  3552. devinfo->target, devinfo->lun,
  3553. saved_width, ahc->msgin_buf[3],
  3554. saved_offset, saved_ppr_options,
  3555. bus_width, period, offset, ppr_options);
  3556. }
  3557. ahc_set_width(ahc, devinfo, bus_width,
  3558. AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
  3559. /*paused*/TRUE);
  3560. ahc_set_syncrate(ahc, devinfo,
  3561. syncrate, period,
  3562. offset, ppr_options,
  3563. AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
  3564. /*paused*/TRUE);
  3565. done = MSGLOOP_MSGCOMPLETE;
  3566. break;
  3567. }
  3568. default:
  3569. /* Unknown extended message. Reject it. */
  3570. reject = TRUE;
  3571. break;
  3572. }
  3573. break;
  3574. }
  3575. #ifdef AHC_TARGET_MODE
  3576. case TARGET_RESET:
  3577. ahc_handle_devreset(ahc, devinfo,
  3578. CAM_BDR_SENT,
  3579. "Bus Device Reset Received",
  3580. /*verbose_level*/0);
  3581. ahc_restart(ahc);
  3582. done = MSGLOOP_TERMINATED;
  3583. break;
  3584. case ABORT_TASK:
  3585. case ABORT_TASK_SET:
  3586. case CLEAR_QUEUE_TASK_SET:
  3587. {
  3588. int tag;
  3589. /* Target mode messages */
  3590. if (devinfo->role != ROLE_TARGET) {
  3591. reject = TRUE;
  3592. break;
  3593. }
  3594. tag = SCB_LIST_NULL;
  3595. if (ahc->msgin_buf[0] == ABORT_TASK)
  3596. tag = ahc_inb(ahc, INITIATOR_TAG);
  3597. ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
  3598. devinfo->lun, tag, ROLE_TARGET,
  3599. CAM_REQ_ABORTED);
  3600. tstate = ahc->enabled_targets[devinfo->our_scsiid];
  3601. if (tstate != NULL) {
  3602. struct ahc_tmode_lstate* lstate;
  3603. lstate = tstate->enabled_luns[devinfo->lun];
  3604. if (lstate != NULL) {
  3605. ahc_queue_lstate_event(ahc, lstate,
  3606. devinfo->our_scsiid,
  3607. ahc->msgin_buf[0],
  3608. /*arg*/tag);
  3609. ahc_send_lstate_events(ahc, lstate);
  3610. }
  3611. }
  3612. ahc_restart(ahc);
  3613. done = MSGLOOP_TERMINATED;
  3614. break;
  3615. }
  3616. #endif
  3617. case TERMINATE_IO_PROC:
  3618. default:
  3619. reject = TRUE;
  3620. break;
  3621. }
  3622. if (reject) {
  3623. /*
  3624. * Setup to reject the message.
  3625. */
  3626. ahc->msgout_index = 0;
  3627. ahc->msgout_len = 1;
  3628. ahc->msgout_buf[0] = MESSAGE_REJECT;
  3629. done = MSGLOOP_MSGCOMPLETE;
  3630. response = TRUE;
  3631. }
  3632. if (done != MSGLOOP_IN_PROG && !response)
  3633. /* Clear the outgoing message buffer */
  3634. ahc->msgout_len = 0;
  3635. return (done);
  3636. }
  3637. /*
  3638. * Process a message reject message.
  3639. */
  3640. static int
  3641. ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
  3642. {
  3643. /*
  3644. * What we care about here is if we had an
  3645. * outstanding SDTR or WDTR message for this
  3646. * target. If we did, this is a signal that
  3647. * the target is refusing negotiation.
  3648. */
  3649. struct scb *scb;
  3650. struct ahc_initiator_tinfo *tinfo;
  3651. struct ahc_tmode_tstate *tstate;
  3652. u_int scb_index;
  3653. u_int last_msg;
  3654. int response = 0;
  3655. scb_index = ahc_inb(ahc, SCB_TAG);
  3656. scb = ahc_lookup_scb(ahc, scb_index);
  3657. tinfo = ahc_fetch_transinfo(ahc, devinfo->channel,
  3658. devinfo->our_scsiid,
  3659. devinfo->target, &tstate);
  3660. /* Might be necessary */
  3661. last_msg = ahc_inb(ahc, LAST_MSG);
  3662. if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_PPR, /*full*/FALSE)) {
  3663. /*
  3664. * Target does not support the PPR message.
  3665. * Attempt to negotiate SPI-2 style.
  3666. */
  3667. if (bootverbose) {
  3668. printk("(%s:%c:%d:%d): PPR Rejected. "
  3669. "Trying WDTR/SDTR\n",
  3670. ahc_name(ahc), devinfo->channel,
  3671. devinfo->target, devinfo->lun);
  3672. }
  3673. tinfo->goal.ppr_options = 0;
  3674. tinfo->curr.transport_version = 2;
  3675. tinfo->goal.transport_version = 2;
  3676. ahc->msgout_index = 0;
  3677. ahc->msgout_len = 0;
  3678. ahc_build_transfer_msg(ahc, devinfo);
  3679. ahc->msgout_index = 0;
  3680. response = 1;
  3681. } else if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_WDTR, /*full*/FALSE)) {
  3682. /* note 8bit xfers */
  3683. printk("(%s:%c:%d:%d): refuses WIDE negotiation. Using "
  3684. "8bit transfers\n", ahc_name(ahc),
  3685. devinfo->channel, devinfo->target, devinfo->lun);
  3686. ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
  3687. AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
  3688. /*paused*/TRUE);
  3689. /*
  3690. * No need to clear the sync rate. If the target
  3691. * did not accept the command, our syncrate is
  3692. * unaffected. If the target started the negotiation,
  3693. * but rejected our response, we already cleared the
  3694. * sync rate before sending our WDTR.
  3695. */
  3696. if (tinfo->goal.offset != tinfo->curr.offset) {
  3697. /* Start the sync negotiation */
  3698. ahc->msgout_index = 0;
  3699. ahc->msgout_len = 0;
  3700. ahc_build_transfer_msg(ahc, devinfo);
  3701. ahc->msgout_index = 0;
  3702. response = 1;
  3703. }
  3704. } else if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_SDTR, /*full*/FALSE)) {
  3705. /* note asynch xfers and clear flag */
  3706. ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0,
  3707. /*offset*/0, /*ppr_options*/0,
  3708. AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
  3709. /*paused*/TRUE);
  3710. printk("(%s:%c:%d:%d): refuses synchronous negotiation. "
  3711. "Using asynchronous transfers\n",
  3712. ahc_name(ahc), devinfo->channel,
  3713. devinfo->target, devinfo->lun);
  3714. } else if ((scb->hscb->control & SIMPLE_QUEUE_TAG) != 0) {
  3715. int tag_type;
  3716. int mask;
  3717. tag_type = (scb->hscb->control & SIMPLE_QUEUE_TAG);
  3718. if (tag_type == SIMPLE_QUEUE_TAG) {
  3719. printk("(%s:%c:%d:%d): refuses tagged commands. "
  3720. "Performing non-tagged I/O\n", ahc_name(ahc),
  3721. devinfo->channel, devinfo->target, devinfo->lun);
  3722. ahc_set_tags(ahc, scb->io_ctx, devinfo, AHC_QUEUE_NONE);
  3723. mask = ~0x23;
  3724. } else {
  3725. printk("(%s:%c:%d:%d): refuses %s tagged commands. "
  3726. "Performing simple queue tagged I/O only\n",
  3727. ahc_name(ahc), devinfo->channel, devinfo->target,
  3728. devinfo->lun, tag_type == ORDERED_QUEUE_TAG
  3729. ? "ordered" : "head of queue");
  3730. ahc_set_tags(ahc, scb->io_ctx, devinfo, AHC_QUEUE_BASIC);
  3731. mask = ~0x03;
  3732. }
  3733. /*
  3734. * Resend the identify for this CCB as the target
  3735. * may believe that the selection is invalid otherwise.
  3736. */
  3737. ahc_outb(ahc, SCB_CONTROL,
  3738. ahc_inb(ahc, SCB_CONTROL) & mask);
  3739. scb->hscb->control &= mask;
  3740. ahc_set_transaction_tag(scb, /*enabled*/FALSE,
  3741. /*type*/SIMPLE_QUEUE_TAG);
  3742. ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
  3743. ahc_assert_atn(ahc);
  3744. /*
  3745. * This transaction is now at the head of
  3746. * the untagged queue for this target.
  3747. */
  3748. if ((ahc->flags & AHC_SCB_BTT) == 0) {
  3749. struct scb_tailq *untagged_q;
  3750. untagged_q =
  3751. &(ahc->untagged_queues[devinfo->target_offset]);
  3752. TAILQ_INSERT_HEAD(untagged_q, scb, links.tqe);
  3753. scb->flags |= SCB_UNTAGGEDQ;
  3754. }
  3755. ahc_busy_tcl(ahc, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
  3756. scb->hscb->tag);
  3757. /*
  3758. * Requeue all tagged commands for this target
  3759. * currently in our possession so they can be
  3760. * converted to untagged commands.
  3761. */
  3762. ahc_search_qinfifo(ahc, SCB_GET_TARGET(ahc, scb),
  3763. SCB_GET_CHANNEL(ahc, scb),
  3764. SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
  3765. ROLE_INITIATOR, CAM_REQUEUE_REQ,
  3766. SEARCH_COMPLETE);
  3767. } else {
  3768. /*
  3769. * Otherwise, we ignore it.
  3770. */
  3771. printk("%s:%c:%d: Message reject for %x -- ignored\n",
  3772. ahc_name(ahc), devinfo->channel, devinfo->target,
  3773. last_msg);
  3774. }
  3775. return (response);
  3776. }
  3777. /*
3778. * Process an ignore wide residue message.
  3779. */
  3780. static void
  3781. ahc_handle_ign_wide_residue(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
  3782. {
  3783. u_int scb_index;
  3784. struct scb *scb;
  3785. scb_index = ahc_inb(ahc, SCB_TAG);
  3786. scb = ahc_lookup_scb(ahc, scb_index);
  3787. /*
  3788. * XXX Actually check data direction in the sequencer?
  3789. * Perhaps add datadir to some spare bits in the hscb?
  3790. */
  3791. if ((ahc_inb(ahc, SEQ_FLAGS) & DPHASE) == 0
  3792. || ahc_get_transfer_dir(scb) != CAM_DIR_IN) {
  3793. /*
  3794. * Ignore the message if we haven't
  3795. * seen an appropriate data phase yet.
  3796. */
  3797. } else {
  3798. /*
  3799. * If the residual occurred on the last
  3800. * transfer and the transfer request was
  3801. * expected to end on an odd count, do
  3802. * nothing. Otherwise, subtract a byte
  3803. * and update the residual count accordingly.
  3804. */
  3805. uint32_t sgptr;
  3806. sgptr = ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
  3807. if ((sgptr & SG_LIST_NULL) != 0
  3808. && (ahc_inb(ahc, SCB_LUN) & SCB_XFERLEN_ODD) != 0) {
  3809. /*
  3810. * If the residual occurred on the last
  3811. * transfer and the transfer request was
  3812. * expected to end on an odd count, do
  3813. * nothing.
  3814. */
  3815. } else {
  3816. struct ahc_dma_seg *sg;
  3817. uint32_t data_cnt;
  3818. uint32_t data_addr;
  3819. uint32_t sglen;
  3820. /* Pull in all of the sgptr */
  3821. sgptr = ahc_inl(ahc, SCB_RESIDUAL_SGPTR);
  3822. data_cnt = ahc_inl(ahc, SCB_RESIDUAL_DATACNT);
  3823. if ((sgptr & SG_LIST_NULL) != 0) {
  3824. /*
  3825. * The residual data count is not updated
  3826. * for the command run to completion case.
  3827. * Explicitly zero the count.
  3828. */
  3829. data_cnt &= ~AHC_SG_LEN_MASK;
  3830. }
  3831. data_addr = ahc_inl(ahc, SHADDR);
  3832. data_cnt += 1;
  3833. data_addr -= 1;
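/*
 * The ignore wide residue message tells us that the final byte of
 * the last 16-bit transfer was padding, so credit one byte back to
 * the residual count and rewind the data address by one.
 */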
  3834. sgptr &= SG_PTR_MASK;
  3835. sg = ahc_sg_bus_to_virt(scb, sgptr);
  3836. /*
  3837. * The residual sg ptr points to the next S/G
  3838. * to load so we must go back one.
  3839. */
  3840. sg--;
  3841. sglen = ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
  3842. if (sg != scb->sg_list
  3843. && sglen < (data_cnt & AHC_SG_LEN_MASK)) {
  3844. sg--;
  3845. sglen = ahc_le32toh(sg->len);
  3846. /*
  3847. * Preserve High Address and SG_LIST bits
  3848. * while setting the count to 1.
  3849. */
  3850. data_cnt = 1 | (sglen & (~AHC_SG_LEN_MASK));
  3851. data_addr = ahc_le32toh(sg->addr)
  3852. + (sglen & AHC_SG_LEN_MASK) - 1;
  3853. /*
  3854. * Increment sg so it points to the
  3855. * "next" sg.
  3856. */
  3857. sg++;
  3858. sgptr = ahc_sg_virt_to_bus(scb, sg);
  3859. }
  3860. ahc_outl(ahc, SCB_RESIDUAL_SGPTR, sgptr);
  3861. ahc_outl(ahc, SCB_RESIDUAL_DATACNT, data_cnt);
  3862. /*
  3863. * Toggle the "oddness" of the transfer length
  3864. * to handle this mid-transfer ignore wide
  3865. * residue. This ensures that the oddness is
  3866. * correct for subsequent data transfers.
  3867. */
  3868. ahc_outb(ahc, SCB_LUN,
  3869. ahc_inb(ahc, SCB_LUN) ^ SCB_XFERLEN_ODD);
  3870. }
  3871. }
  3872. }
  3873. /*
  3874. * Reinitialize the data pointers for the active transfer
  3875. * based on its current residual.
  3876. */
  3877. static void
  3878. ahc_reinitialize_dataptrs(struct ahc_softc *ahc)
  3879. {
  3880. struct scb *scb;
  3881. struct ahc_dma_seg *sg;
  3882. u_int scb_index;
  3883. uint32_t sgptr;
  3884. uint32_t resid;
  3885. uint32_t dataptr;
  3886. scb_index = ahc_inb(ahc, SCB_TAG);
  3887. scb = ahc_lookup_scb(ahc, scb_index);
  3888. sgptr = (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 3) << 24)
  3889. | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 2) << 16)
  3890. | (ahc_inb(ahc, SCB_RESIDUAL_SGPTR + 1) << 8)
  3891. | ahc_inb(ahc, SCB_RESIDUAL_SGPTR);
  3892. sgptr &= SG_PTR_MASK;
  3893. sg = ahc_sg_bus_to_virt(scb, sgptr);
  3894. /* The residual sg_ptr always points to the next sg */
  3895. sg--;
  3896. resid = (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 2) << 16)
  3897. | (ahc_inb(ahc, SCB_RESIDUAL_DATACNT + 1) << 8)
  3898. | ahc_inb(ahc, SCB_RESIDUAL_DATACNT);
  3899. dataptr = ahc_le32toh(sg->addr)
  3900. + (ahc_le32toh(sg->len) & AHC_SG_LEN_MASK)
  3901. - resid;
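/*
 * The segment ends at addr + len, so subtracting the bytes still
 * outstanding (resid) yields the address of the next byte to
 * transfer within this segment.
 */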
  3902. if ((ahc->flags & AHC_39BIT_ADDRESSING) != 0) {
  3903. u_int dscommand1;
  3904. dscommand1 = ahc_inb(ahc, DSCOMMAND1);
  3905. ahc_outb(ahc, DSCOMMAND1, dscommand1 | HADDLDSEL0);
  3906. ahc_outb(ahc, HADDR,
  3907. (ahc_le32toh(sg->len) >> 24) & SG_HIGH_ADDR_BITS);
  3908. ahc_outb(ahc, DSCOMMAND1, dscommand1);
  3909. }
  3910. ahc_outb(ahc, HADDR + 3, dataptr >> 24);
  3911. ahc_outb(ahc, HADDR + 2, dataptr >> 16);
  3912. ahc_outb(ahc, HADDR + 1, dataptr >> 8);
  3913. ahc_outb(ahc, HADDR, dataptr);
  3914. ahc_outb(ahc, HCNT + 2, resid >> 16);
  3915. ahc_outb(ahc, HCNT + 1, resid >> 8);
  3916. ahc_outb(ahc, HCNT, resid);
  3917. if ((ahc->features & AHC_ULTRA2) == 0) {
  3918. ahc_outb(ahc, STCNT + 2, resid >> 16);
  3919. ahc_outb(ahc, STCNT + 1, resid >> 8);
  3920. ahc_outb(ahc, STCNT, resid);
  3921. }
  3922. }
  3923. /*
  3924. * Handle the effects of issuing a bus device reset message.
  3925. */
  3926. static void
  3927. ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
  3928. cam_status status, char *message, int verbose_level)
  3929. {
  3930. #ifdef AHC_TARGET_MODE
  3931. struct ahc_tmode_tstate* tstate;
  3932. u_int lun;
  3933. #endif
  3934. int found;
  3935. found = ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
  3936. CAM_LUN_WILDCARD, SCB_LIST_NULL, devinfo->role,
  3937. status);
  3938. #ifdef AHC_TARGET_MODE
  3939. /*
3940. * Send an immediate notify ccb to all target mode peripheral
  3941. * drivers affected by this action.
  3942. */
  3943. tstate = ahc->enabled_targets[devinfo->our_scsiid];
  3944. if (tstate != NULL) {
  3945. for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
  3946. struct ahc_tmode_lstate* lstate;
  3947. lstate = tstate->enabled_luns[lun];
  3948. if (lstate == NULL)
  3949. continue;
  3950. ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
  3951. TARGET_RESET, /*arg*/0);
  3952. ahc_send_lstate_events(ahc, lstate);
  3953. }
  3954. }
  3955. #endif
  3956. /*
  3957. * Go back to async/narrow transfers and renegotiate.
  3958. */
  3959. ahc_set_width(ahc, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
  3960. AHC_TRANS_CUR, /*paused*/TRUE);
  3961. ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL,
  3962. /*period*/0, /*offset*/0, /*ppr_options*/0,
  3963. AHC_TRANS_CUR, /*paused*/TRUE);
  3964. if (status != CAM_SEL_TIMEOUT)
  3965. ahc_send_async(ahc, devinfo->channel, devinfo->target,
  3966. CAM_LUN_WILDCARD, AC_SENT_BDR);
  3967. if (message != NULL
  3968. && (verbose_level <= bootverbose))
  3969. printk("%s: %s on %c:%d. %d SCBs aborted\n", ahc_name(ahc),
  3970. message, devinfo->channel, devinfo->target, found);
  3971. }
  3972. #ifdef AHC_TARGET_MODE
  3973. static void
  3974. ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
  3975. struct scb *scb)
  3976. {
  3977. /*
  3978. * To facilitate adding multiple messages together,
  3979. * each routine should increment the index and len
  3980. * variables instead of setting them explicitly.
  3981. */
  3982. ahc->msgout_index = 0;
  3983. ahc->msgout_len = 0;
  3984. if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0)
  3985. ahc_build_transfer_msg(ahc, devinfo);
  3986. else
  3987. panic("ahc_intr: AWAITING target message with no message");
  3988. ahc->msgout_index = 0;
  3989. ahc->msg_type = MSG_TYPE_TARGET_MSGIN;
  3990. }
  3991. #endif
  3992. /**************************** Initialization **********************************/
  3993. /*
  3994. * Allocate a controller structure for a new device
3995. * and perform initial initialization.
  3996. */
  3997. struct ahc_softc *
  3998. ahc_alloc(void *platform_arg, char *name)
  3999. {
  4000. struct ahc_softc *ahc;
  4001. int i;
  4002. ahc = kzalloc(sizeof(*ahc), GFP_ATOMIC);
  4003. if (!ahc) {
  4004. printk("aic7xxx: cannot malloc softc!\n");
  4005. kfree(name);
  4006. return NULL;
  4007. }
  4008. ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC);
  4009. if (ahc->seep_config == NULL) {
  4010. kfree(ahc);
  4011. kfree(name);
  4012. return (NULL);
  4013. }
  4014. LIST_INIT(&ahc->pending_scbs);
  4015. /* We don't know our unit number until the OSM sets it */
  4016. ahc->name = name;
  4017. ahc->unit = -1;
  4018. ahc->description = NULL;
  4019. ahc->channel = 'A';
  4020. ahc->channel_b = 'B';
  4021. ahc->chip = AHC_NONE;
  4022. ahc->features = AHC_FENONE;
  4023. ahc->bugs = AHC_BUGNONE;
  4024. ahc->flags = AHC_FNONE;
  4025. /*
  4026. * Default to all error reporting enabled with the
  4027. * sequencer operating at its fastest speed.
  4028. * The bus attach code may modify this.
  4029. */
  4030. ahc->seqctl = FASTMODE;
  4031. for (i = 0; i < AHC_NUM_TARGETS; i++)
  4032. TAILQ_INIT(&ahc->untagged_queues[i]);
  4033. if (ahc_platform_alloc(ahc, platform_arg) != 0) {
  4034. ahc_free(ahc);
  4035. ahc = NULL;
  4036. }
  4037. return (ahc);
  4038. }
  4039. int
  4040. ahc_softc_init(struct ahc_softc *ahc)
  4041. {
  4042. /* The IRQMS bit is only valid on VL and EISA chips */
  4043. if ((ahc->chip & AHC_PCI) == 0)
  4044. ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS;
  4045. else
  4046. ahc->unpause = 0;
  4047. ahc->pause = ahc->unpause | PAUSE;
  4048. /* XXX The shared scb data stuff should be deprecated */
  4049. if (ahc->scb_data == NULL) {
  4050. ahc->scb_data = kzalloc(sizeof(*ahc->scb_data), GFP_ATOMIC);
  4051. if (ahc->scb_data == NULL)
  4052. return (ENOMEM);
  4053. }
  4054. return (0);
  4055. }
  4056. void
  4057. ahc_set_unit(struct ahc_softc *ahc, int unit)
  4058. {
  4059. ahc->unit = unit;
  4060. }
  4061. void
  4062. ahc_set_name(struct ahc_softc *ahc, char *name)
  4063. {
  4064. kfree(ahc->name);
  4065. ahc->name = name;
  4066. }
  4067. void
  4068. ahc_free(struct ahc_softc *ahc)
  4069. {
  4070. int i;
  4071. switch (ahc->init_level) {
  4072. default:
  4073. case 5:
  4074. ahc_shutdown(ahc);
  4075. fallthrough;
  4076. case 4:
  4077. ahc_dmamap_unload(ahc, ahc->shared_data_dmat,
  4078. ahc->shared_data_dmamap);
  4079. fallthrough;
  4080. case 3:
  4081. ahc_dmamem_free(ahc, ahc->shared_data_dmat, ahc->qoutfifo,
  4082. ahc->shared_data_dmamap);
  4083. ahc_dmamap_destroy(ahc, ahc->shared_data_dmat,
  4084. ahc->shared_data_dmamap);
  4085. fallthrough;
  4086. case 2:
  4087. ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
  4088. fallthrough;
  4089. case 1:
  4090. break;
  4091. case 0:
  4092. break;
  4093. }
  4094. ahc_platform_free(ahc);
  4095. ahc_fini_scbdata(ahc);
  4096. for (i = 0; i < AHC_NUM_TARGETS; i++) {
  4097. struct ahc_tmode_tstate *tstate;
  4098. tstate = ahc->enabled_targets[i];
  4099. if (tstate != NULL) {
  4100. #ifdef AHC_TARGET_MODE
  4101. int j;
  4102. for (j = 0; j < AHC_NUM_LUNS; j++) {
  4103. struct ahc_tmode_lstate *lstate;
  4104. lstate = tstate->enabled_luns[j];
  4105. if (lstate != NULL) {
  4106. xpt_free_path(lstate->path);
  4107. kfree(lstate);
  4108. }
  4109. }
  4110. #endif
  4111. kfree(tstate);
  4112. }
  4113. }
  4114. #ifdef AHC_TARGET_MODE
  4115. if (ahc->black_hole != NULL) {
  4116. xpt_free_path(ahc->black_hole->path);
  4117. kfree(ahc->black_hole);
  4118. }
  4119. #endif
  4120. kfree(ahc->name);
  4121. kfree(ahc->seep_config);
  4122. kfree(ahc);
  4123. return;
  4124. }
  4125. static void
  4126. ahc_shutdown(void *arg)
  4127. {
  4128. struct ahc_softc *ahc;
  4129. int i;
  4130. ahc = (struct ahc_softc *)arg;
  4131. /* This will reset most registers to 0, but not all */
  4132. ahc_reset(ahc, /*reinit*/FALSE);
  4133. ahc_outb(ahc, SCSISEQ, 0);
  4134. ahc_outb(ahc, SXFRCTL0, 0);
  4135. ahc_outb(ahc, DSPCISTATUS, 0);
  4136. for (i = TARG_SCSIRATE; i < SCSICONF; i++)
  4137. ahc_outb(ahc, i, 0);
  4138. }
  4139. /*
  4140. * Reset the controller and record some information about it
  4141. * that is only available just after a reset. If "reinit" is
  4142. * non-zero, this reset occurred after initial configuration
  4143. * and the caller requests that the chip be fully reinitialized
4144. * to a runnable state. Chip interrupts are *not* enabled after
  4145. * a reinitialization. The caller must enable interrupts via
  4146. * ahc_intr_enable().
  4147. */
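/*
 * A typical full re-initialization, mirroring ahc_resume() further
 * below, is ahc_reset(ahc, TRUE) to reset and re-init the chip,
 * followed by ahc_intr_enable(ahc, TRUE) and ahc_restart(ahc).
 */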
  4148. int
  4149. ahc_reset(struct ahc_softc *ahc, int reinit)
  4150. {
  4151. u_int sblkctl;
  4152. u_int sxfrctl1_a, sxfrctl1_b;
  4153. int error;
  4154. int wait;
  4155. /*
  4156. * Preserve the value of the SXFRCTL1 register for all channels.
  4157. * It contains settings that affect termination and we don't want
  4158. * to disturb the integrity of the bus.
  4159. */
  4160. ahc_pause(ahc);
  4161. sxfrctl1_b = 0;
  4162. if ((ahc->chip & AHC_CHIPID_MASK) == AHC_AIC7770) {
  4163. u_int sblkctl;
  4164. /*
  4165. * Save channel B's settings in case this chip
  4166. * is setup for TWIN channel operation.
  4167. */
  4168. sblkctl = ahc_inb(ahc, SBLKCTL);
  4169. ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
  4170. sxfrctl1_b = ahc_inb(ahc, SXFRCTL1);
  4171. ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
  4172. }
  4173. sxfrctl1_a = ahc_inb(ahc, SXFRCTL1);
  4174. ahc_outb(ahc, HCNTRL, CHIPRST | ahc->pause);
  4175. /*
  4176. * Ensure that the reset has finished. We delay 1000us
  4177. * prior to reading the register to make sure the chip
  4178. * has sufficiently completed its reset to handle register
  4179. * accesses.
  4180. */
  4181. wait = 1000;
  4182. do {
  4183. ahc_delay(1000);
  4184. } while (--wait && !(ahc_inb(ahc, HCNTRL) & CHIPRSTACK));
  4185. if (wait == 0) {
  4186. printk("%s: WARNING - Failed chip reset! "
  4187. "Trying to initialize anyway.\n", ahc_name(ahc));
  4188. }
  4189. ahc_outb(ahc, HCNTRL, ahc->pause);
  4190. /* Determine channel configuration */
  4191. sblkctl = ahc_inb(ahc, SBLKCTL) & (SELBUSB|SELWIDE);
  4192. /* No Twin Channel PCI cards */
  4193. if ((ahc->chip & AHC_PCI) != 0)
  4194. sblkctl &= ~SELBUSB;
  4195. switch (sblkctl) {
  4196. case 0:
  4197. /* Single Narrow Channel */
  4198. break;
  4199. case 2:
  4200. /* Wide Channel */
  4201. ahc->features |= AHC_WIDE;
  4202. break;
  4203. case 8:
  4204. /* Twin Channel */
  4205. ahc->features |= AHC_TWIN;
  4206. break;
  4207. default:
  4208. printk(" Unsupported adapter type. Ignoring\n");
  4209. return(-1);
  4210. }
  4211. /*
  4212. * Reload sxfrctl1.
  4213. *
  4214. * We must always initialize STPWEN to 1 before we
  4215. * restore the saved values. STPWEN is initialized
  4216. * to a tri-state condition which can only be cleared
  4217. * by turning it on.
  4218. */
  4219. if ((ahc->features & AHC_TWIN) != 0) {
  4220. u_int sblkctl;
  4221. sblkctl = ahc_inb(ahc, SBLKCTL);
  4222. ahc_outb(ahc, SBLKCTL, sblkctl | SELBUSB);
  4223. ahc_outb(ahc, SXFRCTL1, sxfrctl1_b);
  4224. ahc_outb(ahc, SBLKCTL, sblkctl & ~SELBUSB);
  4225. }
  4226. ahc_outb(ahc, SXFRCTL1, sxfrctl1_a);
  4227. error = 0;
  4228. if (reinit != 0)
  4229. /*
  4230. * If a recovery action has forced a chip reset,
  4231. * re-initialize the chip to our liking.
  4232. */
  4233. error = ahc->bus_chip_init(ahc);
  4234. #ifdef AHC_DUMP_SEQ
  4235. else
  4236. ahc_dumpseq(ahc);
  4237. #endif
  4238. return (error);
  4239. }
  4240. /*
  4241. * Determine the number of SCBs available on the controller
  4242. */
  4243. int
  4244. ahc_probe_scbs(struct ahc_softc *ahc) {
  4245. int i;
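/*
 * Probe by walking the candidate SCB slots: select slot i, write the
 * index into the first SCB byte, and read it back.  Stop when the
 * readback fails or when slot 0 no longer reads back 0, which suggests
 * the slots have started to alias.
 */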
  4246. for (i = 0; i < AHC_SCB_MAX; i++) {
  4247. ahc_outb(ahc, SCBPTR, i);
  4248. ahc_outb(ahc, SCB_BASE, i);
  4249. if (ahc_inb(ahc, SCB_BASE) != i)
  4250. break;
  4251. ahc_outb(ahc, SCBPTR, 0);
  4252. if (ahc_inb(ahc, SCB_BASE) != 0)
  4253. break;
  4254. }
  4255. return (i);
  4256. }
  4257. static void
  4258. ahc_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
  4259. {
  4260. dma_addr_t *baddr;
  4261. baddr = (dma_addr_t *)arg;
  4262. *baddr = segs->ds_addr;
  4263. }
  4264. static void
  4265. ahc_build_free_scb_list(struct ahc_softc *ahc)
  4266. {
  4267. int scbsize;
  4268. int i;
  4269. scbsize = 32;
  4270. if ((ahc->flags & AHC_LSCBS_ENABLED) != 0)
  4271. scbsize = 64;
  4272. for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
  4273. int j;
  4274. ahc_outb(ahc, SCBPTR, i);
  4275. /*
  4276. * Touch all SCB bytes to avoid parity errors
  4277. * should one of our debugging routines read
4278. * an otherwise uninitialized byte.
  4279. */
  4280. for (j = 0; j < scbsize; j++)
  4281. ahc_outb(ahc, SCB_BASE+j, 0xFF);
  4282. /* Clear the control byte. */
  4283. ahc_outb(ahc, SCB_CONTROL, 0);
  4284. /* Set the next pointer */
  4285. if ((ahc->flags & AHC_PAGESCBS) != 0)
  4286. ahc_outb(ahc, SCB_NEXT, i+1);
  4287. else
  4288. ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
  4289. /* Make the tag number, SCSIID, and lun invalid */
  4290. ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
  4291. ahc_outb(ahc, SCB_SCSIID, 0xFF);
  4292. ahc_outb(ahc, SCB_LUN, 0xFF);
  4293. }
  4294. if ((ahc->flags & AHC_PAGESCBS) != 0) {
  4295. /* SCB 0 heads the free list. */
  4296. ahc_outb(ahc, FREE_SCBH, 0);
  4297. } else {
  4298. /* No free list. */
  4299. ahc_outb(ahc, FREE_SCBH, SCB_LIST_NULL);
  4300. }
  4301. /* Make sure that the last SCB terminates the free list */
  4302. ahc_outb(ahc, SCBPTR, i-1);
  4303. ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
  4304. }
  4305. static int
  4306. ahc_init_scbdata(struct ahc_softc *ahc)
  4307. {
  4308. struct scb_data *scb_data;
  4309. scb_data = ahc->scb_data;
  4310. SLIST_INIT(&scb_data->free_scbs);
  4311. SLIST_INIT(&scb_data->sg_maps);
  4312. /* Allocate SCB resources */
  4313. scb_data->scbarray = kcalloc(AHC_SCB_MAX_ALLOC, sizeof(struct scb),
  4314. GFP_ATOMIC);
  4315. if (scb_data->scbarray == NULL)
  4316. return (ENOMEM);
  4317. /* Determine the number of hardware SCBs and initialize them */
  4318. scb_data->maxhscbs = ahc_probe_scbs(ahc);
  4319. if (ahc->scb_data->maxhscbs == 0) {
  4320. printk("%s: No SCB space found\n", ahc_name(ahc));
  4321. return (ENXIO);
  4322. }
  4323. /*
  4324. * Create our DMA tags. These tags define the kinds of device
  4325. * accessible memory allocations and memory mappings we will
  4326. * need to perform during normal operation.
  4327. *
  4328. * Unless we need to further restrict the allocation, we rely
  4329. * on the restrictions of the parent dmat, hence the common
  4330. * use of MAXADDR and MAXSIZE.
  4331. */
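/*
 * DMA setup proceeds in stages: a tag, an allocation, and a permanent
 * mapping for the hardware SCB array and for the sense buffers, plus a
 * tag for page-sized S/G chunks (which ahc_alloc_scbs() allocates on
 * demand).  Each stage bumps scb_data->init_level so that
 * ahc_fini_scbdata() can unwind exactly as far as we got.
 */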
  4332. /* DMA tag for our hardware scb structures */
  4333. if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
  4334. /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
  4335. /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
  4336. /*highaddr*/BUS_SPACE_MAXADDR,
  4337. /*filter*/NULL, /*filterarg*/NULL,
  4338. AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
  4339. /*nsegments*/1,
  4340. /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
  4341. /*flags*/0, &scb_data->hscb_dmat) != 0) {
  4342. goto error_exit;
  4343. }
  4344. scb_data->init_level++;
  4345. /* Allocation for our hscbs */
  4346. if (ahc_dmamem_alloc(ahc, scb_data->hscb_dmat,
  4347. (void **)&scb_data->hscbs,
  4348. BUS_DMA_NOWAIT, &scb_data->hscb_dmamap) != 0) {
  4349. goto error_exit;
  4350. }
  4351. scb_data->init_level++;
  4352. /* And permanently map them */
  4353. ahc_dmamap_load(ahc, scb_data->hscb_dmat, scb_data->hscb_dmamap,
  4354. scb_data->hscbs,
  4355. AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb),
  4356. ahc_dmamap_cb, &scb_data->hscb_busaddr, /*flags*/0);
  4357. scb_data->init_level++;
  4358. /* DMA tag for our sense buffers */
  4359. if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
  4360. /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
  4361. /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
  4362. /*highaddr*/BUS_SPACE_MAXADDR,
  4363. /*filter*/NULL, /*filterarg*/NULL,
  4364. AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
  4365. /*nsegments*/1,
  4366. /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
  4367. /*flags*/0, &scb_data->sense_dmat) != 0) {
  4368. goto error_exit;
  4369. }
  4370. scb_data->init_level++;
  4371. /* Allocate them */
  4372. if (ahc_dmamem_alloc(ahc, scb_data->sense_dmat,
  4373. (void **)&scb_data->sense,
  4374. BUS_DMA_NOWAIT, &scb_data->sense_dmamap) != 0) {
  4375. goto error_exit;
  4376. }
  4377. scb_data->init_level++;
  4378. /* And permanently map them */
  4379. ahc_dmamap_load(ahc, scb_data->sense_dmat, scb_data->sense_dmamap,
  4380. scb_data->sense,
  4381. AHC_SCB_MAX_ALLOC * sizeof(struct scsi_sense_data),
  4382. ahc_dmamap_cb, &scb_data->sense_busaddr, /*flags*/0);
  4383. scb_data->init_level++;
  4384. /* DMA tag for our S/G structures. We allocate in page sized chunks */
  4385. if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/8,
  4386. /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
  4387. /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
  4388. /*highaddr*/BUS_SPACE_MAXADDR,
  4389. /*filter*/NULL, /*filterarg*/NULL,
  4390. PAGE_SIZE, /*nsegments*/1,
  4391. /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
  4392. /*flags*/0, &scb_data->sg_dmat) != 0) {
  4393. goto error_exit;
  4394. }
  4395. scb_data->init_level++;
  4396. /* Perform initial CCB allocation */
  4397. memset(scb_data->hscbs, 0,
  4398. AHC_SCB_MAX_ALLOC * sizeof(struct hardware_scb));
  4399. ahc_alloc_scbs(ahc);
  4400. if (scb_data->numscbs == 0) {
  4401. printk("%s: ahc_init_scbdata - "
  4402. "Unable to allocate initial scbs\n",
  4403. ahc_name(ahc));
  4404. goto error_exit;
  4405. }
  4406. /*
  4407. * Reserve the next queued SCB.
  4408. */
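/*
 * The reserved SCB gives the sequencer a valid NEXT_QUEUED_SCB to
 * prefetch at all times; ahc_swap_with_next_hscb() and the qinfifo
 * requeue code rely on it (see the queue management routines below).
 */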
  4409. ahc->next_queued_scb = ahc_get_scb(ahc);
  4410. /*
  4411. * Note that we were successful
  4412. */
  4413. return (0);
  4414. error_exit:
  4415. return (ENOMEM);
  4416. }
  4417. static void
  4418. ahc_fini_scbdata(struct ahc_softc *ahc)
  4419. {
  4420. struct scb_data *scb_data;
  4421. scb_data = ahc->scb_data;
  4422. if (scb_data == NULL)
  4423. return;
  4424. switch (scb_data->init_level) {
  4425. default:
  4426. case 7:
  4427. {
  4428. struct sg_map_node *sg_map;
  4429. while ((sg_map = SLIST_FIRST(&scb_data->sg_maps))!= NULL) {
  4430. SLIST_REMOVE_HEAD(&scb_data->sg_maps, links);
  4431. ahc_dmamap_unload(ahc, scb_data->sg_dmat,
  4432. sg_map->sg_dmamap);
  4433. ahc_dmamem_free(ahc, scb_data->sg_dmat,
  4434. sg_map->sg_vaddr,
  4435. sg_map->sg_dmamap);
  4436. kfree(sg_map);
  4437. }
  4438. ahc_dma_tag_destroy(ahc, scb_data->sg_dmat);
  4439. }
  4440. fallthrough;
  4441. case 6:
  4442. ahc_dmamap_unload(ahc, scb_data->sense_dmat,
  4443. scb_data->sense_dmamap);
  4444. fallthrough;
  4445. case 5:
  4446. ahc_dmamem_free(ahc, scb_data->sense_dmat, scb_data->sense,
  4447. scb_data->sense_dmamap);
  4448. ahc_dmamap_destroy(ahc, scb_data->sense_dmat,
  4449. scb_data->sense_dmamap);
  4450. fallthrough;
  4451. case 4:
  4452. ahc_dma_tag_destroy(ahc, scb_data->sense_dmat);
  4453. fallthrough;
  4454. case 3:
  4455. ahc_dmamap_unload(ahc, scb_data->hscb_dmat,
  4456. scb_data->hscb_dmamap);
  4457. fallthrough;
  4458. case 2:
  4459. ahc_dmamem_free(ahc, scb_data->hscb_dmat, scb_data->hscbs,
  4460. scb_data->hscb_dmamap);
  4461. ahc_dmamap_destroy(ahc, scb_data->hscb_dmat,
  4462. scb_data->hscb_dmamap);
  4463. fallthrough;
  4464. case 1:
  4465. ahc_dma_tag_destroy(ahc, scb_data->hscb_dmat);
  4466. break;
  4467. case 0:
  4468. break;
  4469. }
  4470. kfree(scb_data->scbarray);
  4471. }
  4472. static void
  4473. ahc_alloc_scbs(struct ahc_softc *ahc)
  4474. {
  4475. struct scb_data *scb_data;
  4476. struct scb *next_scb;
  4477. struct sg_map_node *sg_map;
  4478. dma_addr_t physaddr;
  4479. struct ahc_dma_seg *segs;
  4480. int newcount;
  4481. int i;
  4482. scb_data = ahc->scb_data;
  4483. if (scb_data->numscbs >= AHC_SCB_MAX_ALLOC)
  4484. /* Can't allocate any more */
  4485. return;
  4486. next_scb = &scb_data->scbarray[scb_data->numscbs];
  4487. sg_map = kmalloc(sizeof(*sg_map), GFP_ATOMIC);
  4488. if (sg_map == NULL)
  4489. return;
4490. /* Allocate S/G space for the next batch of SCBs */
  4491. if (ahc_dmamem_alloc(ahc, scb_data->sg_dmat,
  4492. (void **)&sg_map->sg_vaddr,
  4493. BUS_DMA_NOWAIT, &sg_map->sg_dmamap) != 0) {
  4494. kfree(sg_map);
  4495. return;
  4496. }
  4497. SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links);
  4498. ahc_dmamap_load(ahc, scb_data->sg_dmat, sg_map->sg_dmamap,
  4499. sg_map->sg_vaddr, PAGE_SIZE, ahc_dmamap_cb,
  4500. &sg_map->sg_physaddr, /*flags*/0);
  4501. segs = sg_map->sg_vaddr;
  4502. physaddr = sg_map->sg_physaddr;
  4503. newcount = (PAGE_SIZE / (AHC_NSEG * sizeof(struct ahc_dma_seg)));
  4504. newcount = min(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs));
  4505. for (i = 0; i < newcount; i++) {
  4506. struct scb_platform_data *pdata;
  4507. pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC);
  4508. if (pdata == NULL)
  4509. break;
  4510. next_scb->platform_data = pdata;
  4511. next_scb->sg_map = sg_map;
  4512. next_scb->sg_list = segs;
  4513. /*
  4514. * The sequencer always starts with the second entry.
  4515. * The first entry is embedded in the scb.
  4516. */
  4517. next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
  4518. next_scb->ahc_softc = ahc;
  4519. next_scb->flags = SCB_FREE;
  4520. next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
  4521. next_scb->hscb->tag = ahc->scb_data->numscbs;
  4522. SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
  4523. next_scb, links.sle);
  4524. segs += AHC_NSEG;
  4525. physaddr += (AHC_NSEG * sizeof(struct ahc_dma_seg));
  4526. next_scb++;
  4527. ahc->scb_data->numscbs++;
  4528. }
  4529. }
  4530. void
  4531. ahc_controller_info(struct ahc_softc *ahc, char *buf)
  4532. {
  4533. int len;
  4534. len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
  4535. buf += len;
  4536. if ((ahc->features & AHC_TWIN) != 0)
  4537. len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
  4538. "B SCSI Id=%d, primary %c, ",
  4539. ahc->our_id, ahc->our_id_b,
  4540. (ahc->flags & AHC_PRIMARY_CHANNEL) + 'A');
  4541. else {
  4542. const char *speed;
  4543. const char *type;
  4544. speed = "";
  4545. if ((ahc->features & AHC_ULTRA) != 0) {
  4546. speed = "Ultra ";
  4547. } else if ((ahc->features & AHC_DT) != 0) {
  4548. speed = "Ultra160 ";
  4549. } else if ((ahc->features & AHC_ULTRA2) != 0) {
  4550. speed = "Ultra2 ";
  4551. }
  4552. if ((ahc->features & AHC_WIDE) != 0) {
  4553. type = "Wide";
  4554. } else {
  4555. type = "Single";
  4556. }
  4557. len = sprintf(buf, "%s%s Channel %c, SCSI Id=%d, ",
  4558. speed, type, ahc->channel, ahc->our_id);
  4559. }
  4560. buf += len;
  4561. if ((ahc->flags & AHC_PAGESCBS) != 0)
  4562. sprintf(buf, "%d/%d SCBs",
  4563. ahc->scb_data->maxhscbs, AHC_MAX_QUEUE);
  4564. else
  4565. sprintf(buf, "%d SCBs", ahc->scb_data->maxhscbs);
  4566. }
  4567. int
  4568. ahc_chip_init(struct ahc_softc *ahc)
  4569. {
  4570. int term;
  4571. int error;
  4572. u_int i;
  4573. u_int scsi_conf;
  4574. u_int scsiseq_template;
  4575. uint32_t physaddr;
  4576. ahc_outb(ahc, SEQ_FLAGS, 0);
  4577. ahc_outb(ahc, SEQ_FLAGS2, 0);
  4578. /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1, for both channels*/
  4579. if (ahc->features & AHC_TWIN) {
  4580. /*
  4581. * Setup Channel B first.
  4582. */
  4583. ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) | SELBUSB);
  4584. term = (ahc->flags & AHC_TERM_ENB_B) != 0 ? STPWEN : 0;
  4585. ahc_outb(ahc, SCSIID, ahc->our_id_b);
  4586. scsi_conf = ahc_inb(ahc, SCSICONF + 1);
  4587. ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
  4588. |term|ahc->seltime_b|ENSTIMER|ACTNEGEN);
  4589. if ((ahc->features & AHC_ULTRA2) != 0)
  4590. ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
  4591. ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
  4592. ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
  4593. /* Select Channel A */
  4594. ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) & ~SELBUSB);
  4595. }
  4596. term = (ahc->flags & AHC_TERM_ENB_A) != 0 ? STPWEN : 0;
  4597. if ((ahc->features & AHC_ULTRA2) != 0)
  4598. ahc_outb(ahc, SCSIID_ULTRA2, ahc->our_id);
  4599. else
  4600. ahc_outb(ahc, SCSIID, ahc->our_id);
  4601. scsi_conf = ahc_inb(ahc, SCSICONF);
  4602. ahc_outb(ahc, SXFRCTL1, (scsi_conf & (ENSPCHK|STIMESEL))
  4603. |term|ahc->seltime
  4604. |ENSTIMER|ACTNEGEN);
  4605. if ((ahc->features & AHC_ULTRA2) != 0)
  4606. ahc_outb(ahc, SIMODE0, ahc_inb(ahc, SIMODE0)|ENIOERR);
  4607. ahc_outb(ahc, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
  4608. ahc_outb(ahc, SXFRCTL0, DFON|SPIOEN);
  4609. /* There are no untagged SCBs active yet. */
  4610. for (i = 0; i < 16; i++) {
  4611. ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, 0));
  4612. if ((ahc->flags & AHC_SCB_BTT) != 0) {
  4613. int lun;
  4614. /*
  4615. * The SCB based BTT allows an entry per
  4616. * target and lun pair.
  4617. */
  4618. for (lun = 1; lun < AHC_NUM_LUNS; lun++)
  4619. ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, lun));
  4620. }
  4621. }
  4622. /* All of our queues are empty */
  4623. for (i = 0; i < 256; i++)
  4624. ahc->qoutfifo[i] = SCB_LIST_NULL;
  4625. ahc_sync_qoutfifo(ahc, BUS_DMASYNC_PREREAD);
  4626. for (i = 0; i < 256; i++)
  4627. ahc->qinfifo[i] = SCB_LIST_NULL;
  4628. if ((ahc->features & AHC_MULTI_TID) != 0) {
  4629. ahc_outb(ahc, TARGID, 0);
  4630. ahc_outb(ahc, TARGID + 1, 0);
  4631. }
  4632. /*
  4633. * Tell the sequencer where it can find our arrays in memory.
  4634. */
  4635. physaddr = ahc->scb_data->hscb_busaddr;
  4636. ahc_outb(ahc, HSCB_ADDR, physaddr & 0xFF);
  4637. ahc_outb(ahc, HSCB_ADDR + 1, (physaddr >> 8) & 0xFF);
  4638. ahc_outb(ahc, HSCB_ADDR + 2, (physaddr >> 16) & 0xFF);
  4639. ahc_outb(ahc, HSCB_ADDR + 3, (physaddr >> 24) & 0xFF);
  4640. physaddr = ahc->shared_data_busaddr;
  4641. ahc_outb(ahc, SHARED_DATA_ADDR, physaddr & 0xFF);
  4642. ahc_outb(ahc, SHARED_DATA_ADDR + 1, (physaddr >> 8) & 0xFF);
  4643. ahc_outb(ahc, SHARED_DATA_ADDR + 2, (physaddr >> 16) & 0xFF);
  4644. ahc_outb(ahc, SHARED_DATA_ADDR + 3, (physaddr >> 24) & 0xFF);
  4645. /*
  4646. * Initialize the group code to command length table.
  4647. * This overrides the values in TARG_SCSIRATE, so only
  4648. * setup the table after we have processed that information.
  4649. */
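/*
 * The table values below appear to be the number of CDB bytes that
 * follow the opcode for each SCSI command group: group 0 (6-byte
 * CDBs) = 5, groups 1/2 (10-byte) = 9, group 4 (16-byte) = 15,
 * group 5 (12-byte) = 11, with reserved/vendor groups left at 0.
 * This interpretation is inferred from the standard CDB group sizes.
 */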
  4650. ahc_outb(ahc, CMDSIZE_TABLE, 5);
  4651. ahc_outb(ahc, CMDSIZE_TABLE + 1, 9);
  4652. ahc_outb(ahc, CMDSIZE_TABLE + 2, 9);
  4653. ahc_outb(ahc, CMDSIZE_TABLE + 3, 0);
  4654. ahc_outb(ahc, CMDSIZE_TABLE + 4, 15);
  4655. ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
  4656. ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
  4657. ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);
  4658. if ((ahc->features & AHC_HS_MAILBOX) != 0)
  4659. ahc_outb(ahc, HS_MAILBOX, 0);
  4660. /* Tell the sequencer of our initial queue positions */
  4661. if ((ahc->features & AHC_TARGETMODE) != 0) {
  4662. ahc->tqinfifonext = 1;
  4663. ahc_outb(ahc, KERNEL_TQINPOS, ahc->tqinfifonext - 1);
  4664. ahc_outb(ahc, TQINPOS, ahc->tqinfifonext);
  4665. }
  4666. ahc->qinfifonext = 0;
  4667. ahc->qoutfifonext = 0;
  4668. if ((ahc->features & AHC_QUEUE_REGS) != 0) {
  4669. ahc_outb(ahc, QOFF_CTLSTA, SCB_QSIZE_256);
  4670. ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
  4671. ahc_outb(ahc, SNSCB_QOFF, ahc->qinfifonext);
  4672. ahc_outb(ahc, SDSCB_QOFF, 0);
  4673. } else {
  4674. ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
  4675. ahc_outb(ahc, QINPOS, ahc->qinfifonext);
  4676. ahc_outb(ahc, QOUTPOS, ahc->qoutfifonext);
  4677. }
  4678. /* We don't have any waiting selections */
  4679. ahc_outb(ahc, WAITING_SCBH, SCB_LIST_NULL);
  4680. /* Our disconnection list is empty too */
  4681. ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL);
  4682. /* Message out buffer starts empty */
  4683. ahc_outb(ahc, MSG_OUT, NOP);
  4684. /*
  4685. * Setup the allowed SCSI Sequences based on operational mode.
  4686. * If we are a target, we'll enable select in operations once
  4687. * we've had a lun enabled.
  4688. */
  4689. scsiseq_template = ENSELO|ENAUTOATNO|ENAUTOATNP;
  4690. if ((ahc->flags & AHC_INITIATORROLE) != 0)
  4691. scsiseq_template |= ENRSELI;
  4692. ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq_template);
  4693. /* Initialize our list of free SCBs. */
  4694. ahc_build_free_scb_list(ahc);
  4695. /*
  4696. * Tell the sequencer which SCB will be the next one it receives.
  4697. */
  4698. ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
  4699. /*
  4700. * Load the Sequencer program and Enable the adapter
  4701. * in "fast" mode.
  4702. */
  4703. if (bootverbose)
  4704. printk("%s: Downloading Sequencer Program...",
  4705. ahc_name(ahc));
  4706. error = ahc_loadseq(ahc);
  4707. if (error != 0)
  4708. return (error);
  4709. if ((ahc->features & AHC_ULTRA2) != 0) {
  4710. int wait;
  4711. /*
  4712. * Wait for up to 500ms for our transceivers
  4713. * to settle. If the adapter does not have
  4714. * a cable attached, the transceivers may
  4715. * never settle, so don't complain if we
  4716. * fail here.
  4717. */
  4718. for (wait = 5000;
  4719. (ahc_inb(ahc, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait;
  4720. wait--)
  4721. ahc_delay(100);
  4722. }
  4723. ahc_restart(ahc);
  4724. return (0);
  4725. }
  4726. /*
  4727. * Start the board, ready for normal operation
  4728. */
  4729. int
  4730. ahc_init(struct ahc_softc *ahc)
  4731. {
  4732. int max_targ;
  4733. u_int i;
  4734. u_int scsi_conf;
  4735. u_int ultraenb;
  4736. u_int discenable;
  4737. u_int tagenable;
  4738. size_t driver_data_size;
  4739. #ifdef AHC_DEBUG
  4740. if ((ahc_debug & AHC_DEBUG_SEQUENCER) != 0)
  4741. ahc->flags |= AHC_SEQUENCER_DEBUG;
  4742. #endif
  4743. #ifdef AHC_PRINT_SRAM
  4744. printk("Scratch Ram:");
  4745. for (i = 0x20; i < 0x5f; i++) {
  4746. if (((i % 8) == 0) && (i != 0)) {
  4747. printk ("\n ");
  4748. }
  4749. printk (" 0x%x", ahc_inb(ahc, i));
  4750. }
  4751. if ((ahc->features & AHC_MORE_SRAM) != 0) {
  4752. for (i = 0x70; i < 0x7f; i++) {
  4753. if (((i % 8) == 0) && (i != 0)) {
  4754. printk ("\n ");
  4755. }
  4756. printk (" 0x%x", ahc_inb(ahc, i));
  4757. }
  4758. }
  4759. printk ("\n");
  4760. /*
  4761. * Reading uninitialized scratch ram may
  4762. * generate parity errors.
  4763. */
  4764. ahc_outb(ahc, CLRINT, CLRPARERR);
  4765. ahc_outb(ahc, CLRINT, CLRBRKADRINT);
  4766. #endif
  4767. max_targ = 15;
  4768. /*
  4769. * Assume we have a board at this stage and it has been reset.
  4770. */
  4771. if ((ahc->flags & AHC_USEDEFAULTS) != 0)
  4772. ahc->our_id = ahc->our_id_b = 7;
  4773. /*
  4774. * Default to allowing initiator operations.
  4775. */
  4776. ahc->flags |= AHC_INITIATORROLE;
  4777. /*
  4778. * Only allow target mode features if this unit has them enabled.
  4779. */
  4780. if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
  4781. ahc->features &= ~AHC_TARGETMODE;
  4782. ahc->init_level++;
  4783. /*
  4784. * DMA tag for our command fifos and other data in system memory
  4785. * the card's sequencer must be able to access. For initiator
  4786. * roles, we need to allocate space for the qinfifo and qoutfifo.
  4787. * The qinfifo and qoutfifo are composed of 256 1 byte elements.
  4788. * When providing for the target mode role, we must additionally
  4789. * provide space for the incoming target command fifo and an extra
  4790. * byte to deal with a dma bug in some chip versions.
  4791. */
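/*
 * Inferred layout of the shared data area, based on the pointer setup
 * below: the optional target command blocks come first, then 256 bytes
 * of qoutfifo, then 256 bytes of qinfifo; in target mode the final
 * extra byte serves as the WideOdd DMA bug buffer.
 */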
  4792. driver_data_size = 2 * 256 * sizeof(uint8_t);
  4793. if ((ahc->features & AHC_TARGETMODE) != 0)
  4794. driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd)
  4795. + /*DMA WideOdd Bug Buffer*/1;
  4796. if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
  4797. /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
  4798. /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
  4799. /*highaddr*/BUS_SPACE_MAXADDR,
  4800. /*filter*/NULL, /*filterarg*/NULL,
  4801. driver_data_size,
  4802. /*nsegments*/1,
  4803. /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
  4804. /*flags*/0, &ahc->shared_data_dmat) != 0) {
  4805. return (ENOMEM);
  4806. }
  4807. ahc->init_level++;
  4808. /* Allocation of driver data */
  4809. if (ahc_dmamem_alloc(ahc, ahc->shared_data_dmat,
  4810. (void **)&ahc->qoutfifo,
  4811. BUS_DMA_NOWAIT, &ahc->shared_data_dmamap) != 0) {
  4812. return (ENOMEM);
  4813. }
  4814. ahc->init_level++;
  4815. /* And permanently map it in */
  4816. ahc_dmamap_load(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
  4817. ahc->qoutfifo, driver_data_size, ahc_dmamap_cb,
  4818. &ahc->shared_data_busaddr, /*flags*/0);
  4819. if ((ahc->features & AHC_TARGETMODE) != 0) {
  4820. ahc->targetcmds = (struct target_cmd *)ahc->qoutfifo;
  4821. ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[AHC_TMODE_CMDS];
  4822. ahc->dma_bug_buf = ahc->shared_data_busaddr
  4823. + driver_data_size - 1;
  4824. /* All target command blocks start out invalid. */
  4825. for (i = 0; i < AHC_TMODE_CMDS; i++)
  4826. ahc->targetcmds[i].cmd_valid = 0;
  4827. ahc_sync_tqinfifo(ahc, BUS_DMASYNC_PREREAD);
  4828. ahc->qoutfifo = (uint8_t *)&ahc->targetcmds[256];
  4829. }
  4830. ahc->qinfifo = &ahc->qoutfifo[256];
  4831. ahc->init_level++;
  4832. /* Allocate SCB data now that buffer_dmat is initialized */
  4833. if (ahc->scb_data->maxhscbs == 0)
  4834. if (ahc_init_scbdata(ahc) != 0)
  4835. return (ENOMEM);
  4836. /*
  4837. * Allocate a tstate to house information for our
  4838. * initiator presence on the bus as well as the user
  4839. * data for any target mode initiator.
  4840. */
  4841. if (ahc_alloc_tstate(ahc, ahc->our_id, 'A') == NULL) {
  4842. printk("%s: unable to allocate ahc_tmode_tstate. "
  4843. "Failing attach\n", ahc_name(ahc));
  4844. return (ENOMEM);
  4845. }
  4846. if ((ahc->features & AHC_TWIN) != 0) {
  4847. if (ahc_alloc_tstate(ahc, ahc->our_id_b, 'B') == NULL) {
  4848. printk("%s: unable to allocate ahc_tmode_tstate. "
  4849. "Failing attach\n", ahc_name(ahc));
  4850. return (ENOMEM);
  4851. }
  4852. }
  4853. if (ahc->scb_data->maxhscbs < AHC_SCB_MAX_ALLOC) {
  4854. ahc->flags |= AHC_PAGESCBS;
  4855. } else {
  4856. ahc->flags &= ~AHC_PAGESCBS;
  4857. }
  4858. #ifdef AHC_DEBUG
  4859. if (ahc_debug & AHC_SHOW_MISC) {
  4860. printk("%s: hardware scb %u bytes; kernel scb %u bytes; "
  4861. "ahc_dma %u bytes\n",
  4862. ahc_name(ahc),
  4863. (u_int)sizeof(struct hardware_scb),
  4864. (u_int)sizeof(struct scb),
  4865. (u_int)sizeof(struct ahc_dma_seg));
  4866. }
  4867. #endif /* AHC_DEBUG */
  4868. /*
  4869. * Look at the information that board initialization or
  4870. * the board bios has left us.
  4871. */
  4872. if (ahc->features & AHC_TWIN) {
  4873. scsi_conf = ahc_inb(ahc, SCSICONF + 1);
  4874. if ((scsi_conf & RESET_SCSI) != 0
  4875. && (ahc->flags & AHC_INITIATORROLE) != 0)
  4876. ahc->flags |= AHC_RESET_BUS_B;
  4877. }
  4878. scsi_conf = ahc_inb(ahc, SCSICONF);
  4879. if ((scsi_conf & RESET_SCSI) != 0
  4880. && (ahc->flags & AHC_INITIATORROLE) != 0)
  4881. ahc->flags |= AHC_RESET_BUS_A;
  4882. ultraenb = 0;
  4883. tagenable = ALL_TARGETS_MASK;
  4884. /* Grab the disconnection disable table and invert it for our needs */
  4885. if ((ahc->flags & AHC_USEDEFAULTS) != 0) {
  4886. printk("%s: Host Adapter Bios disabled. Using default SCSI "
  4887. "device parameters\n", ahc_name(ahc));
  4888. ahc->flags |= AHC_EXTENDED_TRANS_A|AHC_EXTENDED_TRANS_B|
  4889. AHC_TERM_ENB_A|AHC_TERM_ENB_B;
  4890. discenable = ALL_TARGETS_MASK;
  4891. if ((ahc->features & AHC_ULTRA) != 0)
  4892. ultraenb = ALL_TARGETS_MASK;
  4893. } else {
  4894. discenable = ~((ahc_inb(ahc, DISC_DSB + 1) << 8)
  4895. | ahc_inb(ahc, DISC_DSB));
  4896. if ((ahc->features & (AHC_ULTRA|AHC_ULTRA2)) != 0)
  4897. ultraenb = (ahc_inb(ahc, ULTRA_ENB + 1) << 8)
  4898. | ahc_inb(ahc, ULTRA_ENB);
  4899. }
  4900. if ((ahc->features & (AHC_WIDE|AHC_TWIN)) == 0)
  4901. max_targ = 7;
  4902. for (i = 0; i <= max_targ; i++) {
  4903. struct ahc_initiator_tinfo *tinfo;
  4904. struct ahc_tmode_tstate *tstate;
  4905. u_int our_id;
  4906. u_int target_id;
  4907. char channel;
  4908. channel = 'A';
  4909. our_id = ahc->our_id;
  4910. target_id = i;
  4911. if (i > 7 && (ahc->features & AHC_TWIN) != 0) {
  4912. channel = 'B';
  4913. our_id = ahc->our_id_b;
  4914. target_id = i % 8;
  4915. }
  4916. tinfo = ahc_fetch_transinfo(ahc, channel, our_id,
  4917. target_id, &tstate);
  4918. /* Default to async narrow across the board */
  4919. memset(tinfo, 0, sizeof(*tinfo));
  4920. if (ahc->flags & AHC_USEDEFAULTS) {
  4921. if ((ahc->features & AHC_WIDE) != 0)
  4922. tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
  4923. /*
  4924. * These will be truncated when we determine the
  4925. * connection type we have with the target.
  4926. */
  4927. tinfo->user.period = ahc_syncrates->period;
  4928. tinfo->user.offset = MAX_OFFSET;
  4929. } else {
  4930. u_int scsirate;
  4931. uint16_t mask;
  4932. /* Take the settings leftover in scratch RAM. */
  4933. scsirate = ahc_inb(ahc, TARG_SCSIRATE + i);
  4934. mask = (0x01 << i);
  4935. if ((ahc->features & AHC_ULTRA2) != 0) {
  4936. u_int offset;
  4937. u_int maxsync;
  4938. if ((scsirate & SOFS) == 0x0F) {
  4939. /*
  4940. * Haven't negotiated yet,
  4941. * so the format is different.
  4942. */
4943. scsirate = (scsirate & SXFR) >> 4
4944. | ((ultraenb & mask)
4945. ? 0x08 : 0x0)
4946. | (scsirate & WIDEXFER);
  4947. offset = MAX_OFFSET_ULTRA2;
  4948. } else
  4949. offset = ahc_inb(ahc, TARG_OFFSET + i);
  4950. if ((scsirate & ~WIDEXFER) == 0 && offset != 0)
  4951. /* Set to the lowest sync rate, 5MHz */
  4952. scsirate |= 0x1c;
  4953. maxsync = AHC_SYNCRATE_ULTRA2;
  4954. if ((ahc->features & AHC_DT) != 0)
  4955. maxsync = AHC_SYNCRATE_DT;
  4956. tinfo->user.period =
  4957. ahc_find_period(ahc, scsirate, maxsync);
  4958. if (offset == 0)
  4959. tinfo->user.period = 0;
  4960. else
  4961. tinfo->user.offset = MAX_OFFSET;
  4962. if ((scsirate & SXFR_ULTRA2) <= 8/*10MHz*/
  4963. && (ahc->features & AHC_DT) != 0)
  4964. tinfo->user.ppr_options =
  4965. MSG_EXT_PPR_DT_REQ;
  4966. } else if ((scsirate & SOFS) != 0) {
  4967. if ((scsirate & SXFR) == 0x40
  4968. && (ultraenb & mask) != 0) {
  4969. /* Treat 10MHz as a non-ultra speed */
  4970. scsirate &= ~SXFR;
  4971. ultraenb &= ~mask;
  4972. }
  4973. tinfo->user.period =
  4974. ahc_find_period(ahc, scsirate,
  4975. (ultraenb & mask)
  4976. ? AHC_SYNCRATE_ULTRA
  4977. : AHC_SYNCRATE_FAST);
  4978. if (tinfo->user.period != 0)
  4979. tinfo->user.offset = MAX_OFFSET;
  4980. }
  4981. if (tinfo->user.period == 0)
  4982. tinfo->user.offset = 0;
  4983. if ((scsirate & WIDEXFER) != 0
  4984. && (ahc->features & AHC_WIDE) != 0)
  4985. tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;
  4986. tinfo->user.protocol_version = 4;
  4987. if ((ahc->features & AHC_DT) != 0)
  4988. tinfo->user.transport_version = 3;
  4989. else
  4990. tinfo->user.transport_version = 2;
  4991. tinfo->goal.protocol_version = 2;
  4992. tinfo->goal.transport_version = 2;
  4993. tinfo->curr.protocol_version = 2;
  4994. tinfo->curr.transport_version = 2;
  4995. }
  4996. tstate->ultraenb = 0;
  4997. }
  4998. ahc->user_discenable = discenable;
  4999. ahc->user_tagenable = tagenable;
  5000. return (ahc->bus_chip_init(ahc));
  5001. }
  5002. void
  5003. ahc_intr_enable(struct ahc_softc *ahc, int enable)
  5004. {
  5005. u_int hcntrl;
  5006. hcntrl = ahc_inb(ahc, HCNTRL);
  5007. hcntrl &= ~INTEN;
  5008. ahc->pause &= ~INTEN;
  5009. ahc->unpause &= ~INTEN;
  5010. if (enable) {
  5011. hcntrl |= INTEN;
  5012. ahc->pause |= INTEN;
  5013. ahc->unpause |= INTEN;
  5014. }
  5015. ahc_outb(ahc, HCNTRL, hcntrl);
  5016. }
  5017. /*
  5018. * Ensure that the card is paused in a location
  5019. * outside of all critical sections and that all
  5020. * pending work is completed prior to returning.
  5021. * This routine should only be called from outside
  5022. * an interrupt context.
  5023. */
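/*
 * The loop below services interrupts with the chip alternately
 * unpaused and paused until nothing is pending and no selection is in
 * progress, until maxloops expires, or until a removable card reads
 * back as gone (INTSTAT == 0xFF).
 */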
  5024. void
  5025. ahc_pause_and_flushwork(struct ahc_softc *ahc)
  5026. {
  5027. int intstat;
  5028. int maxloops;
  5029. int paused;
  5030. maxloops = 1000;
  5031. ahc->flags |= AHC_ALL_INTERRUPTS;
  5032. paused = FALSE;
  5033. do {
  5034. if (paused) {
  5035. ahc_unpause(ahc);
  5036. /*
  5037. * Give the sequencer some time to service
  5038. * any active selections.
  5039. */
  5040. ahc_delay(500);
  5041. }
  5042. ahc_intr(ahc);
  5043. ahc_pause(ahc);
  5044. paused = TRUE;
  5045. ahc_outb(ahc, SCSISEQ, ahc_inb(ahc, SCSISEQ) & ~ENSELO);
  5046. intstat = ahc_inb(ahc, INTSTAT);
  5047. if ((intstat & INT_PEND) == 0) {
  5048. ahc_clear_critical_section(ahc);
  5049. intstat = ahc_inb(ahc, INTSTAT);
  5050. }
  5051. } while (--maxloops
  5052. && (intstat != 0xFF || (ahc->features & AHC_REMOVABLE) == 0)
  5053. && ((intstat & INT_PEND) != 0
  5054. || (ahc_inb(ahc, SSTAT0) & (SELDO|SELINGO)) != 0));
  5055. if (maxloops == 0) {
5056. printk("Infinite interrupt loop, INTSTAT = %x\n",
  5057. ahc_inb(ahc, INTSTAT));
  5058. }
  5059. ahc_platform_flushwork(ahc);
  5060. ahc->flags &= ~AHC_ALL_INTERRUPTS;
  5061. }
  5062. int __maybe_unused
  5063. ahc_suspend(struct ahc_softc *ahc)
  5064. {
  5065. ahc_pause_and_flushwork(ahc);
  5066. if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
  5067. ahc_unpause(ahc);
  5068. return (EBUSY);
  5069. }
  5070. #ifdef AHC_TARGET_MODE
  5071. /*
  5072. * XXX What about ATIOs that have not yet been serviced?
  5073. * Perhaps we should just refuse to be suspended if we
  5074. * are acting in a target role.
  5075. */
  5076. if (ahc->pending_device != NULL) {
  5077. ahc_unpause(ahc);
  5078. return (EBUSY);
  5079. }
  5080. #endif
  5081. ahc_shutdown(ahc);
  5082. return (0);
  5083. }
  5084. int __maybe_unused
  5085. ahc_resume(struct ahc_softc *ahc)
  5086. {
  5087. ahc_reset(ahc, /*reinit*/TRUE);
  5088. ahc_intr_enable(ahc, TRUE);
  5089. ahc_restart(ahc);
  5090. return (0);
  5091. }
  5092. /************************** Busy Target Table *********************************/
  5093. /*
5094. * Return the untagged transaction id for a given target/channel/lun.
5095. * (Clearing an entry is done separately via ahc_unbusy_tcl().)
  5096. */
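/*
 * Busy target table layout, as the routines below show: without
 * AHC_SCB_BTT the table lives in the BUSY_TARGETS scratch area,
 * indexed by target (channel B targets offset by 8); with AHC_SCB_BTT
 * the lun selects a hardware SCB page whose SCB_64_BTT area holds one
 * entry per target, giving an entry per target/lun pair.
 */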
  5097. static u_int
  5098. ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
  5099. {
  5100. u_int scbid;
  5101. u_int target_offset;
  5102. if ((ahc->flags & AHC_SCB_BTT) != 0) {
  5103. u_int saved_scbptr;
  5104. saved_scbptr = ahc_inb(ahc, SCBPTR);
  5105. ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
  5106. scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
  5107. ahc_outb(ahc, SCBPTR, saved_scbptr);
  5108. } else {
  5109. target_offset = TCL_TARGET_OFFSET(tcl);
  5110. scbid = ahc_inb(ahc, BUSY_TARGETS + target_offset);
  5111. }
  5112. return (scbid);
  5113. }
  5114. static void
  5115. ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
  5116. {
  5117. u_int target_offset;
  5118. if ((ahc->flags & AHC_SCB_BTT) != 0) {
  5119. u_int saved_scbptr;
  5120. saved_scbptr = ahc_inb(ahc, SCBPTR);
  5121. ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
  5122. ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL);
  5123. ahc_outb(ahc, SCBPTR, saved_scbptr);
  5124. } else {
  5125. target_offset = TCL_TARGET_OFFSET(tcl);
  5126. ahc_outb(ahc, BUSY_TARGETS + target_offset, SCB_LIST_NULL);
  5127. }
  5128. }
  5129. static void
  5130. ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
  5131. {
  5132. u_int target_offset;
  5133. if ((ahc->flags & AHC_SCB_BTT) != 0) {
  5134. u_int saved_scbptr;
  5135. saved_scbptr = ahc_inb(ahc, SCBPTR);
  5136. ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
  5137. ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
  5138. ahc_outb(ahc, SCBPTR, saved_scbptr);
  5139. } else {
  5140. target_offset = TCL_TARGET_OFFSET(tcl);
  5141. ahc_outb(ahc, BUSY_TARGETS + target_offset, scbid);
  5142. }
  5143. }
  5144. /************************** SCB and SCB queue management **********************/
  5145. int
  5146. ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
  5147. char channel, int lun, u_int tag, role_t role)
  5148. {
  5149. int targ = SCB_GET_TARGET(ahc, scb);
  5150. char chan = SCB_GET_CHANNEL(ahc, scb);
  5151. int slun = SCB_GET_LUN(scb);
  5152. int match;
  5153. match = ((chan == channel) || (channel == ALL_CHANNELS));
  5154. if (match != 0)
  5155. match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
  5156. if (match != 0)
  5157. match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
  5158. if (match != 0) {
  5159. #ifdef AHC_TARGET_MODE
  5160. int group;
  5161. group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
  5162. if (role == ROLE_INITIATOR) {
  5163. match = (group != XPT_FC_GROUP_TMODE)
  5164. && ((tag == scb->hscb->tag)
  5165. || (tag == SCB_LIST_NULL));
  5166. } else if (role == ROLE_TARGET) {
  5167. match = (group == XPT_FC_GROUP_TMODE)
  5168. && ((tag == scb->io_ctx->csio.tag_id)
  5169. || (tag == SCB_LIST_NULL));
  5170. }
  5171. #else /* !AHC_TARGET_MODE */
  5172. match = ((tag == scb->hscb->tag) || (tag == SCB_LIST_NULL));
  5173. #endif /* AHC_TARGET_MODE */
  5174. }
  5175. return match;
  5176. }
  5177. static void
  5178. ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
  5179. {
  5180. int target;
  5181. char channel;
  5182. int lun;
  5183. target = SCB_GET_TARGET(ahc, scb);
  5184. lun = SCB_GET_LUN(scb);
  5185. channel = SCB_GET_CHANNEL(ahc, scb);
  5186. ahc_search_qinfifo(ahc, target, channel, lun,
  5187. /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
  5188. CAM_REQUEUE_REQ, SEARCH_COMPLETE);
  5189. ahc_platform_freeze_devq(ahc, scb);
  5190. }
  5191. void
  5192. ahc_qinfifo_requeue_tail(struct ahc_softc *ahc, struct scb *scb)
  5193. {
  5194. struct scb *prev_scb;
  5195. prev_scb = NULL;
  5196. if (ahc_qinfifo_count(ahc) != 0) {
  5197. u_int prev_tag;
  5198. uint8_t prev_pos;
  5199. prev_pos = ahc->qinfifonext - 1;
  5200. prev_tag = ahc->qinfifo[prev_pos];
  5201. prev_scb = ahc_lookup_scb(ahc, prev_tag);
  5202. }
  5203. ahc_qinfifo_requeue(ahc, prev_scb, scb);
  5204. if ((ahc->features & AHC_QUEUE_REGS) != 0) {
  5205. ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
  5206. } else {
  5207. ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
  5208. }
  5209. }
  5210. static void
  5211. ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
  5212. struct scb *scb)
  5213. {
  5214. if (prev_scb == NULL) {
  5215. ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
  5216. } else {
  5217. prev_scb->hscb->next = scb->hscb->tag;
  5218. ahc_sync_scb(ahc, prev_scb,
  5219. BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  5220. }
  5221. ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
  5222. scb->hscb->next = ahc->next_queued_scb->hscb->tag;
  5223. ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
  5224. }
  5225. static int
  5226. ahc_qinfifo_count(struct ahc_softc *ahc)
  5227. {
  5228. uint8_t qinpos;
  5229. uint8_t diff;
  5230. if ((ahc->features & AHC_QUEUE_REGS) != 0) {
  5231. qinpos = ahc_inb(ahc, SNSCB_QOFF);
  5232. ahc_outb(ahc, SNSCB_QOFF, qinpos);
  5233. } else
  5234. qinpos = ahc_inb(ahc, QINPOS);
  5235. diff = ahc->qinfifonext - qinpos;
  5236. return (diff);
  5237. }
  5238. int
  5239. ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
  5240. int lun, u_int tag, role_t role, uint32_t status,
  5241. ahc_search_action action)
  5242. {
  5243. struct scb *scb;
  5244. struct scb *prev_scb;
  5245. uint8_t qinstart;
  5246. uint8_t qinpos;
  5247. uint8_t qintail;
  5248. uint8_t next;
  5249. uint8_t prev;
  5250. uint8_t curscbptr;
  5251. int found;
  5252. int have_qregs;
  5253. qintail = ahc->qinfifonext;
  5254. have_qregs = (ahc->features & AHC_QUEUE_REGS) != 0;
  5255. if (have_qregs) {
  5256. qinstart = ahc_inb(ahc, SNSCB_QOFF);
  5257. ahc_outb(ahc, SNSCB_QOFF, qinstart);
  5258. } else
  5259. qinstart = ahc_inb(ahc, QINPOS);
  5260. qinpos = qinstart;
  5261. found = 0;
  5262. prev_scb = NULL;
  5263. if (action == SEARCH_COMPLETE) {
  5264. /*
  5265. * Don't attempt to run any queued untagged transactions
  5266. * until we are done with the abort process.
  5267. */
  5268. ahc_freeze_untagged_queues(ahc);
  5269. }
  5270. /*
  5271. * Start with an empty queue. Entries that are not chosen
  5272. * for removal will be re-added to the queue as we go.
  5273. */
  5274. ahc->qinfifonext = qinpos;
  5275. ahc_outb(ahc, NEXT_QUEUED_SCB, ahc->next_queued_scb->hscb->tag);
  5276. while (qinpos != qintail) {
  5277. scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinpos]);
  5278. if (scb == NULL) {
  5279. printk("qinpos = %d, SCB index = %d\n",
  5280. qinpos, ahc->qinfifo[qinpos]);
  5281. panic("Loop 1\n");
  5282. }
  5283. if (ahc_match_scb(ahc, scb, target, channel, lun, tag, role)) {
  5284. /*
  5285. * We found an scb that needs to be acted on.
  5286. */
  5287. found++;
  5288. switch (action) {
  5289. case SEARCH_COMPLETE:
  5290. {
  5291. cam_status ostat;
  5292. cam_status cstat;
  5293. ostat = ahc_get_transaction_status(scb);
  5294. if (ostat == CAM_REQ_INPROG)
  5295. ahc_set_transaction_status(scb, status);
  5296. cstat = ahc_get_transaction_status(scb);
  5297. if (cstat != CAM_REQ_CMP)
  5298. ahc_freeze_scb(scb);
  5299. if ((scb->flags & SCB_ACTIVE) == 0)
  5300. printk("Inactive SCB in qinfifo\n");
  5301. ahc_done(ahc, scb);
  5302. }
  5303. fallthrough;
  5304. case SEARCH_REMOVE:
  5305. break;
  5306. case SEARCH_COUNT:
  5307. ahc_qinfifo_requeue(ahc, prev_scb, scb);
  5308. prev_scb = scb;
  5309. break;
  5310. }
  5311. } else {
  5312. ahc_qinfifo_requeue(ahc, prev_scb, scb);
  5313. prev_scb = scb;
  5314. }
  5315. qinpos++;
  5316. }
  5317. if ((ahc->features & AHC_QUEUE_REGS) != 0) {
  5318. ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
  5319. } else {
  5320. ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
  5321. }
  5322. if (action != SEARCH_COUNT
  5323. && (found != 0)
  5324. && (qinstart != ahc->qinfifonext)) {
  5325. /*
  5326. * The sequencer may be in the process of dmaing
  5327. * down the SCB at the beginning of the queue.
  5328. * This could be problematic if either the first,
  5329. * or the second SCB is removed from the queue
  5330. * (the first SCB includes a pointer to the "next"
  5331. * SCB to dma). If we have removed any entries, swap
  5332. * the first element in the queue with the next HSCB
  5333. * so the sequencer will notice that NEXT_QUEUED_SCB
  5334. * has changed during its dma attempt and will retry
  5335. * the DMA.
  5336. */
  5337. scb = ahc_lookup_scb(ahc, ahc->qinfifo[qinstart]);
  5338. if (scb == NULL) {
5339. printk("found = %d, qinstart = %d, qinfifonext = %d\n",
  5340. found, qinstart, ahc->qinfifonext);
  5341. panic("First/Second Qinfifo fixup\n");
  5342. }
  5343. /*
  5344. * ahc_swap_with_next_hscb forces our next pointer to
  5345. * point to the reserved SCB for future commands. Save
  5346. * and restore our original next pointer to maintain
  5347. * queue integrity.
  5348. */
  5349. next = scb->hscb->next;
  5350. ahc->scb_data->scbindex[scb->hscb->tag] = NULL;
  5351. ahc_swap_with_next_hscb(ahc, scb);
  5352. scb->hscb->next = next;
  5353. ahc->qinfifo[qinstart] = scb->hscb->tag;
  5354. /* Tell the card about the new head of the qinfifo. */
  5355. ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
  5356. /* Fixup the tail "next" pointer. */
  5357. qintail = ahc->qinfifonext - 1;
  5358. scb = ahc_lookup_scb(ahc, ahc->qinfifo[qintail]);
  5359. scb->hscb->next = ahc->next_queued_scb->hscb->tag;
  5360. }
  5361. /*
  5362. * Search waiting for selection list.
  5363. */
  5364. curscbptr = ahc_inb(ahc, SCBPTR);
  5365. next = ahc_inb(ahc, WAITING_SCBH); /* Start at head of list. */
  5366. prev = SCB_LIST_NULL;
  5367. while (next != SCB_LIST_NULL) {
  5368. uint8_t scb_index;
  5369. ahc_outb(ahc, SCBPTR, next);
  5370. scb_index = ahc_inb(ahc, SCB_TAG);
  5371. if (scb_index >= ahc->scb_data->numscbs) {
  5372. printk("Waiting List inconsistency. "
  5373. "SCB index == %d, yet numscbs == %d.",
  5374. scb_index, ahc->scb_data->numscbs);
  5375. ahc_dump_card_state(ahc);
  5376. panic("for safety");
  5377. }
  5378. scb = ahc_lookup_scb(ahc, scb_index);
  5379. if (scb == NULL) {
  5380. printk("scb_index = %d, next = %d\n",
  5381. scb_index, next);
  5382. panic("Waiting List traversal\n");
  5383. }
  5384. if (ahc_match_scb(ahc, scb, target, channel,
  5385. lun, SCB_LIST_NULL, role)) {
  5386. /*
  5387. * We found an scb that needs to be acted on.
  5388. */
  5389. found++;
  5390. switch (action) {
  5391. case SEARCH_COMPLETE:
  5392. {
  5393. cam_status ostat;
  5394. cam_status cstat;
  5395. ostat = ahc_get_transaction_status(scb);
  5396. if (ostat == CAM_REQ_INPROG)
  5397. ahc_set_transaction_status(scb,
  5398. status);
  5399. cstat = ahc_get_transaction_status(scb);
  5400. if (cstat != CAM_REQ_CMP)
  5401. ahc_freeze_scb(scb);
  5402. if ((scb->flags & SCB_ACTIVE) == 0)
  5403. printk("Inactive SCB in Waiting List\n");
  5404. ahc_done(ahc, scb);
  5405. }
  5406. fallthrough;
  5407. case SEARCH_REMOVE:
  5408. next = ahc_rem_wscb(ahc, next, prev);
  5409. break;
  5410. case SEARCH_COUNT:
  5411. prev = next;
  5412. next = ahc_inb(ahc, SCB_NEXT);
  5413. break;
  5414. }
  5415. } else {
  5416. prev = next;
  5417. next = ahc_inb(ahc, SCB_NEXT);
  5418. }
  5419. }
  5420. ahc_outb(ahc, SCBPTR, curscbptr);
  5421. found += ahc_search_untagged_queues(ahc, /*ahc_io_ctx_t*/NULL, target,
  5422. channel, lun, status, action);
  5423. if (action == SEARCH_COMPLETE)
  5424. ahc_release_untagged_queues(ahc);
  5425. return (found);
  5426. }
  5427. int
  5428. ahc_search_untagged_queues(struct ahc_softc *ahc, ahc_io_ctx_t ctx,
  5429. int target, char channel, int lun, uint32_t status,
  5430. ahc_search_action action)
  5431. {
  5432. struct scb *scb;
  5433. int maxtarget;
  5434. int found;
  5435. int i;
  5436. if (action == SEARCH_COMPLETE) {
  5437. /*
  5438. * Don't attempt to run any queued untagged transactions
  5439. * until we are done with the abort process.
  5440. */
  5441. ahc_freeze_untagged_queues(ahc);
  5442. }
  5443. found = 0;
  5444. i = 0;
  5445. if ((ahc->flags & AHC_SCB_BTT) == 0) {
  5446. maxtarget = 16;
  5447. if (target != CAM_TARGET_WILDCARD) {
  5448. i = target;
  5449. if (channel == 'B')
  5450. i += 8;
  5451. maxtarget = i + 1;
  5452. }
  5453. } else {
  5454. maxtarget = 0;
  5455. }
  5456. for (; i < maxtarget; i++) {
  5457. struct scb_tailq *untagged_q;
  5458. struct scb *next_scb;
  5459. untagged_q = &(ahc->untagged_queues[i]);
  5460. next_scb = TAILQ_FIRST(untagged_q);
  5461. while (next_scb != NULL) {
  5462. scb = next_scb;
  5463. next_scb = TAILQ_NEXT(scb, links.tqe);
  5464. /*
  5465. * The head of the list may be the currently
  5466. * active untagged command for a device.
  5467. * We're only searching for commands that
  5468. * have not been started. A transaction
  5469. * marked active but still in the qinfifo
  5470. * is removed by the qinfifo scanning code
  5471. * above.
  5472. */
  5473. if ((scb->flags & SCB_ACTIVE) != 0)
  5474. continue;
  5475. if (ahc_match_scb(ahc, scb, target, channel, lun,
  5476. SCB_LIST_NULL, ROLE_INITIATOR) == 0
  5477. || (ctx != NULL && ctx != scb->io_ctx))
  5478. continue;
  5479. /*
  5480. * We found an scb that needs to be acted on.
  5481. */
  5482. found++;
  5483. switch (action) {
  5484. case SEARCH_COMPLETE:
  5485. {
  5486. cam_status ostat;
  5487. cam_status cstat;
  5488. ostat = ahc_get_transaction_status(scb);
  5489. if (ostat == CAM_REQ_INPROG)
  5490. ahc_set_transaction_status(scb, status);
  5491. cstat = ahc_get_transaction_status(scb);
  5492. if (cstat != CAM_REQ_CMP)
  5493. ahc_freeze_scb(scb);
  5494. if ((scb->flags & SCB_ACTIVE) == 0)
  5495. printk("Inactive SCB in untaggedQ\n");
  5496. ahc_done(ahc, scb);
  5497. break;
  5498. }
  5499. case SEARCH_REMOVE:
  5500. scb->flags &= ~SCB_UNTAGGEDQ;
  5501. TAILQ_REMOVE(untagged_q, scb, links.tqe);
  5502. break;
  5503. case SEARCH_COUNT:
  5504. break;
  5505. }
  5506. }
  5507. }
  5508. if (action == SEARCH_COMPLETE)
  5509. ahc_release_untagged_queues(ahc);
  5510. return (found);
  5511. }
  5512. int
  5513. ahc_search_disc_list(struct ahc_softc *ahc, int target, char channel,
  5514. int lun, u_int tag, int stop_on_first, int remove,
  5515. int save_state)
  5516. {
  5517. struct scb *scbp;
  5518. u_int next;
  5519. u_int prev;
  5520. u_int count;
  5521. u_int active_scb;
  5522. count = 0;
  5523. next = ahc_inb(ahc, DISCONNECTED_SCBH);
  5524. prev = SCB_LIST_NULL;
  5525. if (save_state) {
  5526. /* restore this when we're done */
  5527. active_scb = ahc_inb(ahc, SCBPTR);
  5528. } else
  5529. /* Silence compiler */
  5530. active_scb = SCB_LIST_NULL;
  5531. while (next != SCB_LIST_NULL) {
  5532. u_int scb_index;
  5533. ahc_outb(ahc, SCBPTR, next);
  5534. scb_index = ahc_inb(ahc, SCB_TAG);
  5535. if (scb_index >= ahc->scb_data->numscbs) {
  5536. printk("Disconnected List inconsistency. "
  5537. "SCB index == %d, yet numscbs == %d.",
  5538. scb_index, ahc->scb_data->numscbs);
  5539. ahc_dump_card_state(ahc);
  5540. panic("for safety");
  5541. }
  5542. if (next == prev) {
  5543. panic("Disconnected List Loop. "
  5544. "cur SCBPTR == %x, prev SCBPTR == %x.",
  5545. next, prev);
  5546. }
  5547. scbp = ahc_lookup_scb(ahc, scb_index);
  5548. if (ahc_match_scb(ahc, scbp, target, channel, lun,
  5549. tag, ROLE_INITIATOR)) {
  5550. count++;
  5551. if (remove) {
  5552. next =
  5553. ahc_rem_scb_from_disc_list(ahc, prev, next);
  5554. } else {
  5555. prev = next;
  5556. next = ahc_inb(ahc, SCB_NEXT);
  5557. }
  5558. if (stop_on_first)
  5559. break;
  5560. } else {
  5561. prev = next;
  5562. next = ahc_inb(ahc, SCB_NEXT);
  5563. }
  5564. }
  5565. if (save_state)
  5566. ahc_outb(ahc, SCBPTR, active_scb);
  5567. return (count);
  5568. }
  5569. /*
  5570. * Remove an SCB from the on chip list of disconnected transactions.
  5571. * This is empty/unused if we are not performing SCB paging.
  5572. */
  5573. static u_int
  5574. ahc_rem_scb_from_disc_list(struct ahc_softc *ahc, u_int prev, u_int scbptr)
  5575. {
  5576. u_int next;
  5577. ahc_outb(ahc, SCBPTR, scbptr);
  5578. next = ahc_inb(ahc, SCB_NEXT);
  5579. ahc_outb(ahc, SCB_CONTROL, 0);
  5580. ahc_add_curscb_to_free_list(ahc);
  5581. if (prev != SCB_LIST_NULL) {
  5582. ahc_outb(ahc, SCBPTR, prev);
  5583. ahc_outb(ahc, SCB_NEXT, next);
  5584. } else
  5585. ahc_outb(ahc, DISCONNECTED_SCBH, next);
  5586. return (next);
  5587. }
  5588. /*
  5589. * Add the SCB as selected by SCBPTR onto the on chip list of
  5590. * free hardware SCBs. This list is empty/unused if we are not
  5591. * performing SCB paging.
  5592. */
  5593. static void
  5594. ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
  5595. {
  5596. /*
  5597. * Invalidate the tag so that our abort
  5598. * routines don't think it's active.
  5599. */
  5600. ahc_outb(ahc, SCB_TAG, SCB_LIST_NULL);
  5601. if ((ahc->flags & AHC_PAGESCBS) != 0) {
  5602. ahc_outb(ahc, SCB_NEXT, ahc_inb(ahc, FREE_SCBH));
  5603. ahc_outb(ahc, FREE_SCBH, ahc_inb(ahc, SCBPTR));
  5604. }
  5605. }
  5606. /*
  5607. * Manipulate the waiting for selection list and return the
  5608. * scb that follows the one that we remove.
  5609. */
  5610. static u_int
  5611. ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
  5612. {
  5613. u_int curscb, next;
  5614. /*
  5615. * Select the SCB we want to abort and
  5616. * pull the next pointer out of it.
  5617. */
  5618. curscb = ahc_inb(ahc, SCBPTR);
  5619. ahc_outb(ahc, SCBPTR, scbpos);
  5620. next = ahc_inb(ahc, SCB_NEXT);
  5621. /* Clear the necessary fields */
  5622. ahc_outb(ahc, SCB_CONTROL, 0);
  5623. ahc_add_curscb_to_free_list(ahc);
  5624. /* update the waiting list */
  5625. if (prev == SCB_LIST_NULL) {
  5626. /* First in the list */
  5627. ahc_outb(ahc, WAITING_SCBH, next);
  5628. /*
  5629. * Ensure we aren't attempting to perform
  5630. * selection for this entry.
  5631. */
  5632. ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
  5633. } else {
  5634. /*
  5635. * Select the scb that pointed to us
  5636. * and update its next pointer.
  5637. */
  5638. ahc_outb(ahc, SCBPTR, prev);
  5639. ahc_outb(ahc, SCB_NEXT, next);
  5640. }
  5641. /*
  5642. * Point us back at the original scb position.
  5643. */
  5644. ahc_outb(ahc, SCBPTR, curscb);
  5645. return next;
  5646. }
  5647. /******************************** Error Handling ******************************/
  5648. /*
  5649. * Abort all SCBs that match the given description (target/channel/lun/tag),
  5650. * setting their status to the passed in status if the status has not already
  5651. * been modified from CAM_REQ_INPROG. This routine assumes that the sequencer
  5652. * is paused before it is called.
  5653. */
  5654. static int
  5655. ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
  5656. int lun, u_int tag, role_t role, uint32_t status)
  5657. {
  5658. struct scb *scbp;
  5659. struct scb *scbp_next;
  5660. u_int active_scb;
  5661. int i, j;
  5662. int maxtarget;
  5663. int minlun;
  5664. int maxlun;
  5665. int found;
  5666. /*
  5667. * Don't attempt to run any queued untagged transactions
  5668. * until we are done with the abort process.
  5669. */
  5670. ahc_freeze_untagged_queues(ahc);
  5671. /* restore this when we're done */
  5672. active_scb = ahc_inb(ahc, SCBPTR);
  5673. found = ahc_search_qinfifo(ahc, target, channel, lun, SCB_LIST_NULL,
  5674. role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);
  5675. /*
  5676. * Clean out the busy target table for any untagged commands.
  5677. */
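/*
 * Note: channel B targets occupy the upper half of the table, so a
 * specific target on channel B is cleaned at index target + 8.
 */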
  5678. i = 0;
  5679. maxtarget = 16;
  5680. if (target != CAM_TARGET_WILDCARD) {
  5681. i = target;
  5682. if (channel == 'B')
  5683. i += 8;
  5684. maxtarget = i + 1;
  5685. }
  5686. if (lun == CAM_LUN_WILDCARD) {
  5687. /*
  5688. * Unless we are using an SCB based
  5689. * busy targets table, there is only
  5690. * one table entry for all luns of
  5691. * a target.
  5692. */
  5693. minlun = 0;
  5694. maxlun = 1;
  5695. if ((ahc->flags & AHC_SCB_BTT) != 0)
  5696. maxlun = AHC_NUM_LUNS;
  5697. } else {
  5698. minlun = lun;
  5699. maxlun = lun + 1;
  5700. }
  5701. if (role != ROLE_TARGET) {
  5702. for (;i < maxtarget; i++) {
  5703. for (j = minlun;j < maxlun; j++) {
  5704. u_int scbid;
  5705. u_int tcl;
  5706. tcl = BUILD_TCL(i << 4, j);
  5707. scbid = ahc_index_busy_tcl(ahc, tcl);
  5708. scbp = ahc_lookup_scb(ahc, scbid);
  5709. if (scbp == NULL
  5710. || ahc_match_scb(ahc, scbp, target, channel,
  5711. lun, tag, role) == 0)
  5712. continue;
  5713. ahc_unbusy_tcl(ahc, BUILD_TCL(i << 4, j));
  5714. }
  5715. }
  5716. /*
  5717. * Go through the disconnected list and remove any entries we
  5718. * have queued for completion, 0'ing their control byte too.
  5719. * We save the active SCB and restore it ourselves, so there
  5720. * is no reason for this search to restore it too.
  5721. */
  5722. ahc_search_disc_list(ahc, target, channel, lun, tag,
  5723. /*stop_on_first*/FALSE, /*remove*/TRUE,
  5724. /*save_state*/FALSE);
  5725. }
  5726. /*
  5727. * Go through the hardware SCB array looking for commands that
  5728. * were active but not on any list. In some cases, these remnants
5729. * may no longer have mappings in the scbindex array (e.g. unexpected
  5730. * bus free with the same scb queued for an abort). Don't hold this
  5731. * against them.
  5732. */
  5733. for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
  5734. u_int scbid;
  5735. ahc_outb(ahc, SCBPTR, i);
  5736. scbid = ahc_inb(ahc, SCB_TAG);
  5737. scbp = ahc_lookup_scb(ahc, scbid);
  5738. if ((scbp == NULL && scbid != SCB_LIST_NULL)
  5739. || (scbp != NULL
  5740. && ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)))
  5741. ahc_add_curscb_to_free_list(ahc);
  5742. }
  5743. /*
  5744. * Go through the pending CCB list and look for
  5745. * commands for this target that are still active.
  5746. * These are other tagged commands that were
  5747. * disconnected when the reset occurred.
  5748. */
  5749. scbp_next = LIST_FIRST(&ahc->pending_scbs);
  5750. while (scbp_next != NULL) {
  5751. scbp = scbp_next;
  5752. scbp_next = LIST_NEXT(scbp, pending_links);
  5753. if (ahc_match_scb(ahc, scbp, target, channel, lun, tag, role)) {
  5754. cam_status ostat;
  5755. ostat = ahc_get_transaction_status(scbp);
  5756. if (ostat == CAM_REQ_INPROG)
  5757. ahc_set_transaction_status(scbp, status);
  5758. if (ahc_get_transaction_status(scbp) != CAM_REQ_CMP)
  5759. ahc_freeze_scb(scbp);
  5760. if ((scbp->flags & SCB_ACTIVE) == 0)
  5761. printk("Inactive SCB on pending list\n");
  5762. ahc_done(ahc, scbp);
  5763. found++;
  5764. }
  5765. }
  5766. ahc_outb(ahc, SCBPTR, active_scb);
  5767. ahc_platform_abort_scbs(ahc, target, channel, lun, tag, role, status);
  5768. ahc_release_untagged_queues(ahc);
  5769. return found;
  5770. }
  5771. static void
  5772. ahc_reset_current_bus(struct ahc_softc *ahc)
  5773. {
  5774. uint8_t scsiseq;
  5775. ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) & ~ENSCSIRST);
  5776. scsiseq = ahc_inb(ahc, SCSISEQ);
  5777. ahc_outb(ahc, SCSISEQ, scsiseq | SCSIRSTO);
  5778. ahc_flush_device_writes(ahc);
  5779. ahc_delay(AHC_BUSRESET_DELAY);
  5780. /* Turn off the bus reset */
  5781. ahc_outb(ahc, SCSISEQ, scsiseq & ~SCSIRSTO);
  5782. ahc_clear_intstat(ahc);
  5783. /* Re-enable reset interrupts */
  5784. ahc_outb(ahc, SIMODE1, ahc_inb(ahc, SIMODE1) | ENSCSIRST);
  5785. }
  5786. int
  5787. ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
  5788. {
  5789. struct ahc_devinfo devinfo;
  5790. u_int initiator, target, max_scsiid;
  5791. u_int sblkctl;
  5792. u_int scsiseq;
  5793. u_int simode1;
  5794. int found;
  5795. int restart_needed;
  5796. char cur_channel;
  5797. ahc->pending_device = NULL;
  5798. ahc_compile_devinfo(&devinfo,
  5799. CAM_TARGET_WILDCARD,
  5800. CAM_TARGET_WILDCARD,
  5801. CAM_LUN_WILDCARD,
  5802. channel, ROLE_UNKNOWN);
  5803. ahc_pause(ahc);
  5804. /* Make sure the sequencer is in a safe location. */
  5805. ahc_clear_critical_section(ahc);
  5806. /*
  5807. * Run our command complete fifos to ensure that we perform
  5808. * completion processing on any commands that 'completed'
  5809. * before the reset occurred.
  5810. */
  5811. ahc_run_qoutfifo(ahc);
  5812. #ifdef AHC_TARGET_MODE
  5813. /*
  5814. * XXX - In Twin mode, the tqinfifo may have commands
  5815. * for an unaffected channel in it. However, if
  5816. * we have run out of ATIO resources to drain that
  5817. * queue, we may not get them all out here. Further,
  5818. * the blocked transactions for the reset channel
5819. * should just be killed off, irrespective of whether
  5820. * we are blocked on ATIO resources. Write a routine
  5821. * to compact the tqinfifo appropriately.
  5822. */
  5823. if ((ahc->flags & AHC_TARGETROLE) != 0) {
  5824. ahc_run_tqinfifo(ahc, /*paused*/TRUE);
  5825. }
  5826. #endif
  5827. /*
  5828. * Reset the bus if we are initiating this reset
  5829. */
  5830. sblkctl = ahc_inb(ahc, SBLKCTL);
  5831. cur_channel = 'A';
  5832. if ((ahc->features & AHC_TWIN) != 0
  5833. && ((sblkctl & SELBUSB) != 0))
  5834. cur_channel = 'B';
  5835. scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
  5836. if (cur_channel != channel) {
  5837. /* Case 1: Command for another bus is active
  5838. * Stealthily reset the other bus without
  5839. * upsetting the current bus.
  5840. */
  5841. ahc_outb(ahc, SBLKCTL, sblkctl ^ SELBUSB);
  5842. simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
  5843. #ifdef AHC_TARGET_MODE
  5844. /*
  5845. * Bus resets clear ENSELI, so we cannot
  5846. * defer re-enabling bus reset interrupts
  5847. * if we are in target mode.
  5848. */
  5849. if ((ahc->flags & AHC_TARGETROLE) != 0)
  5850. simode1 |= ENSCSIRST;
  5851. #endif
  5852. ahc_outb(ahc, SIMODE1, simode1);
  5853. if (initiate_reset)
  5854. ahc_reset_current_bus(ahc);
  5855. ahc_clear_intstat(ahc);
  5856. ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
  5857. ahc_outb(ahc, SBLKCTL, sblkctl);
  5858. restart_needed = FALSE;
  5859. } else {
  5860. /* Case 2: A command from this bus is active or we're idle */
  5861. simode1 = ahc_inb(ahc, SIMODE1) & ~(ENBUSFREE|ENSCSIRST);
  5862. #ifdef AHC_TARGET_MODE
  5863. /*
  5864. * Bus resets clear ENSELI, so we cannot
  5865. * defer re-enabling bus reset interrupts
  5866. * if we are in target mode.
  5867. */
  5868. if ((ahc->flags & AHC_TARGETROLE) != 0)
  5869. simode1 |= ENSCSIRST;
  5870. #endif
  5871. ahc_outb(ahc, SIMODE1, simode1);
  5872. if (initiate_reset)
  5873. ahc_reset_current_bus(ahc);
  5874. ahc_clear_intstat(ahc);
  5875. ahc_outb(ahc, SCSISEQ, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));
  5876. restart_needed = TRUE;
  5877. }
  5878. /*
  5879. * Clean up all the state information for the
  5880. * pending transactions on this bus.
  5881. */
  5882. found = ahc_abort_scbs(ahc, CAM_TARGET_WILDCARD, channel,
  5883. CAM_LUN_WILDCARD, SCB_LIST_NULL,
  5884. ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);
  5885. max_scsiid = (ahc->features & AHC_WIDE) ? 15 : 7;
  5886. #ifdef AHC_TARGET_MODE
  5887. /*
5888. * Send an immediate notify ccb to all target mode peripheral
  5889. * drivers affected by this action.
  5890. */
  5891. for (target = 0; target <= max_scsiid; target++) {
  5892. struct ahc_tmode_tstate* tstate;
  5893. u_int lun;
  5894. tstate = ahc->enabled_targets[target];
  5895. if (tstate == NULL)
  5896. continue;
  5897. for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
  5898. struct ahc_tmode_lstate* lstate;
  5899. lstate = tstate->enabled_luns[lun];
  5900. if (lstate == NULL)
  5901. continue;
  5902. ahc_queue_lstate_event(ahc, lstate, CAM_TARGET_WILDCARD,
  5903. EVENT_TYPE_BUS_RESET, /*arg*/0);
  5904. ahc_send_lstate_events(ahc, lstate);
  5905. }
  5906. }
  5907. #endif
  5908. /* Notify the XPT that a bus reset occurred */
  5909. ahc_send_async(ahc, devinfo.channel, CAM_TARGET_WILDCARD,
  5910. CAM_LUN_WILDCARD, AC_BUS_RESET);
  5911. /*
  5912. * Revert to async/narrow transfers until we renegotiate.
  5913. */
  5914. for (target = 0; target <= max_scsiid; target++) {
  5915. if (ahc->enabled_targets[target] == NULL)
  5916. continue;
  5917. for (initiator = 0; initiator <= max_scsiid; initiator++) {
  5918. struct ahc_devinfo devinfo;
  5919. ahc_compile_devinfo(&devinfo, target, initiator,
  5920. CAM_LUN_WILDCARD,
  5921. channel, ROLE_UNKNOWN);
  5922. ahc_set_width(ahc, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
  5923. AHC_TRANS_CUR, /*paused*/TRUE);
  5924. ahc_set_syncrate(ahc, &devinfo, /*syncrate*/NULL,
  5925. /*period*/0, /*offset*/0,
  5926. /*ppr_options*/0, AHC_TRANS_CUR,
  5927. /*paused*/TRUE);
  5928. }
  5929. }
  5930. if (restart_needed)
  5931. ahc_restart(ahc);
  5932. else
  5933. ahc_unpause(ahc);
  5934. return found;
  5935. }
  5936. /***************************** Residual Processing ****************************/
  5937. /*
  5938. * Calculate the residual for a just completed SCB.
  5939. */
  5940. static void
  5941. ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
  5942. {
  5943. struct hardware_scb *hscb;
  5944. struct status_pkt *spkt;
  5945. uint32_t sgptr;
  5946. uint32_t resid_sgptr;
  5947. uint32_t resid;
  5948. /*
  5949. * 5 cases.
  5950. * 1) No residual.
  5951. * SG_RESID_VALID clear in sgptr.
  5952. * 2) Transferless command
  5953. * 3) Never performed any transfers.
  5954. * sgptr has SG_FULL_RESID set.
  5955. * 4) No residual but target did not
  5956. * save data pointers after the
  5957. * last transfer, so sgptr was
  5958. * never updated.
  5959. * 5) We have a partial residual.
  5960. * Use residual_sgptr to determine
  5961. * where we are.
  5962. */
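/*
 * Worked example for case 5 (illustrative only): with three 4KB S/G
 * segments and a transfer that stopped 1KB into the second segment,
 * residual_datacnt reports the 3KB left in that segment and the loop
 * below adds the untouched 4KB third segment, for a 7KB residual.
 */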
  5963. hscb = scb->hscb;
  5964. sgptr = ahc_le32toh(hscb->sgptr);
  5965. if ((sgptr & SG_RESID_VALID) == 0)
  5966. /* Case 1 */
  5967. return;
  5968. sgptr &= ~SG_RESID_VALID;
  5969. if ((sgptr & SG_LIST_NULL) != 0)
  5970. /* Case 2 */
  5971. return;
  5972. spkt = &hscb->shared_data.status;
  5973. resid_sgptr = ahc_le32toh(spkt->residual_sg_ptr);
  5974. if ((sgptr & SG_FULL_RESID) != 0) {
  5975. /* Case 3 */
  5976. resid = ahc_get_transfer_length(scb);
  5977. } else if ((resid_sgptr & SG_LIST_NULL) != 0) {
  5978. /* Case 4 */
  5979. return;
  5980. } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) {
  5981. panic("Bogus resid sgptr value 0x%x\n", resid_sgptr);
  5982. } else {
  5983. struct ahc_dma_seg *sg;
  5984. /*
  5985. * Remainder of the SG where the transfer
  5986. * stopped.
  5987. */
  5988. resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
  5989. sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);
  5990. /* The residual sg_ptr always points to the next sg */
  5991. sg--;
  5992. /*
  5993. * Add up the contents of all residual
  5994. * SG segments that are after the SG where
  5995. * the transfer stopped.
  5996. */
  5997. while ((ahc_le32toh(sg->len) & AHC_DMA_LAST_SEG) == 0) {
  5998. sg++;
  5999. resid += ahc_le32toh(sg->len) & AHC_SG_LEN_MASK;
  6000. }
  6001. }
  6002. if ((scb->flags & SCB_SENSE) == 0)
  6003. ahc_set_residual(scb, resid);
  6004. else
  6005. ahc_set_sense_residual(scb, resid);
  6006. #ifdef AHC_DEBUG
  6007. if ((ahc_debug & AHC_SHOW_MISC) != 0) {
  6008. ahc_print_path(ahc, scb);
  6009. printk("Handled %sResidual of %d bytes\n",
  6010. (scb->flags & SCB_SENSE) ? "Sense " : "", resid);
  6011. }
  6012. #endif
  6013. }
  6014. /******************************* Target Mode **********************************/
  6015. #ifdef AHC_TARGET_MODE
  6016. /*
  6017. * Add a target mode event to this lun's queue
  6018. */
  6019. static void
  6020. ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate,
  6021. u_int initiator_id, u_int event_type, u_int event_arg)
  6022. {
  6023. struct ahc_tmode_event *event;
  6024. int pending;
  6025. xpt_freeze_devq(lstate->path, /*count*/1);
  6026. if (lstate->event_w_idx >= lstate->event_r_idx)
  6027. pending = lstate->event_w_idx - lstate->event_r_idx;
  6028. else
  6029. pending = AHC_TMODE_EVENT_BUFFER_SIZE + 1
  6030. - (lstate->event_r_idx - lstate->event_w_idx);
  6031. if (event_type == EVENT_TYPE_BUS_RESET
  6032. || event_type == TARGET_RESET) {
  6033. /*
  6034. * Any earlier events are irrelevant, so reset our buffer.
  6035. * This has the effect of allowing us to deal with reset
  6036. * floods (an external device holding down the reset line)
  6037. * without losing the event that is really interesting.
  6038. */
  6039. lstate->event_r_idx = 0;
  6040. lstate->event_w_idx = 0;
  6041. xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE);
  6042. }
  6043. if (pending == AHC_TMODE_EVENT_BUFFER_SIZE) {
  6044. xpt_print_path(lstate->path);
  6045. printk("immediate event %x:%x lost\n",
  6046. lstate->event_buffer[lstate->event_r_idx].event_type,
  6047. lstate->event_buffer[lstate->event_r_idx].event_arg);
  6048. lstate->event_r_idx++;
  6049. if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
  6050. lstate->event_r_idx = 0;
  6051. xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE);
  6052. }
  6053. event = &lstate->event_buffer[lstate->event_w_idx];
  6054. event->initiator_id = initiator_id;
  6055. event->event_type = event_type;
  6056. event->event_arg = event_arg;
  6057. lstate->event_w_idx++;
  6058. if (lstate->event_w_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
  6059. lstate->event_w_idx = 0;
  6060. }
  6061. /*
  6062. * Send any target mode events queued up waiting
  6063. * for immediate notify resources.
  6064. */
  6065. void
  6066. ahc_send_lstate_events(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate)
  6067. {
  6068. struct ccb_hdr *ccbh;
  6069. struct ccb_immed_notify *inot;
  6070. while (lstate->event_r_idx != lstate->event_w_idx
  6071. && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
  6072. struct ahc_tmode_event *event;
  6073. event = &lstate->event_buffer[lstate->event_r_idx];
  6074. SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
  6075. inot = (struct ccb_immed_notify *)ccbh;
  6076. switch (event->event_type) {
  6077. case EVENT_TYPE_BUS_RESET:
  6078. ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
  6079. break;
  6080. default:
  6081. ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
  6082. inot->message_args[0] = event->event_type;
  6083. inot->message_args[1] = event->event_arg;
  6084. break;
  6085. }
  6086. inot->initiator_id = event->initiator_id;
  6087. inot->sense_len = 0;
  6088. xpt_done((union ccb *)inot);
  6089. lstate->event_r_idx++;
  6090. if (lstate->event_r_idx == AHC_TMODE_EVENT_BUFFER_SIZE)
  6091. lstate->event_r_idx = 0;
  6092. }
  6093. }
  6094. #endif
  6095. /******************** Sequencer Program Patching/Download *********************/
  6096. #ifdef AHC_DUMP_SEQ
  6097. void
  6098. ahc_dumpseq(struct ahc_softc* ahc)
  6099. {
  6100. int i;
  6101. ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
  6102. ahc_outb(ahc, SEQADDR0, 0);
  6103. ahc_outb(ahc, SEQADDR1, 0);
  6104. for (i = 0; i < ahc->instruction_ram_size; i++) {
  6105. uint8_t ins_bytes[4];
  6106. ahc_insb(ahc, SEQRAM, ins_bytes, 4);
  6107. printk("0x%08x\n", ins_bytes[0] << 24
  6108. | ins_bytes[1] << 16
  6109. | ins_bytes[2] << 8
  6110. | ins_bytes[3]);
  6111. }
  6112. }
  6113. #endif
  6114. static int
  6115. ahc_loadseq(struct ahc_softc *ahc)
  6116. {
  6117. struct cs cs_table[NUM_CRITICAL_SECTIONS];
  6118. u_int begin_set[NUM_CRITICAL_SECTIONS];
  6119. u_int end_set[NUM_CRITICAL_SECTIONS];
  6120. const struct patch *cur_patch;
  6121. u_int cs_count;
  6122. u_int cur_cs;
  6123. u_int i;
  6124. u_int skip_addr;
  6125. u_int sg_prefetch_cnt;
  6126. int downloaded;
  6127. uint8_t download_consts[7];
  6128. /*
  6129. * Start out with 0 critical sections
  6130. * that apply to this firmware load.
  6131. */
  6132. cs_count = 0;
  6133. cur_cs = 0;
  6134. memset(begin_set, 0, sizeof(begin_set));
  6135. memset(end_set, 0, sizeof(end_set));
  6136. /* Setup downloadable constant table */
  6137. download_consts[QOUTFIFO_OFFSET] = 0;
  6138. if (ahc->targetcmds != NULL)
  6139. download_consts[QOUTFIFO_OFFSET] += 32;
  6140. download_consts[QINFIFO_OFFSET] = download_consts[QOUTFIFO_OFFSET] + 1;
  6141. download_consts[CACHESIZE_MASK] = ahc->pci_cachesize - 1;
  6142. download_consts[INVERTED_CACHESIZE_MASK] = ~(ahc->pci_cachesize - 1);
  6143. sg_prefetch_cnt = ahc->pci_cachesize;
  6144. if (sg_prefetch_cnt < (2 * sizeof(struct ahc_dma_seg)))
  6145. sg_prefetch_cnt = 2 * sizeof(struct ahc_dma_seg);
  6146. download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
  6147. download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_cnt - 1);
  6148. download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_cnt - 1);
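/*
 * Illustrative example (values depend on the host bridge): with a
 * 64 byte PCI cache line, SG_PREFETCH_CNT is 64, SG_PREFETCH_ALIGN_MASK
 * is stored as 0xC0 and SG_PREFETCH_ADDR_MASK as 0x3F, letting the
 * sequencer round S/G fetch addresses down to a cache line boundary.
 */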
  6149. cur_patch = patches;
  6150. downloaded = 0;
  6151. skip_addr = 0;
  6152. ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
  6153. ahc_outb(ahc, SEQADDR0, 0);
  6154. ahc_outb(ahc, SEQADDR1, 0);
  6155. for (i = 0; i < sizeof(seqprog)/4; i++) {
  6156. if (ahc_check_patch(ahc, &cur_patch, i, &skip_addr) == 0) {
  6157. /*
  6158. * Don't download this instruction as it
  6159. * is in a patch that was removed.
  6160. */
  6161. continue;
  6162. }
  6163. if (downloaded == ahc->instruction_ram_size) {
  6164. /*
  6165. * We're about to exceed the instruction
  6166. * storage capacity for this chip. Fail
  6167. * the load.
  6168. */
  6169. printk("\n%s: Program too large for instruction memory "
  6170. "size of %d!\n", ahc_name(ahc),
  6171. ahc->instruction_ram_size);
  6172. return (ENOMEM);
  6173. }
  6174. /*
  6175. * Move through the CS table until we find a CS
  6176. * that might apply to this instruction.
  6177. */
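/*
 * Note: the table records *downloaded* instruction addresses rather
 * than pre-patch source addresses, since the sequencer executes the
 * compacted image.
 */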
  6178. for (; cur_cs < NUM_CRITICAL_SECTIONS; cur_cs++) {
  6179. if (critical_sections[cur_cs].end <= i) {
  6180. if (begin_set[cs_count] == TRUE
  6181. && end_set[cs_count] == FALSE) {
  6182. cs_table[cs_count].end = downloaded;
  6183. end_set[cs_count] = TRUE;
  6184. cs_count++;
  6185. }
  6186. continue;
  6187. }
  6188. if (critical_sections[cur_cs].begin <= i
  6189. && begin_set[cs_count] == FALSE) {
  6190. cs_table[cs_count].begin = downloaded;
  6191. begin_set[cs_count] = TRUE;
  6192. }
  6193. break;
  6194. }
  6195. ahc_download_instr(ahc, i, download_consts);
  6196. downloaded++;
  6197. }
  6198. ahc->num_critical_sections = cs_count;
  6199. if (cs_count != 0) {
  6200. cs_count *= sizeof(struct cs);
  6201. ahc->critical_sections = kmemdup(cs_table, cs_count, GFP_ATOMIC);
  6202. if (ahc->critical_sections == NULL)
  6203. panic("ahc_loadseq: Could not malloc");
  6204. }
  6205. ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
  6206. if (bootverbose) {
  6207. printk(" %d instructions downloaded\n", downloaded);
  6208. printk("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
  6209. ahc_name(ahc), ahc->features, ahc->bugs, ahc->flags);
  6210. }
  6211. return (0);
  6212. }
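/*
 * Descriptive note: decide whether instruction start_instr belongs in
 * the image for this controller.  Returns 1 if it should be downloaded
 * and 0 if it falls inside a patch region that was rejected, in which
 * case *skip_addr marks the first instruction past the rejected region
 * and *start_patch is advanced for the next call.
 */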
  6213. static int
  6214. ahc_check_patch(struct ahc_softc *ahc, const struct patch **start_patch,
  6215. u_int start_instr, u_int *skip_addr)
  6216. {
  6217. const struct patch *cur_patch;
  6218. const struct patch *last_patch;
  6219. u_int num_patches;
  6220. num_patches = ARRAY_SIZE(patches);
  6221. last_patch = &patches[num_patches];
  6222. cur_patch = *start_patch;
  6223. while (cur_patch < last_patch && start_instr == cur_patch->begin) {
  6224. if (cur_patch->patch_func(ahc) == 0) {
  6225. /* Start rejecting code */
  6226. *skip_addr = start_instr + cur_patch->skip_instr;
  6227. cur_patch += cur_patch->skip_patch;
  6228. } else {
  6229. /* Accepted this patch. Advance to the next
6230. * one and wait for our instruction pointer to
  6231. * hit this point.
  6232. */
  6233. cur_patch++;
  6234. }
  6235. }
  6236. *start_patch = cur_patch;
  6237. if (start_instr < *skip_addr)
  6238. /* Still skipping */
  6239. return (0);
  6240. return (1);
  6241. }
  6242. static void
  6243. ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
  6244. {
  6245. union ins_formats instr;
  6246. struct ins_format1 *fmt1_ins;
  6247. struct ins_format3 *fmt3_ins;
  6248. u_int opcode;
  6249. /*
  6250. * The firmware is always compiled into a little endian format.
  6251. */
  6252. instr.integer = ahc_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);
  6253. fmt1_ins = &instr.format1;
  6254. fmt3_ins = NULL;
  6255. /* Pull the opcode */
  6256. opcode = instr.format1.opcode;
  6257. switch (opcode) {
  6258. case AIC_OP_JMP:
  6259. case AIC_OP_JC:
  6260. case AIC_OP_JNC:
  6261. case AIC_OP_CALL:
  6262. case AIC_OP_JNE:
  6263. case AIC_OP_JNZ:
  6264. case AIC_OP_JE:
  6265. case AIC_OP_JZ:
  6266. {
  6267. const struct patch *cur_patch;
  6268. int address_offset;
  6269. u_int address;
  6270. u_int skip_addr;
  6271. u_int i;
  6272. fmt3_ins = &instr.format3;
  6273. address_offset = 0;
  6274. address = fmt3_ins->address;
  6275. cur_patch = patches;
  6276. skip_addr = 0;
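/*
 * Rejected patches are not downloaded, so every jump target must be
 * pulled back by the number of skipped instructions that precede it.
 * Walk the patch table up to the original target address and
 * accumulate that offset.
 */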
  6277. for (i = 0; i < address;) {
  6278. ahc_check_patch(ahc, &cur_patch, i, &skip_addr);
  6279. if (skip_addr > i) {
  6280. int end_addr;
  6281. end_addr = min(address, skip_addr);
  6282. address_offset += end_addr - i;
  6283. i = skip_addr;
  6284. } else {
  6285. i++;
  6286. }
  6287. }
  6288. address -= address_offset;
  6289. fmt3_ins->address = address;
  6290. }
  6291. fallthrough;
  6292. case AIC_OP_OR:
  6293. case AIC_OP_AND:
  6294. case AIC_OP_XOR:
  6295. case AIC_OP_ADD:
  6296. case AIC_OP_ADC:
  6297. case AIC_OP_BMOV:
  6298. if (fmt1_ins->parity != 0) {
  6299. fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
  6300. }
  6301. fmt1_ins->parity = 0;
  6302. if ((ahc->features & AHC_CMD_CHAN) == 0
  6303. && opcode == AIC_OP_BMOV) {
  6304. /*
  6305. * Block move was added at the same time
  6306. * as the command channel. Verify that
  6307. * this is only a move of a single element
  6308. * and convert the BMOV to a MOV
  6309. * (AND with an immediate of FF).
  6310. */
  6311. if (fmt1_ins->immediate != 1)
  6312. panic("%s: BMOV not supported\n",
  6313. ahc_name(ahc));
  6314. fmt1_ins->opcode = AIC_OP_AND;
  6315. fmt1_ins->immediate = 0xff;
  6316. }
  6317. fallthrough;
  6318. case AIC_OP_ROL:
  6319. if ((ahc->features & AHC_ULTRA2) != 0) {
  6320. int i, count;
  6321. /* Calculate odd parity for the instruction */
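/*
 * Ultra2 sequencer RAM is parity protected: bits 0-30 are summed and
 * the parity flag is set when the count is even, so each downloaded
 * word carries odd parity.
 */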
  6322. for (i = 0, count = 0; i < 31; i++) {
  6323. uint32_t mask;
  6324. mask = 0x01 << i;
  6325. if ((instr.integer & mask) != 0)
  6326. count++;
  6327. }
  6328. if ((count & 0x01) == 0)
  6329. instr.format1.parity = 1;
  6330. } else {
  6331. /* Compress the instruction for older sequencers */
  6332. if (fmt3_ins != NULL) {
  6333. instr.integer =
  6334. fmt3_ins->immediate
  6335. | (fmt3_ins->source << 8)
  6336. | (fmt3_ins->address << 16)
  6337. | (fmt3_ins->opcode << 25);
  6338. } else {
  6339. instr.integer =
  6340. fmt1_ins->immediate
  6341. | (fmt1_ins->source << 8)
  6342. | (fmt1_ins->destination << 16)
  6343. | (fmt1_ins->ret << 24)
  6344. | (fmt1_ins->opcode << 25);
  6345. }
  6346. }
  6347. /* The sequencer is a little endian cpu */
  6348. instr.integer = ahc_htole32(instr.integer);
  6349. ahc_outsb(ahc, SEQRAM, instr.bytes, 4);
  6350. break;
  6351. default:
  6352. panic("Unknown opcode encountered in seq program");
  6353. break;
  6354. }
  6355. }
  6356. int
  6357. ahc_print_register(const ahc_reg_parse_entry_t *table, u_int num_entries,
  6358. const char *name, u_int address, u_int value,
  6359. u_int *cur_column, u_int wrap_point)
  6360. {
  6361. int printed;
  6362. u_int printed_mask;
  6363. if (cur_column != NULL && *cur_column >= wrap_point) {
  6364. printk("\n");
  6365. *cur_column = 0;
  6366. }
  6367. printed = printk("%s[0x%x]", name, value);
  6368. if (table == NULL) {
  6369. printed += printk(" ");
  6370. *cur_column += printed;
  6371. return (printed);
  6372. }
  6373. printed_mask = 0;
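/*
 * Decode the value one field at a time: on each pass print the first
 * table entry whose bits match and are not yet covered, then mark
 * those bits as printed.  Stop once all eight register bits are
 * accounted for or no further entry matches.
 */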
  6374. while (printed_mask != 0xFF) {
  6375. int entry;
  6376. for (entry = 0; entry < num_entries; entry++) {
  6377. if (((value & table[entry].mask)
  6378. != table[entry].value)
  6379. || ((printed_mask & table[entry].mask)
  6380. == table[entry].mask))
  6381. continue;
  6382. printed += printk("%s%s",
  6383. printed_mask == 0 ? ":(" : "|",
  6384. table[entry].name);
  6385. printed_mask |= table[entry].mask;
  6386. break;
  6387. }
  6388. if (entry >= num_entries)
  6389. break;
  6390. }
  6391. if (printed_mask != 0)
  6392. printed += printk(") ");
  6393. else
  6394. printed += printk(" ");
  6395. if (cur_column != NULL)
  6396. *cur_column += printed;
  6397. return (printed);
  6398. }
  6399. void
  6400. ahc_dump_card_state(struct ahc_softc *ahc)
  6401. {
  6402. struct scb *scb;
  6403. struct scb_tailq *untagged_q;
  6404. u_int cur_col;
  6405. int paused;
  6406. int target;
  6407. int maxtarget;
  6408. int i;
  6409. uint8_t last_phase;
  6410. uint8_t qinpos;
  6411. uint8_t qintail;
  6412. uint8_t qoutpos;
  6413. uint8_t scb_index;
  6414. uint8_t saved_scbptr;
  6415. if (ahc_is_paused(ahc)) {
  6416. paused = 1;
  6417. } else {
  6418. paused = 0;
  6419. ahc_pause(ahc);
  6420. }
  6421. saved_scbptr = ahc_inb(ahc, SCBPTR);
  6422. last_phase = ahc_inb(ahc, LASTPHASE);
  6423. printk(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
  6424. "%s: Dumping Card State %s, at SEQADDR 0x%x\n",
  6425. ahc_name(ahc), ahc_lookup_phase_entry(last_phase)->phasemsg,
  6426. ahc_inb(ahc, SEQADDR0) | (ahc_inb(ahc, SEQADDR1) << 8));
  6427. if (paused)
  6428. printk("Card was paused\n");
  6429. printk("ACCUM = 0x%x, SINDEX = 0x%x, DINDEX = 0x%x, ARG_2 = 0x%x\n",
  6430. ahc_inb(ahc, ACCUM), ahc_inb(ahc, SINDEX), ahc_inb(ahc, DINDEX),
  6431. ahc_inb(ahc, ARG_2));
  6432. printk("HCNT = 0x%x SCBPTR = 0x%x\n", ahc_inb(ahc, HCNT),
  6433. ahc_inb(ahc, SCBPTR));
  6434. cur_col = 0;
  6435. if ((ahc->features & AHC_DT) != 0)
  6436. ahc_scsiphase_print(ahc_inb(ahc, SCSIPHASE), &cur_col, 50);
  6437. ahc_scsisigi_print(ahc_inb(ahc, SCSISIGI), &cur_col, 50);
  6438. ahc_error_print(ahc_inb(ahc, ERROR), &cur_col, 50);
  6439. ahc_scsibusl_print(ahc_inb(ahc, SCSIBUSL), &cur_col, 50);
  6440. ahc_lastphase_print(ahc_inb(ahc, LASTPHASE), &cur_col, 50);
  6441. ahc_scsiseq_print(ahc_inb(ahc, SCSISEQ), &cur_col, 50);
  6442. ahc_sblkctl_print(ahc_inb(ahc, SBLKCTL), &cur_col, 50);
  6443. ahc_scsirate_print(ahc_inb(ahc, SCSIRATE), &cur_col, 50);
  6444. ahc_seqctl_print(ahc_inb(ahc, SEQCTL), &cur_col, 50);
  6445. ahc_seq_flags_print(ahc_inb(ahc, SEQ_FLAGS), &cur_col, 50);
  6446. ahc_sstat0_print(ahc_inb(ahc, SSTAT0), &cur_col, 50);
  6447. ahc_sstat1_print(ahc_inb(ahc, SSTAT1), &cur_col, 50);
  6448. ahc_sstat2_print(ahc_inb(ahc, SSTAT2), &cur_col, 50);
  6449. ahc_sstat3_print(ahc_inb(ahc, SSTAT3), &cur_col, 50);
  6450. ahc_simode0_print(ahc_inb(ahc, SIMODE0), &cur_col, 50);
  6451. ahc_simode1_print(ahc_inb(ahc, SIMODE1), &cur_col, 50);
  6452. ahc_sxfrctl0_print(ahc_inb(ahc, SXFRCTL0), &cur_col, 50);
  6453. ahc_dfcntrl_print(ahc_inb(ahc, DFCNTRL), &cur_col, 50);
  6454. ahc_dfstatus_print(ahc_inb(ahc, DFSTATUS), &cur_col, 50);
  6455. if (cur_col != 0)
  6456. printk("\n");
  6457. printk("STACK:");
  6458. for (i = 0; i < STACK_SIZE; i++)
  6459. printk(" 0x%x", ahc_inb(ahc, STACK)|(ahc_inb(ahc, STACK) << 8));
  6460. printk("\nSCB count = %d\n", ahc->scb_data->numscbs);
  6461. printk("Kernel NEXTQSCB = %d\n", ahc->next_queued_scb->hscb->tag);
  6462. printk("Card NEXTQSCB = %d\n", ahc_inb(ahc, NEXT_QUEUED_SCB));
  6463. /* QINFIFO */
  6464. printk("QINFIFO entries: ");
  6465. if ((ahc->features & AHC_QUEUE_REGS) != 0) {
  6466. qinpos = ahc_inb(ahc, SNSCB_QOFF);
  6467. ahc_outb(ahc, SNSCB_QOFF, qinpos);
  6468. } else
  6469. qinpos = ahc_inb(ahc, QINPOS);
  6470. qintail = ahc->qinfifonext;
  6471. while (qinpos != qintail) {
  6472. printk("%d ", ahc->qinfifo[qinpos]);
  6473. qinpos++;
  6474. }
  6475. printk("\n");
  6476. printk("Waiting Queue entries: ");
  6477. scb_index = ahc_inb(ahc, WAITING_SCBH);
  6478. i = 0;
  6479. while (scb_index != SCB_LIST_NULL && i++ < 256) {
  6480. ahc_outb(ahc, SCBPTR, scb_index);
  6481. printk("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
  6482. scb_index = ahc_inb(ahc, SCB_NEXT);
  6483. }
  6484. printk("\n");
  6485. printk("Disconnected Queue entries: ");
  6486. scb_index = ahc_inb(ahc, DISCONNECTED_SCBH);
  6487. i = 0;
  6488. while (scb_index != SCB_LIST_NULL && i++ < 256) {
  6489. ahc_outb(ahc, SCBPTR, scb_index);
  6490. printk("%d:%d ", scb_index, ahc_inb(ahc, SCB_TAG));
  6491. scb_index = ahc_inb(ahc, SCB_NEXT);
  6492. }
  6493. printk("\n");
  6494. ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
  6495. printk("QOUTFIFO entries: ");
  6496. qoutpos = ahc->qoutfifonext;
  6497. i = 0;
  6498. while (ahc->qoutfifo[qoutpos] != SCB_LIST_NULL && i++ < 256) {
  6499. printk("%d ", ahc->qoutfifo[qoutpos]);
  6500. qoutpos++;
  6501. }
  6502. printk("\n");
  6503. printk("Sequencer Free SCB List: ");
  6504. scb_index = ahc_inb(ahc, FREE_SCBH);
  6505. i = 0;
  6506. while (scb_index != SCB_LIST_NULL && i++ < 256) {
  6507. ahc_outb(ahc, SCBPTR, scb_index);
  6508. printk("%d ", scb_index);
  6509. scb_index = ahc_inb(ahc, SCB_NEXT);
  6510. }
  6511. printk("\n");
  6512. printk("Sequencer SCB Info: ");
  6513. for (i = 0; i < ahc->scb_data->maxhscbs; i++) {
  6514. ahc_outb(ahc, SCBPTR, i);
  6515. cur_col = printk("\n%3d ", i);
  6516. ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL), &cur_col, 60);
  6517. ahc_scb_scsiid_print(ahc_inb(ahc, SCB_SCSIID), &cur_col, 60);
  6518. ahc_scb_lun_print(ahc_inb(ahc, SCB_LUN), &cur_col, 60);
  6519. ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60);
  6520. }
  6521. printk("\n");
  6522. printk("Pending list: ");
  6523. i = 0;
  6524. LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
  6525. if (i++ > 256)
  6526. break;
  6527. cur_col = printk("\n%3d ", scb->hscb->tag);
  6528. ahc_scb_control_print(scb->hscb->control, &cur_col, 60);
  6529. ahc_scb_scsiid_print(scb->hscb->scsiid, &cur_col, 60);
  6530. ahc_scb_lun_print(scb->hscb->lun, &cur_col, 60);
  6531. if ((ahc->flags & AHC_PAGESCBS) == 0) {
  6532. ahc_outb(ahc, SCBPTR, scb->hscb->tag);
  6533. printk("(");
  6534. ahc_scb_control_print(ahc_inb(ahc, SCB_CONTROL),
  6535. &cur_col, 60);
  6536. ahc_scb_tag_print(ahc_inb(ahc, SCB_TAG), &cur_col, 60);
  6537. printk(")");
  6538. }
  6539. }
  6540. printk("\n");
  6541. printk("Kernel Free SCB list: ");
  6542. i = 0;
  6543. SLIST_FOREACH(scb, &ahc->scb_data->free_scbs, links.sle) {
  6544. if (i++ > 256)
  6545. break;
  6546. printk("%d ", scb->hscb->tag);
  6547. }
  6548. printk("\n");
  6549. maxtarget = (ahc->features & (AHC_WIDE|AHC_TWIN)) ? 15 : 7;
  6550. for (target = 0; target <= maxtarget; target++) {
  6551. untagged_q = &ahc->untagged_queues[target];
  6552. if (TAILQ_FIRST(untagged_q) == NULL)
  6553. continue;
  6554. printk("Untagged Q(%d): ", target);
  6555. i = 0;
  6556. TAILQ_FOREACH(scb, untagged_q, links.tqe) {
  6557. if (i++ > 256)
  6558. break;
  6559. printk("%d ", scb->hscb->tag);
  6560. }
  6561. printk("\n");
  6562. }
  6563. printk("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
  6564. ahc_outb(ahc, SCBPTR, saved_scbptr);
  6565. if (paused == 0)
  6566. ahc_unpause(ahc);
  6567. }
  6568. /************************* Target Mode ****************************************/
  6569. #ifdef AHC_TARGET_MODE
  6570. cam_status
  6571. ahc_find_tmode_devs(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb,
  6572. struct ahc_tmode_tstate **tstate,
  6573. struct ahc_tmode_lstate **lstate,
  6574. int notfound_failure)
  6575. {
  6576. if ((ahc->features & AHC_TARGETMODE) == 0)
  6577. return (CAM_REQ_INVALID);
  6578. /*
  6579. * Handle the 'black hole' device that sucks up
  6580. * requests to unattached luns on enabled targets.
  6581. */
  6582. if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD
  6583. && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
  6584. *tstate = NULL;
  6585. *lstate = ahc->black_hole;
  6586. } else {
  6587. u_int max_id;
  6588. max_id = (ahc->features & AHC_WIDE) ? 16 : 8;
  6589. if (ccb->ccb_h.target_id >= max_id)
  6590. return (CAM_TID_INVALID);
  6591. if (ccb->ccb_h.target_lun >= AHC_NUM_LUNS)
  6592. return (CAM_LUN_INVALID);
  6593. *tstate = ahc->enabled_targets[ccb->ccb_h.target_id];
  6594. *lstate = NULL;
  6595. if (*tstate != NULL)
  6596. *lstate =
  6597. (*tstate)->enabled_luns[ccb->ccb_h.target_lun];
  6598. }
  6599. if (notfound_failure != 0 && *lstate == NULL)
  6600. return (CAM_PATH_INVALID);
  6601. return (CAM_REQ_CMP);
  6602. }
  6603. void
  6604. ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
  6605. {
  6606. struct ahc_tmode_tstate *tstate;
  6607. struct ahc_tmode_lstate *lstate;
  6608. struct ccb_en_lun *cel;
  6609. cam_status status;
  6610. u_long s;
  6611. u_int target;
  6612. u_int lun;
  6613. u_int target_mask;
  6614. u_int our_id;
  6615. int error;
  6616. char channel;
  6617. status = ahc_find_tmode_devs(ahc, sim, ccb, &tstate, &lstate,
  6618. /*notfound_failure*/FALSE);
  6619. if (status != CAM_REQ_CMP) {
  6620. ccb->ccb_h.status = status;
  6621. return;
  6622. }
  6623. if (cam_sim_bus(sim) == 0)
  6624. our_id = ahc->our_id;
  6625. else
  6626. our_id = ahc->our_id_b;
  6627. if (ccb->ccb_h.target_id != our_id) {
  6628. /*
  6629. * our_id represents our initiator ID, or
  6630. * the ID of the first target to have an
  6631. * enabled lun in target mode. There are
  6632. * two cases that may preclude enabling a
  6633. * target id other than our_id.
  6634. *
  6635. * o our_id is for an active initiator role.
  6636. * Since the hardware does not support
  6637. * reselections to the initiator role at
  6638. * anything other than our_id, and our_id
  6639. * is used by the hardware to indicate the
  6640. * ID to use for both select-out and
  6641. * reselect-out operations, the only target
  6642. * ID we can support in this mode is our_id.
  6643. *
  6644. * o The MULTARGID feature is not available and
  6645. * a previous target mode ID has been enabled.
  6646. */
  6647. if ((ahc->features & AHC_MULTIROLE) != 0) {
  6648. if ((ahc->features & AHC_MULTI_TID) != 0
  6649. && (ahc->flags & AHC_INITIATORROLE) != 0) {
  6650. /*
  6651. * Only allow additional targets if
  6652. * the initiator role is disabled.
  6653. * The hardware cannot handle a re-select-in
  6654. * on the initiator id during a re-select-out
  6655. * on a different target id.
  6656. */
  6657. status = CAM_TID_INVALID;
  6658. } else if ((ahc->flags & AHC_INITIATORROLE) != 0
  6659. || ahc->enabled_luns > 0) {
  6660. /*
  6661. * Only allow our target id to change
  6662. * if the initiator role is not configured
  6663. * and there are no enabled luns which
  6664. * are attached to the currently registered
  6665. * scsi id.
  6666. */
  6667. status = CAM_TID_INVALID;
  6668. }
  6669. } else if ((ahc->features & AHC_MULTI_TID) == 0
  6670. && ahc->enabled_luns > 0) {
  6671. status = CAM_TID_INVALID;
  6672. }
  6673. }
  6674. if (status != CAM_REQ_CMP) {
  6675. ccb->ccb_h.status = status;
  6676. return;
  6677. }
  6678. /*
  6679. * We now have an id that is valid.
  6680. * If we aren't in target mode, switch modes.
  6681. */
  6682. if ((ahc->flags & AHC_TARGETROLE) == 0
  6683. && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
  6684. u_long s;
  6685. ahc_flag saved_flags;
  6686. printk("Configuring Target Mode\n");
  6687. ahc_lock(ahc, &s);
  6688. if (LIST_FIRST(&ahc->pending_scbs) != NULL) {
  6689. ccb->ccb_h.status = CAM_BUSY;
  6690. ahc_unlock(ahc, &s);
  6691. return;
  6692. }
  6693. saved_flags = ahc->flags;
  6694. ahc->flags |= AHC_TARGETROLE;
  6695. if ((ahc->features & AHC_MULTIROLE) == 0)
  6696. ahc->flags &= ~AHC_INITIATORROLE;
  6697. ahc_pause(ahc);
  6698. error = ahc_loadseq(ahc);
  6699. if (error != 0) {
  6700. /*
  6701. * Restore original configuration and notify
  6702. * the caller that we cannot support target mode.
  6703. * Since the adapter started out in this
  6704. * configuration, the firmware load will succeed,
  6705. * so there is no point in checking ahc_loadseq's
  6706. * return value.
  6707. */
  6708. ahc->flags = saved_flags;
  6709. (void)ahc_loadseq(ahc);
  6710. ahc_restart(ahc);
  6711. ahc_unlock(ahc, &s);
  6712. ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
  6713. return;
  6714. }
  6715. ahc_restart(ahc);
  6716. ahc_unlock(ahc, &s);
  6717. }
  6718. cel = &ccb->cel;
  6719. target = ccb->ccb_h.target_id;
  6720. lun = ccb->ccb_h.target_lun;
  6721. channel = SIM_CHANNEL(ahc, sim);
  6722. target_mask = 0x01 << target;
  6723. if (channel == 'B')
  6724. target_mask <<= 8;
  6725. if (cel->enable != 0) {
  6726. u_int scsiseq;
6727. /* Are we already enabled? */
  6728. if (lstate != NULL) {
  6729. xpt_print_path(ccb->ccb_h.path);
  6730. printk("Lun already enabled\n");
  6731. ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
  6732. return;
  6733. }
  6734. if (cel->grp6_len != 0
  6735. || cel->grp7_len != 0) {
  6736. /*
  6737. * Don't (yet?) support vendor
  6738. * specific commands.
  6739. */
  6740. ccb->ccb_h.status = CAM_REQ_INVALID;
  6741. printk("Non-zero Group Codes\n");
  6742. return;
  6743. }
  6744. /*
  6745. * Seems to be okay.
  6746. * Setup our data structures.
  6747. */
  6748. if (target != CAM_TARGET_WILDCARD && tstate == NULL) {
  6749. tstate = ahc_alloc_tstate(ahc, target, channel);
  6750. if (tstate == NULL) {
  6751. xpt_print_path(ccb->ccb_h.path);
  6752. printk("Couldn't allocate tstate\n");
  6753. ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
  6754. return;
  6755. }
  6756. }
  6757. lstate = kzalloc(sizeof(*lstate), GFP_ATOMIC);
  6758. if (lstate == NULL) {
  6759. xpt_print_path(ccb->ccb_h.path);
  6760. printk("Couldn't allocate lstate\n");
  6761. ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
  6762. return;
  6763. }
  6764. status = xpt_create_path(&lstate->path, /*periph*/NULL,
  6765. xpt_path_path_id(ccb->ccb_h.path),
  6766. xpt_path_target_id(ccb->ccb_h.path),
  6767. xpt_path_lun_id(ccb->ccb_h.path));
  6768. if (status != CAM_REQ_CMP) {
  6769. kfree(lstate);
  6770. xpt_print_path(ccb->ccb_h.path);
  6771. printk("Couldn't allocate path\n");
  6772. ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
  6773. return;
  6774. }
  6775. SLIST_INIT(&lstate->accept_tios);
  6776. SLIST_INIT(&lstate->immed_notifies);
  6777. ahc_lock(ahc, &s);
  6778. ahc_pause(ahc);
  6779. if (target != CAM_TARGET_WILDCARD) {
  6780. tstate->enabled_luns[lun] = lstate;
  6781. ahc->enabled_luns++;
  6782. if ((ahc->features & AHC_MULTI_TID) != 0) {
  6783. u_int targid_mask;
  6784. targid_mask = ahc_inb(ahc, TARGID)
  6785. | (ahc_inb(ahc, TARGID + 1) << 8);
  6786. targid_mask |= target_mask;
  6787. ahc_outb(ahc, TARGID, targid_mask);
  6788. ahc_outb(ahc, TARGID+1, (targid_mask >> 8));
  6789. ahc_update_scsiid(ahc, targid_mask);
  6790. } else {
  6791. u_int our_id;
  6792. char channel;
  6793. channel = SIM_CHANNEL(ahc, sim);
  6794. our_id = SIM_SCSI_ID(ahc, sim);
  6795. /*
  6796. * This can only happen if selections
  6797. * are not enabled
  6798. */
  6799. if (target != our_id) {
  6800. u_int sblkctl;
  6801. char cur_channel;
  6802. int swap;
  6803. sblkctl = ahc_inb(ahc, SBLKCTL);
  6804. cur_channel = (sblkctl & SELBUSB)
  6805. ? 'B' : 'A';
  6806. if ((ahc->features & AHC_TWIN) == 0)
  6807. cur_channel = 'A';
  6808. swap = cur_channel != channel;
  6809. if (channel == 'A')
  6810. ahc->our_id = target;
  6811. else
  6812. ahc->our_id_b = target;
  6813. if (swap)
  6814. ahc_outb(ahc, SBLKCTL,
  6815. sblkctl ^ SELBUSB);
  6816. ahc_outb(ahc, SCSIID, target);
  6817. if (swap)
  6818. ahc_outb(ahc, SBLKCTL, sblkctl);
  6819. }
  6820. }
  6821. } else
  6822. ahc->black_hole = lstate;
  6823. /* Allow select-in operations */
  6824. if (ahc->black_hole != NULL && ahc->enabled_luns > 0) {
  6825. scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
  6826. scsiseq |= ENSELI;
  6827. ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
  6828. scsiseq = ahc_inb(ahc, SCSISEQ);
  6829. scsiseq |= ENSELI;
  6830. ahc_outb(ahc, SCSISEQ, scsiseq);
  6831. }
  6832. ahc_unpause(ahc);
  6833. ahc_unlock(ahc, &s);
  6834. ccb->ccb_h.status = CAM_REQ_CMP;
  6835. xpt_print_path(ccb->ccb_h.path);
  6836. printk("Lun now enabled for target mode\n");
  6837. } else {
  6838. struct scb *scb;
  6839. int i, empty;
  6840. if (lstate == NULL) {
  6841. ccb->ccb_h.status = CAM_LUN_INVALID;
  6842. return;
  6843. }
  6844. ahc_lock(ahc, &s);
  6845. ccb->ccb_h.status = CAM_REQ_CMP;
  6846. LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
  6847. struct ccb_hdr *ccbh;
  6848. ccbh = &scb->io_ctx->ccb_h;
  6849. if (ccbh->func_code == XPT_CONT_TARGET_IO
  6850. && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){
  6851. printk("CTIO pending\n");
  6852. ccb->ccb_h.status = CAM_REQ_INVALID;
  6853. ahc_unlock(ahc, &s);
  6854. return;
  6855. }
  6856. }
  6857. if (SLIST_FIRST(&lstate->accept_tios) != NULL) {
  6858. printk("ATIOs pending\n");
  6859. ccb->ccb_h.status = CAM_REQ_INVALID;
  6860. }
  6861. if (SLIST_FIRST(&lstate->immed_notifies) != NULL) {
  6862. printk("INOTs pending\n");
  6863. ccb->ccb_h.status = CAM_REQ_INVALID;
  6864. }
  6865. if (ccb->ccb_h.status != CAM_REQ_CMP) {
  6866. ahc_unlock(ahc, &s);
  6867. return;
  6868. }
  6869. xpt_print_path(ccb->ccb_h.path);
  6870. printk("Target mode disabled\n");
  6871. xpt_free_path(lstate->path);
  6872. kfree(lstate);
  6873. ahc_pause(ahc);
  6874. /* Can we clean up the target too? */
  6875. if (target != CAM_TARGET_WILDCARD) {
  6876. tstate->enabled_luns[lun] = NULL;
  6877. ahc->enabled_luns--;
  6878. for (empty = 1, i = 0; i < 8; i++)
  6879. if (tstate->enabled_luns[i] != NULL) {
  6880. empty = 0;
  6881. break;
  6882. }
  6883. if (empty) {
  6884. ahc_free_tstate(ahc, target, channel,
  6885. /*force*/FALSE);
  6886. if (ahc->features & AHC_MULTI_TID) {
  6887. u_int targid_mask;
  6888. targid_mask = ahc_inb(ahc, TARGID)
  6889. | (ahc_inb(ahc, TARGID + 1)
  6890. << 8);
  6891. targid_mask &= ~target_mask;
  6892. ahc_outb(ahc, TARGID, targid_mask);
  6893. ahc_outb(ahc, TARGID+1,
  6894. (targid_mask >> 8));
  6895. ahc_update_scsiid(ahc, targid_mask);
  6896. }
  6897. }
  6898. } else {
  6899. ahc->black_hole = NULL;
  6900. /*
  6901. * We can't allow selections without
  6902. * our black hole device.
  6903. */
  6904. empty = TRUE;
  6905. }
  6906. if (ahc->enabled_luns == 0) {
  6907. /* Disallow select-in */
  6908. u_int scsiseq;
  6909. scsiseq = ahc_inb(ahc, SCSISEQ_TEMPLATE);
  6910. scsiseq &= ~ENSELI;
  6911. ahc_outb(ahc, SCSISEQ_TEMPLATE, scsiseq);
  6912. scsiseq = ahc_inb(ahc, SCSISEQ);
  6913. scsiseq &= ~ENSELI;
  6914. ahc_outb(ahc, SCSISEQ, scsiseq);
  6915. if ((ahc->features & AHC_MULTIROLE) == 0) {
  6916. printk("Configuring Initiator Mode\n");
  6917. ahc->flags &= ~AHC_TARGETROLE;
  6918. ahc->flags |= AHC_INITIATORROLE;
  6919. /*
  6920. * Returning to a configuration that
  6921. * fit previously will always succeed.
  6922. */
  6923. (void)ahc_loadseq(ahc);
  6924. ahc_restart(ahc);
  6925. /*
  6926. * Unpaused. The extra unpause
  6927. * that follows is harmless.
  6928. */
  6929. }
  6930. }
  6931. ahc_unpause(ahc);
  6932. ahc_unlock(ahc, &s);
  6933. }
  6934. }
  6935. static void
  6936. ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
  6937. {
  6938. u_int scsiid_mask;
  6939. u_int scsiid;
  6940. if ((ahc->features & AHC_MULTI_TID) == 0)
  6941. panic("ahc_update_scsiid called on non-multitid unit\n");
  6942. /*
  6943. * Since we will rely on the TARGID mask
  6944. * for selection enables, ensure that OID
  6945. * in SCSIID is not set to some other ID
  6946. * that we don't want to allow selections on.
  6947. */
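/*
 * Illustrative example: if the TARGID mask enables only IDs 2 and 5
 * while OID currently holds 3, OID is rewritten to 2, the lowest
 * enabled ID.
 */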
  6948. if ((ahc->features & AHC_ULTRA2) != 0)
  6949. scsiid = ahc_inb(ahc, SCSIID_ULTRA2);
  6950. else
  6951. scsiid = ahc_inb(ahc, SCSIID);
  6952. scsiid_mask = 0x1 << (scsiid & OID);
  6953. if ((targid_mask & scsiid_mask) == 0) {
  6954. u_int our_id;
  6955. /* ffs counts from 1 */
  6956. our_id = ffs(targid_mask);
  6957. if (our_id == 0)
  6958. our_id = ahc->our_id;
  6959. else
  6960. our_id--;
  6961. scsiid &= TID;
  6962. scsiid |= our_id;
  6963. }
  6964. if ((ahc->features & AHC_ULTRA2) != 0)
  6965. ahc_outb(ahc, SCSIID_ULTRA2, scsiid);
  6966. else
  6967. ahc_outb(ahc, SCSIID, scsiid);
  6968. }
  6969. static void
  6970. ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
  6971. {
  6972. struct target_cmd *cmd;
  6973. /*
  6974. * If the card supports auto-access pause,
  6975. * we can access the card directly regardless
  6976. * of whether it is paused or not.
  6977. */
  6978. if ((ahc->features & AHC_AUTOPAUSE) != 0)
  6979. paused = TRUE;
  6980. ahc_sync_tqinfifo(ahc, BUS_DMASYNC_POSTREAD);
  6981. while ((cmd = &ahc->targetcmds[ahc->tqinfifonext])->cmd_valid != 0) {
  6982. /*
  6983. * Only advance through the queue if we
  6984. * have the resources to process the command.
  6985. */
  6986. if (ahc_handle_target_cmd(ahc, cmd) != 0)
  6987. break;
  6988. cmd->cmd_valid = 0;
  6989. ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
  6990. ahc->shared_data_dmamap,
  6991. ahc_targetcmd_offset(ahc, ahc->tqinfifonext),
  6992. sizeof(struct target_cmd),
  6993. BUS_DMASYNC_PREREAD);
  6994. ahc->tqinfifonext++;
  6995. /*
  6996. * Lazily update our position in the target mode incoming
  6997. * command queue as seen by the sequencer.
  6998. */
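/*
 * Note: the register write (and the pause it may require on
 * controllers without HS_MAILBOX) happens only once per batch of
 * commands rather than after every command.
 */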
  6999. if ((ahc->tqinfifonext & (HOST_TQINPOS - 1)) == 1) {
  7000. if ((ahc->features & AHC_HS_MAILBOX) != 0) {
  7001. u_int hs_mailbox;
  7002. hs_mailbox = ahc_inb(ahc, HS_MAILBOX);
  7003. hs_mailbox &= ~HOST_TQINPOS;
  7004. hs_mailbox |= ahc->tqinfifonext & HOST_TQINPOS;
  7005. ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
  7006. } else {
  7007. if (!paused)
  7008. ahc_pause(ahc);
  7009. ahc_outb(ahc, KERNEL_TQINPOS,
  7010. ahc->tqinfifonext & HOST_TQINPOS);
  7011. if (!paused)
  7012. ahc_unpause(ahc);
  7013. }
  7014. }
  7015. }
  7016. }
  7017. static int
  7018. ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
  7019. {
  7020. struct ahc_tmode_tstate *tstate;
  7021. struct ahc_tmode_lstate *lstate;
  7022. struct ccb_accept_tio *atio;
  7023. uint8_t *byte;
  7024. int initiator;
  7025. int target;
  7026. int lun;
  7027. initiator = SCSIID_TARGET(ahc, cmd->scsiid);
  7028. target = SCSIID_OUR_ID(cmd->scsiid);
  7029. lun = (cmd->identify & MSG_IDENTIFY_LUNMASK);
  7030. byte = cmd->bytes;
  7031. tstate = ahc->enabled_targets[target];
  7032. lstate = NULL;
  7033. if (tstate != NULL)
  7034. lstate = tstate->enabled_luns[lun];
  7035. /*
  7036. * Commands for disabled luns go to the black hole driver.
  7037. */
  7038. if (lstate == NULL)
  7039. lstate = ahc->black_hole;
  7040. atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios);
  7041. if (atio == NULL) {
  7042. ahc->flags |= AHC_TQINFIFO_BLOCKED;
  7043. /*
  7044. * Wait for more ATIOs from the peripheral driver for this lun.
  7045. */
  7046. if (bootverbose)
  7047. printk("%s: ATIOs exhausted\n", ahc_name(ahc));
  7048. return (1);
  7049. } else
  7050. ahc->flags &= ~AHC_TQINFIFO_BLOCKED;
  7051. #if 0
  7052. printk("Incoming command from %d for %d:%d%s\n",
  7053. initiator, target, lun,
  7054. lstate == ahc->black_hole ? "(Black Holed)" : "");
  7055. #endif
  7056. SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle);
  7057. if (lstate == ahc->black_hole) {
  7058. /* Fill in the wildcards */
  7059. atio->ccb_h.target_id = target;
  7060. atio->ccb_h.target_lun = lun;
  7061. }
  7062. /*
  7063. * Package it up and send it off to
7064. * whoever has this lun enabled.
  7065. */
  7066. atio->sense_len = 0;
  7067. atio->init_id = initiator;
  7068. if (byte[0] != 0xFF) {
  7069. /* Tag was included */
  7070. atio->tag_action = *byte++;
  7071. atio->tag_id = *byte++;
  7072. atio->ccb_h.flags = CAM_TAG_ACTION_VALID;
  7073. } else {
  7074. atio->ccb_h.flags = 0;
  7075. }
  7076. byte++;
  7077. /* Okay. Now determine the cdb size based on the command code */
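/*
 * SCSI CDB group codes (top three bits of the opcode): group 0 is a
 * 6 byte CDB, groups 1 and 2 are 10 bytes, group 4 is 16 bytes and
 * group 5 is 12 bytes.  Group 3 and the vendor unique groups have no
 * architected length, so only the opcode byte is copied.
 */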
  7078. switch (*byte >> CMD_GROUP_CODE_SHIFT) {
  7079. case 0:
  7080. atio->cdb_len = 6;
  7081. break;
  7082. case 1:
  7083. case 2:
  7084. atio->cdb_len = 10;
  7085. break;
  7086. case 4:
  7087. atio->cdb_len = 16;
  7088. break;
  7089. case 5:
  7090. atio->cdb_len = 12;
  7091. break;
  7092. case 3:
  7093. default:
  7094. /* Only copy the opcode. */
  7095. atio->cdb_len = 1;
  7096. printk("Reserved or VU command code type encountered\n");
  7097. break;
  7098. }
  7099. memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
  7100. atio->ccb_h.status |= CAM_CDB_RECVD;
  7101. if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) {
  7102. /*
  7103. * We weren't allowed to disconnect.
  7104. * We're hanging on the bus until a
  7105. * continue target I/O comes in response
  7106. * to this accept tio.
  7107. */
  7108. #if 0
  7109. printk("Received Immediate Command %d:%d:%d - %p\n",
  7110. initiator, target, lun, ahc->pending_device);
  7111. #endif
  7112. ahc->pending_device = lstate;
  7113. ahc_freeze_ccb((union ccb *)atio);
  7114. atio->ccb_h.flags |= CAM_DIS_DISCONNECT;
  7115. }
  7116. xpt_done((union ccb*)atio);
  7117. return (0);
  7118. }
  7119. #endif