libata-core.c 170 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  libata-core.c - helper library for ATA
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 *  Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 *
 *  libata is essentially a library of internal helper functions for
 *  low-level ATA host controller drivers.  As such, the API/ABI is
 *  likely to change as new drivers are added and updated.
 *  Do not depend on ABI/API stability.
 */
  27. #include <linux/kernel.h>
  28. #include <linux/module.h>
  29. #include <linux/pci.h>
  30. #include <linux/init.h>
  31. #include <linux/list.h>
  32. #include <linux/mm.h>
  33. #include <linux/spinlock.h>
  34. #include <linux/blkdev.h>
  35. #include <linux/delay.h>
  36. #include <linux/timer.h>
  37. #include <linux/time.h>
  38. #include <linux/interrupt.h>
  39. #include <linux/completion.h>
  40. #include <linux/suspend.h>
  41. #include <linux/workqueue.h>
  42. #include <linux/scatterlist.h>
  43. #include <linux/io.h>
  44. #include <linux/log2.h>
  45. #include <linux/slab.h>
  46. #include <linux/glob.h>
  47. #include <scsi/scsi.h>
  48. #include <scsi/scsi_cmnd.h>
  49. #include <scsi/scsi_host.h>
  50. #include <linux/libata.h>
  51. #include <asm/byteorder.h>
  52. #include <linux/unaligned.h>
  53. #include <linux/cdrom.h>
  54. #include <linux/ratelimit.h>
  55. #include <linux/leds.h>
  56. #include <linux/pm_runtime.h>
  57. #include <linux/platform_device.h>
  58. #include <asm/setup.h>
  59. #define CREATE_TRACE_POINTS
  60. #include <trace/events/libata.h>
  61. #include "libata.h"
  62. #include "libata-transport.h"
  63. const struct ata_port_operations ata_base_port_ops = {
  64. .prereset = ata_std_prereset,
  65. .postreset = ata_std_postreset,
  66. .error_handler = ata_std_error_handler,
  67. .sched_eh = ata_std_sched_eh,
  68. .end_eh = ata_std_end_eh,
  69. };
  70. static unsigned int ata_dev_init_params(struct ata_device *dev,
  71. u16 heads, u16 sectors);
  72. static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
  73. static void ata_dev_xfermask(struct ata_device *dev);
  74. static unsigned int ata_dev_quirks(const struct ata_device *dev);
  75. static DEFINE_IDA(ata_ida);
  76. #ifdef CONFIG_ATA_FORCE
  77. struct ata_force_param {
  78. const char *name;
  79. u8 cbl;
  80. u8 spd_limit;
  81. unsigned int xfer_mask;
  82. unsigned int quirk_on;
  83. unsigned int quirk_off;
  84. unsigned int pflags_on;
  85. u16 lflags_on;
  86. u16 lflags_off;
  87. };
  88. struct ata_force_ent {
  89. int port;
  90. int device;
  91. struct ata_force_param param;
  92. };
  93. static struct ata_force_ent *ata_force_tbl;
  94. static int ata_force_tbl_size;
  95. static char ata_force_param_buf[COMMAND_LINE_SIZE] __initdata;
  96. /* param_buf is thrown away after initialization, disallow read */
  97. module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
  98. MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/admin-guide/kernel-parameters.rst for details)");
  99. #endif
  100. static int atapi_enabled = 1;
  101. module_param(atapi_enabled, int, 0444);
  102. MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");
  103. static int atapi_dmadir = 0;
  104. module_param(atapi_dmadir, int, 0444);
  105. MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");
  106. int atapi_passthru16 = 1;
  107. module_param(atapi_passthru16, int, 0444);
  108. MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");
  109. int libata_fua = 0;
  110. module_param_named(fua, libata_fua, int, 0444);
  111. MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");
  112. static int ata_ignore_hpa;
  113. module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
  114. MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
  115. static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
  116. module_param_named(dma, libata_dma_mask, int, 0444);
  117. MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
  118. static int ata_probe_timeout;
  119. module_param(ata_probe_timeout, int, 0444);
  120. MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
  121. int libata_noacpi = 0;
  122. module_param_named(noacpi, libata_noacpi, int, 0444);
  123. MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");
  124. int libata_allow_tpm = 0;
  125. module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
  126. MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
  127. static int atapi_an;
  128. module_param(atapi_an, int, 0444);
  129. MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)");
  130. MODULE_AUTHOR("Jeff Garzik");
  131. MODULE_DESCRIPTION("Library module for ATA devices");
  132. MODULE_LICENSE("GPL");
  133. MODULE_VERSION(DRV_VERSION);
  134. static inline bool ata_dev_print_info(const struct ata_device *dev)
  135. {
  136. struct ata_eh_context *ehc = &dev->link->eh_context;
  137. return ehc->i.flags & ATA_EHI_PRINTINFO;
  138. }
/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	Walks the links of @ap.  ATA_LITER_HOST_FIRST visits the host link
 *	before any PMP fan-out links, ATA_LITER_PMP_FIRST visits the PMP
 *	links first, and ATA_LITER_EDGE skips the host link when a PMP is
 *	attached.  The slave link, when present, is visited last.
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next link, or NULL when iteration is complete.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			fallthrough;
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			fallthrough;
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			fallthrough;
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link; advance to the next fan-out link */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	/* PMP links exhausted; PMP_FIRST still owes the host link */
	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}
EXPORT_SYMBOL_GPL(ata_link_next);
/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	Walks the devices of @link forward (ATA_DITER_ENABLED / ATA_DITER_ALL)
 *	or backward (the *_REVERSE modes).  The ENABLED modes skip devices
 *	for which ata_dev_enabled() is false.
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next device, or NULL when iteration is complete.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	/* ENABLED modes skip devices that are not enabled */
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}
EXPORT_SYMBOL_GPL(ata_dev_next);
  242. /**
  243. * ata_dev_phys_link - find physical link for a device
  244. * @dev: ATA device to look up physical link for
  245. *
  246. * Look up physical link which @dev is attached to. Note that
  247. * this is different from @dev->link only when @dev is on slave
  248. * link. For all other cases, it's the same as @dev->link.
  249. *
  250. * LOCKING:
  251. * Don't care.
  252. *
  253. * RETURNS:
  254. * Pointer to the found physical link.
  255. */
  256. struct ata_link *ata_dev_phys_link(struct ata_device *dev)
  257. {
  258. struct ata_port *ap = dev->link->ap;
  259. if (!ap->slave_link)
  260. return dev->link;
  261. if (!dev->devno)
  262. return &ap->link;
  263. return ap->slave_link;
  264. }
  265. #ifdef CONFIG_ATA_FORCE
  266. /**
  267. * ata_force_cbl - force cable type according to libata.force
  268. * @ap: ATA port of interest
  269. *
  270. * Force cable type according to libata.force and whine about it.
  271. * The last entry which has matching port number is used, so it
  272. * can be specified as part of device force parameters. For
  273. * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
  274. * same effect.
  275. *
  276. * LOCKING:
  277. * EH context.
  278. */
  279. void ata_force_cbl(struct ata_port *ap)
  280. {
  281. int i;
  282. for (i = ata_force_tbl_size - 1; i >= 0; i--) {
  283. const struct ata_force_ent *fe = &ata_force_tbl[i];
  284. if (fe->port != -1 && fe->port != ap->print_id)
  285. continue;
  286. if (fe->param.cbl == ATA_CBL_NONE)
  287. continue;
  288. ap->cbl = fe->param.cbl;
  289. ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
  290. return;
  291. }
  292. }
  293. /**
  294. * ata_force_pflags - force port flags according to libata.force
  295. * @ap: ATA port of interest
  296. *
  297. * Force port flags according to libata.force and whine about it.
  298. *
  299. * LOCKING:
  300. * EH context.
  301. */
  302. static void ata_force_pflags(struct ata_port *ap)
  303. {
  304. int i;
  305. for (i = ata_force_tbl_size - 1; i >= 0; i--) {
  306. const struct ata_force_ent *fe = &ata_force_tbl[i];
  307. if (fe->port != -1 && fe->port != ap->print_id)
  308. continue;
  309. /* let pflags stack */
  310. if (fe->param.pflags_on) {
  311. ap->pflags |= fe->param.pflags_on;
  312. ata_port_notice(ap,
  313. "FORCE: port flag 0x%x forced -> 0x%x\n",
  314. fe->param.pflags_on, ap->pflags);
  315. }
  316. }
  317. }
  318. /**
  319. * ata_force_link_limits - force link limits according to libata.force
  320. * @link: ATA link of interest
  321. *
  322. * Force link flags and SATA spd limit according to libata.force
  323. * and whine about it. When only the port part is specified
  324. * (e.g. 1:), the limit applies to all links connected to both
  325. * the host link and all fan-out ports connected via PMP. If the
  326. * device part is specified as 0 (e.g. 1.00:), it specifies the
  327. * first fan-out link not the host link. Device number 15 always
  328. * points to the host link whether PMP is attached or not. If the
  329. * controller has slave link, device number 16 points to it.
  330. *
  331. * LOCKING:
  332. * EH context.
  333. */
  334. static void ata_force_link_limits(struct ata_link *link)
  335. {
  336. bool did_spd = false;
  337. int linkno = link->pmp;
  338. int i;
  339. if (ata_is_host_link(link))
  340. linkno += 15;
  341. for (i = ata_force_tbl_size - 1; i >= 0; i--) {
  342. const struct ata_force_ent *fe = &ata_force_tbl[i];
  343. if (fe->port != -1 && fe->port != link->ap->print_id)
  344. continue;
  345. if (fe->device != -1 && fe->device != linkno)
  346. continue;
  347. /* only honor the first spd limit */
  348. if (!did_spd && fe->param.spd_limit) {
  349. link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
  350. ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
  351. fe->param.name);
  352. did_spd = true;
  353. }
  354. /* let lflags stack */
  355. if (fe->param.lflags_on) {
  356. link->flags |= fe->param.lflags_on;
  357. ata_link_notice(link,
  358. "FORCE: link flag 0x%x forced -> 0x%x\n",
  359. fe->param.lflags_on, link->flags);
  360. }
  361. if (fe->param.lflags_off) {
  362. link->flags &= ~fe->param.lflags_off;
  363. ata_link_notice(link,
  364. "FORCE: link flag 0x%x cleared -> 0x%x\n",
  365. fe->param.lflags_off, link->flags);
  366. }
  367. }
  368. }
  369. /**
  370. * ata_force_xfermask - force xfermask according to libata.force
  371. * @dev: ATA device of interest
  372. *
  373. * Force xfer_mask according to libata.force and whine about it.
  374. * For consistency with link selection, device number 15 selects
  375. * the first device connected to the host link.
  376. *
  377. * LOCKING:
  378. * EH context.
  379. */
  380. static void ata_force_xfermask(struct ata_device *dev)
  381. {
  382. int devno = dev->link->pmp + dev->devno;
  383. int alt_devno = devno;
  384. int i;
  385. /* allow n.15/16 for devices attached to host port */
  386. if (ata_is_host_link(dev->link))
  387. alt_devno += 15;
  388. for (i = ata_force_tbl_size - 1; i >= 0; i--) {
  389. const struct ata_force_ent *fe = &ata_force_tbl[i];
  390. unsigned int pio_mask, mwdma_mask, udma_mask;
  391. if (fe->port != -1 && fe->port != dev->link->ap->print_id)
  392. continue;
  393. if (fe->device != -1 && fe->device != devno &&
  394. fe->device != alt_devno)
  395. continue;
  396. if (!fe->param.xfer_mask)
  397. continue;
  398. ata_unpack_xfermask(fe->param.xfer_mask,
  399. &pio_mask, &mwdma_mask, &udma_mask);
  400. if (udma_mask)
  401. dev->udma_mask = udma_mask;
  402. else if (mwdma_mask) {
  403. dev->udma_mask = 0;
  404. dev->mwdma_mask = mwdma_mask;
  405. } else {
  406. dev->udma_mask = 0;
  407. dev->mwdma_mask = 0;
  408. dev->pio_mask = pio_mask;
  409. }
  410. ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
  411. fe->param.name);
  412. return;
  413. }
  414. }
  415. /**
  416. * ata_force_quirks - force quirks according to libata.force
  417. * @dev: ATA device of interest
  418. *
  419. * Force quirks according to libata.force and whine about it.
  420. * For consistency with link selection, device number 15 selects
  421. * the first device connected to the host link.
  422. *
  423. * LOCKING:
  424. * EH context.
  425. */
  426. static void ata_force_quirks(struct ata_device *dev)
  427. {
  428. int devno = dev->link->pmp + dev->devno;
  429. int alt_devno = devno;
  430. int i;
  431. /* allow n.15/16 for devices attached to host port */
  432. if (ata_is_host_link(dev->link))
  433. alt_devno += 15;
  434. for (i = 0; i < ata_force_tbl_size; i++) {
  435. const struct ata_force_ent *fe = &ata_force_tbl[i];
  436. if (fe->port != -1 && fe->port != dev->link->ap->print_id)
  437. continue;
  438. if (fe->device != -1 && fe->device != devno &&
  439. fe->device != alt_devno)
  440. continue;
  441. if (!(~dev->quirks & fe->param.quirk_on) &&
  442. !(dev->quirks & fe->param.quirk_off))
  443. continue;
  444. dev->quirks |= fe->param.quirk_on;
  445. dev->quirks &= ~fe->param.quirk_off;
  446. ata_dev_notice(dev, "FORCE: modified (%s)\n",
  447. fe->param.name);
  448. }
  449. }
#else
/*
 * CONFIG_ATA_FORCE=n: the libata.force parameter machinery is compiled
 * out, so the force helpers collapse to no-ops.
 */
static inline void ata_force_pflags(struct ata_port *ap) { }
static inline void ata_force_link_limits(struct ata_link *link) { }
static inline void ata_force_xfermask(struct ata_device *dev) { }
static inline void ata_force_quirks(struct ata_device *dev) { }
#endif
  456. /**
  457. * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
  458. * @opcode: SCSI opcode
  459. *
  460. * Determine ATAPI command type from @opcode.
  461. *
  462. * LOCKING:
  463. * None.
  464. *
  465. * RETURNS:
  466. * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
  467. */
  468. int atapi_cmd_type(u8 opcode)
  469. {
  470. switch (opcode) {
  471. case GPCMD_READ_10:
  472. case GPCMD_READ_12:
  473. return ATAPI_READ;
  474. case GPCMD_WRITE_10:
  475. case GPCMD_WRITE_12:
  476. case GPCMD_WRITE_AND_VERIFY_10:
  477. return ATAPI_WRITE;
  478. case GPCMD_READ_CD:
  479. case GPCMD_READ_CD_MSF:
  480. return ATAPI_READ_CD;
  481. case ATA_16:
  482. case ATA_12:
  483. if (atapi_passthru16)
  484. return ATAPI_PASS_THRU;
  485. fallthrough;
  486. default:
  487. return ATAPI_MISC;
  488. }
  489. }
  490. EXPORT_SYMBOL_GPL(atapi_cmd_type);
/*
 * Read/write command opcode table.  Indexed by
 * base + fua(4) + lba48(2) + write(1), where base is 0 for multi-sector
 * PIO, 8 for plain PIO and 16 for DMA (see ata_set_rwcmd_protocol()).
 * A zero entry means no command exists for that combination; the only
 * valid FUA entry is the DMA LBA48 write (ATA_CMD_WRITE_FUA_EXT).
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	0,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
  520. /**
  521. * ata_set_rwcmd_protocol - set taskfile r/w command and protocol
  522. * @dev: target device for the taskfile
  523. * @tf: taskfile to examine and configure
  524. *
  525. * Examine the device configuration and tf->flags to determine
  526. * the proper read/write command and protocol to use for @tf.
  527. *
  528. * LOCKING:
  529. * caller.
  530. */
  531. static bool ata_set_rwcmd_protocol(struct ata_device *dev,
  532. struct ata_taskfile *tf)
  533. {
  534. u8 cmd;
  535. int index, fua, lba48, write;
  536. fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
  537. lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
  538. write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
  539. if (dev->flags & ATA_DFLAG_PIO) {
  540. tf->protocol = ATA_PROT_PIO;
  541. index = dev->multi_count ? 0 : 8;
  542. } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
  543. /* Unable to use DMA due to host limitation */
  544. tf->protocol = ATA_PROT_PIO;
  545. index = dev->multi_count ? 0 : 8;
  546. } else {
  547. tf->protocol = ATA_PROT_DMA;
  548. index = 16;
  549. }
  550. cmd = ata_rw_cmds[index + fua + lba48 + write];
  551. if (!cmd)
  552. return false;
  553. tf->command = cmd;
  554. return true;
  555. }
  556. /**
  557. * ata_tf_read_block - Read block address from ATA taskfile
  558. * @tf: ATA taskfile of interest
  559. * @dev: ATA device @tf belongs to
  560. *
  561. * LOCKING:
  562. * None.
  563. *
  564. * Read block address from @tf. This function can handle all
  565. * three address formats - LBA, LBA48 and CHS. tf->protocol and
  566. * flags select the address format to use.
  567. *
  568. * RETURNS:
  569. * Block address read from @tf.
  570. */
  571. u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
  572. {
  573. u64 block = 0;
  574. if (tf->flags & ATA_TFLAG_LBA) {
  575. if (tf->flags & ATA_TFLAG_LBA48) {
  576. block |= (u64)tf->hob_lbah << 40;
  577. block |= (u64)tf->hob_lbam << 32;
  578. block |= (u64)tf->hob_lbal << 24;
  579. } else
  580. block |= (tf->device & 0xf) << 24;
  581. block |= tf->lbah << 16;
  582. block |= tf->lbam << 8;
  583. block |= tf->lbal;
  584. } else {
  585. u32 cyl, head, sect;
  586. cyl = tf->lbam | (tf->lbah << 8);
  587. head = tf->device & 0xf;
  588. sect = tf->lbal;
  589. if (!sect) {
  590. ata_dev_warn(dev,
  591. "device reported invalid CHS sector 0\n");
  592. return U64_MAX;
  593. }
  594. block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
  595. }
  596. return block;
  597. }
  598. /*
  599. * Set a taskfile command duration limit index.
  600. */
  601. static inline void ata_set_tf_cdl(struct ata_queued_cmd *qc, int cdl)
  602. {
  603. struct ata_taskfile *tf = &qc->tf;
  604. if (tf->protocol == ATA_PROT_NCQ)
  605. tf->auxiliary |= cdl;
  606. else
  607. tf->feature |= cdl;
  608. /*
  609. * Mark this command as having a CDL and request the result
  610. * task file so that we can inspect the sense data available
  611. * bit on completion.
  612. */
  613. qc->flags |= ATA_QCFLAG_HAS_CDL | ATA_QCFLAG_RESULT_TF;
  614. }
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@qc: Metadata associated with the taskfile to build
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@cdl: Command duration limit index
 *	@class: IO priority class
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile for the command @qc for read/write request described
 *	by @block, @n_block, @tf_flags and @class.  Picks NCQ when enabled on
 *	the device, then LBA28/LBA48, then CHS as a last resort.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block,
		    unsigned int tf_flags, int cdl, int class)
{
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;

	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* NCQ tag is carried in nsect, shifted left by 3 */
		tf->nsect = qc->hw_tag << 3;
		/* for NCQ the sector count goes in the feature fields */
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = ATA_LBA;
		/* FUA for NCQ is requested via device register bit 7 */
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;

		if (dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED &&
		    class == IOPRIO_CLASS_RT)
			tf->hob_nsect |= ATA_PRIO_HIGH << ATA_SHIFT_PRIO;

		if ((dev->flags & ATA_DFLAG_CDL_ENABLED) && cdl)
			ata_set_tf_cdl(qc, cdl);

	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if ((dev->flags & ATA_DFLAG_CDL_ENABLED) && cdl)
			ata_set_tf_cdl(qc, cdl);

		/* Both FUA writes and a CDL index require 48-bit commands */
		if (!(tf->flags & ATA_TFLAG_FUA) &&
		    !(qc->flags & ATA_QCFLAG_HAS_CDL) &&
		    lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else {
			/* request too large even for LBA48 */
			return -ERANGE;
		}

		if (unlikely(!ata_set_rwcmd_protocol(dev, tf)))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(!ata_set_rwcmd_protocol(dev, tf)))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
  726. /**
  727. * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
  728. * @pio_mask: pio_mask
  729. * @mwdma_mask: mwdma_mask
  730. * @udma_mask: udma_mask
  731. *
  732. * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
  733. * unsigned int xfer_mask.
  734. *
  735. * LOCKING:
  736. * None.
  737. *
  738. * RETURNS:
  739. * Packed xfer_mask.
  740. */
  741. unsigned int ata_pack_xfermask(unsigned int pio_mask,
  742. unsigned int mwdma_mask,
  743. unsigned int udma_mask)
  744. {
  745. return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
  746. ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
  747. ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
  748. }
  749. EXPORT_SYMBOL_GPL(ata_pack_xfermask);
  750. /**
  751. * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
  752. * @xfer_mask: xfer_mask to unpack
  753. * @pio_mask: resulting pio_mask
  754. * @mwdma_mask: resulting mwdma_mask
  755. * @udma_mask: resulting udma_mask
  756. *
  757. * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
  758. * Any NULL destination masks will be ignored.
  759. */
  760. void ata_unpack_xfermask(unsigned int xfer_mask, unsigned int *pio_mask,
  761. unsigned int *mwdma_mask, unsigned int *udma_mask)
  762. {
  763. if (pio_mask)
  764. *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
  765. if (mwdma_mask)
  766. *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
  767. if (udma_mask)
  768. *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
  769. }
/*
 * Maps each transfer-mode group to its bit range in a packed xfer_mask
 * (shift + number of modes) and the corresponding base XFER_* value.
 * Terminated by a sentinel entry with shift == -1.
 */
static const struct ata_xfer_ent {
	int shift, bits;	/* bit position and count within xfer_mask */
	u8 base;		/* XFER_* value of the group's mode 0 */
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};
  779. /**
  780. * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
  781. * @xfer_mask: xfer_mask of interest
  782. *
  783. * Return matching XFER_* value for @xfer_mask. Only the highest
  784. * bit of @xfer_mask is considered.
  785. *
  786. * LOCKING:
  787. * None.
  788. *
  789. * RETURNS:
  790. * Matching XFER_* value, 0xff if no match found.
  791. */
  792. u8 ata_xfer_mask2mode(unsigned int xfer_mask)
  793. {
  794. int highbit = fls(xfer_mask) - 1;
  795. const struct ata_xfer_ent *ent;
  796. for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
  797. if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
  798. return ent->base + highbit - ent->shift;
  799. return 0xff;
  800. }
  801. EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
  802. /**
  803. * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
  804. * @xfer_mode: XFER_* of interest
  805. *
  806. * Return matching xfer_mask for @xfer_mode.
  807. *
  808. * LOCKING:
  809. * None.
  810. *
  811. * RETURNS:
  812. * Matching xfer_mask, 0 if no match found.
  813. */
  814. unsigned int ata_xfer_mode2mask(u8 xfer_mode)
  815. {
  816. const struct ata_xfer_ent *ent;
  817. for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
  818. if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
  819. return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
  820. & ~((1 << ent->shift) - 1);
  821. return 0;
  822. }
  823. EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
  824. /**
  825. * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
  826. * @xfer_mode: XFER_* of interest
  827. *
  828. * Return matching xfer_shift for @xfer_mode.
  829. *
  830. * LOCKING:
  831. * None.
  832. *
  833. * RETURNS:
  834. * Matching xfer_shift, -1 if no match found.
  835. */
  836. int ata_xfer_mode2shift(u8 xfer_mode)
  837. {
  838. const struct ata_xfer_ent *ent;
  839. for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
  840. if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
  841. return ent->shift;
  842. return -1;
  843. }
  844. EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
  845. /**
  846. * ata_mode_string - convert xfer_mask to string
  847. * @xfer_mask: mask of bits supported; only highest bit counts.
  848. *
  849. * Determine string which represents the highest speed
  850. * (highest bit in @modemask).
  851. *
  852. * LOCKING:
  853. * None.
  854. *
  855. * RETURNS:
  856. * Constant C string representing highest speed listed in
  857. * @mode_mask, or the constant C string "<n/a>".
  858. */
  859. const char *ata_mode_string(unsigned int xfer_mask)
  860. {
  861. static const char * const xfer_mode_str[] = {
  862. "PIO0",
  863. "PIO1",
  864. "PIO2",
  865. "PIO3",
  866. "PIO4",
  867. "PIO5",
  868. "PIO6",
  869. "MWDMA0",
  870. "MWDMA1",
  871. "MWDMA2",
  872. "MWDMA3",
  873. "MWDMA4",
  874. "UDMA/16",
  875. "UDMA/25",
  876. "UDMA/33",
  877. "UDMA/44",
  878. "UDMA/66",
  879. "UDMA/100",
  880. "UDMA/133",
  881. "UDMA7",
  882. };
  883. int highbit;
  884. highbit = fls(xfer_mask) - 1;
  885. if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
  886. return xfer_mode_str[highbit];
  887. return "<n/a>";
  888. }
  889. EXPORT_SYMBOL_GPL(ata_mode_string);
  890. const char *sata_spd_string(unsigned int spd)
  891. {
  892. static const char * const spd_str[] = {
  893. "1.5 Gbps",
  894. "3.0 Gbps",
  895. "6.0 Gbps",
  896. };
  897. if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
  898. return "<unknown>";
  899. return spd_str[spd - 1];
  900. }
  901. /**
  902. * ata_dev_classify - determine device type based on ATA-spec signature
  903. * @tf: ATA taskfile register set for device to be identified
  904. *
  905. * Determine from taskfile register contents whether a device is
  906. * ATA or ATAPI, as per "Signature and persistence" section
  907. * of ATA/PI spec (volume 1, sect 5.14).
  908. *
  909. * LOCKING:
  910. * None.
  911. *
  912. * RETURNS:
  913. * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
  914. * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN the event of failure.
  915. */
  916. unsigned int ata_dev_classify(const struct ata_taskfile *tf)
  917. {
  918. /* Apple's open source Darwin code hints that some devices only
  919. * put a proper signature into the LBA mid/high registers,
  920. * So, we only check those. It's sufficient for uniqueness.
  921. *
  922. * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
  923. * signatures for ATA and ATAPI devices attached on SerialATA,
  924. * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
  925. * spec has never mentioned about using different signatures
  926. * for ATA/ATAPI devices. Then, Serial ATA II: Port
  927. * Multiplier specification began to use 0x69/0x96 to identify
  928. * port multpliers and 0x3c/0xc3 to identify SEMB device.
  929. * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
  930. * 0x69/0x96 shortly and described them as reserved for
  931. * SerialATA.
  932. *
  933. * We follow the current spec and consider that 0x69/0x96
  934. * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
  935. * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
  936. * SEMB signature. This is worked around in
  937. * ata_dev_read_id().
  938. */
  939. if (tf->lbam == 0 && tf->lbah == 0)
  940. return ATA_DEV_ATA;
  941. if (tf->lbam == 0x14 && tf->lbah == 0xeb)
  942. return ATA_DEV_ATAPI;
  943. if (tf->lbam == 0x69 && tf->lbah == 0x96)
  944. return ATA_DEV_PMP;
  945. if (tf->lbam == 0x3c && tf->lbah == 0xc3)
  946. return ATA_DEV_SEMB;
  947. if (tf->lbam == 0xcd && tf->lbah == 0xab)
  948. return ATA_DEV_ZAC;
  949. return ATA_DEV_UNKNOWN;
  950. }
  951. EXPORT_SYMBOL_GPL(ata_dev_classify);
  952. /**
  953. * ata_id_string - Convert IDENTIFY DEVICE page into string
  954. * @id: IDENTIFY DEVICE results we will examine
  955. * @s: string into which data is output
  956. * @ofs: offset into identify device page
  957. * @len: length of string to return. must be an even number.
  958. *
  959. * The strings in the IDENTIFY DEVICE page are broken up into
  960. * 16-bit chunks. Run through the string, and output each
  961. * 8-bit chunk linearly, regardless of platform.
  962. *
  963. * LOCKING:
  964. * caller.
  965. */
  966. void ata_id_string(const u16 *id, unsigned char *s,
  967. unsigned int ofs, unsigned int len)
  968. {
  969. unsigned int c;
  970. BUG_ON(len & 1);
  971. while (len > 0) {
  972. c = id[ofs] >> 8;
  973. *s = c;
  974. s++;
  975. c = id[ofs] & 0xff;
  976. *s = c;
  977. s++;
  978. ofs++;
  979. len -= 2;
  980. }
  981. }
  982. EXPORT_SYMBOL_GPL(ata_id_string);
  983. /**
  984. * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
  985. * @id: IDENTIFY DEVICE results we will examine
  986. * @s: string into which data is output
  987. * @ofs: offset into identify device page
  988. * @len: length of string to return. must be an odd number.
  989. *
  990. * This function is identical to ata_id_string except that it
  991. * trims trailing spaces and terminates the resulting string with
  992. * null. @len must be actual maximum length (even number) + 1.
  993. *
  994. * LOCKING:
  995. * caller.
  996. */
  997. void ata_id_c_string(const u16 *id, unsigned char *s,
  998. unsigned int ofs, unsigned int len)
  999. {
  1000. unsigned char *p;
  1001. ata_id_string(id, s, ofs, len - 1);
  1002. p = s + strnlen(s, len - 1);
  1003. while (p > s && p[-1] == ' ')
  1004. p--;
  1005. *p = '\0';
  1006. }
  1007. EXPORT_SYMBOL_GPL(ata_id_c_string);
  1008. static u64 ata_id_n_sectors(const u16 *id)
  1009. {
  1010. if (ata_id_has_lba(id)) {
  1011. if (ata_id_has_lba48(id))
  1012. return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
  1013. return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
  1014. }
  1015. if (ata_id_current_chs_valid(id))
  1016. return (u32)id[ATA_ID_CUR_CYLS] * (u32)id[ATA_ID_CUR_HEADS] *
  1017. (u32)id[ATA_ID_CUR_SECTORS];
  1018. return (u32)id[ATA_ID_CYLS] * (u32)id[ATA_ID_HEADS] *
  1019. (u32)id[ATA_ID_SECTORS];
  1020. }
  1021. u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
  1022. {
  1023. u64 sectors = 0;
  1024. sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
  1025. sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
  1026. sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
  1027. sectors |= (tf->lbah & 0xff) << 16;
  1028. sectors |= (tf->lbam & 0xff) << 8;
  1029. sectors |= (tf->lbal & 0xff);
  1030. return sectors;
  1031. }
  1032. u64 ata_tf_to_lba(const struct ata_taskfile *tf)
  1033. {
  1034. u64 sectors = 0;
  1035. sectors |= (tf->device & 0x0f) << 24;
  1036. sectors |= (tf->lbah & 0xff) << 16;
  1037. sectors |= (tf->lbam & 0xff) << 8;
  1038. sectors |= (tf->lbal & 0xff);
  1039. return sectors;
  1040. }
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.  The result (native max LBA + 1) is stored in
 *	@max_sectors on success.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	/* pick the LBA48 variant when the device supports it */
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		/* device abort -> -EACCES so callers can distinguish it */
		if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* result registers hold the max LBA; sector count is LBA + 1 */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	/* some devices report native max one sector too high */
	if (dev->quirks & ATA_QUIRK_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors using SET MAX (EXT for
 *	LBA48-capable devices).
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* the command takes the max LBA, which is sector count - 1 */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: top nibble goes into the device register */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		/* abort or ID-not-found -> treat as access denied */
		if (err_mask == AC_ERR_DEV &&
		    (tf.error & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
/**
 *	ata_hpa_resize	-	Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	bool print_info = ata_dev_print_info(dev);
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->quirks & ATA_QUIRK_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			/* sticky: don't retry HPA on later revalidations */
			dev->quirks |= ATA_QUIRK_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		/* only log the size mismatch; no unlock was requested */
		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->quirks |= ATA_QUIRK_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@dev: device from which the information is fetched
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */
static inline void ata_dump_id(struct ata_device *dev, const u16 *id)
{
	ata_dev_dbg(dev,
		"49==0x%04x  53==0x%04x  63==0x%04x  64==0x%04x  75==0x%04x\n"
		"80==0x%04x  81==0x%04x  82==0x%04x  83==0x%04x  84==0x%04x\n"
		"88==0x%04x  93==0x%04x\n",
		id[49], id[53], id[63], id[64], id[75], id[80],
		id[81], id[82], id[83], id[84], id[88], id[93]);
}
  1237. /**
  1238. * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
  1239. * @id: IDENTIFY data to compute xfer mask from
  1240. *
  1241. * Compute the xfermask for this device. This is not as trivial
  1242. * as it seems if we must consider early devices correctly.
  1243. *
  1244. * FIXME: pre IDE drive timing (do we care ?).
  1245. *
  1246. * LOCKING:
  1247. * None.
  1248. *
  1249. * RETURNS:
  1250. * Computed xfermask
  1251. */
  1252. unsigned int ata_id_xfermask(const u16 *id)
  1253. {
  1254. unsigned int pio_mask, mwdma_mask, udma_mask;
  1255. /* Usual case. Word 53 indicates word 64 is valid */
  1256. if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
  1257. pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
  1258. pio_mask <<= 3;
  1259. pio_mask |= 0x7;
  1260. } else {
  1261. /* If word 64 isn't valid then Word 51 high byte holds
  1262. * the PIO timing number for the maximum. Turn it into
  1263. * a mask.
  1264. */
  1265. u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
  1266. if (mode < 5) /* Valid PIO range */
  1267. pio_mask = (2 << mode) - 1;
  1268. else
  1269. pio_mask = 1;
  1270. /* But wait.. there's more. Design your standards by
  1271. * committee and you too can get a free iordy field to
  1272. * process. However it is the speeds not the modes that
  1273. * are supported... Note drivers using the timing API
  1274. * will get this right anyway
  1275. */
  1276. }
  1277. mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
  1278. if (ata_id_is_cfa(id)) {
  1279. /*
  1280. * Process compact flash extended modes
  1281. */
  1282. int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
  1283. int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;
  1284. if (pio)
  1285. pio_mask |= (1 << 5);
  1286. if (pio > 1)
  1287. pio_mask |= (1 << 6);
  1288. if (dma)
  1289. mwdma_mask |= (1 << 3);
  1290. if (dma > 1)
  1291. mwdma_mask |= (1 << 4);
  1292. }
  1293. udma_mask = 0;
  1294. if (id[ATA_ID_FIELD_VALID] & (1 << 2))
  1295. udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
  1296. return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
  1297. }
  1298. EXPORT_SYMBOL_GPL(ata_id_xfermask);
  1299. static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
  1300. {
  1301. struct completion *waiting = qc->private_data;
  1302. complete(waiting);
  1303. }
/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout. @tf contains
 *	the command on entry and the result on return. Timeout and error
 *	conditions are reported via the return value. No recovery action
 *	is taken after a command times out. It is the caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_exec_internal(struct ata_device *dev, struct ata_taskfile *tf,
			       const u8 *cdb, enum dma_data_direction dma_dir,
			       void *buf, unsigned int buflen,
			       unsigned int timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	struct scatterlist sgl;
	unsigned int preempted_tag;
	u32 preempted_sactive;
	u64 preempted_qc_active;
	int preempted_nr_active_links;
	bool auto_timeout = false;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	/* A data transfer direction requires a buffer */
	if (WARN_ON(dma_dir != DMA_NONE && !buf))
		return AC_ERR_INVALID;

	spin_lock_irqsave(ap->lock, flags);

	/* No internal command while frozen */
	if (ata_port_is_frozen(ap)) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* Initialize internal qc */
	qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);

	qc->tag = ATA_TAG_INTERNAL;
	qc->hw_tag = 0;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/*
	 * Save and clear the link/port active-command state so the
	 * internal command has the hardware to itself; it is restored
	 * below once the command completes.
	 */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* Prepare and issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);

	/* Some SATA bridges need us to indicate data xfer direction */
	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
	    dma_dir == DMA_FROM_DEVICE)
		qc->tf.feature |= ATAPI_DMADIR;

	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		/* Map the caller's flat buffer as a single-entry sg list */
		sg_init_one(&sgl, buf, buflen);
		ata_sg_init(qc, &sgl, 1);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Module parameter overrides the per-command default timeout */
	if (!timeout) {
		if (ata_probe_timeout) {
			timeout = ata_probe_timeout * 1000;
		} else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = true;
		}
	}

	ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		/*
		 * We are racing with irq here. If we lose, the following test
		 * prevents us from completing the qc twice. If we win, the port
		 * is frozen and will be cleaned up by ->post_internal_cmd().
		 */
		spin_lock_irqsave(ap->lock, flags);
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;
			ata_port_freeze(ap);
			ata_dev_warn(dev, "qc timeout after %u msecs (cmd 0x%x)\n",
				     timeout, command);
		}
		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* Give the LLD a chance to clean up after the internal command */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* Perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_EH) {
		if (qc->result_tf.status & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* AC_ERR_OTHER is redundant once a specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	} else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
		qc->result_tf.status |= ATA_SENSE;
	}

	/* Finish up */
	spin_lock_irqsave(ap->lock, flags);

	/* Hand the result taskfile back to the caller */
	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	/* Restore the active-command state saved above */
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}
  1442. /**
  1443. * ata_pio_need_iordy - check if iordy needed
  1444. * @adev: ATA device
  1445. *
  1446. * Check if the current speed of the device requires IORDY. Used
  1447. * by various controllers for chip configuration.
  1448. */
  1449. unsigned int ata_pio_need_iordy(const struct ata_device *adev)
  1450. {
  1451. /* Don't set IORDY if we're preparing for reset. IORDY may
  1452. * lead to controller lock up on certain controllers if the
  1453. * port is not occupied. See bko#11703 for details.
  1454. */
  1455. if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
  1456. return 0;
  1457. /* Controller doesn't support IORDY. Probably a pointless
  1458. * check as the caller should know this.
  1459. */
  1460. if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
  1461. return 0;
  1462. /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
  1463. if (ata_id_is_cfa(adev->id)
  1464. && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
  1465. return 0;
  1466. /* PIO3 and higher it is mandatory */
  1467. if (adev->pio_mode > XFER_PIO_2)
  1468. return 1;
  1469. /* We turn it on when possible */
  1470. if (ata_id_has_iordy(adev->id))
  1471. return 1;
  1472. return 0;
  1473. }
  1474. EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
  1475. /**
  1476. * ata_pio_mask_no_iordy - Return the non IORDY mask
  1477. * @adev: ATA device
  1478. *
  1479. * Compute the highest mode possible if we are not using iordy. Return
  1480. * -1 if no iordy mode is available.
  1481. */
  1482. static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
  1483. {
  1484. /* If we have no drive specific rule, then PIO 2 is non IORDY */
  1485. if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
  1486. u16 pio = adev->id[ATA_ID_EIDE_PIO];
  1487. /* Is the speed faster than the drive allows non IORDY ? */
  1488. if (pio) {
  1489. /* This is cycle times not frequency - watch the logic! */
  1490. if (pio > 240) /* PIO2 is 240nS per cycle */
  1491. return 3 << ATA_SHIFT_PIO;
  1492. return 7 << ATA_SHIFT_PIO;
  1493. }
  1494. }
  1495. return 3 << ATA_SHIFT_PIO;
  1496. }
  1497. /**
  1498. * ata_do_dev_read_id - default ID read method
  1499. * @dev: device
  1500. * @tf: proposed taskfile
  1501. * @id: data buffer
  1502. *
  1503. * Issue the identify taskfile and hand back the buffer containing
  1504. * identify data. For some RAID controllers and for pre ATA devices
  1505. * this function is wrapped or replaced by the driver
  1506. */
  1507. unsigned int ata_do_dev_read_id(struct ata_device *dev,
  1508. struct ata_taskfile *tf, __le16 *id)
  1509. {
  1510. return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
  1511. id, sizeof(id[0]) * ATA_ID_WORDS, 0);
  1512. }
  1513. EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
		fallthrough;
	case ATA_DEV_ATA:
	case ATA_DEV_ZAC:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, (__le16 *)id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, (__le16 *)id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_dbg(dev, "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_info(dev,
		     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.error & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_dbg(dev,
				    "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	if (dev->quirks & ATA_QUIRK_DUMP_ID) {
		ata_dev_info(dev, "dumping IDENTIFY data, "
			    "class=%d may_fallback=%d tried_spinup=%d\n",
			    class, may_fallback, tried_spinup);
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
							ata_id_is_ata(id)) {
			ata_dev_dbg(dev,
				"host indicates ignore ATA devices, ignored\n");
			return -ENOENT;
		}
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	/* Word 2: 0x37c8/0x738c indicate the drive needs SET_FEATURES
	 * spin-up before accepting anything but IDENTIFY.
	 */
	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) &&
	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
		     reason, err_mask);
	return rc;
}
/*
 * ata_dev_power_init_tf - Initialize a power management taskfile
 * @dev: target device
 * @tf: taskfile to initialize
 * @set_active: true to build a spin-up (VERIFY) command, false for
 *	STANDBY IMMEDIATE
 *
 * Returns false if @dev is not an ATA/ZAC device (no taskfile built),
 * true otherwise.
 */
bool ata_dev_power_init_tf(struct ata_device *dev, struct ata_taskfile *tf,
			   bool set_active)
{
	/* Only applies to ATA and ZAC devices */
	if (dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC)
		return false;

	ata_tf_init(dev, tf);
	tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf->protocol = ATA_PROT_NODATA;

	if (set_active) {
		/* VERIFY for 1 sector at lba=0 */
		tf->command = ATA_CMD_VERIFY;
		tf->nsect = 1;
		if (dev->flags & ATA_DFLAG_LBA) {
			tf->flags |= ATA_TFLAG_LBA;
			tf->device |= ATA_LBA;
		} else {
			/* CHS */
			tf->lbal = 0x1; /* sect */
		}
	} else {
		tf->command = ATA_CMD_STANDBYNOW1;
	}

	return true;
}
/*
 * ata_dev_power_is_active - Check a device power state
 * @dev: target device
 *
 * Issue CHECK POWER MODE and return true if the device reports the
 * active or idle power mode (0xff), false otherwise or on error.
 */
static bool ata_dev_power_is_active(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol = ATA_PROT_NODATA;
	tf.command = ATA_CMD_CHK_POWER;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_err(dev, "Check power mode failed (err_mask=0x%x)\n",
			    err_mask);
		/*
		 * Assume we are in standby mode so that we always force a
		 * spinup in ata_dev_power_set_active().
		 */
		return false;
	}

	ata_dev_dbg(dev, "Power mode: 0x%02x\n", tf.nsect);

	/* Active or idle */
	return tf.nsect == 0xff;
}
/**
 *	ata_dev_power_set_standby - Set a device power mode to standby
 *	@dev: target device
 *
 *	Issue a STANDBY IMMEDIATE command to set a device power mode to standby.
 *	For an HDD device, this spins down the disks.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_dev_power_set_standby(struct ata_device *dev)
{
	unsigned long ap_flags = dev->link->ap->flags;
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* If the device is already sleeping or in standby, do nothing. */
	if ((dev->flags & ATA_DFLAG_SLEEPING) ||
	    !ata_dev_power_is_active(dev))
		return;

	/*
	 * Some odd clown BIOSes issue spindown on power off (ACPI S4 or S5)
	 * causing some drives to spin up and down again. For these, do nothing
	 * if we are being called on shutdown.
	 */
	if ((ap_flags & ATA_FLAG_NO_POWEROFF_SPINDOWN) &&
	    system_state == SYSTEM_POWER_OFF)
		return;

	if ((ap_flags & ATA_FLAG_NO_HIBERNATE_SPINDOWN) &&
	    system_entering_hibernation())
		return;

	/* Issue STANDBY IMMEDIATE command only if supported by the device */
	if (!ata_dev_power_init_tf(dev, &tf, false))
		return;

	ata_dev_notice(dev, "Entering standby power mode\n");

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask)
		ata_dev_err(dev, "STANDBY IMMEDIATE failed (err_mask=0x%x)\n",
			    err_mask);
}
/**
 *	ata_dev_power_set_active -  Set a device power mode to active
 *	@dev: target device
 *
 *	Issue a VERIFY command to enter to ensure that the device is in the
 *	active power mode. For a spun-down HDD (standby or idle power mode),
 *	the VERIFY command will complete after the disk spins up.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_dev_power_set_active(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/*
	 * Issue READ VERIFY SECTORS command for 1 sector at lba=0 only
	 * if supported by the device.
	 */
	if (!ata_dev_power_init_tf(dev, &tf, true))
		return;

	/*
	 * Check the device power state & condition and force a spinup with
	 * VERIFY command only if the drive is not already ACTIVE or IDLE.
	 */
	if (ata_dev_power_is_active(dev))
		return;

	ata_dev_notice(dev, "Entering active power mode\n");

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask)
		ata_dev_err(dev, "VERIFY failed (err_mask=0x%x)\n",
			    err_mask);
}
/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@log: log to read
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ_LOG_EXT command.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
			       u8 page, void *buf, unsigned int sectors)
{
	unsigned long ap_flags = dev->link->ap->flags;
	struct ata_taskfile tf;
	unsigned int err_mask;
	bool dma = false;

	ata_dev_dbg(dev, "read log page - log 0x%x, page 0x%x\n", log, page);

	/*
	 * Return error without actually issuing the command on controllers
	 * which e.g. lockup on a read log page.
	 */
	if (ap_flags & ATA_FLAG_NO_LOG_PAGE)
		return AC_ERR_DEV;

retry:
	ata_tf_init(dev, &tf);
	/* Prefer the DMA variant when the drive supports it and isn't quirked */
	if (ata_dma_enabled(dev) && ata_id_has_read_log_dma_ext(dev->id) &&
	    !(dev->quirks & ATA_QUIRK_NO_DMA_LOG)) {
		tf.command = ATA_CMD_READ_LOG_DMA_EXT;
		tf.protocol = ATA_PROT_DMA;
		dma = true;
	} else {
		tf.command = ATA_CMD_READ_LOG_EXT;
		tf.protocol = ATA_PROT_PIO;
		dma = false;
	}
	tf.lbal = log;
	tf.lbam = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	if (err_mask) {
		if (dma) {
			/* DMA variant failed: mark it broken and retry in PIO */
			dev->quirks |= ATA_QUIRK_NO_DMA_LOG;
			if (!ata_port_is_frozen(dev->link->ap))
				goto retry;
		}
		ata_dev_err(dev,
			    "Read log 0x%02x page 0x%02x failed, Emask 0x%x\n",
			    (unsigned int)log, (unsigned int)page, err_mask);
	}

	return err_mask;
}
  1877. static int ata_log_supported(struct ata_device *dev, u8 log)
  1878. {
  1879. if (dev->quirks & ATA_QUIRK_NO_LOG_DIR)
  1880. return 0;
  1881. if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, dev->sector_buf, 1))
  1882. return 0;
  1883. return get_unaligned_le16(&dev->sector_buf[log * 2]);
  1884. }
/*
 * ata_identify_page_supported - check if an IDENTIFY DEVICE data log page
 * is supported
 * @dev: target device
 * @page: IDENTIFY DEVICE data log page to check
 *
 * Returns true if @page is listed in page 0 (list of supported pages) of
 * the IDENTIFY DEVICE data log, false otherwise or on error.
 */
static bool ata_identify_page_supported(struct ata_device *dev, u8 page)
{
	unsigned int err, i;

	if (dev->quirks & ATA_QUIRK_NO_ID_DEV_LOG)
		return false;

	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE)) {
		/*
		 * IDENTIFY DEVICE data log is defined as mandatory starting
		 * with ACS-3 (ATA version 10). Warn about the missing log
		 * for drives which implement this ATA level or above.
		 */
		if (ata_id_major_version(dev->id) >= 10)
			ata_dev_warn(dev,
				"ATA Identify Device Log not supported\n");
		/* remember the result so we don't probe again */
		dev->quirks |= ATA_QUIRK_NO_ID_DEV_LOG;
		return false;
	}

	/*
	 * Read IDENTIFY DEVICE data log, page 0, to figure out if the page is
	 * supported.
	 */
	err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, 0,
				dev->sector_buf, 1);
	if (err)
		return false;

	/* Byte 8 is the page count; the page list starts at byte 9 */
	for (i = 0; i < dev->sector_buf[8]; i++) {
		if (dev->sector_buf[9 + i] == page)
			return true;
	}

	return false;
}
/*
 * ata_do_link_spd_quirk - apply a per-device SATA link speed limit quirk
 * @dev: target device
 *
 * Returns 0 when no action is needed, -EAGAIN to request another EH round
 * when the link must be renegotiated at the lower speed.
 */
static int ata_do_link_spd_quirk(struct ata_device *dev)
{
	struct ata_link *plink = ata_dev_phys_link(dev);
	u32 target, target_limit;

	if (!sata_scr_valid(plink))
		return 0;

	/* target is the SATA generation number: 1 == 1.5 Gbps */
	if (dev->quirks & ATA_QUIRK_1_5_GBPS)
		target = 1;
	else
		return 0;

	target_limit = (1 << target) - 1;

	/* if already on stricter limit, no need to push further */
	if (plink->sata_spd_limit <= target_limit)
		return 0;

	plink->sata_spd_limit = target_limit;

	/* Request another EH round by returning -EAGAIN if link is
	 * going faster than the target speed.  Forward progress is
	 * guaranteed by setting sata_spd_limit to target_limit above.
	 */
	if (plink->sata_spd > target) {
		ata_dev_info(dev, "applying link speed limit quirk to %s\n",
			     sata_spd_string(target));
		return -EAGAIN;
	}
	return 0;
}
  1942. static inline bool ata_dev_knobble(struct ata_device *dev)
  1943. {
  1944. struct ata_port *ap = dev->link->ap;
  1945. if (ata_dev_quirks(dev) & ATA_QUIRK_BRIDGE_OK)
  1946. return false;
  1947. return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
  1948. }
/*
 * ata_dev_config_ncq_send_recv - configure NCQ Send/Recv support
 * @dev: target device
 *
 * Read the NCQ Send/Recv log, cache the supported-commands bitmap and set
 * ATA_DFLAG_NCQ_SEND_RECV.  Clears the queued-TRIM bit for drives quirked
 * with ATA_QUIRK_NO_NCQ_TRIM.
 */
static void ata_dev_config_ncq_send_recv(struct ata_device *dev)
{
	unsigned int err_mask;

	if (!ata_log_supported(dev, ATA_LOG_NCQ_SEND_RECV)) {
		ata_dev_warn(dev, "NCQ Send/Recv Log not supported\n");
		return;
	}
	err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
				     0, dev->sector_buf, 1);
	if (!err_mask) {
		u8 *cmds = dev->ncq_send_recv_cmds;

		dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
		memcpy(cmds, dev->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);

		if (dev->quirks & ATA_QUIRK_NO_NCQ_TRIM) {
			ata_dev_dbg(dev, "disabling queued TRIM support\n");
			cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
				~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
		}
	}
}
  1969. static void ata_dev_config_ncq_non_data(struct ata_device *dev)
  1970. {
  1971. unsigned int err_mask;
  1972. if (!ata_log_supported(dev, ATA_LOG_NCQ_NON_DATA)) {
  1973. ata_dev_warn(dev,
  1974. "NCQ Send/Recv Log not supported\n");
  1975. return;
  1976. }
  1977. err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA,
  1978. 0, dev->sector_buf, 1);
  1979. if (!err_mask)
  1980. memcpy(dev->ncq_non_data_cmds, dev->sector_buf,
  1981. ATA_LOG_NCQ_NON_DATA_SIZE);
  1982. }
/*
 * ata_dev_config_ncq_prio - detect NCQ priority support
 * @dev: target device
 *
 * Check the SATA Settings page of the IDENTIFY DEVICE data log and set
 * ATA_DFLAG_NCQ_PRIO when the device supports NCQ priority; otherwise
 * clear both the support and enabled flags.
 */
static void ata_dev_config_ncq_prio(struct ata_device *dev)
{
	unsigned int err_mask;

	if (!ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
		return;

	err_mask = ata_read_log_page(dev,
				     ATA_LOG_IDENTIFY_DEVICE,
				     ATA_LOG_SATA_SETTINGS,
				     dev->sector_buf, 1);
	if (err_mask)
		goto not_supported;

	/* Bit 3 of the NCQ priority byte flags NCQ priority support */
	if (!(dev->sector_buf[ATA_LOG_NCQ_PRIO_OFFSET] & BIT(3)))
		goto not_supported;

	dev->flags |= ATA_DFLAG_NCQ_PRIO;

	return;

not_supported:
	dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLED;
	dev->flags &= ~ATA_DFLAG_NCQ_PRIO;
}
  2002. static bool ata_dev_check_adapter(struct ata_device *dev,
  2003. unsigned short vendor_id)
  2004. {
  2005. struct pci_dev *pcidev = NULL;
  2006. struct device *parent_dev = NULL;
  2007. for (parent_dev = dev->tdev.parent; parent_dev != NULL;
  2008. parent_dev = parent_dev->parent) {
  2009. if (dev_is_pci(parent_dev)) {
  2010. pcidev = to_pci_dev(parent_dev);
  2011. if (pcidev->vendor == vendor_id)
  2012. return true;
  2013. break;
  2014. }
  2015. }
  2016. return false;
  2017. }
/*
 * ata_dev_config_ncq - configure Native Command Queuing for a device
 * @dev: target device
 * @desc: buffer receiving a human-readable NCQ description for dmesg
 * @desc_sz: size of @desc
 *
 * Enables NCQ (and FPDMA auto-activation when possible), honouring device
 * and adapter quirks, then caches the auxiliary NCQ log pages when the
 * port supports FPDMA AUX.
 *
 * Returns 0 on success, -EIO if enabling auto-activation failed for a
 * non-device reason (the device is then quirked and should be retried).
 */
static int ata_dev_config_ncq(struct ata_device *dev,
			      char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
	unsigned int err_mask;
	char *aa_desc = "";

	/* Device without NCQ: report an empty description. */
	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return 0;
	}
	if (!IS_ENABLED(CONFIG_SATA_HOST))
		return 0;
	/* Device or adapter quirks may forbid using NCQ altogether. */
	if (dev->quirks & ATA_QUIRK_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}
	if (dev->quirks & ATA_QUIRK_NO_NCQ_ON_ATI &&
	    ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI)) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return 0;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		/* Host queue depth is capped by the SCSI host and libata. */
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	/* Enable FPDMA auto-activation when both host and device support it. */
	if (!(dev->quirks & ATA_QUIRK_BROKEN_FPDMA_AA) &&
	    (ap->flags & ATA_FLAG_FPDMA_AA) &&
	    ata_id_has_fpdma_aa(dev->id)) {
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE,
					       SATA_FPDMA_AA);
		if (err_mask) {
			ata_dev_err(dev,
				    "failed to enable AA (error_mask=0x%x)\n",
				    err_mask);
			if (err_mask != AC_ERR_DEV) {
				/* Not a device error: quirk AA off and retry. */
				dev->quirks |= ATA_QUIRK_BROKEN_FPDMA_AA;
				return -EIO;
			}
		} else
			aa_desc = ", AA";
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
			 ddepth, aa_desc);

	/* The NCQ log pages below require FPDMA AUX support on the port. */
	if ((ap->flags & ATA_FLAG_FPDMA_AUX)) {
		if (ata_id_has_ncq_send_and_recv(dev->id))
			ata_dev_config_ncq_send_recv(dev);
		if (ata_id_has_ncq_non_data(dev->id))
			ata_dev_config_ncq_non_data(dev);
		if (ata_id_has_ncq_prio(dev->id))
			ata_dev_config_ncq_prio(dev);
	}

	return 0;
}
  2075. static void ata_dev_config_sense_reporting(struct ata_device *dev)
  2076. {
  2077. unsigned int err_mask;
  2078. if (!ata_id_has_sense_reporting(dev->id))
  2079. return;
  2080. if (ata_id_sense_reporting_enabled(dev->id))
  2081. return;
  2082. err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1);
  2083. if (err_mask) {
  2084. ata_dev_dbg(dev,
  2085. "failed to enable Sense Data Reporting, Emask 0x%x\n",
  2086. err_mask);
  2087. }
  2088. }
/*
 * Configure zoned (ZAC) device parameters: detect host-managed and
 * host-aware devices and cache the zone limits from the Zoned Device
 * Information page of the Identify Device Data log.
 */
static void ata_dev_config_zac(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 *identify_buf = dev->sector_buf;

	/* U32_MAX means "not reported by the device". */
	dev->zac_zones_optimal_open = U32_MAX;
	dev->zac_zones_optimal_nonseq = U32_MAX;
	dev->zac_zones_max_open = U32_MAX;

	/*
	 * Always set the 'ZAC' flag for Host-managed devices.
	 */
	if (dev->class == ATA_DEV_ZAC)
		dev->flags |= ATA_DFLAG_ZAC;
	else if (ata_id_zoned_cap(dev->id) == 0x01)
		/*
		 * Check for host-aware devices.
		 */
		dev->flags |= ATA_DFLAG_ZAC;

	if (!(dev->flags & ATA_DFLAG_ZAC))
		return;

	if (!ata_identify_page_supported(dev, ATA_LOG_ZONED_INFORMATION)) {
		ata_dev_warn(dev,
			     "ATA Zoned Information Log not supported\n");
		return;
	}

	/*
	 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information)
	 */
	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
				     ATA_LOG_ZONED_INFORMATION,
				     identify_buf, 1);
	if (!err_mask) {
		u64 zoned_cap, opt_open, opt_nonseq, max_open;

		/* Bit 63 of each qword indicates whether the field is valid. */
		zoned_cap = get_unaligned_le64(&identify_buf[8]);
		if ((zoned_cap >> 63))
			dev->zac_zoned_cap = (zoned_cap & 1);
		opt_open = get_unaligned_le64(&identify_buf[24]);
		if ((opt_open >> 63))
			dev->zac_zones_optimal_open = (u32)opt_open;
		opt_nonseq = get_unaligned_le64(&identify_buf[32]);
		if ((opt_nonseq >> 63))
			dev->zac_zones_optimal_nonseq = (u32)opt_nonseq;
		max_open = get_unaligned_le64(&identify_buf[40]);
		if ((max_open >> 63))
			dev->zac_zones_max_open = (u32)max_open;
	}
}
  2135. static void ata_dev_config_trusted(struct ata_device *dev)
  2136. {
  2137. u64 trusted_cap;
  2138. unsigned int err;
  2139. if (!ata_id_has_trusted(dev->id))
  2140. return;
  2141. if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
  2142. ata_dev_warn(dev,
  2143. "Security Log not supported\n");
  2144. return;
  2145. }
  2146. err = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE, ATA_LOG_SECURITY,
  2147. dev->sector_buf, 1);
  2148. if (err)
  2149. return;
  2150. trusted_cap = get_unaligned_le64(&dev->sector_buf[40]);
  2151. if (!(trusted_cap & (1ULL << 63))) {
  2152. ata_dev_dbg(dev,
  2153. "Trusted Computing capability qword not valid!\n");
  2154. return;
  2155. }
  2156. if (trusted_cap & (1 << 0))
  2157. dev->flags |= ATA_DFLAG_TRUSTED;
  2158. }
  2159. void ata_dev_cleanup_cdl_resources(struct ata_device *dev)
  2160. {
  2161. kfree(dev->cdl);
  2162. dev->cdl = NULL;
  2163. }
/*
 * Allocate (on first use) the Command Duration Limits state for @dev and
 * cache the CDL descriptor log. On read failure the CDL state is freed
 * again so dev->cdl stays consistent with dev->flags.
 *
 * Returns 0 on success, -ENOMEM or -EIO on failure.
 */
static int ata_dev_init_cdl_resources(struct ata_device *dev)
{
	struct ata_cdl *cdl = dev->cdl;
	unsigned int err_mask;

	/* Reuse an existing allocation (e.g. on revalidation). */
	if (!cdl) {
		cdl = kzalloc(sizeof(*cdl), GFP_KERNEL);
		if (!cdl)
			return -ENOMEM;
		dev->cdl = cdl;
	}

	err_mask = ata_read_log_page(dev, ATA_LOG_CDL, 0, cdl->desc_log_buf,
				     ATA_LOG_CDL_SIZE / ATA_SECT_SIZE);
	if (err_mask) {
		ata_dev_warn(dev, "Read Command Duration Limits log failed\n");
		ata_dev_cleanup_cdl_resources(dev);
		return -EIO;
	}

	return 0;
}
/*
 * Probe and configure Command Duration Limits (CDL) support. Verifies
 * the required capability and log pages, synchronizes the device-side
 * CDL feature with the ATA_DFLAG_CDL_ENABLED flag, enables sense data
 * for successful NCQ commands (required by CDL), and allocates the CDL
 * resources. On any failure CDL is fully torn down.
 */
static void ata_dev_config_cdl(struct ata_device *dev)
{
	unsigned int err_mask;
	bool cdl_enabled;
	u64 val;
	int ret;

	/* CDL was introduced with ACS-5 (ATA major version 11). */
	if (ata_id_major_version(dev->id) < 11)
		goto not_supported;

	if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE) ||
	    !ata_identify_page_supported(dev, ATA_LOG_SUPPORTED_CAPABILITIES) ||
	    !ata_identify_page_supported(dev, ATA_LOG_CURRENT_SETTINGS))
		goto not_supported;

	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
				     ATA_LOG_SUPPORTED_CAPABILITIES,
				     dev->sector_buf, 1);
	if (err_mask)
		goto not_supported;

	/* Check Command Duration Limit Supported bits */
	val = get_unaligned_le64(&dev->sector_buf[168]);
	if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(0)))
		goto not_supported;

	/* Warn the user if command duration guideline is not supported */
	if (!(val & BIT_ULL(1)))
		ata_dev_warn(dev,
			     "Command duration guideline is not supported\n");

	/*
	 * We must have support for the sense data for successful NCQ commands
	 * log indicated by the successful NCQ command sense data supported bit.
	 */
	val = get_unaligned_le64(&dev->sector_buf[8]);
	if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(47))) {
		ata_dev_warn(dev,
			     "CDL supported but Successful NCQ Command Sense Data is not supported\n");
		goto not_supported;
	}

	/* Without NCQ autosense, the successful NCQ commands log is useless. */
	if (!ata_id_has_ncq_autosense(dev->id)) {
		ata_dev_warn(dev,
			     "CDL supported but NCQ autosense is not supported\n");
		goto not_supported;
	}

	/*
	 * If CDL is marked as enabled, make sure the feature is enabled too.
	 * Conversely, if CDL is disabled, make sure the feature is turned off.
	 */
	err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
				     ATA_LOG_CURRENT_SETTINGS,
				     dev->sector_buf, 1);
	if (err_mask)
		goto not_supported;

	/* Bit 63 validates the qword, bit 21 is the CDL enabled state. */
	val = get_unaligned_le64(&dev->sector_buf[8]);
	cdl_enabled = val & BIT_ULL(63) && val & BIT_ULL(21);
	if (dev->flags & ATA_DFLAG_CDL_ENABLED) {
		if (!cdl_enabled) {
			/* Enable CDL on the device */
			err_mask = ata_dev_set_feature(dev, SETFEATURES_CDL, 1);
			if (err_mask) {
				ata_dev_err(dev,
					    "Enable CDL feature failed\n");
				goto not_supported;
			}
		}
	} else {
		if (cdl_enabled) {
			/* Disable CDL on the device */
			err_mask = ata_dev_set_feature(dev, SETFEATURES_CDL, 0);
			if (err_mask) {
				ata_dev_err(dev,
					    "Disable CDL feature failed\n");
				goto not_supported;
			}
		}
	}

	/*
	 * While CDL itself has to be enabled using sysfs, CDL requires that
	 * sense data for successful NCQ commands is enabled to work properly.
	 * Just like ata_dev_config_sense_reporting(), enable it unconditionally
	 * if supported.
	 */
	if (!(val & BIT_ULL(63)) || !(val & BIT_ULL(18))) {
		err_mask = ata_dev_set_feature(dev,
					       SETFEATURE_SENSE_DATA_SUCC_NCQ, 0x1);
		if (err_mask) {
			ata_dev_warn(dev,
				     "failed to enable Sense Data for successful NCQ commands, Emask 0x%x\n",
				     err_mask);
			goto not_supported;
		}
	}

	/* CDL is supported: allocate and initialize needed resources. */
	ret = ata_dev_init_cdl_resources(dev);
	if (ret) {
		ata_dev_warn(dev, "Initialize CDL resources failed\n");
		goto not_supported;
	}

	dev->flags |= ATA_DFLAG_CDL;

	return;

not_supported:
	dev->flags &= ~(ATA_DFLAG_CDL | ATA_DFLAG_CDL_ENABLED);
	ata_dev_cleanup_cdl_resources(dev);
}
  2284. static int ata_dev_config_lba(struct ata_device *dev)
  2285. {
  2286. const u16 *id = dev->id;
  2287. const char *lba_desc;
  2288. char ncq_desc[32];
  2289. int ret;
  2290. dev->flags |= ATA_DFLAG_LBA;
  2291. if (ata_id_has_lba48(id)) {
  2292. lba_desc = "LBA48";
  2293. dev->flags |= ATA_DFLAG_LBA48;
  2294. if (dev->n_sectors >= (1UL << 28) &&
  2295. ata_id_has_flush_ext(id))
  2296. dev->flags |= ATA_DFLAG_FLUSH_EXT;
  2297. } else {
  2298. lba_desc = "LBA";
  2299. }
  2300. /* config NCQ */
  2301. ret = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
  2302. /* print device info to dmesg */
  2303. if (ata_dev_print_info(dev))
  2304. ata_dev_info(dev,
  2305. "%llu sectors, multi %u: %s %s\n",
  2306. (unsigned long long)dev->n_sectors,
  2307. dev->multi_count, lba_desc, ncq_desc);
  2308. return ret;
  2309. }
  2310. static void ata_dev_config_chs(struct ata_device *dev)
  2311. {
  2312. const u16 *id = dev->id;
  2313. if (ata_id_current_chs_valid(id)) {
  2314. /* Current CHS translation is valid. */
  2315. dev->cylinders = id[54];
  2316. dev->heads = id[55];
  2317. dev->sectors = id[56];
  2318. } else {
  2319. /* Default translation */
  2320. dev->cylinders = id[1];
  2321. dev->heads = id[3];
  2322. dev->sectors = id[6];
  2323. }
  2324. /* print device info to dmesg */
  2325. if (ata_dev_print_info(dev))
  2326. ata_dev_info(dev,
  2327. "%llu sectors, multi %u, CHS %u/%u/%u\n",
  2328. (unsigned long long)dev->n_sectors,
  2329. dev->multi_count, dev->cylinders,
  2330. dev->heads, dev->sectors);
  2331. }
  2332. static void ata_dev_config_fua(struct ata_device *dev)
  2333. {
  2334. /* Ignore FUA support if its use is disabled globally */
  2335. if (!libata_fua)
  2336. goto nofua;
  2337. /* Ignore devices without support for WRITE DMA FUA EXT */
  2338. if (!(dev->flags & ATA_DFLAG_LBA48) || !ata_id_has_fua(dev->id))
  2339. goto nofua;
  2340. /* Ignore known bad devices and devices that lack NCQ support */
  2341. if (!ata_ncq_supported(dev) || (dev->quirks & ATA_QUIRK_NO_FUA))
  2342. goto nofua;
  2343. dev->flags |= ATA_DFLAG_FUA;
  2344. return;
  2345. nofua:
  2346. dev->flags &= ~ATA_DFLAG_FUA;
  2347. }
  2348. static void ata_dev_config_devslp(struct ata_device *dev)
  2349. {
  2350. u8 *sata_setting = dev->sector_buf;
  2351. unsigned int err_mask;
  2352. int i, j;
  2353. /*
  2354. * Check device sleep capability. Get DevSlp timing variables
  2355. * from SATA Settings page of Identify Device Data Log.
  2356. */
  2357. if (!ata_id_has_devslp(dev->id) ||
  2358. !ata_identify_page_supported(dev, ATA_LOG_SATA_SETTINGS))
  2359. return;
  2360. err_mask = ata_read_log_page(dev,
  2361. ATA_LOG_IDENTIFY_DEVICE,
  2362. ATA_LOG_SATA_SETTINGS,
  2363. sata_setting, 1);
  2364. if (err_mask)
  2365. return;
  2366. dev->flags |= ATA_DFLAG_DEVSLP;
  2367. for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) {
  2368. j = ATA_LOG_DEVSLP_OFFSET + i;
  2369. dev->devslp_timing[i] = sata_setting[j];
  2370. }
  2371. }
/*
 * Read and cache the Concurrent Positioning Ranges log (multi-actuator
 * devices). On any failure or when the log is absent, dev->cpr_log ends
 * up NULL; a previously cached log is always released.
 */
static void ata_dev_config_cpr(struct ata_device *dev)
{
	unsigned int err_mask;
	size_t buf_len;
	int i, nr_cpr = 0;
	struct ata_cpr_log *cpr_log = NULL;
	u8 *desc, *buf = NULL;

	/* Concurrent positioning ranges appeared in ACS-5 (major 11). */
	if (ata_id_major_version(dev->id) < 11)
		goto out;

	/* ata_log_supported() returns the log size in sectors. */
	buf_len = ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES);
	if (buf_len == 0)
		goto out;

	/*
	 * Read the concurrent positioning ranges log (0x47). We can have at
	 * most 255 32B range descriptors plus a 64B header. This log varies in
	 * size, so use the size reported in the GPL directory. Reading beyond
	 * the supported length will result in an error.
	 */
	buf_len <<= 9;	/* sectors -> bytes */
	buf = kzalloc(buf_len, GFP_KERNEL);
	if (!buf)
		goto out;

	err_mask = ata_read_log_page(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES,
				     0, buf, buf_len >> 9);
	if (err_mask)
		goto out;

	nr_cpr = buf[0];
	if (!nr_cpr)
		goto out;

	cpr_log = kzalloc(struct_size(cpr_log, cpr, nr_cpr), GFP_KERNEL);
	if (!cpr_log)
		goto out;

	cpr_log->nr_cpr = nr_cpr;

	/* Descriptors start after the 64B header, one every 32 bytes. */
	desc = &buf[64];
	for (i = 0; i < nr_cpr; i++, desc += 32) {
		cpr_log->cpr[i].num = desc[0];
		cpr_log->cpr[i].num_storage_elements = desc[1];
		cpr_log->cpr[i].start_lba = get_unaligned_le64(&desc[8]);
		cpr_log->cpr[i].num_lbas = get_unaligned_le64(&desc[16]);
	}

out:
	/* Publish the new log (or NULL) and free the previous one. */
	swap(dev->cpr_log, cpr_log);
	kfree(cpr_log);
	kfree(buf);
}
/*
 * Log a one-line summary of the optional features detected on @dev.
 * Nothing is printed when no feature flag in ATA_DFLAG_FEATURES_MASK is
 * set (CPR is reported from dev->cpr_log, not a flag).
 */
static void ata_dev_print_features(struct ata_device *dev)
{
	if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
		return;

	ata_dev_info(dev,
		     "Features:%s%s%s%s%s%s%s%s\n",
		     dev->flags & ATA_DFLAG_FUA ? " FUA" : "",
		     dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
		     dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
		     dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
		     dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
		     dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
		     dev->flags & ATA_DFLAG_CDL ? " CDL" : "",
		     dev->cpr_log ? " CPR" : "");
}
  2432. /**
  2433. * ata_dev_configure - Configure the specified ATA/ATAPI device
  2434. * @dev: Target device to configure
  2435. *
  2436. * Configure @dev according to @dev->id. Generic and low-level
  2437. * driver specific fixups are also applied.
  2438. *
  2439. * LOCKING:
  2440. * Kernel thread context (may sleep)
  2441. *
  2442. * RETURNS:
  2443. * 0 on success, -errno otherwise
  2444. */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	bool print_info = ata_dev_print_info(dev);
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	unsigned int err_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev)) {
		ata_dev_dbg(dev, "no device\n");
		return 0;
	}

	/* Set quirks */
	dev->quirks |= ata_dev_quirks(dev);
	ata_force_quirks(dev);

	if (dev->quirks & ATA_QUIRK_DISABLE) {
		ata_dev_info(dev, "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	/* Reject ATAPI devices when ATAPI support is off or unavailable. */
	if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
	    dev->class == ATA_DEV_ATAPI) {
		ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n",
			     atapi_enabled ? "not supported with this driver"
			     : "disabled");
		ata_dev_disable(dev);
		return 0;
	}

	rc = ata_do_link_spd_quirk(dev);
	if (rc)
		return rc;

	/* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
	if ((dev->quirks & ATA_QUIRK_WD_BROKEN_LPM) &&
	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
		dev->quirks |= ATA_QUIRK_NOLPM;

	if (dev->quirks & ATA_QUIRK_NO_LPM_ON_ATI &&
	    ata_dev_check_adapter(dev, PCI_VENDOR_ID_ATI))
		dev->quirks |= ATA_QUIRK_NOLPM;

	if (ap->flags & ATA_FLAG_NO_LPM)
		dev->quirks |= ATA_QUIRK_NOLPM;

	if (dev->quirks & ATA_QUIRK_NOLPM) {
		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
	}

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	ata_dev_dbg(dev,
		    "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
		    "85:%04x 86:%04x 87:%04x 88:%04x\n",
		    __func__,
		    id[49], id[82], id[83], id[84],
		    id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;
	dev->multi_count = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	ata_dump_id(dev, id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
		if (ata_id_is_cfa(id)) {
			/* CPRM may make this media unusable */
			if (id[ATA_ID_CFA_KEY_MGMT] & 1)
				ata_dev_warn(dev,
					     "supports DRM functions and may not be fully accessible\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_warn(dev,
					     "supports DRM functions and may not be fully accessible\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* get current R/W Multiple count setting */
		if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) {
			unsigned int max = dev->id[47] & 0xff;
			unsigned int cnt = dev->id[59] & 0xff;
			/* only recognize/allow powers of two here */
			if (is_power_of_2(max) && is_power_of_2(cnt))
				if (cnt <= max)
					dev->multi_count = cnt;
		}

		/* print device info to dmesg */
		if (print_info)
			ata_dev_info(dev, "%s: %s, %s, max %s\n",
				     revbuf, modelbuf, fwrevbuf,
				     ata_mode_string(xfer_mask));

		/* LBA devices also get NCQ configured via config_lba(). */
		if (ata_id_has_lba(id)) {
			rc = ata_dev_config_lba(dev);
			if (rc)
				return rc;
		} else {
			ata_dev_config_chs(dev);
		}

		ata_dev_config_fua(dev);
		ata_dev_config_devslp(dev);
		ata_dev_config_sense_reporting(dev);
		ata_dev_config_zac(dev);
		ata_dev_config_trusted(dev);
		ata_dev_config_cpr(dev);
		ata_dev_config_cdl(dev);
		dev->cdb_len = 32;

		if (print_info)
			ata_dev_print_features(dev);
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			ata_dev_warn(dev, "unsupported CDB len %d\n", rc);
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support. If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if (atapi_an &&
		    (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_err(dev,
					    "failed to enable ATAPI AN (err_mask=0x%x)\n",
					    err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || (dev->quirks & ATA_QUIRK_ATAPI_DMADIR) ||
		    atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		if (ata_id_has_da(dev->id)) {
			dev->flags |= ATA_DFLAG_DA;
			zpodd_init(dev);
		}

		/* print device info to dmesg */
		if (print_info)
			ata_dev_info(dev,
				     "ATAPI: %s, %s, max %s%s%s%s\n",
				     modelbuf, fwrevbuf,
				     ata_mode_string(xfer_mask),
				     cdb_intr_string, atapi_an_string,
				     dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (print_info)
			ata_dev_info(dev, "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->quirks |= ATA_QUIRK_STUCK_ERR;
	}

	/* Apply quirk-driven max_sectors limits, most restrictive wins. */
	if (dev->quirks & ATA_QUIRK_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (dev->quirks & ATA_QUIRK_MAX_SEC_1024)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
					 dev->max_sectors);

	if (dev->quirks & ATA_QUIRK_MAX_SEC_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* Give the LLD a chance to apply controller-specific settings. */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->quirks & ATA_QUIRK_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */
		if (print_info) {
			ata_dev_warn(dev,
				     "Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_warn(dev,
				     "fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if ((dev->quirks & ATA_QUIRK_FIRMWARE_WARN) && print_info) {
		ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n");
		ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n");
	}

	return 0;

err_out_nosup:
	return rc;
}
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA40 unconditionally.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
EXPORT_SYMBOL_GPL(ata_cable_40wire);
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA80 unconditionally.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
EXPORT_SYMBOL_GPL(ata_cable_80wire);
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_UNK unconditionally.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
EXPORT_SYMBOL_GPL(ata_cable_unknown);
/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_IGN unconditionally.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
EXPORT_SYMBOL_GPL(ata_cable_ignore);
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables
 *
 *	RETURNS:
 *	ATA_CBL_SATA unconditionally.
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
EXPORT_SYMBOL_GPL(ata_cable_sata);
  2737. /**
  2738. * sata_print_link_status - Print SATA link status
  2739. * @link: SATA link to printk link status about
  2740. *
  2741. * This function prints link speed and status of a SATA link.
  2742. *
  2743. * LOCKING:
  2744. * None.
  2745. */
  2746. static void sata_print_link_status(struct ata_link *link)
  2747. {
  2748. u32 sstatus, scontrol, tmp;
  2749. if (sata_scr_read(link, SCR_STATUS, &sstatus))
  2750. return;
  2751. if (sata_scr_read(link, SCR_CONTROL, &scontrol))
  2752. return;
  2753. if (ata_phys_link_online(link)) {
  2754. tmp = (sstatus >> 4) & 0xf;
  2755. ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
  2756. sata_spd_string(tmp), sstatus, scontrol);
  2757. } else {
  2758. ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
  2759. sstatus, scontrol);
  2760. }
  2761. }
  2762. /**
  2763. * ata_dev_pair - return other device on cable
  2764. * @adev: device
  2765. *
  2766. * Obtain the other device on the same cable, or if none is
  2767. * present NULL is returned
  2768. */
  2769. struct ata_device *ata_dev_pair(struct ata_device *adev)
  2770. {
  2771. struct ata_link *link = adev->link;
  2772. struct ata_device *pair = &link->device[1 - adev->devno];
  2773. if (!ata_dev_enabled(pair))
  2774. return NULL;
  2775. return pair;
  2776. }
  2777. EXPORT_SYMBOL_GPL(ata_dev_pair);
  2778. #ifdef CONFIG_ATA_ACPI
  2779. /**
  2780. * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
  2781. * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
  2782. * @cycle: cycle duration in ns
  2783. *
  2784. * Return matching xfer mode for @cycle. The returned mode is of
  2785. * the transfer type specified by @xfer_shift. If @cycle is too
  2786. * slow for @xfer_shift, 0xff is returned. If @cycle is faster
  2787. * than the fastest known mode, the fasted mode is returned.
  2788. *
  2789. * LOCKING:
  2790. * None.
  2791. *
  2792. * RETURNS:
  2793. * Matching xfer_mode, 0xff if no match found.
  2794. */
  2795. u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
  2796. {
  2797. u8 base_mode = 0xff, last_mode = 0xff;
  2798. const struct ata_xfer_ent *ent;
  2799. const struct ata_timing *t;
  2800. for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
  2801. if (ent->shift == xfer_shift)
  2802. base_mode = ent->base;
  2803. for (t = ata_timing_find_mode(base_mode);
  2804. t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
  2805. unsigned short this_cycle;
  2806. switch (xfer_shift) {
  2807. case ATA_SHIFT_PIO:
  2808. case ATA_SHIFT_MWDMA:
  2809. this_cycle = t->cycle;
  2810. break;
  2811. case ATA_SHIFT_UDMA:
  2812. this_cycle = t->udma;
  2813. break;
  2814. default:
  2815. return 0xff;
  2816. }
  2817. if (cycle > this_cycle)
  2818. break;
  2819. last_mode = t->mode;
  2820. }
  2821. return last_mode;
  2822. }
  2823. #endif
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* ATA_DNXFER_QUIET is a modifier flag, not a selector. */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* Drop the fastest remaining PIO mode. */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* Drop the fastest remaining DMA mode, UDMA first. */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* Restrict UDMA to modes safe on a 40-wire cable. */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		fallthrough;
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* Must keep at least one PIO mode, and must actually go down. */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_warn(dev, "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
/*
 * ata_dev_set_mode - program the current transfer mode on one device
 *
 * Issues SET FEATURES - XFER MODE to @dev (skipped for quirky SATA
 * devices with ATA_QUIRK_NOSETXFER), revalidates the device and then
 * decides whether a device-reported error can safely be ignored.
 *
 * RETURNS: 0 on success, negative errno otherwise.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const bool nosetxfer = dev->quirks & ATA_QUIRK_NOSETXFER;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask = 0;
	int rc;

	/* keep ATA_DFLAG_PIO in sync with the selected xfer shift */
	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	/* NOSETXFER is only honored when both device and port are SATA */
	if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id))
		dev_err_whine = " (SET_XFERMODE skipped)";
	else {
		if (nosetxfer)
			ata_dev_warn(dev,
				     "NOSETXFER but PATA detected - can't "
				     "skip SETXFER, might malfunction\n");
		err_mask = ata_dev_set_xfermode(dev);
	}

	/* non-device errors are always fatal */
	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO) {
		/* Old CFA may refuse this command, which is just fine */
		if (ata_id_is_cfa(dev->id))
			ign_dev_err = 1;
		/* Catch several broken garbage emulations plus some pre
		   ATA devices */
		if (ata_id_major_version(dev->id) == 0 &&
					dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
		/* Some very old devices and some bad newer ones fail
		   any kind of SET_XFERMODE request but support PIO0-2
		   timings and no IORDY */
		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
	}
	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	ata_dev_dbg(dev, "xfer_shift=%u, xfer_mode=0x%x\n",
		    dev->xfer_shift, (int)dev->xfer_mode);

	if (!(ehc->i.flags & ATA_EHI_QUIET) ||
	    ehc->i.flags & ATA_EHI_DID_HARDRESET)
		ata_dev_info(dev, "configured for %s%s\n",
			     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
			     dev_err_whine);

	return 0;

fail:
	ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask);
	return -EIO;
}
/**
 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 * @link: link on which timings will be programmed
 * @r_failed_dev: out parameter for failed device
 *
 * Standard implementation of the function used to tune and set
 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
 * ata_dev_set_mode() fails, pointer to the failing device is
 * returned in @r_failed_dev.  @r_failed_dev is written only on
 * failure.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * 0 on success, negative errno otherwise
 */
int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	ata_for_each_dev(dev, link, ENABLED) {
		unsigned int pio_mask, dma_mask;
		unsigned int mode_mask;

		/* pick the DMA enable mask matching the device class */
		mode_mask = ATA_DMA_MASK_ATA;
		if (dev->class == ATA_DEV_ATAPI)
			mode_mask = ATA_DMA_MASK_ATAPI;
		else if (ata_id_is_cfa(dev->id))
			mode_mask = ATA_DMA_MASK_CFA;

		ata_dev_xfermask(dev);
		ata_force_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);

		/* DMA is allowed only if enabled for this class globally */
		if (libata_dma_mask & mode_mask)
			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
						     dev->udma_mask);
		else
			dma_mask = 0;

		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (ata_dma_enabled(dev))
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (dev->pio_mode == 0xff) {
			/* 0xff means no valid PIO mode was derived above */
			ata_dev_warn(dev, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!ata_dma_enabled(dev))
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_for_each_dev(dev, link, ENABLED) {
		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

out:
	if (rc)
		/* @dev still points at the device that failed above */
		*r_failed_dev = dev;
	return rc;
}
EXPORT_SYMBOL_GPL(ata_do_set_mode);
/**
 * ata_wait_ready - wait for link to become ready
 * @link: link to be waited on
 * @deadline: deadline jiffies for the operation
 * @check_ready: callback to check link readiness
 *
 * Wait for @link to become ready. @check_ready should return
 * positive number if @link is ready, 0 if it isn't, -ENODEV if
 * link doesn't seem to be occupied, other errno for other error
 * conditions.
 *
 * Transient -ENODEV conditions are allowed for
 * ATA_TMOUT_FF_WAIT.
 *
 * LOCKING:
 * EH context.
 *
 * RETURNS:
 * 0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline;
	int warned = 0;	/* the "slow to respond" warning is printed once */

	/* choose which 0xff timeout to use, read comment in libata.h */
	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
	else
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);

	/* Slave readiness can't be tested separately from master. On
	 * M/S emulation configuration, this function should be called
	 * only on the master and it will handle both master and slave.
	 */
	WARN_ON(link == link->ap->slave_link);

	/* the -ENODEV grace period never extends past the main deadline */
	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		/* tmp keeps the raw return value for the warning below */
		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/*
		 * -ENODEV could be transient. Ignore -ENODEV if link
		 * is online. Also, some SATA devices take a long
		 * time to clear 0xff after reset. Wait for
		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
		 * offline.
		 *
		 * Note that some PATA controllers (pata_ali) explode
		 * if status register is read more than once when
		 * there's no device attached.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		/* any remaining non-zero value is a hard error */
		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		/* warn after 5s of polling, but only if >3s of wait remains */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_warn(link,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		ata_msleep(link->ap, 50);
	}
}
/**
 * ata_wait_after_reset - wait for link to become ready after reset
 * @link: link to be waited on
 * @deadline: deadline jiffies for the operation
 * @check_ready: callback to check link readiness
 *
 * Wait for @link to become ready after reset.
 *
 * LOCKING:
 * EH context.
 *
 * RETURNS:
 * 0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
			 int (*check_ready)(struct ata_link *link))
{
	/* delay before the first readiness poll after reset */
	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);

	return ata_wait_ready(link, deadline, check_ready);
}
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
  3152. /**
  3153. * ata_std_prereset - prepare for reset
  3154. * @link: ATA link to be reset
  3155. * @deadline: deadline jiffies for the operation
  3156. *
  3157. * @link is about to be reset. Initialize it. Failure from
  3158. * prereset makes libata abort whole reset sequence and give up
  3159. * that port, so prereset should be best-effort. It does its
  3160. * best to prepare for reset sequence but if things go wrong, it
  3161. * should just whine, not fail.
  3162. *
  3163. * LOCKING:
  3164. * Kernel thread context (may sleep)
  3165. *
  3166. * RETURNS:
  3167. * Always 0.
  3168. */
  3169. int ata_std_prereset(struct ata_link *link, unsigned long deadline)
  3170. {
  3171. struct ata_port *ap = link->ap;
  3172. struct ata_eh_context *ehc = &link->eh_context;
  3173. const unsigned int *timing = sata_ehc_deb_timing(ehc);
  3174. int rc;
  3175. /* if we're about to do hardreset, nothing more to do */
  3176. if (ehc->i.action & ATA_EH_HARDRESET)
  3177. return 0;
  3178. /* if SATA, resume link */
  3179. if (ap->flags & ATA_FLAG_SATA) {
  3180. rc = sata_link_resume(link, timing, deadline);
  3181. /* whine about phy resume failure but proceed */
  3182. if (rc && rc != -EOPNOTSUPP)
  3183. ata_link_warn(link,
  3184. "failed to resume link for reset (errno=%d)\n",
  3185. rc);
  3186. }
  3187. /* no point in trying softreset on offline link */
  3188. if (ata_phys_link_offline(link))
  3189. ehc->i.action &= ~ATA_EH_SOFTRESET;
  3190. return 0;
  3191. }
  3192. EXPORT_SYMBOL_GPL(ata_std_prereset);
  3193. /**
  3194. * ata_std_postreset - standard postreset callback
  3195. * @link: the target ata_link
  3196. * @classes: classes of attached devices
  3197. *
  3198. * This function is invoked after a successful reset. Note that
  3199. * the device might have been reset more than once using
  3200. * different reset methods before postreset is invoked.
  3201. *
  3202. * LOCKING:
  3203. * Kernel thread context (may sleep)
  3204. */
  3205. void ata_std_postreset(struct ata_link *link, unsigned int *classes)
  3206. {
  3207. u32 serror;
  3208. /* reset complete, clear SError */
  3209. if (!sata_scr_read(link, SCR_ERROR, &serror))
  3210. sata_scr_write(link, SCR_ERROR, serror);
  3211. /* print link status */
  3212. sata_print_link_status(link);
  3213. }
  3214. EXPORT_SYMBOL_GPL(ata_std_postreset);
  3215. /**
  3216. * ata_dev_same_device - Determine whether new ID matches configured device
  3217. * @dev: device to compare against
  3218. * @new_class: class of the new device
  3219. * @new_id: IDENTIFY page of the new device
  3220. *
  3221. * Compare @new_class and @new_id against @dev and determine
  3222. * whether @dev is the device indicated by @new_class and
  3223. * @new_id.
  3224. *
  3225. * LOCKING:
  3226. * None.
  3227. *
  3228. * RETURNS:
  3229. * 1 if @dev matches @new_class and @new_id, 0 otherwise.
  3230. */
  3231. static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
  3232. const u16 *new_id)
  3233. {
  3234. const u16 *old_id = dev->id;
  3235. unsigned char model[2][ATA_ID_PROD_LEN + 1];
  3236. unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
  3237. if (dev->class != new_class) {
  3238. ata_dev_info(dev, "class mismatch %d != %d\n",
  3239. dev->class, new_class);
  3240. return 0;
  3241. }
  3242. ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
  3243. ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
  3244. ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
  3245. ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
  3246. if (strcmp(model[0], model[1])) {
  3247. ata_dev_info(dev, "model number mismatch '%s' != '%s'\n",
  3248. model[0], model[1]);
  3249. return 0;
  3250. }
  3251. if (strcmp(serial[0], serial[1])) {
  3252. ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n",
  3253. serial[0], serial[1]);
  3254. return 0;
  3255. }
  3256. return 1;
  3257. }
  3258. /**
  3259. * ata_dev_reread_id - Re-read IDENTIFY data
  3260. * @dev: target ATA device
  3261. * @readid_flags: read ID flags
  3262. *
  3263. * Re-read IDENTIFY page and make sure @dev is still attached to
  3264. * the port.
  3265. *
  3266. * LOCKING:
  3267. * Kernel thread context (may sleep)
  3268. *
  3269. * RETURNS:
  3270. * 0 on success, negative errno otherwise
  3271. */
  3272. int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
  3273. {
  3274. unsigned int class = dev->class;
  3275. u16 *id = (void *)dev->sector_buf;
  3276. int rc;
  3277. /* read ID data */
  3278. rc = ata_dev_read_id(dev, &class, readid_flags, id);
  3279. if (rc)
  3280. return rc;
  3281. /* is the device still there? */
  3282. if (!ata_dev_same_device(dev, class, id))
  3283. return -ENODEV;
  3284. memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
  3285. return 0;
  3286. }
/**
 * ata_dev_revalidate - Revalidate ATA device
 * @dev: device to revalidate
 * @new_class: new class code
 * @readid_flags: read ID flags
 *
 * Re-read IDENTIFY page, make sure @dev is still attached to the
 * port and reconfigure it according to the new IDENTIFY page.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	/* remember the pre-revalidation sizes for the checks below */
	u64 n_sectors = dev->n_sectors;
	u64 n_native_sectors = dev->n_native_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) && new_class == ATA_DEV_PMP) {
		ata_dev_info(dev, "class mismatch %u != %u\n",
			     dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class != ATA_DEV_ATA || !n_sectors ||
	    dev->n_sectors == n_sectors)
		return 0;

	/* n_sectors has changed */
	ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n",
		     (unsigned long long)n_sectors,
		     (unsigned long long)dev->n_sectors);

	/*
	 * Something could have caused HPA to be unlocked
	 * involuntarily. If n_native_sectors hasn't changed and the
	 * new size matches it, keep the device.
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) {
		ata_dev_warn(dev,
			     "new n_sectors matches native, probably "
			     "late HPA unlock, n_sectors updated\n");
		/* use the larger n_sectors */
		return 0;
	}

	/*
	 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try
	 * unlocking HPA in those cases.
	 *
	 * https://bugzilla.kernel.org/show_bug.cgi?id=15396
	 */
	if (dev->n_native_sectors == n_native_sectors &&
	    dev->n_sectors < n_sectors && n_sectors == n_native_sectors &&
	    !(dev->quirks & ATA_QUIRK_BROKEN_HPA)) {
		ata_dev_warn(dev,
			     "old n_sectors matches native, probably "
			     "late HPA lock, will try to unlock HPA\n");
		/* try unlocking HPA; -EIO makes EH retry with the flag set */
		dev->flags |= ATA_DFLAG_UNLOCK_HPA;
		rc = -EIO;
	} else
		rc = -ENODEV;

	/* restore original n_[native_]sectors and fail */
	dev->n_native_sectors = n_native_sectors;
	dev->n_sectors = n_sectors;
fail:
	ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
/*
 * Human-readable names for the __ATA_QUIRK_* quirk flags, indexed by
 * bit number.  Used by ata_dev_print_quirks() when reporting which
 * quirks are applied to a device.
 */
static const char * const ata_quirk_names[] = {
	[__ATA_QUIRK_DIAGNOSTIC]	= "diagnostic",
	[__ATA_QUIRK_NODMA]		= "nodma",
	[__ATA_QUIRK_NONCQ]		= "noncq",
	[__ATA_QUIRK_MAX_SEC_128]	= "maxsec128",
	[__ATA_QUIRK_BROKEN_HPA]	= "brokenhpa",
	[__ATA_QUIRK_DISABLE]		= "disable",
	[__ATA_QUIRK_HPA_SIZE]		= "hpasize",
	[__ATA_QUIRK_IVB]		= "ivb",
	[__ATA_QUIRK_STUCK_ERR]		= "stuckerr",
	[__ATA_QUIRK_BRIDGE_OK]		= "bridgeok",
	[__ATA_QUIRK_ATAPI_MOD16_DMA]	= "atapimod16dma",
	[__ATA_QUIRK_FIRMWARE_WARN]	= "firmwarewarn",
	[__ATA_QUIRK_1_5_GBPS]		= "1.5gbps",
	[__ATA_QUIRK_NOSETXFER]		= "nosetxfer",
	[__ATA_QUIRK_BROKEN_FPDMA_AA]	= "brokenfpdmaaa",
	[__ATA_QUIRK_DUMP_ID]		= "dumpid",
	[__ATA_QUIRK_MAX_SEC_LBA48]	= "maxseclba48",
	[__ATA_QUIRK_ATAPI_DMADIR]	= "atapidmadir",
	[__ATA_QUIRK_NO_NCQ_TRIM]	= "noncqtrim",
	[__ATA_QUIRK_NOLPM]		= "nolpm",
	[__ATA_QUIRK_WD_BROKEN_LPM]	= "wdbrokenlpm",
	[__ATA_QUIRK_ZERO_AFTER_TRIM]	= "zeroaftertrim",
	[__ATA_QUIRK_NO_DMA_LOG]	= "nodmalog",
	[__ATA_QUIRK_NOTRIM]		= "notrim",
	[__ATA_QUIRK_MAX_SEC_1024]	= "maxsec1024",
	[__ATA_QUIRK_MAX_TRIM_128M]	= "maxtrim128m",
	[__ATA_QUIRK_NO_NCQ_ON_ATI]	= "noncqonati",
	[__ATA_QUIRK_NO_LPM_ON_ATI]	= "nolpmonati",
	[__ATA_QUIRK_NO_ID_DEV_LOG]	= "noiddevlog",
	[__ATA_QUIRK_NO_LOG_DIR]	= "nologdir",
	[__ATA_QUIRK_NO_FUA]		= "nofua",
};
  3403. static void ata_dev_print_quirks(const struct ata_device *dev,
  3404. const char *model, const char *rev,
  3405. unsigned int quirks)
  3406. {
  3407. struct ata_eh_context *ehc = &dev->link->eh_context;
  3408. int n = 0, i;
  3409. size_t sz;
  3410. char *str;
  3411. if (!ata_dev_print_info(dev) || ehc->i.flags & ATA_EHI_DID_PRINT_QUIRKS)
  3412. return;
  3413. ehc->i.flags |= ATA_EHI_DID_PRINT_QUIRKS;
  3414. if (!quirks)
  3415. return;
  3416. sz = 64 + ARRAY_SIZE(ata_quirk_names) * 16;
  3417. str = kmalloc(sz, GFP_KERNEL);
  3418. if (!str)
  3419. return;
  3420. n = snprintf(str, sz, "Model '%s', rev '%s', applying quirks:",
  3421. model, rev);
  3422. for (i = 0; i < ARRAY_SIZE(ata_quirk_names); i++) {
  3423. if (quirks & (1U << i))
  3424. n += snprintf(str + n, sz - n,
  3425. " %s", ata_quirk_names[i]);
  3426. }
  3427. ata_dev_warn(dev, "%s\n", str);
  3428. kfree(str);
  3429. }
/*
 * One entry of the device quirks table.  The model number and revision
 * are glob patterns matched against the device's IDENTIFY strings
 * (see the glob_match() calls in ata_dev_quirks()).
 */
struct ata_dev_quirks_entry {
	const char *model_num;	/* product model glob pattern */
	const char *model_rev;	/* firmware revision glob pattern, or NULL
				   to match any revision */
	unsigned int quirks;	/* ATA_QUIRK_* flags to apply on a match */
};
static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
	/*
	 * NOTE: model numbers and revisions are glob patterns, and the
	 * first matching entry wins (ata_dev_quirks() returns on the
	 * first match) - keep more specific patterns before broader ones.
	 */
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_QUIRK_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_QUIRK_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_QUIRK_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_QUIRK_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_QUIRK_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_QUIRK_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_QUIRK_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_QUIRK_NODMA },
	{ "CRD-8400B",		NULL,		ATA_QUIRK_NODMA },
	{ "CRD-848[02]B",	NULL,		ATA_QUIRK_NODMA },
	{ "CRD-84",		NULL,		ATA_QUIRK_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_QUIRK_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_QUIRK_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_QUIRK_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_QUIRK_NODMA },
	{ "HITACHI CDR-8[34]35", NULL,		ATA_QUIRK_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_QUIRK_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_QUIRK_NODMA },
	{ "CD-532E-A",		NULL,		ATA_QUIRK_NODMA },
	{ "E-IDE CD-ROM CR-840", NULL,		ATA_QUIRK_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_QUIRK_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_QUIRK_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_QUIRK_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_QUIRK_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL, ATA_QUIRK_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_QUIRK_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_QUIRK_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_QUIRK_NODMA },
	{ " 2GB ATA Flash Disk", "ADMA428M",	ATA_QUIRK_NODMA },
	{ "VRFDFC22048UCHC-TE*", NULL,		ATA_QUIRK_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config  Disk",	NULL,		ATA_QUIRK_DISABLE },
	/* Similar story with ASMedia 1092 */
	{ "ASMT109x- Config",	NULL,		ATA_QUIRK_DISABLE },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_QUIRK_MAX_SEC_128 },
	{ "QUANTUM DAT    DAT72-000", NULL,	ATA_QUIRK_ATAPI_MOD16_DMA },
	{ "Slimtype DVD A  DS8A8SH", NULL,	ATA_QUIRK_MAX_SEC_LBA48 },
	{ "Slimtype DVD A  DS8A9SH", NULL,	ATA_QUIRK_MAX_SEC_LBA48 },

	/*
	 * Causes silent data corruption with higher max sects.
	 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
	 */
	{ "ST380013AS",		"3.20",		ATA_QUIRK_MAX_SEC_1024 },

	/*
	 * These devices time out with higher max sects.
	 * https://bugzilla.kernel.org/show_bug.cgi?id=121671
	 */
	{ "LITEON CX1-JB*-HP",	NULL,		ATA_QUIRK_MAX_SEC_1024 },
	{ "LITEON EP1-*",	NULL,		ATA_QUIRK_MAX_SEC_1024 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_QUIRK_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_QUIRK_NONCQ },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_QUIRK_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_QUIRK_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_QUIRK_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_QUIRK_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_QUIRK_NONCQ },
	{ "OCZ CORE_SSD",	"02.10104",	ATA_QUIRK_NONCQ },

	/* Seagate NCQ + FLUSH CACHE firmware bug */
	{ "ST31500341AS",	"SD1[5-9]",	ATA_QUIRK_NONCQ |
						ATA_QUIRK_FIRMWARE_WARN },

	{ "ST31000333AS",	"SD1[5-9]",	ATA_QUIRK_NONCQ |
						ATA_QUIRK_FIRMWARE_WARN },

	{ "ST3640[36]23AS",	"SD1[5-9]",	ATA_QUIRK_NONCQ |
						ATA_QUIRK_FIRMWARE_WARN },

	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_QUIRK_NONCQ |
						ATA_QUIRK_FIRMWARE_WARN },

	/* drives which fail FPDMA_AA activation (some may freeze afterwards)
	   the ST disks also have LPM issues */
	{ "ST1000LM024 HN-M101MBB", NULL,	ATA_QUIRK_BROKEN_FPDMA_AA |
						ATA_QUIRK_NOLPM },
	{ "VB0250EAVER",	"HPG7",		ATA_QUIRK_BROKEN_FPDMA_AA },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",	ATA_QUIRK_NONCQ },
	{ "HTS541080G9SA00",    "MB4OC60D",	ATA_QUIRK_NONCQ },
	{ "HTS541010G9SA00",    "MBZOC60D",	ATA_QUIRK_NONCQ },

	/* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */
	{ "C300-CTFDDAC128MAG",	"0001",		ATA_QUIRK_NONCQ },

	/* Sandisk SD7/8/9s lock up hard on large trims */
	{ "SanDisk SD[789]*",	NULL,		ATA_QUIRK_MAX_TRIM_128M },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_QUIRK_BROKEN_HPA },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_QUIRK_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_QUIRK_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_QUIRK_BROKEN_HPA },

	/* this one allows HPA unlocking but fails IOs on the area */
	{ "OCZ-VERTEX",		    "1.30",	ATA_QUIRK_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_QUIRK_HPA_SIZE },
	{ "ST320413A",		NULL,		ATA_QUIRK_HPA_SIZE },
	{ "ST310211A",		NULL,		ATA_QUIRK_HPA_SIZE },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_QUIRK_IVB },
	/* Maybe we should just add all TSSTcorp devices... */
	{ "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]",  ATA_QUIRK_IVB },

	/* Devices that do not need bridging limits applied */
	{ "MTRON MSP-SATA*",	NULL,		ATA_QUIRK_BRIDGE_OK },
	{ "BUFFALO HD-QSU2/R5",	NULL,		ATA_QUIRK_BRIDGE_OK },

	/* Devices which aren't very happy with higher link speeds */
	{ "WD My Book",		NULL,		ATA_QUIRK_1_5_GBPS },
	{ "Seagate FreeAgent GoFlex", NULL,	ATA_QUIRK_1_5_GBPS },

	/*
	 * Devices which choke on SETXFER.  Applies only if both the
	 * device and controller are SATA.
	 */
	{ "PIONEER DVD-RW  DVRTD08",	NULL,	ATA_QUIRK_NOSETXFER },
	{ "PIONEER DVD-RW  DVRTD08A",	NULL,	ATA_QUIRK_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-215",	NULL,	ATA_QUIRK_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-212D",	NULL,	ATA_QUIRK_NOSETXFER },
	{ "PIONEER DVD-RW  DVR-216D",	NULL,	ATA_QUIRK_NOSETXFER },

	/* These specific Pioneer models have LPM issues */
	{ "PIONEER BD-RW   BDR-207M",	NULL,	ATA_QUIRK_NOLPM },
	{ "PIONEER BD-RW   BDR-205",	NULL,	ATA_QUIRK_NOLPM },

	/* Crucial devices with broken LPM support */
	{ "CT*0BX*00SSD1",		NULL,	ATA_QUIRK_NOLPM },

	/* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
	{ "Crucial_CT512MX100*",	"MU01",	ATA_QUIRK_NO_NCQ_TRIM |
						ATA_QUIRK_ZERO_AFTER_TRIM |
						ATA_QUIRK_NOLPM },
	/* 512GB MX100 with newer firmware has only LPM issues */
	{ "Crucial_CT512MX100*",	NULL,	ATA_QUIRK_ZERO_AFTER_TRIM |
						ATA_QUIRK_NOLPM },

	/* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
	{ "Crucial_CT480M500*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
						ATA_QUIRK_ZERO_AFTER_TRIM |
						ATA_QUIRK_NOLPM },
	{ "Crucial_CT960M500*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
						ATA_QUIRK_ZERO_AFTER_TRIM |
						ATA_QUIRK_NOLPM },

	/* AMD Radeon devices with broken LPM support */
	{ "R3SL240G",			NULL,	ATA_QUIRK_NOLPM },

	/* Apacer models with LPM issues */
	{ "Apacer AS340*",		NULL,	ATA_QUIRK_NOLPM },

	/* These specific Samsung models/firmware-revs do not handle LPM well */
	{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_QUIRK_NOLPM },
	{ "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_QUIRK_NOLPM },
	{ "SAMSUNG MZ7TD256HAFV-000L9", NULL,	ATA_QUIRK_NOLPM },
	{ "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_QUIRK_NOLPM },

	/* devices that don't properly handle queued TRIM commands */
	{ "Micron_M500IT_*",		"MU01",	ATA_QUIRK_NO_NCQ_TRIM |
						ATA_QUIRK_ZERO_AFTER_TRIM },
	{ "Micron_M500_*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
						ATA_QUIRK_ZERO_AFTER_TRIM },
	{ "Micron_M5[15]0_*",		"MU01",	ATA_QUIRK_NO_NCQ_TRIM |
						ATA_QUIRK_ZERO_AFTER_TRIM },
	{ "Micron_1100_*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
						ATA_QUIRK_ZERO_AFTER_TRIM, },
	{ "Crucial_CT*M500*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
						ATA_QUIRK_ZERO_AFTER_TRIM },
	{ "Crucial_CT*M550*",		"MU01",	ATA_QUIRK_NO_NCQ_TRIM |
						ATA_QUIRK_ZERO_AFTER_TRIM },
	{ "Crucial_CT*MX100*",		"MU01",	ATA_QUIRK_NO_NCQ_TRIM |
						ATA_QUIRK_ZERO_AFTER_TRIM },
	{ "Samsung SSD 840 EVO*",	NULL,	ATA_QUIRK_NO_NCQ_TRIM |
						ATA_QUIRK_NO_DMA_LOG |
						ATA_QUIRK_ZERO_AFTER_TRIM },
	{ "Samsung SSD 840*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
						ATA_QUIRK_ZERO_AFTER_TRIM },
	{ "Samsung SSD 850*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
						ATA_QUIRK_ZERO_AFTER_TRIM },
	{ "Samsung SSD 860*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
						ATA_QUIRK_ZERO_AFTER_TRIM |
						ATA_QUIRK_NO_NCQ_ON_ATI |
						ATA_QUIRK_NO_LPM_ON_ATI },
	{ "Samsung SSD 870*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
						ATA_QUIRK_ZERO_AFTER_TRIM |
						ATA_QUIRK_NO_NCQ_ON_ATI |
						ATA_QUIRK_NO_LPM_ON_ATI },
	{ "SAMSUNG*MZ7LH*",		NULL,	ATA_QUIRK_NO_NCQ_TRIM |
						ATA_QUIRK_ZERO_AFTER_TRIM |
						ATA_QUIRK_NO_NCQ_ON_ATI |
						ATA_QUIRK_NO_LPM_ON_ATI },
	{ "FCCT*M500*",			NULL,	ATA_QUIRK_NO_NCQ_TRIM |
						ATA_QUIRK_ZERO_AFTER_TRIM },

	/* devices that don't properly handle TRIM commands */
	{ "SuperSSpeed S238*",		NULL,	ATA_QUIRK_NOTRIM },
	{ "M88V29*",			NULL,	ATA_QUIRK_NOTRIM },

	/*
	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
	 * (Return Zero After Trim) flags in the ATA Command Set are
	 * unreliable in the sense that they only define what happens if
	 * the device successfully executed the DSM TRIM command. TRIM
	 * is only advisory, however, and the device is free to silently
	 * ignore all or parts of the request.
	 *
	 * Whitelist drives that are known to reliably return zeroes
	 * after TRIM.
	 */

	/*
	 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
	 * that model before whitelisting all other intel SSDs.
	 */
	{ "INTEL*SSDSC2MH*",		NULL,	0 },

	{ "Micron*",			NULL,	ATA_QUIRK_ZERO_AFTER_TRIM },
	{ "Crucial*",			NULL,	ATA_QUIRK_ZERO_AFTER_TRIM },
	{ "INTEL*SSD*",			NULL,	ATA_QUIRK_ZERO_AFTER_TRIM },
	{ "SSD*INTEL*",			NULL,	ATA_QUIRK_ZERO_AFTER_TRIM },
	{ "Samsung*SSD*",		NULL,	ATA_QUIRK_ZERO_AFTER_TRIM },
	{ "SAMSUNG*SSD*",		NULL,	ATA_QUIRK_ZERO_AFTER_TRIM },
	{ "SAMSUNG*MZ7KM*",		NULL,	ATA_QUIRK_ZERO_AFTER_TRIM },
	{ "ST[1248][0248]0[FH]*",	NULL,	ATA_QUIRK_ZERO_AFTER_TRIM },

	/*
	 * Some WD SATA-I drives spin up and down erratically when the link
	 * is put into the slumber mode.  We don't have full list of the
	 * affected devices.  Disable LPM if the device matches one of the
	 * known prefixes and is SATA-1.  As a side effect LPM partial is
	 * lost too.
	 *
	 * https://bugzilla.kernel.org/show_bug.cgi?id=57211
	 */
	{ "WDC WD800JD-*",		NULL,	ATA_QUIRK_WD_BROKEN_LPM },
	{ "WDC WD1200JD-*",		NULL,	ATA_QUIRK_WD_BROKEN_LPM },
	{ "WDC WD1600JD-*",		NULL,	ATA_QUIRK_WD_BROKEN_LPM },
	{ "WDC WD2000JD-*",		NULL,	ATA_QUIRK_WD_BROKEN_LPM },
	{ "WDC WD2500JD-*",		NULL,	ATA_QUIRK_WD_BROKEN_LPM },
	{ "WDC WD3000JD-*",		NULL,	ATA_QUIRK_WD_BROKEN_LPM },
	{ "WDC WD3200JD-*",		NULL,	ATA_QUIRK_WD_BROKEN_LPM },

	/*
	 * This sata dom device goes on a walkabout when the ATA_LOG_DIRECTORY
	 * log page is accessed. Ensure we never ask for this log page with
	 * these devices.
	 */
	{ "SATADOM-ML 3ME",		NULL,	ATA_QUIRK_NO_LOG_DIR },

	/* Buggy FUA */
	{ "Maxtor",		"BANC1G10",	ATA_QUIRK_NO_FUA },
	{ "WDC*WD2500J*",	NULL,		ATA_QUIRK_NO_FUA },
	{ "OCZ-VERTEX*",	NULL,		ATA_QUIRK_NO_FUA },
	{ "INTEL*SSDSC2CT*",	NULL,		ATA_QUIRK_NO_FUA },

	/* End Marker */
	{ }
};
  3674. static unsigned int ata_dev_quirks(const struct ata_device *dev)
  3675. {
  3676. unsigned char model_num[ATA_ID_PROD_LEN + 1];
  3677. unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
  3678. const struct ata_dev_quirks_entry *ad = __ata_dev_quirks;
  3679. /* dev->quirks is an unsigned int. */
  3680. BUILD_BUG_ON(__ATA_QUIRK_MAX > 32);
  3681. ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
  3682. ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
  3683. while (ad->model_num) {
  3684. if (glob_match(ad->model_num, model_num) &&
  3685. (!ad->model_rev || glob_match(ad->model_rev, model_rev))) {
  3686. ata_dev_print_quirks(dev, model_num, model_rev,
  3687. ad->quirks);
  3688. return ad->quirks;
  3689. }
  3690. ad++;
  3691. }
  3692. return 0;
  3693. }
  3694. static bool ata_dev_nodma(const struct ata_device *dev)
  3695. {
  3696. /*
  3697. * We do not support polling DMA. Deny DMA for those ATAPI devices
  3698. * with CDB-intr (and use PIO) if the LLDD handles only interrupts in
  3699. * the HSM_ST_LAST state.
  3700. */
  3701. if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
  3702. (dev->flags & ATA_DFLAG_CDB_INTR))
  3703. return true;
  3704. return dev->quirks & ATA_QUIRK_NODMA;
  3705. }
  3706. /**
  3707. * ata_is_40wire - check drive side detection
  3708. * @dev: device
  3709. *
  3710. * Perform drive side detection decoding, allowing for device vendors
  3711. * who can't follow the documentation.
  3712. */
  3713. static int ata_is_40wire(struct ata_device *dev)
  3714. {
  3715. if (dev->quirks & ATA_QUIRK_IVB)
  3716. return ata_drive_40wire_relaxed(dev->id);
  3717. return ata_drive_40wire(dev->id);
  3718. }
  3719. /**
  3720. * cable_is_40wire - 40/80/SATA decider
  3721. * @ap: port to consider
  3722. *
  3723. * This function encapsulates the policy for speed management
  3724. * in one place. At the moment we don't cache the result but
  3725. * there is a good case for setting ap->cbl to the result when
  3726. * we are called with unknown cables (and figuring out if it
  3727. * impacts hotplug at all).
  3728. *
  3729. * Return 1 if the cable appears to be 40 wire.
  3730. */
  3731. static int cable_is_40wire(struct ata_port *ap)
  3732. {
  3733. struct ata_link *link;
  3734. struct ata_device *dev;
  3735. /* If the controller thinks we are 40 wire, we are. */
  3736. if (ap->cbl == ATA_CBL_PATA40)
  3737. return 1;
  3738. /* If the controller thinks we are 80 wire, we are. */
  3739. if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
  3740. return 0;
  3741. /* If the system is known to be 40 wire short cable (eg
  3742. * laptop), then we allow 80 wire modes even if the drive
  3743. * isn't sure.
  3744. */
  3745. if (ap->cbl == ATA_CBL_PATA40_SHORT)
  3746. return 0;
  3747. /* If the controller doesn't know, we scan.
  3748. *
  3749. * Note: We look for all 40 wire detects at this point. Any
  3750. * 80 wire detect is taken to be 80 wire cable because
  3751. * - in many setups only the one drive (slave if present) will
  3752. * give a valid detect
  3753. * - if you have a non detect capable drive you don't want it
  3754. * to colour the choice
  3755. */
  3756. ata_for_each_link(link, ap, EDGE) {
  3757. ata_for_each_dev(dev, link, ENABLED) {
  3758. if (!ata_is_40wire(dev))
  3759. return 0;
  3760. }
  3761. }
  3762. return 1;
  3763. }
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device quirks, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned int xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 * CFA Advanced TrueIDE timings are not allowed on a shared
	 * cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dev_nodma(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "device does not support DMA, disabling DMA\n");
	}

	/* On a simplex host, only the port that claimed DMA may use it. */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_warn(dev,
			     "simplex DMA is claimed by other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* Give the LLDD a chance to filter out modes (e.g. chip errata). */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well. Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_warn(dev,
				     "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
  3831. /**
  3832. * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
  3833. * @dev: Device to which command will be sent
  3834. *
  3835. * Issue SET FEATURES - XFER MODE command to device @dev
  3836. * on port @ap.
  3837. *
  3838. * LOCKING:
  3839. * PCI/etc. bus probe sem.
  3840. *
  3841. * RETURNS:
  3842. * 0 on success, AC_ERR_* mask otherwise.
  3843. */
  3844. static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
  3845. {
  3846. struct ata_taskfile tf;
  3847. /* set up set-features taskfile */
  3848. ata_dev_dbg(dev, "set features - xfer mode\n");
  3849. /* Some controllers and ATAPI devices show flaky interrupt
  3850. * behavior after setting xfer mode. Use polling instead.
  3851. */
  3852. ata_tf_init(dev, &tf);
  3853. tf.command = ATA_CMD_SET_FEATURES;
  3854. tf.feature = SETFEATURES_XFER;
  3855. tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
  3856. tf.protocol = ATA_PROT_NODATA;
  3857. /* If we are using IORDY we must send the mode setting command */
  3858. if (ata_pio_need_iordy(dev))
  3859. tf.nsect = dev->xfer_mode;
  3860. /* If the device has IORDY and the controller does not - turn it off */
  3861. else if (ata_id_has_iordy(dev->id))
  3862. tf.nsect = 0x01;
  3863. else /* In the ancient relic department - skip all of this */
  3864. return 0;
  3865. /*
  3866. * On some disks, this command causes spin-up, so we need longer
  3867. * timeout.
  3868. */
  3869. return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
  3870. }
  3871. /**
  3872. * ata_dev_set_feature - Issue SET FEATURES
  3873. * @dev: Device to which command will be sent
  3874. * @subcmd: The SET FEATURES subcommand to be sent
  3875. * @action: The sector count represents a subcommand specific action
  3876. *
  3877. * Issue SET FEATURES command to device @dev on port @ap with sector count
  3878. *
  3879. * LOCKING:
  3880. * PCI/etc. bus probe sem.
  3881. *
  3882. * RETURNS:
  3883. * 0 on success, AC_ERR_* mask otherwise.
  3884. */
  3885. unsigned int ata_dev_set_feature(struct ata_device *dev, u8 subcmd, u8 action)
  3886. {
  3887. struct ata_taskfile tf;
  3888. unsigned int timeout = 0;
  3889. /* set up set-features taskfile */
  3890. ata_dev_dbg(dev, "set features\n");
  3891. ata_tf_init(dev, &tf);
  3892. tf.command = ATA_CMD_SET_FEATURES;
  3893. tf.feature = subcmd;
  3894. tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
  3895. tf.protocol = ATA_PROT_NODATA;
  3896. tf.nsect = action;
  3897. if (subcmd == SETFEATURES_SPINUP)
  3898. timeout = ata_probe_timeout ?
  3899. ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
  3900. return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
  3901. }
  3902. EXPORT_SYMBOL_GPL(ata_dev_set_feature);
  3903. /**
  3904. * ata_dev_init_params - Issue INIT DEV PARAMS command
  3905. * @dev: Device to which command will be sent
  3906. * @heads: Number of heads (taskfile parameter)
  3907. * @sectors: Number of sectors (taskfile parameter)
  3908. *
  3909. * LOCKING:
  3910. * Kernel thread context (may sleep)
  3911. *
  3912. * RETURNS:
  3913. * 0 on success, AC_ERR_* mask otherwise.
  3914. */
  3915. static unsigned int ata_dev_init_params(struct ata_device *dev,
  3916. u16 heads, u16 sectors)
  3917. {
  3918. struct ata_taskfile tf;
  3919. unsigned int err_mask;
  3920. /* Number of sectors per track 1-255. Number of heads 1-16 */
  3921. if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
  3922. return AC_ERR_INVALID;
  3923. /* set up init dev params taskfile */
  3924. ata_dev_dbg(dev, "init dev params \n");
  3925. ata_tf_init(dev, &tf);
  3926. tf.command = ATA_CMD_INIT_DEV_PARAMS;
  3927. tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
  3928. tf.protocol = ATA_PROT_NODATA;
  3929. tf.nsect = sectors;
  3930. tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
  3931. err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
  3932. /* A clean abort indicates an original or just out of spec drive
  3933. and we should continue as we issue the setup based on the
  3934. drive reported working geometry */
  3935. if (err_mask == AC_ERR_DEV && (tf.error & ATA_ABORTED))
  3936. err_mask = 0;
  3937. return err_mask;
  3938. }
  3939. /**
  3940. * atapi_check_dma - Check whether ATAPI DMA can be supported
  3941. * @qc: Metadata associated with taskfile to check
  3942. *
  3943. * Allow low-level driver to filter ATA PACKET commands, returning
  3944. * a status indicating whether or not it is OK to use DMA for the
  3945. * supplied PACKET command.
  3946. *
  3947. * LOCKING:
  3948. * spin_lock_irqsave(host lock)
  3949. *
  3950. * RETURNS: 0 when ATAPI DMA can be used
  3951. * nonzero otherwise
  3952. */
  3953. int atapi_check_dma(struct ata_queued_cmd *qc)
  3954. {
  3955. struct ata_port *ap = qc->ap;
  3956. /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
  3957. * few ATAPI devices choke on such DMA requests.
  3958. */
  3959. if (!(qc->dev->quirks & ATA_QUIRK_ATAPI_MOD16_DMA) &&
  3960. unlikely(qc->nbytes & 15))
  3961. return 1;
  3962. if (ap->ops->check_atapi_dma)
  3963. return ap->ops->check_atapi_dma(qc);
  3964. return 0;
  3965. }
  3966. /**
  3967. * ata_std_qc_defer - Check whether a qc needs to be deferred
  3968. * @qc: ATA command in question
  3969. *
  3970. * Non-NCQ commands cannot run with any other command, NCQ or
  3971. * not. As upper layer only knows the queue depth, we are
  3972. * responsible for maintaining exclusion. This function checks
  3973. * whether a new command @qc can be issued.
  3974. *
  3975. * LOCKING:
  3976. * spin_lock_irqsave(host lock)
  3977. *
  3978. * RETURNS:
  3979. * ATA_DEFER_* if deferring is needed, 0 otherwise.
  3980. */
  3981. int ata_std_qc_defer(struct ata_queued_cmd *qc)
  3982. {
  3983. struct ata_link *link = qc->dev->link;
  3984. if (ata_is_ncq(qc->tf.protocol)) {
  3985. if (!ata_tag_valid(link->active_tag))
  3986. return 0;
  3987. } else {
  3988. if (!ata_tag_valid(link->active_tag) && !link->sactive)
  3989. return 0;
  3990. }
  3991. return ATA_DEFER_LINK;
  3992. }
  3993. EXPORT_SYMBOL_GPL(ata_std_qc_defer);
/**
 *	ata_sg_init - Associate command with scatter-gather table.
 *	@qc: Command to be associated
 *	@sg: Scatter-gather table.
 *	@n_elem: Number of elements in s/g table.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a scatter-gather table @sg, containing @n_elem
 *	elements.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->sg = sg;
	qc->n_elem = n_elem;
	/* start the s/g cursor at the first element */
	qc->cursg = qc->sg;
}
  4014. #ifdef CONFIG_HAS_DMA
  4015. /**
  4016. * ata_sg_clean - Unmap DMA memory associated with command
  4017. * @qc: Command containing DMA memory to be released
  4018. *
  4019. * Unmap all mapped DMA memory associated with this command.
  4020. *
  4021. * LOCKING:
  4022. * spin_lock_irqsave(host lock)
  4023. */
  4024. static void ata_sg_clean(struct ata_queued_cmd *qc)
  4025. {
  4026. struct ata_port *ap = qc->ap;
  4027. struct scatterlist *sg = qc->sg;
  4028. int dir = qc->dma_dir;
  4029. WARN_ON_ONCE(sg == NULL);
  4030. if (qc->n_elem)
  4031. dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
  4032. qc->flags &= ~ATA_QCFLAG_DMAMAP;
  4033. qc->sg = NULL;
  4034. }
  4035. /**
  4036. * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
  4037. * @qc: Command with scatter-gather table to be mapped.
  4038. *
  4039. * DMA-map the scatter-gather table associated with queued_cmd @qc.
  4040. *
  4041. * LOCKING:
  4042. * spin_lock_irqsave(host lock)
  4043. *
  4044. * RETURNS:
  4045. * Zero on success, negative on error.
  4046. *
  4047. */
  4048. static int ata_sg_setup(struct ata_queued_cmd *qc)
  4049. {
  4050. struct ata_port *ap = qc->ap;
  4051. unsigned int n_elem;
  4052. n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
  4053. if (n_elem < 1)
  4054. return -1;
  4055. qc->orig_n_elem = qc->n_elem;
  4056. qc->n_elem = n_elem;
  4057. qc->flags |= ATA_QCFLAG_DMAMAP;
  4058. return 0;
  4059. }
#else /* !CONFIG_HAS_DMA */

/* Stubs: without DMA support, cleanup is a no-op and mapping always fails. */
static inline void ata_sg_clean(struct ata_queued_cmd *qc) {}
static inline int ata_sg_setup(struct ata_queued_cmd *qc) { return -1; }

#endif /* !CONFIG_HAS_DMA */
  4064. /**
  4065. * swap_buf_le16 - swap halves of 16-bit words in place
  4066. * @buf: Buffer to swap
  4067. * @buf_words: Number of 16-bit words in buffer.
  4068. *
  4069. * Swap halves of 16-bit words if needed to convert from
  4070. * little-endian byte order to native cpu byte order, or
  4071. * vice-versa.
  4072. *
  4073. * LOCKING:
  4074. * Inherited from caller.
  4075. */
  4076. void swap_buf_le16(u16 *buf, unsigned int buf_words)
  4077. {
  4078. #ifdef __BIG_ENDIAN
  4079. unsigned int i;
  4080. for (i = 0; i < buf_words; i++)
  4081. buf[i] = le16_to_cpu(buf[i]);
  4082. #endif /* __BIG_ENDIAN */
  4083. }
/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	qc->flags = 0;
	/* Poison the tag so stale references fail ata_tag_valid(). */
	if (ata_tag_valid(qc->tag))
		qc->tag = ATA_TAG_POISON;
}
/*
 * __ata_qc_complete - bottom half of qc completion
 * @qc: Command to complete
 *
 * Tear down the active-command bookkeeping for @qc (DMA unmap, link
 * active_tag/sactive bits, port active mask and link counts) and then
 * invoke the completion callback.  Called under the host lock (see
 * ata_qc_complete()).
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap;
	struct ata_link *link;

	/* Completing a qc that is not active is a bug; bail out. */
	if (WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)))
		return;

	ap = qc->ap;
	link = qc->dev->link;

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (ata_is_ncq(qc->tf.protocol)) {
		link->sactive &= ~(1 << qc->hw_tag);
		/* The link stops being active once its last NCQ qc is done. */
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/*
	 * Mark qc as inactive to prevent the port interrupt handler from
	 * completing the command twice later, before the error handler is
	 * called.
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1ULL << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
/* Fill qc->result_tf from the hardware, unless it was already filled. */
static void fill_result_tf(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/*
	 * rtf may already be filled (e.g. for successful NCQ commands).
	 * If that is the case, we have nothing to do.
	 */
	if (qc->flags & ATA_QCFLAG_RTF_FILLED)
		return;

	qc->result_tf.flags = qc->tf.flags;
	ap->ops->qc_fill_rtf(qc);
	qc->flags |= ATA_QCFLAG_RTF_FILLED;
}
  4146. static void ata_verify_xfer(struct ata_queued_cmd *qc)
  4147. {
  4148. struct ata_device *dev = qc->dev;
  4149. if (!ata_is_data(qc->tf.protocol))
  4150. return;
  4151. if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
  4152. return;
  4153. dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
  4154. }
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA command has
 *	completed, with either an ok or not-ok status.
 *
 *	Refrain from calling this function multiple times when
 *	successfully completing multiple NCQ commands.
 *	ata_qc_complete_multiple() should be used instead, which will
 *	properly update IRQ expect state.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;

	/* Trigger the LED (if available) */
	ledtrig_disk_activity(!!(qc->tf.flags & ATA_TFLAG_WRITE));

	/*
	 * In order to synchronize EH with the regular execution path, a qc that
	 * is owned by EH is marked with ATA_QCFLAG_EH.
	 *
	 * The normal execution path is responsible for not accessing a qc owned
	 * by EH.  libata core enforces the rule by returning NULL from
	 * ata_qc_from_tag() for qcs owned by EH.
	 */
	if (unlikely(qc->err_mask))
		qc->flags |= ATA_QCFLAG_EH;

	/*
	 * Finish internal commands without any further processing and always
	 * with the result TF filled.
	 */
	if (unlikely(ata_tag_internal(qc->tag))) {
		fill_result_tf(qc);
		trace_ata_qc_complete_internal(qc);
		__ata_qc_complete(qc);
		return;
	}

	/* Non-internal qc has failed.  Fill the result TF and summon EH. */
	if (unlikely(qc->flags & ATA_QCFLAG_EH)) {
		fill_result_tf(qc);
		trace_ata_qc_complete_failed(qc);
		ata_qc_schedule_eh(qc);
		return;
	}

	/* Successful completion should not happen on a frozen port. */
	WARN_ON_ONCE(ata_port_is_frozen(ap));

	/* read result TF if requested */
	if (qc->flags & ATA_QCFLAG_RESULT_TF)
		fill_result_tf(qc);

	trace_ata_qc_complete_done(qc);

	/*
	 * For CDL commands that completed without an error, check if we have
	 * sense data (ATA_SENSE is set). If we do, then the command may have
	 * been aborted by the device due to a limit timeout using the policy
	 * 0xD. For these commands, invoke EH to get the command sense data.
	 */
	if (qc->flags & ATA_QCFLAG_HAS_CDL &&
	    qc->result_tf.status & ATA_SENSE) {
		/*
		 * Tell SCSI EH to not overwrite scmd->result even if this
		 * command is finished with result SAM_STAT_GOOD.
		 */
		qc->scsicmd->flags |= SCMD_FORCE_EH_SUCCESS;
		qc->flags |= ATA_QCFLAG_EH_SUCCESS_CMD;
		ehi->dev_action[dev->devno] |= ATA_EH_GET_SUCCESS_SENSE;

		/*
		 * set pending so that ata_qc_schedule_eh() does not trigger
		 * fast drain, and freeze the port.
		 */
		ap->pflags |= ATA_PFLAG_EH_PENDING;
		ata_qc_schedule_eh(qc);
		return;
	}

	/* Some commands need post-processing after successful completion. */
	switch (qc->tf.command) {
	case ATA_CMD_SET_FEATURES:
		if (qc->tf.feature != SETFEATURES_WC_ON &&
		    qc->tf.feature != SETFEATURES_WC_OFF &&
		    qc->tf.feature != SETFEATURES_RA_ON &&
		    qc->tf.feature != SETFEATURES_RA_OFF)
			break;
		fallthrough;
	case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
	case ATA_CMD_SET_MULTI: /* multi_count changed */
		/* revalidate device */
		ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
		ata_port_schedule_eh(ap);
		break;

	case ATA_CMD_SLEEP:
		dev->flags |= ATA_DFLAG_SLEEPING;
		break;
	}

	if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
		ata_verify_xfer(qc);

	__ata_qc_complete(qc);
}
EXPORT_SYMBOL_GPL(ata_qc_complete);
  4256. /**
  4257. * ata_qc_get_active - get bitmask of active qcs
  4258. * @ap: port in question
  4259. *
  4260. * LOCKING:
  4261. * spin_lock_irqsave(host lock)
  4262. *
  4263. * RETURNS:
  4264. * Bitmask of active qcs
  4265. */
  4266. u64 ata_qc_get_active(struct ata_port *ap)
  4267. {
  4268. u64 qc_active = ap->qc_active;
  4269. /* ATA_TAG_INTERNAL is sent to hw as tag 0 */
  4270. if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
  4271. qc_active |= (1 << 0);
  4272. qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
  4273. }
  4274. return qc_active;
  4275. }
  4276. EXPORT_SYMBOL_GPL(ata_qc_get_active);
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/* Make sure only one non-NCQ command is outstanding. */
	WARN_ON_ONCE(ata_tag_valid(link->active_tag));

	/* Account the command as active on the link and port. */
	if (ata_is_ncq(prot)) {
		WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->hw_tag;
	} else {
		WARN_ON_ONCE(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1ULL << qc->tag;

	/*
	 * We guarantee to LLDs that they will have at least one
	 * non-zero sg if the command is a data command.
	 */
	if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
		goto sys_err;

	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sys_err;

	/* if device is sleeping, schedule reset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_RESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	if (ap->ops->qc_prep) {
		trace_ata_qc_prep(qc);
		qc->err_mask |= ap->ops->qc_prep(qc);
		if (unlikely(qc->err_mask))
			goto err;
	}

	trace_ata_qc_issue(qc);
	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sys_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	/* completing with err_mask set routes the qc to EH */
	ata_qc_complete(qc);
}
  4341. /**
  4342. * ata_phys_link_online - test whether the given link is online
  4343. * @link: ATA link to test
  4344. *
  4345. * Test whether @link is online. Note that this function returns
  4346. * 0 if online status of @link cannot be obtained, so
  4347. * ata_link_online(link) != !ata_link_offline(link).
  4348. *
  4349. * LOCKING:
  4350. * None.
  4351. *
  4352. * RETURNS:
  4353. * True if the port online status is available and online.
  4354. */
  4355. bool ata_phys_link_online(struct ata_link *link)
  4356. {
  4357. u32 sstatus;
  4358. if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
  4359. ata_sstatus_online(sstatus))
  4360. return true;
  4361. return false;
  4362. }
  4363. /**
  4364. * ata_phys_link_offline - test whether the given link is offline
  4365. * @link: ATA link to test
  4366. *
  4367. * Test whether @link is offline. Note that this function
  4368. * returns 0 if offline status of @link cannot be obtained, so
  4369. * ata_link_online(link) != !ata_link_offline(link).
  4370. *
  4371. * LOCKING:
  4372. * None.
  4373. *
  4374. * RETURNS:
  4375. * True if the port offline status is available and offline.
  4376. */
  4377. bool ata_phys_link_offline(struct ata_link *link)
  4378. {
  4379. u32 sstatus;
  4380. if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
  4381. !ata_sstatus_online(sstatus))
  4382. return true;
  4383. return false;
  4384. }
  4385. /**
  4386. * ata_link_online - test whether the given link is online
  4387. * @link: ATA link to test
  4388. *
  4389. * Test whether @link is online. This is identical to
  4390. * ata_phys_link_online() when there's no slave link. When
  4391. * there's a slave link, this function should only be called on
  4392. * the master link and will return true if any of M/S links is
  4393. * online.
  4394. *
  4395. * LOCKING:
  4396. * None.
  4397. *
  4398. * RETURNS:
  4399. * True if the port online status is available and online.
  4400. */
  4401. bool ata_link_online(struct ata_link *link)
  4402. {
  4403. struct ata_link *slave = link->ap->slave_link;
  4404. WARN_ON(link == slave); /* shouldn't be called on slave link */
  4405. return ata_phys_link_online(link) ||
  4406. (slave && ata_phys_link_online(slave));
  4407. }
  4408. EXPORT_SYMBOL_GPL(ata_link_online);
  4409. /**
  4410. * ata_link_offline - test whether the given link is offline
  4411. * @link: ATA link to test
  4412. *
  4413. * Test whether @link is offline. This is identical to
  4414. * ata_phys_link_offline() when there's no slave link. When
  4415. * there's a slave link, this function should only be called on
  4416. * the master link and will return true if both M/S links are
  4417. * offline.
  4418. *
  4419. * LOCKING:
  4420. * None.
  4421. *
  4422. * RETURNS:
  4423. * True if the port offline status is available and offline.
  4424. */
  4425. bool ata_link_offline(struct ata_link *link)
  4426. {
  4427. struct ata_link *slave = link->ap->slave_link;
  4428. WARN_ON(link == slave); /* shouldn't be called on slave link */
  4429. return ata_phys_link_offline(link) &&
  4430. (!slave || ata_phys_link_offline(slave));
  4431. }
  4432. EXPORT_SYMBOL_GPL(ata_link_offline);
  4433. #ifdef CONFIG_PM
/*
 * ata_port_request_pm - request a PM operation from EH
 * @ap: port to operate on
 * @mesg: PM message to record in ap->pm_mesg
 * @action: EH action to set on each link
 * @ehi_flags: EH info flags to set on each link
 * @async: if false, wait for EH to finish the requested operation
 *
 * Waits for any in-flight PM operation to finish, marks the port
 * PM-pending, records the request in the port/link EH state and kicks EH.
 * Takes ap->lock internally.
 */
static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
				unsigned int action, unsigned int ehi_flags,
				bool async)
{
	struct ata_link *link;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	/*
	 * A previous PM operation might still be in progress. Wait for
	 * ATA_PFLAG_PM_PENDING to clear.
	 */
	if (ap->pflags & ATA_PFLAG_PM_PENDING) {
		/* drop the lock while waiting for EH to finish */
		spin_unlock_irqrestore(ap->lock, flags);
		ata_port_wait_eh(ap);
		spin_lock_irqsave(ap->lock, flags);
	}

	/* Request PM operation to EH */
	ap->pm_mesg = mesg;
	ap->pflags |= ATA_PFLAG_PM_PENDING;
	ata_for_each_link(link, ap, HOST_FIRST) {
		link->eh_info.action |= action;
		link->eh_info.flags |= ehi_flags;
	}

	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!async)
		ata_port_wait_eh(ap);
}
/*
 * Suspend @ap: cancel any pending SCSI rescan and ask EH to perform the
 * suspend with autopsy and recovery suppressed.
 */
static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg,
			     bool async)
{
	/*
	 * We are about to suspend the port, so we do not care about
	 * scsi_rescan_device() calls scheduled by previous resume operations.
	 * The next resume will schedule the rescan again. So cancel any rescan
	 * that is not done yet.
	 */
	cancel_delayed_work_sync(&ap->scsi_rescan_task);

	/*
	 * On some hardware, device fails to respond after spun down for
	 * suspend. As the device will not be used until being resumed, we
	 * do not need to touch the device. Ask EH to skip the usual stuff
	 * and proceed directly to suspend.
	 *
	 * http://thread.gmane.org/gmane.linux.ide/46764
	 */
	ata_port_request_pm(ap, mesg, 0,
			    ATA_EHI_QUIET | ATA_EHI_NO_AUTOPSY |
			    ATA_EHI_NO_RECOVERY,
			    async);
}
  4485. static int ata_port_pm_suspend(struct device *dev)
  4486. {
  4487. struct ata_port *ap = to_ata_port(dev);
  4488. if (pm_runtime_suspended(dev))
  4489. return 0;
  4490. ata_port_suspend(ap, PMSG_SUSPEND, false);
  4491. return 0;
  4492. }
  4493. static int ata_port_pm_freeze(struct device *dev)
  4494. {
  4495. struct ata_port *ap = to_ata_port(dev);
  4496. if (pm_runtime_suspended(dev))
  4497. return 0;
  4498. ata_port_suspend(ap, PMSG_FREEZE, false);
  4499. return 0;
  4500. }
/* PM .poweroff callback (hibernation): suspend with PMSG_HIBERNATE. */
static int ata_port_pm_poweroff(struct device *dev)
{
	if (!pm_runtime_suspended(dev))
		ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE, false);
	return 0;
}
/*
 * Resume @ap: ask EH to reset the port, with autopsy suppressed and
 * quiet reporting.
 */
static void ata_port_resume(struct ata_port *ap, pm_message_t mesg,
			    bool async)
{
	ata_port_request_pm(ap, mesg, ATA_EH_RESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET,
			    async);
}
/*
 * PM .resume/.thaw/.restore callback.  Resumes asynchronously (does not
 * wait for EH to finish); no-op if the port is runtime suspended.
 */
static int ata_port_pm_resume(struct device *dev)
{
	if (!pm_runtime_suspended(dev))
		ata_port_resume(to_ata_port(dev), PMSG_RESUME, true);
	return 0;
}
/*
 * For ODDs, the upper layer will poll for media change every few seconds,
 * which will make it enter and leave suspend state every few seconds. And
 * as each suspend will cause a hard/soft reset, the gain of runtime suspend
 * is very little and the ODD may malfunction after constantly being reset.
 * So the idle callback here will not proceed to suspend if a non-ZPODD capable
 * ODD is attached to the port.
 */
static int ata_port_runtime_idle(struct device *dev)
{
	struct ata_port *ap = to_ata_port(dev);
	struct ata_link *link;
	struct ata_device *adev;

	/* Veto runtime suspend if any enabled ATAPI device lacks ZPODD. */
	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(adev, link, ENABLED)
			if (adev->class == ATA_DEV_ATAPI &&
			    !zpodd_dev_enabled(adev))
				return -EBUSY;
	}

	return 0;
}
/* Runtime PM suspend callback: synchronously suspend the port. */
static int ata_port_runtime_suspend(struct device *dev)
{
	ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND, false);
	return 0;
}
/* Runtime PM resume callback: synchronously resume the port. */
static int ata_port_runtime_resume(struct device *dev)
{
	ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME, false);
	return 0;
}
/*
 * PM operations for ATA ports.  thaw and restore share the resume
 * handler; all three paths only need the reset-based resume.
 */
static const struct dev_pm_ops ata_port_pm_ops = {
	.suspend = ata_port_pm_suspend,
	.resume = ata_port_pm_resume,
	.freeze = ata_port_pm_freeze,
	.thaw = ata_port_pm_resume,
	.poweroff = ata_port_pm_poweroff,
	.restore = ata_port_pm_resume,

	.runtime_suspend = ata_port_runtime_suspend,
	.runtime_resume = ata_port_runtime_resume,
	.runtime_idle = ata_port_runtime_idle,
};
  4562. /* sas ports don't participate in pm runtime management of ata_ports,
  4563. * and need to resume ata devices at the domain level, not the per-port
  4564. * level. sas suspend/resume is async to allow parallel port recovery
  4565. * since sas has multiple ata_port instances per Scsi_Host.
  4566. */
/*
 * ata_sas_port_suspend - suspend a SAS-attached ATA port
 * @ap: port to suspend
 *
 * Async so that multiple ata_port instances of the same Scsi_Host can
 * be suspended in parallel.
 */
void ata_sas_port_suspend(struct ata_port *ap)
{
	ata_port_suspend(ap, PMSG_SUSPEND, true);
}
  4571. EXPORT_SYMBOL_GPL(ata_sas_port_suspend);
/*
 * ata_sas_port_resume - resume a SAS-attached ATA port
 * @ap: port to resume
 *
 * Async to allow parallel port recovery across the ports of one host.
 */
void ata_sas_port_resume(struct ata_port *ap)
{
	ata_port_resume(ap, PMSG_RESUME, true);
}
  4576. EXPORT_SYMBOL_GPL(ata_sas_port_resume);
/**
 * ata_host_suspend - suspend host
 * @host: host to suspend
 * @mesg: PM message
 *
 * Suspend @host.  Actual operation is performed by port suspend;
 * this only records @mesg in the host device's power state.
 */
void ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	host->dev->power.power_state = mesg;
}
  4588. EXPORT_SYMBOL_GPL(ata_host_suspend);
/**
 * ata_host_resume - resume host
 * @host: host to resume
 *
 * Resume @host.  Actual operation is performed by port resume;
 * this only marks the host device as fully powered again.
 */
void ata_host_resume(struct ata_host *host)
{
	host->dev->power.power_state = PMSG_ON;
}
  4599. EXPORT_SYMBOL_GPL(ata_host_resume);
  4600. #endif
/* Device type for ATA ports; hooks the port PM callbacks when CONFIG_PM. */
const struct device_type ata_port_type = {
	.name = ATA_PORT_TYPE_NAME,
#ifdef CONFIG_PM
	.pm = &ata_port_pm_ops,
#endif
};
/**
 * ata_dev_init - Initialize an ata_device structure
 * @dev: Device structure to initialize
 *
 * Initialize @dev in preparation for probing.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the attached device, reset together */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/*
	 * High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->quirks = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* Zero only the [CLEAR_BEGIN, CLEAR_END) slice of the structure,
	 * preserving the fields that live outside that range. */
	memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0,
	       ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN);

	/* Start with every transfer mode allowed; probing narrows these. */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
/**
 * ata_link_init - Initialize an ata_link structure
 * @ap: ATA port link is attached to
 * @link: Link structure to initialize
 * @pmp: Port multiplier port number
 *
 * Initialize @link.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0,
	       ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN);

	link->ap = ap;
	link->pmp = pmp;
	/* no command active on a freshly initialized link */
	link->active_tag = ATA_TAG_POISON;
	/* no hardware speed limit known yet; allow everything */
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, ap isn't initialized yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		/* back-pointer and devno must be set before ata_dev_init(),
		 * which looks up the device's link */
		dev->link = link;
		dev->devno = dev - link->device;
#ifdef CONFIG_ATA_ACPI
		dev->gtf_filter = ata_acpi_gtf_filter;
#endif
		ata_dev_init(dev);
	}
}
  4670. /**
  4671. * sata_link_init_spd - Initialize link->sata_spd_limit
  4672. * @link: Link to configure sata_spd_limit for
  4673. *
  4674. * Initialize ``link->[hw_]sata_spd_limit`` to the currently
  4675. * configured value.
  4676. *
  4677. * LOCKING:
  4678. * Kernel thread context (may sleep).
  4679. *
  4680. * RETURNS:
  4681. * 0 on success, -errno on failure.
  4682. */
  4683. int sata_link_init_spd(struct ata_link *link)
  4684. {
  4685. u8 spd;
  4686. int rc;
  4687. rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
  4688. if (rc)
  4689. return rc;
  4690. spd = (link->saved_scontrol >> 4) & 0xf;
  4691. if (spd)
  4692. link->hw_sata_spd_limit &= (1 << spd) - 1;
  4693. ata_force_link_limits(link);
  4694. link->sata_spd_limit = link->hw_sata_spd_limit;
  4695. return 0;
  4696. }
/**
 * ata_port_alloc - allocate and initialize basic ATA port resources
 * @host: ATA host this allocated port belongs to
 *
 * Allocate and initialize basic ATA port resources.
 *
 * RETURNS:
 * Allocated ATA port on success, NULL on failure.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;
	int id;

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* the port starts out frozen and marked initializing */
	ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN;
	/* ports share the host lock */
	ap->lock = &host->lock;

	/* allocate a unique id (>= 1) used in log messages */
	id = ida_alloc_min(&ata_ida, 1, GFP_KERNEL);
	if (id < 0) {
		kfree(ap);
		return NULL;
	}
	ap->print_id = id;

	ap->host = host;
	ap->dev = host->dev;

	mutex_init(&ap->scsi_scan_mutex);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_DELAYED_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_completion(&ap->park_req_pending);
	timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn,
		    TIMER_DEFERRABLE);

	/* cable type is determined later (e.g. at registration for SATA) */
	ap->cbl = ATA_CBL_NONE;

	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	ata_sff_port_init(ap);
	/* apply libata.force port-flag overrides */
	ata_force_pflags(ap);

	return ap;
}
  4744. EXPORT_SYMBOL_GPL(ata_port_alloc);
  4745. void ata_port_free(struct ata_port *ap)
  4746. {
  4747. if (!ap)
  4748. return;
  4749. kfree(ap->pmp_link);
  4750. kfree(ap->slave_link);
  4751. ida_free(&ata_ida, ap->print_id);
  4752. kfree(ap);
  4753. }
  4754. EXPORT_SYMBOL_GPL(ata_port_free);
  4755. static void ata_devres_release(struct device *gendev, void *res)
  4756. {
  4757. struct ata_host *host = dev_get_drvdata(gendev);
  4758. int i;
  4759. for (i = 0; i < host->n_ports; i++) {
  4760. struct ata_port *ap = host->ports[i];
  4761. if (!ap)
  4762. continue;
  4763. if (ap->scsi_host)
  4764. scsi_host_put(ap->scsi_host);
  4765. }
  4766. dev_set_drvdata(gendev, NULL);
  4767. ata_host_put(host);
  4768. }
  4769. static void ata_host_release(struct kref *kref)
  4770. {
  4771. struct ata_host *host = container_of(kref, struct ata_host, kref);
  4772. int i;
  4773. for (i = 0; i < host->n_ports; i++) {
  4774. ata_port_free(host->ports[i]);
  4775. host->ports[i] = NULL;
  4776. }
  4777. kfree(host);
  4778. }
/* Take a reference on @host (paired with ata_host_put()). */
void ata_host_get(struct ata_host *host)
{
	kref_get(&host->kref);
}
/* Drop a reference on @host; the last put frees it via ata_host_release(). */
void ata_host_put(struct ata_host *host)
{
	kref_put(&host->kref, ata_host_release);
}
  4787. EXPORT_SYMBOL_GPL(ata_host_put);
/**
 * ata_host_alloc - allocate and init basic ATA host resources
 * @dev: generic device this host is associated with
 * @n_ports: the number of ATA ports associated with this host
 *
 * Allocate and initialize basic ATA host resources.  LLD calls
 * this function to allocate a host, initializes it fully and
 * attaches it using ata_host_register().
 *
 * RETURNS:
 * Allocated ATA host on success, NULL on failure.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int n_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;
	void *dr;

	/* alloc a container for our list of ATA ports (buses) */
	sz = sizeof(struct ata_host) + n_ports * sizeof(void *);
	host = kzalloc(sz, GFP_KERNEL);
	if (!host)
		return NULL;

	/*
	 * Open a devres group so a mid-way failure below can release
	 * everything registered so far in one devres_release_group() call.
	 */
	if (!devres_open_group(dev, NULL, GFP_KERNEL)) {
		kfree(host);
		return NULL;
	}

	dr = devres_alloc(ata_devres_release, 0, GFP_KERNEL);
	if (!dr) {
		kfree(host);
		goto err_out;
	}
	devres_add(dev, dr);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->dev = dev;
	host->n_ports = n_ports;
	kref_init(&host->kref);

	/* allocate ports bound to this host */
	for (i = 0; i < n_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	/* success: keep the devres entries but dissolve the group */
	devres_remove_group(dev, NULL);
	return host;

err_out:
	/* once @dr was added, this also drops the host reference via
	 * ata_devres_release() -> ata_host_put() */
	devres_release_group(dev, NULL);
	return NULL;
}
  4845. EXPORT_SYMBOL_GPL(ata_host_alloc);
  4846. /**
  4847. * ata_host_alloc_pinfo - alloc host and init with port_info array
  4848. * @dev: generic device this host is associated with
  4849. * @ppi: array of ATA port_info to initialize host with
  4850. * @n_ports: number of ATA ports attached to this host
  4851. *
  4852. * Allocate ATA host and initialize with info from @ppi. If NULL
  4853. * terminated, @ppi may contain fewer entries than @n_ports. The
  4854. * last entry will be used for the remaining ports.
  4855. *
  4856. * RETURNS:
  4857. * Allocate ATA host on success, NULL on failure.
  4858. *
  4859. * LOCKING:
  4860. * Inherited from calling layer (may sleep).
  4861. */
  4862. struct ata_host *ata_host_alloc_pinfo(struct device *dev,
  4863. const struct ata_port_info * const * ppi,
  4864. int n_ports)
  4865. {
  4866. const struct ata_port_info *pi = &ata_dummy_port_info;
  4867. struct ata_host *host;
  4868. int i, j;
  4869. host = ata_host_alloc(dev, n_ports);
  4870. if (!host)
  4871. return NULL;
  4872. for (i = 0, j = 0; i < host->n_ports; i++) {
  4873. struct ata_port *ap = host->ports[i];
  4874. if (ppi[j])
  4875. pi = ppi[j++];
  4876. ap->pio_mask = pi->pio_mask;
  4877. ap->mwdma_mask = pi->mwdma_mask;
  4878. ap->udma_mask = pi->udma_mask;
  4879. ap->flags |= pi->flags;
  4880. ap->link.flags |= pi->link_flags;
  4881. ap->ops = pi->port_ops;
  4882. if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
  4883. host->ops = pi->port_ops;
  4884. }
  4885. return host;
  4886. }
  4887. EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
  4888. static void ata_host_stop(struct device *gendev, void *res)
  4889. {
  4890. struct ata_host *host = dev_get_drvdata(gendev);
  4891. int i;
  4892. WARN_ON(!(host->flags & ATA_HOST_STARTED));
  4893. for (i = 0; i < host->n_ports; i++) {
  4894. struct ata_port *ap = host->ports[i];
  4895. if (ap->ops->port_stop)
  4896. ap->ops->port_stop(ap);
  4897. }
  4898. if (host->ops->host_stop)
  4899. host->ops->host_stop(host);
  4900. }
/**
 * ata_finalize_port_ops - finalize ata_port_operations
 * @ops: ata_port_operations to finalize
 *
 * An ata_port_operations can inherit from another ops and that
 * ops can again inherit from another.  This can go on as many
 * times as necessary as long as there is no loop in the
 * inheritance chain.
 *
 * Ops tables are finalized when the host is started.  NULL or
 * unspecified entries are inherited from the closest ancestor
 * which has the method and the entry is populated with it.
 * After finalization, the ops table directly points to all the
 * methods and ->inherits is no longer necessary and cleared.
 *
 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
 *
 * LOCKING:
 * None.
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	/* treat the table as an array of function-pointer slots, ending
	 * just before the ->inherits member */
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	/* ->inherits == NULL means already finalized (or nothing to do) */
	if (!ops || !ops->inherits)
		return;

	/* serialize concurrent finalization of the same ops table */
	spin_lock(&lock);

	/* walk up the inheritance chain, filling unset (NULL) slots */
	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	/* ATA_OP_NULL markers (error pointers) become real NULLs */
	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	/* mark as finalized */
	ops->inherits = NULL;

	spin_unlock(&lock);
}
/**
 * ata_host_start - start and freeze ports of an ATA host
 * @host: ATA host to start ports for
 *
 * Start and then freeze ports of @host.  Started status is
 * recorded in host->flags, so this function can be called
 * multiple times.  Ports are guaranteed to get started only
 * once.  If host->ops is not initialized yet, it is set to the
 * first non-dummy port ops.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	/* idempotent: a second call is a no-op */
	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		/* default host->ops to the first non-dummy port's ops */
		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops && host->ops->host_stop)
		have_stop = 1;

	/*
	 * Allocate the stop devres up front, so that once ports are
	 * started we cannot fail to register the matching stop action.
	 */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_err(host->dev,
						"failed to start port %d (errno=%d)\n",
						i, rc);
				goto err_out;
			}
		}
		/* ports stay frozen until EH probes them */
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

err_out:
	/* unwind: stop only the ports that were successfully started */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
  5009. EXPORT_SYMBOL_GPL(ata_host_start);
  5010. /**
  5011. * ata_host_init - Initialize a host struct for sas (ipr, libsas)
  5012. * @host: host to initialize
  5013. * @dev: device host is attached to
  5014. * @ops: port_ops
  5015. *
  5016. */
  5017. void ata_host_init(struct ata_host *host, struct device *dev,
  5018. struct ata_port_operations *ops)
  5019. {
  5020. spin_lock_init(&host->lock);
  5021. mutex_init(&host->eh_mutex);
  5022. host->n_tags = ATA_MAX_QUEUE;
  5023. host->dev = dev;
  5024. host->ops = ops;
  5025. kref_init(&host->kref);
  5026. }
  5027. EXPORT_SYMBOL_GPL(ata_host_init);
/* Kick EH to probe all devices on @ap (boot/attach time probing). */
void ata_port_probe(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	unsigned long flags;

	/* kick EH for boot probing */
	spin_lock_irqsave(ap->lock, flags);

	/* probe every device via a quiet, no-autopsy reset */
	ehi->probe_mask |= ATA_ALL_DEVICES;
	ehi->action |= ATA_EH_RESET;
	ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

	/* transition the port from INITIALIZING to LOADING state */
	ap->pflags &= ~ATA_PFLAG_INITIALIZING;
	ap->pflags |= ATA_PFLAG_LOADING;
	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);
}
  5042. EXPORT_SYMBOL_GPL(ata_port_probe);
/* Async probe callback scheduled by ata_host_register() for each port. */
static void async_port_probe(void *data, async_cookie_t cookie)
{
	struct ata_port *ap = data;

	/*
	 * If we're not allowed to scan this host in parallel,
	 * we need to wait until all previous scans have completed
	 * before going further.
	 * Jeff Garzik says this is only within a controller, so we
	 * don't need to wait for port 0, only for later ports.
	 */
	if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0)
		async_synchronize_cookie(cookie);

	ata_port_probe(ap);
	/* wait until the EH probing kicked above has finished */
	ata_port_wait_eh(ap);

	/* in order to keep device order, we need to synchronize at this point */
	async_synchronize_cookie(cookie);

	ata_scsi_scan_host(ap, 1);
}
/**
 * ata_host_register - register initialized ATA host
 * @host: ATA host to register
 * @sht: template for SCSI host
 *
 * Register initialized ATA host.  @host is allocated using
 * ata_host_alloc() and fully initialized by LLD.  This function
 * starts ports, registers @host with ATA and SCSI layers and
 * probe registered devices.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, const struct scsi_host_template *sht)
{
	int i, rc;

	/* queue depth is bounded by the SCSI template and ATA's maximum */
	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_err(host->dev, "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Create associated sysfs transport objects */
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev, host->ports[i]);
		if (rc) {
			goto err_tadd;
		}
	}

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		goto err_tadd;

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned int xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_info(ap, "%cATA max %s %s\n",
				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
				      ata_mode_string(xfer_mask),
				      ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_info(ap, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* the cookie lets detach code wait for this probe to end */
		ap->cookie = async_schedule(async_port_probe, ap);
	}

	return 0;

err_tadd:
	/* remove only the transport objects added before the failure */
	while (--i >= 0) {
		ata_tport_delete(host->ports[i]);
	}
	return rc;
}
  5132. EXPORT_SYMBOL_GPL(ata_host_register);
/**
 * ata_host_activate - start host, request IRQ and register it
 * @host: target ATA host
 * @irq: IRQ to request
 * @irq_handler: irq_handler used when requesting IRQ
 * @irq_flags: irq_flags used when requesting IRQ
 * @sht: scsi_host_template to use when registering the host
 *
 * After allocating an ATA host and initializing it, most libata
 * LLDs perform three steps to activate the host - start host,
 * request IRQ and register it.  This helper takes necessary
 * arguments and performs the three steps in one go.
 *
 * An invalid IRQ skips the IRQ registration and expects the host to
 * have set polling mode on the port.  In this case, @irq_handler
 * should be NULL.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      const struct scsi_host_template *sht)
{
	int i, rc;
	char *irq_desc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* Special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	/* build a human-readable "driver[device]" name for the IRQ */
	irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]",
				  dev_driver_string(host->dev),
				  dev_name(host->dev));
	if (!irq_desc)
		return -ENOMEM;

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      irq_desc, host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc_misc(host->ports[i], irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
  5187. EXPORT_SYMBOL_GPL(ata_host_activate);
/**
 * ata_dev_free_resources - Free a device resources
 * @dev: Target ATA device
 *
 * Free resources allocated to support a device features.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_dev_free_resources(struct ata_device *dev)
{
	/* tear down zero-power ODD state if it was set up for this device */
	if (zpodd_dev_enabled(dev))
		zpodd_exit(dev);

	ata_dev_cleanup_cdl_resources(dev);
}
/**
 * ata_port_detach - Detach ATA port in preparation of device removal
 * @ap: ATA port to be detached
 *
 * Detach all ATA devices and the associated SCSI devices of @ap;
 * then, remove the associated SCSI host.  @ap is guaranteed to
 * be quiescent on return from this function.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* Ensure ata_port probe has completed */
	async_synchronize_cookie(ap->cookie + 1);

	/* Wait for any ongoing EH */
	ata_port_wait_eh(ap);

	mutex_lock(&ap->scsi_scan_mutex);
	spin_lock_irqsave(ap->lock, flags);

	/* Remove scsi devices */
	ata_for_each_link(link, ap, HOST_FIRST) {
		ata_for_each_dev(dev, link, ALL) {
			if (dev->sdev) {
				/* drop the port lock across
				 * scsi_remove_device(), which can sleep */
				spin_unlock_irqrestore(ap->lock, flags);
				scsi_remove_device(dev->sdev);
				spin_lock_irqsave(ap->lock, flags);
				dev->sdev = NULL;
			}
		}
	}

	/* Tell EH to disable all devices */
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);

	spin_unlock_irqrestore(ap->lock, flags);
	mutex_unlock(&ap->scsi_scan_mutex);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	cancel_delayed_work_sync(&ap->hotplug_task);
	cancel_delayed_work_sync(&ap->scsi_rescan_task);

	/* Delete port multiplier link transport devices */
	if (ap->pmp_link) {
		int i;

		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}

	/* Remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
	ata_tport_delete(ap);
}
  5257. /**
  5258. * ata_host_detach - Detach all ports of an ATA host
  5259. * @host: Host to detach
  5260. *
  5261. * Detach all ports of @host.
  5262. *
  5263. * LOCKING:
  5264. * Kernel thread context (may sleep).
  5265. */
  5266. void ata_host_detach(struct ata_host *host)
  5267. {
  5268. int i;
  5269. for (i = 0; i < host->n_ports; i++)
  5270. ata_port_detach(host->ports[i]);
  5271. /* the host is dead now, dissociate ACPI */
  5272. ata_acpi_dissociate(host);
  5273. }
  5274. EXPORT_SYMBOL_GPL(ata_host_detach);
  5275. #ifdef CONFIG_PCI
/**
 * ata_pci_remove_one - PCI layer callback for device removal
 * @pdev: PCI device that was removed
 *
 * PCI layer indicates to libata via this hook that hot-unplug or
 * module unload event has occurred.  Detach all ports.  Resource
 * release is handled via devres.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);

	ata_host_detach(host);
}
  5292. EXPORT_SYMBOL_GPL(ata_pci_remove_one);
  5293. void ata_pci_shutdown_one(struct pci_dev *pdev)
  5294. {
  5295. struct ata_host *host = pci_get_drvdata(pdev);
  5296. int i;
  5297. for (i = 0; i < host->n_ports; i++) {
  5298. struct ata_port *ap = host->ports[i];
  5299. ap->pflags |= ATA_PFLAG_FROZEN;
  5300. /* Disable port interrupts */
  5301. if (ap->ops->freeze)
  5302. ap->ops->freeze(ap);
  5303. /* Stop the port DMA engines */
  5304. if (ap->ops->port_stop)
  5305. ap->ops->port_stop(ap);
  5306. }
  5307. }
  5308. EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
  5309. /* move to PCI subsystem */
  5310. int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
  5311. {
  5312. unsigned long tmp = 0;
  5313. switch (bits->width) {
  5314. case 1: {
  5315. u8 tmp8 = 0;
  5316. pci_read_config_byte(pdev, bits->reg, &tmp8);
  5317. tmp = tmp8;
  5318. break;
  5319. }
  5320. case 2: {
  5321. u16 tmp16 = 0;
  5322. pci_read_config_word(pdev, bits->reg, &tmp16);
  5323. tmp = tmp16;
  5324. break;
  5325. }
  5326. case 4: {
  5327. u32 tmp32 = 0;
  5328. pci_read_config_dword(pdev, bits->reg, &tmp32);
  5329. tmp = tmp32;
  5330. break;
  5331. }
  5332. default:
  5333. return -EINVAL;
  5334. }
  5335. tmp &= bits->mask;
  5336. return (tmp == bits->val) ? 1 : 0;
  5337. }
  5338. EXPORT_SYMBOL_GPL(pci_test_config_bits);
  5339. #ifdef CONFIG_PM
/* Save PCI state and power the device down for suspend. */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	/* only enter D3hot for events that put the system to sleep */
	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}
  5347. EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
/*
 * Power up and re-enable a PCI ATA device after suspend.
 * Returns 0 on success, -errno if the device could not be re-enabled.
 */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* managed (pcim_*) enable; released automatically on detach */
	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
  5362. EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
/* Suspend an ATA host and then its underlying PCI device. */
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = pci_get_drvdata(pdev);

	ata_host_suspend(host, mesg);
	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}
  5370. EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
  5371. int ata_pci_device_resume(struct pci_dev *pdev)
  5372. {
  5373. struct ata_host *host = pci_get_drvdata(pdev);
  5374. int rc;
  5375. rc = ata_pci_device_do_resume(pdev);
  5376. if (rc == 0)
  5377. ata_host_resume(host);
  5378. return rc;
  5379. }
  5380. EXPORT_SYMBOL_GPL(ata_pci_device_resume);
  5381. #endif /* CONFIG_PM */
  5382. #endif /* CONFIG_PCI */
  5383. /**
  5384. * ata_platform_remove_one - Platform layer callback for device removal
  5385. * @pdev: Platform device that was removed
  5386. *
  5387. * Platform layer indicates to libata via this hook that hot-unplug or
  5388. * module unload event has occurred. Detach all ports. Resource
  5389. * release is handled via devres.
  5390. *
  5391. * LOCKING:
  5392. * Inherited from platform layer (may sleep).
  5393. */
  5394. void ata_platform_remove_one(struct platform_device *pdev)
  5395. {
  5396. struct ata_host *host = platform_get_drvdata(pdev);
  5397. ata_host_detach(host);
  5398. }
  5399. EXPORT_SYMBOL_GPL(ata_platform_remove_one);
  5400. #ifdef CONFIG_ATA_FORCE
/*
 * Helper macros for building force_tbl[] entries (parsed from the
 * libata.force parameter).  Each expands to one struct ata_force_param
 * initializer; the *_onoff variants generate a pair: "no<name>" sets
 * the *_on member and "<name>" sets the *_off member.
 */
#define force_cbl(name, flag) \
	{ #name, .cbl = (flag) }

#define force_spd_limit(spd, val) \
	{ #spd, .spd_limit = (val) }

#define force_xfer(mode, shift) \
	{ #mode, .xfer_mask = (1UL << (shift)) }

#define force_lflag_on(name, flags) \
	{ #name, .lflags_on = (flags) }

#define force_lflag_onoff(name, flags) \
	{ "no" #name, .lflags_on = (flags) }, \
	{ #name, .lflags_off = (flags) }

#define force_pflag_on(name, flags) \
	{ #name, .pflags_on = (flags) }

#define force_quirk_on(name, flag) \
	{ #name, .quirk_on = (flag) }

#define force_quirk_onoff(name, flag) \
	{ "no" #name, .quirk_on = (flag) }, \
	{ #name, .quirk_off = (flag) }
  5419. static const struct ata_force_param force_tbl[] __initconst = {
  5420. force_cbl(40c, ATA_CBL_PATA40),
  5421. force_cbl(80c, ATA_CBL_PATA80),
  5422. force_cbl(short40c, ATA_CBL_PATA40_SHORT),
  5423. force_cbl(unk, ATA_CBL_PATA_UNK),
  5424. force_cbl(ign, ATA_CBL_PATA_IGN),
  5425. force_cbl(sata, ATA_CBL_SATA),
  5426. force_spd_limit(1.5Gbps, 1),
  5427. force_spd_limit(3.0Gbps, 2),
  5428. force_xfer(pio0, ATA_SHIFT_PIO + 0),
  5429. force_xfer(pio1, ATA_SHIFT_PIO + 1),
  5430. force_xfer(pio2, ATA_SHIFT_PIO + 2),
  5431. force_xfer(pio3, ATA_SHIFT_PIO + 3),
  5432. force_xfer(pio4, ATA_SHIFT_PIO + 4),
  5433. force_xfer(pio5, ATA_SHIFT_PIO + 5),
  5434. force_xfer(pio6, ATA_SHIFT_PIO + 6),
  5435. force_xfer(mwdma0, ATA_SHIFT_MWDMA + 0),
  5436. force_xfer(mwdma1, ATA_SHIFT_MWDMA + 1),
  5437. force_xfer(mwdma2, ATA_SHIFT_MWDMA + 2),
  5438. force_xfer(mwdma3, ATA_SHIFT_MWDMA + 3),
  5439. force_xfer(mwdma4, ATA_SHIFT_MWDMA + 4),
  5440. force_xfer(udma0, ATA_SHIFT_UDMA + 0),
  5441. force_xfer(udma16, ATA_SHIFT_UDMA + 0),
  5442. force_xfer(udma/16, ATA_SHIFT_UDMA + 0),
  5443. force_xfer(udma1, ATA_SHIFT_UDMA + 1),
  5444. force_xfer(udma25, ATA_SHIFT_UDMA + 1),
  5445. force_xfer(udma/25, ATA_SHIFT_UDMA + 1),
  5446. force_xfer(udma2, ATA_SHIFT_UDMA + 2),
  5447. force_xfer(udma33, ATA_SHIFT_UDMA + 2),
  5448. force_xfer(udma/33, ATA_SHIFT_UDMA + 2),
  5449. force_xfer(udma3, ATA_SHIFT_UDMA + 3),
  5450. force_xfer(udma44, ATA_SHIFT_UDMA + 3),
  5451. force_xfer(udma/44, ATA_SHIFT_UDMA + 3),
  5452. force_xfer(udma4, ATA_SHIFT_UDMA + 4),
  5453. force_xfer(udma66, ATA_SHIFT_UDMA + 4),
  5454. force_xfer(udma/66, ATA_SHIFT_UDMA + 4),
  5455. force_xfer(udma5, ATA_SHIFT_UDMA + 5),
  5456. force_xfer(udma100, ATA_SHIFT_UDMA + 5),
  5457. force_xfer(udma/100, ATA_SHIFT_UDMA + 5),
  5458. force_xfer(udma6, ATA_SHIFT_UDMA + 6),
  5459. force_xfer(udma133, ATA_SHIFT_UDMA + 6),
  5460. force_xfer(udma/133, ATA_SHIFT_UDMA + 6),
  5461. force_xfer(udma7, ATA_SHIFT_UDMA + 7),
  5462. force_lflag_on(nohrst, ATA_LFLAG_NO_HRST),
  5463. force_lflag_on(nosrst, ATA_LFLAG_NO_SRST),
  5464. force_lflag_on(norst, ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST),
  5465. force_lflag_on(rstonce, ATA_LFLAG_RST_ONCE),
  5466. force_lflag_onoff(dbdelay, ATA_LFLAG_NO_DEBOUNCE_DELAY),
  5467. force_pflag_on(external, ATA_PFLAG_EXTERNAL),
  5468. force_quirk_onoff(ncq, ATA_QUIRK_NONCQ),
  5469. force_quirk_onoff(ncqtrim, ATA_QUIRK_NO_NCQ_TRIM),
  5470. force_quirk_onoff(ncqati, ATA_QUIRK_NO_NCQ_ON_ATI),
  5471. force_quirk_onoff(trim, ATA_QUIRK_NOTRIM),
  5472. force_quirk_on(trim_zero, ATA_QUIRK_ZERO_AFTER_TRIM),
  5473. force_quirk_on(max_trim_128m, ATA_QUIRK_MAX_TRIM_128M),
  5474. force_quirk_onoff(dma, ATA_QUIRK_NODMA),
  5475. force_quirk_on(atapi_dmadir, ATA_QUIRK_ATAPI_DMADIR),
  5476. force_quirk_on(atapi_mod16_dma, ATA_QUIRK_ATAPI_MOD16_DMA),
  5477. force_quirk_onoff(dmalog, ATA_QUIRK_NO_DMA_LOG),
  5478. force_quirk_onoff(iddevlog, ATA_QUIRK_NO_ID_DEV_LOG),
  5479. force_quirk_onoff(logdir, ATA_QUIRK_NO_LOG_DIR),
  5480. force_quirk_on(max_sec_128, ATA_QUIRK_MAX_SEC_128),
  5481. force_quirk_on(max_sec_1024, ATA_QUIRK_MAX_SEC_1024),
  5482. force_quirk_on(max_sec_lba48, ATA_QUIRK_MAX_SEC_LBA48),
  5483. force_quirk_onoff(lpm, ATA_QUIRK_NOLPM),
  5484. force_quirk_onoff(setxfer, ATA_QUIRK_NOSETXFER),
  5485. force_quirk_on(dump_id, ATA_QUIRK_DUMP_ID),
  5486. force_quirk_onoff(fua, ATA_QUIRK_NO_FUA),
  5487. force_quirk_on(disable, ATA_QUIRK_DISABLE),
  5488. };
  5489. static int __init ata_parse_force_one(char **cur,
  5490. struct ata_force_ent *force_ent,
  5491. const char **reason)
  5492. {
  5493. char *start = *cur, *p = *cur;
  5494. char *id, *val, *endp;
  5495. const struct ata_force_param *match_fp = NULL;
  5496. int nr_matches = 0, i;
  5497. /* find where this param ends and update *cur */
  5498. while (*p != '\0' && *p != ',')
  5499. p++;
  5500. if (*p == '\0')
  5501. *cur = p;
  5502. else
  5503. *cur = p + 1;
  5504. *p = '\0';
  5505. /* parse */
  5506. p = strchr(start, ':');
  5507. if (!p) {
  5508. val = strstrip(start);
  5509. goto parse_val;
  5510. }
  5511. *p = '\0';
  5512. id = strstrip(start);
  5513. val = strstrip(p + 1);
  5514. /* parse id */
  5515. p = strchr(id, '.');
  5516. if (p) {
  5517. *p++ = '\0';
  5518. force_ent->device = simple_strtoul(p, &endp, 10);
  5519. if (p == endp || *endp != '\0') {
  5520. *reason = "invalid device";
  5521. return -EINVAL;
  5522. }
  5523. }
  5524. force_ent->port = simple_strtoul(id, &endp, 10);
  5525. if (id == endp || *endp != '\0') {
  5526. *reason = "invalid port/link";
  5527. return -EINVAL;
  5528. }
  5529. parse_val:
  5530. /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
  5531. for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
  5532. const struct ata_force_param *fp = &force_tbl[i];
  5533. if (strncasecmp(val, fp->name, strlen(val)))
  5534. continue;
  5535. nr_matches++;
  5536. match_fp = fp;
  5537. if (strcasecmp(val, fp->name) == 0) {
  5538. nr_matches = 1;
  5539. break;
  5540. }
  5541. }
  5542. if (!nr_matches) {
  5543. *reason = "unknown value";
  5544. return -EINVAL;
  5545. }
  5546. if (nr_matches > 1) {
  5547. *reason = "ambiguous value";
  5548. return -EINVAL;
  5549. }
  5550. force_ent->param = *match_fp;
  5551. return 0;
  5552. }
  5553. static void __init ata_parse_force_param(void)
  5554. {
  5555. int idx = 0, size = 1;
  5556. int last_port = -1, last_device = -1;
  5557. char *p, *cur, *next;
  5558. /* Calculate maximum number of params and allocate ata_force_tbl */
  5559. for (p = ata_force_param_buf; *p; p++)
  5560. if (*p == ',')
  5561. size++;
  5562. ata_force_tbl = kcalloc(size, sizeof(ata_force_tbl[0]), GFP_KERNEL);
  5563. if (!ata_force_tbl) {
  5564. printk(KERN_WARNING "ata: failed to extend force table, "
  5565. "libata.force ignored\n");
  5566. return;
  5567. }
  5568. /* parse and populate the table */
  5569. for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
  5570. const char *reason = "";
  5571. struct ata_force_ent te = { .port = -1, .device = -1 };
  5572. next = cur;
  5573. if (ata_parse_force_one(&next, &te, &reason)) {
  5574. printk(KERN_WARNING "ata: failed to parse force "
  5575. "parameter \"%s\" (%s)\n",
  5576. cur, reason);
  5577. continue;
  5578. }
  5579. if (te.port == -1) {
  5580. te.port = last_port;
  5581. te.device = last_device;
  5582. }
  5583. ata_force_tbl[idx++] = te;
  5584. last_port = te.port;
  5585. last_device = te.device;
  5586. }
  5587. ata_force_tbl_size = idx;
  5588. }
/* Free the table allocated by ata_parse_force_param(). */
static void ata_free_force_param(void)
{
	kfree(ata_force_tbl);
}

#else /* CONFIG_ATA_FORCE */

/* Without CONFIG_ATA_FORCE the libata.force parameter is a no-op. */
static inline void ata_parse_force_param(void) { }
static inline void ata_free_force_param(void) { }

#endif /* CONFIG_ATA_FORCE */
  5597. static int __init ata_init(void)
  5598. {
  5599. int rc;
  5600. ata_parse_force_param();
  5601. rc = ata_sff_init();
  5602. if (rc) {
  5603. ata_free_force_param();
  5604. return rc;
  5605. }
  5606. libata_transport_init();
  5607. ata_scsi_transport_template = ata_attach_transport();
  5608. if (!ata_scsi_transport_template) {
  5609. ata_sff_exit();
  5610. rc = -ENOMEM;
  5611. goto err_out;
  5612. }
  5613. printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
  5614. return 0;
  5615. err_out:
  5616. return rc;
  5617. }
/* Module teardown: undo ata_init() in reverse order. */
static void __exit ata_exit(void)
{
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();
	ata_sff_exit();
	ata_free_force_param();
}

subsys_initcall(ata_init);
module_exit(ata_exit);
/* Shared limiter state: at most 1 message per HZ/5 jiffies (200ms). */
static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);

/* Returns nonzero when the caller may emit another rate-limited message. */
int ata_ratelimit(void)
{
	return __ratelimit(&ratelimit);
}
EXPORT_SYMBOL_GPL(ata_ratelimit);
  5633. /**
  5634. * ata_msleep - ATA EH owner aware msleep
  5635. * @ap: ATA port to attribute the sleep to
  5636. * @msecs: duration to sleep in milliseconds
  5637. *
  5638. * Sleeps @msecs. If the current task is owner of @ap's EH, the
  5639. * ownership is released before going to sleep and reacquired
  5640. * after the sleep is complete. IOW, other ports sharing the
  5641. * @ap->host will be allowed to own the EH while this task is
  5642. * sleeping.
  5643. *
  5644. * LOCKING:
  5645. * Might sleep.
  5646. */
  5647. void ata_msleep(struct ata_port *ap, unsigned int msecs)
  5648. {
  5649. bool owns_eh = ap && ap->host->eh_owner == current;
  5650. if (owns_eh)
  5651. ata_eh_release(ap);
  5652. if (msecs < 20) {
  5653. unsigned long usecs = msecs * USEC_PER_MSEC;
  5654. usleep_range(usecs, usecs + 50);
  5655. } else {
  5656. msleep(msecs);
  5657. }
  5658. if (owns_eh)
  5659. ata_eh_acquire(ap);
  5660. }
  5661. EXPORT_SYMBOL_GPL(ata_msleep);
  5662. /**
  5663. * ata_wait_register - wait until register value changes
  5664. * @ap: ATA port to wait register for, can be NULL
  5665. * @reg: IO-mapped register
  5666. * @mask: Mask to apply to read register value
  5667. * @val: Wait condition
  5668. * @interval: polling interval in milliseconds
  5669. * @timeout: timeout in milliseconds
  5670. *
  5671. * Waiting for some bits of register to change is a common
  5672. * operation for ATA controllers. This function reads 32bit LE
  5673. * IO-mapped register @reg and tests for the following condition.
  5674. *
  5675. * (*@reg & mask) != val
  5676. *
  5677. * If the condition is met, it returns; otherwise, the process is
  5678. * repeated after @interval_msec until timeout.
  5679. *
  5680. * LOCKING:
  5681. * Kernel thread context (may sleep)
  5682. *
  5683. * RETURNS:
  5684. * The final register value.
  5685. */
  5686. u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
  5687. unsigned int interval, unsigned int timeout)
  5688. {
  5689. unsigned long deadline;
  5690. u32 tmp;
  5691. tmp = ioread32(reg);
  5692. /* Calculate timeout _after_ the first read to make sure
  5693. * preceding writes reach the controller before starting to
  5694. * eat away the timeout.
  5695. */
  5696. deadline = ata_deadline(jiffies, timeout);
  5697. while ((tmp & mask) == val && time_before(jiffies, deadline)) {
  5698. ata_msleep(ap, interval);
  5699. tmp = ioread32(reg);
  5700. }
  5701. return tmp;
  5702. }
  5703. EXPORT_SYMBOL_GPL(ata_wait_register);
  5704. /*
  5705. * Dummy port_ops
  5706. */
  5707. static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
  5708. {
  5709. return AC_ERR_SYSTEM;
  5710. }
  5711. static void ata_dummy_error_handler(struct ata_port *ap)
  5712. {
  5713. /* truly dummy */
  5714. }
  5715. struct ata_port_operations ata_dummy_port_ops = {
  5716. .qc_issue = ata_dummy_qc_issue,
  5717. .error_handler = ata_dummy_error_handler,
  5718. .sched_eh = ata_std_sched_eh,
  5719. .end_eh = ata_std_end_eh,
  5720. };
  5721. EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
  5722. const struct ata_port_info ata_dummy_port_info = {
  5723. .port_ops = &ata_dummy_port_ops,
  5724. };
  5725. EXPORT_SYMBOL_GPL(ata_dummy_port_info);
/**
 * ata_print_version - print a driver's libata version string
 * @dev: device the message is attributed to
 * @version: version string to print
 *
 * Emits "version <version>" at KERN_DEBUG level via dev_printk().
 */
void ata_print_version(const struct device *dev, const char *version)
{
	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);

/* Export the SFF/BMDMA tracepoints so modules can emit them. */
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_status);