dasd_eckd.c 161 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
7577857795780578157825783578457855786578757885789579057915792579357945795579657975798579958005801580258035804580558065807580858095810581158125813581458155816581758185819582058215822582358245825582658275828582958305831583258335834583558365837583858395840
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
  4. * Horst Hummel <Horst.Hummel@de.ibm.com>
  5. * Carsten Otte <Cotte@de.ibm.com>
  6. * Martin Schwidefsky <schwidefsky@de.ibm.com>
  7. * Bugreports.to..: <Linux390@de.ibm.com>
  8. * Copyright IBM Corp. 1999, 2009
  9. * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
  10. * Author.........: Nigel Hislop <hislop_nigel@emc.com>
  11. */
  12. #define KMSG_COMPONENT "dasd-eckd"
  13. #include <linux/stddef.h>
  14. #include <linux/kernel.h>
  15. #include <linux/slab.h>
  16. #include <linux/hdreg.h> /* HDIO_GETGEO */
  17. #include <linux/bio.h>
  18. #include <linux/module.h>
  19. #include <linux/compat.h>
  20. #include <linux/init.h>
  21. #include <linux/seq_file.h>
  22. #include <asm/css_chars.h>
  23. #include <asm/debug.h>
  24. #include <asm/idals.h>
  25. #include <asm/ebcdic.h>
  26. #include <asm/io.h>
  27. #include <linux/uaccess.h>
  28. #include <asm/cio.h>
  29. #include <asm/ccwdev.h>
  30. #include <asm/itcw.h>
  31. #include <asm/schid.h>
  32. #include <asm/chpid.h>
  33. #include "dasd_int.h"
  34. #include "dasd_eckd.h"
  35. #ifdef PRINTK_HEADER
  36. #undef PRINTK_HEADER
  37. #endif /* PRINTK_HEADER */
  38. #define PRINTK_HEADER "dasd(eckd):"
  39. #define ECKD_C0(i) (i->home_bytes)
  40. #define ECKD_F(i) (i->formula)
  41. #define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
  42. (i->factors.f_0x02.f1))
  43. #define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
  44. (i->factors.f_0x02.f2))
  45. #define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
  46. (i->factors.f_0x02.f3))
  47. #define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
  48. #define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
  49. #define ECKD_F6(i) (i->factor6)
  50. #define ECKD_F7(i) (i->factor7)
  51. #define ECKD_F8(i) (i->factor8)
  52. /*
  53. * raw track access always map to 64k in memory
  54. * so it maps to 16 blocks of 4k per track
  55. */
  56. #define DASD_RAW_BLOCK_PER_TRACK 16
  57. #define DASD_RAW_BLOCKSIZE 4096
  58. /* 64k are 128 x 512 byte sectors */
  59. #define DASD_RAW_SECTORS_PER_TRACK 128
  60. MODULE_LICENSE("GPL");
  61. static struct dasd_discipline dasd_eckd_discipline;
  62. /* The ccw bus type uses this table to find devices that it sends to
  63. * dasd_eckd_probe */
  64. static struct ccw_device_id dasd_eckd_ids[] = {
  65. { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
  66. { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
  67. { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
  68. { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
  69. { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
  70. { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
  71. { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
  72. { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
  73. { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
  74. { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
  75. { /* end of list */ },
  76. };
  77. MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
  78. static struct ccw_driver dasd_eckd_driver; /* see below */
  79. static void *rawpadpage;
  80. #define INIT_CQR_OK 0
  81. #define INIT_CQR_UNFORMATTED 1
  82. #define INIT_CQR_ERROR 2
  83. /* emergency request for reserve/release */
  84. static struct {
  85. struct dasd_ccw_req cqr;
  86. struct ccw1 ccw;
  87. char data[32];
  88. } *dasd_reserve_req;
  89. static DEFINE_MUTEX(dasd_reserve_mutex);
  90. /* definitions for the path verification worker */
  91. struct path_verification_work_data {
  92. struct work_struct worker;
  93. struct dasd_device *device;
  94. struct dasd_ccw_req cqr;
  95. struct ccw1 ccw;
  96. __u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
  97. int isglobal;
  98. __u8 tbvpm;
  99. };
  100. static struct path_verification_work_data *path_verification_worker;
  101. static DEFINE_MUTEX(dasd_path_verification_mutex);
  102. struct check_attention_work_data {
  103. struct work_struct worker;
  104. struct dasd_device *device;
  105. __u8 lpum;
  106. };
  107. static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
  108. struct dasd_device *, struct dasd_device *,
  109. unsigned int, int, unsigned int, unsigned int,
  110. unsigned int, unsigned int);
  111. /* initial attempt at a probe function. this can be simplified once
  112. * the other detection code is gone */
  113. static int
  114. dasd_eckd_probe (struct ccw_device *cdev)
  115. {
  116. int ret;
  117. /* set ECKD specific ccw-device options */
  118. ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
  119. CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
  120. if (ret) {
  121. DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
  122. "dasd_eckd_probe: could not set "
  123. "ccw-device options");
  124. return ret;
  125. }
  126. ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
  127. return ret;
  128. }
  129. static int
  130. dasd_eckd_set_online(struct ccw_device *cdev)
  131. {
  132. return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
  133. }
/*
 * NOTE(review): presumably the data lengths of the records expected on
 * track 0 of a CDL-formatted volume — the consuming analysis code is not
 * visible in this chunk; confirm against the analysis ccw builder.
 */
static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140
/* head and record addresses of count_area read in analysis ccw */
static const int count_area_head[] = { 0, 0, 0, 0, 2 };
static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
  139. static inline unsigned int
  140. round_up_multiple(unsigned int no, unsigned int mult)
  141. {
  142. int rem = no % mult;
  143. return (rem ? no - rem + mult : no);
  144. }
  145. static inline unsigned int
  146. ceil_quot(unsigned int d1, unsigned int d2)
  147. {
  148. return (d1 + (d2 - 1)) / d2;
  149. }
  150. static unsigned int
  151. recs_per_track(struct dasd_eckd_characteristics * rdc,
  152. unsigned int kl, unsigned int dl)
  153. {
  154. int dn, kn;
  155. switch (rdc->dev_type) {
  156. case 0x3380:
  157. if (kl)
  158. return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
  159. ceil_quot(dl + 12, 32));
  160. else
  161. return 1499 / (15 + ceil_quot(dl + 12, 32));
  162. case 0x3390:
  163. dn = ceil_quot(dl + 6, 232) + 1;
  164. if (kl) {
  165. kn = ceil_quot(kl + 6, 232) + 1;
  166. return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
  167. 9 + ceil_quot(dl + 6 * dn, 34));
  168. } else
  169. return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
  170. case 0x9345:
  171. dn = ceil_quot(dl + 6, 232) + 1;
  172. if (kl) {
  173. kn = ceil_quot(kl + 6, 232) + 1;
  174. return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
  175. ceil_quot(dl + 6 * dn, 34));
  176. } else
  177. return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
  178. }
  179. return 0;
  180. }
  181. static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
  182. {
  183. geo->cyl = (__u16) cyl;
  184. geo->head = cyl >> 16;
  185. geo->head <<= 4;
  186. geo->head |= head;
  187. }
  188. static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
  189. struct dasd_device *device)
  190. {
  191. struct dasd_eckd_private *private = device->private;
  192. int rc;
  193. rc = get_phys_clock(&data->ep_sys_time);
  194. /*
  195. * Ignore return code if XRC is not supported or
  196. * sync clock is switched off
  197. */
  198. if ((rc && !private->rdc_data.facilities.XRC_supported) ||
  199. rc == -EOPNOTSUPP || rc == -EACCES)
  200. return 0;
  201. /* switch on System Time Stamp - needed for XRC Support */
  202. data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
  203. data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
  204. if (ccw) {
  205. ccw->count = sizeof(struct DE_eckd_data);
  206. ccw->flags |= CCW_FLAG_SLI;
  207. }
  208. return rc;
  209. }
/*
 * Build a Define Extent (DE) parameter block in @data for the channel
 * program command @cmd, covering tracks @trk..@totrk of @device.  If
 * @ccw is non-NULL it is initialized as the DE CCW pointing at @data.
 * Returns 0 or the rc of set_timestamp() for write-type commands.
 */
static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device,
	      int blksize)
{
	struct dasd_eckd_private *private = device->private;
	u16 heads, beghead, endhead;
	u32 begcyl, endcyl;
	int rc = 0;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
		ccw->flags = 0;
		ccw->count = 16;
		ccw->cda = (__u32)__pa(data);
	}
	memset(data, 0, sizeof(struct DE_eckd_data));
	/*
	 * Set permission mask and cache attributes per command type.
	 * NOTE(review): perm 0x1 = read, 0x02 = write, 0x3 = read+write —
	 * presumably per the ECKD DE mask definition; confirm against spec.
	 */
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		/* destructive commands additionally need the auth bit */
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->mask.perm = 0x03;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = blksize;
		rc = set_timestamp(ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	/* Regular Data Format Mode on newer CUs, except CDL tracks 0/1 */
	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	/* convert linear track numbers to cylinder/head begin/end pairs */
	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {
		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}
/*
 * Initialize a Locate Record Extended (LRE) payload, and optionally the
 * CCW that transfers it, for the given command.
 *
 * @ccw:        CCW to fill in, or NULL when the LRE data is embedded in a
 *              Prefix command (see prefix_LRE())
 * @data:       LRE payload to initialize
 * @trk:        track the operation starts on
 * @rec_on_trk: record number on that track (0 means track oriented)
 * @count:      number of records or tracks, depending on the command
 * @cmd:        ECKD CCW opcode the locate record is built for
 * @device:     base device the request is built for
 * @reclen:     record length in bytes
 * @tlf:        transfer length factor, used for READ_TRACK_DATA
 */
static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
			      unsigned int trk, unsigned int rec_on_trk,
			      int count, int cmd, struct dasd_device *device,
			      unsigned int reclen, unsigned int tlf)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
		ccw->flags = 0;
		/* full track write transfers 2 extra extended parameter bytes */
		if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
			ccw->count = 22;
		else
			ccw->count = 20;
		ccw->cda = (__u32)__pa(data);
	}
	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		/*
		 * Estimate the rotational sector position of the record
		 * from the record length; constants depend on the device
		 * geometry (3390 vs 3380 track layout).
		 */
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/* note: meaning of count depends on the operation
	 * for record based I/O it's the number of records, but for
	 * track based I/O it's the number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		/* record zero is written in addition to the data records */
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->operation.orientation = 0x0;
		data->operation.operation = 0x3F;
		data->extended_operation = 0x11;
		data->length = 0;
		data->extended_parameter_length = 0x02;
		/*
		 * Build a bitmask of the tracks to be written.
		 * NOTE(review): extended_parameter[] elements appear to be
		 * single bytes; shifting element [1] by (16 - count) only
		 * has an effect for count > 8 — confirm the intended
		 * bitmask layout against the LRE specification.
		 */
		if (data->count > 8) {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[1] = 0xFF;
			data->extended_parameter[1] <<= (16 - count);
		} else {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[0] <<= (8 - count);
			data->extended_parameter[1] = 0x00;
		}
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x0C;
		data->extended_parameter_length = 0;
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		/* all callers are in-kernel; an unknown opcode is a bug */
		DBF_DEV_EVENT(DBF_ERR, device,
			      "fill LRE unknown opcode 0x%x", cmd);
		BUG();
	}
	/* Seek address and search argument both point at the start record. */
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
/*
 * Build a Prefix (PFX) CCW whose payload embeds Define Extent data and,
 * for format 1, Locate Record Extended data.
 *
 * @ccw:        CCW to fill in (must not be NULL)
 * @pfxdata:    prefix payload to initialize
 * @trk/@totrk: first/last track of the extent
 * @cmd:        ECKD CCW opcode the request is built for
 * @basedev:    base device (provides unit address, LSS, attributes)
 * @startdev:   device the request is started on (may be a PAV alias)
 * @format:     prefix format, 0 (DE only) or 1 (DE + LRE)
 * @rec_on_trk, @count, @blksize, @tlf: passed through to the embedded
 *              define_extent()/locate_record_ext() payloads
 *
 * Returns 0 on success or the error propagated from define_extent().
 */
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned int format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct LRE_eckd_data *lredata;
	struct DE_eckd_data *dedata;
	int rc = 0;

	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	/*
	 * Full track write carries 2 extra bytes of LRE extended parameter
	 * beyond the structure (locate_record_ext() sets
	 * extended_parameter_length to 2 for this command).
	 */
	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
		ccw->count = sizeof(*pfxdata) + 2;
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
	} else {
		ccw->count = sizeof(*pfxdata);
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata));
	}

	/* prefix data */
	if (format > 1) {
		/* BUG() halts here; the return is only reached if it is compiled out */
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		BUG();
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->ned->unit_addr;
	pfxdata->base_lss = basepriv->ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata->validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata->validity.verify_base = 1;
		pfxdata->validity.hyper_pav = 1;
	}

	rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);

	/*
	 * For some commands the System Time Stamp is set in the define extent
	 * data when XRC is supported. The validity of the time stamp must be
	 * reflected in the prefix data as well.
	 */
	if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
		pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */

	if (format == 1) {
		locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
				  basedev, blksize, tlf);
	}

	return rc;
}
/*
 * Convenience wrapper around prefix_LRE() for format 0 prefix commands
 * (Define Extent data only, no embedded Locate Record).
 */
static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}
/*
 * Initialize a (classic) Locate Record CCW and its payload for the given
 * command. Older counterpart of locate_record_ext(); note that unlike the
 * extended variant, an unknown opcode here is only logged, not BUG()ed.
 *
 * @ccw:        CCW to fill in (must not be NULL)
 * @data:       LO payload to initialize
 * @trk:        track the operation starts on
 * @rec_on_trk: record number on that track (0 means track oriented)
 * @no_rec:     number of records to process
 * @cmd:        ECKD CCW opcode the locate record is built for
 * @device:     base device the request is built for
 * @reclen:     record length in bytes
 */
static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	DBF_DEV_EVENT(DBF_INFO, device,
		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		      trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		/*
		 * Estimate the rotational sector position of the record
		 * from the record length; constants depend on the device
		 * geometry (3390 vs 3380 track layout).
		 */
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		/* record zero is handled in addition to the data records */
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	/* Seek address and search argument both point at the start record. */
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
  609. /*
  610. * Returns 1 if the block is one of the special blocks that needs
  611. * to get read/written with the KD variant of the command.
  612. * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
  613. * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
  614. * Luckily the KD variants differ only by one bit (0x08) from the
  615. * normal variant. So don't wonder about code like:
  616. * if (dasd_eckd_cdl_special(blk_per_trk, recid))
  617. * ccw->cmd_code |= 0x8;
  618. */
  619. static inline int
  620. dasd_eckd_cdl_special(int blk_per_trk, int recid)
  621. {
  622. if (recid < 3)
  623. return 1;
  624. if (recid < blk_per_trk)
  625. return 0;
  626. if (recid < 2 * blk_per_trk)
  627. return 1;
  628. return 0;
  629. }
  630. /*
  631. * Returns the record size for the special blocks of the cdl format.
  632. * Only returns something useful if dasd_eckd_cdl_special is true
  633. * for the recid.
  634. */
  635. static inline int
  636. dasd_eckd_cdl_reclen(int recid)
  637. {
  638. if (recid < 3)
  639. return sizes_trk0[recid];
  640. return LABEL_SIZE;
  641. }
  642. /* create unique id from private structure. */
  643. static void create_uid(struct dasd_eckd_private *private)
  644. {
  645. int count;
  646. struct dasd_uid *uid;
  647. uid = &private->uid;
  648. memset(uid, 0, sizeof(struct dasd_uid));
  649. memcpy(uid->vendor, private->ned->HDA_manufacturer,
  650. sizeof(uid->vendor) - 1);
  651. EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
  652. memcpy(uid->serial, private->ned->HDA_location,
  653. sizeof(uid->serial) - 1);
  654. EBCASC(uid->serial, sizeof(uid->serial) - 1);
  655. uid->ssid = private->gneq->subsystemID;
  656. uid->real_unit_addr = private->ned->unit_addr;
  657. if (private->sneq) {
  658. uid->type = private->sneq->sua_flags;
  659. if (uid->type == UA_BASE_PAV_ALIAS)
  660. uid->base_unit_addr = private->sneq->base_unit_addr;
  661. } else {
  662. uid->type = UA_BASE_DEVICE;
  663. }
  664. if (private->vdsneq) {
  665. for (count = 0; count < 16; count++) {
  666. sprintf(uid->vduit+2*count, "%02x",
  667. private->vdsneq->uit[count]);
  668. }
  669. }
  670. }
  671. /*
  672. * Generate device unique id that specifies the physical device.
  673. */
  674. static int dasd_eckd_generate_uid(struct dasd_device *device)
  675. {
  676. struct dasd_eckd_private *private = device->private;
  677. unsigned long flags;
  678. if (!private)
  679. return -ENODEV;
  680. if (!private->ned || !private->gneq)
  681. return -ENODEV;
  682. spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
  683. create_uid(private);
  684. spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
  685. return 0;
  686. }
  687. static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
  688. {
  689. struct dasd_eckd_private *private = device->private;
  690. unsigned long flags;
  691. if (private) {
  692. spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
  693. *uid = private->uid;
  694. spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
  695. return 0;
  696. }
  697. return -EINVAL;
  698. }
/*
 * compare device UID with data of a given dasd_eckd_private structure
 * return 0 for match
 *
 * NOTE(review): the return value of dasd_eckd_get_uid() is ignored; if it
 * failed (device->private == NULL), device_uid would be compared
 * uninitialized — callers apparently guarantee a valid device; confirm.
 */
static int dasd_eckd_compare_path_uid(struct dasd_device *device,
				      struct dasd_eckd_private *private)
{
	struct dasd_uid device_uid;

	/* rebuild private->uid from its conf data before comparing */
	create_uid(private);
	dasd_eckd_get_uid(device, &device_uid);

	return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
}
/*
 * Set up a Read Configuration Data (RCD) request in the caller-provided
 * cqr, reading DASD_ECKD_RCD_DATA_SIZE bytes into rcd_buffer over the
 * channel path given by lpm.
 */
static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
				   struct dasd_ccw_req *cqr,
				   __u8 *rcd_buffer,
				   __u8 lpm)
{
	struct ccw1 *ccw;
	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
	rcd_buffer[0] = 0xE5;
	rcd_buffer[1] = 0xF1;
	rcd_buffer[2] = 0x4B;
	rcd_buffer[3] = 0xF0;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RCD;
	ccw->flags = 0;
	ccw->cda = (__u32)(addr_t)rcd_buffer;
	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
	cqr->magic = DASD_ECKD_MAGIC;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;			/* restrict request to this path */
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}
  741. /*
  742. * Wakeup helper for read_conf
  743. * if the cqr is not done and needs some error recovery
  744. * the buffer has to be re-initialized with the EBCDIC "V1.0"
  745. * to show support for virtual device SNEQ
  746. */
  747. static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
  748. {
  749. struct ccw1 *ccw;
  750. __u8 *rcd_buffer;
  751. if (cqr->status != DASD_CQR_DONE) {
  752. ccw = cqr->cpaddr;
  753. rcd_buffer = (__u8 *)((addr_t) ccw->cda);
  754. memset(rcd_buffer, 0, sizeof(*rcd_buffer));
  755. rcd_buffer[0] = 0xE5;
  756. rcd_buffer[1] = 0xF1;
  757. rcd_buffer[2] = 0x4B;
  758. rcd_buffer[3] = 0xF0;
  759. }
  760. dasd_wakeup_cb(cqr, data);
  761. }
/*
 * Read configuration data on a single path using the caller-provided cqr,
 * bypassing normal ERP (used during path verification).
 *
 * Returns -EOPNOTSUPP when the device does not support RCD, otherwise
 * the result of dasd_sleep_on_immediatly().
 */
static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
					   struct dasd_ccw_req *cqr,
					   __u8 *rcd_buffer,
					   __u8 lpm)
{
	struct ciw *ciw;
	int rc;
	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
		return -EOPNOTSUPP;

	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
	/* run without ERP but allow stealing the device lock */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;
	cqr->callback = read_conf_cb;
	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
/*
 * Read configuration data for one channel path (lpm).
 *
 * On success *rcd_buffer points to a freshly kzalloc'ed buffer of
 * *rcd_buffer_size bytes; ownership passes to the caller, who must
 * kfree() it. On error both output parameters are reset.
 *
 * Returns 0 on success, -EOPNOTSUPP when the device does not support
 * RCD, -ENOMEM on allocation failure, or the dasd_sleep_on() result.
 */
static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
				   void **rcd_buffer,
				   int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
				   0, /* use rcd_buf as data ara */
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate RCD request");
		ret = -ENOMEM;
		goto out_error;
	}
	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
	cqr->callback = read_conf_cb;
	ret = dasd_sleep_on(cqr);
	/*
	 * on success we update the user input parms
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
	*rcd_buffer = rcd_buf;
	return 0;
out_error:
	kfree(rcd_buf);
	*rcd_buffer = NULL;
	*rcd_buffer_size = 0;
	return ret;
}
  833. static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
  834. {
  835. struct dasd_sneq *sneq;
  836. int i, count;
  837. private->ned = NULL;
  838. private->sneq = NULL;
  839. private->vdsneq = NULL;
  840. private->gneq = NULL;
  841. count = private->conf_len / sizeof(struct dasd_sneq);
  842. sneq = (struct dasd_sneq *)private->conf_data;
  843. for (i = 0; i < count; ++i) {
  844. if (sneq->flags.identifier == 1 && sneq->format == 1)
  845. private->sneq = sneq;
  846. else if (sneq->flags.identifier == 1 && sneq->format == 4)
  847. private->vdsneq = (struct vd_sneq *)sneq;
  848. else if (sneq->flags.identifier == 2)
  849. private->gneq = (struct dasd_gneq *)sneq;
  850. else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
  851. private->ned = (struct dasd_ned *)sneq;
  852. sneq++;
  853. }
  854. if (!private->ned || !private->gneq) {
  855. private->ned = NULL;
  856. private->sneq = NULL;
  857. private->vdsneq = NULL;
  858. private->gneq = NULL;
  859. return -EINVAL;
  860. }
  861. return 0;
  862. };
  863. static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
  864. {
  865. struct dasd_gneq *gneq;
  866. int i, count, found;
  867. count = conf_len / sizeof(*gneq);
  868. gneq = (struct dasd_gneq *)conf_data;
  869. found = 0;
  870. for (i = 0; i < count; ++i) {
  871. if (gneq->flags.identifier == 2) {
  872. found = 1;
  873. break;
  874. }
  875. gneq++;
  876. }
  877. if (found)
  878. return ((char *)gneq)[18] & 0x07;
  879. else
  880. return 0;
  881. }
  882. static void dasd_eckd_clear_conf_data(struct dasd_device *device)
  883. {
  884. struct dasd_eckd_private *private = device->private;
  885. int i;
  886. private->conf_data = NULL;
  887. private->conf_len = 0;
  888. for (i = 0; i < 8; i++) {
  889. kfree(device->path[i].conf_data);
  890. device->path[i].conf_data = NULL;
  891. device->path[i].cssid = 0;
  892. device->path[i].ssid = 0;
  893. device->path[i].chpid = 0;
  894. }
  895. }
/*
 * Read configuration data on every operational channel path, verify that
 * all paths lead to the same physical device and classify each path as
 * preferred/non-preferred.
 *
 * The first path that delivers valid conf data becomes the device-wide
 * conf_data and defines the device UID; conf data of every further path
 * is compared against that UID. Paths with a mismatching UID are marked
 * as miscabled (cablepm) and reported.
 *
 * Returns 0 on success, -EINVAL if at least one path leads to a
 * different device, or the error from dasd_eckd_read_conf_lpm().
 */
static int dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc, path_err, pos;
	__u8 lpm, opm;
	struct dasd_eckd_private *private, path_private;
	struct dasd_uid *uid;
	char print_path_uid[60], print_device_uid[60];
	struct channel_path_desc_fmt0 *chp_desc;
	struct subchannel_id sch_id;

	private = device->private;
	opm = ccw_device_get_path_mask(device->cdev);
	ccw_device_get_schid(device->cdev, &sch_id);
	conf_data_saved = 0;
	path_err = 0;
	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm>>= 1) {
		if (!(lpm & opm))
			continue;
		rc = dasd_eckd_read_conf_lpm(device, &conf_data,
					     &conf_len, lpm);
		if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data returned "
					"error %d", rc);
			return rc;
		}
		if (conf_data == NULL) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"No configuration data "
					"retrieved");
			/* no further analysis possible */
			dasd_path_add_opm(device, opm);
			continue;	/* no error */
		}
		/* save first valid configuration data */
		if (!conf_data_saved) {
			/* initially clear previously stored conf_data */
			dasd_eckd_clear_conf_data(device);
			private->conf_data = conf_data;
			private->conf_len = conf_len;
			if (dasd_eckd_identify_conf_parts(private)) {
				/* unusable conf data, try the next path */
				private->conf_data = NULL;
				private->conf_len = 0;
				kfree(conf_data);
				continue;
			}
			pos = pathmask_to_pos(lpm);
			/* store per path conf_data */
			device->path[pos].conf_data = conf_data;
			device->path[pos].cssid = sch_id.cssid;
			device->path[pos].ssid = sch_id.ssid;
			chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
			if (chp_desc)
				device->path[pos].chpid = chp_desc->chpid;
			kfree(chp_desc);
			/*
			 * build device UID that other path data
			 * can be compared to it
			 */
			dasd_eckd_generate_uid(device);
			conf_data_saved++;
		} else {
			/* compare this path's conf data against device UID */
			path_private.conf_data = conf_data;
			path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
			if (dasd_eckd_identify_conf_parts(
				    &path_private)) {
				path_private.conf_data = NULL;
				path_private.conf_len = 0;
				kfree(conf_data);
				continue;
			}
			if (dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				/* UID mismatch: report both UIDs and mark
				 * the path as miscabled */
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				uid = &private->uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"Not all channel paths lead to "
					"the same device, path %02X leads to "
					"device %s instead of %s\n", lpm,
					print_path_uid, print_device_uid);
				path_err = -EINVAL;
				dasd_path_add_cablepm(device, lpm);
				continue;
			}
			pos = pathmask_to_pos(lpm);
			/* store per path conf_data */
			device->path[pos].conf_data = conf_data;
			device->path[pos].cssid = sch_id.cssid;
			device->path[pos].ssid = sch_id.ssid;
			chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
			if (chp_desc)
				device->path[pos].chpid = chp_desc->chpid;
			kfree(chp_desc);
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
		}
		/* classify the path as non-preferred/preferred */
		switch (dasd_eckd_path_access(conf_data, conf_len)) {
		case 0x02:
			dasd_path_add_nppm(device, lpm);
			break;
		case 0x03:
			dasd_path_add_ppm(device, lpm);
			break;
		}
		if (!dasd_path_get_opm(device)) {
			/* first operational path brings the device online */
			dasd_path_set_opm(device, lpm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, lpm);
		}
	}

	return path_err;
}
  1039. static u32 get_fcx_max_data(struct dasd_device *device)
  1040. {
  1041. struct dasd_eckd_private *private = device->private;
  1042. int fcx_in_css, fcx_in_gneq, fcx_in_features;
  1043. unsigned int mdc;
  1044. int tpm;
  1045. if (dasd_nofcx)
  1046. return 0;
  1047. /* is transport mode supported? */
  1048. fcx_in_css = css_general_characteristics.fcx;
  1049. fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
  1050. fcx_in_features = private->features.feature[40] & 0x80;
  1051. tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
  1052. if (!tpm)
  1053. return 0;
  1054. mdc = ccw_device_get_mdc(device->cdev, 0);
  1055. if (mdc == 0) {
  1056. dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
  1057. return 0;
  1058. } else {
  1059. return (u32)mdc * FCX_MAX_DATA_FACTOR;
  1060. }
  1061. }
  1062. static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
  1063. {
  1064. struct dasd_eckd_private *private = device->private;
  1065. unsigned int mdc;
  1066. u32 fcx_max_data;
  1067. if (private->fcx_max_data) {
  1068. mdc = ccw_device_get_mdc(device->cdev, lpm);
  1069. if (mdc == 0) {
  1070. dev_warn(&device->cdev->dev,
  1071. "Detecting the maximum data size for zHPF "
  1072. "requests failed (rc=%d) for a new path %x\n",
  1073. mdc, lpm);
  1074. return mdc;
  1075. }
  1076. fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
  1077. if (fcx_max_data < private->fcx_max_data) {
  1078. dev_warn(&device->cdev->dev,
  1079. "The maximum data size for zHPF requests %u "
  1080. "on a new path %x is below the active maximum "
  1081. "%u\n", fcx_max_data, lpm,
  1082. private->fcx_max_data);
  1083. return -EACCES;
  1084. }
  1085. }
  1086. return 0;
  1087. }
/*
 * Re-read configuration data on any operational path and rebuild the
 * device UID from it. Used during path verification when the device may
 * have been swapped underneath us (e.g. z/VM hyperswap). The first path
 * that delivers identifiable conf data is sufficient.
 *
 * Returns 0 on success, -ENODEV when no usable conf data was found, or
 * the error from reading the configuration data.
 */
static int rebuild_device_uid(struct dasd_device *device,
			      struct path_verification_work_data *data)
{
	struct dasd_eckd_private *private = device->private;
	__u8 lpm, opm = dasd_path_get_opm(device);
	int rc = -ENODEV;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);

		if (rc) {
			if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
				continue;
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data "
					"returned error %d", rc);
			break;
		}
		/*
		 * Overwrite the stored conf data in place.
		 * NOTE(review): assumes private->conf_data is at least
		 * DASD_ECKD_RCD_DATA_SIZE bytes (it was allocated with
		 * that size in dasd_eckd_read_conf_lpm()) — confirm no
		 * other allocation path stores a smaller buffer.
		 */
		memcpy(private->conf_data, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		if (dasd_eckd_identify_conf_parts(private)) {
			rc = -ENODEV;
		} else /* first valid path is enough */
			break;
	}

	if (!rc)
		rc = dasd_eckd_generate_uid(device);

	return rc;
}
  1122. static void do_path_verification_work(struct work_struct *work)
  1123. {
  1124. struct path_verification_work_data *data;
  1125. struct dasd_device *device;
  1126. struct dasd_eckd_private path_private;
  1127. struct dasd_uid *uid;
  1128. __u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
  1129. __u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
  1130. unsigned long flags;
  1131. char print_uid[60];
  1132. int rc;
  1133. data = container_of(work, struct path_verification_work_data, worker);
  1134. device = data->device;
  1135. /* delay path verification until device was resumed */
  1136. if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
  1137. schedule_work(work);
  1138. return;
  1139. }
  1140. /* check if path verification already running and delay if so */
  1141. if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
  1142. schedule_work(work);
  1143. return;
  1144. }
  1145. opm = 0;
  1146. npm = 0;
  1147. ppm = 0;
  1148. epm = 0;
  1149. hpfpm = 0;
  1150. cablepm = 0;
  1151. for (lpm = 0x80; lpm; lpm >>= 1) {
  1152. if (!(lpm & data->tbvpm))
  1153. continue;
  1154. memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
  1155. memset(&data->cqr, 0, sizeof(data->cqr));
  1156. data->cqr.cpaddr = &data->ccw;
  1157. rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
  1158. data->rcd_buffer,
  1159. lpm);
  1160. if (!rc) {
  1161. switch (dasd_eckd_path_access(data->rcd_buffer,
  1162. DASD_ECKD_RCD_DATA_SIZE)
  1163. ) {
  1164. case 0x02:
  1165. npm |= lpm;
  1166. break;
  1167. case 0x03:
  1168. ppm |= lpm;
  1169. break;
  1170. }
  1171. opm |= lpm;
  1172. } else if (rc == -EOPNOTSUPP) {
  1173. DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
  1174. "path verification: No configuration "
  1175. "data retrieved");
  1176. opm |= lpm;
  1177. } else if (rc == -EAGAIN) {
  1178. DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
  1179. "path verification: device is stopped,"
  1180. " try again later");
  1181. epm |= lpm;
  1182. } else {
  1183. dev_warn(&device->cdev->dev,
  1184. "Reading device feature codes failed "
  1185. "(rc=%d) for new path %x\n", rc, lpm);
  1186. continue;
  1187. }
  1188. if (verify_fcx_max_data(device, lpm)) {
  1189. opm &= ~lpm;
  1190. npm &= ~lpm;
  1191. ppm &= ~lpm;
  1192. hpfpm |= lpm;
  1193. continue;
  1194. }
  1195. /*
  1196. * save conf_data for comparison after
  1197. * rebuild_device_uid may have changed
  1198. * the original data
  1199. */
  1200. memcpy(&path_rcd_buf, data->rcd_buffer,
  1201. DASD_ECKD_RCD_DATA_SIZE);
  1202. path_private.conf_data = (void *) &path_rcd_buf;
  1203. path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
  1204. if (dasd_eckd_identify_conf_parts(&path_private)) {
  1205. path_private.conf_data = NULL;
  1206. path_private.conf_len = 0;
  1207. continue;
  1208. }
  1209. /*
  1210. * compare path UID with device UID only if at least
  1211. * one valid path is left
  1212. * in other case the device UID may have changed and
  1213. * the first working path UID will be used as device UID
  1214. */
  1215. if (dasd_path_get_opm(device) &&
  1216. dasd_eckd_compare_path_uid(device, &path_private)) {
  1217. /*
  1218. * the comparison was not successful
  1219. * rebuild the device UID with at least one
  1220. * known path in case a z/VM hyperswap command
  1221. * has changed the device
  1222. *
  1223. * after this compare again
  1224. *
  1225. * if either the rebuild or the recompare fails
  1226. * the path can not be used
  1227. */
  1228. if (rebuild_device_uid(device, data) ||
  1229. dasd_eckd_compare_path_uid(
  1230. device, &path_private)) {
  1231. uid = &path_private.uid;
  1232. if (strlen(uid->vduit) > 0)
  1233. snprintf(print_uid, sizeof(print_uid),
  1234. "%s.%s.%04x.%02x.%s",
  1235. uid->vendor, uid->serial,
  1236. uid->ssid, uid->real_unit_addr,
  1237. uid->vduit);
  1238. else
  1239. snprintf(print_uid, sizeof(print_uid),
  1240. "%s.%s.%04x.%02x",
  1241. uid->vendor, uid->serial,
  1242. uid->ssid,
  1243. uid->real_unit_addr);
  1244. dev_err(&device->cdev->dev,
  1245. "The newly added channel path %02X "
  1246. "will not be used because it leads "
  1247. "to a different device %s\n",
  1248. lpm, print_uid);
  1249. opm &= ~lpm;
  1250. npm &= ~lpm;
  1251. ppm &= ~lpm;
  1252. cablepm |= lpm;
  1253. continue;
  1254. }
  1255. }
  1256. /*
  1257. * There is a small chance that a path is lost again between
  1258. * above path verification and the following modification of
  1259. * the device opm mask. We could avoid that race here by using
  1260. * yet another path mask, but we rather deal with this unlikely
  1261. * situation in dasd_start_IO.
  1262. */
  1263. spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
  1264. if (!dasd_path_get_opm(device) && opm) {
  1265. dasd_path_set_opm(device, opm);
  1266. dasd_generic_path_operational(device);
  1267. } else {
  1268. dasd_path_add_opm(device, opm);
  1269. }
  1270. dasd_path_add_nppm(device, npm);
  1271. dasd_path_add_ppm(device, ppm);
  1272. dasd_path_add_tbvpm(device, epm);
  1273. dasd_path_add_cablepm(device, cablepm);
  1274. dasd_path_add_nohpfpm(device, hpfpm);
  1275. spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
  1276. }
  1277. clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
  1278. dasd_put_device(device);
  1279. if (data->isglobal)
  1280. mutex_unlock(&dasd_path_verification_mutex);
  1281. else
  1282. kfree(data);
  1283. }
  1284. static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
  1285. {
  1286. struct path_verification_work_data *data;
  1287. data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
  1288. if (!data) {
  1289. if (mutex_trylock(&dasd_path_verification_mutex)) {
  1290. data = path_verification_worker;
  1291. data->isglobal = 1;
  1292. } else
  1293. return -ENOMEM;
  1294. } else {
  1295. memset(data, 0, sizeof(*data));
  1296. data->isglobal = 0;
  1297. }
  1298. INIT_WORK(&data->worker, do_path_verification_work);
  1299. dasd_get_device(device);
  1300. data->device = device;
  1301. data->tbvpm = lpm;
  1302. schedule_work(&data->worker);
  1303. return 0;
  1304. }
  1305. static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
  1306. {
  1307. struct dasd_eckd_private *private = device->private;
  1308. unsigned long flags;
  1309. if (!private->fcx_max_data)
  1310. private->fcx_max_data = get_fcx_max_data(device);
  1311. spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
  1312. dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
  1313. dasd_schedule_device_bh(device);
  1314. spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
  1315. }
/*
 * Read the storage server's feature codes via a Perform Subsystem
 * Function / Read Subsystem Data channel program and cache the result
 * in private->features.
 *
 * Returns 0 on success, otherwise a negative errno from request
 * allocation or I/O; on any failure the cached feature data stays
 * zeroed.
 */
static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* invalidate any previously cached feature codes */
	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
	/* one CCW for the PSF order, one for reading back the RSSD data */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
				"allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;	/* command-chain to the RSSD CCW */
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes land right after the PSF data */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = (__u32)(addr_t) features;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	} else
		dev_warn(&device->cdev->dev, "Reading device feature codes"
			 " failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Build CP for Perform Subsystem Function - SSC (Set Subsystem
 * Characteristics), used to request PAV enablement on the server.
 *
 * Returns the filled request, or an ERR_PTR if allocation fails.
 */
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
						    int enable_pav)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_ssc_data),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	psf_ssc_data->suborder = 0xc0;
	if (enable_pav) {
		/* NOTE(review): 0x08/0x88 select PAV enablement; exact bit
		 * semantics are defined by the storage server interface */
		psf_ssc_data->suborder |= 0x08;
		psf_ssc_data->reserved[0] = 0x88;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_ssc_data;
	ccw->count = 66;	/* length of transferred PSF-SSC data */

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
  1408. /*
  1409. * Perform Subsystem Function.
  1410. * It is necessary to trigger CIO for channel revalidation since this
  1411. * call might change behaviour of DASD devices.
  1412. */
  1413. static int
  1414. dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
  1415. unsigned long flags)
  1416. {
  1417. struct dasd_ccw_req *cqr;
  1418. int rc;
  1419. cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
  1420. if (IS_ERR(cqr))
  1421. return PTR_ERR(cqr);
  1422. /*
  1423. * set flags e.g. turn on failfast, to prevent blocking
  1424. * the calling function should handle failed requests
  1425. */
  1426. cqr->flags |= flags;
  1427. rc = dasd_sleep_on(cqr);
  1428. if (!rc)
  1429. /* trigger CIO to reprobe devices */
  1430. css_schedule_reprobe();
  1431. else if (cqr->intrc == -EAGAIN)
  1432. rc = -EAGAIN;
  1433. dasd_sfree_request(cqr, cqr->memdev);
  1434. return rc;
  1435. }
  1436. /*
  1437. * Valide storage server of current device.
  1438. */
  1439. static int dasd_eckd_validate_server(struct dasd_device *device,
  1440. unsigned long flags)
  1441. {
  1442. struct dasd_eckd_private *private = device->private;
  1443. int enable_pav, rc;
  1444. if (private->uid.type == UA_BASE_PAV_ALIAS ||
  1445. private->uid.type == UA_HYPER_PAV_ALIAS)
  1446. return 0;
  1447. if (dasd_nopav || MACHINE_IS_VM)
  1448. enable_pav = 0;
  1449. else
  1450. enable_pav = 1;
  1451. rc = dasd_eckd_psf_ssc(device, enable_pav, flags);
  1452. /* may be requested feature is not available on server,
  1453. * therefore just report error and go ahead */
  1454. DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
  1455. "returned rc=%d", private->uid.ssid, rc);
  1456. return rc;
  1457. }
  1458. /*
  1459. * worker to do a validate server in case of a lost pathgroup
  1460. */
  1461. static void dasd_eckd_do_validate_server(struct work_struct *work)
  1462. {
  1463. struct dasd_device *device = container_of(work, struct dasd_device,
  1464. kick_validate);
  1465. unsigned long flags = 0;
  1466. set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
  1467. if (dasd_eckd_validate_server(device, flags)
  1468. == -EAGAIN) {
  1469. /* schedule worker again if failed */
  1470. schedule_work(&device->kick_validate);
  1471. return;
  1472. }
  1473. dasd_put_device(device);
  1474. }
  1475. static void dasd_eckd_kick_validate_server(struct dasd_device *device)
  1476. {
  1477. dasd_get_device(device);
  1478. /* exit if device not online or in offline processing */
  1479. if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
  1480. device->state < DASD_STATE_ONLINE) {
  1481. dasd_put_device(device);
  1482. return;
  1483. }
  1484. /* queue call to do_validate_server to the kernel event daemon. */
  1485. if (!schedule_work(&device->kick_validate))
  1486. dasd_put_device(device);
  1487. }
/*
 * Check device characteristics.
 * If the device is accessible using ECKD discipline, the device is enabled.
 *
 * Allocates the ECKD private data, reads configuration data, registers
 * the device with the LCU/alias handling, and reads feature codes and
 * device characteristics.  Returns 0 on success or a negative errno;
 * on failure all partially acquired resources are released again.
 */
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_block *block;
	struct dasd_uid temp_uid;
	int rc, i;
	int readonly;
	unsigned long value;

	/* setup work queue for validate server*/
	INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
	/* setup work queue for summary unit check */
	INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);

	if (!ccw_device_is_pathgroup(device->cdev)) {
		dev_warn(&device->cdev->dev,
			 "A channel path group could not be established\n");
		return -EIO;
	}
	if (!ccw_device_is_multipath(device->cdev)) {
		dev_info(&device->cdev->dev,
			 "The DASD is not operating in multipath mode\n");
	}
	/* allocate fresh private data, or reset data left from a prior use */
	if (!private) {
		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
		if (!private) {
			dev_warn(&device->cdev->dev,
				 "Allocating memory for private DASD data "
				 "failed\n");
			return -ENOMEM;
		}
		device->private = private;
	} else {
		memset(private, 0, sizeof(*private));
	}
	/* Invalidate status of initial analysis. */
	private->init_cqr_status = -1;
	/* Set default cache operations. */
	private->attrib.operation = DASD_NORMAL_CACHE;
	private->attrib.nr_cyl = 0;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err1;

	/* set some default values */
	device->default_expires = DASD_EXPIRES;
	device->default_retries = DASD_RETRIES;
	device->path_thrhld = DASD_ECKD_PATH_THRHLD;
	device->path_interval = DASD_ECKD_PATH_INTERVAL;

	/*
	 * use the timeout from the general NEQ if usable:
	 * expires = number * 10^value, bounded by DASD_EXPIRES_MAX
	 */
	if (private->gneq) {
		value = 1;
		for (i = 0; i < private->gneq->timeout.value; i++)
			value = 10 * value;
		value = value * private->gneq->timeout.number;
		/* do not accept useless values */
		if (value != 0 && value <= DASD_EXPIRES_MAX)
			device->default_expires = value;
	}

	/* only base devices get a block device layer attached */
	dasd_eckd_get_uid(device, &temp_uid);
	if (temp_uid.type == UA_BASE_DEVICE) {
		block = dasd_alloc_block();
		if (IS_ERR(block)) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"could not allocate dasd "
					"block structure");
			rc = PTR_ERR(block);
			goto out_err1;
		}
		device->block = block;
		block->base = device;
	}

	/* register lcu with alias handling, enable PAV */
	rc = dasd_alias_make_device_known_to_lcu(device);
	if (rc)
		goto out_err2;

	/* errors are tolerated here: the server may not support SSC */
	dasd_eckd_validate_server(device, 0);

	/* device may report different configuration data after LCU setup */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err3;

	/* Read Feature Codes */
	dasd_eckd_read_features(device);

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &private->rdc_data, 64);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristic failed, rc=%d", rc);
		goto out_err3;
	}

	/* raw-track access requires the RT-in-LR facility on the server */
	if ((device->features & DASD_FEATURE_USERAW) &&
	    !(private->rdc_data.facilities.RT_in_LR)) {
		dev_err(&device->cdev->dev, "The storage server does not "
			"support raw-track access\n");
		rc = -EINVAL;
		goto out_err3;
	}

	/* find the valid cylinder size */
	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
	    private->rdc_data.long_no_cyl)
		private->real_cyl = private->rdc_data.long_no_cyl;
	else
		private->real_cyl = private->rdc_data.no_cyl;

	private->fcx_max_data = get_fcx_max_data(device);

	readonly = dasd_device_is_ro(device);
	if (readonly)
		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);

	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
		 "with %d cylinders, %d heads, %d sectors%s\n",
		 private->rdc_data.dev_type,
		 private->rdc_data.dev_model,
		 private->rdc_data.cu_type,
		 private->rdc_data.cu_model.model,
		 private->real_cyl,
		 private->rdc_data.trk_per_cyl,
		 private->rdc_data.sec_per_trk,
		 readonly ? ", read-only device" : "");
	return 0;

	/* error unwinding: undo in reverse order of acquisition */
out_err3:
	dasd_alias_disconnect_device_from_lcu(device);
out_err2:
	dasd_free_block(device->block);
	device->block = NULL;
out_err1:
	dasd_eckd_clear_conf_data(device);
	kfree(device->private);
	device->private = NULL;
	return rc;
}
/*
 * Undo dasd_eckd_check_characteristics: disconnect from the LCU and
 * drop the cached configuration data.
 */
static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	if (!private)
		return;

	dasd_alias_disconnect_device_from_lcu(device);
	/* clear pointers into the conf data before releasing it below */
	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	dasd_eckd_clear_conf_data(device);
}
/*
 * Build the channel program for the initial disk-layout analysis:
 * read the count fields of the first 4 records on track 0 and of the
 * first record on track 2 into private->count_area (5 entries total).
 *
 * Returns the filled request or an ERR_PTR on allocation failure.
 */
static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct eckd_count *count_data;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int cplength, datasize;
	int i;

	/* 8 CCWs: DE + LO + 4x read count + LO + read count */
	cplength = 8;
	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
				   NULL);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* Define extent for the first 3 tracks. */
	define_extent(ccw++, cqr->data, 0, 2,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	LO_data = cqr->data + sizeof(struct DE_eckd_data);
	/* Locate record for the first 4 records on track 0. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 0, 0, 4,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);

	/* read the 4 count fields of track 0 into count_area[0..3] */
	count_data = private->count_area;
	for (i = 0; i < 4; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = 0;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) count_data;
		ccw++;
		count_data++;
	}

	/* Locate record for the first record on track 2. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 2, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	/* Read count ccw - fills count_area[4]. */
	ccw[-1].flags |= CCW_FLAG_CC;
	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
	ccw->flags = 0;
	ccw->count = 8;
	ccw->cda = (__u32)(addr_t) count_data;

	cqr->block = NULL;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 255;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
  1685. /* differentiate between 'no record found' and any other error */
  1686. static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
  1687. {
  1688. char *sense;
  1689. if (init_cqr->status == DASD_CQR_DONE)
  1690. return INIT_CQR_OK;
  1691. else if (init_cqr->status == DASD_CQR_NEED_ERP ||
  1692. init_cqr->status == DASD_CQR_FAILED) {
  1693. sense = dasd_get_sense(&init_cqr->irb);
  1694. if (sense && (sense[1] & SNS1_NO_REC_FOUND))
  1695. return INIT_CQR_UNFORMATTED;
  1696. else
  1697. return INIT_CQR_ERROR;
  1698. } else
  1699. return INIT_CQR_ERROR;
  1700. }
/*
 * This is the callback function for the init_analysis cqr. It saves
 * the status of the initial analysis ccw before it frees it and kicks
 * the device to continue the startup sequence. This will call
 * dasd_eckd_do_analysis again (if the devices has not been marked
 * for deletion in the meantime).
 */
static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
					void *data)
{
	struct dasd_device *device = init_cqr->startdev;
	struct dasd_eckd_private *private = device->private;

	/* classify the result before the request memory is released */
	private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
	dasd_sfree_request(init_cqr, device);
	dasd_kick_device(device);	/* resume the startup state machine */
}
  1717. static int dasd_eckd_start_analysis(struct dasd_block *block)
  1718. {
  1719. struct dasd_ccw_req *init_cqr;
  1720. init_cqr = dasd_eckd_analysis_ccw(block->base);
  1721. if (IS_ERR(init_cqr))
  1722. return PTR_ERR(init_cqr);
  1723. init_cqr->callback = dasd_eckd_analysis_callback;
  1724. init_cqr->callback_data = NULL;
  1725. init_cqr->expires = 5*HZ;
  1726. /* first try without ERP, so we can later handle unformatted
  1727. * devices as special case
  1728. */
  1729. clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
  1730. init_cqr->retries = 0;
  1731. dasd_add_request_head(init_cqr);
  1732. return -EAGAIN;
  1733. }
/*
 * Evaluate the count data gathered by the analysis channel program and
 * derive the disk layout (compatible vs. linux), the block size and the
 * total capacity of the block device.
 *
 * Returns 0 on success, -EMEDIUMTYPE for unformatted/unsupported
 * layouts, or -EIO when the analysis I/O failed even with full ERP.
 */
static int dasd_eckd_end_analysis(struct dasd_block *block)
{
	struct dasd_device *device = block->base;
	struct dasd_eckd_private *private = device->private;
	struct eckd_count *count_area;
	unsigned int sb, blk_per_trk;
	int status, i;
	struct dasd_ccw_req *init_cqr;

	status = private->init_cqr_status;
	private->init_cqr_status = -1;
	if (status == INIT_CQR_ERROR) {
		/* try again, this time with full ERP */
		init_cqr = dasd_eckd_analysis_ccw(device);
		dasd_sleep_on(init_cqr);
		status = dasd_eckd_analysis_evaluation(init_cqr);
		dasd_sfree_request(init_cqr, device);
	}

	/* raw-track access uses fixed block parameters, skip detection */
	if (device->features & DASD_FEATURE_USERAW) {
		block->bp_block = DASD_RAW_BLOCKSIZE;
		blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
		block->s2b_shift = 3;
		goto raw;
	}

	if (status == INIT_CQR_UNFORMATTED) {
		dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
		return -EMEDIUMTYPE;
	} else if (status == INIT_CQR_ERROR) {
		dev_err(&device->cdev->dev,
			"Detecting the DASD disk layout failed because "
			"of an I/O error\n");
		return -EIO;
	}

	private->uses_cdl = 1;
	/* Check Track 0 for Compatible Disk Layout */
	count_area = NULL;
	for (i = 0; i < 3; i++) {
		if (private->count_area[i].kl != 4 ||
		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
		    private->count_area[i].cyl != 0 ||
		    private->count_area[i].head != count_area_head[i] ||
		    private->count_area[i].record != count_area_rec[i]) {
			private->uses_cdl = 0;
			break;
		}
	}
	if (i == 3)
		count_area = &private->count_area[4];

	if (private->uses_cdl == 0) {
		/* linux disk layout: all five records must look alike */
		for (i = 0; i < 5; i++) {
			if ((private->count_area[i].kl != 0) ||
			    (private->count_area[i].dl !=
			     private->count_area[0].dl) ||
			    private->count_area[i].cyl != 0 ||
			    private->count_area[i].head != count_area_head[i] ||
			    private->count_area[i].record != count_area_rec[i])
				break;
		}
		if (i == 5)
			count_area = &private->count_area[0];
	} else {
		if (private->count_area[3].record == 1)
			dev_warn(&device->cdev->dev,
				 "Track 0 has no records following the VTOC\n");
	}

	if (count_area != NULL && count_area->kl == 0) {
		/* we found nothing violating our disk layout */
		if (dasd_check_blocksize(count_area->dl) == 0)
			block->bp_block = count_area->dl;
	}
	if (block->bp_block == 0) {
		dev_warn(&device->cdev->dev,
			 "The disk layout of the DASD is not supported\n");
		return -EMEDIUMTYPE;
	}

	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
	for (sb = 512; sb < block->bp_block; sb = sb << 1)
		block->s2b_shift++;

	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);

raw:
	/* capacity = cylinders * tracks/cylinder * blocks/track */
	block->blocks = ((unsigned long) private->real_cyl *
			 private->rdc_data.trk_per_cyl *
			 blk_per_trk);

	dev_info(&device->cdev->dev,
		 "DASD with %u KB/block, %lu KB total size, %u KB/track, "
		 "%s\n", (block->bp_block >> 10),
		 (((unsigned long) private->real_cyl *
		   private->rdc_data.trk_per_cyl *
		   blk_per_trk * (block->bp_block >> 9)) >> 1),
		 ((blk_per_trk * block->bp_block) >> 10),
		 private->uses_cdl ?
		 "compatible disk layout" : "linux disk layout");

	return 0;
}
  1827. static int dasd_eckd_do_analysis(struct dasd_block *block)
  1828. {
  1829. struct dasd_eckd_private *private = block->base->private;
  1830. if (private->init_cqr_status < 0)
  1831. return dasd_eckd_start_analysis(block);
  1832. else
  1833. return dasd_eckd_end_analysis(block);
  1834. }
/* state transition basic -> ready: register with alias management */
static int dasd_eckd_basic_to_ready(struct dasd_device *device)
{
	return dasd_alias_add_device(device);
};
/* state transition online -> ready: stop deferred per-device work */
static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
	/* each work item that was still pending holds a device
	 * reference which must be dropped after cancellation */
	if (cancel_work_sync(&device->reload_device))
		dasd_put_device(device);
	if (cancel_work_sync(&device->kick_validate))
		dasd_put_device(device);
	return 0;
};
/* state transition basic -> known: deregister from alias management */
static int dasd_eckd_basic_to_known(struct dasd_device *device)
{
	return dasd_alias_remove_device(device);
};
  1851. static int
  1852. dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
  1853. {
  1854. struct dasd_eckd_private *private = block->base->private;
  1855. if (dasd_check_blocksize(block->bp_block) == 0) {
  1856. geo->sectors = recs_per_track(&private->rdc_data,
  1857. 0, block->bp_block);
  1858. }
  1859. geo->cylinders = private->rdc_data.no_cyl;
  1860. geo->heads = private->rdc_data.trk_per_cyl;
  1861. return 0;
  1862. }
/*
 * Build the TCW request for the format check.
 *
 * Reads the count fields of all records in the track range
 * [fdata->start_unit, fdata->stop_unit] (rpt records per track) into
 * fmt_buffer using a single transport-mode (TCW/ITCW) request.
 * Returns the request or an ERR_PTR.
 */
static struct dasd_ccw_req *
dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
			  int enable_pav, struct eckd_count *fmt_buffer,
			  int rpt)
{
	struct dasd_eckd_private *start_priv;
	struct dasd_device *startdev = NULL;
	struct tidaw *last_tidaw = NULL;
	struct dasd_ccw_req *cqr;
	struct itcw *itcw;
	int itcw_size;
	int count;
	int rc;
	int i;

	/* with PAV the request may be started on an alias device */
	if (enable_pav)
		startdev = dasd_alias_get_start_dev(base);

	if (!startdev)
		startdev = base;

	start_priv = startdev->private;

	count = rpt * (fdata->stop_unit - fdata->start_unit + 1);

	/*
	 * we're adding 'count' amount of tidaw to the itcw.
	 * calculate the corresponding itcw_size
	 */
	itcw_size = itcw_calc_size(0, count, 0);

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
				   NULL);
	if (IS_ERR(cqr))
		return cqr;

	start_priv->count++;

	itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
	if (IS_ERR(itcw)) {
		rc = -EINVAL;
		goto out_err;
	}

	cqr->cpaddr = itcw_get_tcw(itcw);

	rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
			  DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
			  sizeof(struct eckd_count),
			  count * sizeof(struct eckd_count), 0, rpt);
	if (rc)
		goto out_err;

	/* one tidaw per count record to be read */
	for (i = 0; i < count; i++) {
		last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
					    sizeof(struct eckd_count));
		if (IS_ERR(last_tidaw)) {
			rc = -EINVAL;
			goto out_err;
		}
	}

	last_tidaw->flags |= TIDAW_FLAGS_LAST;
	itcw_finalize(itcw);

	cqr->cpmode = 1;	/* transport mode request */
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->basedev = base;
	cqr->retries = startdev->default_retries;
	cqr->expires = startdev->default_expires * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* Set flags to suppress output for expected errors */
	set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
	set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);

	return cqr;

out_err:
	dasd_sfree_request(cqr, startdev);

	return ERR_PTR(rc);
}
/*
 * Build the CCW request for the format check.
 *
 * Command-mode counterpart of dasd_eckd_build_check_tcw: reads the
 * count fields of all records in [fdata->start_unit, fdata->stop_unit]
 * (rpt records per track) into fmt_buffer.  Returns the request or an
 * ERR_PTR.
 */
static struct dasd_ccw_req *
dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
		      int enable_pav, struct eckd_count *fmt_buffer, int rpt)
{
	struct dasd_eckd_private *start_priv;
	struct dasd_eckd_private *base_priv;
	struct dasd_device *startdev = NULL;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	void *data;
	int cplength, datasize;
	int use_prefix;
	int count;
	int i;

	/* with PAV the request may be started on an alias device */
	if (enable_pav)
		startdev = dasd_alias_get_start_dev(base);

	if (!startdev)
		startdev = base;

	start_priv = startdev->private;
	base_priv = base->private;

	count = rpt * (fdata->stop_unit - fdata->start_unit + 1);

	/* feature bit decides between a PFX or DE/LO prologue */
	use_prefix = base_priv->features.feature[8] & 0x01;

	if (use_prefix) {
		cplength = 1;
		datasize = sizeof(struct PFX_eckd_data);
	} else {
		cplength = 2;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data);
	}
	cplength += count;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev, NULL);
	if (IS_ERR(cqr))
		return cqr;

	start_priv->count++;
	data = cqr->data;
	ccw = cqr->cpaddr;

	if (use_prefix) {
		prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
			   DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
			   count, 0, 0);
	} else {
		define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
			      DASD_ECKD_CCW_READ_COUNT, startdev, 0);

		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;

		locate_record(ccw++, data, fdata->start_unit, 0, count,
			      DASD_ECKD_CCW_READ_COUNT, base, 0);
	}

	/* one read-count CCW per record, chained to the prologue */
	for (i = 0; i < count; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = CCW_FLAG_SLI;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) fmt_buffer;
		ccw++;
		fmt_buffer++;
	}

	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->basedev = base;
	cqr->retries = DASD_RETRIES;
	cqr->expires = startdev->default_expires * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* Set flags to suppress output for expected errors */
	set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

	return cqr;
}
/*
 * Build the channel program that formats (or invalidates) the tracks
 * fdata->start_unit .. fdata->stop_unit on @base.
 *
 * If @enable_pav is set, an alias device may be picked as the start
 * device; otherwise (or if no alias is available) the base device is
 * used. The channel program consists of a Prefix (or Define Extent)
 * CCW, a Locate Record CCW and one Write CCW per record to be written.
 *
 * Returns the prepared request, or an ERR_PTR() on invalid intensity
 * flags or allocation failure.
 */
static struct dasd_ccw_req *
dasd_eckd_build_format(struct dasd_device *base,
		       struct format_data_t *fdata,
		       int enable_pav)
{
	struct dasd_eckd_private *base_priv;
	struct dasd_eckd_private *start_priv;
	struct dasd_device *startdev = NULL;
	struct dasd_ccw_req *fcp;
	struct eckd_count *ect;
	struct ch_t address;
	struct ccw1 *ccw;
	void *data;
	int rpt;
	int cplength, datasize;
	int i, j;
	int intensity = 0;
	int r0_perm;
	int nr_tracks;
	int use_prefix;

	if (enable_pav)
		startdev = dasd_alias_get_start_dev(base);

	if (!startdev)
		startdev = base;

	start_priv = startdev->private;
	base_priv = base->private;

	/* records per track for the requested block size */
	rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);

	nr_tracks = fdata->stop_unit - fdata->start_unit + 1;

	/*
	 * fdata->intensity is a bit string that tells us what to do:
	 *   Bit 0: write record zero
	 *   Bit 1: write home address, currently not supported
	 *   Bit 2: invalidate tracks
	 *   Bit 3: use OS/390 compatible disk layout (cdl)
	 *   Bit 4: do not allow storage subsystem to modify record zero
	 * Only some bit combinations do make sense.
	 */
	if (fdata->intensity & 0x10) {
		r0_perm = 0;
		intensity = fdata->intensity & ~0x10;
	} else {
		r0_perm = 1;
		intensity = fdata->intensity;
	}

	/* use the prefix command if the subsystem supports it */
	use_prefix = base_priv->features.feature[8] & 0x01;

	/*
	 * Work out channel program length (number of CCWs) and the size
	 * of the data area (extent/locate descriptors plus one count
	 * field per record to be written).
	 */
	switch (intensity) {
	case 0x00:	/* Normal format */
	case 0x08:	/* Normal format, use cdl. */
		cplength = 2 + (rpt*nr_tracks);
		if (use_prefix)
			datasize = sizeof(struct PFX_eckd_data) +
				sizeof(struct LO_eckd_data) +
				rpt * nr_tracks * sizeof(struct eckd_count);
		else
			datasize = sizeof(struct DE_eckd_data) +
				sizeof(struct LO_eckd_data) +
				rpt * nr_tracks * sizeof(struct eckd_count);
		break;
	case 0x01:	/* Write record zero and format track. */
	case 0x09:	/* Write record zero and format track, use cdl. */
		cplength = 2 + rpt * nr_tracks;
		if (use_prefix)
			datasize = sizeof(struct PFX_eckd_data) +
				sizeof(struct LO_eckd_data) +
				sizeof(struct eckd_count) +
				rpt * nr_tracks * sizeof(struct eckd_count);
		else
			datasize = sizeof(struct DE_eckd_data) +
				sizeof(struct LO_eckd_data) +
				sizeof(struct eckd_count) +
				rpt * nr_tracks * sizeof(struct eckd_count);
		break;
	case 0x04:	/* Invalidate track. */
	case 0x0c:	/* Invalidate track, use cdl. */
		cplength = 3;
		if (use_prefix)
			datasize = sizeof(struct PFX_eckd_data) +
				sizeof(struct LO_eckd_data) +
				sizeof(struct eckd_count);
		else
			datasize = sizeof(struct DE_eckd_data) +
				sizeof(struct LO_eckd_data) +
				sizeof(struct eckd_count);
		break;
	default:
		dev_warn(&startdev->cdev->dev,
			 "An I/O control call used incorrect flags 0x%x\n",
			 fdata->intensity);
		return ERR_PTR(-EINVAL);
	}

	/* Allocate the format ccw request. */
	fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
				   datasize, startdev, NULL);
	if (IS_ERR(fcp))
		return fcp;

	start_priv->count++;
	data = fcp->data;
	ccw = fcp->cpaddr;

	/*
	 * First build the extent/locate part of the program; the cdl
	 * bit (0x08) only changes the record layout below, not the
	 * extent setup, so it is masked out here.
	 */
	switch (intensity & ~0x08) {
	case 0x00: /* Normal format. */
		if (use_prefix) {
			prefix(ccw++, (struct PFX_eckd_data *) data,
			       fdata->start_unit, fdata->stop_unit,
			       DASD_ECKD_CCW_WRITE_CKD, base, startdev);
			/* grant subsystem permission to format R0 */
			if (r0_perm)
				((struct PFX_eckd_data *)data)
					->define_extent.ga_extended |= 0x04;
			data += sizeof(struct PFX_eckd_data);
		} else {
			define_extent(ccw++, (struct DE_eckd_data *) data,
				      fdata->start_unit, fdata->stop_unit,
				      DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
			/* grant subsystem permission to format R0 */
			if (r0_perm)
				((struct DE_eckd_data *) data)
					->ga_extended |= 0x04;
			data += sizeof(struct DE_eckd_data);
		}
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt*nr_tracks,
			      DASD_ECKD_CCW_WRITE_CKD, base,
			      fdata->blksize);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x01: /* Write record zero + format track. */
		if (use_prefix) {
			prefix(ccw++, (struct PFX_eckd_data *) data,
			       fdata->start_unit, fdata->stop_unit,
			       DASD_ECKD_CCW_WRITE_RECORD_ZERO,
			       base, startdev);
			data += sizeof(struct PFX_eckd_data);
		} else {
			define_extent(ccw++, (struct DE_eckd_data *) data,
				      fdata->start_unit, fdata->stop_unit,
				      DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
			data += sizeof(struct DE_eckd_data);
		}
		ccw[-1].flags |= CCW_FLAG_CC;
		/* rpt records plus record zero per track */
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt * nr_tracks + 1,
			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
			      base->block->bp_block);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x04: /* Invalidate track. */
		if (use_prefix) {
			prefix(ccw++, (struct PFX_eckd_data *) data,
			       fdata->start_unit, fdata->stop_unit,
			       DASD_ECKD_CCW_WRITE_CKD, base, startdev);
			data += sizeof(struct PFX_eckd_data);
		} else {
			define_extent(ccw++, (struct DE_eckd_data *) data,
				      fdata->start_unit, fdata->stop_unit,
				      DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
			data += sizeof(struct DE_eckd_data);
		}
		ccw[-1].flags |= CCW_FLAG_CC;
		/* only a single (dummy) record is written per extent */
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, 1,
			      DASD_ECKD_CCW_WRITE_CKD, base, 8);
		data += sizeof(struct LO_eckd_data);
		break;
	}

	/* Now append one Write CCW per record for every track. */
	for (j = 0; j < nr_tracks; j++) {
		/* calculate cylinder and head for the current track */
		set_ch_t(&address,
			 (fdata->start_unit + j) /
			 base_priv->rdc_data.trk_per_cyl,
			 (fdata->start_unit + j) %
			 base_priv->rdc_data.trk_per_cyl);
		if (intensity & 0x01) {	/* write record zero */
			ect = (struct eckd_count *) data;
			data += sizeof(struct eckd_count);
			ect->cyl = address.cyl;
			ect->head = address.head;
			ect->record = 0;
			ect->kl = 0;
			ect->dl = 8;
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
			ccw->flags = CCW_FLAG_SLI;
			ccw->count = 8;
			ccw->cda = (__u32)(addr_t) ect;
			ccw++;
		}
		if ((intensity & ~0x08) & 0x04) {	/* erase track */
			/* a single record 1 with zero data length */
			ect = (struct eckd_count *) data;
			data += sizeof(struct eckd_count);
			ect->cyl = address.cyl;
			ect->head = address.head;
			ect->record = 1;
			ect->kl = 0;
			ect->dl = 0;
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
			ccw->flags = CCW_FLAG_SLI;
			ccw->count = 8;
			ccw->cda = (__u32)(addr_t) ect;
		} else {		/* write remaining records */
			for (i = 0; i < rpt; i++) {
				ect = (struct eckd_count *) data;
				data += sizeof(struct eckd_count);
				ect->cyl = address.cyl;
				ect->head = address.head;
				ect->record = i + 1;
				ect->kl = 0;
				ect->dl = fdata->blksize;
				/*
				 * Check for special tracks 0-1
				 * when formatting CDL
				 */
				if ((intensity & 0x08) &&
				    address.cyl == 0 && address.head == 0) {
					if (i < 3) {
						ect->kl = 4;
						ect->dl = sizes_trk0[i] - 4;
					}
				}
				if ((intensity & 0x08) &&
				    address.cyl == 0 && address.head == 1) {
					ect->kl = 44;
					ect->dl = LABEL_SIZE - 44;
				}
				ccw[-1].flags |= CCW_FLAG_CC;
				/*
				 * The multi-track variant is used for the
				 * first record of every track after the
				 * first one (i == 0 && j != 0).
				 */
				if (i != 0 || j == 0)
					ccw->cmd_code =
						DASD_ECKD_CCW_WRITE_CKD;
				else
					ccw->cmd_code =
						DASD_ECKD_CCW_WRITE_CKD_MT;
				ccw->flags = CCW_FLAG_SLI;
				ccw->count = 8;
				ccw->cda = (__u32)(addr_t) ect;
				ccw++;
			}
		}
	}

	fcp->startdev = startdev;
	fcp->memdev = startdev;
	fcp->basedev = base;
	fcp->retries = 256;
	fcp->expires = startdev->default_expires * HZ;
	fcp->buildclk = get_tod_clock();
	fcp->status = DASD_CQR_FILLED;

	return fcp;
}
  2255. /*
  2256. * Wrapper function to build a CCW request depending on input data
  2257. */
  2258. static struct dasd_ccw_req *
  2259. dasd_eckd_format_build_ccw_req(struct dasd_device *base,
  2260. struct format_data_t *fdata, int enable_pav,
  2261. int tpm, struct eckd_count *fmt_buffer, int rpt)
  2262. {
  2263. struct dasd_ccw_req *ccw_req;
  2264. if (!fmt_buffer) {
  2265. ccw_req = dasd_eckd_build_format(base, fdata, enable_pav);
  2266. } else {
  2267. if (tpm)
  2268. ccw_req = dasd_eckd_build_check_tcw(base, fdata,
  2269. enable_pav,
  2270. fmt_buffer, rpt);
  2271. else
  2272. ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
  2273. fmt_buffer, rpt);
  2274. }
  2275. return ccw_req;
  2276. }
  2277. /*
  2278. * Sanity checks on format_data
  2279. */
  2280. static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
  2281. struct format_data_t *fdata)
  2282. {
  2283. struct dasd_eckd_private *private = base->private;
  2284. if (fdata->start_unit >=
  2285. (private->real_cyl * private->rdc_data.trk_per_cyl)) {
  2286. dev_warn(&base->cdev->dev,
  2287. "Start track number %u used in formatting is too big\n",
  2288. fdata->start_unit);
  2289. return -EINVAL;
  2290. }
  2291. if (fdata->stop_unit >=
  2292. (private->real_cyl * private->rdc_data.trk_per_cyl)) {
  2293. dev_warn(&base->cdev->dev,
  2294. "Stop track number %u used in formatting is too big\n",
  2295. fdata->stop_unit);
  2296. return -EINVAL;
  2297. }
  2298. if (fdata->start_unit > fdata->stop_unit) {
  2299. dev_warn(&base->cdev->dev,
  2300. "Start track %u used in formatting exceeds end track\n",
  2301. fdata->start_unit);
  2302. return -EINVAL;
  2303. }
  2304. if (dasd_check_blocksize(fdata->blksize) != 0) {
  2305. dev_warn(&base->cdev->dev,
  2306. "The DASD cannot be formatted with block size %u\n",
  2307. fdata->blksize);
  2308. return -EINVAL;
  2309. }
  2310. return 0;
  2311. }
  2312. /*
  2313. * This function will process format_data originally coming from an IOCTL
  2314. */
/*
 * Process format_data originally coming from an IOCTL: split the
 * requested track range into chunks of at most format_step tracks,
 * build one request per chunk, run the whole queue and evaluate the
 * results.
 *
 * If @fmt_buffer is set this is a format *check* run and read counts
 * are collected into the buffer (command mode processes one track per
 * request, transport mode as many as fit into DASD_CQR_MAX_CCW).
 * On a failed check request the sense data / IRB are copied to @irb
 * for the caller's evaluation.
 *
 * fdata->start_unit/stop_unit are modified while chunking but restored
 * before returning. Returns 0 on success or a negative errno.
 */
static int dasd_eckd_format_process_data(struct dasd_device *base,
					 struct format_data_t *fdata,
					 int enable_pav, int tpm,
					 struct eckd_count *fmt_buffer, int rpt,
					 struct irb *irb)
{
	struct dasd_eckd_private *private = base->private;
	struct dasd_ccw_req *cqr, *n;
	struct list_head format_queue;
	struct dasd_device *device;
	char *sense = NULL;
	int old_start, old_stop, format_step;
	int step, retry;
	int rc;

	rc = dasd_eckd_format_sanity_checks(base, fdata);
	if (rc)
		return rc;

	INIT_LIST_HEAD(&format_queue);

	/* remember the original range so it can be restored on exit */
	old_start = fdata->start_unit;
	old_stop = fdata->stop_unit;

	if (!tpm && fmt_buffer != NULL) {
		/* Command Mode / Format Check */
		format_step = 1;
	} else if (tpm && fmt_buffer != NULL) {
		/* Transport Mode / Format Check */
		format_step = DASD_CQR_MAX_CCW / rpt;
	} else {
		/* Normal Formatting */
		format_step = DASD_CQR_MAX_CCW /
			recs_per_track(&private->rdc_data, 0, fdata->blksize);
	}

	do {
		retry = 0;
		/* build one request per chunk of at most format_step tracks */
		while (fdata->start_unit <= old_stop) {
			step = fdata->stop_unit - fdata->start_unit + 1;
			if (step > format_step) {
				fdata->stop_unit =
					fdata->start_unit + format_step - 1;
			}

			cqr = dasd_eckd_format_build_ccw_req(base, fdata,
							     enable_pav, tpm,
							     fmt_buffer, rpt);
			if (IS_ERR(cqr)) {
				rc = PTR_ERR(cqr);
				if (rc == -ENOMEM) {
					if (list_empty(&format_queue))
						goto out;
					/*
					 * not enough memory available, start
					 * requests retry after first requests
					 * were finished
					 */
					retry = 1;
					break;
				}
				goto out_err;
			}
			list_add_tail(&cqr->blocklist, &format_queue);

			if (fmt_buffer) {
				/* advance past the counts of this chunk */
				step = fdata->stop_unit - fdata->start_unit + 1;
				fmt_buffer += rpt * step;
			}
			fdata->start_unit = fdata->stop_unit + 1;
			fdata->stop_unit = old_stop;
		}

		rc = dasd_sleep_on_queue(&format_queue);

out_err:
		/* tear down all queued requests, recording any failure */
		list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
			device = cqr->startdev;
			private = device->private;
			if (cqr->status == DASD_CQR_FAILED) {
				/*
				 * Only get sense data if called by format
				 * check
				 */
				if (fmt_buffer && irb) {
					sense = dasd_get_sense(&cqr->irb);
					memcpy(irb, &cqr->irb, sizeof(*irb));
				}
				rc = -EIO;
			}
			list_del_init(&cqr->blocklist);
			dasd_sfree_request(cqr, device);
			private->count--;
		}

		if (rc && rc != -EIO)
			goto out;
		if (rc == -EIO) {
			/*
			 * In case fewer than the expected records are on the
			 * track, we will most likely get a 'No Record Found'
			 * error (in command mode) or a 'File Protected' error
			 * (in transport mode). Those particular cases shouldn't
			 * pass the -EIO to the IOCTL, therefore reset the rc
			 * and continue.
			 */
			if (sense &&
			    (sense[1] & SNS1_NO_REC_FOUND ||
			     sense[1] & SNS1_FILE_PROTECTED))
				retry = 1;
			else
				goto out;
		}
	} while (retry);

out:
	/* restore the caller's original track range */
	fdata->start_unit = old_start;
	fdata->stop_unit = old_stop;

	return rc;
}
/*
 * Format the track range described by @fdata on @base.
 *
 * Thin wrapper around dasd_eckd_format_process_data() for plain
 * formatting: no format-check buffer, command mode, no IRB.
 */
static int dasd_eckd_format_device(struct dasd_device *base,
				   struct format_data_t *fdata, int enable_pav)
{
	return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
					     0, NULL);
}
  2430. /*
  2431. * Helper function to count consecutive records of a single track.
  2432. */
  2433. static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
  2434. int max)
  2435. {
  2436. int head;
  2437. int i;
  2438. head = fmt_buffer[start].head;
  2439. /*
  2440. * There are 3 conditions where we stop counting:
  2441. * - if data reoccurs (same head and record may reoccur), which may
  2442. * happen due to the way DASD_ECKD_CCW_READ_COUNT works
  2443. * - when the head changes, because we're iterating over several tracks
  2444. * then (DASD_ECKD_CCW_READ_COUNT_MT)
  2445. * - when we've reached the end of sensible data in the buffer (the
  2446. * record will be 0 then)
  2447. */
  2448. for (i = start; i < max; i++) {
  2449. if (i > start) {
  2450. if ((fmt_buffer[i].head == head &&
  2451. fmt_buffer[i].record == 1) ||
  2452. fmt_buffer[i].head != head ||
  2453. fmt_buffer[i].record == 0)
  2454. break;
  2455. }
  2456. }
  2457. return i - start;
  2458. }
  2459. /*
  2460. * Evaluate a given range of tracks. Data like number of records, blocksize,
  2461. * record ids, and key length are compared with expected data.
  2462. *
  2463. * If a mismatch occurs, the corresponding error bit is set, as well as
  2464. * additional information, depending on the error.
  2465. */
  2466. static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
  2467. struct format_check_t *cdata,
  2468. int rpt_max, int rpt_exp,
  2469. int trk_per_cyl, int tpm)
  2470. {
  2471. struct ch_t geo;
  2472. int max_entries;
  2473. int count = 0;
  2474. int trkcount;
  2475. int blksize;
  2476. int pos = 0;
  2477. int i, j;
  2478. int kl;
  2479. trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
  2480. max_entries = trkcount * rpt_max;
  2481. for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
  2482. /* Calculate the correct next starting position in the buffer */
  2483. if (tpm) {
  2484. while (fmt_buffer[pos].record == 0 &&
  2485. fmt_buffer[pos].dl == 0) {
  2486. if (pos++ > max_entries)
  2487. break;
  2488. }
  2489. } else {
  2490. if (i != cdata->expect.start_unit)
  2491. pos += rpt_max - count;
  2492. }
  2493. /* Calculate the expected geo values for the current track */
  2494. set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);
  2495. /* Count and check number of records */
  2496. count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);
  2497. if (count < rpt_exp) {
  2498. cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
  2499. break;
  2500. }
  2501. if (count > rpt_exp) {
  2502. cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
  2503. break;
  2504. }
  2505. for (j = 0; j < count; j++, pos++) {
  2506. blksize = cdata->expect.blksize;
  2507. kl = 0;
  2508. /*
  2509. * Set special values when checking CDL formatted
  2510. * devices.
  2511. */
  2512. if ((cdata->expect.intensity & 0x08) &&
  2513. geo.cyl == 0 && geo.head == 0) {
  2514. if (j < 3) {
  2515. blksize = sizes_trk0[j] - 4;
  2516. kl = 4;
  2517. }
  2518. }
  2519. if ((cdata->expect.intensity & 0x08) &&
  2520. geo.cyl == 0 && geo.head == 1) {
  2521. blksize = LABEL_SIZE - 44;
  2522. kl = 44;
  2523. }
  2524. /* Check blocksize */
  2525. if (fmt_buffer[pos].dl != blksize) {
  2526. cdata->result = DASD_FMT_ERR_BLKSIZE;
  2527. goto out;
  2528. }
  2529. /* Check if key length is 0 */
  2530. if (fmt_buffer[pos].kl != kl) {
  2531. cdata->result = DASD_FMT_ERR_KEY_LENGTH;
  2532. goto out;
  2533. }
  2534. /* Check if record_id is correct */
  2535. if (fmt_buffer[pos].cyl != geo.cyl ||
  2536. fmt_buffer[pos].head != geo.head ||
  2537. fmt_buffer[pos].record != (j + 1)) {
  2538. cdata->result = DASD_FMT_ERR_RECORD_ID;
  2539. goto out;
  2540. }
  2541. }
  2542. }
  2543. out:
  2544. /*
  2545. * In case of no errors, we need to decrease by one
  2546. * to get the correct positions.
  2547. */
  2548. if (!cdata->result) {
  2549. i--;
  2550. pos--;
  2551. }
  2552. cdata->unit = i;
  2553. cdata->num_records = count;
  2554. cdata->rec = fmt_buffer[pos].record;
  2555. cdata->blksize = fmt_buffer[pos].dl;
  2556. cdata->key_length = fmt_buffer[pos].kl;
  2557. }
  2558. /*
  2559. * Check the format of a range of tracks of a DASD.
  2560. */
/*
 * Check the format of a range of tracks of a DASD.
 *
 * Reads the record counts of all tracks in cdata->expect into a
 * temporary buffer and compares them against the expected format with
 * dasd_eckd_format_evaluate_tracks(). Transport mode (tpm) is used
 * when the subsystem supports it and the buffer fits fcx_max_data;
 * otherwise command mode processes one track at a time.
 *
 * Returns 0 on success (evaluation results are stored in @cdata) or a
 * negative errno.
 */
static int dasd_eckd_check_device_format(struct dasd_device *base,
					 struct format_check_t *cdata,
					 int enable_pav)
{
	struct dasd_eckd_private *private = base->private;
	struct eckd_count *fmt_buffer;
	struct irb irb;
	int rpt_max, rpt_exp;
	int fmt_buffer_size;
	int trk_per_cyl;
	int trkcount;
	int tpm = 0;
	int rc;

	trk_per_cyl = private->rdc_data.trk_per_cyl;

	/* Get maximum and expected amount of records per track */
	rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
	rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);

	trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
	fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);
	fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
	if (!fmt_buffer)
		return -ENOMEM;

	/*
	 * A certain FICON feature subset is needed to operate in transport
	 * mode. Additionally, the support for transport mode is implicitly
	 * checked by comparing the buffer size with fcx_max_data. As long as
	 * the buffer size is smaller we can operate in transport mode and
	 * process multiple tracks. If not, only one track at once is being
	 * processed using command mode.
	 */
	if ((private->features.feature[40] & 0x04) &&
	    fmt_buffer_size <= private->fcx_max_data)
		tpm = 1;

	rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
					   tpm, fmt_buffer, rpt_max, &irb);
	if (rc && rc != -EIO)
		goto out;
	if (rc == -EIO) {
		/*
		 * If our first attempt with transport mode enabled comes back
		 * with an incorrect length error, we're going to retry the
		 * check with command mode.
		 */
		if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
			tpm = 0;
			rc = dasd_eckd_format_process_data(base, &cdata->expect,
							   enable_pav, tpm,
							   fmt_buffer, rpt_max,
							   &irb);
			if (rc)
				goto out;
		} else {
			goto out;
		}
	}

	dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
					 trk_per_cyl, tpm);

out:
	kfree(fmt_buffer);

	return rc;
}
  2622. static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
  2623. {
  2624. if (cqr->retries < 0) {
  2625. cqr->status = DASD_CQR_FAILED;
  2626. return;
  2627. }
  2628. cqr->status = DASD_CQR_FILLED;
  2629. if (cqr->block && (cqr->startdev != cqr->block->base)) {
  2630. dasd_eckd_reset_ccw_to_base_io(cqr);
  2631. cqr->startdev = cqr->block->base;
  2632. cqr->lpm = dasd_path_get_opm(cqr->block->base);
  2633. }
  2634. };
  2635. static dasd_erp_fn_t
  2636. dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
  2637. {
  2638. struct dasd_device *device = (struct dasd_device *) cqr->startdev;
  2639. struct ccw_device *cdev = device->cdev;
  2640. switch (cdev->id.cu_type) {
  2641. case 0x3990:
  2642. case 0x2105:
  2643. case 0x2107:
  2644. case 0x1750:
  2645. return dasd_3990_erp_action;
  2646. case 0x9343:
  2647. case 0x3880:
  2648. default:
  2649. return dasd_default_erp_action;
  2650. }
  2651. }
/*
 * Return the ERP post-action routine for @cqr.
 *
 * ECKD devices have no device-specific post-processing; the default
 * post-action is always used.
 */
static dasd_erp_fn_t
dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
{
	return dasd_default_erp_postaction;
}
/*
 * Inspect an interrupt for conditions that concern the device itself
 * rather than a single request: state-change-pending, summary unit
 * check, service information messages (SIM) and loss of a device
 * reservation. @cqr may be NULL for unsolicited interrupts.
 */
static void dasd_eckd_check_for_device_change(struct dasd_device *device,
					      struct dasd_ccw_req *cqr,
					      struct irb *irb)
{
	char mask;
	char *sense = NULL;
	struct dasd_eckd_private *private = device->private;

	/* first of all check for state change pending interrupt */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((scsw_dstat(&irb->scsw) & mask) == mask) {
		/*
		 * for alias only, not in offline processing
		 * and only if not suspended
		 */
		if (!device->block && private->lcu &&
		    device->state == DASD_STATE_ONLINE &&
		    !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
		    !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
			/* schedule worker to reload device */
			dasd_reload_device(device);
		}
		dasd_generic_handle_state_change(device);
		return;
	}

	sense = dasd_get_sense(irb);
	if (!sense)
		return;

	/* summary unit check */
	if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
		/* notify only once per summary unit check */
		if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "eckd suc: device already notified");
			return;
		}
		/*
		 * NOTE(review): defensive re-fetch — sense was already
		 * checked non-NULL above, so this branch looks
		 * unreachable; confirm before relying on it.
		 */
		sense = dasd_get_sense(irb);
		if (!sense) {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "eckd suc: no reason code available");
			clear_bit(DASD_FLAG_SUC, &device->flags);
			return;
		}
		private->suc_reason = sense[8];
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
			      "eckd handle summary unit check: reason",
			      private->suc_reason);
		/* hold a reference for the scheduled worker */
		dasd_get_device(device);
		if (!schedule_work(&device->suc_work))
			dasd_put_device(device);

		return;
	}

	/* service information message SIM */
	if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
	    ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
		dasd_3990_erp_handle_sim(device, sense);
		return;
	}

	/* loss of device reservation is handled via base devices only
	 * as alias devices may be used with several bases
	 */
	if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
	    (sense[7] == 0x3F) &&
	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
	    test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
		if (device->features & DASD_FEATURE_FAILONSLCK)
			set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
		dev_err(&device->cdev->dev,
			"The device reservation was lost\n");
	}
}
/*
 * Build a command-mode channel program for @req with one read/write
 * CCW per block: a Prefix (or Define Extent + Locate Record) CCW
 * followed by the data-transfer CCWs, with extra Locate Record CCWs
 * for the special CDL records on the first two tracks.
 *
 * Returns the prepared request, or ERR_PTR(-EINVAL) for malformed
 * requests, ERR_PTR(-EAGAIN) when the TOD clock is not in sync while
 * XRC is enabled, or the allocation error from dasd_smalloc_request().
 */
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_eckd_private *private;
	unsigned long *idaws;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst;
	unsigned int off;
	int count, cidaw, cplength, datasize;
	sector_t recid;
	unsigned char cmd, rcmd;
	int use_prefix;
	struct dasd_device *basedev;

	basedev = block->base;
	private = basedev->private;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_MT;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_MT;
	else
		return ERR_PTR(-EINVAL);

	/* Check struct bio and count the number of blocks for the request. */
	count = 0;
	cidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		if (bv.bv_len & (blksize - 1))
			/* Eckd can only do full blocks. */
			return ERR_PTR(-EINVAL);
		count += bv.bv_len >> (block->s2b_shift + 9);
		if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
			cidaw += bv.bv_len >> (block->s2b_shift + 9);
	}
	/* Paranoia. */
	if (count != last_rec - first_rec + 1)
		return ERR_PTR(-EINVAL);

	/* use the prefix command if available */
	use_prefix = private->features.feature[8] & 0x01;
	if (use_prefix) {
		/* 1x prefix + number of blocks */
		cplength = 2 + count;
		/* 1x prefix + cidaws*sizeof(long) */
		datasize = sizeof(struct PFX_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	} else {
		/* 1x define extent + 1x locate record + number of blocks */
		cplength = 2 + count;
		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	}
	/* Find out the number of additional locate record ccws for cdl. */
	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
		if (last_rec >= 2*blk_per_trk)
			count = 2*blk_per_trk - first_rec;
		cplength += count;
		datasize += count*sizeof(struct LO_eckd_data);
	}
	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* First ccw is define extent or prefix. */
	if (use_prefix) {
		if (prefix(ccw++, cqr->data, first_trk,
			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct PFX_eckd_data));
	} else {
		if (define_extent(ccw++, cqr->data, first_trk,
				  last_trk, cmd, basedev, 0) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct DE_eckd_data));
	}
	/* Build locate_record+read/write/ccws. */
	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
	recid = first_rec;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
		/* Only standard blocks so there is just one locate record. */
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
			      last_rec - recid + 1, cmd, basedev, blksize);
	}
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv.bv_page) + bv.bv_offset;
		if (dasd_page_cache) {
			/*
			 * Optionally copy the data through the page
			 * cache slab; reads are copied back on
			 * completion (not visible in this function).
			 */
			char *copy = kmem_cache_alloc(dasd_page_cache,
						      GFP_DMA | __GFP_NOWARN);
			if (copy && rq_data_dir(req) == WRITE)
				memcpy(copy + bv.bv_offset, dst, bv.bv_len);
			if (copy)
				dst = copy + bv.bv_offset;
		}
		for (off = 0; off < bv.bv_len; off += blksize) {
			sector_t trkid = recid;
			unsigned int recoffs = sector_div(trkid, blk_per_trk);
			rcmd = cmd;
			count = blksize;
			/* Locate record for cdl special block ? */
			if (private->uses_cdl && recid < 2*blk_per_trk) {
				if (dasd_eckd_cdl_special(blk_per_trk, recid)){
					rcmd |= 0x8;
					count = dasd_eckd_cdl_reclen(recid);
					/* pad short CDL reads with 0xe5 */
					if (count < blksize &&
					    rq_data_dir(req) == READ)
						memset(dst + count, 0xe5,
						       blksize - count);
				}
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      1, rcmd, basedev, count);
			}
			/* Locate record for standard blocks ? */
			if (private->uses_cdl && recid == 2*blk_per_trk) {
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      last_rec - recid + 1,
					      cmd, basedev, count);
			}
			/* Read/write ccw. */
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = rcmd;
			ccw->count = count;
			if (idal_is_needed(dst, blksize)) {
				ccw->cda = (__u32)(addr_t) idaws;
				ccw->flags = CCW_FLAG_IDA;
				idaws = idal_create_words(idaws, dst, blksize);
			} else {
				ccw->cda = (__u32)(addr_t) dst;
				ccw->flags = 0;
			}
			ccw++;
			dst += blksize;
			recid++;
		}
	}
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
  2908. static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
  2909. struct dasd_device *startdev,
  2910. struct dasd_block *block,
  2911. struct request *req,
  2912. sector_t first_rec,
  2913. sector_t last_rec,
  2914. sector_t first_trk,
  2915. sector_t last_trk,
  2916. unsigned int first_offs,
  2917. unsigned int last_offs,
  2918. unsigned int blk_per_trk,
  2919. unsigned int blksize)
  2920. {
  2921. unsigned long *idaws;
  2922. struct dasd_ccw_req *cqr;
  2923. struct ccw1 *ccw;
  2924. struct req_iterator iter;
  2925. struct bio_vec bv;
  2926. char *dst, *idaw_dst;
  2927. unsigned int cidaw, cplength, datasize;
  2928. unsigned int tlf;
  2929. sector_t recid;
  2930. unsigned char cmd;
  2931. struct dasd_device *basedev;
  2932. unsigned int trkcount, count, count_to_trk_end;
  2933. unsigned int idaw_len, seg_len, part_len, len_to_track_end;
  2934. unsigned char new_track, end_idaw;
  2935. sector_t trkid;
  2936. unsigned int recoffs;
  2937. basedev = block->base;
  2938. if (rq_data_dir(req) == READ)
  2939. cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
  2940. else if (rq_data_dir(req) == WRITE)
  2941. cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
  2942. else
  2943. return ERR_PTR(-EINVAL);
  2944. /* Track based I/O needs IDAWs for each page, and not just for
  2945. * 64 bit addresses. We need additional idals for pages
  2946. * that get filled from two tracks, so we use the number
  2947. * of records as upper limit.
  2948. */
  2949. cidaw = last_rec - first_rec + 1;
  2950. trkcount = last_trk - first_trk + 1;
  2951. /* 1x prefix + one read/write ccw per track */
  2952. cplength = 1 + trkcount;
  2953. datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);
  2954. /* Allocate the ccw request. */
  2955. cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
  2956. startdev, blk_mq_rq_to_pdu(req));
  2957. if (IS_ERR(cqr))
  2958. return cqr;
  2959. ccw = cqr->cpaddr;
  2960. /* transfer length factor: how many bytes to read from the last track */
  2961. if (first_trk == last_trk)
  2962. tlf = last_offs - first_offs + 1;
  2963. else
  2964. tlf = last_offs + 1;
  2965. tlf *= blksize;
  2966. if (prefix_LRE(ccw++, cqr->data, first_trk,
  2967. last_trk, cmd, basedev, startdev,
  2968. 1 /* format */, first_offs + 1,
  2969. trkcount, blksize,
  2970. tlf) == -EAGAIN) {
  2971. /* Clock not in sync and XRC is enabled.
  2972. * Try again later.
  2973. */
  2974. dasd_sfree_request(cqr, startdev);
  2975. return ERR_PTR(-EAGAIN);
  2976. }
  2977. /*
  2978. * The translation of request into ccw programs must meet the
  2979. * following conditions:
  2980. * - all idaws but the first and the last must address full pages
  2981. * (or 2K blocks on 31-bit)
  2982. * - the scope of a ccw and it's idal ends with the track boundaries
  2983. */
  2984. idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
  2985. recid = first_rec;
  2986. new_track = 1;
  2987. end_idaw = 0;
  2988. len_to_track_end = 0;
  2989. idaw_dst = NULL;
  2990. idaw_len = 0;
  2991. rq_for_each_segment(bv, req, iter) {
  2992. dst = page_address(bv.bv_page) + bv.bv_offset;
  2993. seg_len = bv.bv_len;
  2994. while (seg_len) {
  2995. if (new_track) {
  2996. trkid = recid;
  2997. recoffs = sector_div(trkid, blk_per_trk);
  2998. count_to_trk_end = blk_per_trk - recoffs;
  2999. count = min((last_rec - recid + 1),
  3000. (sector_t)count_to_trk_end);
  3001. len_to_track_end = count * blksize;
  3002. ccw[-1].flags |= CCW_FLAG_CC;
  3003. ccw->cmd_code = cmd;
  3004. ccw->count = len_to_track_end;
  3005. ccw->cda = (__u32)(addr_t)idaws;
  3006. ccw->flags = CCW_FLAG_IDA;
  3007. ccw++;
  3008. recid += count;
  3009. new_track = 0;
  3010. /* first idaw for a ccw may start anywhere */
  3011. if (!idaw_dst)
  3012. idaw_dst = dst;
  3013. }
  3014. /* If we start a new idaw, we must make sure that it
  3015. * starts on an IDA_BLOCK_SIZE boundary.
  3016. * If we continue an idaw, we must make sure that the
  3017. * current segment begins where the so far accumulated
  3018. * idaw ends
  3019. */
  3020. if (!idaw_dst) {
  3021. if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
  3022. dasd_sfree_request(cqr, startdev);
  3023. return ERR_PTR(-ERANGE);
  3024. } else
  3025. idaw_dst = dst;
  3026. }
  3027. if ((idaw_dst + idaw_len) != dst) {
  3028. dasd_sfree_request(cqr, startdev);
  3029. return ERR_PTR(-ERANGE);
  3030. }
  3031. part_len = min(seg_len, len_to_track_end);
  3032. seg_len -= part_len;
  3033. dst += part_len;
  3034. idaw_len += part_len;
  3035. len_to_track_end -= part_len;
  3036. /* collected memory area ends on an IDA_BLOCK border,
  3037. * -> create an idaw
  3038. * idal_create_words will handle cases where idaw_len
  3039. * is larger then IDA_BLOCK_SIZE
  3040. */
  3041. if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
  3042. end_idaw = 1;
  3043. /* We also need to end the idaw at track end */
  3044. if (!len_to_track_end) {
  3045. new_track = 1;
  3046. end_idaw = 1;
  3047. }
  3048. if (end_idaw) {
  3049. idaws = idal_create_words(idaws, idaw_dst,
  3050. idaw_len);
  3051. idaw_dst = NULL;
  3052. idaw_len = 0;
  3053. end_idaw = 0;
  3054. }
  3055. }
  3056. }
  3057. if (blk_noretry_request(req) ||
  3058. block->base->features & DASD_FEATURE_FAILFAST)
  3059. set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
  3060. cqr->startdev = startdev;
  3061. cqr->memdev = startdev;
  3062. cqr->block = block;
  3063. cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
  3064. cqr->lpm = dasd_path_get_ppm(startdev);
  3065. cqr->retries = startdev->default_retries;
  3066. cqr->buildclk = get_tod_clock();
  3067. cqr->status = DASD_CQR_FILLED;
  3068. return cqr;
  3069. }
/*
 * Build the prefix (PFX) data for a transport mode channel program and
 * add it as the first DCW to the given itcw.
 *
 * The prefix combines define extent and locate record extended data for
 * the track range trk..totrk on the base device @basedev, started via
 * @startdev (which may be a PAV alias of the base device).
 *
 * Returns 0 on success, the error returned by set_timestamp() if the
 * XRC time stamp could not be set, or the PTR_ERR of itcw_add_dcw()
 * if adding the prefix DCW failed.
 */
static int prepare_itcw(struct itcw *itcw,
			unsigned int trk, unsigned int totrk, int cmd,
			struct dasd_device *basedev,
			struct dasd_device *startdev,
			unsigned int rec_on_trk, int count,
			unsigned int blksize,
			unsigned int total_data_size,
			unsigned int tlf,
			unsigned int blk_per_trk)
{
	struct PFX_eckd_data pfxdata;
	struct dasd_eckd_private *basepriv, *startpriv;
	struct DE_eckd_data *dedata;
	struct LRE_eckd_data *lredata;
	struct dcw *dcw;

	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;

	u8 pfx_cmd;

	int rc = 0;
	int sector = 0;
	int dn, d;

	/* setup prefix data */
	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata.define_extent;
	lredata = &pfxdata.locate_record;

	memset(&pfxdata, 0, sizeof(pfxdata));
	pfxdata.format = 1; /* PFX with LRE */
	pfxdata.base_address = basepriv->ned->unit_addr;
	pfxdata.base_lss = basepriv->ned->ID;
	pfxdata.validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata.validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata.validity.verify_base = 1;
		pfxdata.validity.hyper_pav = 1;
	}

	/* set command specific define extent and locate record values */
	switch (cmd) {
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x0C;
		lredata->auxiliary.check_bytes = 0x01;
		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		rc = set_timestamp(NULL, dedata, basedev);
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x3F;
		lredata->extended_operation = 0x23;
		lredata->auxiliary.check_bytes = 0x2;
		/*
		 * If XRC is supported the System Time Stamp is set. The
		 * validity of the time stamp must be reflected in the prefix
		 * data as well.
		 */
		if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
			pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
		pfx_cmd = DASD_ECKD_CCW_PFX;
		break;
	case DASD_ECKD_CCW_READ_COUNT_MT:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		dedata->ga_extended |= 0x42;
		dedata->blk_size = blksize;
		lredata->operation.orientation = 0x2;
		lredata->operation.operation = 0x16;
		lredata->auxiliary.check_bytes = 0x01;
		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "prepare itcw, unknown opcode 0x%x", cmd);
		BUG();
		break;
	}
	/* only the write path may have set rc (via set_timestamp) */
	if (rc)
		return rc;

	dedata->attributes.mode = 0x3;	/* ECKD */

	/* convert linear track numbers into cylinder/head addresses */
	heads = basepriv->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
	    dedata->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
			endcyl += basepriv->attrib.nr_cyl;
		else
			endcyl = (basepriv->real_cyl - 1);
	}

	set_ch_t(&dedata->beg_ext, begcyl, beghead);
	set_ch_t(&dedata->end_ext, endcyl, endhead);

	dedata->ep_format = 0x20; /* records per track is valid */
	dedata->ep_rec_per_track = blk_per_trk;

	if (rec_on_trk) {
		/*
		 * Compute a sector value from the record position on the
		 * track; the formula is specific to the device geometry
		 * (3390 vs 3380). For other device types sector stays 0.
		 */
		switch (basepriv->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(blksize + 6, 232);
			d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(blksize + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}

	if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
		/* read count: transfer length is not meaningful */
		lredata->auxiliary.length_valid = 0;
		lredata->auxiliary.length_scope = 0;
		lredata->sector = 0xff;
	} else {
		lredata->auxiliary.length_valid = 1;
		lredata->auxiliary.length_scope = 1;
		lredata->sector = sector;
	}
	lredata->auxiliary.imbedded_ccw_valid = 1;
	lredata->length = tlf;
	lredata->imbedded_ccw = cmd;
	lredata->count = count;
	set_ch_t(&lredata->seek_addr, begcyl, beghead);
	lredata->search_arg.cyl = lredata->seek_addr.cyl;
	lredata->search_arg.head = lredata->seek_addr.head;
	lredata->search_arg.record = rec_on_trk;

	dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
		     &pfxdata, sizeof(pfxdata), total_data_size);
	return PTR_ERR_OR_ZERO(dcw);
}
/*
 * Build a transport mode (tcw/itcw based) channel program for a block
 * layer read or write request, using the read/write track data
 * commands.
 *
 * The request data is mapped via TIDAWs; for writes, a TIDAW must not
 * cross a track boundary because the CBC flag has to be set on the
 * last TIDAW of each track.
 *
 * Returns the prepared cqr, or an ERR_PTR:
 *  -EINVAL on an unsupported data direction or itcw setup failure,
 *  -EAGAIN if the clock is not in sync while XRC is enabled.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_ccw_req *cqr;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst;
	unsigned int trkcount, ctidaw;
	unsigned char cmd;
	struct dasd_device *basedev;
	unsigned int tlf;
	struct itcw *itcw;
	struct tidaw *last_tidaw = NULL;
	int itcw_op;
	size_t itcw_size;
	u8 tidaw_flags;
	unsigned int seg_len, part_len, len_to_track_end;
	unsigned char new_track;
	sector_t recid, trkid;
	unsigned int offs;
	unsigned int count, count_to_trk_end;
	int ret;

	basedev = block->base;
	if (rq_data_dir(req) == READ) {
		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
		itcw_op = ITCW_OP_READ;
	} else if (rq_data_dir(req) == WRITE) {
		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
		itcw_op = ITCW_OP_WRITE;
	} else
		return ERR_PTR(-EINVAL);

	/* trackbased I/O needs address all memory via TIDAWs,
	 * not just for 64 bit addresses. This allows us to map
	 * each segment directly to one tidaw.
	 * In the case of write requests, additional tidaws may
	 * be needed when a segment crosses a track boundary.
	 */
	trkcount = last_trk - first_trk + 1;
	ctidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		++ctidaw;
	}
	if (rq_data_dir(req) == WRITE)
		ctidaw += (last_trk - first_trk);

	/* Allocate the ccw request. */
	itcw_size = itcw_calc_size(0, ctidaw, 0);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
				   blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;

	/* transfer length factor: how many bytes to read from the last track */
	if (first_trk == last_trk)
		tlf = last_offs - first_offs + 1;
	else
		tlf = last_offs + 1;
	tlf *= blksize;

	itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
	if (IS_ERR(itcw)) {
		ret = -EINVAL;
		goto out_error;
	}
	cqr->cpaddr = itcw_get_tcw(itcw);
	if (prepare_itcw(itcw, first_trk, last_trk,
			 cmd, basedev, startdev,
			 first_offs + 1,
			 trkcount, blksize,
			 (last_rec - first_rec + 1) * blksize,
			 tlf, blk_per_trk) == -EAGAIN) {
		/* Clock not in sync and XRC is enabled.
		 * Try again later.
		 */
		ret = -EAGAIN;
		goto out_error;
	}
	len_to_track_end = 0;
	/*
	 * A tidaw can address 4k of memory, but must not cross page boundaries
	 * We can let the block layer handle this by setting
	 * blk_queue_segment_boundary to page boundaries and
	 * blk_max_segment_size to page size when setting up the request queue.
	 * For write requests, a TIDAW must not cross track boundaries, because
	 * we have to set the CBC flag on the last tidaw for each track.
	 */
	if (rq_data_dir(req) == WRITE) {
		new_track = 1;
		recid = first_rec;
		rq_for_each_segment(bv, req, iter) {
			dst = page_address(bv.bv_page) + bv.bv_offset;
			seg_len = bv.bv_len;
			while (seg_len) {
				if (new_track) {
					/* compute how many bytes remain on
					 * the current track */
					trkid = recid;
					offs = sector_div(trkid, blk_per_trk);
					count_to_trk_end = blk_per_trk - offs;
					count = min((last_rec - recid + 1),
						    (sector_t)count_to_trk_end);
					len_to_track_end = count * blksize;
					recid += count;
					new_track = 0;
				}
				part_len = min(seg_len, len_to_track_end);
				seg_len -= part_len;
				len_to_track_end -= part_len;

				/* We need to end the tidaw at track end */
				if (!len_to_track_end) {
					new_track = 1;
					tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
				} else
					tidaw_flags = 0;
				last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
							    dst, part_len);
				if (IS_ERR(last_tidaw)) {
					ret = -EINVAL;
					goto out_error;
				}
				dst += part_len;
			}
		}
	} else {
		/* reads: one tidaw per segment, no track bookkeeping needed */
		rq_for_each_segment(bv, req, iter) {
			dst = page_address(bv.bv_page) + bv.bv_offset;
			last_tidaw = itcw_add_tidaw(itcw, 0x00,
						    dst, bv.bv_len);
			if (IS_ERR(last_tidaw)) {
				ret = -EINVAL;
				goto out_error;
			}
		}
	}
	/* NOTE(review): assumes the request has at least one segment, so
	 * last_tidaw is non-NULL here. */
	last_tidaw->flags |= TIDAW_FLAGS_LAST;
	last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
	itcw_finalize(itcw);

	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->cpmode = 1;	/* transport mode channel program */
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
out_error:
	dasd_sfree_request(cqr, startdev);
	return ERR_PTR(ret);
}
/*
 * Build a channel program for a block layer request, choosing the most
 * capable method the device supports:
 *  - transport mode track I/O, if the data fits into fcx_max_data and
 *    either the request stays on one track or multitrack fcx is
 *    supported,
 *  - command mode track I/O, if prefix and the read/write track data
 *    commands are supported,
 *  - command mode single-block I/O as fallback (always used for the
 *    CDL special area and when dasd_page_cache is active).
 *
 * Returns the cqr or an ERR_PTR on failure.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req)
{
	int cmdrtd, cmdwtd;
	int use_prefix;
	int fcx_multitrack;
	struct dasd_eckd_private *private;
	struct dasd_device *basedev;
	sector_t first_rec, last_rec;
	sector_t first_trk, last_trk;
	unsigned int first_offs, last_offs;
	unsigned int blk_per_trk, blksize;
	int cdlspecial;
	unsigned int data_size;
	struct dasd_ccw_req *cqr;

	basedev = block->base;
	private = basedev->private;

	/* Calculate number of blocks/records per track. */
	blksize = block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	if (blk_per_trk == 0)
		return ERR_PTR(-EINVAL);
	/* Calculate record id of first and last block. */
	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
	first_offs = sector_div(first_trk, blk_per_trk);
	last_rec = last_trk =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	last_offs = sector_div(last_trk, blk_per_trk);
	/* request touches the CDL special area (first two tracks)? */
	cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);

	/* multitrack fcx support: feature byte 40, bit 0x20 */
	fcx_multitrack = private->features.feature[40] & 0x20;
	data_size = blk_rq_bytes(req);
	if (data_size % blksize)
		return ERR_PTR(-EINVAL);
	/* tpm write request add CBC data on each track boundary */
	if (rq_data_dir(req) == WRITE)
		data_size += (last_trk - first_trk) * 4;

	/* is read track data and write track data in command mode supported? */
	cmdrtd = private->features.feature[9] & 0x20;
	cmdwtd = private->features.feature[12] & 0x40;
	use_prefix = private->features.feature[8] & 0x01;

	cqr = NULL;
	if (cdlspecial || dasd_page_cache) {
		/* do nothing, just fall through to the cmd mode single case */
	} else if ((data_size <= private->fcx_max_data)
		   && (fcx_multitrack || (first_trk == last_trk))) {
		cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
						    first_rec, last_rec,
						    first_trk, last_trk,
						    first_offs, last_offs,
						    blk_per_trk, blksize);
		/* -EAGAIN/-ENOMEM are returned to the caller; any other
		 * error lets us retry with the command mode fallback */
		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
		    (PTR_ERR(cqr) != -ENOMEM))
			cqr = NULL;
	} else if (use_prefix &&
		   (((rq_data_dir(req) == READ) && cmdrtd) ||
		    ((rq_data_dir(req) == WRITE) && cmdwtd))) {
		cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
						   first_rec, last_rec,
						   first_trk, last_trk,
						   first_offs, last_offs,
						   blk_per_trk, blksize);
		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
		    (PTR_ERR(cqr) != -ENOMEM))
			cqr = NULL;
	}
	if (!cqr)
		cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
						    first_rec, last_rec,
						    first_trk, last_trk,
						    first_offs, last_offs,
						    blk_per_trk, blksize);
	return cqr;
}
/*
 * Build a command mode channel program for raw track access
 * (DASD_FEATURE_USERAW): full tracks are read or written without block
 * translation.
 *
 * Requests must cover whole tracks (multiples of 64k, 64k aligned);
 * misaligned reads are fixed up by padding with the global rawpadpage,
 * misaligned writes are rejected with -EINVAL.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
						   struct dasd_block *block,
						   struct request *req)
{
	sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
	unsigned int seg_len, len_to_track_end;
	unsigned int cidaw, cplength, datasize;
	sector_t first_trk, last_trk, sectors;
	struct dasd_eckd_private *base_priv;
	struct dasd_device *basedev;
	struct req_iterator iter;
	struct dasd_ccw_req *cqr;
	unsigned int first_offs;
	unsigned int trkcount;
	unsigned long *idaws;
	unsigned int size;
	unsigned char cmd;
	struct bio_vec bv;
	struct ccw1 *ccw;
	int use_prefix;
	void *data;
	char *dst;

	/*
	 * raw track access needs to be mutiple of 64k and on 64k boundary
	 * For read requests we can fix an incorrect alignment by padding
	 * the request with dummy pages.
	 */
	start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
	end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
		DASD_RAW_SECTORS_PER_TRACK;
	end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
		DASD_RAW_SECTORS_PER_TRACK;
	basedev = block->base;
	if ((start_padding_sectors || end_padding_sectors) &&
	    (rq_data_dir(req) == WRITE)) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "raw write not track aligned (%lu,%lu) req %p",
			      start_padding_sectors, end_padding_sectors, req);
		return ERR_PTR(-EINVAL);
	}

	first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
	last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
		DASD_RAW_SECTORS_PER_TRACK;
	trkcount = last_trk - first_trk + 1;
	first_offs = 0;

	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_TRACK;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
	else
		return ERR_PTR(-EINVAL);

	/*
	 * Raw track based I/O needs IDAWs for each page,
	 * and not just for 64 bit addresses.
	 */
	cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;

	/*
	 * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
	 * of extended parameter. This is needed for write full track.
	 */
	base_priv = basedev->private;
	use_prefix = base_priv->features.feature[8] & 0x01;
	if (use_prefix) {
		/* 1x prefix + one CCW per track */
		cplength = 1 + trkcount;
		size = sizeof(struct PFX_eckd_data) + 2;
	} else {
		/* define extent + locate record + one CCW per track */
		cplength = 2 + trkcount;
		size = sizeof(struct DE_eckd_data) +
			sizeof(struct LRE_eckd_data) + 2;
	}
	size = ALIGN(size, 8);

	datasize = size + cidaw * sizeof(unsigned long);

	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
				   datasize, startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;

	ccw = cqr->cpaddr;
	data = cqr->data;

	if (use_prefix) {
		prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
			   startdev, 1, first_offs + 1, trkcount, 0, 0);
	} else {
		define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
		ccw[-1].flags |= CCW_FLAG_CC;

		data += sizeof(struct DE_eckd_data);
		locate_record_ext(ccw++, data, first_trk, first_offs + 1,
				  trkcount, cmd, basedev, 0, 0);
	}

	idaws = (unsigned long *)(cqr->data + size);
	len_to_track_end = 0;
	if (start_padding_sectors) {
		/* partial first track: one CCW whose idal starts with
		 * dummy pages covering the unrequested leading sectors */
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = cmd;
		/* maximum 3390 track size */
		ccw->count = 57326;
		/* 64k map to one track */
		len_to_track_end = 65536 - start_padding_sectors * 512;
		ccw->cda = (__u32)(addr_t)idaws;
		ccw->flags |= CCW_FLAG_IDA;
		ccw->flags |= CCW_FLAG_SLI;
		ccw++;
		for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
			idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
	}
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv.bv_page) + bv.bv_offset;
		seg_len = bv.bv_len;
		if (cmd == DASD_ECKD_CCW_READ_TRACK)
			memset(dst, 0, seg_len);
		if (!len_to_track_end) {
			/* start a new read/write full track CCW */
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = cmd;
			/* maximum 3390 track size */
			ccw->count = 57326;
			/* 64k map to one track */
			len_to_track_end = 65536;
			ccw->cda = (__u32)(addr_t)idaws;
			ccw->flags |= CCW_FLAG_IDA;
			ccw->flags |= CCW_FLAG_SLI;
			ccw++;
		}
		len_to_track_end -= seg_len;
		idaws = idal_create_words(idaws, dst, seg_len);
	}
	/* pad a partial last track with dummy pages */
	for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
		idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	return cqr;
}
/*
 * Free a channel program built by dasd_eckd_build_cp().
 *
 * If dasd_page_cache is active, the data of a READ request has to be
 * copied from the cache pages back into the bio pages of the request
 * before the cache pages are released.
 *
 * Returns 1 if the request finished with status DASD_CQR_DONE,
 * 0 otherwise.
 */
static int
dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
	struct dasd_eckd_private *private;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst, *cda;
	unsigned int blksize, blk_per_trk, off;
	sector_t recid;
	int status;

	if (!dasd_page_cache)
		goto out;
	private = cqr->block->base->private;
	blksize = cqr->block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
	ccw = cqr->cpaddr;
	/* Skip over define extent & locate record. */
	ccw++;
	/* with CDL, the first two tracks get a locate record per block,
	 * so the extra skip only applies outside that area */
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
		ccw++;
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv.bv_page) + bv.bv_offset;
		for (off = 0; off < bv.bv_len; off += blksize) {
			/* Skip locate record. */
			if (private->uses_cdl && recid <= 2*blk_per_trk)
				ccw++;
			if (dst) {
				/* resolve the data address of this ccw,
				 * following the idal if one is used */
				if (ccw->flags & CCW_FLAG_IDA)
					cda = *((char **)((addr_t) ccw->cda));
				else
					cda = (char *)((addr_t) ccw->cda);
				if (dst != cda) {
					/* data went through a cache page:
					 * copy back for reads, then free it */
					if (rq_data_dir(req) == READ)
						memcpy(dst, cda, bv.bv_len);
					kmem_cache_free(dasd_page_cache,
					    (void *)((addr_t)cda & PAGE_MASK));
				}
				dst = NULL;
			}
			ccw++;
			recid++;
		}
	}
out:
	status = cqr->status == DASD_CQR_DONE;
	dasd_sfree_request(cqr, cqr->memdev);
	return status;
}
  3631. /*
  3632. * Modify ccw/tcw in cqr so it can be started on a base device.
  3633. *
  3634. * Note that this is not enough to restart the cqr!
  3635. * Either reset cqr->startdev as well (summary unit check handling)
  3636. * or restart via separate cqr (as in ERP handling).
  3637. */
  3638. void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
  3639. {
  3640. struct ccw1 *ccw;
  3641. struct PFX_eckd_data *pfxdata;
  3642. struct tcw *tcw;
  3643. struct tccb *tccb;
  3644. struct dcw *dcw;
  3645. if (cqr->cpmode == 1) {
  3646. tcw = cqr->cpaddr;
  3647. tccb = tcw_get_tccb(tcw);
  3648. dcw = (struct dcw *)&tccb->tca[0];
  3649. pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
  3650. pfxdata->validity.verify_base = 0;
  3651. pfxdata->validity.hyper_pav = 0;
  3652. } else {
  3653. ccw = cqr->cpaddr;
  3654. pfxdata = cqr->data;
  3655. if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
  3656. pfxdata->validity.verify_base = 0;
  3657. pfxdata->validity.hyper_pav = 0;
  3658. }
  3659. }
  3660. }
  3661. #define DASD_ECKD_CHANQ_MAX_SIZE 4
  3662. static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
  3663. struct dasd_block *block,
  3664. struct request *req)
  3665. {
  3666. struct dasd_eckd_private *private;
  3667. struct dasd_device *startdev;
  3668. unsigned long flags;
  3669. struct dasd_ccw_req *cqr;
  3670. startdev = dasd_alias_get_start_dev(base);
  3671. if (!startdev)
  3672. startdev = base;
  3673. private = startdev->private;
  3674. if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
  3675. return ERR_PTR(-EBUSY);
  3676. spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
  3677. private->count++;
  3678. if ((base->features & DASD_FEATURE_USERAW))
  3679. cqr = dasd_eckd_build_cp_raw(startdev, block, req);
  3680. else
  3681. cqr = dasd_eckd_build_cp(startdev, block, req);
  3682. if (IS_ERR(cqr))
  3683. private->count--;
  3684. spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
  3685. return cqr;
  3686. }
  3687. static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
  3688. struct request *req)
  3689. {
  3690. struct dasd_eckd_private *private;
  3691. unsigned long flags;
  3692. spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
  3693. private = cqr->memdev->private;
  3694. private->count--;
  3695. spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
  3696. return dasd_eckd_free_cp(cqr, req);
  3697. }
  3698. static int
  3699. dasd_eckd_fill_info(struct dasd_device * device,
  3700. struct dasd_information2_t * info)
  3701. {
  3702. struct dasd_eckd_private *private = device->private;
  3703. info->label_block = 2;
  3704. info->FBA_layout = private->uses_cdl ? 0 : 1;
  3705. info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
  3706. info->characteristics_size = sizeof(private->rdc_data);
  3707. memcpy(info->characteristics, &private->rdc_data,
  3708. sizeof(private->rdc_data));
  3709. info->confdata_size = min((unsigned long)private->conf_len,
  3710. sizeof(info->configuration_data));
  3711. memcpy(info->configuration_data, private->conf_data,
  3712. info->confdata_size);
  3713. return 0;
  3714. }
  3715. /*
  3716. * SECTION: ioctl functions for eckd devices.
  3717. */
/*
 * Release device ioctl.
 * Builds a channel program to release a previously reserved
 * (see dasd_eckd_reserve) device.
 */
/*
 * Build and run a single RELEASE ccw to drop a prior reservation.
 * Requires CAP_SYS_ADMIN; returns 0 on success or a negative errno.
 */
static int
dasd_eckd_release(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr)) {
		/* Allocation can fail while I/O is stalled by the very
		 * reservation we want to drop; fall back to the static
		 * dasd_reserve_req, serialized by dasd_reserve_mutex. */
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	if (!rc)
		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
  3767. /*
  3768. * Reserve device ioctl.
  3769. * Options are set to 'synchronous wait for interrupt' and
  3770. * 'timeout the request'. This leads to a terminate IO if
  3771. * the interrupt is outstanding for a certain time.
  3772. */
/*
 * Build and run a single RESERVE ccw to reserve the device.
 * Requires CAP_SYS_ADMIN; returns 0 on success or a negative errno.
 * On success the DASD_FLAG_IS_RESERVED device flag is set.
 */
static int
dasd_eckd_reserve(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
	if (IS_ERR(cqr)) {
		/* Allocation can fail while I/O is stalled by another
		 * system's reservation; fall back to the static
		 * dasd_reserve_req, serialized by dasd_reserve_mutex. */
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	if (!rc)
		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Steal lock ioctl - unconditional reserve device.
 * Builds a channel program to break a device's reservation.
 * (unconditional reserve)
 */
  3822. static int
  3823. dasd_eckd_steal_lock(struct dasd_device *device)
  3824. {
  3825. struct dasd_ccw_req *cqr;
  3826. int rc;
  3827. struct ccw1 *ccw;
  3828. int useglobal;
  3829. if (!capable(CAP_SYS_ADMIN))
  3830. return -EACCES;
  3831. useglobal = 0;
  3832. cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
  3833. if (IS_ERR(cqr)) {
  3834. mutex_lock(&dasd_reserve_mutex);
  3835. useglobal = 1;
  3836. cqr = &dasd_reserve_req->cqr;
  3837. memset(cqr, 0, sizeof(*cqr));
  3838. memset(&dasd_reserve_req->ccw, 0,
  3839. sizeof(dasd_reserve_req->ccw));
  3840. cqr->cpaddr = &dasd_reserve_req->ccw;
  3841. cqr->data = &dasd_reserve_req->data;
  3842. cqr->magic = DASD_ECKD_MAGIC;
  3843. }
  3844. ccw = cqr->cpaddr;
  3845. ccw->cmd_code = DASD_ECKD_CCW_SLCK;
  3846. ccw->flags |= CCW_FLAG_SLI;
  3847. ccw->count = 32;
  3848. ccw->cda = (__u32)(addr_t) cqr->data;
  3849. cqr->startdev = device;
  3850. cqr->memdev = device;
  3851. clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
  3852. set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
  3853. cqr->retries = 2; /* set retry counter to enable basic ERP */
  3854. cqr->expires = 2 * HZ;
  3855. cqr->buildclk = get_tod_clock();
  3856. cqr->status = DASD_CQR_FILLED;
  3857. rc = dasd_sleep_on_immediatly(cqr);
  3858. if (!rc)
  3859. set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
  3860. if (useglobal)
  3861. mutex_unlock(&dasd_reserve_mutex);
  3862. else
  3863. dasd_sfree_request(cqr, cqr->memdev);
  3864. return rc;
  3865. }
  3866. /*
  3867. * SNID - Sense Path Group ID
  3868. * This ioctl may be used in situations where I/O is stalled due to
  3869. * a reserve, so if the normal dasd_smalloc_request fails, we use the
  3870. * preallocated dasd_reserve_req.
  3871. */
  3872. static int dasd_eckd_snid(struct dasd_device *device,
  3873. void __user *argp)
  3874. {
  3875. struct dasd_ccw_req *cqr;
  3876. int rc;
  3877. struct ccw1 *ccw;
  3878. int useglobal;
  3879. struct dasd_snid_ioctl_data usrparm;
  3880. if (!capable(CAP_SYS_ADMIN))
  3881. return -EACCES;
  3882. if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
  3883. return -EFAULT;
  3884. useglobal = 0;
  3885. cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
  3886. sizeof(struct dasd_snid_data), device,
  3887. NULL);
  3888. if (IS_ERR(cqr)) {
  3889. mutex_lock(&dasd_reserve_mutex);
  3890. useglobal = 1;
  3891. cqr = &dasd_reserve_req->cqr;
  3892. memset(cqr, 0, sizeof(*cqr));
  3893. memset(&dasd_reserve_req->ccw, 0,
  3894. sizeof(dasd_reserve_req->ccw));
  3895. cqr->cpaddr = &dasd_reserve_req->ccw;
  3896. cqr->data = &dasd_reserve_req->data;
  3897. cqr->magic = DASD_ECKD_MAGIC;
  3898. }
  3899. ccw = cqr->cpaddr;
  3900. ccw->cmd_code = DASD_ECKD_CCW_SNID;
  3901. ccw->flags |= CCW_FLAG_SLI;
  3902. ccw->count = 12;
  3903. ccw->cda = (__u32)(addr_t) cqr->data;
  3904. cqr->startdev = device;
  3905. cqr->memdev = device;
  3906. clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
  3907. set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
  3908. set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
  3909. cqr->retries = 5;
  3910. cqr->expires = 10 * HZ;
  3911. cqr->buildclk = get_tod_clock();
  3912. cqr->status = DASD_CQR_FILLED;
  3913. cqr->lpm = usrparm.path_mask;
  3914. rc = dasd_sleep_on_immediatly(cqr);
  3915. /* verify that I/O processing didn't modify the path mask */
  3916. if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
  3917. rc = -EIO;
  3918. if (!rc) {
  3919. usrparm.data = *((struct dasd_snid_data *)cqr->data);
  3920. if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
  3921. rc = -EFAULT;
  3922. }
  3923. if (useglobal)
  3924. mutex_unlock(&dasd_reserve_mutex);
  3925. else
  3926. dasd_sfree_request(cqr, cqr->memdev);
  3927. return rc;
  3928. }
  3929. /*
  3930. * Read performance statistics
  3931. */
  3932. static int
  3933. dasd_eckd_performance(struct dasd_device *device, void __user *argp)
  3934. {
  3935. struct dasd_psf_prssd_data *prssdp;
  3936. struct dasd_rssd_perf_stats_t *stats;
  3937. struct dasd_ccw_req *cqr;
  3938. struct ccw1 *ccw;
  3939. int rc;
  3940. cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
  3941. (sizeof(struct dasd_psf_prssd_data) +
  3942. sizeof(struct dasd_rssd_perf_stats_t)),
  3943. device, NULL);
  3944. if (IS_ERR(cqr)) {
  3945. DBF_DEV_EVENT(DBF_WARNING, device, "%s",
  3946. "Could not allocate initialization request");
  3947. return PTR_ERR(cqr);
  3948. }
  3949. cqr->startdev = device;
  3950. cqr->memdev = device;
  3951. cqr->retries = 0;
  3952. clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
  3953. cqr->expires = 10 * HZ;
  3954. /* Prepare for Read Subsystem Data */
  3955. prssdp = (struct dasd_psf_prssd_data *) cqr->data;
  3956. memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
  3957. prssdp->order = PSF_ORDER_PRSSD;
  3958. prssdp->suborder = 0x01; /* Performance Statistics */
  3959. prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */
  3960. ccw = cqr->cpaddr;
  3961. ccw->cmd_code = DASD_ECKD_CCW_PSF;
  3962. ccw->count = sizeof(struct dasd_psf_prssd_data);
  3963. ccw->flags |= CCW_FLAG_CC;
  3964. ccw->cda = (__u32)(addr_t) prssdp;
  3965. /* Read Subsystem Data - Performance Statistics */
  3966. stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
  3967. memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
  3968. ccw++;
  3969. ccw->cmd_code = DASD_ECKD_CCW_RSSD;
  3970. ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
  3971. ccw->cda = (__u32)(addr_t) stats;
  3972. cqr->buildclk = get_tod_clock();
  3973. cqr->status = DASD_CQR_FILLED;
  3974. rc = dasd_sleep_on(cqr);
  3975. if (rc == 0) {
  3976. prssdp = (struct dasd_psf_prssd_data *) cqr->data;
  3977. stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
  3978. if (copy_to_user(argp, stats,
  3979. sizeof(struct dasd_rssd_perf_stats_t)))
  3980. rc = -EFAULT;
  3981. }
  3982. dasd_sfree_request(cqr, cqr->memdev);
  3983. return rc;
  3984. }
/*
 * Get attributes (cache operations)
 * Returns the cache attributes used in Define Extent (DE).
 */
  3989. static int
  3990. dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
  3991. {
  3992. struct dasd_eckd_private *private = device->private;
  3993. struct attrib_data_t attrib = private->attrib;
  3994. int rc;
  3995. if (!capable(CAP_SYS_ADMIN))
  3996. return -EACCES;
  3997. if (!argp)
  3998. return -EINVAL;
  3999. rc = 0;
  4000. if (copy_to_user(argp, (long *) &attrib,
  4001. sizeof(struct attrib_data_t)))
  4002. rc = -EFAULT;
  4003. return rc;
  4004. }
/*
 * Set attributes (cache operations)
 * Stores the attributes for cache operation to be used in Define Extent (DE).
 */
  4009. static int
  4010. dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
  4011. {
  4012. struct dasd_eckd_private *private = device->private;
  4013. struct attrib_data_t attrib;
  4014. if (!capable(CAP_SYS_ADMIN))
  4015. return -EACCES;
  4016. if (!argp)
  4017. return -EINVAL;
  4018. if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
  4019. return -EFAULT;
  4020. private->attrib = attrib;
  4021. dev_info(&device->cdev->dev,
  4022. "The DASD cache mode was set to %x (%i cylinder prestage)\n",
  4023. private->attrib.operation, private->attrib.nr_cyl);
  4024. return 0;
  4025. }
  4026. /*
  4027. * Issue syscall I/O to EMC Symmetrix array.
  4028. * CCWs are PSF and RSSD
  4029. */
static int dasd_symm_io(struct dasd_device *device, void __user *argp)
{
	struct dasd_symmio_parms usrparm;
	char *psf_data, *rssd_result;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	char psf0, psf1;
	int rc;

	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
		return -EACCES;
	/* psf0/psf1 are only used by the debug trace at the "out:" label */
	psf0 = psf1 = 0;

	/* Copy parms from caller */
	rc = -EFAULT;
	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
		goto out;
	if (is_compat_task()) {
		/* Make sure pointers are sane even on 31 bit. */
		rc = -EINVAL;
		if ((usrparm.psf_data >> 32) != 0)
			goto out;
		if ((usrparm.rssd_result >> 32) != 0)
			goto out;
		usrparm.psf_data &= 0x7fffffffULL;
		usrparm.rssd_result &= 0x7fffffffULL;
	}
	/* at least 2 bytes are accessed and should be allocated */
	if (usrparm.psf_data_len < 2) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      "Symmetrix ioctl invalid data length %d",
			      usrparm.psf_data_len);
		rc = -EINVAL;
		goto out;
	}
	/* alloc I/O data area (DMA-capable, sizes are user-supplied) */
	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
	if (!psf_data || !rssd_result) {
		rc = -ENOMEM;
		goto out_free;
	}
	/* get syscall header from user space */
	rc = -EFAULT;
	if (copy_from_user(psf_data,
			   (void __user *)(unsigned long) usrparm.psf_data,
			   usrparm.psf_data_len))
		goto out_free;
	/* remember the first two PSF bytes for the trace message */
	psf0 = psf_data[0];
	psf1 = psf_data[1];

	/* setup CCWs for PSF + RSSD */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate initialization request");
		rc = PTR_ERR(cqr);
		goto out_free;
	}

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 3;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Build the ccws */
	ccw = cqr->cpaddr;

	/* PSF ccw - command-chained to the RSSD ccw below */
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = usrparm.psf_data_len;
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) psf_data;

	ccw++;

	/* RSSD ccw */
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = usrparm.rssd_result_len;
	ccw->flags = CCW_FLAG_SLI ;
	ccw->cda = (__u32)(addr_t) rssd_result;

	rc = dasd_sleep_on(cqr);
	if (rc)
		goto out_sfree;

	/* return the RSSD result to the caller */
	rc = -EFAULT;
	if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
			 rssd_result, usrparm.rssd_result_len))
		goto out_sfree;
	rc = 0;

out_sfree:
	dasd_sfree_request(cqr, cqr->memdev);
out_free:
	kfree(rssd_result);
	kfree(psf_data);
out:
	DBF_DEV_EVENT(DBF_WARNING, device,
		      "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
		      (int) psf0, (int) psf1, rc);
	return rc;
}
  4124. static int
  4125. dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
  4126. {
  4127. struct dasd_device *device = block->base;
  4128. switch (cmd) {
  4129. case BIODASDGATTR:
  4130. return dasd_eckd_get_attrib(device, argp);
  4131. case BIODASDSATTR:
  4132. return dasd_eckd_set_attrib(device, argp);
  4133. case BIODASDPSRD:
  4134. return dasd_eckd_performance(device, argp);
  4135. case BIODASDRLSE:
  4136. return dasd_eckd_release(device);
  4137. case BIODASDRSRV:
  4138. return dasd_eckd_reserve(device);
  4139. case BIODASDSLCK:
  4140. return dasd_eckd_steal_lock(device);
  4141. case BIODASDSNID:
  4142. return dasd_eckd_snid(device, argp);
  4143. case BIODASDSYMMIO:
  4144. return dasd_symm_io(device, argp);
  4145. default:
  4146. return -ENOTTY;
  4147. }
  4148. }
  4149. /*
  4150. * Dump the range of CCWs into 'page' buffer
  4151. * and return number of printed chars.
  4152. */
static int
dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
{
	int len, count;
	char *datap;

	len = 0;
	while (from <= to) {
		/* one line per CCW: address, the raw 8 CCW bytes, then data */
		len += sprintf(page + len, PRINTK_HEADER
			       " CCW %p: %08X %08X DAT:",
			       from, ((int *) from)[0], ((int *) from)[1]);
		/* get pointer to data (consider IDALs) */
		if (from->flags & CCW_FLAG_IDA)
			datap = (char *) *((addr_t *) (addr_t) from->cda);
		else
			datap = (char *) ((addr_t) from->cda);
		/* dump data (max 32 bytes), grouped 4 per word, 8 per block */
		for (count = 0; count < from->count && count < 32; count++) {
			if (count % 8 == 0) len += sprintf(page + len, " ");
			if (count % 4 == 0) len += sprintf(page + len, " ");
			len += sprintf(page + len, "%02x", datap[count]);
		}
		len += sprintf(page + len, "\n");
		from++;
	}
	/* number of characters written into 'page' */
	return len;
}
  4179. static void
  4180. dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
  4181. char *reason)
  4182. {
  4183. u64 *sense;
  4184. u64 *stat;
  4185. sense = (u64 *) dasd_get_sense(irb);
  4186. stat = (u64 *) &irb->scsw;
  4187. if (sense) {
  4188. DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
  4189. "%016llx %016llx %016llx %016llx",
  4190. reason, *stat, *((u32 *) (stat + 1)),
  4191. sense[0], sense[1], sense[2], sense[3]);
  4192. } else {
  4193. DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
  4194. reason, *stat, *((u32 *) (stat + 1)),
  4195. "NO VALID SENSE");
  4196. }
  4197. }
  4198. /*
  4199. * Print sense data and related channel program.
  4200. * Parts are printed because printk buffer is only 1024 bytes.
  4201. */
static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
				     struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	struct ccw1 *first, *last, *fail, *from, *to;
	int len, sl, sct;

	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "No memory to dump sense data\n");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      dev_name(&device->cdev->dev));
	len += sprintf(page + len, PRINTK_HEADER
		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
		       "CS:%02X RC:%d\n",
		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
		       req ? req->intrc : 0);
	len += sprintf(page + len, PRINTK_HEADER
		       " device %s: Failing CCW: %p\n",
		       dev_name(&device->cdev->dev),
		       (void *) (addr_t) irb->scsw.cmd.cpa);
	if (irb->esw.esw0.erw.cons) {
		/* 32 sense bytes, printed 8 per line */
		for (sl = 0; sl < 4; sl++) {
			len += sprintf(page + len, PRINTK_HEADER
				       " Sense(hex) %2d-%2d:",
				       (8 * sl), ((8 * sl) + 7));
			for (sct = 0; sct < 8; sct++) {
				len += sprintf(page + len, " %02x",
					       irb->ecw[8 * sl + sct]);
			}
			len += sprintf(page + len, "\n");
		}
		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
			/* 24 Byte Sense Data */
			sprintf(page + len, PRINTK_HEADER
				" 24 Byte: %x MSG %x, "
				"%s MSGb to SYSOP\n",
				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
				irb->ecw[1] & 0x10 ? "" : "no");
		} else {
			/* 32 Byte Sense Data */
			sprintf(page + len, PRINTK_HEADER
				" 32 Byte: Format: %x "
				"Exception class %x\n",
				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
		}
	} else {
		sprintf(page + len, PRINTK_HEADER
			" SORRY - NO VALID SENSE AVAILABLE\n");
	}
	printk(KERN_ERR "%s", page);

	if (req) {
		/* req == NULL for unsolicited interrupts */
		/* dump the Channel Program (max 140 Bytes per line) */
		/* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */
		first = req->cpaddr;
		/* walk to the last CCW of the chain (no chaining flags set) */
		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
		to = min(first + 6, last);
		len = sprintf(page, PRINTK_HEADER
			      " Related CP in req: %p\n", req);
		dasd_eckd_dump_ccw_range(first, to, page + len);
		printk(KERN_ERR "%s", page);

		/* print failing CCW area (maximum 4) */
		/* scsw->cda is either valid or zero */
		len = 0;
		from = ++to;
		fail = (struct ccw1 *)(addr_t)
				irb->scsw.cmd.cpa; /* failing CCW */
		if (from < fail - 2) {
			from = fail - 2; /* there is a gap - print header */
			len += sprintf(page, PRINTK_HEADER "......\n");
		}
		to = min(fail + 1, last);
		len += dasd_eckd_dump_ccw_range(from, to, page + len);

		/* print last CCWs (maximum 2) */
		from = max(from, ++to);
		if (from < last - 1) {
			from = last - 1; /* there is a gap - print header */
			len += sprintf(page + len, PRINTK_HEADER "......\n");
		}
		len += dasd_eckd_dump_ccw_range(from, last, page + len);
		if (len > 0)
			printk(KERN_ERR "%s", page);
	}
	free_page((unsigned long) page);
}
  4294. /*
  4295. * Print sense data from a tcw.
  4296. */
static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
				     struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	int len, sl, sct, residual;
	struct tsb *tsb;
	u8 *sense, *rcq;

	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, " %s",
			      "No memory to dump sense data");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      dev_name(&device->cdev->dev));
	len += sprintf(page + len, PRINTK_HEADER
		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
		       "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
		       irb->scsw.tm.fcxs,
		       (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
		       req ? req->intrc : 0);
	len += sprintf(page + len, PRINTK_HEADER
		       " device %s: Failing TCW: %p\n",
		       dev_name(&device->cdev->dev),
		       (void *) (addr_t) irb->scsw.tm.tcw);

	/* only fetch the TSB when a TCW is present and fcxs bit 0x01 is set */
	tsb = NULL;
	sense = NULL;
	if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
		tsb = tcw_get_tsb(
			(struct tcw *)(unsigned long)irb->scsw.tm.tcw);

	if (tsb) {
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->length %d\n", tsb->length);
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->flags %x\n", tsb->flags);
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->dcw_offset %d\n", tsb->dcw_offset);
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->count %d\n", tsb->count);
		residual = tsb->count - 28;
		len += sprintf(page + len, PRINTK_HEADER
			       " residual %d\n", residual);

		/* low three flag bits select the type of the tsa union */
		switch (tsb->flags & 0x07) {
		case 1:	/* tsa_iostat */
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.dev_time %d\n",
				       tsb->tsa.iostat.dev_time);
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.def_time %d\n",
				       tsb->tsa.iostat.def_time);
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.queue_time %d\n",
				       tsb->tsa.iostat.queue_time);
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.dev_busy_time %d\n",
				       tsb->tsa.iostat.dev_busy_time);
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.iostat.dev_act_time %d\n",
				       tsb->tsa.iostat.dev_act_time);
			sense = tsb->tsa.iostat.sense;
			break;
		case 2: /* ts_ddpc */
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
			/* dump 16 bytes of rcq, 8 per line */
			for (sl = 0; sl < 2; sl++) {
				len += sprintf(page + len, PRINTK_HEADER
					       " tsb->tsa.ddpc.rcq %2d-%2d: ",
					       (8 * sl), ((8 * sl) + 7));
				rcq = tsb->tsa.ddpc.rcq;
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, " %02x",
						       rcq[8 * sl + sct]);
				}
				len += sprintf(page + len, "\n");
			}
			sense = tsb->tsa.ddpc.sense;
			break;
		case 3: /* tsa_intrg */
			len += sprintf(page + len, PRINTK_HEADER
				       " tsb->tsa.intrg.: not supported yet\n");
			break;
		}

		if (sense) {
			/* 32 sense bytes, printed 8 per line */
			for (sl = 0; sl < 4; sl++) {
				len += sprintf(page + len, PRINTK_HEADER
					       " Sense(hex) %2d-%2d:",
					       (8 * sl), ((8 * sl) + 7));
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, " %02x",
						       sense[8 * sl + sct]);
				}
				len += sprintf(page + len, "\n");
			}
			if (sense[27] & DASD_SENSE_BIT_0) {
				/* 24 Byte Sense Data */
				sprintf(page + len, PRINTK_HEADER
					" 24 Byte: %x MSG %x, "
					"%s MSGb to SYSOP\n",
					sense[7] >> 4, sense[7] & 0x0f,
					sense[1] & 0x10 ? "" : "no");
			} else {
				/* 32 Byte Sense Data */
				sprintf(page + len, PRINTK_HEADER
					" 32 Byte: Format: %x "
					"Exception class %x\n",
					sense[6] & 0x0f, sense[22] >> 4);
			}
		} else {
			sprintf(page + len, PRINTK_HEADER
				" SORRY - NO VALID SENSE AVAILABLE\n");
		}
	} else {
		sprintf(page + len, PRINTK_HEADER
			" SORRY - NO TSB DATA AVAILABLE\n");
	}
	printk(KERN_ERR "%s", page);
	free_page((unsigned long) page);
}
/*
 * Dump sense data for a failed request, dispatching to the transport
 * mode (tcw) or command mode (ccw) variant.  Errors for which the
 * request set a suppress flag are not logged at all.
 *
 * NOTE(review): the suppress checks dereference req->flags; only the
 * ccw dump path below documents a NULL req (unsolicited interrupt) —
 * confirm callers of the tm path always pass a valid req.
 */
static void dasd_eckd_dump_sense(struct dasd_device *device,
				 struct dasd_ccw_req *req, struct irb *irb)
{
	u8 *sense = dasd_get_sense(irb);

	if (scsw_is_tm(&irb->scsw)) {
		/*
		 * In some cases the 'File Protected' or 'Incorrect Length'
		 * error might be expected and log messages shouldn't be written
		 * then. Check if the according suppress bit is set.
		 */
		if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
		    test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
			return;
		if (scsw_cstat(&irb->scsw) == 0x40 &&
		    test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
			return;

		dasd_eckd_dump_sense_tcw(device, req, irb);
	} else {
		/*
		 * In some cases the 'Command Reject' or 'No Record Found'
		 * error might be expected and log messages shouldn't be
		 * written then. Check if the according suppress bit is set.
		 */
		if (sense && sense[0] & SNS0_CMD_REJECT &&
		    test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
			return;
		if (sense && sense[1] & SNS1_NO_REC_FOUND &&
		    test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
			return;

		dasd_eckd_dump_sense_ccw(device, req, irb);
	}
}
  4452. static int dasd_eckd_pm_freeze(struct dasd_device *device)
  4453. {
  4454. /*
  4455. * the device should be disconnected from our LCU structure
  4456. * on restore we will reconnect it and reread LCU specific
  4457. * information like PAV support that might have changed
  4458. */
  4459. dasd_alias_remove_device(device);
  4460. dasd_alias_disconnect_device_from_lcu(device);
  4461. return 0;
  4462. }
/*
 * Re-initialize a device after hibernation: re-read configuration data,
 * regenerate and verify the UID, re-register with the LCU/alias
 * handling, and refresh feature codes and device characteristics.
 * Returns 0 on success, -1 on any failure.
 */
static int dasd_eckd_restore_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_eckd_characteristics temp_rdc_data;
	int rc;
	struct dasd_uid temp_uid;
	unsigned long flags;
	unsigned long cqr_flags = 0;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read configuration data failed, rc=%d", rc);
		goto out_err;
	}

	/* keep the pre-suspend UID to detect a changed device */
	dasd_eckd_get_uid(device, &temp_uid);
	/* Generate device unique id */
	rc = dasd_eckd_generate_uid(device);
	/* compare under the ccwdev lock, private->uid is shared state */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
		dev_err(&device->cdev->dev, "The UID of the DASD has "
			"changed\n");
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (rc)
		goto out_err;

	/* register lcu with alias handling, enable PAV if this is a new lcu */
	rc = dasd_alias_make_device_known_to_lcu(device);
	if (rc)
		goto out_err;

	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr_flags);
	dasd_eckd_validate_server(device, cqr_flags);

	/* RE-Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read configuration data failed, rc=%d", rc);
		goto out_err2;
	}

	/* Read Feature Codes */
	dasd_eckd_read_features(device);

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &temp_rdc_data, 64);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristic failed, rc=%d", rc);
		goto out_err2;
	}
	/* publish the new characteristics under the ccwdev lock */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	/* add device to alias management */
	dasd_alias_add_device(device);

	return 0;

out_err2:
	dasd_alias_disconnect_device_from_lcu(device);
out_err:
	return -1;
}
/*
 * Re-read the device configuration and UID and re-register the device
 * with alias management.  If the base unit address changed, log the new
 * UID.  Returns 0 on success, -1 on failure.
 */
static int dasd_eckd_reload_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc, old_base;
	char print_uid[60];
	struct dasd_uid uid;
	unsigned long flags;

	/*
	 * remove device from alias handling to prevent new requests
	 * from being scheduled on the wrong alias device
	 */
	dasd_alias_remove_device(device);

	/* read the old base unit address under the ccwdev lock */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	old_base = private->uid.base_unit_addr;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err;

	rc = dasd_eckd_generate_uid(device);
	if (rc)
		goto out_err;
	/*
	 * update unit address configuration and
	 * add device to alias management
	 */
	dasd_alias_update_add_device(device);

	dasd_eckd_get_uid(device, &uid);

	if (old_base != uid.base_unit_addr) {
		/* include the vduit part only when present */
		if (strlen(uid.vduit) > 0)
			snprintf(print_uid, sizeof(print_uid),
				 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
				 uid.ssid, uid.base_unit_addr, uid.vduit);
		else
			snprintf(print_uid, sizeof(print_uid),
				 "%s.%s.%04x.%02x", uid.vendor, uid.serial,
				 uid.ssid, uid.base_unit_addr);

		dev_info(&device->cdev->dev,
			 "An Alias device was reassigned to a new base device "
			 "with UID: %s\n", print_uid);
	}
	return 0;

out_err:
	return -1;
}
/*
 * Read the subsystem message buffer via a PSF/RSSD CCW chain, preferring
 * the path given by lpum and falling back to any path on failure.
 * The result is copied into *messages.  Returns 0 on success or a
 * negative errno.
 */
static int dasd_eckd_read_message_buffer(struct dasd_device *device,
					 struct dasd_rssd_messages *messages,
					 __u8 lpum)
{
	struct dasd_rssd_messages *message_buf;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_messages)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}

	/* try the requested path first; cleared before a retry below */
	cqr->lpm = lpum;
retry:
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10 * HZ;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
	/* dasd_sleep_on_immediatly does not do complex error
	 * recovery so clear erp flag and set retry counter to
	 * do basic erp */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 256;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x03;	/* Message Buffer */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - message buffer */
	message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
	memset(message_buf, 0, sizeof(struct dasd_rssd_messages));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_messages);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) message_buf;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on_immediatly(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		message_buf = (struct dasd_rssd_messages *)
			(prssdp + 1);
		memcpy(messages, message_buf,
		       sizeof(struct dasd_rssd_messages));
	} else if (cqr->lpm) {
		/*
		 * on z/VM we might not be able to do I/O on the requested path
		 * but instead we get the required information on any path
		 * so retry with open path mask
		 */
		cqr->lpm = 0;
		goto retry;
	} else
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading messages failed with rc=%d\n"
				, rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/*
 * Query host access information for this volume via a PSF/RSSD chain
 * (suborder "query host access") and copy the answer into *data.
 * Returns 0 on success, -EOPNOTSUPP if the feature is unavailable or
 * the query failed, other negative errno on allocation failure.
 */
static int dasd_eckd_query_host_access(struct dasd_device *device,
				       struct dasd_psf_query_host_access *data)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_query_host_access *host_access;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* not available for HYPER PAV alias devices */
	if (!device->block && private->lcu->pav == HYPER_PAV)
		return -EOPNOTSUPP;

	/* may not be supported by the storage server */
	if (!(private->features.feature[14] & 0x80))
		return -EOPNOTSUPP;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   sizeof(struct dasd_psf_prssd_data) + 1,
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}
	/* result buffer is too large for the cqr data area */
	host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
	if (!host_access) {
		dasd_sfree_request(cqr, device);
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate host_access buffer");
		return -ENOMEM;
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_QHA;	/* query host access */
	/* LSS and Volume that will be queried */
	prssdp->lss = private->ned->ID;
	prssdp->volume = private->ned->unit_addr;
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - query host access */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_psf_query_host_access);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) host_access;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* the command might not be supported, suppress error message */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		*data = *host_access;
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading host access data failed with rc=%d\n",
				rc);
		rc = -EOPNOTSUPP;
	}

	dasd_sfree_request(cqr, cqr->memdev);
	kfree(host_access);
	return rc;
}
  4714. /*
  4715. * return number of grouped devices
  4716. */
  4717. static int dasd_eckd_host_access_count(struct dasd_device *device)
  4718. {
  4719. struct dasd_psf_query_host_access *access;
  4720. struct dasd_ckd_path_group_entry *entry;
  4721. struct dasd_ckd_host_information *info;
  4722. int count = 0;
  4723. int rc, i;
  4724. access = kzalloc(sizeof(*access), GFP_NOIO);
  4725. if (!access) {
  4726. DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
  4727. "Could not allocate access buffer");
  4728. return -ENOMEM;
  4729. }
  4730. rc = dasd_eckd_query_host_access(device, access);
  4731. if (rc) {
  4732. kfree(access);
  4733. return rc;
  4734. }
  4735. info = (struct dasd_ckd_host_information *)
  4736. access->host_access_information;
  4737. for (i = 0; i < info->entry_count; i++) {
  4738. entry = (struct dasd_ckd_path_group_entry *)
  4739. (info->entry + i * info->entry_size);
  4740. if (entry->status_flags & DASD_ECKD_PG_GROUPED)
  4741. count++;
  4742. }
  4743. kfree(access);
  4744. return count;
  4745. }
  4746. /*
  4747. * write host access information to a sequential file
  4748. */
  4749. static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
  4750. {
  4751. struct dasd_psf_query_host_access *access;
  4752. struct dasd_ckd_path_group_entry *entry;
  4753. struct dasd_ckd_host_information *info;
  4754. char sysplex[9] = "";
  4755. int rc, i;
  4756. access = kzalloc(sizeof(*access), GFP_NOIO);
  4757. if (!access) {
  4758. DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
  4759. "Could not allocate access buffer");
  4760. return -ENOMEM;
  4761. }
  4762. rc = dasd_eckd_query_host_access(device, access);
  4763. if (rc) {
  4764. kfree(access);
  4765. return rc;
  4766. }
  4767. info = (struct dasd_ckd_host_information *)
  4768. access->host_access_information;
  4769. for (i = 0; i < info->entry_count; i++) {
  4770. entry = (struct dasd_ckd_path_group_entry *)
  4771. (info->entry + i * info->entry_size);
  4772. /* PGID */
  4773. seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
  4774. /* FLAGS */
  4775. seq_printf(m, "status_flags %02x\n", entry->status_flags);
  4776. /* SYSPLEX NAME */
  4777. memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
  4778. EBCASC(sysplex, sizeof(sysplex));
  4779. seq_printf(m, "sysplex_name %8s\n", sysplex);
  4780. /* SUPPORTED CYLINDER */
  4781. seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
  4782. /* TIMESTAMP */
  4783. seq_printf(m, "timestamp %lu\n", (unsigned long)
  4784. entry->timestamp);
  4785. }
  4786. kfree(access);
  4787. return 0;
  4788. }
  4789. /*
  4790. * Perform Subsystem Function - CUIR response
  4791. */
  4792. static int
  4793. dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
  4794. __u32 message_id, __u8 lpum)
  4795. {
  4796. struct dasd_psf_cuir_response *psf_cuir;
  4797. int pos = pathmask_to_pos(lpum);
  4798. struct dasd_ccw_req *cqr;
  4799. struct ccw1 *ccw;
  4800. int rc;
  4801. cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
  4802. sizeof(struct dasd_psf_cuir_response),
  4803. device, NULL);
  4804. if (IS_ERR(cqr)) {
  4805. DBF_DEV_EVENT(DBF_WARNING, device, "%s",
  4806. "Could not allocate PSF-CUIR request");
  4807. return PTR_ERR(cqr);
  4808. }
  4809. psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
  4810. psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
  4811. psf_cuir->cc = response;
  4812. psf_cuir->chpid = device->path[pos].chpid;
  4813. psf_cuir->message_id = message_id;
  4814. psf_cuir->cssid = device->path[pos].cssid;
  4815. psf_cuir->ssid = device->path[pos].ssid;
  4816. ccw = cqr->cpaddr;
  4817. ccw->cmd_code = DASD_ECKD_CCW_PSF;
  4818. ccw->cda = (__u32)(addr_t)psf_cuir;
  4819. ccw->flags = CCW_FLAG_SLI;
  4820. ccw->count = sizeof(struct dasd_psf_cuir_response);
  4821. cqr->startdev = device;
  4822. cqr->memdev = device;
  4823. cqr->block = NULL;
  4824. cqr->retries = 256;
  4825. cqr->expires = 10*HZ;
  4826. cqr->buildclk = get_tod_clock();
  4827. cqr->status = DASD_CQR_FILLED;
  4828. set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
  4829. rc = dasd_sleep_on(cqr);
  4830. dasd_sfree_request(cqr, cqr->memdev);
  4831. return rc;
  4832. }
  4833. /*
  4834. * return configuration data that is referenced by record selector
  4835. * if a record selector is specified or per default return the
  4836. * conf_data pointer for the path specified by lpum
  4837. */
  4838. static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
  4839. __u8 lpum,
  4840. struct dasd_cuir_message *cuir)
  4841. {
  4842. struct dasd_conf_data *conf_data;
  4843. int path, pos;
  4844. if (cuir->record_selector == 0)
  4845. goto out;
  4846. for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
  4847. conf_data = device->path[pos].conf_data;
  4848. if (conf_data->gneq.record_selector ==
  4849. cuir->record_selector)
  4850. return conf_data;
  4851. }
  4852. out:
  4853. return device->path[pathmask_to_pos(lpum)].conf_data;
  4854. }
  4855. /*
  4856. * This function determines the scope of a reconfiguration request by
  4857. * analysing the path and device selection data provided in the CUIR request.
  4858. * Returns a path mask containing CUIR affected paths for the give device.
  4859. *
  4860. * If the CUIR request does not contain the required information return the
  4861. * path mask of the path the attention message for the CUIR request was reveived
  4862. * on.
  4863. */
  4864. static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
  4865. struct dasd_cuir_message *cuir)
  4866. {
  4867. struct dasd_conf_data *ref_conf_data;
  4868. unsigned long bitmask = 0, mask = 0;
  4869. struct dasd_conf_data *conf_data;
  4870. unsigned int pos, path;
  4871. char *ref_gneq, *gneq;
  4872. char *ref_ned, *ned;
  4873. int tbcpm = 0;
  4874. /* if CUIR request does not specify the scope use the path
  4875. the attention message was presented on */
  4876. if (!cuir->ned_map ||
  4877. !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
  4878. return lpum;
  4879. /* get reference conf data */
  4880. ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
  4881. /* reference ned is determined by ned_map field */
  4882. pos = 8 - ffs(cuir->ned_map);
  4883. ref_ned = (char *)&ref_conf_data->neds[pos];
  4884. ref_gneq = (char *)&ref_conf_data->gneq;
  4885. /* transfer 24 bit neq_map to mask */
  4886. mask = cuir->neq_map[2];
  4887. mask |= cuir->neq_map[1] << 8;
  4888. mask |= cuir->neq_map[0] << 16;
  4889. for (path = 0; path < 8; path++) {
  4890. /* initialise data per path */
  4891. bitmask = mask;
  4892. conf_data = device->path[path].conf_data;
  4893. pos = 8 - ffs(cuir->ned_map);
  4894. ned = (char *) &conf_data->neds[pos];
  4895. /* compare reference ned and per path ned */
  4896. if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
  4897. continue;
  4898. gneq = (char *)&conf_data->gneq;
  4899. /* compare reference gneq and per_path gneq under
  4900. 24 bit mask where mask bit 0 equals byte 7 of
  4901. the gneq and mask bit 24 equals byte 31 */
  4902. while (bitmask) {
  4903. pos = ffs(bitmask) - 1;
  4904. if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
  4905. != 0)
  4906. break;
  4907. clear_bit(pos, &bitmask);
  4908. }
  4909. if (bitmask)
  4910. continue;
  4911. /* device and path match the reference values
  4912. add path to CUIR scope */
  4913. tbcpm |= 0x80 >> path;
  4914. }
  4915. return tbcpm;
  4916. }
  4917. static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
  4918. unsigned long paths, int action)
  4919. {
  4920. int pos;
  4921. while (paths) {
  4922. /* get position of bit in mask */
  4923. pos = 8 - ffs(paths);
  4924. /* get channel path descriptor from this position */
  4925. if (action == CUIR_QUIESCE)
  4926. pr_warn("Service on the storage server caused path %x.%02x to go offline",
  4927. device->path[pos].cssid,
  4928. device->path[pos].chpid);
  4929. else if (action == CUIR_RESUME)
  4930. pr_info("Path %x.%02x is back online after service on the storage server",
  4931. device->path[pos].cssid,
  4932. device->path[pos].chpid);
  4933. clear_bit(7 - pos, &paths);
  4934. }
  4935. }
  4936. static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
  4937. struct dasd_cuir_message *cuir)
  4938. {
  4939. unsigned long tbcpm;
  4940. tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
  4941. /* nothing to do if path is not in use */
  4942. if (!(dasd_path_get_opm(device) & tbcpm))
  4943. return 0;
  4944. if (!(dasd_path_get_opm(device) & ~tbcpm)) {
  4945. /* no path would be left if the CUIR action is taken
  4946. return error */
  4947. return -EINVAL;
  4948. }
  4949. /* remove device from operational path mask */
  4950. dasd_path_remove_opm(device, tbcpm);
  4951. dasd_path_add_cuirpm(device, tbcpm);
  4952. return tbcpm;
  4953. }
/*
 * walk through all devices and build a path mask to quiesce them
 * return an error if the last path to a device would be removed
 *
 * if only part of the devices are quiesced and an error
 * occurs no onlining necessary, the storage server will
 * notify the already set offline devices again
 */
static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
				  struct dasd_cuir_message *cuir)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long paths = 0;	/* accumulated mask of quiesced paths */
	unsigned long flags;
	int tbcpm;

	/* active devices */
	list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
				 alias_list) {
		/* the ccwdev lock serialises the path-mask update per device */
		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
		/* negative tbcpm: removing the path would leave none, abort */
		if (tbcpm < 0)
			goto out_err;
		paths |= tbcpm;
	}
	/* inactive devices */
	list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
				 alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
		if (tbcpm < 0)
			goto out_err;
		paths |= tbcpm;
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist, group) {
		list_for_each_entry_safe(dev, n, &pavgroup->baselist,
					 alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
			spin_unlock_irqrestore(
				get_ccwdev_lock(dev->cdev), flags);
			if (tbcpm < 0)
				goto out_err;
			paths |= tbcpm;
		}
		list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
					 alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
			spin_unlock_irqrestore(
				get_ccwdev_lock(dev->cdev), flags);
			if (tbcpm < 0)
				goto out_err;
			paths |= tbcpm;
		}
	}
	/* notify user about all paths affected by CUIR action */
	dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
	return 0;
out_err:
	return tbcpm;
}
  5021. static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
  5022. struct dasd_cuir_message *cuir)
  5023. {
  5024. struct dasd_eckd_private *private = device->private;
  5025. struct alias_pav_group *pavgroup, *tempgroup;
  5026. struct dasd_device *dev, *n;
  5027. unsigned long paths = 0;
  5028. int tbcpm;
  5029. /*
  5030. * the path may have been added through a generic path event before
  5031. * only trigger path verification if the path is not already in use
  5032. */
  5033. list_for_each_entry_safe(dev, n,
  5034. &private->lcu->active_devices,
  5035. alias_list) {
  5036. tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
  5037. paths |= tbcpm;
  5038. if (!(dasd_path_get_opm(dev) & tbcpm)) {
  5039. dasd_path_add_tbvpm(dev, tbcpm);
  5040. dasd_schedule_device_bh(dev);
  5041. }
  5042. }
  5043. list_for_each_entry_safe(dev, n,
  5044. &private->lcu->inactive_devices,
  5045. alias_list) {
  5046. tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
  5047. paths |= tbcpm;
  5048. if (!(dasd_path_get_opm(dev) & tbcpm)) {
  5049. dasd_path_add_tbvpm(dev, tbcpm);
  5050. dasd_schedule_device_bh(dev);
  5051. }
  5052. }
  5053. /* devices in PAV groups */
  5054. list_for_each_entry_safe(pavgroup, tempgroup,
  5055. &private->lcu->grouplist,
  5056. group) {
  5057. list_for_each_entry_safe(dev, n,
  5058. &pavgroup->baselist,
  5059. alias_list) {
  5060. tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
  5061. paths |= tbcpm;
  5062. if (!(dasd_path_get_opm(dev) & tbcpm)) {
  5063. dasd_path_add_tbvpm(dev, tbcpm);
  5064. dasd_schedule_device_bh(dev);
  5065. }
  5066. }
  5067. list_for_each_entry_safe(dev, n,
  5068. &pavgroup->aliaslist,
  5069. alias_list) {
  5070. tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
  5071. paths |= tbcpm;
  5072. if (!(dasd_path_get_opm(dev) & tbcpm)) {
  5073. dasd_path_add_tbvpm(dev, tbcpm);
  5074. dasd_schedule_device_bh(dev);
  5075. }
  5076. }
  5077. }
  5078. /* notify user about all paths affected by CUIR action */
  5079. dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
  5080. return 0;
  5081. }
  5082. static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
  5083. __u8 lpum)
  5084. {
  5085. struct dasd_cuir_message *cuir = messages;
  5086. int response;
  5087. DBF_DEV_EVENT(DBF_WARNING, device,
  5088. "CUIR request: %016llx %016llx %016llx %08x",
  5089. ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
  5090. ((u32 *)cuir)[3]);
  5091. if (cuir->code == CUIR_QUIESCE) {
  5092. /* quiesce */
  5093. if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
  5094. response = PSF_CUIR_LAST_PATH;
  5095. else
  5096. response = PSF_CUIR_COMPLETED;
  5097. } else if (cuir->code == CUIR_RESUME) {
  5098. /* resume */
  5099. dasd_eckd_cuir_resume(device, lpum, cuir);
  5100. response = PSF_CUIR_COMPLETED;
  5101. } else
  5102. response = PSF_CUIR_NOT_SUPPORTED;
  5103. dasd_eckd_psf_cuir_response(device, response,
  5104. cuir->message_id, lpum);
  5105. DBF_DEV_EVENT(DBF_WARNING, device,
  5106. "CUIR response: %d on message ID %08x", response,
  5107. cuir->message_id);
  5108. /* to make sure there is no attention left schedule work again */
  5109. device->discipline->check_attention(device, lpum);
  5110. }
  5111. static void dasd_eckd_check_attention_work(struct work_struct *work)
  5112. {
  5113. struct check_attention_work_data *data;
  5114. struct dasd_rssd_messages *messages;
  5115. struct dasd_device *device;
  5116. int rc;
  5117. data = container_of(work, struct check_attention_work_data, worker);
  5118. device = data->device;
  5119. messages = kzalloc(sizeof(*messages), GFP_KERNEL);
  5120. if (!messages) {
  5121. DBF_DEV_EVENT(DBF_WARNING, device, "%s",
  5122. "Could not allocate attention message buffer");
  5123. goto out;
  5124. }
  5125. rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
  5126. if (rc)
  5127. goto out;
  5128. if (messages->length == ATTENTION_LENGTH_CUIR &&
  5129. messages->format == ATTENTION_FORMAT_CUIR)
  5130. dasd_eckd_handle_cuir(device, messages, data->lpum);
  5131. out:
  5132. dasd_put_device(device);
  5133. kfree(messages);
  5134. kfree(data);
  5135. }
  5136. static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
  5137. {
  5138. struct check_attention_work_data *data;
  5139. data = kzalloc(sizeof(*data), GFP_ATOMIC);
  5140. if (!data)
  5141. return -ENOMEM;
  5142. INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
  5143. dasd_get_device(device);
  5144. data->device = device;
  5145. data->lpum = lpum;
  5146. schedule_work(&data->worker);
  5147. return 0;
  5148. }
  5149. static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
  5150. {
  5151. if (~lpum & dasd_path_get_opm(device)) {
  5152. dasd_path_add_nohpfpm(device, lpum);
  5153. dasd_path_remove_opm(device, lpum);
  5154. dev_err(&device->cdev->dev,
  5155. "Channel path %02X lost HPF functionality and is disabled\n",
  5156. lpum);
  5157. return 1;
  5158. }
  5159. return 0;
  5160. }
  5161. static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
  5162. {
  5163. struct dasd_eckd_private *private = device->private;
  5164. dev_err(&device->cdev->dev,
  5165. "High Performance FICON disabled\n");
  5166. private->fcx_max_data = 0;
  5167. }
  5168. static int dasd_eckd_hpf_enabled(struct dasd_device *device)
  5169. {
  5170. struct dasd_eckd_private *private = device->private;
  5171. return private->fcx_max_data ? 1 : 0;
  5172. }
  5173. static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
  5174. struct irb *irb)
  5175. {
  5176. struct dasd_eckd_private *private = device->private;
  5177. if (!private->fcx_max_data) {
  5178. /* sanity check for no HPF, the error makes no sense */
  5179. DBF_DEV_EVENT(DBF_WARNING, device, "%s",
  5180. "Trying to disable HPF for a non HPF device");
  5181. return;
  5182. }
  5183. if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
  5184. dasd_eckd_disable_hpf_device(device);
  5185. } else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
  5186. if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
  5187. return;
  5188. dasd_eckd_disable_hpf_device(device);
  5189. dasd_path_set_tbvpm(device,
  5190. dasd_path_get_hpfpm(device));
  5191. }
  5192. /*
  5193. * prevent that any new I/O ist started on the device and schedule a
  5194. * requeue of existing requests
  5195. */
  5196. dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
  5197. dasd_schedule_requeue(device);
  5198. }
/*
 * CCW bus driver for ECKD DASD devices: wires the ECKD-specific probe and
 * set_online handlers, and the generic DASD callbacks, into the CCW
 * device framework.
 */
static struct ccw_driver dasd_eckd_driver = {
	.driver = {
		.name = "dasd-eckd",
		.owner = THIS_MODULE,
	},
	.ids = dasd_eckd_ids,	/* device-ID table (defined elsewhere in file) */
	.probe = dasd_eckd_probe,
	.remove = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online = dasd_eckd_set_online,
	.notify = dasd_generic_notify,
	.path_event = dasd_generic_path_event,
	.shutdown = dasd_generic_shutdown,
	/* power management callbacks */
	.freeze = dasd_generic_pm_freeze,
	.thaw = dasd_generic_restore_device,
	.restore = dasd_generic_restore_device,
	.uc_handler = dasd_generic_uc_handler,
	.int_class = IRQIO_DAS,	/* interrupt accounting class */
};
/*
 * max_blocks is dependent on the amount of storage that is available
 * in the static io buffer for each device. Currently each device has
 * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
 * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
 * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
 * addition we have one define extent ccw + 16 bytes of data and one
 * locate record ccw + 16 bytes of data. That makes:
 * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
 * We want to fit two into the available memory so that we can immediately
 * start the next request if one finishes off. That makes 249.5 blocks
 * for one request. Give a little safety and the result is 240.
 *
 * NOTE(review): the calculation above arrives at 240, but .max_blocks
 * below is set to 190 - presumably the struct sizes grew after this
 * comment was written; confirm the numbers and update the math.
 */
static struct dasd_discipline dasd_eckd_discipline = {
	.owner = THIS_MODULE,
	.name = "ECKD",
	.ebcname = "ECKD",	/* converted to EBCDIC in dasd_eckd_init() */
	.max_blocks = 190,
	.check_device = dasd_eckd_check_characteristics,
	.uncheck_device = dasd_eckd_uncheck_device,
	.do_analysis = dasd_eckd_do_analysis,
	.verify_path = dasd_eckd_verify_path,
	.basic_to_ready = dasd_eckd_basic_to_ready,
	.online_to_ready = dasd_eckd_online_to_ready,
	.basic_to_known = dasd_eckd_basic_to_known,
	.fill_geometry = dasd_eckd_fill_geometry,
	.start_IO = dasd_start_IO,
	.term_IO = dasd_term_IO,
	.handle_terminated_request = dasd_eckd_handle_terminated_request,
	.format_device = dasd_eckd_format_device,
	.check_device_format = dasd_eckd_check_device_format,
	.erp_action = dasd_eckd_erp_action,
	.erp_postaction = dasd_eckd_erp_postaction,
	.check_for_device_change = dasd_eckd_check_for_device_change,
	.build_cp = dasd_eckd_build_alias_cp,
	.free_cp = dasd_eckd_free_alias_cp,
	.dump_sense = dasd_eckd_dump_sense,
	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
	.fill_info = dasd_eckd_fill_info,
	.ioctl = dasd_eckd_ioctl,
	.freeze = dasd_eckd_pm_freeze,
	.restore = dasd_eckd_restore_device,
	.reload = dasd_eckd_reload_device,
	.get_uid = dasd_eckd_get_uid,
	.kick_validate = dasd_eckd_kick_validate_server,
	.check_attention = dasd_eckd_check_attention,
	.host_access_count = dasd_eckd_host_access_count,
	.hosts_print = dasd_hosts_print,
	.handle_hpf_error = dasd_eckd_handle_hpf_error,
	.disable_hpf = dasd_eckd_disable_hpf_device,
	.hpf_enabled = dasd_eckd_hpf_enabled,
	.reset_path = dasd_eckd_reset_path,
};
  5271. static int __init
  5272. dasd_eckd_init(void)
  5273. {
  5274. int ret;
  5275. ASCEBC(dasd_eckd_discipline.ebcname, 4);
  5276. dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
  5277. GFP_KERNEL | GFP_DMA);
  5278. if (!dasd_reserve_req)
  5279. return -ENOMEM;
  5280. path_verification_worker = kmalloc(sizeof(*path_verification_worker),
  5281. GFP_KERNEL | GFP_DMA);
  5282. if (!path_verification_worker) {
  5283. kfree(dasd_reserve_req);
  5284. return -ENOMEM;
  5285. }
  5286. rawpadpage = (void *)__get_free_page(GFP_KERNEL);
  5287. if (!rawpadpage) {
  5288. kfree(path_verification_worker);
  5289. kfree(dasd_reserve_req);
  5290. return -ENOMEM;
  5291. }
  5292. ret = ccw_driver_register(&dasd_eckd_driver);
  5293. if (!ret)
  5294. wait_for_device_probe();
  5295. else {
  5296. kfree(path_verification_worker);
  5297. kfree(dasd_reserve_req);
  5298. free_page((unsigned long)rawpadpage);
  5299. }
  5300. return ret;
  5301. }
  5302. static void __exit
  5303. dasd_eckd_cleanup(void)
  5304. {
  5305. ccw_driver_unregister(&dasd_eckd_driver);
  5306. kfree(path_verification_worker);
  5307. kfree(dasd_reserve_req);
  5308. free_page((unsigned long)rawpadpage);
  5309. }
/* module entry and exit points */
module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup);