nand_base.c 176 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
762786279628062816282628362846285628662876288628962906291629262936294629562966297629862996300630163026303630463056306630763086309631063116312631363146315631663176318631963206321632263236324632563266327632863296330633163326333633463356336633763386339634063416342634363446345634663476348634963506351635263536354635563566357635863596360636163626363636463656366636763686369637063716372637363746375637663776378637963806381638263836384638563866387638863896390639163926393639463956396639763986399640064016402640364046405640664076408640964106411641264136414641564166417641864196420642164226423642464256426642764286429643064316432643364346435643664376438643964406441644264436444644564466447644864496450645164526453645464556456645764586459646064616462646364646465646664676468646964706471647264736474647564766477647864796480648164826483648464856486648764886489649064916492649364946495649664976498649965006501650265036504650565066507650865096510651165126513651465156516651765186519652065216522652365246525652665276528652965306531653265336534653565366537653865396540654165426543654465456546654765486549655065516552655365546555655665576558655965606561656265636564656565666567656865696570657165726573657465756576657765786579658065816582658365846585658665876588658965906591659265936594659565966597659865996600660166026603660466056606660766086609661066116612661366146615661666176618661966206621662266236624662566266627662866296630663166326633663466356636663766386639664066416642664366446645664666476648664966506651665266536654665566566657665866596660666166626663666466656666666766686669667066716672667366746675667666776678667966806681668266836684668566866687668866896690669166926693669466956696669766986699670067016702670367046705670667076708670967106711671267136714671567166717671867196720672167226723672467256726
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Overview:
  4. * This is the generic MTD driver for NAND flash devices. It should be
  5. * capable of working with almost all NAND chips currently available.
  6. *
  7. * Additional technical information is available on
  8. * http://www.linux-mtd.infradead.org/doc/nand.html
  9. *
  10. * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
  11. * 2002-2006 Thomas Gleixner (tglx@linutronix.de)
  12. *
  13. * Credits:
  14. * David Woodhouse for adding multichip support
  15. *
  16. * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
  17. * rework for 2K page size chips
  18. *
  19. * TODO:
  20. * Enable cached programming for 2k page size chips
  21. * Check, if mtd->ecctype should be set to MTD_ECC_HW
  22. * if we have HW ECC support.
  23. * BBT table is not serialized, has to be fixed
  24. */
  25. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  26. #include <linux/module.h>
  27. #include <linux/delay.h>
  28. #include <linux/errno.h>
  29. #include <linux/err.h>
  30. #include <linux/sched.h>
  31. #include <linux/slab.h>
  32. #include <linux/mm.h>
  33. #include <linux/types.h>
  34. #include <linux/mtd/mtd.h>
  35. #include <linux/mtd/nand.h>
  36. #include <linux/mtd/nand-ecc-sw-hamming.h>
  37. #include <linux/mtd/nand-ecc-sw-bch.h>
  38. #include <linux/interrupt.h>
  39. #include <linux/bitops.h>
  40. #include <linux/io.h>
  41. #include <linux/mtd/partitions.h>
  42. #include <linux/of.h>
  43. #include <linux/gpio/consumer.h>
  44. #include "internals.h"
  45. static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
  46. struct mtd_pairing_info *info)
  47. {
  48. int lastpage = (mtd->erasesize / mtd->writesize) - 1;
  49. int dist = 3;
  50. if (page == lastpage)
  51. dist = 2;
  52. if (!page || (page & 1)) {
  53. info->group = 0;
  54. info->pair = (page + 1) / 2;
  55. } else {
  56. info->group = 1;
  57. info->pair = (page + 1 - dist) / 2;
  58. }
  59. return 0;
  60. }
  61. static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd,
  62. const struct mtd_pairing_info *info)
  63. {
  64. int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2;
  65. int page = info->pair * 2;
  66. int dist = 3;
  67. if (!info->group && !info->pair)
  68. return 0;
  69. if (info->pair == lastpair && info->group)
  70. dist = 2;
  71. if (!info->group)
  72. page--;
  73. else if (info->pair)
  74. page += dist - 1;
  75. if (page >= mtd->erasesize / mtd->writesize)
  76. return -EINVAL;
  77. return page;
  78. }
  79. const struct mtd_pairing_scheme dist3_pairing_scheme = {
  80. .ngroups = 2,
  81. .get_info = nand_pairing_dist3_get_info,
  82. .get_wunit = nand_pairing_dist3_get_wunit,
  83. };
  84. static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
  85. {
  86. int ret = 0;
  87. /* Start address must align on block boundary */
  88. if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
  89. pr_debug("%s: unaligned address\n", __func__);
  90. ret = -EINVAL;
  91. }
  92. /* Length must align on block boundary */
  93. if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
  94. pr_debug("%s: length not block aligned\n", __func__);
  95. ret = -EINVAL;
  96. }
  97. return ret;
  98. }
  99. /**
  100. * nand_extract_bits - Copy unaligned bits from one buffer to another one
  101. * @dst: destination buffer
  102. * @dst_off: bit offset at which the writing starts
  103. * @src: source buffer
  104. * @src_off: bit offset at which the reading starts
  105. * @nbits: number of bits to copy from @src to @dst
  106. *
  107. * Copy bits from one memory region to another (overlap authorized).
  108. */
  109. void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
  110. unsigned int src_off, unsigned int nbits)
  111. {
  112. unsigned int tmp, n;
  113. dst += dst_off / 8;
  114. dst_off %= 8;
  115. src += src_off / 8;
  116. src_off %= 8;
  117. while (nbits) {
  118. n = min3(8 - dst_off, 8 - src_off, nbits);
  119. tmp = (*src >> src_off) & GENMASK(n - 1, 0);
  120. *dst &= ~GENMASK(n - 1 + dst_off, dst_off);
  121. *dst |= tmp << dst_off;
  122. dst_off += n;
  123. if (dst_off >= 8) {
  124. dst++;
  125. dst_off -= 8;
  126. }
  127. src_off += n;
  128. if (src_off >= 8) {
  129. src++;
  130. src_off -= 8;
  131. }
  132. nbits -= n;
  133. }
  134. }
  135. EXPORT_SYMBOL_GPL(nand_extract_bits);
  136. /**
  137. * nand_select_target() - Select a NAND target (A.K.A. die)
  138. * @chip: NAND chip object
  139. * @cs: the CS line to select. Note that this CS id is always from the chip
  140. * PoV, not the controller one
  141. *
  142. * Select a NAND target so that further operations executed on @chip go to the
  143. * selected NAND target.
  144. */
  145. void nand_select_target(struct nand_chip *chip, unsigned int cs)
  146. {
  147. /*
  148. * cs should always lie between 0 and nanddev_ntargets(), when that's
  149. * not the case it's a bug and the caller should be fixed.
  150. */
  151. if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
  152. return;
  153. chip->cur_cs = cs;
  154. if (chip->legacy.select_chip)
  155. chip->legacy.select_chip(chip, cs);
  156. }
  157. EXPORT_SYMBOL_GPL(nand_select_target);
  158. /**
  159. * nand_deselect_target() - Deselect the currently selected target
  160. * @chip: NAND chip object
  161. *
  162. * Deselect the currently selected NAND target. The result of operations
  163. * executed on @chip after the target has been deselected is undefined.
  164. */
  165. void nand_deselect_target(struct nand_chip *chip)
  166. {
  167. if (chip->legacy.select_chip)
  168. chip->legacy.select_chip(chip, -1);
  169. chip->cur_cs = -1;
  170. }
  171. EXPORT_SYMBOL_GPL(nand_deselect_target);
/**
 * nand_release_device - [GENERIC] release chip
 * @chip: NAND chip object
 *
 * Release chip lock and wake up anyone waiting on the device.
 *
 * Counterpart of nand_get_device(): drops the controller lock first, then
 * the chip lock (reverse of the acquisition order).
 */
static void nand_release_device(struct nand_chip *chip)
{
	/* Release the controller and the chip */
	mutex_unlock(&chip->controller->lock);
	mutex_unlock(&chip->lock);
}
  184. /**
  185. * nand_bbm_get_next_page - Get the next page for bad block markers
  186. * @chip: NAND chip object
  187. * @page: First page to start checking for bad block marker usage
  188. *
  189. * Returns an integer that corresponds to the page offset within a block, for
  190. * a page that is used to store bad block markers. If no more pages are
  191. * available, -EINVAL is returned.
  192. */
  193. int nand_bbm_get_next_page(struct nand_chip *chip, int page)
  194. {
  195. struct mtd_info *mtd = nand_to_mtd(chip);
  196. int last_page = ((mtd->erasesize - mtd->writesize) >>
  197. chip->page_shift) & chip->pagemask;
  198. unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
  199. | NAND_BBM_LASTPAGE;
  200. if (page == 0 && !(chip->options & bbm_flags))
  201. return 0;
  202. if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
  203. return 0;
  204. if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
  205. return 1;
  206. if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
  207. return last_page;
  208. return -EINVAL;
  209. }
/**
 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * Check, if the block is bad.
 *
 * Walks every in-block page that may carry a bad block marker (as reported
 * by nand_bbm_get_next_page()) and reads its OOB area. Returns non-zero as
 * soon as a marker is found, 0 if the block looks good, or a negative error
 * code if reading the OOB failed.
 */
static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
{
	int first_page, page_offset;
	int res;
	u8 bad;

	/* Absolute index of the first page of the block containing @ofs */
	first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
	page_offset = nand_bbm_get_next_page(chip, 0);

	while (page_offset >= 0) {
		res = chip->ecc.read_oob(chip, first_page + page_offset);
		if (res < 0)
			return res;

		bad = chip->oob_poi[chip->badblockpos];

		if (likely(chip->badblockbits == 8))
			res = bad != 0xFF;
		else
			/* Tolerate bitflips: bad only if too few bits set */
			res = hweight8(bad) < chip->badblockbits;
		if (res)
			return res;

		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return 0;
}
  239. /**
  240. * nand_region_is_secured() - Check if the region is secured
  241. * @chip: NAND chip object
  242. * @offset: Offset of the region to check
  243. * @size: Size of the region to check
  244. *
  245. * Checks if the region is secured by comparing the offset and size with the
  246. * list of secure regions obtained from DT. Returns true if the region is
  247. * secured else false.
  248. */
  249. static bool nand_region_is_secured(struct nand_chip *chip, loff_t offset, u64 size)
  250. {
  251. int i;
  252. /* Skip touching the secure regions if present */
  253. for (i = 0; i < chip->nr_secure_regions; i++) {
  254. const struct nand_secure_region *region = &chip->secure_regions[i];
  255. if (offset + size <= region->offset ||
  256. offset >= region->offset + region->size)
  257. continue;
  258. pr_debug("%s: Region 0x%llx - 0x%llx is secured!",
  259. __func__, offset, offset + size);
  260. return true;
  261. }
  262. return false;
  263. }
/*
 * nand_isbad_bbm - Check the bad block marker of a block
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * Returns non-zero if the block is bad, 0 if it is good (or if markers are
 * quirked away / expert analysis mode is on), -EIO if the block lies in a
 * secured region.
 */
static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Some chips have no usable BBM; treat every block as good */
	if (chip->options & NAND_NO_BBM_QUIRK)
		return 0;

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, ofs, mtd->erasesize))
		return -EIO;

	/* Expert analysis mode: never report blocks as bad */
	if (mtd_check_expert_analysis_mode())
		return 0;

	/* Prefer the controller-specific hook when provided */
	if (chip->legacy.block_bad)
		return chip->legacy.block_bad(chip, ofs);

	return nand_block_bad(chip, ofs);
}
/**
 * nand_get_device - [GENERIC] Get chip for selected access
 * @chip: NAND chip structure
 *
 * Lock the device and its controller for exclusive access
 *
 * Takes chip->lock first, then chip->controller->lock; released in reverse
 * order by nand_release_device(). If the chip is suspended, the chip lock is
 * dropped before sleeping and the suspended flag is re-checked under the
 * lock after wake-up, hence the loop.
 */
static void nand_get_device(struct nand_chip *chip)
{
	/* Wait until the device is resumed. */
	while (1) {
		mutex_lock(&chip->lock);
		if (!chip->suspended) {
			mutex_lock(&chip->controller->lock);
			return;
		}
		mutex_unlock(&chip->lock);

		/* Sleep without holding any lock until resume wakes us */
		wait_event(chip->resume_wq, !chip->suspended);
	}
}
/**
 * nand_check_wp - [GENERIC] check if the chip is write protected
 * @chip: NAND chip object
 *
 * Check, if the device is write protected. The function expects, that the
 * device is already selected.
 *
 * Returns 0 if writable, 1 if write protected, or a negative error code if
 * reading the STATUS register failed.
 */
static int nand_check_wp(struct nand_chip *chip)
{
	u8 status;
	int ret;

	/* Broken xD cards report WP despite being writable */
	if (chip->options & NAND_BROKEN_XD)
		return 0;

	/* controller responsible for NAND write protect */
	if (chip->controller->controller_wp)
		return 0;

	/* Check the WP bit */
	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	/* WP bit set in STATUS means "not protected" */
	return status & NAND_STATUS_WP ? 0 : 1;
}
/**
 * nand_fill_oob - [INTERN] Transfer client buffer to oob
 * @chip: NAND chip object
 * @oob: oob data buffer
 * @len: oob data write length
 * @ops: oob ops structure
 *
 * Copies @len bytes from @oob into chip->oob_poi according to @ops->mode,
 * and returns a pointer just past the consumed client data. BUGs on an
 * unknown mode or an invalid auto-placement layout (both indicate caller
 * bugs, not runtime conditions).
 */
static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
			      struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	/*
	 * Initialise to all 0xFF, to avoid the possibility of left over OOB
	 * data from a previous OOB read.
	 */
	memset(chip->oob_poi, 0xff, mtd->oobsize);

	switch (ops->mode) {

	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_RAW:
		/* Raw/placed: copy at the caller-supplied OOB offset */
		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
		return oob + len;

	case MTD_OPS_AUTO_OOB:
		/* Auto: scatter bytes into the free OOB areas of the layout */
		ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
						  ops->ooboffs, len);
		BUG_ON(ret);
		return oob + len;

	default:
		BUG();
	}
	return NULL;
}
/**
 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * NAND write out-of-band.
 *
 * Returns 0 on success, -EINVAL if the write would run past the available
 * OOB space, -EIO if the region is secured, -EROFS if the device is write
 * protected, or the error reported by the reset / write_oob hooks.
 */
static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, page, status, len, ret;

	pr_debug("%s: to = 0x%08x, len = %i\n",
		 __func__, (unsigned int)to, (int)ops->ooblen);

	/* Available OOB bytes per page for this access mode */
	len = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
			 __func__);
		return -EINVAL;
	}

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, to, ops->ooblen))
		return -EIO;

	/* Die (target) index the offset falls into */
	chipnr = (int)(to >> chip->chip_shift);

	/*
	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
	 * of my DiskOnChip 2000 test units) will clear the whole data page too
	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
	 * it in the doc2000 driver in August 1999. dwmw2.
	 */
	ret = nand_reset(chip, chipnr);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);

	/* Shift to get page */
	page = (int)(to >> chip->page_shift);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		nand_deselect_target(chip);
		return -EROFS;
	}

	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagecache.page)
		chip->pagecache.page = -1;

	nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);

	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(chip, page & chip->pagemask);

	nand_deselect_target(chip);

	if (status)
		return status;

	ops->oobretlen = ops->ooblen;

	return 0;
}
/**
 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This is the default implementation, which can be overridden by a hardware
 * specific driver. It provides the details for writing a bad block marker to a
 * block.
 *
 * Writes 0x00 marker bytes to the OOB of every page of the block that is
 * used for bad block markers, and returns the first error encountered (if
 * any) after attempting all marker pages.
 */
static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };
	int ret = 0, res, page_offset;

	memset(&ops, 0, sizeof(ops));
	ops.oobbuf = buf;
	ops.ooboffs = chip->badblockpos;
	if (chip->options & NAND_BUSWIDTH_16) {
		/* 16-bit bus: align the offset and write a full word */
		ops.ooboffs &= ~0x01;
		ops.len = ops.ooblen = 2;
	} else {
		ops.len = ops.ooblen = 1;
	}
	ops.mode = MTD_OPS_PLACE_OOB;

	/* Write to first/second/last page(s) as dictated by the chip flags */
	page_offset = nand_bbm_get_next_page(chip, 0);
	while (page_offset >= 0) {
		res = nand_do_write_oob(chip,
					ofs + (page_offset * mtd->writesize),
					&ops);

		/* Keep the first error, but still try the remaining pages */
		if (!ret)
			ret = res;

		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
	}

	return ret;
}
/**
 * nand_markbad_bbm - mark a block by updating the BBM
 * @chip: NAND chip object
 * @ofs: offset of the block to mark bad
 *
 * Dispatches to the controller-specific hook when available, otherwise falls
 * back to the default marker-writing implementation. Returns 0 on success or
 * a negative error code.
 */
int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
{
	if (chip->legacy.block_markbad)
		return chip->legacy.block_markbad(chip, ofs);

	return nand_default_block_markbad(chip, ofs);
}
/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This function performs the generic NAND bad block marking steps (i.e., bad
 * block table(s) and/or marker(s)). We only allow the hardware driver to
 * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
 *
 * We try operations in the following order:
 *
 * (1) erase the affected block, to allow OOB marker to be written cleanly
 * (2) write bad block marker to OOB area of affected block (unless flag
 * NAND_BBT_NO_OOB_BBM is present)
 * (3) update the BBT
 *
 * Note that we retain the first error encountered in (2) or (3), finish the
 * procedures, and dump the error in the end.
 */
static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		/* Erase result deliberately ignored: marking proceeds anyway */
		nand_erase_nand(chip, &einfo, 0);

		/* Write bad block marker to OOB */
		nand_get_device(chip);
		ret = nand_markbad_bbm(chip, ofs);
		nand_release_device(chip);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(chip, ofs);
		/* Keep the first error (from the OOB write, if any) */
		if (!ret)
			ret = res;
	}

	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
/**
 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * Check if the block is marked as reserved.
 *
 * Without a bad block table there is no notion of reserved blocks, so 0 is
 * returned in that case.
 */
static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (!chip->bbt)
		return 0;

	/* Return info from the table */
	return nand_isreserved_bbt(chip, ofs);
}
/**
 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 * @allowbbt: 1, if its allowed to access the bbt area
 *
 * Check, if the block is bad. Either by reading the bad block table or
 * calling of the scan function.
 */
static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
{
	/* Return info from the table */
	if (chip->bbt)
		return nand_isbad_bbt(chip, ofs, allowbbt);

	/* No BBT: read the on-flash marker(s) instead */
	return nand_isbad_bbm(chip, ofs);
}
  532. /**
  533. * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
  534. * @chip: NAND chip structure
  535. * @timeout_ms: Timeout in ms
  536. *
  537. * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
  538. * If that does not happen whitin the specified timeout, -ETIMEDOUT is
  539. * returned.
  540. *
  541. * This helper is intended to be used when the controller does not have access
  542. * to the NAND R/B pin.
  543. *
  544. * Be aware that calling this helper from an ->exec_op() implementation means
  545. * ->exec_op() must be re-entrant.
  546. *
  547. * Return 0 if the NAND chip is ready, a negative error otherwise.
  548. */
  549. int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
  550. {
  551. const struct nand_interface_config *conf;
  552. u8 status = 0;
  553. int ret;
  554. if (!nand_has_exec_op(chip))
  555. return -ENOTSUPP;
  556. /* Wait tWB before polling the STATUS reg. */
  557. conf = nand_get_interface_config(chip);
  558. ndelay(NAND_COMMON_TIMING_NS(conf, tWB_max));
  559. ret = nand_status_op(chip, NULL);
  560. if (ret)
  561. return ret;
  562. /*
  563. * +1 below is necessary because if we are now in the last fraction
  564. * of jiffy and msecs_to_jiffies is 1 then we will wait only that
  565. * small jiffy fraction - possibly leading to false timeout
  566. */
  567. timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
  568. do {
  569. ret = nand_read_data_op(chip, &status, sizeof(status), true,
  570. false);
  571. if (ret)
  572. break;
  573. if (status & NAND_STATUS_READY)
  574. break;
  575. /*
  576. * Typical lowest execution time for a tR on most NANDs is 10us,
  577. * use this as polling delay before doing something smarter (ie.
  578. * deriving a delay from the timeout value, timeout_ms/ratio).
  579. */
  580. udelay(10);
  581. } while (time_before(jiffies, timeout_ms));
  582. /*
  583. * We have to exit READ_STATUS mode in order to read real data on the
  584. * bus in case the WAITRDY instruction is preceding a DATA_IN
  585. * instruction.
  586. */
  587. nand_exit_status_op(chip);
  588. if (ret)
  589. return ret;
  590. return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
  591. };
  592. EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
  593. /**
  594. * nand_gpio_waitrdy - Poll R/B GPIO pin until ready
  595. * @chip: NAND chip structure
  596. * @gpiod: GPIO descriptor of R/B pin
  597. * @timeout_ms: Timeout in ms
  598. *
  599. * Poll the R/B GPIO pin until it becomes ready. If that does not happen
  600. * whitin the specified timeout, -ETIMEDOUT is returned.
  601. *
  602. * This helper is intended to be used when the controller has access to the
  603. * NAND R/B pin over GPIO.
  604. *
  605. * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
  606. */
  607. int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
  608. unsigned long timeout_ms)
  609. {
  610. /*
  611. * Wait until R/B pin indicates chip is ready or timeout occurs.
  612. * +1 below is necessary because if we are now in the last fraction
  613. * of jiffy and msecs_to_jiffies is 1 then we will wait only that
  614. * small jiffy fraction - possibly leading to false timeout.
  615. */
  616. timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
  617. do {
  618. if (gpiod_get_value_cansleep(gpiod))
  619. return 0;
  620. cond_resched();
  621. } while (time_before(jiffies, timeout_ms));
  622. return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
  623. };
  624. EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
/**
 * panic_nand_wait - [GENERIC] wait until the command is done
 * @chip: NAND chip structure
 * @timeo: timeout
 *
 * Wait for command done. This is a helper function for nand_wait used when
 * we are in interrupt context. May happen when in panic and trying to write
 * an oops through mtdoops.
 *
 * Busy-waits (mdelay) rather than sleeping, since sleeping is not possible
 * in this context. @timeo is expressed in milliseconds.
 */
void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
{
	int i;

	for (i = 0; i < timeo; i++) {
		if (chip->legacy.dev_ready) {
			if (chip->legacy.dev_ready(chip))
				break;
		} else {
			int ret;
			u8 status;

			/* No R/B hook: poll the STATUS byte directly */
			ret = nand_read_data_op(chip, &status, sizeof(status),
						true, false);
			if (ret)
				return;

			if (status & NAND_STATUS_READY)
				break;
		}
		mdelay(1);
	}
}
/* True if the chip supports GET FEATURES for feature address @addr */
static bool nand_supports_get_features(struct nand_chip *chip, int addr)
{
	return (chip->parameters.supports_set_get_features &&
		test_bit(addr, chip->parameters.get_feature_list));
}
/* True if the chip supports SET FEATURES for feature address @addr */
static bool nand_supports_set_features(struct nand_chip *chip, int addr)
{
	return (chip->parameters.supports_set_get_features &&
		test_bit(addr, chip->parameters.set_feature_list));
}
/**
 * nand_reset_interface - Reset data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Reset the Data interface and timings to ONFI mode 0.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_reset_interface(struct nand_chip *chip, int chipnr)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int ret;

	/* Nothing to do when the controller cannot change timings */
	if (!nand_controller_can_setup_interface(chip))
		return 0;

	/*
	 * The ONFI specification says:
	 * "
	 * To transition from NV-DDR or NV-DDR2 to the SDR data
	 * interface, the host shall use the Reset (FFh) command
	 * using SDR timing mode 0. A device in any timing mode is
	 * required to recognize Reset (FFh) command issued in SDR
	 * timing mode 0.
	 * "
	 *
	 * Configure the data interface in SDR mode and set the
	 * timings to timing mode 0.
	 */

	chip->current_interface_config = nand_get_reset_interface_config();
	ret = ops->setup_interface(chip, chipnr,
				   chip->current_interface_config);
	if (ret)
		pr_err("Failed to configure data interface to SDR timing mode 0\n");

	return ret;
}
/**
 * nand_setup_interface - Setup the best data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Configure what has been reported to be the best data interface and NAND
 * timings supported by the chip and the driver.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_setup_interface(struct nand_chip *chip, int chipnr)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { }, request;
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	/*
	 * A nand_reset_interface() put both the NAND chip and the NAND
	 * controller in timings mode 0. If the default mode for this chip is
	 * also 0, no need to proceed to the change again. Plus, at probe time,
	 * nand_setup_interface() uses ->set/get_features() which would
	 * fail anyway as the parameter page is not available yet.
	 */
	if (!chip->best_interface_config)
		return 0;

	/* Encode interface type + mode into the ONFI feature byte */
	request = chip->best_interface_config->timings.mode;
	if (nand_interface_is_sdr(chip->best_interface_config))
		request |= ONFI_DATA_INTERFACE_SDR;
	else
		request |= ONFI_DATA_INTERFACE_NVDDR;
	tmode_param[0] = request;

	/* Change the mode on the chip side (if supported by the NAND chip) */
	if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
		nand_select_target(chip, chipnr);
		ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
					tmode_param);
		nand_deselect_target(chip);
		if (ret)
			return ret;
	}

	/* Change the mode on the controller side */
	ret = ops->setup_interface(chip, chipnr, chip->best_interface_config);
	if (ret)
		return ret;

	/* Check the mode has been accepted by the chip, if supported */
	if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
		goto update_interface_config;

	memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
	nand_select_target(chip, chipnr);
	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				tmode_param);
	nand_deselect_target(chip);
	if (ret)
		goto err_reset_chip;

	if (request != tmode_param[0]) {
		pr_warn("%s timing mode %d not acknowledged by the NAND chip\n",
			nand_interface_is_nvddr(chip->best_interface_config) ? "NV-DDR" : "SDR",
			chip->best_interface_config->timings.mode);
		pr_debug("NAND chip would work in %s timing mode %d\n",
			 tmode_param[0] & ONFI_DATA_INTERFACE_NVDDR ? "NV-DDR" : "SDR",
			 (unsigned int)ONFI_TIMING_MODE_PARAM(tmode_param[0]));
		goto err_reset_chip;
	}

update_interface_config:
	chip->current_interface_config = chip->best_interface_config;

	return 0;

err_reset_chip:
	/*
	 * Fallback to mode 0 if the chip explicitly did not ack the chosen
	 * timing mode.
	 */
	nand_reset_interface(chip, chipnr);
	nand_select_target(chip, chipnr);
	nand_reset_op(chip);
	nand_deselect_target(chip);

	return ret;
}
/**
 * nand_choose_best_sdr_timings - Pick up the best SDR timings that both the
 * NAND controller and the NAND chip support
 * @chip: the NAND chip
 * @iface: the interface configuration (can eventually be updated)
 * @spec_timings: specific timings, when not fitting the ONFI specification
 *
 * If specific timings are provided, use them. Otherwise, retrieve supported
 * timing modes from ONFI information.
 *
 * On success, @iface is stored in chip->best_interface_config. Returns 0 if
 * a supported mode was found, a negative error (e.g. -EOPNOTSUPP) otherwise.
 */
int nand_choose_best_sdr_timings(struct nand_chip *chip,
				 struct nand_interface_config *iface,
				 struct nand_sdr_timings *spec_timings)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int best_mode = 0, mode, ret = -EOPNOTSUPP;

	iface->type = NAND_SDR_IFACE;

	if (spec_timings) {
		iface->timings.sdr = *spec_timings;
		iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings);

		/* Verify the controller supports the requested interface */
		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			return ret;
		}

		/* Fallback to slower modes */
		best_mode = iface->timings.mode;
	} else if (chip->parameters.onfi) {
		/* Highest mode bit advertised by the ONFI parameter page */
		best_mode = fls(chip->parameters.onfi->sdr_timing_modes) - 1;
	}

	/* Try modes from fastest to slowest until the controller accepts one */
	for (mode = best_mode; mode >= 0; mode--) {
		onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);

		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			break;
		}
	}

	return ret;
}
/**
 * nand_choose_best_nvddr_timings - Pick up the best NVDDR timings that both the
 * NAND controller and the NAND chip support
 * @chip: the NAND chip
 * @iface: the interface configuration (can eventually be updated)
 * @spec_timings: specific timings, when not fitting the ONFI specification
 *
 * If specific timings are provided, use them. Otherwise, retrieve supported
 * timing modes from ONFI information.
 *
 * On success, @iface is stored in chip->best_interface_config. Returns 0 if
 * a supported mode was found, a negative error (e.g. -EOPNOTSUPP) otherwise.
 */
int nand_choose_best_nvddr_timings(struct nand_chip *chip,
				   struct nand_interface_config *iface,
				   struct nand_nvddr_timings *spec_timings)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int best_mode = 0, mode, ret = -EOPNOTSUPP;

	iface->type = NAND_NVDDR_IFACE;

	if (spec_timings) {
		iface->timings.nvddr = *spec_timings;
		iface->timings.mode = onfi_find_closest_nvddr_mode(spec_timings);

		/* Verify the controller supports the requested interface */
		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			return ret;
		}

		/* Fallback to slower modes */
		best_mode = iface->timings.mode;
	} else if (chip->parameters.onfi) {
		/* Highest mode bit advertised by the ONFI parameter page */
		best_mode = fls(chip->parameters.onfi->nvddr_timing_modes) - 1;
	}

	/* Try modes from fastest to slowest until the controller accepts one */
	for (mode = best_mode; mode >= 0; mode--) {
		onfi_fill_interface_config(chip, iface, NAND_NVDDR_IFACE, mode);

		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			break;
		}
	}

	return ret;
}
/**
 * nand_choose_best_timings - Pick up the best NVDDR or SDR timings that both
 * NAND controller and the NAND chip support
 * @chip: the NAND chip
 * @iface: the interface configuration (can eventually be updated)
 *
 * If specific timings are provided, use them. Otherwise, retrieve supported
 * timing modes from ONFI information.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_choose_best_timings(struct nand_chip *chip,
				    struct nand_interface_config *iface)
{
	int ret;

	/* Try the fastest timings: NV-DDR */
	ret = nand_choose_best_nvddr_timings(chip, iface, NULL);
	if (!ret)
		return 0;

	/* Fallback to SDR timings otherwise */
	return nand_choose_best_sdr_timings(chip, iface, NULL);
}
/**
 * nand_choose_interface_config - find the best data interface and timings
 * @chip: The NAND chip
 *
 * Find the best data interface and NAND timings supported by the chip
 * and the driver. Eventually let the NAND manufacturer driver propose his own
 * set of timings.
 *
 * After this function nand_chip->interface_config is initialized with the best
 * timing mode available.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_choose_interface_config(struct nand_chip *chip)
{
	struct nand_interface_config *iface;
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	iface = kzalloc(sizeof(*iface), GFP_KERNEL);
	if (!iface)
		return -ENOMEM;

	if (chip->ops.choose_interface_config)
		ret = chip->ops.choose_interface_config(chip, iface);
	else
		ret = nand_choose_best_timings(chip, iface);

	/*
	 * On success, ownership of @iface is transferred: the callees store it
	 * in chip->best_interface_config. Only free it on failure.
	 */
	if (ret)
		kfree(iface);

	return ret;
}
/**
 * nand_fill_column_cycles - fill the column cycles of an address
 * @chip: The NAND chip
 * @addrs: Array of address cycles to fill
 * @offset_in_page: The offset in the page
 *
 * Fills the first or the first two bytes of the @addrs field depending
 * on the NAND bus width and the page size.
 *
 * Returns the number of cycles needed to encode the column, or a negative
 * error code in case one of the arguments is invalid.
 */
static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
				   unsigned int offset_in_page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	/* writesize == 0 means the chip geometry is not known yet */
	bool ident_stage = !mtd->writesize;

	/* Bypass all checks during NAND identification */
	if (likely(!ident_stage)) {
		/* Make sure the offset is less than the actual page size. */
		if (offset_in_page > mtd->writesize + mtd->oobsize)
			return -EINVAL;

		/*
		 * On small page NANDs, there's a dedicated command to access the OOB
		 * area, and the column address is relative to the start of the OOB
		 * area, not the start of the page. Adjust the address accordingly.
		 */
		if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
			offset_in_page -= mtd->writesize;

		/*
		 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
		 * wide, then it must be divided by 2.
		 */
		if (chip->options & NAND_BUSWIDTH_16) {
			if (WARN_ON(offset_in_page % 2))
				return -EINVAL;

			offset_in_page /= 2;
		}
	}

	addrs[0] = offset_in_page;

	/*
	 * Small page NANDs use 1 cycle for the columns, while large page NANDs
	 * need 2
	 */
	if (!ident_stage && mtd->writesize <= 512)
		return 1;

	addrs[1] = offset_in_page >> 8;

	return 2;
}
/*
 * nand_sp_exec_read_page_op - Read (part of) a page on a small-page NAND
 * using ->exec_op(). @len == 0 drops the data-in phase. Returns the result
 * of nand_exec_op() or a negative error from column-cycle encoding.
 */
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[4];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(3, addrs, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	/* Pick READOOB/READ1 when the read starts in the OOB or second half */
	if (offset_in_page >= mtd->writesize)
		instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
	else if (offset_in_page >= 256 &&
		 !(chip->options & NAND_BUSWIDTH_16))
		instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;

	/* addrs[0] = column, addrs[1..] = row (page) cycles */
	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	addrs[1] = page;
	addrs[2] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[3] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
/*
 * nand_lp_exec_read_page_op - Read (part of) a page on a large-page NAND
 * using ->exec_op() (READ0 + READSTART sequence). @len == 0 drops the
 * data-in phase. Returns the result of nand_exec_op() or a negative error
 * from column-cycle encoding.
 */
static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	u8 addrs[5];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	/* addrs[0..1] = column cycles, addrs[2..] = row (page) cycles */
	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	addrs[2] = page;
	addrs[3] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
  1029. static unsigned int rawnand_last_page_of_lun(unsigned int pages_per_lun, unsigned int lun)
  1030. {
  1031. /* lun is expected to be very small */
  1032. return (lun * pages_per_lun) + pages_per_lun - 1;
  1033. }
/*
 * rawnand_cap_cont_reads - Cap an ongoing sequential (continuous) cache read
 * so it never crosses a LUN boundary, by setting cont_read.pause_page and,
 * when no multi-page window remains, clearing cont_read.ongoing.
 */
static void rawnand_cap_cont_reads(struct nand_chip *chip)
{
	struct nand_memory_organization *memorg;
	unsigned int ppl, first_lun, last_lun;

	memorg = nanddev_get_memorg(&chip->base);
	/* Pages per LUN */
	ppl = memorg->pages_per_eraseblock * memorg->eraseblocks_per_lun;
	first_lun = chip->cont_read.first_page / ppl;
	last_lun = chip->cont_read.last_page / ppl;

	/* Prevent sequential cache reads across LUN boundaries */
	if (first_lun != last_lun)
		chip->cont_read.pause_page = rawnand_last_page_of_lun(ppl, first_lun);
	else
		chip->cont_read.pause_page = chip->cont_read.last_page;

	/* Window starts exactly at the pause page: move into the next LUN */
	if (chip->cont_read.first_page == chip->cont_read.pause_page) {
		chip->cont_read.first_page++;
		chip->cont_read.pause_page = min(chip->cont_read.last_page,
						 rawnand_last_page_of_lun(ppl, first_lun + 1));
	}

	/* Nothing left to read sequentially */
	if (chip->cont_read.first_page >= chip->cont_read.last_page)
		chip->cont_read.ongoing = false;
}
/*
 * Sequential cache read for large page chips, exec_op interface.
 *
 * The first page of the window (cont_read.first_page) uses the full
 * READ0/READSTART sequence followed by READCACHESEQ; subsequent pages only
 * need READCACHESEQ, and the window's pause page ends the burst with
 * READCACHEEND. With @check_only set, the two operations are only validated
 * against the controller's capabilities, nothing is executed.
 */
static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip, unsigned int page,
					  unsigned int offset_in_page, void *buf,
					  unsigned int len, bool check_only)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	u8 addrs[5];
	struct nand_op_instr start_instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), 0),
		NAND_OP_CMD(NAND_CMD_READCACHESEQ, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_op_instr cont_instrs[] = {
		/* The pause page terminates the sequential burst. */
		NAND_OP_CMD(page == chip->cont_read.pause_page ?
			    NAND_CMD_READCACHEEND : NAND_CMD_READCACHESEQ,
			    NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation start_op = NAND_OPERATION(chip->cur_cs, start_instrs);
	struct nand_operation cont_op = NAND_OPERATION(chip->cur_cs, cont_instrs);
	int ret;

	/* Drop the trailing DATA_IN instructions when there is no data. */
	if (!len) {
		start_op.ninstrs--;
		cont_op.ninstrs--;
	}

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Row (page) address after the two column cycles. */
	addrs[2] = page;
	addrs[3] = page >> 8;
	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		start_instrs[1].ctx.addr.naddrs++;
	}

	/* Check if cache reads are supported */
	if (check_only) {
		if (nand_check_op(chip, &start_op) || nand_check_op(chip, &cont_op))
			return -EOPNOTSUPP;

		return 0;
	}

	if (page == chip->cont_read.first_page)
		ret = nand_exec_op(chip, &start_op);
	else
		ret = nand_exec_op(chip, &cont_op);
	if (ret)
		return ret;

	if (!chip->cont_read.ongoing)
		return 0;

	/*
	 * Either stop (last page reached) or re-arm the window on the next
	 * LUN (pause page reached).
	 */
	if (page == chip->cont_read.last_page) {
		chip->cont_read.ongoing = false;
	} else if (page == chip->cont_read.pause_page) {
		chip->cont_read.first_page++;
		rawnand_cap_cont_reads(chip);
	}

	return 0;
}
  1118. static bool rawnand_cont_read_ongoing(struct nand_chip *chip, unsigned int page)
  1119. {
  1120. return chip->cont_read.ongoing && page >= chip->cont_read.first_page;
  1121. }
  1122. /**
  1123. * nand_read_page_op - Do a READ PAGE operation
  1124. * @chip: The NAND chip
  1125. * @page: page to read
  1126. * @offset_in_page: offset within the page
  1127. * @buf: buffer used to store the data
  1128. * @len: length of the buffer
  1129. *
  1130. * This function issues a READ PAGE operation.
  1131. * This function does not select/unselect the CS line.
  1132. *
  1133. * Returns 0 on success, a negative error code otherwise.
  1134. */
  1135. int nand_read_page_op(struct nand_chip *chip, unsigned int page,
  1136. unsigned int offset_in_page, void *buf, unsigned int len)
  1137. {
  1138. struct mtd_info *mtd = nand_to_mtd(chip);
  1139. if (len && !buf)
  1140. return -EINVAL;
  1141. if (offset_in_page + len > mtd->writesize + mtd->oobsize)
  1142. return -EINVAL;
  1143. if (nand_has_exec_op(chip)) {
  1144. if (mtd->writesize > 512) {
  1145. if (rawnand_cont_read_ongoing(chip, page))
  1146. return nand_lp_exec_cont_read_page_op(chip, page,
  1147. offset_in_page,
  1148. buf, len, false);
  1149. else
  1150. return nand_lp_exec_read_page_op(chip, page,
  1151. offset_in_page, buf,
  1152. len);
  1153. }
  1154. return nand_sp_exec_read_page_op(chip, page, offset_in_page,
  1155. buf, len);
  1156. }
  1157. chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
  1158. if (len)
  1159. chip->legacy.read_buf(chip, buf, len);
  1160. return 0;
  1161. }
  1162. EXPORT_SYMBOL_GPL(nand_read_page_op);
/**
 * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
 * @chip: The NAND chip
 * @page: parameter page to read
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PARAMETER PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
			    unsigned int len)
{
	unsigned int i;
	u8 *p = buf;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PARAM, 0),
			NAND_OP_ADDR(1, &page,
				     NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
					 NAND_COMMON_TIMING_NS(conf, tRR_min)),
			/* Parameter page data is transferred on 8 bits. */
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: issue the command, then read byte by byte. */
	chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
	for (i = 0; i < len; i++)
		p[i] = chip->legacy.read_byte(chip);

	return 0;
}
/**
 * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE READ COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_read_column_op(struct nand_chip *chip,
			       unsigned int offset_in_page, void *buf,
			       unsigned int len, bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	/* writesize is still 0 while the chip is being identified. */
	bool ident_stage = !mtd->writesize;

	if (len && !buf)
		return -EINVAL;

	/* Geometry checks only make sense once identification is done. */
	if (!ident_stage) {
		if (offset_in_page + len > mtd->writesize + mtd->oobsize)
			return -EINVAL;

		/* Small page NANDs do not support column change. */
		if (mtd->writesize <= 512)
			return -ENOTSUPP;
	}

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 addrs[2] = {};
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
				    NAND_COMMON_TIMING_NS(conf, tCCS_min)),
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		/* instrs[3] is the DATA_IN instruction above. */
		instrs[3].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
	if (len)
		chip->legacy.read_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_read_column_op);
  1260. /**
  1261. * nand_read_oob_op - Do a READ OOB operation
  1262. * @chip: The NAND chip
  1263. * @page: page to read
  1264. * @offset_in_oob: offset within the OOB area
  1265. * @buf: buffer used to store the data
  1266. * @len: length of the buffer
  1267. *
  1268. * This function issues a READ OOB operation.
  1269. * This function does not select/unselect the CS line.
  1270. *
  1271. * Returns 0 on success, a negative error code otherwise.
  1272. */
  1273. int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
  1274. unsigned int offset_in_oob, void *buf, unsigned int len)
  1275. {
  1276. struct mtd_info *mtd = nand_to_mtd(chip);
  1277. if (len && !buf)
  1278. return -EINVAL;
  1279. if (offset_in_oob + len > mtd->oobsize)
  1280. return -EINVAL;
  1281. if (nand_has_exec_op(chip))
  1282. return nand_read_page_op(chip, page,
  1283. mtd->writesize + offset_in_oob,
  1284. buf, len);
  1285. chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
  1286. if (len)
  1287. chip->legacy.read_buf(chip, buf, len);
  1288. return 0;
  1289. }
  1290. EXPORT_SYMBOL_GPL(nand_read_oob_op);
/*
 * Core of the PROG PAGE operation (exec_op chips only).
 *
 * Sends (for small pages) a READ pointer command, then SEQIN + full address
 * + data out. When @prog is true, the sequence is terminated with PAGEPROG
 * and a wait for ready, actually committing the page; otherwise the caller
 * finishes the operation later (see nand_prog_page_end_op()).
 */
static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
				  unsigned int offset_in_page, const void *buf,
				  unsigned int len, bool prog)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[5] = {};
	struct nand_op_instr instrs[] = {
		/*
		 * The first instruction will be dropped if we're dealing
		 * with a large page NAND and adjusted if we're dealing
		 * with a small page NAND and the page offset is > 255.
		 */
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_CMD(NAND_CMD_SEQIN, 0),
		NAND_OP_ADDR(0, addrs, NAND_COMMON_TIMING_NS(conf, tADL_min)),
		NAND_OP_DATA_OUT(len, buf, 0),
		NAND_OP_CMD(NAND_CMD_PAGEPROG,
			    NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max), 0),
	};
	struct nand_operation op = NAND_DESTRUCTIVE_OPERATION(chip->cur_cs,
							      instrs);
	/* Column cycles first, then two or three row cycles. */
	int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);

	if (naddrs < 0)
		return naddrs;

	addrs[naddrs++] = page;
	addrs[naddrs++] = page >> 8;
	if (chip->options & NAND_ROW_ADDR_3)
		addrs[naddrs++] = page >> 16;

	/* instrs[2] is the ADDR instruction: set its final cycle count. */
	instrs[2].ctx.addr.naddrs = naddrs;

	/* Drop the last two instructions if we're not programming the page. */
	if (!prog) {
		op.ninstrs -= 2;
		/* Also drop the DATA_OUT instruction if empty. */
		if (!len)
			op.ninstrs--;
	}

	if (mtd->writesize <= 512) {
		/*
		 * Small pages need some more tweaking: we have to adjust the
		 * first instruction depending on the page offset we're trying
		 * to access.
		 */
		if (offset_in_page >= mtd->writesize)
			instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
		else if (offset_in_page >= 256 &&
			 !(chip->options & NAND_BUSWIDTH_16))
			instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
	} else {
		/*
		 * Drop the first command if we're dealing with a large page
		 * NAND.
		 */
		op.instrs++;
		op.ninstrs--;
	}

	return nand_exec_op(chip, &op);
}
  1351. /**
  1352. * nand_prog_page_begin_op - starts a PROG PAGE operation
  1353. * @chip: The NAND chip
  1354. * @page: page to write
  1355. * @offset_in_page: offset within the page
  1356. * @buf: buffer containing the data to write to the page
  1357. * @len: length of the buffer
  1358. *
  1359. * This function issues the first half of a PROG PAGE operation.
  1360. * This function does not select/unselect the CS line.
  1361. *
  1362. * Returns 0 on success, a negative error code otherwise.
  1363. */
  1364. int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
  1365. unsigned int offset_in_page, const void *buf,
  1366. unsigned int len)
  1367. {
  1368. struct mtd_info *mtd = nand_to_mtd(chip);
  1369. if (len && !buf)
  1370. return -EINVAL;
  1371. if (offset_in_page + len > mtd->writesize + mtd->oobsize)
  1372. return -EINVAL;
  1373. if (nand_has_exec_op(chip))
  1374. return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
  1375. len, false);
  1376. chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);
  1377. if (buf)
  1378. chip->legacy.write_buf(chip, buf, len);
  1379. return 0;
  1380. }
  1381. EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
/**
 * nand_prog_page_end_op - ends a PROG PAGE operation
 * @chip: The NAND chip
 *
 * This function issues the second half of a PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_end_op(struct nand_chip *chip)
{
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PAGEPROG,
				    NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tPROG_max),
					 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		/* Read back the status to check for program failures. */
		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		/* waitfunc() returns the status byte (or a negative error). */
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
/**
 * nand_prog_page_op - Do a full PROG PAGE operation
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write to the page
 * @len: length of the buffer
 *
 * This function issues a full PROG PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, const void *buf,
		      unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 status;
	int ret;

	/* A full program must carry data, unlike the begin/end variants. */
	if (!len || !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		/* prog=true: the PAGEPROG + wait are part of the sequence. */
		ret = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
					     len, true);
		if (ret)
			return ret;

		/* Read back the status to detect program failures. */
		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
				     page);
		chip->legacy.write_buf(chip, buf, len);
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		/* waitfunc() returns the status byte (or a negative error). */
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_op);
/**
 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to send to the NAND
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE WRITE COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_write_column_op(struct nand_chip *chip,
				unsigned int offset_in_page,
				const void *buf, unsigned int len,
				bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 addrs[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDIN, 0),
			NAND_OP_ADDR(2, addrs, NAND_COMMON_TIMING_NS(conf, tCCS_min)),
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		/* instrs[2] is the DATA_OUT instruction above. */
		instrs[2].ctx.data.force_8bit = force_8bit;

		/* Drop the DATA_OUT instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
	if (len)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_write_column_op);
/**
 * nand_readid_op - Do a READID operation
 * @chip: The NAND chip
 * @addr: address cycle to pass after the READID command
 * @buf: buffer used to store the ID
 * @len: length of the buffer
 *
 * This function sends a READID command and reads back the ID returned by the
 * NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
		   unsigned int len)
{
	unsigned int i;
	u8 *id = buf, *ddrbuf = NULL;

	if (len && !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_READID, 0),
			NAND_OP_ADDR(1, &addr,
				     NAND_COMMON_TIMING_NS(conf, tADL_min)),
			NAND_OP_8BIT_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* READ_ID data bytes are received twice in NV-DDR mode */
		if (len && nand_interface_is_nvddr(conf)) {
			/* Double-sized scratch buffer for the duplicates. */
			ddrbuf = kzalloc(len * 2, GFP_KERNEL);
			if (!ddrbuf)
				return -ENOMEM;

			instrs[2].ctx.data.len *= 2;
			instrs[2].ctx.data.buf.in = ddrbuf;
		}

		/* Drop the DATA_IN instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		ret = nand_exec_op(chip, &op);
		if (!ret && len && nand_interface_is_nvddr(conf)) {
			/* Keep one byte out of each duplicated pair. */
			for (i = 0; i < len; i++)
				id[i] = ddrbuf[i * 2];
		}

		kfree(ddrbuf);

		return ret;
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);
	for (i = 0; i < len; i++)
		id[i] = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_readid_op);
/**
 * nand_status_op - Do a STATUS operation
 * @chip: The NAND chip
 * @status: out variable to store the NAND status
 *
 * This function sends a STATUS command and reads back the status returned by
 * the NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_status_op(struct nand_chip *chip, u8 *status)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		u8 ddrstatus[2];
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_STATUS,
				    NAND_COMMON_TIMING_NS(conf, tADL_min)),
			NAND_OP_8BIT_DATA_IN(1, status, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* The status data byte will be received twice in NV-DDR mode */
		if (status && nand_interface_is_nvddr(conf)) {
			instrs[1].ctx.data.len *= 2;
			instrs[1].ctx.data.buf.in = ddrstatus;
		}

		/* Drop the DATA_IN instruction when no status is wanted. */
		if (!status)
			op.ninstrs--;

		ret = nand_exec_op(chip, &op);
		/* Keep only the first copy of the duplicated byte. */
		if (!ret && status && nand_interface_is_nvddr(conf))
			*status = ddrstatus[0];

		return ret;
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
	if (status)
		*status = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_status_op);
  1620. /**
  1621. * nand_exit_status_op - Exit a STATUS operation
  1622. * @chip: The NAND chip
  1623. *
  1624. * This function sends a READ0 command to cancel the effect of the STATUS
  1625. * command to avoid reading only the status until a new read command is sent.
  1626. *
  1627. * This function does not select/unselect the CS line.
  1628. *
  1629. * Returns 0 on success, a negative error code otherwise.
  1630. */
  1631. int nand_exit_status_op(struct nand_chip *chip)
  1632. {
  1633. if (nand_has_exec_op(chip)) {
  1634. struct nand_op_instr instrs[] = {
  1635. NAND_OP_CMD(NAND_CMD_READ0, 0),
  1636. };
  1637. struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
  1638. return nand_exec_op(chip, &op);
  1639. }
  1640. chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
  1641. return 0;
  1642. }
  1643. EXPORT_SYMBOL_GPL(nand_exit_status_op);
/**
 * nand_erase_op - Do an erase operation
 * @chip: The NAND chip
 * @eraseblock: block to erase
 *
 * This function sends an ERASE command and waits for the NAND to be ready
 * before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
{
	/* Erase addressing is row based: use the block's first page. */
	unsigned int page = eraseblock <<
			    (chip->phys_erase_shift - chip->page_shift);
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		/* Up to 3 row cycles; the ADDR instruction starts with 2. */
		u8 addrs[3] = { page, page >> 8, page >> 16 };
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_ERASE1, 0),
			NAND_OP_ADDR(2, addrs, 0),
			NAND_OP_CMD(NAND_CMD_ERASE2,
				    NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
					 0),
		};
		struct nand_operation op = NAND_DESTRUCTIVE_OPERATION(chip->cur_cs,
								      instrs);

		/* Bigger chips need a third row cycle. */
		if (chip->options & NAND_ROW_ADDR_3)
			instrs[1].ctx.addr.naddrs++;

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		/* Read back the status to detect erase failures. */
		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
		/* waitfunc() returns the status byte (or a negative error). */
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_erase_op);
/**
 * nand_set_features_op - Do a SET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: 4 bytes of data
 *
 * This function sends a SET FEATURES command and waits for the NAND to be
 * ready before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_set_features_op(struct nand_chip *chip, u8 feature,
				const void *data)
{
	const u8 *params = data;
	int i, ret;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, NAND_COMMON_TIMING_NS(conf,
									tADL_min)),
			/* Feature parameters always go out on 8 bits. */
			NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
					      NAND_COMMON_TIMING_NS(conf,
								    tWB_max)),
			/* Wait tFEAT_max for the feature to be applied. */
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
					 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: write the parameters byte by byte, then wait. */
	chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		chip->legacy.write_byte(chip, params[i]);

	ret = chip->legacy.waitfunc(chip);
	if (ret < 0)
		return ret;

	if (ret & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
/**
 * nand_get_features_op - Do a GET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: 4 bytes of data
 *
 * This function sends a GET FEATURES command and waits for the NAND to be
 * ready before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_get_features_op(struct nand_chip *chip, u8 feature,
				void *data)
{
	u8 *params = data, ddrbuf[ONFI_SUBFEATURE_PARAM_LEN * 2];
	int i;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature,
				     NAND_COMMON_TIMING_NS(conf, tWB_max)),
			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tFEAT_max),
					 NAND_COMMON_TIMING_NS(conf, tRR_min)),
			NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
					     data, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		/* GET_FEATURE data bytes are received twice in NV-DDR mode */
		if (nand_interface_is_nvddr(conf)) {
			instrs[3].ctx.data.len *= 2;
			instrs[3].ctx.data.buf.in = ddrbuf;
		}

		ret = nand_exec_op(chip, &op);
		/*
		 * Keep one byte out of each duplicated pair.
		 * NOTE(review): this copy runs even when nand_exec_op()
		 * failed, so @data may then hold stale bytes — callers must
		 * check the return value before using the parameters.
		 */
		if (nand_interface_is_nvddr(conf)) {
			for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; i++)
				params[i] = ddrbuf[i * 2];
		}

		return ret;
	}

	/* Legacy path: read the parameters byte by byte. */
	chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		params[i] = chip->legacy.read_byte(chip);

	return 0;
}
/*
 * Wait for the chip to become ready again, either through a WAIT_RDY
 * instruction (exec_op chips) or via the legacy chip delay / R/B pin.
 *
 * NOTE(review): @timeout_ms and @delay_ns are named as ms/ns values, yet
 * they are passed through PSEC_TO_MSEC()/PSEC_TO_NSEC() — this looks like a
 * unit mismatch; confirm against the macro definitions and the callers.
 */
static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
			    unsigned int delay_ns)
{
	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
					 PSEC_TO_NSEC(delay_ns)),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Apply delay or wait for ready/busy pin */
	if (!chip->legacy.dev_ready)
		udelay(chip->legacy.chip_delay);
	else
		nand_wait_ready(chip);

	return 0;
}
  1805. /**
  1806. * nand_reset_op - Do a reset operation
  1807. * @chip: The NAND chip
  1808. *
  1809. * This function sends a RESET command and waits for the NAND to be ready
  1810. * before returning.
  1811. * This function does not select/unselect the CS line.
  1812. *
  1813. * Returns 0 on success, a negative error code otherwise.
  1814. */
  1815. int nand_reset_op(struct nand_chip *chip)
  1816. {
  1817. if (nand_has_exec_op(chip)) {
  1818. const struct nand_interface_config *conf =
  1819. nand_get_interface_config(chip);
  1820. struct nand_op_instr instrs[] = {
  1821. NAND_OP_CMD(NAND_CMD_RESET,
  1822. NAND_COMMON_TIMING_NS(conf, tWB_max)),
  1823. NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tRST_max),
  1824. 0),
  1825. };
  1826. struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
  1827. return nand_exec_op(chip, &op);
  1828. }
  1829. chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
  1830. return 0;
  1831. }
  1832. EXPORT_SYMBOL_GPL(nand_reset_op);
/**
 * nand_read_data_op - Read data from the NAND
 * @chip: The NAND chip
 * @buf: buffer used to store the data
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 * @check_only: do not actually run the command, only checks if the
 * controller driver supports it
 *
 * This function does a raw data read on the bus. Usually used after launching
 * another NAND operation like nand_read_page_op().
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
		      bool force_8bit, bool check_only)
{
	if (!len || (!check_only && !buf))
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		const struct nand_interface_config *conf =
			nand_get_interface_config(chip);
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		u8 *ddrbuf = NULL;
		int ret, i;

		instrs[0].ctx.data.force_8bit = force_8bit;

		/*
		 * Parameter payloads (ID, status, features, etc) do not go
		 * through the same pipeline as regular data, hence the
		 * force_8bit flag must be set and this also indicates that in
		 * case NV-DDR timings are being used the data will be received
		 * twice.
		 */
		if (force_8bit && nand_interface_is_nvddr(conf)) {
			/* Double-sized scratch buffer for the duplicates. */
			ddrbuf = kzalloc(len * 2, GFP_KERNEL);
			if (!ddrbuf)
				return -ENOMEM;

			instrs[0].ctx.data.len *= 2;
			instrs[0].ctx.data.buf.in = ddrbuf;
		}

		if (check_only) {
			/* Only probe controller support, do not execute. */
			ret = nand_check_op(chip, &op);
			kfree(ddrbuf);
			return ret;
		}

		ret = nand_exec_op(chip, &op);
		if (!ret && force_8bit && nand_interface_is_nvddr(conf)) {
			u8 *dst = buf;

			/* Keep one byte out of each duplicated pair. */
			for (i = 0; i < len; i++)
				dst[i] = ddrbuf[i * 2];
		}

		kfree(ddrbuf);

		return ret;
	}

	/* The legacy path cannot fail, nothing to check. */
	if (check_only)
		return 0;

	if (force_8bit) {
		u8 *p = buf;
		unsigned int i;

		for (i = 0; i < len; i++)
			p[i] = chip->legacy.read_byte(chip);
	} else {
		chip->legacy.read_buf(chip, buf, len);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_data_op);
  1904. /**
  1905. * nand_write_data_op - Write data from the NAND
  1906. * @chip: The NAND chip
  1907. * @buf: buffer containing the data to send on the bus
  1908. * @len: length of the buffer
  1909. * @force_8bit: force 8-bit bus access
  1910. *
  1911. * This function does a raw data write on the bus. Usually used after launching
  1912. * another NAND operation like nand_write_page_begin_op().
  1913. * This function does not select/unselect the CS line.
  1914. *
  1915. * Returns 0 on success, a negative error code otherwise.
  1916. */
  1917. int nand_write_data_op(struct nand_chip *chip, const void *buf,
  1918. unsigned int len, bool force_8bit)
  1919. {
  1920. if (!len || !buf)
  1921. return -EINVAL;
  1922. if (nand_has_exec_op(chip)) {
  1923. struct nand_op_instr instrs[] = {
  1924. NAND_OP_DATA_OUT(len, buf, 0),
  1925. };
  1926. struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
  1927. instrs[0].ctx.data.force_8bit = force_8bit;
  1928. return nand_exec_op(chip, &op);
  1929. }
  1930. if (force_8bit) {
  1931. const u8 *p = buf;
  1932. unsigned int i;
  1933. for (i = 0; i < len; i++)
  1934. chip->legacy.write_byte(chip, p[i]);
  1935. } else {
  1936. chip->legacy.write_buf(chip, buf, len);
  1937. }
  1938. return 0;
  1939. }
  1940. EXPORT_SYMBOL_GPL(nand_write_data_op);
/**
 * struct nand_op_parser_ctx - Context used by the parser
 * @instrs: array of all the instructions that must be addressed
 * @ninstrs: length of the @instrs array
 * @subop: Sub-operation to be passed to the NAND controller
 *
 * This structure is used by the core to split NAND operations into
 * sub-operations that can be handled by the NAND controller.
 * @subop acts as a sliding window over @instrs: it points at the first
 * not-yet-executed instruction and records per-instruction start/end
 * offsets when an instruction had to be split across sub-operations.
 */
struct nand_op_parser_ctx {
	const struct nand_op_instr *instrs;
	unsigned int ninstrs;
	struct nand_subop subop;
};
  1955. /**
  1956. * nand_op_parser_must_split_instr - Checks if an instruction must be split
  1957. * @pat: the parser pattern element that matches @instr
  1958. * @instr: pointer to the instruction to check
  1959. * @start_offset: this is an in/out parameter. If @instr has already been
  1960. * split, then @start_offset is the offset from which to start
  1961. * (either an address cycle or an offset in the data buffer).
  1962. * Conversely, if the function returns true (ie. instr must be
  1963. * split), this parameter is updated to point to the first
  1964. * data/address cycle that has not been taken care of.
  1965. *
  1966. * Some NAND controllers are limited and cannot send X address cycles with a
  1967. * unique operation, or cannot read/write more than Y bytes at the same time.
  1968. * In this case, split the instruction that does not fit in a single
  1969. * controller-operation into two or more chunks.
  1970. *
  1971. * Returns true if the instruction must be split, false otherwise.
  1972. * The @start_offset parameter is also updated to the offset at which the next
  1973. * bundle of instruction must start (if an address or a data instruction).
  1974. */
  1975. static bool
  1976. nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
  1977. const struct nand_op_instr *instr,
  1978. unsigned int *start_offset)
  1979. {
  1980. switch (pat->type) {
  1981. case NAND_OP_ADDR_INSTR:
  1982. if (!pat->ctx.addr.maxcycles)
  1983. break;
  1984. if (instr->ctx.addr.naddrs - *start_offset >
  1985. pat->ctx.addr.maxcycles) {
  1986. *start_offset += pat->ctx.addr.maxcycles;
  1987. return true;
  1988. }
  1989. break;
  1990. case NAND_OP_DATA_IN_INSTR:
  1991. case NAND_OP_DATA_OUT_INSTR:
  1992. if (!pat->ctx.data.maxlen)
  1993. break;
  1994. if (instr->ctx.data.len - *start_offset >
  1995. pat->ctx.data.maxlen) {
  1996. *start_offset += pat->ctx.data.maxlen;
  1997. return true;
  1998. }
  1999. break;
  2000. default:
  2001. break;
  2002. }
  2003. return false;
  2004. }
/**
 * nand_op_parser_match_pat - Checks if a pattern matches the instructions
 *			      remaining in the parser context
 * @pat: the pattern to test
 * @ctx: the parser context structure to match with the pattern @pat
 *
 * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
 * Returns true if this is the case, false otherwise. When true is returned,
 * @ctx->subop is updated with the set of instructions to be passed to the
 * controller driver.
 */
static bool
nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
			 struct nand_op_parser_ctx *ctx)
{
	/* Resume point inside the first instruction if it was split before. */
	unsigned int instr_offset = ctx->subop.first_instr_start_off;
	const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
	const struct nand_op_instr *instr = ctx->subop.instrs;
	unsigned int i, ninstrs;

	for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
		/*
		 * The pattern instruction does not match the operation
		 * instruction. If the instruction is marked optional in the
		 * pattern definition, we skip the pattern element and continue
		 * to the next one. If the element is mandatory, there's no
		 * match and we can return false directly.
		 */
		if (instr->type != pat->elems[i].type) {
			if (!pat->elems[i].optional)
				return false;

			continue;
		}

		/*
		 * Now check the pattern element constraints. If the pattern is
		 * not able to handle the whole instruction in a single step,
		 * we have to split it.
		 * The last_instr_end_off value comes back updated to point to
		 * the position where we have to split the instruction (the
		 * start of the next subop chunk).
		 */
		if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
						    &instr_offset)) {
			ninstrs++;
			i++;
			break;
		}

		instr++;
		ninstrs++;
		/* Any follow-up instruction starts at its beginning. */
		instr_offset = 0;
	}

	/*
	 * This can happen if all instructions of a pattern are optional.
	 * Still, if there's not at least one instruction handled by this
	 * pattern, this is not a match, and we should try the next one (if
	 * any).
	 */
	if (!ninstrs)
		return false;

	/*
	 * We had a match on the pattern head, but the pattern may be longer
	 * than the instructions we're asked to execute. We need to make sure
	 * there's no mandatory elements in the pattern tail.
	 */
	for (; i < pat->nelems; i++) {
		if (!pat->elems[i].optional)
			return false;
	}

	/*
	 * We have a match: update the subop structure accordingly and return
	 * true.
	 */
	ctx->subop.ninstrs = ninstrs;
	ctx->subop.last_instr_end_off = instr_offset;

	return true;
}
#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
/* Dump every instruction of @ctx, marking those belonging to the subop. */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	const struct nand_op_instr *instr;
	char *prefix = " ";
	unsigned int i;

	pr_debug("executing subop (CS%d):\n", ctx->subop.cs);

	for (i = 0; i < ctx->ninstrs; i++) {
		instr = &ctx->instrs[i];

		/* Switch to the arrow marker while inside the subop window. */
		if (instr == &ctx->subop.instrs[0])
			prefix = " ->";

		nand_op_trace(prefix, instr);

		if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
			prefix = " ";
	}
}
#else
/* Tracing compiled out when dynamic debug / DEBUG is disabled. */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	/* NOP */
}
#endif
  2102. static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
  2103. const struct nand_op_parser_ctx *b)
  2104. {
  2105. if (a->subop.ninstrs < b->subop.ninstrs)
  2106. return -1;
  2107. else if (a->subop.ninstrs > b->subop.ninstrs)
  2108. return 1;
  2109. if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
  2110. return -1;
  2111. else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
  2112. return 1;
  2113. return 0;
  2114. }
/**
 * nand_op_parser_exec_op - exec_op parser
 * @chip: the NAND chip
 * @parser: patterns description provided by the controller driver
 * @op: the NAND operation to address
 * @check_only: when true, the function only checks if @op can be handled but
 *		does not execute the operation
 *
 * Helper function designed to ease integration of NAND controller drivers that
 * only support a limited set of instruction sequences. The supported sequences
 * are described in @parser, and the framework takes care of splitting @op into
 * multiple sub-operations (if required) and pass them back to the ->exec()
 * callback of the matching pattern if @check_only is set to false.
 *
 * NAND controller drivers should call this function from their own ->exec_op()
 * implementation.
 *
 * Returns 0 on success, a negative error code otherwise. A failure can be
 * caused by an unsupported operation (none of the supported patterns is able
 * to handle the requested operation), or an error returned by one of the
 * matching pattern->exec() hook.
 */
int nand_op_parser_exec_op(struct nand_chip *chip,
			   const struct nand_op_parser *parser,
			   const struct nand_operation *op, bool check_only)
{
	struct nand_op_parser_ctx ctx = {
		.subop.cs = op->cs,
		.subop.instrs = op->instrs,
		.instrs = op->instrs,
		.ninstrs = op->ninstrs,
	};
	unsigned int i;

	/* Consume the operation one sub-operation at a time. */
	while (ctx.subop.instrs < op->instrs + op->ninstrs) {
		const struct nand_op_parser_pattern *pattern;
		struct nand_op_parser_ctx best_ctx;
		int ret, best_pattern = -1;

		/* Try every pattern and keep the one covering the most. */
		for (i = 0; i < parser->npatterns; i++) {
			struct nand_op_parser_ctx test_ctx = ctx;

			pattern = &parser->patterns[i];
			if (!nand_op_parser_match_pat(pattern, &test_ctx))
				continue;

			if (best_pattern >= 0 &&
			    nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
				continue;

			best_pattern = i;
			best_ctx = test_ctx;
		}

		if (best_pattern < 0) {
			pr_debug("->exec_op() parser: pattern not found!\n");
			return -ENOTSUPP;
		}

		ctx = best_ctx;
		nand_op_parser_trace(&ctx);

		if (!check_only) {
			pattern = &parser->patterns[best_pattern];
			ret = pattern->exec(chip, &ctx.subop);
			if (ret)
				return ret;
		}

		/*
		 * Update the context structure by pointing to the start of the
		 * next subop.
		 */
		ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
		/*
		 * A non-zero end offset means the last instruction was split:
		 * the next subop must resume inside that same instruction.
		 */
		if (ctx.subop.last_instr_end_off)
			ctx.subop.instrs -= 1;

		ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
  2187. static bool nand_instr_is_data(const struct nand_op_instr *instr)
  2188. {
  2189. return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
  2190. instr->type == NAND_OP_DATA_OUT_INSTR);
  2191. }
  2192. static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
  2193. unsigned int instr_idx)
  2194. {
  2195. return subop && instr_idx < subop->ninstrs;
  2196. }
  2197. static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
  2198. unsigned int instr_idx)
  2199. {
  2200. if (instr_idx)
  2201. return 0;
  2202. return subop->first_instr_start_off;
  2203. }
  2204. /**
  2205. * nand_subop_get_addr_start_off - Get the start offset in an address array
  2206. * @subop: The entire sub-operation
  2207. * @instr_idx: Index of the instruction inside the sub-operation
  2208. *
  2209. * During driver development, one could be tempted to directly use the
  2210. * ->addr.addrs field of address instructions. This is wrong as address
  2211. * instructions might be split.
  2212. *
  2213. * Given an address instruction, returns the offset of the first cycle to issue.
  2214. */
  2215. unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
  2216. unsigned int instr_idx)
  2217. {
  2218. if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
  2219. subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
  2220. return 0;
  2221. return nand_subop_get_start_off(subop, instr_idx);
  2222. }
  2223. EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
  2224. /**
  2225. * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
  2226. * @subop: The entire sub-operation
  2227. * @instr_idx: Index of the instruction inside the sub-operation
  2228. *
  2229. * During driver development, one could be tempted to directly use the
  2230. * ->addr->naddrs field of a data instruction. This is wrong as instructions
  2231. * might be split.
  2232. *
  2233. * Given an address instruction, returns the number of address cycle to issue.
  2234. */
  2235. unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
  2236. unsigned int instr_idx)
  2237. {
  2238. int start_off, end_off;
  2239. if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
  2240. subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
  2241. return 0;
  2242. start_off = nand_subop_get_addr_start_off(subop, instr_idx);
  2243. if (instr_idx == subop->ninstrs - 1 &&
  2244. subop->last_instr_end_off)
  2245. end_off = subop->last_instr_end_off;
  2246. else
  2247. end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
  2248. return end_off - start_off;
  2249. }
  2250. EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
  2251. /**
  2252. * nand_subop_get_data_start_off - Get the start offset in a data array
  2253. * @subop: The entire sub-operation
  2254. * @instr_idx: Index of the instruction inside the sub-operation
  2255. *
  2256. * During driver development, one could be tempted to directly use the
  2257. * ->data->buf.{in,out} field of data instructions. This is wrong as data
  2258. * instructions might be split.
  2259. *
  2260. * Given a data instruction, returns the offset to start from.
  2261. */
  2262. unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
  2263. unsigned int instr_idx)
  2264. {
  2265. if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
  2266. !nand_instr_is_data(&subop->instrs[instr_idx])))
  2267. return 0;
  2268. return nand_subop_get_start_off(subop, instr_idx);
  2269. }
  2270. EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
  2271. /**
  2272. * nand_subop_get_data_len - Get the number of bytes to retrieve
  2273. * @subop: The entire sub-operation
  2274. * @instr_idx: Index of the instruction inside the sub-operation
  2275. *
  2276. * During driver development, one could be tempted to directly use the
  2277. * ->data->len field of a data instruction. This is wrong as data instructions
  2278. * might be split.
  2279. *
  2280. * Returns the length of the chunk of data to send/receive.
  2281. */
  2282. unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
  2283. unsigned int instr_idx)
  2284. {
  2285. int start_off = 0, end_off;
  2286. if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
  2287. !nand_instr_is_data(&subop->instrs[instr_idx])))
  2288. return 0;
  2289. start_off = nand_subop_get_data_start_off(subop, instr_idx);
  2290. if (instr_idx == subop->ninstrs - 1 &&
  2291. subop->last_instr_end_off)
  2292. end_off = subop->last_instr_end_off;
  2293. else
  2294. end_off = subop->instrs[instr_idx].ctx.data.len;
  2295. return end_off - start_off;
  2296. }
  2297. EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
  2298. /**
  2299. * nand_reset - Reset and initialize a NAND device
  2300. * @chip: The NAND chip
  2301. * @chipnr: Internal die id
  2302. *
  2303. * Save the timings data structure, then apply SDR timings mode 0 (see
  2304. * nand_reset_interface for details), do the reset operation, and apply
  2305. * back the previous timings.
  2306. *
  2307. * Returns 0 on success, a negative error code otherwise.
  2308. */
  2309. int nand_reset(struct nand_chip *chip, int chipnr)
  2310. {
  2311. int ret;
  2312. ret = nand_reset_interface(chip, chipnr);
  2313. if (ret)
  2314. return ret;
  2315. /*
  2316. * The CS line has to be released before we can apply the new NAND
  2317. * interface settings, hence this weird nand_select_target()
  2318. * nand_deselect_target() dance.
  2319. */
  2320. nand_select_target(chip, chipnr);
  2321. ret = nand_reset_op(chip);
  2322. nand_deselect_target(chip);
  2323. if (ret)
  2324. return ret;
  2325. ret = nand_setup_interface(chip, chipnr);
  2326. if (ret)
  2327. return ret;
  2328. return 0;
  2329. }
  2330. EXPORT_SYMBOL_GPL(nand_reset);
  2331. /**
  2332. * nand_get_features - wrapper to perform a GET_FEATURE
  2333. * @chip: NAND chip info structure
  2334. * @addr: feature address
  2335. * @subfeature_param: the subfeature parameters, a four bytes array
  2336. *
  2337. * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
  2338. * operation cannot be handled.
  2339. */
  2340. int nand_get_features(struct nand_chip *chip, int addr,
  2341. u8 *subfeature_param)
  2342. {
  2343. if (!nand_supports_get_features(chip, addr))
  2344. return -ENOTSUPP;
  2345. if (chip->legacy.get_features)
  2346. return chip->legacy.get_features(chip, addr, subfeature_param);
  2347. return nand_get_features_op(chip, addr, subfeature_param);
  2348. }
  2349. /**
  2350. * nand_set_features - wrapper to perform a SET_FEATURE
  2351. * @chip: NAND chip info structure
  2352. * @addr: feature address
  2353. * @subfeature_param: the subfeature parameters, a four bytes array
  2354. *
  2355. * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
  2356. * operation cannot be handled.
  2357. */
  2358. int nand_set_features(struct nand_chip *chip, int addr,
  2359. u8 *subfeature_param)
  2360. {
  2361. if (!nand_supports_set_features(chip, addr))
  2362. return -ENOTSUPP;
  2363. if (chip->legacy.set_features)
  2364. return chip->legacy.set_features(chip, addr, subfeature_param);
  2365. return nand_set_features_op(chip, addr, subfeature_param);
  2366. }
  2367. /**
  2368. * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
  2369. * @buf: buffer to test
  2370. * @len: buffer length
  2371. * @bitflips_threshold: maximum number of bitflips
  2372. *
  2373. * Check if a buffer contains only 0xff, which means the underlying region
  2374. * has been erased and is ready to be programmed.
  2375. * The bitflips_threshold specify the maximum number of bitflips before
  2376. * considering the region is not erased.
  2377. * Note: The logic of this function has been extracted from the memweight
  2378. * implementation, except that nand_check_erased_buf function exit before
  2379. * testing the whole buffer if the number of bitflips exceed the
  2380. * bitflips_threshold value.
  2381. *
  2382. * Returns a positive number of bitflips less than or equal to
  2383. * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
  2384. * threshold.
  2385. */
  2386. static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
  2387. {
  2388. const unsigned char *bitmap = buf;
  2389. int bitflips = 0;
  2390. int weight;
  2391. for (; len && ((uintptr_t)bitmap) % sizeof(long);
  2392. len--, bitmap++) {
  2393. weight = hweight8(*bitmap);
  2394. bitflips += BITS_PER_BYTE - weight;
  2395. if (unlikely(bitflips > bitflips_threshold))
  2396. return -EBADMSG;
  2397. }
  2398. for (; len >= sizeof(long);
  2399. len -= sizeof(long), bitmap += sizeof(long)) {
  2400. unsigned long d = *((unsigned long *)bitmap);
  2401. if (d == ~0UL)
  2402. continue;
  2403. weight = hweight_long(d);
  2404. bitflips += BITS_PER_LONG - weight;
  2405. if (unlikely(bitflips > bitflips_threshold))
  2406. return -EBADMSG;
  2407. }
  2408. for (; len > 0; len--, bitmap++) {
  2409. weight = hweight8(*bitmap);
  2410. bitflips += BITS_PER_BYTE - weight;
  2411. if (unlikely(bitflips > bitflips_threshold))
  2412. return -EBADMSG;
  2413. }
  2414. return bitflips;
  2415. }
  2416. /**
  2417. * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
  2418. * 0xff data
  2419. * @data: data buffer to test
  2420. * @datalen: data length
  2421. * @ecc: ECC buffer
  2422. * @ecclen: ECC length
  2423. * @extraoob: extra OOB buffer
  2424. * @extraooblen: extra OOB length
  2425. * @bitflips_threshold: maximum number of bitflips
  2426. *
  2427. * Check if a data buffer and its associated ECC and OOB data contains only
  2428. * 0xff pattern, which means the underlying region has been erased and is
  2429. * ready to be programmed.
  2430. * The bitflips_threshold specify the maximum number of bitflips before
  2431. * considering the region as not erased.
  2432. *
  2433. * Note:
  2434. * 1/ ECC algorithms are working on pre-defined block sizes which are usually
  2435. * different from the NAND page size. When fixing bitflips, ECC engines will
  2436. * report the number of errors per chunk, and the NAND core infrastructure
  2437. * expect you to return the maximum number of bitflips for the whole page.
  2438. * This is why you should always use this function on a single chunk and
  2439. * not on the whole page. After checking each chunk you should update your
  2440. * max_bitflips value accordingly.
  2441. * 2/ When checking for bitflips in erased pages you should not only check
  2442. * the payload data but also their associated ECC data, because a user might
  2443. * have programmed almost all bits to 1 but a few. In this case, we
  2444. * shouldn't consider the chunk as erased, and checking ECC bytes prevent
  2445. * this case.
  2446. * 3/ The extraoob argument is optional, and should be used if some of your OOB
  2447. * data are protected by the ECC engine.
  2448. * It could also be used if you support subpages and want to attach some
  2449. * extra OOB data to an ECC chunk.
  2450. *
  2451. * Returns a positive number of bitflips less than or equal to
  2452. * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
  2453. * threshold. In case of success, the passed buffers are filled with 0xff.
  2454. */
  2455. int nand_check_erased_ecc_chunk(void *data, int datalen,
  2456. void *ecc, int ecclen,
  2457. void *extraoob, int extraooblen,
  2458. int bitflips_threshold)
  2459. {
  2460. int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
  2461. data_bitflips = nand_check_erased_buf(data, datalen,
  2462. bitflips_threshold);
  2463. if (data_bitflips < 0)
  2464. return data_bitflips;
  2465. bitflips_threshold -= data_bitflips;
  2466. ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
  2467. if (ecc_bitflips < 0)
  2468. return ecc_bitflips;
  2469. bitflips_threshold -= ecc_bitflips;
  2470. extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
  2471. bitflips_threshold);
  2472. if (extraoob_bitflips < 0)
  2473. return extraoob_bitflips;
  2474. if (data_bitflips)
  2475. memset(data, 0xff, datalen);
  2476. if (ecc_bitflips)
  2477. memset(ecc, 0xff, ecclen);
  2478. if (extraoob_bitflips)
  2479. memset(extraoob, 0xff, extraooblen);
  2480. return data_bitflips + ecc_bitflips + extraoob_bitflips;
  2481. }
  2482. EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
/**
 * nand_read_page_raw_notsupp - dummy read raw page function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Returns -ENOTSUPP unconditionally.
 */
int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
			       int oob_required, int page)
{
	/* Placeholder for controllers that cannot perform raw page reads. */
	return -ENOTSUPP;
}
  2497. /**
  2498. * nand_read_page_raw - [INTERN] read raw page data without ecc
  2499. * @chip: nand chip info structure
  2500. * @buf: buffer to store read data
  2501. * @oob_required: caller requires OOB data read to chip->oob_poi
  2502. * @page: page number to read
  2503. *
  2504. * Not for syndrome calculating ECC controllers, which use a special oob layout.
  2505. */
  2506. int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
  2507. int page)
  2508. {
  2509. struct mtd_info *mtd = nand_to_mtd(chip);
  2510. int ret;
  2511. ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
  2512. if (ret)
  2513. return ret;
  2514. if (oob_required) {
  2515. ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
  2516. false, false);
  2517. if (ret)
  2518. return ret;
  2519. }
  2520. return 0;
  2521. }
  2522. EXPORT_SYMBOL(nand_read_page_raw);
  2523. /**
  2524. * nand_monolithic_read_page_raw - Monolithic page read in raw mode
  2525. * @chip: NAND chip info structure
  2526. * @buf: buffer to store read data
  2527. * @oob_required: caller requires OOB data read to chip->oob_poi
  2528. * @page: page number to read
  2529. *
  2530. * This is a raw page read, ie. without any error detection/correction.
  2531. * Monolithic means we are requesting all the relevant data (main plus
  2532. * eventually OOB) to be loaded in the NAND cache and sent over the
  2533. * bus (from the NAND chip to the NAND controller) in a single
  2534. * operation. This is an alternative to nand_read_page_raw(), which
  2535. * first reads the main data, and if the OOB data is requested too,
  2536. * then reads more data on the bus.
  2537. */
  2538. int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
  2539. int oob_required, int page)
  2540. {
  2541. struct mtd_info *mtd = nand_to_mtd(chip);
  2542. unsigned int size = mtd->writesize;
  2543. u8 *read_buf = buf;
  2544. int ret;
  2545. if (oob_required) {
  2546. size += mtd->oobsize;
  2547. if (buf != chip->data_buf)
  2548. read_buf = nand_get_data_buf(chip);
  2549. }
  2550. ret = nand_read_page_op(chip, page, 0, read_buf, size);
  2551. if (ret)
  2552. return ret;
  2553. if (buf != chip->data_buf)
  2554. memcpy(buf, read_buf, mtd->writesize);
  2555. return 0;
  2556. }
  2557. EXPORT_SYMBOL(nand_monolithic_read_page_raw);
/**
 * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * We need a special oob layout and handling even when OOB isn't used.
 */
static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
				       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	/* Start the page read without transferring any data yet. */
	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * The on-flash layout interleaves data and OOB per ECC step:
	 * [data | prepad | ecc | postpad] repeated ecc.steps times.
	 */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_read_data_op(chip, buf, eccsize, false, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Read whatever OOB bytes remain after the last ECC step. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_read_data_op(chip, oob, size, false, false);
		if (ret)
			return ret;
	}

	return 0;
}
/**
 * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Reads the raw page (always including OOB, since the stored ECC bytes live
 * there), recomputes the ECC in software and corrects @buf in place.
 * Returns the maximum number of bitflips seen in any ECC step, or a
 * negative error code.
 */
static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	/*
	 * NOTE(review): the return value is ignored here — a failed raw read
	 * falls through to correction on stale data; confirm intentional.
	 */
	chip->ecc.read_page_raw(chip, buf, 1, page);

	/* Compute the expected ECC for every step of the page. */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
		chip->ecc.calculate(chip, p, &ecc_calc[i]);

	/* Extract the ECC bytes that were stored in the OOB area. */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	/* Correct each step, accumulating ECC statistics. */
	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
/**
 * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
 * @chip: nand chip info structure
 * @data_offs: offset of requested data within the page
 * @readlen: data length
 * @bufpoi: buffer to store read data
 * @page: page number to read
 *
 * Reads only the ECC steps covering [@data_offs, @data_offs + @readlen),
 * fetches the matching ECC bytes from OOB and corrects the data in place.
 * Returns the maximum number of bitflips seen in any step, or a negative
 * error code.
 */
static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
			     uint32_t readlen, uint8_t *bufpoi, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int start_step, end_step, num_steps, ret;
	uint8_t *p;
	int data_col_addr, i, gaps = 0;
	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
	int index, section = 0;
	unsigned int max_bitflips = 0;
	struct mtd_oob_region oobregion = { };

	/* Column address within the page aligned to ECC size (256bytes) */
	start_step = data_offs / chip->ecc.size;
	end_step = (data_offs + readlen - 1) / chip->ecc.size;
	num_steps = end_step - start_step + 1;
	index = start_step * chip->ecc.bytes;

	/* Data size aligned to ECC ecc.size */
	datafrag_len = num_steps * chip->ecc.size;
	eccfrag_len = num_steps * chip->ecc.bytes;

	data_col_addr = start_step * chip->ecc.size;
	/* If we read not a page aligned data */
	p = bufpoi + data_col_addr;
	ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
	if (ret)
		return ret;

	/* Calculate ECC */
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
		chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);

	/*
	 * The performance is faster if we position offsets according to
	 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
	 */
	ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
	if (ret)
		return ret;

	if (oobregion.length < eccfrag_len)
		gaps = 1;

	if (gaps) {
		/* Scattered ECC bytes: read the whole OOB area. */
		ret = nand_change_read_column_op(chip, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 false);
		if (ret)
			return ret;
	} else {
		/*
		 * Send the command to read the particular ECC bytes take care
		 * about buswidth alignment in read_buf.
		 */
		aligned_pos = oobregion.offset & ~(busw - 1);
		aligned_len = eccfrag_len;
		/* Pad one byte at each end that alignment pulled in. */
		if (oobregion.offset & (busw - 1))
			aligned_len++;
		if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
		    (busw - 1))
			aligned_len++;

		ret = nand_change_read_column_op(chip,
						 mtd->writesize + aligned_pos,
						 &chip->oob_poi[aligned_pos],
						 aligned_len, false);
		if (ret)
			return ret;
	}

	ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
					 chip->oob_poi, index, eccfrag_len);
	if (ret)
		return ret;

	p = bufpoi + data_col_addr;
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
		int stat;

		stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
					 &chip->ecc.calc_buf[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   &chip->ecc.code_buf[i],
							   chip->ecc.bytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
/**
 * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Not for syndrome calculating ECC controllers which need a special oob layout.
 *
 * Returns the maximum number of bitflips seen in any ECC step, or a
 * negative error code.
 */
static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Read the page one ECC step at a time, arming the hardware ECC
	 * engine before each chunk so it accumulates over that chunk only.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	/* The on-flash ECC codes live in the OOB area; fetch it entirely */
	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
				false);
	if (ret)
		return ret;

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	/* Second pass: correct each chunk with stored vs. computed ECC */
	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
							   &ecc_code[i], eccbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
/**
 * nand_read_page_hwecc_oob_first - Hardware ECC page read with ECC
 *                                  data read from OOB area
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Hardware ECC for large page chips, which requires the ECC data to be
 * extracted from the OOB before the actual data is read.
 *
 * Returns the maximum number of bitflips seen in any ECC step, or a
 * negative error code.
 */
int nand_read_page_hwecc_oob_first(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	/* Read the OOB area first */
	ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
	if (ret)
		return ret;

	/* Move read cursor to start of page */
	ret = nand_change_read_column_op(chip, 0, NULL, 0, false);
	if (ret)
		return ret;

	/* Pull the stored ECC codes out of the just-read OOB buffer */
	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/*
	 * Read and correct the main data one ECC step at a time. The ECC
	 * engine is armed before each chunk; stored codes are already
	 * available, so correction happens immediately (calc arg is NULL).
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
							   &ecc_code[i],
							   eccbytes, NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
EXPORT_SYMBOL_GPL(nand_read_page_hwecc_oob_first);
/**
 * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling.
 *
 * On-flash layout per step is: data [prepad] ecc [postpad], so data and
 * OOB bytes are interleaved and must be read strictly in stream order.
 *
 * Returns the maximum number of bitflips seen in any ECC step, or a
 * negative error code.
 */
static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret, i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	unsigned int max_bitflips = 0;

	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		/* Consume the prepad bytes preceding the ECC of this step */
		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* Switch the engine to syndrome-read mode for the ECC bytes */
		chip->ecc.hwctl(chip, NAND_ECC_READSYN);

		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		stat = chip->ecc.correct(chip, p, oob, NULL);

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}

		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   oob - eccpadbytes,
							   eccpadbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_read_data_op(chip, oob, i, false, false);
		if (ret)
			return ret;
	}

	return max_bitflips;
}
  2940. /**
  2941. * nand_transfer_oob - [INTERN] Transfer oob to client buffer
  2942. * @chip: NAND chip object
  2943. * @oob: oob destination address
  2944. * @ops: oob ops structure
  2945. * @len: size of oob to transfer
  2946. */
  2947. static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
  2948. struct mtd_oob_ops *ops, size_t len)
  2949. {
  2950. struct mtd_info *mtd = nand_to_mtd(chip);
  2951. int ret;
  2952. switch (ops->mode) {
  2953. case MTD_OPS_PLACE_OOB:
  2954. case MTD_OPS_RAW:
  2955. memcpy(oob, chip->oob_poi + ops->ooboffs, len);
  2956. return oob + len;
  2957. case MTD_OPS_AUTO_OOB:
  2958. ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
  2959. ops->ooboffs, len);
  2960. BUG_ON(ret);
  2961. return oob + len;
  2962. default:
  2963. BUG();
  2964. }
  2965. return NULL;
  2966. }
  2967. static void rawnand_enable_cont_reads(struct nand_chip *chip, unsigned int page,
  2968. u32 readlen, int col)
  2969. {
  2970. struct mtd_info *mtd = nand_to_mtd(chip);
  2971. unsigned int first_page, last_page;
  2972. chip->cont_read.ongoing = false;
  2973. if (!chip->controller->supported_op.cont_read)
  2974. return;
  2975. /*
  2976. * Don't bother making any calculations if the length is too small.
  2977. * Side effect: avoids possible integer underflows below.
  2978. */
  2979. if (readlen < (2 * mtd->writesize))
  2980. return;
  2981. /* Derive the page where continuous read should start (the first full page read) */
  2982. first_page = page;
  2983. if (col)
  2984. first_page++;
  2985. /* Derive the page where continuous read should stop (the last full page read) */
  2986. last_page = page + ((col + readlen) / mtd->writesize) - 1;
  2987. /* Configure and enable continuous read when suitable */
  2988. if (first_page < last_page) {
  2989. chip->cont_read.first_page = first_page;
  2990. chip->cont_read.last_page = last_page;
  2991. chip->cont_read.ongoing = true;
  2992. /* May reset the ongoing flag */
  2993. rawnand_cap_cont_reads(chip);
  2994. }
  2995. }
  2996. static void rawnand_cont_read_skip_first_page(struct nand_chip *chip, unsigned int page)
  2997. {
  2998. if (!chip->cont_read.ongoing || page != chip->cont_read.first_page)
  2999. return;
  3000. chip->cont_read.first_page++;
  3001. rawnand_cap_cont_reads(chip);
  3002. }
  3003. /**
  3004. * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
  3005. * @chip: NAND chip object
  3006. * @retry_mode: the retry mode to use
  3007. *
  3008. * Some vendors supply a special command to shift the Vt threshold, to be used
  3009. * when there are too many bitflips in a page (i.e., ECC error). After setting
  3010. * a new threshold, the host should retry reading the page.
  3011. */
  3012. static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
  3013. {
  3014. pr_debug("setting READ RETRY mode %d\n", retry_mode);
  3015. if (retry_mode >= chip->read_retries)
  3016. return -EINVAL;
  3017. if (!chip->ops.setup_read_retry)
  3018. return -EOPNOTSUPP;
  3019. return chip->ops.setup_read_retry(chip, retry_mode);
  3020. }
  3021. static void nand_wait_readrdy(struct nand_chip *chip)
  3022. {
  3023. const struct nand_interface_config *conf;
  3024. if (!(chip->options & NAND_NEED_READRDY))
  3025. return;
  3026. conf = nand_get_interface_config(chip);
  3027. WARN_ON(nand_wait_rdy_op(chip, NAND_COMMON_TIMING_MS(conf, tR_max), 0));
  3028. }
/**
 * nand_do_read_ops - [INTERN] Read data with ECC
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob ops structure
 *
 * Internal function. Called with chip held.
 *
 * Returns the maximum number of bitflips seen in any page, -EBADMSG on an
 * uncorrectable error that survived all retry modes, or another negative
 * error code.
 */
static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int chipnr, page, realpage, col, bytes, aligned, oob_required;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint32_t max_oobsize = mtd_oobavail(mtd, ops);
	uint8_t *bufpoi, *oob, *buf;
	int use_bounce_buf;
	unsigned int max_bitflips = 0;
	int retry_mode = 0;
	bool ecc_fail = false;

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, from, readlen))
		return -EIO;

	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Byte offset inside the first page */
	col = (int)(from & (mtd->writesize - 1));

	buf = ops->datbuf;
	oob = ops->oobbuf;
	oob_required = oob ? 1 : 0;

	/* Continuous reads are only set up for non-raw (ECC) reads */
	if (likely(ops->mode != MTD_OPS_RAW))
		rawnand_enable_cont_reads(chip, page, readlen, col);

	while (1) {
		/* Snapshot stats so per-page failures can be detected */
		struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;

		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);

		/*
		 * Partial-page reads always bounce; full-page DMA reads
		 * bounce when the caller's buffer is unsuitable for DMA.
		 */
		if (!aligned)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/* Is the current page in the buffer? */
		if (realpage != chip->pagecache.page || oob) {
			bufpoi = use_bounce_buf ? chip->data_buf : buf;

			if (use_bounce_buf && aligned)
				pr_debug("%s: using read bounce buffer for buf@%p\n",
					 __func__, buf);

read_retry:
			/*
			 * Now read the page into the buffer. Absent an error,
			 * the read methods return max bitflips per ecc step.
			 */
			if (unlikely(ops->mode == MTD_OPS_RAW))
				ret = chip->ecc.read_page_raw(chip, bufpoi,
							      oob_required,
							      page);
			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
				 !oob)
				ret = chip->ecc.read_subpage(chip, col, bytes,
							     bufpoi, page);
			else
				ret = chip->ecc.read_page(chip, bufpoi,
							  oob_required, page);
			if (ret < 0) {
				if (use_bounce_buf)
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				break;
			}

			/*
			 * Copy back the data in the initial buffer when reading
			 * partial pages or when a bounce buffer is required.
			 */
			if (use_bounce_buf) {
				/*
				 * Cache the page only when it was read whole,
				 * cleanly, with ECC applied; otherwise make
				 * sure stale data can't be served later.
				 */
				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
				    !(mtd->ecc_stats.failed - ecc_stats.failed) &&
				    (ops->mode != MTD_OPS_RAW)) {
					chip->pagecache.page = realpage;
					chip->pagecache.bitflips = ret;
				} else {
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				}
				memcpy(buf, bufpoi + col, bytes);
			}

			if (unlikely(oob)) {
				int toread = min(oobreadlen, max_oobsize);

				if (toread) {
					oob = nand_transfer_oob(chip, oob, ops,
								toread);
					oobreadlen -= toread;
				}
			}

			nand_wait_readrdy(chip);

			if (mtd->ecc_stats.failed - ecc_stats.failed) {
				if (retry_mode + 1 < chip->read_retries) {
					retry_mode++;
					ret = nand_setup_read_retry(chip,
								    retry_mode);
					if (ret < 0)
						break;

					/* Reset ecc_stats; retry */
					mtd->ecc_stats = ecc_stats;
					goto read_retry;
				} else {
					/* No more retry modes; real failure */
					ecc_fail = true;
				}
			}

			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		} else {
			/* Serve the request straight from the page cache */
			memcpy(buf, chip->data_buf + col, bytes);
			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips,
					     chip->pagecache.bitflips);

			/* This page won't be part of the continuous read */
			rawnand_cont_read_skip_first_page(chip, page);
		}

		readlen -= bytes;

		/* Reset to retry mode 0 */
		if (retry_mode) {
			ret = nand_setup_read_retry(chip, 0);
			if (ret < 0)
				break;
			retry_mode = 0;
		}

		if (!readlen)
			break;

		/* For subsequent reads align to page boundary */
		col = 0;
		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	/* A continuous read should have been consumed entirely by now */
	if (WARN_ON_ONCE(chip->cont_read.ongoing))
		chip->cont_read.ongoing = false;

	ops->retlen = ops->len - (size_t) readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;

	if (ret < 0)
		return ret;

	if (ecc_fail)
		return -EBADMSG;

	return max_bitflips;
}
  3187. /**
  3188. * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
  3189. * @chip: nand chip info structure
  3190. * @page: page number to read
  3191. */
  3192. int nand_read_oob_std(struct nand_chip *chip, int page)
  3193. {
  3194. struct mtd_info *mtd = nand_to_mtd(chip);
  3195. return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
  3196. }
  3197. EXPORT_SYMBOL(nand_read_oob_std);
/**
 * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
 *			    with syndromes
 * @chip: nand chip info structure
 * @page: page number to read
 *
 * The OOB bytes are interleaved with the data on flash (one chunk of
 * prepad+ecc+postpad after each data step), so each chunk is fetched by
 * seeking past the data of its step.
 */
static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int length = mtd->oobsize;
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size;
	uint8_t *bufpoi = chip->oob_poi;
	int i, toread, sndrnd = 0, pos, ret;

	/* Position on the first OOB chunk (right after the first data step) */
	ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
	if (ret)
		return ret;
	for (i = 0; i < chip->ecc.steps; i++) {
		/* sndrnd is 0 only on the first pass: no reposition needed */
		if (sndrnd) {
			/* NOTE: this inner ret intentionally shadows the outer one */
			int ret;

			pos = eccsize + i * (eccsize + chunk);
			if (mtd->writesize > 512)
				/* Large page: move the column within the page */
				ret = nand_change_read_column_op(chip, pos,
								 NULL, 0,
								 false);
			else
				/* Small page: issue a fresh read at the offset */
				ret = nand_read_page_op(chip, page, pos, NULL,
							0);

			if (ret)
				return ret;
		} else
			sndrnd = 1;
		toread = min_t(int, length, chunk);

		ret = nand_read_data_op(chip, bufpoi, toread, false, false);
		if (ret)
			return ret;

		bufpoi += toread;
		length -= toread;
	}
	/* Trailing free OOB bytes after the last interleaved chunk */
	if (length > 0) {
		ret = nand_read_data_op(chip, bufpoi, length, false, false);
		if (ret)
			return ret;
	}

	return 0;
}
  3244. /**
  3245. * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
  3246. * @chip: nand chip info structure
  3247. * @page: page number to write
  3248. */
  3249. int nand_write_oob_std(struct nand_chip *chip, int page)
  3250. {
  3251. struct mtd_info *mtd = nand_to_mtd(chip);
  3252. return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
  3253. mtd->oobsize);
  3254. }
  3255. EXPORT_SYMBOL(nand_write_oob_std);
/**
 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
 *			     with syndrome - only for large page flash
 * @chip: nand chip info structure
 * @page: page number to write
 */
static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size, length = mtd->oobsize;
	int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
	const uint8_t *bufpoi = chip->oob_poi;

	/*
	 * data-ecc-data-ecc ... ecc-oob
	 * or
	 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
	 */
	if (!chip->ecc.prepad && !chip->ecc.postpad) {
		/* No padding: all OOB sits at the end; skip the step loop */
		pos = steps * (eccsize + chunk);
		steps = 0;
	} else
		pos = eccsize;

	ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; i < steps; i++) {
		/* sndcmd is 0 only on the first pass: already positioned */
		if (sndcmd) {
			if (mtd->writesize <= 512) {
				/*
				 * Small page: no column-change command, so
				 * pad the data area with 0xFF to advance.
				 */
				uint32_t fill = 0xFFFFFFFF;

				len = eccsize;
				while (len > 0) {
					int num = min_t(int, len, 4);

					ret = nand_write_data_op(chip, &fill,
								 num, false);
					if (ret)
						return ret;

					len -= num;
				}
			} else {
				pos = eccsize + i * (eccsize + chunk);
				ret = nand_change_write_column_op(chip, pos,
								  NULL, 0,
								  false);
				if (ret)
					return ret;
			}
		} else
			sndcmd = 1;
		len = min_t(int, length, chunk);

		ret = nand_write_data_op(chip, bufpoi, len, false);
		if (ret)
			return ret;

		bufpoi += len;
		length -= len;
	}
	/* Trailing free OOB bytes after the last interleaved chunk */
	if (length > 0) {
		ret = nand_write_data_op(chip, bufpoi, length, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
/**
 * nand_do_read_oob - [INTERN] NAND read out-of-band
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob operations description structure
 *
 * NAND read out-of-band data from the spare area.
 *
 * Returns the maximum number of bitflips reported by any page's OOB read,
 * -EBADMSG when ECC failures were recorded, or another negative error code.
 */
static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int max_bitflips = 0;
	int page, realpage, chipnr;
	struct mtd_ecc_stats stats;
	int readlen = ops->ooblen;
	int len;
	uint8_t *buf = ops->oobbuf;
	int ret = 0;

	pr_debug("%s: from = 0x%08Lx, len = %i\n",
			__func__, (unsigned long long)from, readlen);

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, from, readlen))
		return -EIO;

	/* Snapshot stats to detect ECC failures caused by this request */
	stats = mtd->ecc_stats;

	len = mtd_oobavail(mtd, ops);

	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Shift to get page */
	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	while (1) {
		if (ops->mode == MTD_OPS_RAW)
			ret = chip->ecc.read_oob_raw(chip, page);
		else
			ret = chip->ecc.read_oob(chip, page);

		if (ret < 0)
			break;

		len = min(len, readlen);
		buf = nand_transfer_oob(chip, buf, ops, len);

		nand_wait_readrdy(chip);

		max_bitflips = max_t(unsigned int, max_bitflips, ret);

		readlen -= len;
		if (!readlen)
			break;

		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->oobretlen = ops->ooblen - readlen;

	if (ret < 0)
		return ret;

	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;

	return max_bitflips;
}
  3382. /**
  3383. * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
  3384. * @mtd: MTD device structure
  3385. * @from: offset to read from
  3386. * @ops: oob operation description structure
  3387. *
  3388. * NAND read data and/or out-of-band data.
  3389. */
  3390. static int nand_read_oob(struct mtd_info *mtd, loff_t from,
  3391. struct mtd_oob_ops *ops)
  3392. {
  3393. struct nand_chip *chip = mtd_to_nand(mtd);
  3394. struct mtd_ecc_stats old_stats;
  3395. int ret;
  3396. ops->retlen = 0;
  3397. if (ops->mode != MTD_OPS_PLACE_OOB &&
  3398. ops->mode != MTD_OPS_AUTO_OOB &&
  3399. ops->mode != MTD_OPS_RAW)
  3400. return -ENOTSUPP;
  3401. nand_get_device(chip);
  3402. old_stats = mtd->ecc_stats;
  3403. if (!ops->datbuf)
  3404. ret = nand_do_read_oob(chip, from, ops);
  3405. else
  3406. ret = nand_do_read_ops(chip, from, ops);
  3407. if (ops->stats) {
  3408. ops->stats->uncorrectable_errors +=
  3409. mtd->ecc_stats.failed - old_stats.failed;
  3410. ops->stats->corrected_bitflips +=
  3411. mtd->ecc_stats.corrected - old_stats.corrected;
  3412. }
  3413. nand_release_device(chip);
  3414. return ret;
  3415. }
  3416. /**
  3417. * nand_write_page_raw_notsupp - dummy raw page write function
  3418. * @chip: nand chip info structure
  3419. * @buf: data buffer
  3420. * @oob_required: must write chip->oob_poi to OOB
  3421. * @page: page number to write
  3422. *
  3423. * Returns -ENOTSUPP unconditionally.
  3424. */
  3425. int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
  3426. int oob_required, int page)
  3427. {
  3428. return -ENOTSUPP;
  3429. }
  3430. /**
  3431. * nand_write_page_raw - [INTERN] raw page write function
  3432. * @chip: nand chip info structure
  3433. * @buf: data buffer
  3434. * @oob_required: must write chip->oob_poi to OOB
  3435. * @page: page number to write
  3436. *
  3437. * Not for syndrome calculating ECC controllers, which use a special oob layout.
  3438. */
  3439. int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
  3440. int oob_required, int page)
  3441. {
  3442. struct mtd_info *mtd = nand_to_mtd(chip);
  3443. int ret;
  3444. ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
  3445. if (ret)
  3446. return ret;
  3447. if (oob_required) {
  3448. ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
  3449. false);
  3450. if (ret)
  3451. return ret;
  3452. }
  3453. return nand_prog_page_end_op(chip);
  3454. }
  3455. EXPORT_SYMBOL(nand_write_page_raw);
  3456. /**
  3457. * nand_monolithic_write_page_raw - Monolithic page write in raw mode
  3458. * @chip: NAND chip info structure
  3459. * @buf: data buffer to write
  3460. * @oob_required: must write chip->oob_poi to OOB
  3461. * @page: page number to write
  3462. *
  3463. * This is a raw page write, ie. without any error detection/correction.
  3464. * Monolithic means we are requesting all the relevant data (main plus
  3465. * eventually OOB) to be sent over the bus and effectively programmed
  3466. * into the NAND chip arrays in a single operation. This is an
  3467. * alternative to nand_write_page_raw(), which first sends the main
  3468. * data, then eventually send the OOB data by latching more data
  3469. * cycles on the NAND bus, and finally sends the program command to
  3470. * synchronyze the NAND chip cache.
  3471. */
  3472. int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
  3473. int oob_required, int page)
  3474. {
  3475. struct mtd_info *mtd = nand_to_mtd(chip);
  3476. unsigned int size = mtd->writesize;
  3477. u8 *write_buf = (u8 *)buf;
  3478. if (oob_required) {
  3479. size += mtd->oobsize;
  3480. if (buf != chip->data_buf) {
  3481. write_buf = nand_get_data_buf(chip);
  3482. memcpy(write_buf, buf, mtd->writesize);
  3483. }
  3484. }
  3485. return nand_prog_page_op(chip, page, 0, write_buf, size);
  3486. }
  3487. EXPORT_SYMBOL(nand_monolithic_write_page_raw);
/**
 * nand_write_page_raw_syndrome - [INTERN] raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * We need a special oob layout and handling even when ECC isn't checked.
 *
 * Per-step on-flash layout is: data [prepad] ecc [postpad], so data and
 * OOB bytes are written interleaved, in stream order.
 */
static int nand_write_page_raw_syndrome(struct nand_chip *chip,
					const uint8_t *buf, int oob_required,
					int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (steps = chip->ecc.steps; steps > 0; steps--) {
		/* Data portion of this step */
		ret = nand_write_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* ECC bytes come straight from oob_poi (raw write) */
		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Remaining free OOB bytes after the last interleaved chunk */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_write_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
  3541. /**
  3542. * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
  3543. * @chip: nand chip info structure
  3544. * @buf: data buffer
  3545. * @oob_required: must write chip->oob_poi to OOB
  3546. * @page: page number to write
  3547. */
  3548. static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
  3549. int oob_required, int page)
  3550. {
  3551. struct mtd_info *mtd = nand_to_mtd(chip);
  3552. int i, eccsize = chip->ecc.size, ret;
  3553. int eccbytes = chip->ecc.bytes;
  3554. int eccsteps = chip->ecc.steps;
  3555. uint8_t *ecc_calc = chip->ecc.calc_buf;
  3556. const uint8_t *p = buf;
  3557. /* Software ECC calculation */
  3558. for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
  3559. chip->ecc.calculate(chip, p, &ecc_calc[i]);
  3560. ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
  3561. chip->ecc.total);
  3562. if (ret)
  3563. return ret;
  3564. return chip->ecc.write_page_raw(chip, buf, 1, page);
  3565. }
/**
 * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Write the data one ECC step at a time, arming the hardware ECC
	 * engine before each chunk and collecting the computed code after.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	/* Place the computed ECC codes into the OOB buffer per the layout */
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
/**
 * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
 * @chip: nand chip info structure
 * @offset: column address of subpage within the page
 * @data_len: data length
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 */
static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
				    uint32_t data_len, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	uint8_t *oob_buf = chip->oob_poi;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	int ecc_steps = chip->ecc.steps;
	/* Range of ECC steps actually covered by [offset, offset + data_len) */
	uint32_t start_step = offset / ecc_size;
	uint32_t end_step = (offset + data_len - 1) / ecc_size;
	int oob_bytes = mtd->oobsize / ecc_steps;
	int step, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* The full page is always written; untouched steps carry 0xff */
	for (step = 0; step < ecc_steps; step++) {
		/* configure controller for WRITE access */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		/* write data (untouched subpages already masked by 0xFF) */
		ret = nand_write_data_op(chip, buf, ecc_size, false);
		if (ret)
			return ret;

		/* mask ECC of un-touched subpages by padding 0xFF */
		if ((step < start_step) || (step > end_step))
			memset(ecc_calc, 0xff, ecc_bytes);
		else
			chip->ecc.calculate(chip, buf, ecc_calc);

		/* mask OOB of un-touched subpages by padding 0xFF */
		/* if oob_required, preserve OOB metadata of written subpage */
		if (!oob_required || (step < start_step) || (step > end_step))
			memset(oob_buf, 0xff, oob_bytes);

		buf += ecc_size;
		ecc_calc += ecc_bytes;
		oob_buf += oob_bytes;
	}

	/* copy calculated ECC for whole page to chip->buffer->oob */
	/* this include masked-value(0xFF) for unwritten subpages */
	ecc_calc = chip->ecc.calc_buf;
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* write OOB buffer to NAND device */
	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
/**
 * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * The hw generator calculates the error syndrome automatically. Therefore we
 * need a special oob layout and handling.
 */
static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	const uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Interleave data and OOB on the bus: each ECC step is written as
	 * data, then (optional) prepad, the ECC bytes, then (optional)
	 * postpad, walking @oob through chip->oob_poi as we go.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		chip->ecc.calculate(chip, p, oob);

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_write_data_op(chip, oob, i, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
  3717. /**
  3718. * nand_write_page - write one page
  3719. * @chip: NAND chip descriptor
  3720. * @offset: address offset within the page
  3721. * @data_len: length of actual data to be written
  3722. * @buf: the data to write
  3723. * @oob_required: must write chip->oob_poi to OOB
  3724. * @page: page number to write
  3725. * @raw: use _raw version of write_page
  3726. */
  3727. static int nand_write_page(struct nand_chip *chip, uint32_t offset,
  3728. int data_len, const uint8_t *buf, int oob_required,
  3729. int page, int raw)
  3730. {
  3731. struct mtd_info *mtd = nand_to_mtd(chip);
  3732. int status, subpage;
  3733. if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
  3734. chip->ecc.write_subpage)
  3735. subpage = offset || (data_len < mtd->writesize);
  3736. else
  3737. subpage = 0;
  3738. if (unlikely(raw))
  3739. status = chip->ecc.write_page_raw(chip, buf, oob_required,
  3740. page);
  3741. else if (subpage)
  3742. status = chip->ecc.write_subpage(chip, offset, data_len, buf,
  3743. oob_required, page);
  3744. else
  3745. status = chip->ecc.write_page(chip, buf, oob_required, page);
  3746. if (status < 0)
  3747. return status;
  3748. return 0;
  3749. }
#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)

/**
 * nand_do_write_ops - [INTERN] NAND write with ECC
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operations description structure
 *
 * NAND write with ECC.
 */
static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, realpage, page, column;
	uint32_t writelen = ops->len;
	uint32_t oobwritelen = ops->ooblen;
	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
	uint8_t *oob = ops->oobbuf;
	uint8_t *buf = ops->datbuf;
	int ret;
	int oob_required = oob ? 1 : 0;

	ops->retlen = 0;
	if (!writelen)
		return 0;

	/* Reject writes, which are not page aligned */
	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
		pr_notice("%s: attempt to write non page aligned data\n",
			   __func__);
		return -EINVAL;
	}

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, to, writelen))
		return -EIO;

	/* Column offset of @to within its first page */
	column = to & (mtd->writesize - 1);

	chipnr = (int)(to >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		ret = -EIO;
		goto err_out;
	}

	realpage = (int)(to >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Invalidate the page cache, when we write to the cached page */
	if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
	    ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
		chip->pagecache.page = -1;

	/* Don't allow multipage oob writes with offset */
	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
		ret = -EINVAL;
		goto err_out;
	}

	while (1) {
		int bytes = mtd->writesize;
		uint8_t *wbuf = buf;
		int use_bounce_buf;
		int part_pagewr = (column || writelen < mtd->writesize);

		/*
		 * Partial-page writes always use the bounce buffer so the
		 * untouched remainder of the page can be padded with 0xff;
		 * DMA controllers also need it for unsuitable user buffers.
		 */
		if (part_pagewr)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/*
		 * Copy the data from the initial buffer when doing partial page
		 * writes or when a bounce buffer is required.
		 */
		if (use_bounce_buf) {
			pr_debug("%s: using write bounce buffer for buf@%p\n",
				 __func__, buf);
			if (part_pagewr)
				bytes = min_t(int, bytes - column, writelen);
			wbuf = nand_get_data_buf(chip);
			memset(wbuf, 0xff, mtd->writesize);
			memcpy(&wbuf[column], buf, bytes);
		}

		if (unlikely(oob)) {
			size_t len = min(oobwritelen, oobmaxlen);

			oob = nand_fill_oob(chip, oob, len, ops);
			oobwritelen -= len;
		} else {
			/* We still need to erase leftover OOB data */
			memset(chip->oob_poi, 0xff, mtd->oobsize);
		}

		ret = nand_write_page(chip, column, bytes, wbuf,
				      oob_required, page,
				      (ops->mode == MTD_OPS_RAW));
		if (ret)
			break;

		writelen -= bytes;
		if (!writelen)
			break;

		/* Only the first page can start at a non-zero column */
		column = 0;
		buf += bytes;
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ops->retlen = ops->len - writelen;
	if (unlikely(oob))
		ops->oobretlen = ops->ooblen;

err_out:
	nand_deselect_target(chip);
	return ret;
}
  3862. /**
  3863. * panic_nand_write - [MTD Interface] NAND write with ECC
  3864. * @mtd: MTD device structure
  3865. * @to: offset to write to
  3866. * @len: number of bytes to write
  3867. * @retlen: pointer to variable to store the number of written bytes
  3868. * @buf: the data to write
  3869. *
  3870. * NAND write with ECC. Used when performing writes in interrupt context, this
  3871. * may for example be called by mtdoops when writing an oops while in panic.
  3872. */
  3873. static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
  3874. size_t *retlen, const uint8_t *buf)
  3875. {
  3876. struct nand_chip *chip = mtd_to_nand(mtd);
  3877. int chipnr = (int)(to >> chip->chip_shift);
  3878. struct mtd_oob_ops ops;
  3879. int ret;
  3880. nand_select_target(chip, chipnr);
  3881. /* Wait for the device to get ready */
  3882. panic_nand_wait(chip, 400);
  3883. memset(&ops, 0, sizeof(ops));
  3884. ops.len = len;
  3885. ops.datbuf = (uint8_t *)buf;
  3886. ops.mode = MTD_OPS_PLACE_OOB;
  3887. ret = nand_do_write_ops(chip, to, &ops);
  3888. *retlen = ops.retlen;
  3889. return ret;
  3890. }
  3891. /**
  3892. * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
  3893. * @mtd: MTD device structure
  3894. * @to: offset to write to
  3895. * @ops: oob operation description structure
  3896. */
  3897. static int nand_write_oob(struct mtd_info *mtd, loff_t to,
  3898. struct mtd_oob_ops *ops)
  3899. {
  3900. struct nand_chip *chip = mtd_to_nand(mtd);
  3901. int ret = 0;
  3902. ops->retlen = 0;
  3903. nand_get_device(chip);
  3904. switch (ops->mode) {
  3905. case MTD_OPS_PLACE_OOB:
  3906. case MTD_OPS_AUTO_OOB:
  3907. case MTD_OPS_RAW:
  3908. break;
  3909. default:
  3910. goto out;
  3911. }
  3912. if (!ops->datbuf)
  3913. ret = nand_do_write_oob(chip, to, ops);
  3914. else
  3915. ret = nand_do_write_ops(chip, to, ops);
  3916. out:
  3917. nand_release_device(chip);
  3918. return ret;
  3919. }
  3920. /**
  3921. * nand_erase - [MTD Interface] erase block(s)
  3922. * @mtd: MTD device structure
  3923. * @instr: erase instruction
  3924. *
  3925. * Erase one ore more blocks.
  3926. */
  3927. static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
  3928. {
  3929. return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
  3930. }
/**
 * nand_erase_nand - [INTERN] erase block(s)
 * @chip: NAND chip object
 * @instr: erase instruction
 * @allowbbt: allow erasing the bbt area
 *
 * Erase one ore more blocks.
 */
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
		    int allowbbt)
{
	int page, pages_per_block, ret, chipnr;
	loff_t len;

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
		 __func__, (unsigned long long)instr->addr,
		 (unsigned long long)instr->len);

	if (check_offs_len(chip, instr->addr, instr->len))
		return -EINVAL;

	/* Check if the region is secured */
	if (nand_region_is_secured(chip, instr->addr, instr->len))
		return -EIO;

	/* Grab the lock and see if the device is available */
	nand_get_device(chip);

	/* Shift to get first page */
	page = (int)(instr->addr >> chip->page_shift);
	chipnr = (int)(instr->addr >> chip->chip_shift);

	/* Calculate pages in each block */
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);

	/* Select the NAND device */
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		pr_debug("%s: device is write protected!\n",
			 __func__);
		ret = -EIO;
		goto erase_exit;
	}

	/* Loop through the pages */
	len = instr->len;

	while (len) {
		loff_t ofs = (loff_t)page << chip->page_shift;

		/* Check if we have a bad block, we do not erase bad blocks! */
		if (nand_block_checkbad(chip, ((loff_t) page) <<
					chip->page_shift, allowbbt)) {
			pr_warn("%s: attempt to erase a bad block at 0x%08llx\n",
				__func__, (unsigned long long)ofs);
			ret = -EIO;
			goto erase_exit;
		}

		/*
		 * Invalidate the page cache, if we erase the block which
		 * contains the current cached page.
		 */
		if (page <= chip->pagecache.page && chip->pagecache.page <
		    (page + pages_per_block))
			chip->pagecache.page = -1;

		/* nand_erase_op() takes a block index within this die */
		ret = nand_erase_op(chip, (page & chip->pagemask) >>
				    (chip->phys_erase_shift - chip->page_shift));
		if (ret) {
			pr_debug("%s: failed erase, page 0x%08x\n",
				 __func__, page);
			instr->fail_addr = ofs;
			goto erase_exit;
		}

		/* Increment page address and decrement length */
		len -= (1ULL << chip->phys_erase_shift);
		page += pages_per_block;

		/* Check, if we cross a chip boundary */
		if (len && !(page & chip->pagemask)) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ret = 0;

erase_exit:

	/* Deselect and wake up anyone waiting on the device */
	nand_deselect_target(chip);
	nand_release_device(chip);

	/* Return more or less happy */
	return ret;
}
  4013. /**
  4014. * nand_sync - [MTD Interface] sync
  4015. * @mtd: MTD device structure
  4016. *
  4017. * Sync is actually a wait for chip ready function.
  4018. */
  4019. static void nand_sync(struct mtd_info *mtd)
  4020. {
  4021. struct nand_chip *chip = mtd_to_nand(mtd);
  4022. pr_debug("%s: called\n", __func__);
  4023. /* Grab the lock and see if the device is available */
  4024. nand_get_device(chip);
  4025. /* Release it and go back */
  4026. nand_release_device(chip);
  4027. }
  4028. /**
  4029. * nand_block_isbad - [MTD Interface] Check if block at offset is bad
  4030. * @mtd: MTD device structure
  4031. * @offs: offset relative to mtd start
  4032. */
  4033. static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
  4034. {
  4035. struct nand_chip *chip = mtd_to_nand(mtd);
  4036. int chipnr = (int)(offs >> chip->chip_shift);
  4037. int ret;
  4038. /* Select the NAND device */
  4039. nand_get_device(chip);
  4040. nand_select_target(chip, chipnr);
  4041. ret = nand_block_checkbad(chip, offs, 0);
  4042. nand_deselect_target(chip);
  4043. nand_release_device(chip);
  4044. return ret;
  4045. }
  4046. /**
  4047. * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
  4048. * @mtd: MTD device structure
  4049. * @ofs: offset relative to mtd start
  4050. */
  4051. static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
  4052. {
  4053. int ret;
  4054. ret = nand_block_isbad(mtd, ofs);
  4055. if (ret) {
  4056. /* If it was bad already, return success and do nothing */
  4057. if (ret > 0)
  4058. return 0;
  4059. return ret;
  4060. }
  4061. return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
  4062. }
  4063. /**
  4064. * nand_suspend - [MTD Interface] Suspend the NAND flash
  4065. * @mtd: MTD device structure
  4066. *
  4067. * Returns 0 for success or negative error code otherwise.
  4068. */
  4069. static int nand_suspend(struct mtd_info *mtd)
  4070. {
  4071. struct nand_chip *chip = mtd_to_nand(mtd);
  4072. int ret = 0;
  4073. mutex_lock(&chip->lock);
  4074. if (chip->ops.suspend)
  4075. ret = chip->ops.suspend(chip);
  4076. if (!ret)
  4077. chip->suspended = 1;
  4078. mutex_unlock(&chip->lock);
  4079. return ret;
  4080. }
  4081. /**
  4082. * nand_resume - [MTD Interface] Resume the NAND flash
  4083. * @mtd: MTD device structure
  4084. */
  4085. static void nand_resume(struct mtd_info *mtd)
  4086. {
  4087. struct nand_chip *chip = mtd_to_nand(mtd);
  4088. mutex_lock(&chip->lock);
  4089. if (chip->suspended) {
  4090. if (chip->ops.resume)
  4091. chip->ops.resume(chip);
  4092. chip->suspended = 0;
  4093. } else {
  4094. pr_err("%s called for a chip which is not in suspended state\n",
  4095. __func__);
  4096. }
  4097. mutex_unlock(&chip->lock);
  4098. wake_up_all(&chip->resume_wq);
  4099. }
/**
 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
 * prevent further operations
 * @mtd: MTD device structure
 */
static void nand_shutdown(struct mtd_info *mtd)
{
	/* Suspending quiesces the chip and blocks new operations */
	nand_suspend(mtd);
}
  4109. /**
  4110. * nand_lock - [MTD Interface] Lock the NAND flash
  4111. * @mtd: MTD device structure
  4112. * @ofs: offset byte address
  4113. * @len: number of bytes to lock (must be a multiple of block/page size)
  4114. */
  4115. static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
  4116. {
  4117. struct nand_chip *chip = mtd_to_nand(mtd);
  4118. if (!chip->ops.lock_area)
  4119. return -ENOTSUPP;
  4120. return chip->ops.lock_area(chip, ofs, len);
  4121. }
  4122. /**
  4123. * nand_unlock - [MTD Interface] Unlock the NAND flash
  4124. * @mtd: MTD device structure
  4125. * @ofs: offset byte address
  4126. * @len: number of bytes to unlock (must be a multiple of block/page size)
  4127. */
  4128. static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
  4129. {
  4130. struct nand_chip *chip = mtd_to_nand(mtd);
  4131. if (!chip->ops.unlock_area)
  4132. return -ENOTSUPP;
  4133. return chip->ops.unlock_area(chip, ofs, len);
  4134. }
/* Set default functions */
static void nand_set_defaults(struct nand_chip *chip)
{
	/* If no controller is provided, use the dummy, legacy one. */
	if (!chip->controller) {
		chip->controller = &chip->legacy.dummy_controller;
		nand_controller_init(chip->controller);
	}

	/* Fill in legacy (non-exec_op) hooks that the driver left unset */
	nand_legacy_set_defaults(chip);

	/* Default to byte alignment for DMA-capable controllers */
	if (!chip->buf_align)
		chip->buf_align = 1;
}
  4147. /* Sanitize ONFI strings so we can safely print them */
  4148. void sanitize_string(uint8_t *s, size_t len)
  4149. {
  4150. ssize_t i;
  4151. /* Null terminate */
  4152. s[len - 1] = 0;
  4153. /* Remove non printable chars */
  4154. for (i = 0; i < len - 1; i++) {
  4155. if (s[i] < ' ' || s[i] > 127)
  4156. s[i] = '?';
  4157. }
  4158. /* Remove trailing spaces */
  4159. strim(s);
  4160. }
  4161. /*
  4162. * nand_id_has_period - Check if an ID string has a given wraparound period
  4163. * @id_data: the ID string
  4164. * @arrlen: the length of the @id_data array
  4165. * @period: the period of repitition
  4166. *
  4167. * Check if an ID string is repeated within a given sequence of bytes at
  4168. * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
  4169. * period of 3). This is a helper function for nand_id_len(). Returns non-zero
  4170. * if the repetition has a period of @period; otherwise, returns zero.
  4171. */
  4172. static int nand_id_has_period(u8 *id_data, int arrlen, int period)
  4173. {
  4174. int i, j;
  4175. for (i = 0; i < period; i++)
  4176. for (j = i + period; j < arrlen; j += period)
  4177. if (id_data[i] != id_data[j])
  4178. return 0;
  4179. return 1;
  4180. }
  4181. /*
  4182. * nand_id_len - Get the length of an ID string returned by CMD_READID
  4183. * @id_data: the ID string
  4184. * @arrlen: the length of the @id_data array
  4185. * Returns the length of the ID string, according to known wraparound/trailing
  4186. * zero patterns. If no pattern exists, returns the length of the array.
  4187. */
  4188. static int nand_id_len(u8 *id_data, int arrlen)
  4189. {
  4190. int last_nonzero, period;
  4191. /* Find last non-zero byte */
  4192. for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
  4193. if (id_data[last_nonzero])
  4194. break;
  4195. /* All zeros */
  4196. if (last_nonzero < 0)
  4197. return 0;
  4198. /* Calculate wraparound period */
  4199. for (period = 1; period < arrlen; period++)
  4200. if (nand_id_has_period(id_data, arrlen, period))
  4201. break;
  4202. /* There's a repeated pattern */
  4203. if (period < arrlen)
  4204. return period;
  4205. /* There are trailing zeros */
  4206. if (last_nonzero < arrlen - 1)
  4207. return last_nonzero + 1;
  4208. /* No pattern detected */
  4209. return arrlen;
  4210. }
  4211. /* Extract the bits of per cell from the 3rd byte of the extended ID */
  4212. static int nand_get_bits_per_cell(u8 cellinfo)
  4213. {
  4214. int bits;
  4215. bits = cellinfo & NAND_CI_CELLTYPE_MSK;
  4216. bits >>= NAND_CI_CELLTYPE_SHIFT;
  4217. return bits + 1;
  4218. }
/*
 * Many new NAND share similar device ID codes, which represent the size of the
 * chip. The rest of the parameters must be decoded according to generic or
 * manufacturer-specific "extended ID" decoding patterns.
 */
void nand_decode_ext_id(struct nand_chip *chip)
{
	struct nand_memory_organization *memorg;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int extid;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	/* The 3rd id byte holds MLC / multichip data */
	memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
	/* The 4th id byte is the important one */
	extid = id_data[3];

	/* Calc pagesize: low 2 bits encode 1KiB << n */
	memorg->pagesize = 1024 << (extid & 0x03);
	mtd->writesize = memorg->pagesize;
	extid >>= 2;
	/* Calc oobsize: 8 or 16 bytes per 512 bytes of page data */
	memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
	mtd->oobsize = memorg->oobsize;
	extid >>= 2;
	/* Calc blocksize. Blocksize is multiples of 64KiB */
	memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
				       memorg->pagesize;
	mtd->erasesize = (64 * 1024) << (extid & 0x03);
	extid >>= 2;
	/* Get buswidth information */
	if (extid & 0x1)
		chip->options |= NAND_BUSWIDTH_16;
}
EXPORT_SYMBOL_GPL(nand_decode_ext_id);
  4253. /*
  4254. * Old devices have chip data hardcoded in the device ID table. nand_decode_id
  4255. * decodes a matching ID table entry and assigns the MTD size parameters for
  4256. * the chip.
  4257. */
  4258. static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
  4259. {
  4260. struct mtd_info *mtd = nand_to_mtd(chip);
  4261. struct nand_memory_organization *memorg;
  4262. memorg = nanddev_get_memorg(&chip->base);
  4263. memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
  4264. mtd->erasesize = type->erasesize;
  4265. memorg->pagesize = type->pagesize;
  4266. mtd->writesize = memorg->pagesize;
  4267. memorg->oobsize = memorg->pagesize / 32;
  4268. mtd->oobsize = memorg->oobsize;
  4269. /* All legacy ID NAND are small-page, SLC */
  4270. memorg->bits_per_cell = 1;
  4271. }
  4272. /*
  4273. * Set the bad block marker/indicator (BBM/BBI) patterns according to some
  4274. * heuristic patterns using various detected parameters (e.g., manufacturer,
  4275. * page size, cell-type information).
  4276. */
  4277. static void nand_decode_bbm_options(struct nand_chip *chip)
  4278. {
  4279. struct mtd_info *mtd = nand_to_mtd(chip);
  4280. /* Set the bad block position */
  4281. if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
  4282. chip->badblockpos = NAND_BBM_POS_LARGE;
  4283. else
  4284. chip->badblockpos = NAND_BBM_POS_SMALL;
  4285. }
  4286. static inline bool is_full_id_nand(struct nand_flash_dev *type)
  4287. {
  4288. return type->id_len;
  4289. }
/*
 * Match @type against the full ID string and, on a hit, populate the memory
 * organization and ECC requirements from the table entry. Returns true on a
 * successful match, false on mismatch or allocation failure.
 */
static bool find_full_id_nand(struct nand_chip *chip,
			      struct nand_flash_dev *type)
{
	struct nand_device *base = &chip->base;
	struct nand_ecc_props requirements;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	/* Compare the first id_len bytes of the read ID string */
	if (!strncmp(type->id, id_data, type->id_len)) {
		/* Geometry comes from the table entry */
		memorg->pagesize = type->pagesize;
		mtd->writesize = memorg->pagesize;
		memorg->pages_per_eraseblock = type->erasesize /
					       type->pagesize;
		mtd->erasesize = type->erasesize;
		memorg->oobsize = type->oobsize;
		mtd->oobsize = memorg->oobsize;

		/* The 3rd id byte holds MLC / multichip data */
		memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
		memorg->eraseblocks_per_lun =
			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
					   memorg->pagesize *
					   memorg->pages_per_eraseblock);
		chip->options |= type->options;

		/* Record the ECC strength/step size this chip requires */
		requirements.strength = NAND_ECC_STRENGTH(type);
		requirements.step_size = NAND_ECC_STEP(type);
		nanddev_set_ecc_requirements(base, &requirements);

		chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
		if (!chip->parameters.model)
			return false;

		return true;
	}
	return false;
}
  4323. /*
  4324. * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
  4325. * compliant and does not have a full-id or legacy-id entry in the nand_ids
  4326. * table.
  4327. */
  4328. static void nand_manufacturer_detect(struct nand_chip *chip)
  4329. {
  4330. /*
  4331. * Try manufacturer detection if available and use
  4332. * nand_decode_ext_id() otherwise.
  4333. */
  4334. if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
  4335. chip->manufacturer.desc->ops->detect) {
  4336. struct nand_memory_organization *memorg;
  4337. memorg = nanddev_get_memorg(&chip->base);
  4338. /* The 3rd id byte holds MLC / multichip data */
  4339. memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
  4340. chip->manufacturer.desc->ops->detect(chip);
  4341. } else {
  4342. nand_decode_ext_id(chip);
  4343. }
  4344. }
  4345. /*
  4346. * Manufacturer initialization. This function is called for all NANDs including
  4347. * ONFI and JEDEC compliant ones.
  4348. * Manufacturer drivers should put all their specific initialization code in
  4349. * their ->init() hook.
  4350. */
  4351. static int nand_manufacturer_init(struct nand_chip *chip)
  4352. {
  4353. if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
  4354. !chip->manufacturer.desc->ops->init)
  4355. return 0;
  4356. return chip->manufacturer.desc->ops->init(chip);
  4357. }
  4358. /*
  4359. * Manufacturer cleanup. This function is called for all NANDs including
  4360. * ONFI and JEDEC compliant ones.
  4361. * Manufacturer drivers should put all their specific cleanup code in their
  4362. * ->cleanup() hook.
  4363. */
  4364. static void nand_manufacturer_cleanup(struct nand_chip *chip)
  4365. {
  4366. /* Release manufacturer private data */
  4367. if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
  4368. chip->manufacturer.desc->ops->cleanup)
  4369. chip->manufacturer.desc->ops->cleanup(chip);
  4370. }
  4371. static const char *
  4372. nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc)
  4373. {
  4374. return manufacturer_desc ? manufacturer_desc->name : "Unknown";
  4375. }
  4376. static void rawnand_check_data_only_read_support(struct nand_chip *chip)
  4377. {
  4378. /* Use an arbitrary size for the check */
  4379. if (!nand_read_data_op(chip, NULL, SZ_512, true, true))
  4380. chip->controller->supported_op.data_only_read = 1;
  4381. }
  4382. static void rawnand_early_check_supported_ops(struct nand_chip *chip)
  4383. {
  4384. /* The supported_op fields should not be set by individual drivers */
  4385. WARN_ON_ONCE(chip->controller->supported_op.data_only_read);
  4386. if (!nand_has_exec_op(chip))
  4387. return;
  4388. rawnand_check_data_only_read_support(chip);
  4389. }
  4390. static void rawnand_check_cont_read_support(struct nand_chip *chip)
  4391. {
  4392. struct mtd_info *mtd = nand_to_mtd(chip);
  4393. if (!chip->parameters.supports_read_cache)
  4394. return;
  4395. if (chip->read_retries)
  4396. return;
  4397. if (!nand_lp_exec_cont_read_page_op(chip, 0, 0, NULL,
  4398. mtd->writesize, true))
  4399. chip->controller->supported_op.cont_read = 1;
  4400. }
  4401. static void rawnand_late_check_supported_ops(struct nand_chip *chip)
  4402. {
  4403. /* The supported_op fields should not be set by individual drivers */
  4404. WARN_ON_ONCE(chip->controller->supported_op.cont_read);
  4405. /*
  4406. * Too many devices do not support sequential cached reads with on-die
  4407. * ECC correction enabled, so in this case refuse to perform the
  4408. * automation.
  4409. */
  4410. if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_DIE)
  4411. return;
  4412. if (!nand_has_exec_op(chip))
  4413. return;
  4414. /*
  4415. * For now, continuous reads can only be used with the core page helpers.
  4416. * This can be extended later.
  4417. */
  4418. if (!(chip->ecc.read_page == nand_read_page_hwecc ||
  4419. chip->ecc.read_page == nand_read_page_syndrome ||
  4420. chip->ecc.read_page == nand_read_page_swecc))
  4421. return;
  4422. rawnand_check_cont_read_support(chip);
  4423. }
/*
 * Get the flash and manufacturer id and lookup if the type is supported.
 */
static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
	const struct nand_manufacturer_desc *manufacturer_desc;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int busw, ret;
	u8 *id_data = chip->id.data;
	u8 maf_id, dev_id;
	u64 targetsize;

	/*
	 * Let's start by initializing memorg fields that might be left
	 * unassigned by the ID-based detection logic.
	 */
	memorg = nanddev_get_memorg(&chip->base);
	memorg->planes_per_lun = 1;
	memorg->luns_per_target = 1;

	/*
	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
	 * after power-up.
	 */
	ret = nand_reset(chip, 0);
	if (ret)
		return ret;

	/* Select the device */
	nand_select_target(chip, 0);

	rawnand_early_check_supported_ops(chip);

	/* Send the command for reading device ID */
	ret = nand_readid_op(chip, 0, id_data, 2);
	if (ret)
		return ret;

	/* Read manufacturer and device IDs */
	maf_id = id_data[0];
	dev_id = id_data[1];

	/*
	 * Try again to make sure, as some systems the bus-hold or other
	 * interface concerns can cause random data which looks like a
	 * possibly credible NAND flash to appear. If the two results do
	 * not match, ignore the device completely.
	 */

	/* Read entire ID string */
	ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
	if (ret)
		return ret;

	if (id_data[0] != maf_id || id_data[1] != dev_id) {
		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
			maf_id, dev_id, id_data[0], id_data[1]);
		return -ENODEV;
	}

	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));

	/* Try to identify manufacturer */
	manufacturer_desc = nand_get_manufacturer_desc(maf_id);
	chip->manufacturer.desc = manufacturer_desc;

	if (!type)
		type = nand_flash_ids;

	/*
	 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
	 * override it.
	 * This is required to make sure initial NAND bus width set by the
	 * NAND controller driver is coherent with the real NAND bus width
	 * (extracted by auto-detection code).
	 */
	busw = chip->options & NAND_BUSWIDTH_16;
	/*
	 * The flag is only set (never cleared), reset it to its default value
	 * before starting auto-detection.
	 */
	chip->options &= ~NAND_BUSWIDTH_16;

	/* Full-ID table entries take precedence over device-ID-only matches. */
	for (; type->name != NULL; type++) {
		if (is_full_id_nand(type)) {
			if (find_full_id_nand(chip, type))
				goto ident_done;
		} else if (dev_id == type->dev_id) {
			break;
		}
	}

	if (!type->name || !type->pagesize) {
		/* Check if the chip is ONFI compliant */
		ret = nand_onfi_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;

		/* Check if the chip is JEDEC compliant */
		ret = nand_jedec_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;
	}

	if (!type->name)
		return -ENODEV;

	chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
	if (!chip->parameters.model)
		return -ENOMEM;

	/*
	 * Table entries without a page size rely on manufacturer-specific
	 * decoding of the extended ID bytes; full entries decode directly.
	 */
	if (!type->pagesize)
		nand_manufacturer_detect(chip);
	else
		nand_decode_id(chip, type);

	/* Get chip options */
	chip->options |= type->options;

	memorg->eraseblocks_per_lun =
			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
					   memorg->pagesize *
					   memorg->pages_per_eraseblock);

ident_done:
	if (!mtd->name)
		mtd->name = chip->parameters.model;

	if (chip->options & NAND_BUSWIDTH_AUTO) {
		WARN_ON(busw & NAND_BUSWIDTH_16);
		nand_set_defaults(chip);
	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
		/*
		 * Check, if buswidth is correct. Hardware drivers should set
		 * chip correct!
		 */
		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
			maf_id, dev_id);
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
			mtd->name);
		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
		ret = -EINVAL;

		goto free_detect_allocation;
	}

	nand_decode_bbm_options(chip);

	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;
	/* Convert chipsize to number of pages per chip -1 */
	targetsize = nanddev_target_size(&chip->base);
	chip->pagemask = (targetsize >> chip->page_shift) - 1;

	chip->bbt_erase_shift = chip->phys_erase_shift =
		ffs(mtd->erasesize) - 1;
	/* ffs() works on 32-bit values, so handle the two halves separately. */
	if (targetsize & 0xffffffff)
		chip->chip_shift = ffs((unsigned)targetsize) - 1;
	else {
		chip->chip_shift = ffs((unsigned)(targetsize >> 32));
		chip->chip_shift += 32 - 1;
	}

	if (chip->chip_shift - chip->page_shift > 16)
		chip->options |= NAND_ROW_ADDR_3;

	chip->badblockbits = 8;

	nand_legacy_adjust_cmdfunc(chip);

	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
		maf_id, dev_id);
	pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
		chip->parameters.model);
	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
		(int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
	return 0;

free_detect_allocation:
	kfree(chip->parameters.model);

	return ret;
}
  4581. static enum nand_ecc_engine_type
  4582. of_get_rawnand_ecc_engine_type_legacy(struct device_node *np)
  4583. {
  4584. enum nand_ecc_legacy_mode {
  4585. NAND_ECC_INVALID,
  4586. NAND_ECC_NONE,
  4587. NAND_ECC_SOFT,
  4588. NAND_ECC_SOFT_BCH,
  4589. NAND_ECC_HW,
  4590. NAND_ECC_HW_SYNDROME,
  4591. NAND_ECC_ON_DIE,
  4592. };
  4593. const char * const nand_ecc_legacy_modes[] = {
  4594. [NAND_ECC_NONE] = "none",
  4595. [NAND_ECC_SOFT] = "soft",
  4596. [NAND_ECC_SOFT_BCH] = "soft_bch",
  4597. [NAND_ECC_HW] = "hw",
  4598. [NAND_ECC_HW_SYNDROME] = "hw_syndrome",
  4599. [NAND_ECC_ON_DIE] = "on-die",
  4600. };
  4601. enum nand_ecc_legacy_mode eng_type;
  4602. const char *pm;
  4603. int err;
  4604. err = of_property_read_string(np, "nand-ecc-mode", &pm);
  4605. if (err)
  4606. return NAND_ECC_ENGINE_TYPE_INVALID;
  4607. for (eng_type = NAND_ECC_NONE;
  4608. eng_type < ARRAY_SIZE(nand_ecc_legacy_modes); eng_type++) {
  4609. if (!strcasecmp(pm, nand_ecc_legacy_modes[eng_type])) {
  4610. switch (eng_type) {
  4611. case NAND_ECC_NONE:
  4612. return NAND_ECC_ENGINE_TYPE_NONE;
  4613. case NAND_ECC_SOFT:
  4614. case NAND_ECC_SOFT_BCH:
  4615. return NAND_ECC_ENGINE_TYPE_SOFT;
  4616. case NAND_ECC_HW:
  4617. case NAND_ECC_HW_SYNDROME:
  4618. return NAND_ECC_ENGINE_TYPE_ON_HOST;
  4619. case NAND_ECC_ON_DIE:
  4620. return NAND_ECC_ENGINE_TYPE_ON_DIE;
  4621. default:
  4622. break;
  4623. }
  4624. }
  4625. }
  4626. return NAND_ECC_ENGINE_TYPE_INVALID;
  4627. }
  4628. static enum nand_ecc_placement
  4629. of_get_rawnand_ecc_placement_legacy(struct device_node *np)
  4630. {
  4631. const char *pm;
  4632. int err;
  4633. err = of_property_read_string(np, "nand-ecc-mode", &pm);
  4634. if (!err) {
  4635. if (!strcasecmp(pm, "hw_syndrome"))
  4636. return NAND_ECC_PLACEMENT_INTERLEAVED;
  4637. }
  4638. return NAND_ECC_PLACEMENT_UNKNOWN;
  4639. }
  4640. static enum nand_ecc_algo of_get_rawnand_ecc_algo_legacy(struct device_node *np)
  4641. {
  4642. const char *pm;
  4643. int err;
  4644. err = of_property_read_string(np, "nand-ecc-mode", &pm);
  4645. if (!err) {
  4646. if (!strcasecmp(pm, "soft"))
  4647. return NAND_ECC_ALGO_HAMMING;
  4648. else if (!strcasecmp(pm, "soft_bch"))
  4649. return NAND_ECC_ALGO_BCH;
  4650. }
  4651. return NAND_ECC_ALGO_UNKNOWN;
  4652. }
/*
 * Fill the user ECC configuration from the deprecated "nand-ecc-mode" DT
 * property, but only for the fields the generic (non-legacy) parsing left
 * unset, so the new bindings always win over the legacy one.
 */
static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip)
{
	struct device_node *dn = nand_get_flash_node(chip);
	struct nand_ecc_props *user_conf = &chip->base.ecc.user_conf;

	if (user_conf->engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		user_conf->engine_type = of_get_rawnand_ecc_engine_type_legacy(dn);

	if (user_conf->algo == NAND_ECC_ALGO_UNKNOWN)
		user_conf->algo = of_get_rawnand_ecc_algo_legacy(dn);

	if (user_conf->placement == NAND_ECC_PLACEMENT_UNKNOWN)
		user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn);
}
  4664. static int of_get_nand_bus_width(struct nand_chip *chip)
  4665. {
  4666. struct device_node *dn = nand_get_flash_node(chip);
  4667. u32 val;
  4668. int ret;
  4669. ret = of_property_read_u32(dn, "nand-bus-width", &val);
  4670. if (ret == -EINVAL)
  4671. /* Buswidth defaults to 8 if the property does not exist .*/
  4672. return 0;
  4673. else if (ret)
  4674. return ret;
  4675. if (val == 16)
  4676. chip->options |= NAND_BUSWIDTH_16;
  4677. else if (val != 8)
  4678. return -EINVAL;
  4679. return 0;
  4680. }
/*
 * Parse the optional "secure-regions" DT property: a list of (offset, size)
 * u64 pairs describing flash areas the OS must not access. On success,
 * chip->secure_regions is allocated and filled (freed elsewhere by the core).
 */
static int of_get_nand_secure_regions(struct nand_chip *chip)
{
	struct device_node *dn = nand_get_flash_node(chip);
	struct property *prop;
	int nr_elem, i, j;

	/* Only proceed if the "secure-regions" property is present in DT */
	prop = of_find_property(dn, "secure-regions", NULL);
	if (!prop)
		return 0;

	nr_elem = of_property_count_elems_of_size(dn, "secure-regions", sizeof(u64));
	if (nr_elem <= 0)
		return nr_elem;

	/* Each region is two u64 cells: offset then size. */
	chip->nr_secure_regions = nr_elem / 2;
	chip->secure_regions = kcalloc(chip->nr_secure_regions, sizeof(*chip->secure_regions),
				       GFP_KERNEL);
	if (!chip->secure_regions)
		return -ENOMEM;

	for (i = 0, j = 0; i < chip->nr_secure_regions; i++, j += 2) {
		of_property_read_u64_index(dn, "secure-regions", j,
					   &chip->secure_regions[i].offset);
		of_property_read_u64_index(dn, "secure-regions", j + 1,
					   &chip->secure_regions[i].size);
	}

	return 0;
}
/**
 * rawnand_dt_parse_gpio_cs - Parse the gpio-cs property of a controller
 * @dev: Device that will be parsed. Also used for managed allocations.
 * @cs_array: Array of GPIO desc pointers allocated on success
 * @ncs_array: Number of entries in @cs_array updated on success.
 *
 * Return: 0 on success, an error otherwise.
 */
int rawnand_dt_parse_gpio_cs(struct device *dev, struct gpio_desc ***cs_array,
			     unsigned int *ncs_array)
{
	struct gpio_desc **descs;
	int ndescs, i;

	ndescs = gpiod_count(dev, "cs");
	if (ndescs < 0) {
		/* A missing cs-gpios property simply means no GPIO CS lines. */
		dev_dbg(dev, "No valid cs-gpios property\n");
		return 0;
	}

	descs = devm_kcalloc(dev, ndescs, sizeof(*descs), GFP_KERNEL);
	if (!descs)
		return -ENOMEM;

	for (i = 0; i < ndescs; i++) {
		/*
		 * NOTE(review): the descriptors are acquired with the
		 * non-devm getter, so earlier ones are not released on this
		 * error path — presumably freed with the device; verify.
		 */
		descs[i] = gpiod_get_index_optional(dev, "cs", i,
						    GPIOD_OUT_HIGH);
		if (IS_ERR(descs[i]))
			return PTR_ERR(descs[i]);
	}

	*ncs_array = ndescs;
	*cs_array = descs;

	return 0;
}
EXPORT_SYMBOL(rawnand_dt_parse_gpio_cs);
/* Parse the generic NAND DT properties and seed the chip ECC configuration. */
static int rawnand_dt_init(struct nand_chip *chip)
{
	struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
	struct device_node *dn = nand_get_flash_node(chip);
	int ret;

	/* Nothing to parse when the chip has no DT node attached. */
	if (!dn)
		return 0;

	ret = of_get_nand_bus_width(chip);
	if (ret)
		return ret;

	if (of_property_read_bool(dn, "nand-is-boot-medium"))
		chip->options |= NAND_IS_BOOT_MEDIUM;

	if (of_property_read_bool(dn, "nand-on-flash-bbt"))
		chip->bbt_options |= NAND_BBT_USE_FLASH;

	/* Generic bindings first, then the deprecated "nand-ecc-mode" one. */
	of_get_nand_ecc_user_config(nand);
	of_get_nand_ecc_legacy_user_config(chip);

	/*
	 * If neither the user nor the NAND controller have requested a specific
	 * ECC engine type, we will default to NAND_ECC_ENGINE_TYPE_ON_HOST.
	 */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

	/*
	 * Use the user requested engine type, unless there is none, in this
	 * case default to the NAND controller choice, otherwise fallback to
	 * the raw NAND default one.
	 */
	if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.user_conf.engine_type;
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.defaults.engine_type;

	chip->ecc.placement = nand->ecc.user_conf.placement;
	chip->ecc.algo = nand->ecc.user_conf.algo;
	chip->ecc.strength = nand->ecc.user_conf.strength;
	chip->ecc.size = nand->ecc.user_conf.step_size;

	return 0;
}
/**
 * nand_scan_ident - Scan for the NAND device
 * @chip: NAND chip object
 * @maxchips: number of chips to scan for
 * @table: alternative NAND ID table
 *
 * This is the first phase of the normal nand_scan() function. It reads the
 * flash ID and sets up MTD fields accordingly.
 *
 * This helper used to be called directly from controller drivers that needed
 * to tweak some ECC-related parameters before nand_scan_tail(). This
 * separation prevented dynamic allocations during this phase which was
 * inconvenient and has been banned for the benefit of the
 * ->init_ecc()/cleanup_ecc() hooks.
 */
static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
			   struct nand_flash_dev *table)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int nand_maf_id, nand_dev_id;
	unsigned int i;
	int ret;

	memorg = nanddev_get_memorg(&chip->base);

	/* Assume all dies are deselected when we enter nand_scan_ident(). */
	chip->cur_cs = -1;

	mutex_init(&chip->lock);
	init_waitqueue_head(&chip->resume_wq);

	/* Enforce the right timings for reset/detection */
	chip->current_interface_config = nand_get_reset_interface_config();

	ret = rawnand_dt_init(chip);
	if (ret)
		return ret;

	if (!mtd->name && mtd->dev.parent)
		mtd->name = dev_name(mtd->dev.parent);

	/* Set the default functions */
	nand_set_defaults(chip);

	ret = nand_legacy_check_hooks(chip);
	if (ret)
		return ret;

	memorg->ntargets = maxchips;

	/* Read the flash type */
	ret = nand_detect(chip, table);
	if (ret) {
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		nand_deselect_target(chip);
		return ret;
	}

	nand_maf_id = chip->id.data[0];
	nand_dev_id = chip->id.data[1];

	nand_deselect_target(chip);

	/* Check for a chip array: every extra die must report the same ID. */
	for (i = 1; i < maxchips; i++) {
		u8 id[2];

		/* See comment in nand_get_flash_type for reset */
		ret = nand_reset(chip, i);
		if (ret)
			break;

		nand_select_target(chip, i);
		/* Send the command for reading device ID */
		ret = nand_readid_op(chip, 0, id, sizeof(id));
		if (ret)
			break;
		/* Read manufacturer and device IDs */
		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
			nand_deselect_target(chip);
			break;
		}
		nand_deselect_target(chip);
	}

	if (i > 1)
		pr_info("%d chips detected\n", i);

	/* Store the number of chips and calc total size for mtd */
	memorg->ntargets = i;
	mtd->size = i * nanddev_target_size(&chip->base);

	return 0;
}
/* Undo the allocations performed during the identification phase. */
static void nand_scan_ident_cleanup(struct nand_chip *chip)
{
	/* Model string (kstrdup'ed) and the ONFI parameter page buffer. */
	kfree(chip->parameters.model);
	kfree(chip->parameters.onfi);
}
/*
 * Attach the generic software Hamming ECC engine to a raw NAND chip and
 * mirror the resulting configuration back into the legacy chip->ecc fields.
 */
int rawnand_sw_hamming_init(struct nand_chip *chip)
{
	struct nand_ecc_sw_hamming_conf *engine_conf;
	struct nand_device *base = &chip->base;
	int ret;

	base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
	base->ecc.user_conf.algo = NAND_ECC_ALGO_HAMMING;
	base->ecc.user_conf.strength = chip->ecc.strength;
	base->ecc.user_conf.step_size = chip->ecc.size;

	ret = nand_ecc_sw_hamming_init_ctx(base);
	if (ret)
		return ret;

	engine_conf = base->ecc.ctx.priv;

	/* Propagate the Smart Media byte ordering option to the engine. */
	if (chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER)
		engine_conf->sm_order = true;

	/* Reflect the effective configuration into the raw NAND layer. */
	chip->ecc.size = base->ecc.ctx.conf.step_size;
	chip->ecc.strength = base->ecc.ctx.conf.strength;
	chip->ecc.total = base->ecc.ctx.total;
	chip->ecc.steps = nanddev_get_ecc_nsteps(base);
	chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);

	return 0;
}
EXPORT_SYMBOL(rawnand_sw_hamming_init);
  4878. int rawnand_sw_hamming_calculate(struct nand_chip *chip,
  4879. const unsigned char *buf,
  4880. unsigned char *code)
  4881. {
  4882. struct nand_device *base = &chip->base;
  4883. return nand_ecc_sw_hamming_calculate(base, buf, code);
  4884. }
  4885. EXPORT_SYMBOL(rawnand_sw_hamming_calculate);
  4886. int rawnand_sw_hamming_correct(struct nand_chip *chip,
  4887. unsigned char *buf,
  4888. unsigned char *read_ecc,
  4889. unsigned char *calc_ecc)
  4890. {
  4891. struct nand_device *base = &chip->base;
  4892. return nand_ecc_sw_hamming_correct(base, buf, read_ecc, calc_ecc);
  4893. }
  4894. EXPORT_SYMBOL(rawnand_sw_hamming_correct);
  4895. void rawnand_sw_hamming_cleanup(struct nand_chip *chip)
  4896. {
  4897. struct nand_device *base = &chip->base;
  4898. nand_ecc_sw_hamming_cleanup_ctx(base);
  4899. }
  4900. EXPORT_SYMBOL(rawnand_sw_hamming_cleanup);
/*
 * Attach the generic software BCH ECC engine to a raw NAND chip and mirror
 * the resulting configuration back into the legacy chip->ecc fields.
 */
int rawnand_sw_bch_init(struct nand_chip *chip)
{
	struct nand_device *base = &chip->base;
	const struct nand_ecc_props *ecc_conf = nanddev_get_ecc_conf(base);
	int ret;

	base->ecc.user_conf.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
	base->ecc.user_conf.algo = NAND_ECC_ALGO_BCH;
	base->ecc.user_conf.step_size = chip->ecc.size;
	base->ecc.user_conf.strength = chip->ecc.strength;

	ret = nand_ecc_sw_bch_init_ctx(base);
	if (ret)
		return ret;

	/* Reflect the effective configuration into the raw NAND layer. */
	chip->ecc.size = ecc_conf->step_size;
	chip->ecc.strength = ecc_conf->strength;
	chip->ecc.total = base->ecc.ctx.total;
	chip->ecc.steps = nanddev_get_ecc_nsteps(base);
	chip->ecc.bytes = base->ecc.ctx.total / nanddev_get_ecc_nsteps(base);

	return 0;
}
EXPORT_SYMBOL(rawnand_sw_bch_init);
  4921. static int rawnand_sw_bch_calculate(struct nand_chip *chip,
  4922. const unsigned char *buf,
  4923. unsigned char *code)
  4924. {
  4925. struct nand_device *base = &chip->base;
  4926. return nand_ecc_sw_bch_calculate(base, buf, code);
  4927. }
  4928. int rawnand_sw_bch_correct(struct nand_chip *chip, unsigned char *buf,
  4929. unsigned char *read_ecc, unsigned char *calc_ecc)
  4930. {
  4931. struct nand_device *base = &chip->base;
  4932. return nand_ecc_sw_bch_correct(base, buf, read_ecc, calc_ecc);
  4933. }
  4934. EXPORT_SYMBOL(rawnand_sw_bch_correct);
  4935. void rawnand_sw_bch_cleanup(struct nand_chip *chip)
  4936. {
  4937. struct nand_device *base = &chip->base;
  4938. nand_ecc_sw_bch_cleanup_ctx(base);
  4939. }
  4940. EXPORT_SYMBOL(rawnand_sw_bch_cleanup);
/*
 * Populate the ECC function pointers for an on-host (controller) ECC engine,
 * falling back to the generic page/OOB helpers for every hook the controller
 * driver did not provide.
 */
static int nand_set_ecc_on_host_ops(struct nand_chip *chip)
{
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	switch (ecc->placement) {
	case NAND_ECC_PLACEMENT_UNKNOWN:
	case NAND_ECC_PLACEMENT_OOB:
		/* Use standard hwecc read page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_hwecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		if (!ecc->read_subpage)
			ecc->read_subpage = nand_read_subpage;
		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
			ecc->write_subpage = nand_write_subpage_hwecc;
		fallthrough;

	case NAND_ECC_PLACEMENT_INTERLEAVED:
		/*
		 * The generic hwecc helpers need hwctl/calculate/correct;
		 * without them the driver must supply its own page accessors.
		 */
		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
		    (!ecc->read_page ||
		     ecc->read_page == nand_read_page_hwecc ||
		     !ecc->write_page ||
		     ecc->write_page == nand_write_page_hwecc)) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			return -EINVAL;
		}
		/* Use standard syndrome read/write page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_syndrome;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_syndrome;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw_syndrome;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw_syndrome;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_syndrome;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_syndrome;
		break;

	default:
		pr_warn("Invalid NAND_ECC_PLACEMENT %d\n",
			ecc->placement);
		return -EINVAL;
	}

	return 0;
}
/*
 * Wire up the software ECC implementation (Hamming or BCH) and derive the
 * final ECC geometry for the chip.
 */
static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_device *nanddev = mtd_to_nanddev(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret;

	if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT))
		return -EINVAL;

	switch (ecc->algo) {
	case NAND_ECC_ALGO_HAMMING:
		ecc->calculate = rawnand_sw_hamming_calculate;
		ecc->correct = rawnand_sw_hamming_correct;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;

		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;

		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		/* Classic software Hamming: 3 ECC bytes per 256-byte step. */
		if (!ecc->size)
			ecc->size = 256;
		ecc->bytes = 3;
		ecc->strength = 1;

		if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
			ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;

		ret = rawnand_sw_hamming_init(chip);
		if (ret) {
			WARN(1, "Hamming ECC initialization failed!\n");
			return ret;
		}

		return 0;
	case NAND_ECC_ALGO_BCH:
		if (!IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
			WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
			return -EINVAL;
		}
		ecc->calculate = rawnand_sw_bch_calculate;
		ecc->correct = rawnand_sw_bch_correct;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;

		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;

		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;

		/*
		 * We can only maximize ECC config when the default layout is
		 * used, otherwise we don't know how many bytes can really be
		 * used.
		 */
		if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH &&
		    mtd->ooblayout != nand_get_large_page_ooblayout())
			nanddev->ecc.user_conf.flags &= ~NAND_ECC_MAXIMIZE_STRENGTH;

		ret = rawnand_sw_bch_init(chip);
		if (ret) {
			WARN(1, "BCH ECC initialization failed!\n");
			return ret;
		}

		return 0;
	default:
		WARN(1, "Unsupported ECC algorithm!\n");
		return -EINVAL;
	}
}
  5063. /**
  5064. * nand_check_ecc_caps - check the sanity of preset ECC settings
  5065. * @chip: nand chip info structure
  5066. * @caps: ECC caps info structure
  5067. * @oobavail: OOB size that the ECC engine can use
  5068. *
  5069. * When ECC step size and strength are already set, check if they are supported
  5070. * by the controller and the calculated ECC bytes fit within the chip's OOB.
  5071. * On success, the calculated ECC bytes is set.
  5072. */
  5073. static int
  5074. nand_check_ecc_caps(struct nand_chip *chip,
  5075. const struct nand_ecc_caps *caps, int oobavail)
  5076. {
  5077. struct mtd_info *mtd = nand_to_mtd(chip);
  5078. const struct nand_ecc_step_info *stepinfo;
  5079. int preset_step = chip->ecc.size;
  5080. int preset_strength = chip->ecc.strength;
  5081. int ecc_bytes, nsteps = mtd->writesize / preset_step;
  5082. int i, j;
  5083. for (i = 0; i < caps->nstepinfos; i++) {
  5084. stepinfo = &caps->stepinfos[i];
  5085. if (stepinfo->stepsize != preset_step)
  5086. continue;
  5087. for (j = 0; j < stepinfo->nstrengths; j++) {
  5088. if (stepinfo->strengths[j] != preset_strength)
  5089. continue;
  5090. ecc_bytes = caps->calc_ecc_bytes(preset_step,
  5091. preset_strength);
  5092. if (WARN_ON_ONCE(ecc_bytes < 0))
  5093. return ecc_bytes;
  5094. if (ecc_bytes * nsteps > oobavail) {
  5095. pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
  5096. preset_step, preset_strength);
  5097. return -ENOSPC;
  5098. }
  5099. chip->ecc.bytes = ecc_bytes;
  5100. return 0;
  5101. }
  5102. }
  5103. pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
  5104. preset_step, preset_strength);
  5105. return -ENOTSUPP;
  5106. }
/**
 * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
 * @chip: nand chip info structure
 * @caps: ECC engine caps info structure
 * @oobavail: OOB size that the ECC engine can use
 *
 * If a chip's ECC requirement is provided, try to meet it with the least
 * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
 * On success, the chosen ECC settings are set.
 */
static int
nand_match_ecc_req(struct nand_chip *chip,
		   const struct nand_ecc_caps *caps, int oobavail)
{
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int req_step = requirements->step_size;
	int req_strength = requirements->strength;
	int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
	int best_step = 0, best_strength = 0, best_ecc_bytes = 0;
	int best_ecc_bytes_total = INT_MAX;
	int i, j;

	/* No information provided by the NAND chip */
	if (!req_step || !req_strength)
		return -ENOTSUPP;

	/* number of correctable bits the chip requires in a page */
	req_corr = mtd->writesize / req_step * req_strength;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			/*
			 * If both step size and strength are smaller than the
			 * chip's requirement, it is not easy to compare the
			 * resulted reliability.
			 */
			if (step_size < req_step && strength < req_strength)
				continue;

			/* The step size must evenly divide the page. */
			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;
			ecc_bytes_total = ecc_bytes * nsteps;

			if (ecc_bytes_total > oobavail ||
			    strength * nsteps < req_corr)
				continue;

			/*
			 * We assume the best is to meet the chip's requirement
			 * with the least number of ECC bytes.
			 */
			if (ecc_bytes_total < best_ecc_bytes_total) {
				best_ecc_bytes_total = ecc_bytes_total;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	if (best_ecc_bytes_total == INT_MAX)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}
  5177. /**
  5178. * nand_maximize_ecc - choose the max ECC strength available
  5179. * @chip: nand chip info structure
  5180. * @caps: ECC engine caps info structure
  5181. * @oobavail: OOB size that the ECC engine can use
  5182. *
  5183. * Choose the max ECC strength that is supported on the controller, and can fit
  5184. * within the chip's OOB. On success, the chosen ECC settings are set.
  5185. */
  5186. static int
  5187. nand_maximize_ecc(struct nand_chip *chip,
  5188. const struct nand_ecc_caps *caps, int oobavail)
  5189. {
  5190. struct mtd_info *mtd = nand_to_mtd(chip);
  5191. const struct nand_ecc_step_info *stepinfo;
  5192. int step_size, strength, nsteps, ecc_bytes, corr;
  5193. int best_corr = 0;
  5194. int best_step = 0;
  5195. int best_strength = 0, best_ecc_bytes = 0;
  5196. int i, j;
  5197. for (i = 0; i < caps->nstepinfos; i++) {
  5198. stepinfo = &caps->stepinfos[i];
  5199. step_size = stepinfo->stepsize;
  5200. /* If chip->ecc.size is already set, respect it */
  5201. if (chip->ecc.size && step_size != chip->ecc.size)
  5202. continue;
  5203. for (j = 0; j < stepinfo->nstrengths; j++) {
  5204. strength = stepinfo->strengths[j];
  5205. if (mtd->writesize % step_size)
  5206. continue;
  5207. nsteps = mtd->writesize / step_size;
  5208. ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
  5209. if (WARN_ON_ONCE(ecc_bytes < 0))
  5210. continue;
  5211. if (ecc_bytes * nsteps > oobavail)
  5212. continue;
  5213. corr = strength * nsteps;
  5214. /*
  5215. * If the number of correctable bits is the same,
  5216. * bigger step_size has more reliability.
  5217. */
  5218. if (corr > best_corr ||
  5219. (corr == best_corr && step_size > best_step)) {
  5220. best_corr = corr;
  5221. best_step = step_size;
  5222. best_strength = strength;
  5223. best_ecc_bytes = ecc_bytes;
  5224. }
  5225. }
  5226. }
  5227. if (!best_corr)
  5228. return -ENOTSUPP;
  5229. chip->ecc.size = best_step;
  5230. chip->ecc.strength = best_strength;
  5231. chip->ecc.bytes = best_ecc_bytes;
  5232. return 0;
  5233. }
  5234. /**
  5235. * nand_ecc_choose_conf - Set the ECC strength and ECC step size
  5236. * @chip: nand chip info structure
  5237. * @caps: ECC engine caps info structure
  5238. * @oobavail: OOB size that the ECC engine can use
  5239. *
  5240. * Choose the ECC configuration according to following logic.
  5241. *
  5242. * 1. If both ECC step size and ECC strength are already set (usually by DT)
  5243. * then check if it is supported by this controller.
  5244. * 2. If the user provided the nand-ecc-maximize property, then select maximum
  5245. * ECC strength.
  5246. * 3. Otherwise, try to match the ECC step size and ECC strength closest
  5247. * to the chip's requirement. If available OOB size can't fit the chip
  5248. * requirement then fallback to the maximum ECC step size and ECC strength.
  5249. *
  5250. * On success, the chosen ECC settings are set.
  5251. */
  5252. int nand_ecc_choose_conf(struct nand_chip *chip,
  5253. const struct nand_ecc_caps *caps, int oobavail)
  5254. {
  5255. struct mtd_info *mtd = nand_to_mtd(chip);
  5256. struct nand_device *nanddev = mtd_to_nanddev(mtd);
  5257. if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
  5258. return -EINVAL;
  5259. if (chip->ecc.size && chip->ecc.strength)
  5260. return nand_check_ecc_caps(chip, caps, oobavail);
  5261. if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
  5262. return nand_maximize_ecc(chip, caps, oobavail);
  5263. if (!nand_match_ecc_req(chip, caps, oobavail))
  5264. return 0;
  5265. return nand_maximize_ecc(chip, caps, oobavail);
  5266. }
  5267. EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
/* Generic NAND framework ->erase() hook for raw NAND chips. */
static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct nand_chip *chip = container_of(nand, struct nand_chip,
					      base);
	unsigned int eb = nanddev_pos_to_row(nand, pos);
	int ret;

	/* Convert the row address into an eraseblock address. */
	eb >>= nand->rowconv.eraseblock_addr_shift;

	nand_select_target(chip, pos->target);
	ret = nand_erase_op(chip, eb);
	nand_deselect_target(chip);

	return ret;
}
  5280. static int rawnand_markbad(struct nand_device *nand,
  5281. const struct nand_pos *pos)
  5282. {
  5283. struct nand_chip *chip = container_of(nand, struct nand_chip,
  5284. base);
  5285. return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
  5286. }
  5287. static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
  5288. {
  5289. struct nand_chip *chip = container_of(nand, struct nand_chip,
  5290. base);
  5291. int ret;
  5292. nand_select_target(chip, pos->target);
  5293. ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
  5294. nand_deselect_target(chip);
  5295. return ret;
  5296. }
/* Generic NAND framework hooks, implemented on top of the raw NAND core. */
static const struct nand_ops rawnand_ops = {
	.erase = rawnand_erase,
	.markbad = rawnand_markbad,
	.isbad = rawnand_isbad,
};
/**
 * nand_scan_tail - Scan for the NAND device
 * @chip: NAND chip object
 *
 * This is the second phase of the normal nand_scan() function. It fills out
 * all the uninitialized function pointers with the defaults and scans for a
 * bad block table if appropriate.
 *
 * Return: 0 on success, a negative error code otherwise. On failure every
 * resource acquired here is released through the goto cleanup chain.
 */
static int nand_scan_tail(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_device *base = &chip->base;
	struct nand_ecc_ctrl *ecc = &chip->ecc;
	int ret, i;

	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
		    !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
		return -EINVAL;
	}

	/* One bounce buffer covering a full page plus its OOB area. */
	chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
	if (!chip->data_buf)
		return -ENOMEM;

	/*
	 * FIXME: some NAND manufacturer drivers expect the first die to be
	 * selected when manufacturer->init() is called. They should be fixed
	 * to explictly select the relevant die when interacting with the NAND
	 * chip.
	 */
	nand_select_target(chip, 0);
	ret = nand_manufacturer_init(chip);
	nand_deselect_target(chip);
	if (ret)
		goto err_free_buf;

	/* Set the internal oob buffer location, just after the page data */
	chip->oob_poi = chip->data_buf + mtd->writesize;

	/*
	 * If no default placement scheme is given, select an appropriate one.
	 * Soft Hamming/BCH engines install their own layout, so they are
	 * excluded here.
	 */
	if (!mtd->ooblayout &&
	    !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
	      ecc->algo == NAND_ECC_ALGO_BCH) &&
	    !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
	      ecc->algo == NAND_ECC_ALGO_HAMMING)) {
		switch (mtd->oobsize) {
		case 8:
		case 16:
			mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
			break;
		case 64:
		case 128:
			mtd_set_ooblayout(mtd,
					  nand_get_large_page_hamming_ooblayout());
			break;
		default:
			/*
			 * Expose the whole OOB area to users if ECC_NONE
			 * is passed. We could do that for all kind of
			 * ->oobsize, but we must keep the old large/small
			 * page with ECC layout when ->oobsize <= 128 for
			 * compatibility reasons.
			 */
			if (ecc->engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
				mtd_set_ooblayout(mtd,
						  nand_get_large_page_ooblayout());
				break;
			}
			WARN(1, "No oob scheme defined for oobsize %d\n",
			     mtd->oobsize);
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
	}

	/*
	 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
	 * selected and we have 256 byte pagesize fallback to software ECC
	 */
	switch (ecc->engine_type) {
	case NAND_ECC_ENGINE_TYPE_ON_HOST:
		ret = nand_set_ecc_on_host_ops(chip);
		if (ret)
			goto err_nand_manuf_cleanup;

		if (mtd->writesize >= ecc->size) {
			if (!ecc->strength) {
				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
				ret = -EINVAL;
				goto err_nand_manuf_cleanup;
			}
			break;
		}
		/* ECC step larger than the page: fall back to soft Hamming. */
		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
			ecc->size, mtd->writesize);
		ecc->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
		ecc->algo = NAND_ECC_ALGO_HAMMING;
		fallthrough;

	case NAND_ECC_ENGINE_TYPE_SOFT:
		ret = nand_set_ecc_soft_ops(chip);
		if (ret)
			goto err_nand_manuf_cleanup;
		break;

	case NAND_ECC_ENGINE_TYPE_ON_DIE:
		/* The driver must provide page accessors for on-die ECC. */
		if (!ecc->read_page || !ecc->write_page) {
			WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
			ret = -EINVAL;
			goto err_nand_manuf_cleanup;
		}
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		break;

	case NAND_ECC_ENGINE_TYPE_NONE:
		pr_warn("NAND_ECC_ENGINE_TYPE_NONE selected by board driver. This is not recommended!\n");
		ecc->read_page = nand_read_page_raw;
		ecc->write_page = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->read_page_raw = nand_read_page_raw;
		ecc->write_page_raw = nand_write_page_raw;
		ecc->write_oob = nand_write_oob_std;
		ecc->size = mtd->writesize;
		ecc->bytes = 0;
		ecc->strength = 0;
		break;

	default:
		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->engine_type);
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/* Scratch buffers are only needed when ECC is computed/corrected. */
	if (ecc->correct || ecc->calculate) {
		ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
		if (!ecc->calc_buf || !ecc->code_buf) {
			ret = -ENOMEM;
			goto err_nand_manuf_cleanup;
		}
	}

	/* For many systems, the standard OOB write also works for raw */
	if (!ecc->read_oob_raw)
		ecc->read_oob_raw = ecc->read_oob;
	if (!ecc->write_oob_raw)
		ecc->write_oob_raw = ecc->write_oob;

	/* Propagate ECC info to the generic NAND and MTD layers */
	mtd->ecc_strength = ecc->strength;
	if (!base->ecc.ctx.conf.strength)
		base->ecc.ctx.conf.strength = ecc->strength;
	mtd->ecc_step_size = ecc->size;
	if (!base->ecc.ctx.conf.step_size)
		base->ecc.ctx.conf.step_size = ecc->size;

	/*
	 * Set the number of read / write steps for one page depending on ECC
	 * mode.
	 */
	if (!ecc->steps)
		ecc->steps = mtd->writesize / ecc->size;
	if (!base->ecc.ctx.nsteps)
		base->ecc.ctx.nsteps = ecc->steps;
	/* The steps must tile the page exactly. */
	if (ecc->steps * ecc->size != mtd->writesize) {
		WARN(1, "Invalid ECC parameters\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	if (!ecc->total) {
		ecc->total = ecc->steps * ecc->bytes;
		chip->base.ecc.ctx.total = ecc->total;
	}

	if (ecc->total > mtd->oobsize) {
		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
		ret = -EINVAL;
		goto err_nand_manuf_cleanup;
	}

	/*
	 * The number of bytes available for a client to place data into
	 * the out of band area.
	 */
	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		ret = 0;
	mtd->oobavail = ret;

	/* ECC sanity check: warn if it's too weak */
	if (!nand_ecc_is_strong_enough(&chip->base))
		pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
			mtd->name, chip->ecc.strength, chip->ecc.size,
			nanddev_get_ecc_requirements(&chip->base)->strength,
			nanddev_get_ecc_requirements(&chip->base)->step_size);

	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
		switch (ecc->steps) {
		case 2:
			mtd->subpage_sft = 1;
			break;
		case 4:
		case 8:
		case 16:
			mtd->subpage_sft = 2;
			break;
		}
	}
	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;

	/* Invalidate the pagebuffer reference */
	chip->pagecache.page = -1;

	/* Large page NAND with SOFT_ECC should support subpage reads */
	switch (ecc->engine_type) {
	case NAND_ECC_ENGINE_TYPE_SOFT:
		if (chip->page_shift > 9)
			chip->options |= NAND_SUBPAGE_READ;
		break;

	default:
		break;
	}

	ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
	if (ret)
		goto err_nand_manuf_cleanup;

	/* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
	if (chip->options & NAND_ROM)
		mtd->flags = MTD_CAP_ROM;

	/* Fill in remaining MTD driver data */
	mtd->_erase = nand_erase;
	mtd->_point = NULL;
	mtd->_unpoint = NULL;
	mtd->_panic_write = panic_nand_write;
	mtd->_read_oob = nand_read_oob;
	mtd->_write_oob = nand_write_oob;
	mtd->_sync = nand_sync;
	mtd->_lock = nand_lock;
	mtd->_unlock = nand_unlock;
	mtd->_suspend = nand_suspend;
	mtd->_resume = nand_resume;
	mtd->_reboot = nand_shutdown;
	mtd->_block_isreserved = nand_block_isreserved;
	mtd->_block_isbad = nand_block_isbad;
	mtd->_block_markbad = nand_block_markbad;
	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;

	/*
	 * Initialize bitflip_threshold to its default prior scan_bbt() call.
	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
	 * properly set.
	 */
	if (!mtd->bitflip_threshold)
		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);

	/* Find the fastest data interface for this chip */
	ret = nand_choose_interface_config(chip);
	if (ret)
		goto err_nanddev_cleanup;

	/* Enter fastest possible mode on all dies. */
	for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
		ret = nand_setup_interface(chip, i);
		if (ret)
			goto err_free_interface_config;
	}

	rawnand_late_check_supported_ops(chip);

	/*
	 * Look for secure regions in the NAND chip. These regions are supposed
	 * to be protected by a secure element like Trustzone. So the read/write
	 * accesses to these regions will be blocked in the runtime by this
	 * driver.
	 */
	ret = of_get_nand_secure_regions(chip);
	if (ret)
		goto err_free_interface_config;

	/* Check, if we should skip the bad block table scan */
	if (chip->options & NAND_SKIP_BBTSCAN)
		return 0;

	/* Build bad block table */
	ret = nand_create_bbt(chip);
	if (ret)
		goto err_free_secure_regions;

	return 0;

err_free_secure_regions:
	kfree(chip->secure_regions);

err_free_interface_config:
	kfree(chip->best_interface_config);

err_nanddev_cleanup:
	nanddev_cleanup(&chip->base);

err_nand_manuf_cleanup:
	nand_manufacturer_cleanup(chip);

err_free_buf:
	kfree(chip->data_buf);
	kfree(ecc->code_buf);
	kfree(ecc->calc_buf);

	return ret;
}
  5582. static int nand_attach(struct nand_chip *chip)
  5583. {
  5584. if (chip->controller->ops && chip->controller->ops->attach_chip)
  5585. return chip->controller->ops->attach_chip(chip);
  5586. return 0;
  5587. }
  5588. static void nand_detach(struct nand_chip *chip)
  5589. {
  5590. if (chip->controller->ops && chip->controller->ops->detach_chip)
  5591. chip->controller->ops->detach_chip(chip);
  5592. }
  5593. /**
  5594. * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
  5595. * @chip: NAND chip object
  5596. * @maxchips: number of chips to scan for.
  5597. * @ids: optional flash IDs table
  5598. *
  5599. * This fills out all the uninitialized function pointers with the defaults.
  5600. * The flash ID is read and the mtd/chip structures are filled with the
  5601. * appropriate values.
  5602. */
  5603. int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
  5604. struct nand_flash_dev *ids)
  5605. {
  5606. int ret;
  5607. if (!maxchips)
  5608. return -EINVAL;
  5609. ret = nand_scan_ident(chip, maxchips, ids);
  5610. if (ret)
  5611. return ret;
  5612. ret = nand_attach(chip);
  5613. if (ret)
  5614. goto cleanup_ident;
  5615. ret = nand_scan_tail(chip);
  5616. if (ret)
  5617. goto detach_chip;
  5618. return 0;
  5619. detach_chip:
  5620. nand_detach(chip);
  5621. cleanup_ident:
  5622. nand_scan_ident_cleanup(chip);
  5623. return ret;
  5624. }
  5625. EXPORT_SYMBOL(nand_scan_with_ids);
/**
 * nand_cleanup - [NAND Interface] Free resources held by the NAND device
 * @chip: NAND chip object
 *
 * Releases everything acquired during nand_scan(), in reverse order of
 * initialization: ECC engine state, generic NAND layer resources, buffers,
 * manufacturer data, controller attach data, and identification data.
 */
void nand_cleanup(struct nand_chip *chip)
{
	/* Tear down the software ECC engine, if one was instantiated. */
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT) {
		if (chip->ecc.algo == NAND_ECC_ALGO_HAMMING)
			rawnand_sw_hamming_cleanup(chip);
		else if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
			rawnand_sw_bch_cleanup(chip);
	}

	nanddev_cleanup(&chip->base);

	/* Free secure regions data */
	kfree(chip->secure_regions);

	/* Free bad block table memory */
	kfree(chip->bbt);
	kfree(chip->data_buf);
	kfree(chip->ecc.code_buf);
	kfree(chip->ecc.calc_buf);

	/* Free bad block descriptor memory (only if dynamically allocated) */
	if (chip->badblock_pattern && chip->badblock_pattern->options
	    & NAND_BBT_DYNAMICSTRUCT)
		kfree(chip->badblock_pattern);

	/* Free the data interface */
	kfree(chip->best_interface_config);

	/* Free manufacturer priv data. */
	nand_manufacturer_cleanup(chip);

	/* Free controller specific allocations after chip identification */
	nand_detach(chip);

	/* Free identification phase allocations */
	nand_scan_ident_cleanup(chip);
}
EXPORT_SYMBOL_GPL(nand_cleanup);
/* Module metadata */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
MODULE_DESCRIPTION("Generic NAND flash driver code");