/*
 * 'Standard' SDIO HOST CONTROLLER driver
 *
 * Portions of this code are copyright (c) 2020 Cypress Semiconductor Corporation
 *
 * Copyright (C) 1999-2020, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: bcmsdstd.c 700323 2017-05-18 16:12:11Z $
 */
#include <typedefs.h>
#include <bcmdevs.h>
#include <bcmendian.h>
#include <bcmutils.h>
#include <osl.h>
#include <siutils.h>
#include <sdio.h>	/* SDIO Device and Protocol Specs */
#include <sdioh.h>	/* Standard SDIO Host Controller Specification */
#include <bcmsdbus.h>	/* bcmsdh to/from specific controller APIs */
#include <sdiovar.h>	/* ioctl/iovars */
#include <pcicfg.h>
#include <bcmsdstd.h>

#define SD_PAGE_BITS	12
#define SD_PAGE		(1 << SD_PAGE_BITS)
#define SDSTD_MAX_TUNING_PHASE	5

/*
 * Upper GPIO 16 - 31 are available on J22
 * J22.pin3 == gpio16, J22.pin5 == gpio17, etc.
 * Lower GPIO 0 - 15 are available on J15 (WL_GPIO)
 */
#define SDH_GPIO16		16
#define SDH_GPIO_ENABLE		0xffff

#include <bcmsdstd.h>
#include <sbsdio.h>	/* SDIOH (host controller) core hardware definitions */
/* Globals */
uint sd_msglevel = SDH_ERROR_VAL;
uint sd_hiok = TRUE;			/* Use hi-speed mode if available? */
uint sd_sdmode = SDIOH_MODE_SD4;	/* Use SD4 mode by default */
uint sd_f2_blocksize = 256;		/* Default blocksize */
uint sd_f1_blocksize = BLOCK_SIZE_4318;	/* Default blocksize */

#define sd3_trace(x)

/* sd3ClkMode: 0-SDR12 [25MHz]
 *             1-SDR25 [50MHz]+SHS=1
 *             2-SDR50 [100MHz]+SSDR50=1
 *             3-SDR104 [208MHz]+SSDR104=1
 *             4-DDR50 [50MHz]+SDDR50=1
 */
#define SD3CLKMODE_0_SDR12	(0)
#define SD3CLKMODE_1_SDR25	(1)
#define SD3CLKMODE_2_SDR50	(2)
#define SD3CLKMODE_3_SDR104	(3)
#define SD3CLKMODE_4_DDR50	(4)
#define SD3CLKMODE_DISABLED	(-1)
#define SD3CLKMODE_AUTO		(99)

/* values for global_UHSI_Supp: means host and card caps match. */
#define HOST_SDR_UNSUPP		(0)
#define HOST_SDR_12_25		(1)
#define HOST_SDR_50_104_DDR	(2)

/* Depends on / affects sd3_autoselect_uhsi_max;
 * see sd3_autoselect_uhsi_max.
 */
int sd_uhsimode = SD3CLKMODE_DISABLED;
uint sd_tuning_period = CAP3_RETUNING_TC_OTHER;
uint sd_delay_value = 500000;

/* Enables host-to-dongle glomming. Also increases the
 * DMA buffer size. This increases the rx throughput
 * since there will be fewer CMD53 transactions.
 */
#ifdef BCMSDIOH_TXGLOM
uint sd_txglom;
module_param(sd_txglom, uint, 0);
#endif /* BCMSDIOH_TXGLOM */

char dhd_sdiod_uhsi_ds_override[2] = {' '};

#define MAX_DTS_INDEX		(3)
#define DRVSTRN_MAX_CHAR	('D')
#define DRVSTRN_IGNORE_CHAR	(' ')

char DTS_vals[MAX_DTS_INDEX + 1] = {
	0x1, /* Driver Strength Type-A */
	0x0, /* Driver Strength Type-B */
	0x2, /* Driver Strength Type-C */
	0x3, /* Driver Strength Type-D */
};

uint32 sd3_autoselect_uhsi_max = 0;

#define MAX_TUNING_ITERS		(40)
/* (150+10) ms total tuning time, divided into a per-loop delay */
#define PER_TRY_TUNING_DELAY_MS		(160/MAX_TUNING_ITERS)

#define CLKTUNING_MAX_BRR_RETRIES	(1000) /* 1 ms: 1000 retries with 1 us delay per loop */
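
/* With MAX_TUNING_ITERS = 40 and the ~160 ms budget above, PER_TRY_TUNING_DELAY_MS
 * works out to 160/40 = 4 ms of delay per tuning attempt.
 */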
/* Table analogous to the preset value register.
 * This is because the current HC doesn't have preset value register support.
 * All entries use DrvStr 'B' [val: 0] and CLKGEN 0.
 */
static unsigned short presetval_sw_table[] = {
	0x0520, /* initialization: DrvStr:'B' [0]; CLKGen:0;
	         * SDCLKFreqSel: 520 [division: 320*2 = 640: ~400 KHz]
	         */
	0x0008, /* default speed: DrvStr:'B' [0]; CLKGen:0;
	         * SDCLKFreqSel: 8 [division: 6*2 = 12: ~25 MHz]
	         */
	0x0004, /* High speed: DrvStr:'B' [0]; CLKGen:0;
	         * SDCLKFreqSel: 4 [division: 3*2 = 6: ~50 MHz]
	         */
	0x0008, /* SDR12: DrvStr:'B' [0]; CLKGen:0;
	         * SDCLKFreqSel: 8 [division: 6*2 = 12: ~25 MHz]
	         */
	0x0004, /* SDR25: DrvStr:'B' [0]; CLKGen:0;
	         * SDCLKFreqSel: 4 [division: 3*2 = 6: ~50 MHz]
	         */
	0x0001, /* SDR50: DrvStr:'B' [0]; CLKGen:0;
	         * SDCLKFreqSel: 2 [division: 1*2 = 2: ~100 MHz]
	         */
	0x0001, /* SDR104: DrvStr:'B' [0]; CLKGen:0;
	         * SDCLKFreqSel: 1 [no division: ~255/~208 MHz]
	         */
	0x0002  /* DDR50: DrvStr:'B' [0]; CLKGen:0;
	         * SDCLKFreqSel: 4 [division: 3*2 = 6: ~50 MHz]
	         */
};

/* This is to have software overrides to the hardware. Info follows:
 * For override [1]: Preset registers: not supported
 *                   Voltage switch:   not supported
 *                   Clock tuning:     not supported
 */
bool sd3_sw_override1 = FALSE;
bool sd3_sw_read_magic_bytes = FALSE;

#define SD3_TUNING_REQD(sd, sd_uhsimode) ((sd_uhsimode != SD3CLKMODE_DISABLED) && \
	(sd->version == HOST_CONTR_VER_3) && \
	((sd_uhsimode == SD3CLKMODE_3_SDR104) || \
	((sd_uhsimode == SD3CLKMODE_2_SDR50) && \
	(GFIELD(sd->caps3, CAP3_TUNING_SDR50)))))
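
/* Per SD3_TUNING_REQD() above, clock tuning is performed only on a version-3 host
 * with a UHS-I mode selected: always for SDR104, and for SDR50 only when the
 * host's capability bit CAP3_TUNING_SDR50 is set.
 */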
/* find next power of 2 */
#define NEXT_POW2(n)  {n--; n |= n>>1; n |= n>>2; n |= n>>4; n++;}
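
/* NEXT_POW2() rounds a value up to the next power of 2 in place. Illustrative
 * use (not called here):
 *
 *   uint32 n = 100;
 *   NEXT_POW2(n);    n is now 128; an exact power of 2 such as 64 is unchanged.
 *
 * Note the shift chain stops at >>4, so the macro is only exact for values up
 * to 256.
 */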
#ifdef BCMSDYIELD
bool sd_yieldcpu = TRUE;	/* Allow CPU yielding for buffer requests */
uint sd_minyield = 0;		/* Minimum xfer size to allow CPU yield */
bool sd_forcerb = FALSE;	/* Force sync readback in intrs_on/off */
#endif // endif

#define F1_SLEEPCSR_ADDR	0x1001F

uint sd_divisor = 2;	/* Default 48MHz/2 = 24MHz; might get changed in code for 208 */

uint sd_power = 1;		/* Default to SD Slot powered ON */
uint sd_3_power_save = 1;	/* Default to SDIO 3.0 power save */
uint sd_clock = 1;		/* Default to SD Clock turned ON */
uint sd_pci_slot = 0xFFFFffff;	/* Used to force selection of a particular PCI slot */
uint8 sd_dma_mode = DMA_MODE_AUTO;	/* Default to AUTO & program based on capability */

uint sd_toctl = 7;
static bool trap_errs = FALSE;

static const char *dma_mode_description[] = { "PIO", "SDMA", "ADMA1", "32b ADMA2", "64b ADMA2" };

/* Prototypes */
static bool sdstd_start_clock(sdioh_info_t *sd, uint16 divisor);
static uint16 sdstd_start_power(sdioh_info_t *sd, int volts_req);
static bool sdstd_bus_width(sdioh_info_t *sd, int width);
static int sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode);
static int sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode);
static int sdstd_card_enablefuncs(sdioh_info_t *sd);
static void sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count);
static int sdstd_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd, uint32 arg);
static int sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr,
	int regsize, uint32 *data);
static int sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr,
	int regsize, uint32 data);
static int sdstd_driver_init(sdioh_info_t *sd);
static bool sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset);
static int sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo,
	uint32 addr, int nbytes, uint32 *data);
static int sdstd_abort(sdioh_info_t *sd, uint func);
static int sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg);
static int set_client_block_size(sdioh_info_t *sd, int func, int blocksize);
static void sd_map_dma(sdioh_info_t *sd);
static void sd_unmap_dma(sdioh_info_t *sd);
static void sd_clear_adma_dscr_buf(sdioh_info_t *sd);
static void sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data);
static void sd_create_adma_descriptor(sdioh_info_t *sd,
	uint32 index, uint32 addr_phys,
	uint16 length, uint16 flags);
static void sd_dump_adma_dscr(sdioh_info_t *sd);
static void sdstd_dumpregs(sdioh_info_t *sd);
static int sdstd_3_set_highspeed_uhsi_mode(sdioh_info_t *sd, int sd3ClkMode);
static int sdstd_3_sigvoltswitch_proc(sdioh_info_t *sd);
static int sdstd_3_get_matching_uhsi_clkmode(sdioh_info_t *sd,
	int sd3_requested_clkmode);
static bool sdstd_3_get_matching_drvstrn(sdioh_info_t *sd,
	int sd3_requested_clkmode, uint32 *drvstrn, uint16 *presetval);
static int sdstd_3_clock_wrapper(sdioh_info_t *sd);
static int sdstd_clock_wrapper(sdioh_info_t *sd);

/*
 * Private register access routines.
 */

/* 16 bit PCI regs */
extern uint16 sdstd_rreg16(sdioh_info_t *sd, uint reg);
uint16
sdstd_rreg16(sdioh_info_t *sd, uint reg)
{
	volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
	sd_ctrl(("16: R Reg 0x%02x, Data 0x%x\n", reg, data));
	return data;
}

extern void sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data);
void
sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data)
{
	*(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
	sd_ctrl(("16: W Reg 0x%02x, Data 0x%x\n", reg, data));
}

static void
sdstd_or_reg16(sdioh_info_t *sd, uint reg, uint16 val)
{
	volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
	sd_ctrl(("16: OR Reg 0x%02x, Val 0x%x\n", reg, val));
	data |= val;
	*(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
}

static void
sdstd_mod_reg16(sdioh_info_t *sd, uint reg, int16 mask, uint16 val)
{
	volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
	sd_ctrl(("16: MOD Reg 0x%02x, Mask 0x%x, Val 0x%x\n", reg, mask, val));
	data &= ~mask;
	data |= (val & mask);
	*(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
}
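
/* sdstd_mod_reg16() is a 16-bit read-modify-write helper: for example,
 * sdstd_mod_reg16(sd, reg, 0x00ff, 0x0005) (purely illustrative values) reads
 * the register, clears the bits selected by the mask (here the low byte), ORs
 * in the masked value (0x0005) and writes the result back, leaving the
 * unmasked bits untouched.
 */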
/* 32 bit PCI regs */
static uint32
sdstd_rreg(sdioh_info_t *sd, uint reg)
{
	volatile uint32 data = *(volatile uint32 *)(sd->mem_space + reg);
	sd_ctrl(("32: R Reg 0x%02x, Data 0x%x\n", reg, data));
	return data;
}

static inline void
sdstd_wreg(sdioh_info_t *sd, uint reg, uint32 data)
{
	*(volatile uint32 *)(sd->mem_space + reg) = (uint32)data;
	sd_ctrl(("32: W Reg 0x%02x, Data 0x%x\n", reg, data));
}

/* 8 bit PCI regs */
static inline void
sdstd_wreg8(sdioh_info_t *sd, uint reg, uint8 data)
{
	*(volatile uint8 *)(sd->mem_space + reg) = (uint8)data;
	sd_ctrl(("08: W Reg 0x%02x, Data 0x%x\n", reg, data));
}

static uint8
sdstd_rreg8(sdioh_info_t *sd, uint reg)
{
	volatile uint8 data = *(volatile uint8 *)(sd->mem_space + reg);
	sd_ctrl(("08: R Reg 0x%02x, Data 0x%x\n", reg, data));
	return data;
}

/*
 * Private work routines
 */

sdioh_info_t *glob_sd;

/*
 * Public entry points & extern's
 */
extern sdioh_info_t *
sdioh_attach(osl_t *osh, void *bar0, uint irq)
{
	sdioh_info_t *sd;

	sd_trace(("%s\n", __FUNCTION__));
	if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) {
		sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh)));
		return NULL;
	}
	bzero((char *)sd, sizeof(sdioh_info_t));
	glob_sd = sd;
	sd->osh = osh;
	if (sdstd_osinit(sd) != 0) {
		sd_err(("%s:sdstd_osinit() failed\n", __FUNCTION__));
		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
		return NULL;
	}
	sd->mem_space = (volatile char *)sdstd_reg_map(osh, (ulong)bar0, SDIOH_REG_WINSZ);
	sd_init_dma(sd);
	sd->irq = irq;
	if (sd->mem_space == NULL) {
		sd_err(("%s:ioremap() failed\n", __FUNCTION__));
		sdstd_osfree(sd);
		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
		return NULL;
	}
	sd_info(("%s:sd->mem_space = %p\n", __FUNCTION__, sd->mem_space));
	sd->intr_handler = NULL;
	sd->intr_handler_arg = NULL;
	sd->intr_handler_valid = FALSE;

	/* Set defaults */
	sd->sd_blockmode = TRUE;
	sd->use_client_ints = TRUE;
	sd->sd_dma_mode = sd_dma_mode;

	if (!sd->sd_blockmode)
		sd->sd_dma_mode = DMA_MODE_NONE;

	if (sdstd_driver_init(sd) != SUCCESS) {
		/* If the host CPU was reset without resetting the SD bus or
		 * SD device, the device will still have its RCA but the
		 * driver no longer knows what it is (since the driver has been restarted).
		 * Go through once to clear the RCA and again to reassign it.
		 */
		sd_info(("driver_init failed - Reset RCA and try again\n"));
		if (sdstd_driver_init(sd) != SUCCESS) {
			sd_err(("%s:driver_init() failed()\n", __FUNCTION__));
			if (sd->mem_space) {
				sdstd_reg_unmap(osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
				sd->mem_space = NULL;
			}
			sdstd_osfree(sd);
			MFREE(sd->osh, sd, sizeof(sdioh_info_t));
			return (NULL);
		}
	}

	OSL_DMADDRWIDTH(osh, 32);

	/* Always map DMA buffers, so we can switch between DMA modes. */
	sd_map_dma(sd);

	if (sdstd_register_irq(sd, irq) != SUCCESS) {
		sd_err(("%s: sdstd_register_irq() failed for irq = %d\n", __FUNCTION__, irq));
		sdstd_free_irq(sd->irq, sd);
		if (sd->mem_space) {
			sdstd_reg_unmap(osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
			sd->mem_space = NULL;
		}
		sdstd_osfree(sd);
		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
		return (NULL);
	}

	sd_trace(("%s: Done\n", __FUNCTION__));
	return sd;
}

extern SDIOH_API_RC
sdioh_detach(osl_t *osh, sdioh_info_t *sd)
{
	sd_trace(("%s\n", __FUNCTION__));

	if (sd) {
		sd_unmap_dma(sd);
		sdstd_wreg16(sd, SD_IntrSignalEnable, 0);
		if (sd->sd3_tuning_reqd == TRUE) {
			sdstd_3_osclean_tuning(sd);
			sd->sd3_tuning_reqd = FALSE;
		}
		sd->sd3_tuning_disable = FALSE;
		sd_trace(("%s: freeing irq %d\n", __FUNCTION__, sd->irq));
		sdstd_free_irq(sd->irq, sd);
		if (sd->card_init_done)
			sdstd_reset(sd, 1, 1);
		if (sd->mem_space) {
			sdstd_reg_unmap(osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
			sd->mem_space = NULL;
		}

		sdstd_osfree(sd);
		MFREE(sd->osh, sd, sizeof(sdioh_info_t));
	}
	return SDIOH_API_RC_SUCCESS;
}

/* Configure callback to client when we receive client interrupt */
extern SDIOH_API_RC
sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh)
{
	sd_trace(("%s: Entering\n", __FUNCTION__));
	sd->intr_handler = fn;
	sd->intr_handler_arg = argh;
	sd->intr_handler_valid = TRUE;
	return SDIOH_API_RC_SUCCESS;
}

extern SDIOH_API_RC
sdioh_interrupt_deregister(sdioh_info_t *sd)
{
	sd_trace(("%s: Entering\n", __FUNCTION__));
	sd->intr_handler_valid = FALSE;
	sd->intr_handler = NULL;
	sd->intr_handler_arg = NULL;
	return SDIOH_API_RC_SUCCESS;
}

extern SDIOH_API_RC
sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff)
{
	sd_trace(("%s: Entering\n", __FUNCTION__));
	*onoff = sd->client_intr_enabled;
	return SDIOH_API_RC_SUCCESS;
}

#if defined(DHD_DEBUG)
extern bool
sdioh_interrupt_pending(sdioh_info_t *sd)
{
	uint16 intrstatus;
	intrstatus = sdstd_rreg16(sd, SD_IntrStatus);
	return !!(intrstatus & CLIENT_INTR);
}
#endif // endif

uint
sdioh_query_iofnum(sdioh_info_t *sd)
{
	return sd->num_funcs;
}

/* IOVar table */
enum {
	IOV_MSGLEVEL = 1,
	IOV_BLOCKMODE,
	IOV_BLOCKSIZE,
	IOV_DMA,
	IOV_USEINTS,
	IOV_NUMINTS,
	IOV_NUMLOCALINTS,
	IOV_HOSTREG,
	IOV_DEVREG,
	IOV_DIVISOR,
	IOV_SDMODE,
	IOV_HISPEED,
	IOV_HCIREGS,
	IOV_POWER,
	IOV_POWER_SAVE,
	IOV_YIELDCPU,
	IOV_MINYIELD,
	IOV_FORCERB,
	IOV_CLOCK,
	IOV_UHSIMOD,
	IOV_TUNEMOD,
	IOV_TUNEDIS
};

const bcm_iovar_t sdioh_iovars[] = {
	{"sd_msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0 },
	{"sd_blockmode", IOV_BLOCKMODE, 0, 0, IOVT_BOOL, 0 },
	{"sd_blocksize", IOV_BLOCKSIZE, 0, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */
	{"sd_dma", IOV_DMA, 0, 0, IOVT_UINT32, 0 },
#ifdef BCMSDYIELD
	{"sd_yieldcpu", IOV_YIELDCPU, 0, 0, IOVT_BOOL, 0 },
	{"sd_minyield", IOV_MINYIELD, 0, 0, IOVT_UINT32, 0 },
	{"sd_forcerb", IOV_FORCERB, 0, 0, IOVT_BOOL, 0 },
#endif // endif
	{"sd_ints", IOV_USEINTS, 0, 0, IOVT_BOOL, 0 },
	{"sd_numints", IOV_NUMINTS, 0, 0, IOVT_UINT32, 0 },
	{"sd_numlocalints", IOV_NUMLOCALINTS, 0, 0, IOVT_UINT32, 0 },
	{"sd_hostreg", IOV_HOSTREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
	{"sd_devreg", IOV_DEVREG, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
	{"sd_divisor", IOV_DIVISOR, 0, 0, IOVT_UINT32, 0 },
	{"sd_power", IOV_POWER, 0, 0, IOVT_UINT32, 0 },
	{"sd_power_save", IOV_POWER_SAVE, 0, 0, IOVT_UINT32, 0 },
	{"sd_clock", IOV_CLOCK, 0, 0, IOVT_UINT32, 0 },
	{"sd_mode", IOV_SDMODE, 0, 0, IOVT_UINT32, 100},
	{"sd_highspeed", IOV_HISPEED, 0, 0, IOVT_UINT32, 0},
	{"sd_uhsimode", IOV_UHSIMOD, 0, 0, IOVT_UINT32, 0},
	{"tuning_mode", IOV_TUNEMOD, 0, 0, IOVT_UINT32, 0},
	{"sd3_tuning_disable", IOV_TUNEDIS, 0, 0, IOVT_BOOL, 0},
	{NULL, 0, 0, 0, 0, 0 }
};
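
/* The "sd_blocksize" iovar packs the function number and block size into one
 * 32-bit value as ((func << 16) | size); e.g. a value of 0x00020200 selects
 * function 2 with a 512-byte block size (see the IOV_SVAL(IOV_BLOCKSIZE) case
 * below, which unpacks it the same way).
 */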
uint8 sdstd_turn_on_clock(sdioh_info_t *sd)
{
	sdstd_or_reg16(sd, SD_ClockCntrl, 0x4);
	return 0;
}

uint8 sdstd_turn_off_clock(sdioh_info_t *sd)
{
	sdstd_wreg16(sd, SD_ClockCntrl, sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4));
	return 0;
}
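
/* Bit 2 (0x4) of SD_ClockCntrl is the SD Clock Enable bit of the standard host
 * controller's Clock Control register: sdstd_turn_on_clock() ORs it in and
 * sdstd_turn_off_clock() clears it while preserving the divisor bits.
 */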
int
sdioh_iovar_op(sdioh_info_t *si, const char *name,
	void *params, int plen, void *arg, int len, bool set)
{
	const bcm_iovar_t *vi = NULL;
	int bcmerror = 0;
	int val_size;
	int32 int_val = 0;
	bool bool_val;
	uint32 actionid;

	ASSERT(name);
	ASSERT(len >= 0);

	/* Get must have return space; Set does not take qualifiers */
	ASSERT(set || (arg && len));
	ASSERT(!set || (!params && !plen));

	sd_trace(("%s: Enter (%s %s)\n", __FUNCTION__, (set ? "set" : "get"), name));

	if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) {
		bcmerror = BCME_UNSUPPORTED;
		goto exit;
	}

	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0)
		goto exit;

	/* Set up params so get and set can share the convenience variables */
	if (params == NULL) {
		params = arg;
		plen = len;
	}

	if (vi->type == IOVT_VOID)
		val_size = 0;
	else if (vi->type == IOVT_BUFFER)
		val_size = len;
	else
		val_size = sizeof(int);

	if (plen >= (int)sizeof(int_val))
		bcopy(params, &int_val, sizeof(int_val));

	bool_val = (int_val != 0) ? TRUE : FALSE;
	BCM_REFERENCE(bool_val);

	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
	switch (actionid) {
	case IOV_GVAL(IOV_MSGLEVEL):
		int_val = (int32)sd_msglevel;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_MSGLEVEL):
		sd_msglevel = int_val;
		break;

	case IOV_GVAL(IOV_BLOCKMODE):
		int_val = (int32)si->sd_blockmode;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_BLOCKMODE):
		si->sd_blockmode = (bool)int_val;
		/* Haven't figured out how to make non-block mode with DMA */
		if (!si->sd_blockmode)
			si->sd_dma_mode = DMA_MODE_NONE;
		break;

#ifdef BCMSDYIELD
	case IOV_GVAL(IOV_YIELDCPU):
		int_val = sd_yieldcpu;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_YIELDCPU):
		sd_yieldcpu = (bool)int_val;
		break;

	case IOV_GVAL(IOV_MINYIELD):
		int_val = sd_minyield;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_MINYIELD):
		sd_minyield = (bool)int_val;
		break;

	case IOV_GVAL(IOV_FORCERB):
		int_val = sd_forcerb;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_FORCERB):
		sd_forcerb = (bool)int_val;
		break;
#endif /* BCMSDYIELD */
	case IOV_GVAL(IOV_BLOCKSIZE):
		if ((uint32)int_val > si->num_funcs) {
			bcmerror = BCME_BADARG;
			break;
		}
		int_val = (int32)si->client_block_size[int_val];
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_BLOCKSIZE):
	{
		uint func = ((uint32)int_val >> 16);
		uint blksize = (uint16)int_val;

		if (func > si->num_funcs) {
			bcmerror = BCME_BADARG;
			break;
		}

		/* Now set it */
		sdstd_lock(si);
		bcmerror = set_client_block_size(si, func, blksize);
		sdstd_unlock(si);
		break;
	}

	case IOV_GVAL(IOV_DMA):
		int_val = (int32)si->sd_dma_mode;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_DMA):
		si->sd_dma_mode = (char)int_val;
		sdstd_set_dma_mode(si, si->sd_dma_mode);
		break;

	case IOV_GVAL(IOV_USEINTS):
		int_val = (int32)si->use_client_ints;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_USEINTS):
		si->use_client_ints = (bool)int_val;
		if (si->use_client_ints)
			si->intmask |= CLIENT_INTR;
		else
			si->intmask &= ~CLIENT_INTR;
		break;

	case IOV_GVAL(IOV_DIVISOR):
		int_val = (uint32)sd_divisor;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_DIVISOR):
		sd_divisor = int_val;
		if (!sdstd_start_clock(si, (uint16)sd_divisor)) {
			sd_err(("set clock failed!\n"));
			bcmerror = BCME_ERROR;
		}
		break;

	case IOV_GVAL(IOV_POWER):
		int_val = (uint32)sd_power;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_GVAL(IOV_POWER_SAVE):
		int_val = (uint32)sd_3_power_save;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_POWER):
		sd_power = int_val;
		if (sd_power == 1) {
			if (sdstd_driver_init(si) != SUCCESS) {
				sd_err(("set SD Slot power failed!\n"));
				bcmerror = BCME_ERROR;
			} else {
				sd_err(("SD Slot Powered ON.\n"));
			}
		} else {
			uint8 pwr = 0;
			pwr = SFIELD(pwr, PWR_BUS_EN, 0);
			sdstd_wreg8(si, SD_PwrCntrl, pwr); /* Set Voltage level */
			sd_err(("SD Slot Powered OFF.\n"));
		}
		break;

	case IOV_SVAL(IOV_POWER_SAVE):
		sd_3_power_save = int_val;
		break;

	case IOV_GVAL(IOV_CLOCK):
		int_val = (uint32)sd_clock;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_CLOCK):
		sd_clock = int_val;
		if (sd_clock == 1) {
			sd_info(("SD Clock turned ON.\n"));
			if (!sdstd_start_clock(si, (uint16)sd_divisor)) {
				sd_err(("sdstd_start_clock failed\n"));
				bcmerror = BCME_ERROR;
			}
		} else {
			/* turn off HC clock */
			sdstd_wreg16(si, SD_ClockCntrl,
				sdstd_rreg16(si, SD_ClockCntrl) & ~((uint16)0x4));
			sd_info(("SD Clock turned OFF.\n"));
		}
		break;

	case IOV_GVAL(IOV_SDMODE):
		int_val = (uint32)sd_sdmode;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_SDMODE):
		sd_sdmode = int_val;
		if (!sdstd_bus_width(si, sd_sdmode)) {
			sd_err(("sdstd_bus_width failed\n"));
			bcmerror = BCME_ERROR;
		}
		break;

	case IOV_GVAL(IOV_HISPEED):
		int_val = (uint32)sd_hiok;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_HISPEED):
		sd_hiok = int_val;
		bcmerror = sdstd_set_highspeed_mode(si, (bool)sd_hiok);
		break;

	case IOV_GVAL(IOV_UHSIMOD):
		sd3_trace(("%s: Get UHSI: \n", __FUNCTION__));
		int_val = (int)sd_uhsimode;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_SVAL(IOV_UHSIMOD):
	{
		int oldval = sd_uhsimode; /* save old, working value */
		sd3_trace(("%s: Set UHSI: \n", __FUNCTION__));
		/* check if UHSI is supported by card/host */
		if (!(si->card_UHSI_voltage_Supported && si->host_UHSISupported)) {
			sd_err(("%s:UHSI not supported!\n", __FUNCTION__));
			bcmerror = BCME_UNSUPPORTED;
			break;
		}
		/* check for valid values */
		if (!((int_val == SD3CLKMODE_AUTO) ||
			(int_val == SD3CLKMODE_DISABLED) ||
			((int_val >= SD3CLKMODE_0_SDR12) &&
			(int_val <= SD3CLKMODE_4_DDR50)))) {
			sd_err(("%s:CLK: bad arg!\n", __FUNCTION__));
			bcmerror = BCME_BADARG;
			break;
		}
		sd_uhsimode = int_val;
		if (SUCCESS != sdstd_3_clock_wrapper(si)) {
			sd_err(("%s:Error in setting uhsi clkmode:%d,"
				"restoring back to %d\n", __FUNCTION__,
				sd_uhsimode, oldval));
			/* try to set back the old one */
			sd_uhsimode = oldval;
			if (SUCCESS != sdstd_3_clock_wrapper(si)) {
				sd_err(("%s:Error in setting uhsi to old mode;"
					"ignoring:\n", __FUNCTION__));
			}
		}
		break;
	}

#ifdef DHD_DEBUG
	case IOV_SVAL(IOV_TUNEMOD):
	{
		if (int_val == SD_DHD_DISABLE_PERIODIC_TUNING) { /* do tuning a single time */
			sd3_trace(("Start tuning from Iovar\n"));
			si->sd3_tuning_reqd = TRUE;
			sdstd_enable_disable_periodic_timer(si, int_val);
			sdstd_lock(si);
			sdstd_3_clk_tuning(si, sdstd_3_get_uhsi_clkmode(si));
			sdstd_unlock(si);
			si->sd3_tuning_reqd = FALSE;
		}
		if (int_val == SD_DHD_ENABLE_PERIODIC_TUNING) {
			sd3_trace(("Enabling automatic tuning\n"));
			si->sd3_tuning_reqd = TRUE;
			sdstd_enable_disable_periodic_timer(si, int_val);
		}
		break;
	}
#endif /* debugging purpose */
	case IOV_GVAL(IOV_NUMINTS):
		int_val = (int32)si->intrcount;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_GVAL(IOV_NUMLOCALINTS):
		int_val = (int32)si->local_intrcount;
		bcopy(&int_val, arg, val_size);
		break;

	case IOV_GVAL(IOV_HOSTREG):
	{
		sdreg_t *sd_ptr = (sdreg_t *)params;

		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD3_WL_BT_reset_register) {
			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
			bcmerror = BCME_BADARG;
			break;
		}

		sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__,
			(sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
			sd_ptr->offset));
		if (sd_ptr->offset & 1)
			int_val = sdstd_rreg8(si, sd_ptr->offset);
		else if (sd_ptr->offset & 2)
			int_val = sdstd_rreg16(si, sd_ptr->offset);
		else
			int_val = sdstd_rreg(si, sd_ptr->offset);

		bcopy(&int_val, arg, sizeof(int_val));
		break;
	}

	case IOV_SVAL(IOV_HOSTREG):
	{
		sdreg_t *sd_ptr = (sdreg_t *)params;

		if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD3_WL_BT_reset_register) {
			sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset));
			bcmerror = BCME_BADARG;
			break;
		}

		sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value,
			(sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32),
			sd_ptr->offset));
		if (sd_ptr->offset & 1)
			sdstd_wreg8(si, sd_ptr->offset, (uint8)sd_ptr->value);
		else if (sd_ptr->offset & 2)
			sdstd_wreg16(si, sd_ptr->offset, (uint16)sd_ptr->value);
		else
			sdstd_wreg(si, sd_ptr->offset, (uint32)sd_ptr->value);
		break;
	}

	case IOV_GVAL(IOV_DEVREG):
	{
		sdreg_t *sd_ptr = (sdreg_t *)params;
		uint8 data;

		if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
			bcmerror = BCME_SDIO_ERROR;
			break;
		}

		int_val = (int)data;
		bcopy(&int_val, arg, sizeof(int_val));
		break;
	}

	case IOV_SVAL(IOV_DEVREG):
	{
		sdreg_t *sd_ptr = (sdreg_t *)params;
		uint8 data = (uint8)sd_ptr->value;

		if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
			bcmerror = BCME_SDIO_ERROR;
			break;
		}
		break;
	}

	case IOV_SVAL(IOV_TUNEDIS):
		si->sd3_tuning_disable = (bool)int_val;
		break;

	default:
		bcmerror = BCME_UNSUPPORTED;
		break;
	}
exit:
	return bcmerror;
}
extern SDIOH_API_RC
sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
{
	SDIOH_API_RC status;
	/* No lock needed since sdioh_request_byte does locking */
	status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
	return status;
}

extern SDIOH_API_RC
sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
{
	/* No lock needed since sdioh_request_byte does locking */
	SDIOH_API_RC status;
	status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
	return status;
}

extern SDIOH_API_RC
sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
{
	uint32 count;
	int offset;
	uint32 foo;
	uint8 *cis = cisd;

	sd_trace(("%s: Func = %d\n", __FUNCTION__, func));

	if (!sd->func_cis_ptr[func]) {
		bzero(cis, length);
		return SDIOH_API_RC_FAIL;
	}

	sdstd_lock(sd);
	*cis = 0;
	for (count = 0; count < length; count++) {
		offset = sd->func_cis_ptr[func] + count;
		if (sdstd_card_regread(sd, 0, offset, 1, &foo)) {
			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
			sdstd_unlock(sd);
			return SDIOH_API_RC_FAIL;
		}
		*cis = (uint8)(foo & 0xff);
		cis++;
	}
	sdstd_unlock(sd);
	return SDIOH_API_RC_SUCCESS;
}
extern SDIOH_API_RC
sdioh_request_byte(sdioh_info_t *sd, uint rw, uint func, uint regaddr, uint8 *byte)
{
	int status = SDIOH_API_RC_SUCCESS;
	uint32 cmd_arg;
	uint32 rsp5;

	sdstd_lock(sd);
	if (rw == SDIOH_READ)
		sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_PRE_DATA);

	/* Change to DATA_TRANSFER_ONGOING, protection against tuning tasklet */
	sdstd_3_set_data_state(sd, DATA_TRANSFER_ONGOING);

	cmd_arg = 0;
	cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
	cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
	cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, rw == SDIOH_READ ? 0 : 1);
	cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
	cmd_arg = SFIELD(cmd_arg, CMD52_DATA, rw == SDIOH_READ ? 0 : *byte);

	if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg)) != SUCCESS) {
		/* Change to DATA_TRANSFER_IDLE */
		sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE);
		sdstd_unlock(sd);
		return status;
	}

	sdstd_cmd_getrsp(sd, &rsp5, 1);
	if (sdstd_rreg16(sd, SD_ErrorIntrStatus) != 0) {
		sd_err(("%s: 1: ErrorintrStatus 0x%x\n",
			__FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus)));
		status = SDIOH_API_RC_FAIL;
	}
	if (GFIELD(rsp5, RSP5_FLAGS) != 0x10) {
		if (GFIELD(cmd_arg, CMD52_REG_ADDR) != F1_SLEEPCSR_ADDR) {
			sd_err(("%s: rsp5 flags is 0x%x\t %d \n",
				__FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
		}
		status = SDIOH_API_RC_FAIL;
	}

	if (GFIELD(rsp5, RSP5_STUFF)) {
		sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
			__FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
		status = SDIOH_API_RC_FAIL;
	}

	if (rw == SDIOH_READ)
		*byte = GFIELD(rsp5, RSP5_DATA);

	/* Change to DATA_TRANSFER_IDLE */
	sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE);

	/* check if we have to do tuning; if so, start */
	sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_POST_DATA);

	sdstd_unlock(sd);
	return status;
}

extern SDIOH_API_RC
sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr,
	uint32 *word, uint nbytes)
{
	int status;

	sdstd_lock(sd);
	sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_PRE_DATA);

	/* Change to DATA_TRANSFER_ONGOING, protection against tuning tasklet */
	sdstd_3_set_data_state(sd, DATA_TRANSFER_ONGOING);

	if (rw == SDIOH_READ) {
		status = sdstd_card_regread(sd, func, addr, nbytes, word);
	} else {
		status = sdstd_card_regwrite(sd, func, addr, nbytes, *word);
	}

	/* Change to DATA_TRANSFER_IDLE */
	sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE);

	/* check if we have to do tuning; if so, start */
	sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_POST_DATA);

	sdstd_unlock(sd);
	return (status == SUCCESS ? SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL);
}
#ifdef BCMSDIOH_TXGLOM
void
sdioh_glom_post(sdioh_info_t *sd, uint8 *frame, void *pkt, uint len)
{
	BCM_REFERENCE(pkt);
	sd->glom_info.dma_buf_arr[sd->glom_info.count] = frame;
	sd->glom_info.nbytes[sd->glom_info.count] = len;
	/* Convert the frame addr to phy addr for DMA in case of host controller version3 */
	if (sd->txglom_mode == SDPCM_TXGLOM_MDESC) {
		sd->glom_info.dma_phys_arr[sd->glom_info.count] = DMA_MAP(sd->osh,
			frame,
			len,
			DMA_TX, 0, 0);
	}
	sd->glom_info.count++;
}

void
sdioh_glom_clear(sdioh_info_t *sd)
{
	int i;
	/* DMA_MAP is done per frame only if host controller version is 3 */
	if (sd->txglom_mode == SDPCM_TXGLOM_MDESC) {
		for (i = 0; i < sd->glom_info.count; i++) {
			DMA_UNMAP(sd->osh,
				sd->glom_info.dma_phys_arr[i],
				sd->glom_info.nbytes[i],
				DMA_TX, 0, 0);
		}
	}
	sd->glom_info.count = 0;
}

uint
sdioh_set_mode(sdioh_info_t *sd, uint mode)
{
	if (mode == SDPCM_TXGLOM_CPY)
		sd->txglom_mode = mode;
	else if ((mode == SDPCM_TXGLOM_MDESC) && (sd->version == HOST_CONTR_VER_3))
		sd->txglom_mode = mode;

	return (sd->txglom_mode);
}

bool
sdioh_glom_enabled(void)
{
	return sd_txglom;
}
#endif /* BCMSDIOH_TXGLOM */
extern SDIOH_API_RC
sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func,
	uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt)
{
	uint8 is_ddr50 = FALSE;
	int len;
	int buflen = (int)buflen_u;
	bool fifo = (fix_inc == SDIOH_DATA_FIX);
	uint8 *localbuf = NULL, *tmpbuf = NULL;
	bool local_blockmode = sd->sd_blockmode;
	SDIOH_API_RC status = SDIOH_API_RC_SUCCESS;

	sdstd_lock(sd);

	is_ddr50 = (sd_uhsimode == SD3CLKMODE_4_DDR50) ? TRUE : FALSE;

	sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_PRE_DATA);

	/* Change to DATA_TRANSFER_ONGOING, protection against tuning tasklet */
	sdstd_3_set_data_state(sd, DATA_TRANSFER_ONGOING);

	ASSERT(reg_width == 4);
	ASSERT(buflen_u < (1 << 30));
	ASSERT(sd->client_block_size[func]);

#ifdef BCMSDIOH_TXGLOM
	if (sd_txglom) {
		while (pkt) {
			sdioh_glom_post(sd, PKTDATA(sd->osh, pkt), pkt, PKTLEN(sd->osh, pkt));
			pkt = PKTNEXT(sd->osh, pkt);
		}
	}
#endif // endif

	sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n",
		__FUNCTION__, rw == SDIOH_READ ? 'R' : 'W',
		buflen_u, sd->r_cnt, sd->t_cnt, pkt));

	/* Break buffer down into blocksize chunks:
	 * Bytemode: 1 block at a time.
	 * Blockmode: Multiples of blocksizes at a time w/ max of SD_PAGE.
	 * Both: leftovers are handled last (will be sent via bytemode).
	 */
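	/* For example, with a 512-byte client block size, a 1500-byte block-mode
	 * request is split into a 1024-byte transfer (two full blocks) followed
	 * by a 476-byte leftover transfer on the next pass through this loop.
	 */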
  1002. while (buflen > 0) {
  1003. if (local_blockmode) {
  1004. int max_tran_size = SD_PAGE;
  1005. #ifdef BCMSDIOH_TXGLOM
  1006. /* There is no alignment requirement for HC3 */
  1007. if ((sd->version == HOST_CONTR_VER_3) && sd_txglom)
  1008. max_tran_size = SD_PAGE * 4;
  1009. #endif // endif
  1010. /* Max xfer is Page size */
  1011. len = MIN(max_tran_size, buflen);
  1012. /* Round down to a block boundry */
  1013. if (buflen > sd->client_block_size[func])
  1014. len = (len/sd->client_block_size[func]) *
  1015. sd->client_block_size[func];
  1016. if ((func == SDIO_FUNC_1) && (((len % 4) == 3) || (((len % 2) == 1) &&
  1017. (is_ddr50))) && ((rw == SDIOH_WRITE) || (rw == SDIOH_READ))) {
  1018. sd_err(("%s: Rounding up buffer to mod4 length.\n", __FUNCTION__));
  1019. len++;
  1020. tmpbuf = buffer;
  1021. if ((localbuf = (uint8 *)MALLOC(sd->osh, len)) == NULL) {
  1022. sd_err(("out of memory, malloced %d bytes\n",
  1023. MALLOCED(sd->osh)));
  1024. status = SDIOH_API_RC_FAIL;
  1025. goto done;
  1026. }
  1027. bcopy(buffer, localbuf, len);
  1028. buffer = localbuf;
  1029. }
  1030. } else {
  1031. /* Byte mode: One block at a time */
  1032. len = MIN(sd->client_block_size[func], buflen);
  1033. }
  1034. if (sdstd_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) {
  1035. status = SDIOH_API_RC_FAIL;
  1036. }
  1037. if (local_blockmode && localbuf) {
  1038. MFREE(sd->osh, localbuf, len);
  1039. localbuf = NULL;
  1040. len--;
  1041. buffer = tmpbuf;
  1042. sd_err(("%s: Restoring back buffer ptr and len.\n", __FUNCTION__));
  1043. }
  1044. if (status == SDIOH_API_RC_FAIL) {
  1045. goto done;
  1046. }
  1047. buffer += len;
  1048. buflen -= len;
  1049. if (!fifo)
  1050. addr += len;
  1051. #ifdef BCMSDIOH_TXGLOM
1052. /* This loop should not repeat for glommed pkts, since they are sent in
1053. * multiples of the block size or the total pkt size is less than a block
1054. */
  1055. if (sd->glom_info.count != 0)
  1056. buflen = 0;
  1057. #endif // endif
  1058. }
  1059. done:
  1060. /* Change to DATA_TRANSFER_IDLE */
  1061. sdstd_3_set_data_state(sd, DATA_TRANSFER_IDLE);
  1062. /* check if we have to do tuning; if so, start */
  1063. sdstd_3_check_and_do_tuning(sd, CHECK_TUNING_POST_DATA);
  1064. sdstd_unlock(sd);
  1065. #ifdef BCMSDIOH_TXGLOM
  1066. if (sd_txglom)
  1067. sdioh_glom_clear(sd);
  1068. #endif // endif
  1069. return status;
  1070. }
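/*
 * The GPIO helpers below assume two 16-bit register banks: GPIOs 0..15 sit in
 * the base register and GPIOs >= SDH_GPIO16 in the register at offset +2.
 * For example (illustrative only), gpio 18 maps to bit (18 - 16) = 2 of the
 * register at SD_GPIO_OE + 2.
 */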
  1071. extern SDIOH_API_RC
  1072. sdioh_gpioouten(sdioh_info_t *sd, uint32 gpio)
  1073. {
  1074. uint offset = 0;
  1075. uint16 val;
  1076. /* check if upper bank */
  1077. if (gpio >= SDH_GPIO16) {
  1078. gpio -= SDH_GPIO16;
  1079. offset = 2;
  1080. }
  1081. val = sdstd_rreg16(sd, SD_GPIO_OE + offset);
  1082. val |= (1 << gpio);
  1083. sdstd_wreg16(sd, SD_GPIO_OE + offset, val);
  1084. return SDIOH_API_RC_SUCCESS;
  1085. }
  1086. extern SDIOH_API_RC
  1087. sdioh_gpioout(sdioh_info_t *sd, uint32 gpio, bool enab)
  1088. {
  1089. uint offset = 0;
  1090. uint16 val;
  1091. /* check if upper bank */
  1092. if (gpio >= SDH_GPIO16) {
  1093. gpio -= SDH_GPIO16;
  1094. offset = 2;
  1095. }
  1096. val = sdstd_rreg16(sd, SD_GPIO_Reg + offset);
  1097. if (enab == TRUE)
  1098. val |= (1 << gpio);
  1099. else
  1100. val &= ~(1 << gpio);
  1101. sdstd_wreg16(sd, SD_GPIO_Reg + offset, val);
  1102. return SDIOH_API_RC_SUCCESS;
  1103. }
  1104. extern bool
  1105. sdioh_gpioin(sdioh_info_t *sd, uint32 gpio)
  1106. {
  1107. uint offset = 0;
  1108. uint16 val;
  1109. /* check if upper bank */
  1110. if (gpio >= SDH_GPIO16) {
  1111. gpio -= SDH_GPIO16;
  1112. offset = 2;
  1113. }
  1114. val = sdstd_rreg16(sd, SD_GPIO_Reg + offset);
  1115. val = (val >> gpio) & 1;
  1116. return (val == 1);
  1117. }
  1118. extern SDIOH_API_RC
  1119. sdioh_gpio_init(sdioh_info_t *sd)
  1120. {
  1121. uint rev;
  1122. rev = sdstd_rreg16(sd, SD_HostControllerVersion) >> 8;
  1123. /* Only P206 (fpga rev >= 16) supports gpio */
  1124. if (rev < 16) {
  1125. sd_err(("%s: gpio not supported in rev %d \n", __FUNCTION__, rev));
  1126. return SDIOH_API_RC_FAIL;
  1127. }
  1128. sdstd_wreg16(sd, SD_GPIO_Enable, SDH_GPIO_ENABLE);
  1129. sdstd_wreg16(sd, SD_GPIO_Enable + 2, SDH_GPIO_ENABLE);
  1130. /* Default to input */
  1131. sdstd_wreg16(sd, SD_GPIO_OE, 0);
  1132. sdstd_wreg16(sd, SD_GPIO_OE + 2, 0);
  1133. return SDIOH_API_RC_SUCCESS;
  1134. }
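/*
 * Illustrative usage (a sketch): sdioh_sleep(sd, TRUE) issues CMD14 to put the
 * card to sleep; sdioh_sleep(sd, FALSE) repeats CMD14 until an R1 response is
 * seen and then re-selects the card with CMD7, as implemented below.
 */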
  1135. extern SDIOH_API_RC
  1136. sdioh_sleep(sdioh_info_t *sd, bool enab)
  1137. {
  1138. SDIOH_API_RC status;
  1139. uint32 cmd_arg = 0, rsp1 = 0;
  1140. int retry = 100;
  1141. sdstd_lock(sd);
  1142. cmd_arg = SFIELD(cmd_arg, CMD14_RCA, sd->card_rca);
  1143. cmd_arg = SFIELD(cmd_arg, CMD14_SLEEP, enab);
  1144. /*
  1145. * For ExitSleep:
  1146. * 1) Repeat CMD14 until R1 is received
  1147. * 2) Send CMD7
  1148. */
  1149. status = SDIOH_API_RC_FAIL;
  1150. while (retry-- > 0) {
  1151. if ((sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_14, cmd_arg)) == SUCCESS) {
  1152. status = SDIOH_API_RC_SUCCESS;
  1153. break;
  1154. }
  1155. OSL_DELAY(1400);
  1156. }
  1157. if (status == SDIOH_API_RC_FAIL) {
  1158. sd_err(("%s: CMD14: failed! enable:%d\n", __FUNCTION__, enab));
  1159. goto exit;
  1160. }
  1161. sdstd_cmd_getrsp(sd, &rsp1, 1);
  1162. sd_info(("%s: CMD14 OK: cmd_resp:0x%x\n", __FUNCTION__, rsp1));
  1163. /* ExitSleep: Send CMD7 After R1 */
  1164. if (enab == FALSE) {
  1165. /* Select the card */
  1166. cmd_arg = SFIELD(0, CMD7_RCA, sd->card_rca);
  1167. if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_7, cmd_arg)) != SUCCESS) {
  1168. sd_err(("%s: CMD14 send CMD7 failed!\n", __FUNCTION__));
  1169. status = SDIOH_API_RC_FAIL;
  1170. goto exit;
  1171. }
  1172. sdstd_cmd_getrsp(sd, &rsp1, 1);
  1173. if (rsp1 != SDIOH_CMD7_EXP_STATUS) {
  1174. sd_err(("%s: CMD7 response error. Response = 0x%x!\n",
  1175. __FUNCTION__, rsp1));
  1176. status = SDIOH_API_RC_FAIL;
  1177. goto exit;
  1178. }
  1179. }
  1180. exit:
  1181. sdstd_unlock(sd);
  1182. return status;
  1183. }
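/*
 * sdstd_abort() overview (descriptive note): a CMD52 write of the function
 * number to the CCCR IOAbort register is issued as an abort-type command.
 * In polled mode the routine then waits for command completion, resets the
 * CMD/DAT lines on error, and sanity-checks the R5 response flags.
 */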
  1184. static int
  1185. sdstd_abort(sdioh_info_t *sd, uint func)
  1186. {
  1187. int err = 0;
  1188. int retries;
  1189. uint16 cmd_reg;
  1190. uint32 cmd_arg;
  1191. uint32 rsp5;
  1192. uint8 rflags;
  1193. uint16 int_reg = 0;
  1194. uint16 plain_intstatus;
  1195. /* Argument is write to F0 (CCCR) IOAbort with function number */
  1196. cmd_arg = 0;
  1197. cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, SDIO_FUNC_0);
  1198. cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, SDIOD_CCCR_IOABORT);
  1199. cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SD_IO_OP_WRITE);
  1200. cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
  1201. cmd_arg = SFIELD(cmd_arg, CMD52_DATA, func);
  1202. /* Command is CMD52 write */
  1203. cmd_reg = 0;
  1204. cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48_BUSY);
  1205. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
  1206. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
  1207. cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
  1208. cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_ABORT);
  1209. cmd_reg = SFIELD(cmd_reg, CMD_INDEX, SDIOH_CMD_52);
  1210. if (sd->sd_mode == SDIOH_MODE_SPI) {
  1211. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
  1212. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
  1213. }
  1214. /* Wait for CMD_INHIBIT to go away as per spec section 3.6.1.1 */
  1215. retries = RETRIES_SMALL;
  1216. while (GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CMD_INHIBIT)) {
  1217. if (retries == RETRIES_SMALL)
  1218. sd_err(("%s: Waiting for Command Inhibit, state 0x%08x\n",
  1219. __FUNCTION__, sdstd_rreg(sd, SD_PresentState)));
  1220. if (!--retries) {
  1221. sd_err(("%s: Command Inhibit timeout, state 0x%08x\n",
  1222. __FUNCTION__, sdstd_rreg(sd, SD_PresentState)));
  1223. if (trap_errs)
  1224. ASSERT(0);
  1225. err = BCME_SDIO_ERROR;
  1226. goto done;
  1227. }
  1228. }
  1229. /* Clear errors from any previous commands */
  1230. if ((plain_intstatus = sdstd_rreg16(sd, SD_ErrorIntrStatus)) != 0) {
  1231. sd_err(("abort: clearing errstat 0x%04x\n", plain_intstatus));
  1232. sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus);
  1233. }
  1234. plain_intstatus = sdstd_rreg16(sd, SD_IntrStatus);
  1235. if (plain_intstatus & ~(SFIELD(0, INTSTAT_CARD_INT, 1))) {
  1236. sd_err(("abort: intstatus 0x%04x\n", plain_intstatus));
  1237. if (GFIELD(plain_intstatus, INTSTAT_CMD_COMPLETE)) {
  1238. sd_err(("SDSTD_ABORT: CMD COMPLETE SET BEFORE COMMAND GIVEN!!!\n"));
  1239. }
  1240. if (GFIELD(plain_intstatus, INTSTAT_CARD_REMOVAL)) {
  1241. sd_err(("SDSTD_ABORT: INTSTAT_CARD_REMOVAL\n"));
  1242. err = BCME_NODEVICE;
  1243. goto done;
  1244. }
  1245. }
  1246. /* Issue the command */
  1247. sdstd_wreg(sd, SD_Arg0, cmd_arg);
  1248. sdstd_wreg16(sd, SD_Command, cmd_reg);
  1249. /* In interrupt mode return, expect later CMD_COMPLETE interrupt */
  1250. if (!sd->polled_mode)
  1251. return err;
  1252. /* Otherwise, wait for the command to complete */
  1253. retries = RETRIES_LARGE;
  1254. do {
  1255. int_reg = sdstd_rreg16(sd, SD_IntrStatus);
  1256. } while (--retries &&
  1257. (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) &&
  1258. (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0));
  1259. /* If command completion fails, do a cmd reset and note the error */
  1260. if (!retries) {
  1261. sd_err(("%s: CMD_COMPLETE timeout: intr 0x%04x err 0x%04x state 0x%08x\n",
  1262. __FUNCTION__, int_reg,
  1263. sdstd_rreg16(sd, SD_ErrorIntrStatus),
  1264. sdstd_rreg(sd, SD_PresentState)));
  1265. sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
  1266. retries = RETRIES_LARGE;
  1267. do {
  1268. sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__));
  1269. } while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset),
  1270. SW_RESET_CMD)) && retries--);
  1271. if (!retries) {
  1272. sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
  1273. }
  1274. if (trap_errs)
  1275. ASSERT(0);
  1276. err = BCME_SDIO_ERROR;
  1277. }
  1278. /* Clear Command Complete interrupt */
  1279. int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1);
  1280. sdstd_wreg16(sd, SD_IntrStatus, int_reg);
  1281. /* Check for Errors */
  1282. if ((plain_intstatus = sdstd_rreg16 (sd, SD_ErrorIntrStatus)) != 0) {
  1283. sd_err(("%s: ErrorintrStatus: 0x%x, "
  1284. "(intrstatus = 0x%x, present state 0x%x) clearing\n",
  1285. __FUNCTION__, plain_intstatus,
  1286. sdstd_rreg16(sd, SD_IntrStatus),
  1287. sdstd_rreg(sd, SD_PresentState)));
  1288. sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus);
  1289. sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1));
  1290. retries = RETRIES_LARGE;
  1291. do {
  1292. sd_trace(("%s: waiting for DAT line reset\n", __FUNCTION__));
  1293. } while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset),
  1294. SW_RESET_DAT)) && retries--);
  1295. if (!retries) {
  1296. sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__));
  1297. }
  1298. if (trap_errs)
  1299. ASSERT(0);
  1300. /* ABORT is dataless, only cmd errs count */
  1301. if (plain_intstatus & ERRINT_CMD_ERRS)
  1302. err = BCME_SDIO_ERROR;
  1303. }
  1304. /* If command failed don't bother looking at response */
  1305. if (err)
  1306. goto done;
  1307. /* Otherwise, check the response */
  1308. sdstd_cmd_getrsp(sd, &rsp5, 1);
  1309. rflags = GFIELD(rsp5, RSP5_FLAGS);
  1310. if (rflags & SD_RSP_R5_ERRBITS) {
  1311. sd_err(("%s: R5 flags include errbits: 0x%02x\n", __FUNCTION__, rflags));
  1312. /* The CRC error flag applies to the previous command */
  1313. if (rflags & (SD_RSP_R5_ERRBITS & ~SD_RSP_R5_COM_CRC_ERROR)) {
  1314. err = BCME_SDIO_ERROR;
  1315. goto done;
  1316. }
  1317. }
  1318. if (((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x10) &&
  1319. ((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x20)) {
  1320. sd_err(("%s: R5 flags has bad state: 0x%02x\n", __FUNCTION__, rflags));
  1321. err = BCME_SDIO_ERROR;
  1322. goto done;
  1323. }
  1324. if (GFIELD(rsp5, RSP5_STUFF)) {
  1325. sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
  1326. __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
  1327. err = BCME_SDIO_ERROR;
  1328. goto done;
  1329. }
  1330. done:
  1331. if (err == BCME_NODEVICE)
  1332. return err;
  1333. sdstd_wreg8(sd, SD_SoftwareReset,
  1334. SFIELD(SFIELD(0, SW_RESET_DAT, 1), SW_RESET_CMD, 1));
  1335. retries = RETRIES_LARGE;
  1336. do {
  1337. rflags = sdstd_rreg8(sd, SD_SoftwareReset);
  1338. if (!GFIELD(rflags, SW_RESET_DAT) && !GFIELD(rflags, SW_RESET_CMD))
  1339. break;
  1340. } while (--retries);
  1341. if (!retries) {
  1342. sd_err(("%s: Timeout waiting for DAT/CMD reset: 0x%02x\n",
  1343. __FUNCTION__, rflags));
  1344. err = BCME_SDIO_ERROR;
  1345. }
  1346. return err;
  1347. }
  1348. extern int
  1349. sdioh_abort(sdioh_info_t *sd, uint fnum)
  1350. {
  1351. int ret;
  1352. sdstd_lock(sd);
  1353. ret = sdstd_abort(sd, fnum);
  1354. sdstd_unlock(sd);
  1355. return ret;
  1356. }
  1357. int
  1358. sdioh_start(sdioh_info_t *sd, int stage)
  1359. {
  1360. return SUCCESS;
  1361. }
  1362. int
  1363. sdioh_stop(sdioh_info_t *sd)
  1364. {
  1365. return SUCCESS;
  1366. }
  1367. int
  1368. sdioh_waitlockfree(sdioh_info_t *sd)
  1369. {
  1370. sdstd_waitlockfree(sd);
  1371. return SUCCESS;
  1372. }
  1373. static int
  1374. sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg)
  1375. {
  1376. uint16 regval;
  1377. uint retries;
  1378. uint function = 0;
  1379. /* If no errors, we're done */
  1380. if ((regval = sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus)) == 0)
  1381. return SUCCESS;
  1382. sd_info(("%s: ErrorIntrStatus 0x%04x (clearing), IntrStatus 0x%04x PresentState 0x%08x\n",
  1383. __FUNCTION__, regval, sdstd_rreg16(sdioh_info, SD_IntrStatus),
  1384. sdstd_rreg(sdioh_info, SD_PresentState)));
  1385. sdstd_wreg16(sdioh_info, SD_ErrorIntrStatus, regval);
  1386. if (cmd == SDIOH_CMD_14) {
  1387. if (regval & ERRINT_CMD_TIMEOUT_BIT) {
  1388. regval &= ~ERRINT_CMD_TIMEOUT_BIT;
  1389. }
  1390. }
  1391. /* On command error, issue CMD reset */
  1392. if (regval & ERRINT_CMD_ERRS) {
  1393. sd_trace(("%s: issuing CMD reset\n", __FUNCTION__));
  1394. sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
  1395. for (retries = RETRIES_LARGE; retries; retries--)
  1396. if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_CMD)))
  1397. break;
  1398. if (!retries) {
  1399. sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
  1400. }
  1401. }
  1402. /* On data error, issue DAT reset */
  1403. if (regval & ERRINT_DATA_ERRS) {
  1404. if (regval & ERRINT_ADMA_BIT)
  1405. sd_err(("%s:ADMAError: status:0x%x\n",
  1406. __FUNCTION__, sdstd_rreg(sdioh_info, SD_ADMA_ErrStatus)));
  1407. sd_trace(("%s: issuing DAT reset\n", __FUNCTION__));
  1408. sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1));
  1409. for (retries = RETRIES_LARGE; retries; retries--)
  1410. if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_DAT)))
  1411. break;
  1412. if (!retries) {
  1413. sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__));
  1414. }
  1415. }
  1416. /* For an IO command (CMD52 or CMD53) issue an abort to the appropriate function */
  1417. if (cmd == SDIOH_CMD_53)
  1418. function = GFIELD(arg, CMD53_FUNCTION);
  1419. else if (cmd == SDIOH_CMD_52) {
  1420. if (GFIELD(arg, CMD52_REG_ADDR) != F1_SLEEPCSR_ADDR)
  1421. function = GFIELD(arg, CMD52_FUNCTION);
  1422. }
  1423. if (function) {
  1424. sd_trace(("%s: requesting abort for function %d after cmd %d\n",
  1425. __FUNCTION__, function, cmd));
  1426. sdstd_abort(sdioh_info, function);
  1427. }
  1428. if (trap_errs)
  1429. ASSERT(0);
  1430. return ERROR;
  1431. }
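/*
 * Recovery order in sdstd_check_errs() above (descriptive note): clear the
 * latched error status, reset the CMD line on command errors, reset the DAT
 * line on data errors, and finally abort the offending function for CMD52/53.
 */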
  1432. /*
  1433. * Private/Static work routines
  1434. */
  1435. static bool
  1436. sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset)
  1437. {
  1438. int retries = RETRIES_LARGE;
  1439. uchar regval;
  1440. if (!sd)
  1441. return TRUE;
  1442. sdstd_lock(sd);
  1443. /* Reset client card */
  1444. if (client_reset && (sd->adapter_slot != -1)) {
  1445. if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOABORT, 1, 0x8) != SUCCESS)
  1446. sd_err(("%s: Cannot write to card reg 0x%x\n",
  1447. __FUNCTION__, SDIOD_CCCR_IOABORT));
  1448. else
  1449. sd->card_rca = 0;
  1450. }
  1451. /* Reset host controller */
  1452. if (host_reset) {
  1453. regval = SFIELD(0, SW_RESET_ALL, 1);
  1454. sdstd_wreg8(sd, SD_SoftwareReset, regval);
  1455. do {
  1456. sd_trace(("%s: waiting for reset\n", __FUNCTION__));
  1457. } while ((sdstd_rreg8(sd, SD_SoftwareReset) & regval) && retries--);
  1458. if (!retries) {
  1459. sd_err(("%s: Timeout waiting for host reset\n", __FUNCTION__));
  1460. sdstd_unlock(sd);
  1461. return (FALSE);
  1462. }
  1463. /* A reset should reset bus back to 1 bit mode */
  1464. sd->sd_mode = SDIOH_MODE_SD1;
  1465. sdstd_set_dma_mode(sd, sd->sd_dma_mode);
  1466. }
  1467. sdstd_unlock(sd);
  1468. return TRUE;
  1469. }
  1470. /* Disable device interrupt */
  1471. void
  1472. sdstd_devintr_off(sdioh_info_t *sd)
  1473. {
  1474. sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
  1475. if (sd->use_client_ints) {
  1476. sd->intmask &= ~CLIENT_INTR;
  1477. sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
  1478. sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
  1479. }
  1480. }
  1481. /* Enable device interrupt */
  1482. void
  1483. sdstd_devintr_on(sdioh_info_t *sd)
  1484. {
  1485. ASSERT(sd->lockcount == 0);
  1486. sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
  1487. if (sd->use_client_ints) {
  1488. if (sd->version < HOST_CONTR_VER_3) {
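/* Pre-3.0 hosts: briefly clearing and restoring the card-interrupt status
 * enable appears intended to re-arm a level-triggered card interrupt that may
 * have been latched while signalling was off (assumed intent).
 */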
  1489. uint16 status = sdstd_rreg16(sd, SD_IntrStatusEnable);
  1490. sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(status, INTSTAT_CARD_INT, 0));
  1491. sdstd_wreg16(sd, SD_IntrStatusEnable, status);
  1492. }
  1493. sd->intmask |= CLIENT_INTR;
  1494. sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
  1495. sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
  1496. }
  1497. }
  1498. #ifdef BCMSDYIELD
  1499. /* Enable/disable other interrupts */
  1500. void
  1501. sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err)
  1502. {
  1503. if (err) {
  1504. norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
  1505. sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, err);
  1506. }
  1507. sd->intmask |= norm;
  1508. sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
  1509. if (sd_forcerb)
  1510. sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
  1511. }
  1512. void
  1513. sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err)
  1514. {
  1515. if (err) {
  1516. norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
  1517. sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0);
  1518. }
  1519. sd->intmask &= ~norm;
  1520. sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
  1521. if (sd_forcerb)
  1522. sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
  1523. }
  1524. #endif /* BCMSDYIELD */
  1525. static int
  1526. sdstd_host_init(sdioh_info_t *sd)
  1527. {
  1528. int num_slots, full_slot;
  1529. uint8 reg8;
  1530. uint32 card_ins;
  1531. int slot, first_bar = 0;
  1532. bool detect_slots = FALSE;
  1533. uint bar;
  1534. /* Check for Arasan ID */
  1535. if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_SI_IMAGE) {
  1536. sd_info(("%s: Found Arasan Standard SDIO Host Controller\n", __FUNCTION__));
  1537. sd->controller_type = SDIOH_TYPE_ARASAN_HDK;
  1538. detect_slots = TRUE;
  1539. /* Controller supports SDMA, so turn it on here. */
  1540. sd->sd_dma_mode = DMA_MODE_SDMA;
  1541. } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_BROADCOM) {
  1542. sd_info(("%s: Found Broadcom 27xx Standard SDIO Host Controller\n", __FUNCTION__));
  1543. sd->controller_type = SDIOH_TYPE_BCM27XX;
  1544. detect_slots = FALSE;
  1545. } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_TI) {
  1546. sd_info(("%s: Found TI PCIxx21 Standard SDIO Host Controller\n", __FUNCTION__));
  1547. sd->controller_type = SDIOH_TYPE_TI_PCIXX21;
  1548. detect_slots = TRUE;
  1549. } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_RICOH) {
  1550. sd_info(("%s: Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter\n",
  1551. __FUNCTION__));
  1552. sd->controller_type = SDIOH_TYPE_RICOH_R5C822;
  1553. detect_slots = TRUE;
  1554. } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_JMICRON) {
  1555. sd_info(("%s: JMicron Standard SDIO Host Controller\n",
  1556. __FUNCTION__));
  1557. sd->controller_type = SDIOH_TYPE_JMICRON;
  1558. detect_slots = TRUE;
  1559. } else {
  1560. return ERROR;
  1561. }
  1562. /*
  1563. * Determine num of slots
  1564. * Search each slot
  1565. */
  1566. first_bar = OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0x7;
  1567. num_slots = (OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0xff) >> 4;
  1568. num_slots &= 7;
  1569. num_slots++; /* map bits to num slots according to spec */
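/* Illustrative decoding of SD_SlotInfo (per the two reads above): a value of
 * 0x10 would mean first BAR 0 and (1 + 1) = 2 slots; 0x21 would mean first
 * BAR 1 and 3 slots.
 */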
  1570. if (OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) ==
  1571. ((SDIOH_FPGA_ID << 16) | VENDOR_BROADCOM)) {
  1572. sd_err(("%s: Found Broadcom Standard SDIO Host Controller FPGA\n", __FUNCTION__));
  1573. /* Set BAR0 Window to SDIOSTH core */
  1574. OSL_PCI_WRITE_CONFIG(sd->osh, PCI_BAR0_WIN, 4, 0x18001000);
  1575. /* Set defaults particular to this controller. */
  1576. detect_slots = TRUE;
  1577. num_slots = 1;
  1578. first_bar = 0;
  1579. /* Controller supports ADMA2, so turn it on here. */
  1580. sd->sd_dma_mode = DMA_MODE_ADMA2;
  1581. }
  1582. /* Map in each slot on the board and query it to see if a
  1583. * card is inserted. Use the first populated slot found.
  1584. */
  1585. if (sd->mem_space) {
  1586. sdstd_reg_unmap(sd->osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
  1587. sd->mem_space = NULL;
  1588. }
  1589. full_slot = -1;
  1590. for (slot = 0; slot < num_slots; slot++) {
  1591. bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(slot + first_bar)), 4);
  1592. sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh,
  1593. (uintptr)bar, SDIOH_REG_WINSZ);
  1594. sd->adapter_slot = -1;
  1595. if (detect_slots) {
  1596. card_ins = GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CARD_PRESENT);
  1597. } else {
  1598. card_ins = TRUE;
  1599. }
  1600. if (card_ins) {
  1601. sd_info(("%s: SDIO slot %d: Full\n", __FUNCTION__, slot));
  1602. if (full_slot < 0)
  1603. full_slot = slot;
  1604. } else {
  1605. sd_info(("%s: SDIO slot %d: Empty\n", __FUNCTION__, slot));
  1606. }
  1607. if (sd->mem_space) {
  1608. sdstd_reg_unmap(sd->osh, (ulong)sd->mem_space, SDIOH_REG_WINSZ);
  1609. sd->mem_space = NULL;
  1610. }
  1611. }
  1612. if (full_slot < 0) {
  1613. sd_err(("No slots on SDIO controller are populated\n"));
  1614. return -1;
  1615. }
  1616. bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4);
  1617. sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh, (uintptr)bar, SDIOH_REG_WINSZ);
  1618. sd_err(("Using slot %d at BAR%d [0x%08x] mem_space 0x%p\n",
  1619. full_slot,
  1620. (full_slot + first_bar),
  1621. OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4),
  1622. sd->mem_space));
  1623. sd->adapter_slot = full_slot;
  1624. sd->version = sdstd_rreg16(sd, SD_HostControllerVersion) & 0xFF;
  1625. switch (sd->version) {
  1626. case 0:
  1627. sd_err(("Host Controller version 1.0, Vendor Revision: 0x%02x\n",
  1628. sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
  1629. break;
  1630. case 1:
  1631. sd_err(("Host Controller version 2.0, Vendor Revision: 0x%02x\n",
  1632. sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
  1633. break;
  1634. case 2:
  1635. sd_err(("Host Controller version 3.0, Vendor Revision: 0x%02x\n",
  1636. sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
  1637. break;
  1638. default:
  1639. sd_err(("%s: Host Controller version 0x%02x not supported.\n",
  1640. __FUNCTION__, sd->version));
  1641. break;
  1642. }
  1643. sd->caps = sdstd_rreg(sd, SD_Capabilities); /* Cache this for later use */
  1644. /* MSB 32 bits of caps supported in sdio 3.0 */
  1645. sd->caps3 = sdstd_rreg(sd, SD_Capabilities3); /* Cache this for later use */
1646. sd->curr_caps = sdstd_rreg(sd, SD_MaxCurCap);
1647. sd3_trace(("sd3: %s: caps: 0x%x; MCCap:0x%x\n", __FUNCTION__, sd->caps, sd->curr_caps));
1648. sd3_trace(("sd3: %s: caps3: 0x%x\n", __FUNCTION__, sd->caps3));
  1649. sd_info(("%s: caps: 0x%x; MCCap:0x%x\n", __FUNCTION__, sd->caps, sd->curr_caps));
  1650. sdstd_set_dma_mode(sd, sd->sd_dma_mode);
  1651. sdstd_reset(sd, 1, 0);
  1652. /* Read SD4/SD1 mode */
  1653. if ((reg8 = sdstd_rreg8(sd, SD_HostCntrl))) {
  1654. if (reg8 & SD4_MODE) {
  1655. sd_err(("%s: Host cntrlr already in 4 bit mode: 0x%x\n",
  1656. __FUNCTION__, reg8));
  1657. }
  1658. }
  1659. /* Default power on mode is SD1 */
  1660. sd->sd_mode = SDIOH_MODE_SD1;
  1661. sd->polled_mode = TRUE;
  1662. sd->host_init_done = TRUE;
  1663. sd->card_init_done = FALSE;
  1664. sd->adapter_slot = full_slot;
  1665. if (sd_uhsimode == SD3CLKMODE_DISABLED) {
  1666. sd->version = HOST_CONTR_VER_2;
  1667. sd3_trace(("%s:forcing to SDIO HC 2.0\n", __FUNCTION__));
  1668. }
  1669. if (sd->version == HOST_CONTR_VER_3) {
  1670. /* read host ctrl 2 */
  1671. uint16 reg16 = 0;
  1672. sd3_trace(("sd3: %s: HC3: reading additional regs\n", __FUNCTION__));
  1673. reg16 = sdstd_rreg16(sd, SD3_HostCntrl2);
  1674. sd_info(("%s: HCtrl: 0x%x; HCtrl2:0x%x\n", __FUNCTION__, reg8, reg16));
  1675. BCM_REFERENCE(reg16);
  1676. /* if HC supports 1.8V and one of the SDR/DDR modes, hc uhci support is PRESENT */
  1677. if ((GFIELD(sd->caps, CAP_VOLT_1_8)) &&
  1678. (GFIELD(sd->caps3, CAP3_SDR50_SUP) ||
  1679. GFIELD(sd->caps3, CAP3_SDR104_SUP) ||
  1680. GFIELD(sd->caps3, CAP3_DDR50_SUP)))
  1681. sd->host_UHSISupported = 1;
  1682. }
  1683. return (SUCCESS);
  1684. }
  1685. #define CMD5_RETRIES 200
  1686. static int
  1687. get_ocr(sdioh_info_t *sd, uint32 *cmd_arg, uint32 *cmd_rsp)
  1688. {
  1689. int retries, status;
  1690. /* Get the Card's Operation Condition. Occasionally the board
  1691. * takes a while to become ready
  1692. */
  1693. retries = CMD5_RETRIES;
  1694. do {
  1695. *cmd_rsp = 0;
  1696. if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_5, *cmd_arg))
  1697. != SUCCESS) {
  1698. sd_err(("%s: CMD5 failed\n", __FUNCTION__));
  1699. return status;
  1700. }
  1701. sdstd_cmd_getrsp(sd, cmd_rsp, 1);
  1702. if (!GFIELD(*cmd_rsp, RSP4_CARD_READY))
  1703. sd_trace(("%s: Waiting for card to become ready\n", __FUNCTION__));
  1704. } while ((!GFIELD(*cmd_rsp, RSP4_CARD_READY)) && --retries);
  1705. if (!retries)
  1706. return ERROR;
  1707. return (SUCCESS);
  1708. }
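/*
 * Illustrative use of get_ocr() (a sketch; the real voltage-window handling
 * lives in sdstd_start_power(), not in this snippet):
 *
 *   uint32 cmd_arg = 0, ocr = 0;
 *   if (get_ocr(sd, &cmd_arg, &ocr) == SUCCESS) {
 *       // ocr holds the CMD5/R4 response with RSP4_CARD_READY set
 *   }
 */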
  1709. static int
  1710. sdstd_client_init(sdioh_info_t *sd)
  1711. {
  1712. uint32 cmd_arg, cmd_rsp;
  1713. int status;
  1714. uint8 fn_ints;
  1715. uint32 regdata;
  1716. uint16 powerstat = 0;
  1717. sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot));
  1718. /* Clear any pending ints */
  1719. sdstd_wreg16(sd, SD_IntrStatus, 0x1fff);
  1720. sdstd_wreg16(sd, SD_ErrorIntrStatus, 0x0fff);
  1721. /* Enable both Normal and Error Status. This does not enable
  1722. * interrupts, it only enables the status bits to
  1723. * become 'live'
  1724. */
  1725. if (!sd->host_UHSISupported)
  1726. sdstd_wreg16(sd, SD_IntrStatusEnable, 0x1ff);
  1727. else
  1728. {
1729. /* INT_x interrupts too, but DO NOT enable signalling [retuning enable
1730. * will happen later]
1731. */
  1732. sdstd_wreg16(sd, SD_IntrStatusEnable, 0x0fff);
  1733. }
  1734. sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, 0xffff);
  1735. sdstd_wreg16(sd, SD_IntrSignalEnable, 0); /* Disable ints for now. */
  1736. if (sd->host_UHSISupported) {
  1737. /* when HC is started for SDIO 3.0 mode, start in lowest voltage mode first. */
  1738. powerstat = sdstd_start_power(sd, 1);
  1739. if (SDIO_OCR_READ_FAIL == powerstat) {
1740. /* This could be because the device is 3.3V, and possibly does
1741. * not have SDIO 3.0 support. So, try the highest voltage.
1742. */
  1743. sd_err(("sdstd_start_power: legacy device: trying highest voltage\n"));
  1744. sd_err(("%s failed\n", __FUNCTION__));
  1745. return ERROR;
  1746. } else if (TRUE != powerstat) {
  1747. sd_err(("sdstd_start_power failed\n"));
  1748. return ERROR;
  1749. }
  1750. } else
  1751. if (TRUE != sdstd_start_power(sd, 0)) {
  1752. sd_err(("sdstd_start_power failed\n"));
  1753. return ERROR;
  1754. }
  1755. if (sd->num_funcs == 0) {
  1756. sd_err(("%s: No IO funcs!\n", __FUNCTION__));
  1757. return ERROR;
  1758. }
  1759. /* In SPI mode, issue CMD0 first */
  1760. if (sd->sd_mode == SDIOH_MODE_SPI) {
  1761. cmd_arg = 0;
  1762. if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_0, cmd_arg))
  1763. != SUCCESS) {
  1764. sd_err(("BCMSDIOH: cardinit: CMD0 failed!\n"));
  1765. return status;
  1766. }
  1767. }
  1768. if (sd->sd_mode != SDIOH_MODE_SPI) {
  1769. uint16 rsp6_status;
  1770. /* Card is operational. Ask it to send an RCA */
  1771. cmd_arg = 0;
  1772. if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_3, cmd_arg))
  1773. != SUCCESS) {
  1774. sd_err(("%s: CMD3 failed!\n", __FUNCTION__));
  1775. return status;
  1776. }
  1777. /* Verify the card status returned with the cmd response */
  1778. sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
  1779. rsp6_status = GFIELD(cmd_rsp, RSP6_STATUS);
  1780. if (GFIELD(rsp6_status, RSP6STAT_COM_CRC_ERROR) ||
  1781. GFIELD(rsp6_status, RSP6STAT_ILLEGAL_CMD) ||
  1782. GFIELD(rsp6_status, RSP6STAT_ERROR)) {
  1783. sd_err(("%s: CMD3 response error. Response = 0x%x!\n",
  1784. __FUNCTION__, rsp6_status));
  1785. return ERROR;
  1786. }
  1787. /* Save the Card's RCA */
  1788. sd->card_rca = GFIELD(cmd_rsp, RSP6_IO_RCA);
  1789. sd_info(("RCA is 0x%x\n", sd->card_rca));
  1790. if (rsp6_status)
  1791. sd_err(("raw status is 0x%x\n", rsp6_status));
  1792. /* Select the card */
  1793. cmd_arg = SFIELD(0, CMD7_RCA, sd->card_rca);
  1794. if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_7, cmd_arg))
  1795. != SUCCESS) {
  1796. sd_err(("%s: CMD7 failed!\n", __FUNCTION__));
  1797. return status;
  1798. }
  1799. sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
  1800. if (cmd_rsp != SDIOH_CMD7_EXP_STATUS) {
  1801. sd_err(("%s: CMD7 response error. Response = 0x%x!\n",
  1802. __FUNCTION__, cmd_rsp));
  1803. return ERROR;
  1804. }
  1805. }
  1806. /* Disable default/power-up device Card Detect (CD) pull up resistor on DAT3
  1807. * via CCCR bus interface control register. Set CD disable bit while leaving
  1808. * others alone.
  1809. */
  1810. if (sdstd_card_regread (sd, 0, SDIOD_CCCR_BICTRL, 1, &regdata) != SUCCESS) {
  1811. sd_err(("Disabling card detect: read of device CCCR BICTRL register failed\n"));
  1812. return ERROR;
  1813. }
  1814. regdata |= BUS_CARD_DETECT_DIS;
  1815. if (sdstd_card_regwrite (sd, 0, SDIOD_CCCR_BICTRL, 1, regdata) != SUCCESS) {
  1816. sd_err(("Disabling card detect: write of device CCCR BICTRL register failed\n"));
  1817. return ERROR;
  1818. }
  1819. sdstd_card_enablefuncs(sd);
  1820. if (!sdstd_bus_width(sd, sd_sdmode)) {
  1821. sd_err(("sdstd_bus_width failed\n"));
  1822. return ERROR;
  1823. }
  1824. set_client_block_size(sd, 1, sd_f1_blocksize);
  1825. fn_ints = INTR_CTL_FUNC1_EN;
  1826. if (sd->num_funcs >= 2) {
  1827. set_client_block_size(sd, 2, sd_f2_blocksize /* BLOCK_SIZE_4328 */);
  1828. fn_ints |= INTR_CTL_FUNC2_EN;
  1829. }
  1830. /* Enable/Disable Client interrupts */
  1831. /* Turn on here but disable at host controller? */
  1832. if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_INTEN, 1,
  1833. (fn_ints | INTR_CTL_MASTER_EN)) != SUCCESS) {
  1834. sd_err(("%s: Could not enable ints in CCCR\n", __FUNCTION__));
  1835. return ERROR;
  1836. }
  1837. if (sd_uhsimode != SD3CLKMODE_DISABLED) {
1838. /* Try to switch to a UHS-I clocking mode if both host and device support it */
  1839. if (sdstd_3_clock_wrapper(sd) != SUCCESS) {
  1840. sd_err(("sdstd_3_clock_wrapper failed\n"));
  1841. return ERROR;
  1842. }
  1843. } else
  1844. {
  1845. if (sdstd_clock_wrapper(sd)) {
  1846. sd_err(("sdstd_start_clock failed\n"));
  1847. return ERROR;
  1848. }
  1849. }
  1850. sd->card_init_done = TRUE;
  1851. return SUCCESS;
  1852. }
  1853. static int
  1854. sdstd_clock_wrapper(sdioh_info_t *sd)
  1855. {
  1856. sd_trace(("%s:Enter\n", __FUNCTION__));
  1857. /* After configuring for High-Speed mode, set the desired clock rate. */
  1858. sdstd_set_highspeed_mode(sd, (bool)sd_hiok);
  1859. if (FALSE == sdstd_start_clock(sd, (uint16)sd_divisor)) {
  1860. sd_err(("sdstd_start_clock failed\n"));
  1861. return ERROR;
  1862. }
  1863. return SUCCESS;
  1864. }
  1865. static int
  1866. sdstd_3_clock_wrapper(sdioh_info_t *sd)
  1867. {
  1868. int retclk = 0;
  1869. sd_info(("%s: Enter\n", __FUNCTION__));
  1870. if (sd->card_UHSI_voltage_Supported) {
  1871. /* check if clk config requested is supported by both host and target. */
  1872. retclk = sdstd_3_get_matching_uhsi_clkmode(sd, sd_uhsimode);
  1873. /* if no match for requested caps, try to get the max match possible */
  1874. if (retclk == -1) {
  1875. /* if auto enabled */
  1876. if (sd3_autoselect_uhsi_max == 1) {
  1877. retclk = sdstd_3_get_matching_uhsi_clkmode(sd, SD3CLKMODE_AUTO);
  1878. /* still NO match */
  1879. if (retclk == -1) {
  1880. /* NO match with HC and card capabilities. Now try the
  1881. * High speed/legacy mode if possible.
  1882. */
  1883. sd_err(("%s: Not able to set requested clock\n",
  1884. __FUNCTION__));
  1885. return ERROR;
  1886. }
  1887. } else {
  1888. /* means user doesn't want auto clock. So return ERROR */
  1889. sd_err(("%s: Not able to set requested clock, Try"
  1890. "auto mode\n", __FUNCTION__));
  1891. return ERROR;
  1892. }
  1893. }
  1894. if (retclk != -1) {
  1895. /* set the current clk to be selected clock */
  1896. sd_uhsimode = retclk;
  1897. if (BCME_OK != sdstd_3_set_highspeed_uhsi_mode(sd, sd_uhsimode)) {
  1898. sd_err(("%s: Not able to set requested clock\n", __FUNCTION__));
  1899. return ERROR;
  1900. }
  1901. } else {
  1902. /* try legacy mode */
  1903. if (SUCCESS != sdstd_clock_wrapper(sd)) {
  1904. sd_err(("sdstd_start_clock failed\n"));
  1905. return ERROR;
  1906. }
  1907. }
  1908. } else {
  1909. sd_info(("%s: Legacy Mode Clock\n", __FUNCTION__));
  1910. /* try legacy mode */
  1911. if (SUCCESS != sdstd_clock_wrapper(sd)) {
  1912. sd_err(("%s sdstd_clock_wrapper failed\n", __FUNCTION__));
  1913. return ERROR;
  1914. }
  1915. }
  1916. return SUCCESS;
  1917. }
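/*
 * Tuning procedure below (descriptive note): set HOSTCtrl2_EXEC_TUNING, then
 * repeatedly issue CMD19, wait for the buffer-read-ready status, drain the
 * returned tuning block if required, and loop until the controller clears
 * EXEC_TUNING or MAX_TUNING_ITERS is exceeded; finally verify that
 * HOSTCtrl2_SAMPCLK_SEL is set.
 */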
  1918. int
  1919. sdstd_3_clk_tuning(sdioh_info_t *sd, uint32 sd3ClkMode)
  1920. {
  1921. int status, lcount = 0, brr_count = 0;
  1922. uint16 val1 = 0, bufready = 0;
  1923. uint32 val2 = 0;
  1924. uint8 phase_info_local = 0;
  1925. sd3_trace(("sd3: %s: Enter\n", __FUNCTION__));
  1926. /* if (NOT SDR104) OR
  1927. * (SDR_50 AND sdr50_tuning_reqd is NOT enabled)
  1928. * return success, as tuning not reqd.
  1929. */
  1930. if (!sd->sd3_tuning_reqd) {
  1931. sd_info(("%s: Tuning NOT reqd!\n", __FUNCTION__));
  1932. return SUCCESS;
  1933. }
  1934. /* execute tuning procedure */
  1935. /* enable Buffer ready status. [donot enable the interrupt right now] */
  1936. /* Execute tuning */
  1937. sd_trace(("sd3: %s: Execute tuning\n", __FUNCTION__));
  1938. val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
  1939. val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 1);
  1940. sdstd_wreg16(sd, SD3_HostCntrl2, val1);
  1941. do {
  1942. sd3_trace(("sd3: %s: cmd19 issue\n", __FUNCTION__));
  1943. /* Issue cmd19 */
  1944. if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_19, 0))
  1945. != SUCCESS) {
  1946. sd_err(("%s: CMD19 failed\n", __FUNCTION__));
  1947. val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
  1948. val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
  1949. val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
  1950. sdstd_wreg16(sd, SD3_HostCntrl2, val1);
  1951. return status;
  1952. }
  1953. /* wait for buffer read ready */
  1954. brr_count = 0;
  1955. do {
  1956. bufready = sdstd_rreg16(sd, SD_IntrStatus);
  1957. if (GFIELD(bufready, INTSTAT_BUF_READ_READY))
  1958. break;
1959. /* delay after checking bufready because INTSTAT_BUF_READ_READY
1960. most likely has already been set by the first check
1961. */
  1962. OSL_DELAY(1);
  1963. } while (++brr_count < CLKTUNING_MAX_BRR_RETRIES);
  1964. /* buffer read ready timedout */
  1965. if (brr_count == CLKTUNING_MAX_BRR_RETRIES) {
  1966. sd_err(("%s: TUNINGFAILED: BRR response timedout!\n",
  1967. __FUNCTION__));
  1968. val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
  1969. val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
  1970. val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
  1971. sdstd_wreg16(sd, SD3_HostCntrl2, val1);
  1972. return ERROR;
  1973. }
1974. /* In response to CMD19 the card will send 64 magic bytes.
1975. * The current Aizyc HC h/w doesn't auto-clear those bytes,
1976. * so read the 64 bytes sent by the card.
1977. * Aizyc needs to implement an auto-clear in hw.
1978. */
  1979. if (sd3_sw_read_magic_bytes == TRUE)
  1980. {
  1981. uint8 l_cnt_1 = 0;
  1982. uint32 l_val_1 = 0;
  1983. for (l_cnt_1 = 0; l_cnt_1 < 16; l_cnt_1++) {
  1984. l_val_1 = sdstd_rreg(sd, SD_BufferDataPort0);
  1985. sd_trace(("%s:l_val_1 = 0x%x", __FUNCTION__, l_val_1));
  1986. }
  1987. BCM_REFERENCE(l_val_1);
  1988. }
  1989. /* clear BuffReadReady int */
  1990. bufready = SFIELD(bufready, INTSTAT_BUF_READ_READY, 1);
  1991. sdstd_wreg16(sd, SD_IntrStatus, bufready);
  1992. /* wait before continuing */
  1993. /* OSL_DELAY(PER_TRY_TUNING_DELAY_MS * 1000); */ /* Not required */
  1994. /* check execute tuning bit */
  1995. val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
  1996. if (!GFIELD(val1, HOSTCtrl2_EXEC_TUNING)) {
  1997. /* done tuning, break from loop */
  1998. break;
  1999. }
  2000. /* max tuning iterations exceeded */
  2001. if (lcount++ > MAX_TUNING_ITERS) {
  2002. sd_err(("%s: TUNINGFAILED: Max tuning iterations"
  2003. "exceeded!\n", __FUNCTION__));
  2004. val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
  2005. val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
  2006. val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
  2007. sdstd_wreg16(sd, SD3_HostCntrl2, val1);
  2008. return ERROR;
  2009. }
  2010. } while (1);
  2011. val2 = sdstd_rreg(sd, SD3_Tuning_Info_Register);
  2012. phase_info_local = ((val2>>15)& 0x7);
  2013. sd_info(("Phase passed info: 0x%x\n", (val2>>8)& 0x3F));
  2014. sd_info(("Phase selected post tune: 0x%x\n", phase_info_local));
  2015. if (phase_info_local > SDSTD_MAX_TUNING_PHASE) {
  2016. sd_err(("!!Phase selected:%x\n", phase_info_local));
  2017. }
  2018. /* check sampling clk select */
  2019. val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
  2020. if (!GFIELD(val1, HOSTCtrl2_SAMPCLK_SEL)) {
  2021. /* error in selecting clk */
  2022. sd_err(("%s: TUNINGFAILED: SamplClkSel failed!\n", __FUNCTION__));
  2023. val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
  2024. val1 = SFIELD(val1, HOSTCtrl2_EXEC_TUNING, 0);
  2025. val1 = SFIELD(val1, HOSTCtrl2_SAMPCLK_SEL, 0);
  2026. sdstd_wreg16(sd, SD3_HostCntrl2, val1);
  2027. return ERROR;
  2028. }
  2029. /* done: */
  2030. sd_info(("%s: TUNING Success!\n", __FUNCTION__));
  2031. return SUCCESS;
  2032. }
  2033. void
  2034. sdstd_3_enable_retuning_int(sdioh_info_t *sd)
  2035. {
  2036. uint16 raw_int;
  2037. unsigned long flags;
  2038. sdstd_os_lock_irqsave(sd, &flags);
  2039. raw_int = sdstd_rreg16(sd, SD_IntrSignalEnable);
  2040. sdstd_wreg16(sd, SD_IntrSignalEnable, (raw_int | HC_INTR_RETUNING));
  2041. /* Enable retuning status */
  2042. raw_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
  2043. sdstd_wreg16(sd, SD_IntrStatusEnable, (raw_int | HC_INTR_RETUNING));
  2044. sdstd_os_unlock_irqrestore(sd, &flags);
  2045. }
  2046. void
  2047. sdstd_3_disable_retuning_int(sdioh_info_t *sd)
  2048. {
  2049. uint16 raw_int;
  2050. unsigned long flags;
  2051. sdstd_os_lock_irqsave(sd, &flags);
  2052. sd->intmask &= ~HC_INTR_RETUNING;
  2053. raw_int = sdstd_rreg16(sd, SD_IntrSignalEnable);
  2054. sdstd_wreg16(sd, SD_IntrSignalEnable, (raw_int & (~HC_INTR_RETUNING)));
  2055. /* Disable retuning status */
  2056. raw_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
  2057. sdstd_wreg16(sd, SD_IntrStatusEnable, (raw_int & (~HC_INTR_RETUNING)));
  2058. sdstd_os_unlock_irqrestore(sd, &flags);
  2059. }
  2060. bool
  2061. sdstd_3_is_retuning_int_set(sdioh_info_t *sd)
  2062. {
  2063. uint16 raw_int;
  2064. raw_int = sdstd_rreg16(sd, SD_IntrStatus);
  2065. if (GFIELD(raw_int, INTSTAT_RETUNING_INT))
  2066. return TRUE;
  2067. return FALSE;
  2068. }
  2069. /*
  2070. Assumption: sd3ClkMode is checked to be present in both host/card
  2071. capabilities before entering this function. VALID values for sd3ClkMode
  2072. in this function: SD3CLKMODE_2, 3, 4 [0 and 1 NOT supported as
  2073. they are legacy] For that, need to call
  2074. sdstd_3_get_matching_uhsi_clkmode()
  2075. */
  2076. static int
  2077. sdstd_3_set_highspeed_uhsi_mode(sdioh_info_t *sd, int sd3ClkMode)
  2078. {
  2079. uint32 drvstrn;
  2080. int status;
  2081. uint8 hc_reg8;
  2082. uint16 val1 = 0, presetval = 0;
  2083. uint32 regdata;
  2084. sd3_trace(("sd3: %s:enter:clkmode:%d\n", __FUNCTION__, sd3ClkMode));
  2085. hc_reg8 = sdstd_rreg8(sd, SD_HostCntrl);
  2086. if (HOST_SDR_UNSUPP == sd->global_UHSI_Supp) {
  2087. sd_err(("%s:Trying to set clk with unsupported global support\n", __FUNCTION__));
  2088. return BCME_ERROR;
  2089. }
2090. /* get drvstrn [double check, as this is already done in
2091. sdstd_3_get_matching_uhsi_clkmode]
2092. */
  2093. if (!sdstd_3_get_matching_drvstrn(sd, sd3ClkMode, &drvstrn, &presetval)) {
  2094. sd_err(("%s:DRVStrn mismatch!: card strn:0x%x; HC preset"
  2095. "val:0x%x\n", __FUNCTION__, drvstrn, presetval));
  2096. return BCME_SDIO_ERROR;
  2097. }
  2098. /* also set driver type select in CCCR */
  2099. if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH,
  2100. 1, drvstrn)) != BCME_OK) {
  2101. sd_err(("%s:Setting SDIOD_CCCR_DRIVER_STRENGTH in card Failed!\n", __FUNCTION__));
  2102. return BCME_SDIO_ERROR;
  2103. }
  2104. /* ********** change Bus speed select in device */
  2105. if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
  2106. 1, &regdata)) != SUCCESS) {
  2107. sd_err(("%s:FAILED 1\n", __FUNCTION__));
  2108. return BCME_SDIO_ERROR;
  2109. }
  2110. sd_info(("Attempting to change BSS.current val:0x%x\n", regdata));
  2111. if (regdata & SDIO_SPEED_SHS) {
  2112. sd_info(("Device supports High-Speed mode.\n"));
  2113. /* clear existing BSS */
  2114. regdata &= ~0xE;
  2115. regdata |= (sd3ClkMode << 1);
  2116. sd_info(("Writing %08x to Card at %08x\n",
  2117. regdata, SDIOD_CCCR_SPEED_CONTROL));
  2118. if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
  2119. 1, regdata)) != BCME_OK) {
  2120. sd_err(("%s:FAILED 2\n", __FUNCTION__));
  2121. return BCME_SDIO_ERROR;
  2122. }
  2123. if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
  2124. 1, &regdata)) != BCME_OK) {
  2125. sd_err(("%s:FAILED 3\n", __FUNCTION__));
  2126. return BCME_SDIO_ERROR;
  2127. }
  2128. sd_info(("Read %08x from Card at %08x\n", regdata, SDIOD_CCCR_SPEED_CONTROL));
  2129. }
  2130. else {
  2131. sd_err(("Device does not support High-Speed Mode.\n"));
  2132. }
  2133. /* SD Clock Enable = 0 */
  2134. sdstd_wreg16(sd, SD_ClockCntrl,
  2135. sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4));
  2136. /* set to HighSpeed mode */
2137. /* TBD: are these changes to SD_HostCntrl reqd for UHSI? */
  2138. hc_reg8 = SFIELD(hc_reg8, HOST_HI_SPEED_EN, 1);
  2139. sdstd_wreg8(sd, SD_HostCntrl, hc_reg8);
  2140. /* set UHS Mode select in HC2 and also set preset */
  2141. val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
  2142. val1 = SFIELD(val1, HOSTCtrl2_UHSMODE_SEL, sd3ClkMode);
  2143. if (TRUE != sd3_sw_override1) {
  2144. val1 = SFIELD(val1, HOSTCtrl2_PRESVAL_EN, 1);
  2145. } else {
2146. /* set HC registers manually using the retrieved values */
  2147. /* *set drvstrn */
  2148. val1 = SFIELD(val1, HOSTCtrl2_DRIVSTRENGTH_SEL,
  2149. GFIELD(presetval, PRESET_DRIVR_SELECT));
  2150. val1 = SFIELD(val1, HOSTCtrl2_PRESVAL_EN, 0);
  2151. }
  2152. /* finally write Hcontrol2 */
  2153. sdstd_wreg16(sd, SD3_HostCntrl2, val1);
  2154. sd_err(("%s:HostCtrl2 final value:0x%x\n", __FUNCTION__, val1));
  2155. /* start clock : clk will be enabled inside. */
  2156. if (FALSE == sdstd_start_clock(sd, GFIELD(presetval, PRESET_CLK_DIV))) {
  2157. sd_err(("sdstd_start_clock failed\n"));
  2158. return ERROR;
  2159. }
  2160. /* execute first tuning procedure */
  2161. if (!sd3_sw_override1) {
  2162. if (SD3_TUNING_REQD(sd, sd3ClkMode)) {
  2163. sd_err(("%s: Tuning start..\n", __FUNCTION__));
  2164. sd->sd3_tuning_reqd = TRUE;
  2165. /* TBD: first time: enabling INT's could be problem? */
  2166. sdstd_3_start_tuning(sd);
  2167. }
  2168. else
  2169. sd->sd3_tuning_reqd = FALSE;
  2170. }
  2171. return BCME_OK;
  2172. }
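/*
 * Tuning/data state interplay (as inferred from the handlers below): a
 * retuning request taken while a transfer is idle moves the state to
 * TUNING_START so the tasklet can tune immediately; if a transfer is ongoing
 * the state becomes TUNING_START_AFTER_DAT and tuning is kicked off by
 * sdstd_3_check_and_do_tuning(CHECK_TUNING_POST_DATA) once the data phase
 * returns to DATA_TRANSFER_IDLE.
 */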
  2173. /* Check & do tuning if required */
  2174. void sdstd_3_check_and_do_tuning(sdioh_info_t *sd, int tuning_param)
  2175. {
  2176. int retries = 0;
  2177. if (!sd->sd3_tuning_disable && sd->sd3_tuning_reqd) {
  2178. sd3_trace(("sd3: %s: tuning reqd\n", __FUNCTION__));
  2179. if (tuning_param == CHECK_TUNING_PRE_DATA) {
  2180. if (sd->sd3_tun_state == TUNING_ONGOING) {
  2181. retries = RETRIES_SMALL;
  2182. /* check if tuning is already going on */
  2183. while ((GFIELD(sdstd_rreg(sd, SD3_HostCntrl2),
  2184. HOSTCtrl2_EXEC_TUNING)) && retries--) {
  2185. if (retries == RETRIES_SMALL)
  2186. sd_err(("%s: Waiting for Tuning to complete\n",
  2187. __FUNCTION__));
  2188. }
  2189. if (!retries) {
  2190. sd_err(("%s: Tuning wait timeout\n", __FUNCTION__));
  2191. if (trap_errs)
  2192. ASSERT(0);
  2193. }
  2194. } else if (sd->sd3_tun_state == TUNING_START) {
  2195. /* check and start tuning if required. */
  2196. sd3_trace(("sd3 : %s : Doing Tuning before Data Transfer\n",
  2197. __FUNCTION__));
  2198. sdstd_3_start_tuning(sd);
  2199. }
  2200. } else if (tuning_param == CHECK_TUNING_POST_DATA) {
  2201. if (sd->sd3_tun_state == TUNING_START_AFTER_DAT) {
  2202. sd3_trace(("sd3: %s: tuning start\n", __FUNCTION__));
  2203. /* check and start tuning if required. */
  2204. sdstd_3_start_tuning(sd);
  2205. }
  2206. }
  2207. }
  2208. }
  2209. /* Need to run this function in interrupt-disabled context */
  2210. bool sdstd_3_check_and_set_retuning(sdioh_info_t *sd)
  2211. {
  2212. sd3_trace(("sd3: %s:\n", __FUNCTION__));
  2213. /* if already initiated, just return without anything */
  2214. if ((sd->sd3_tun_state == TUNING_START) ||
  2215. (sd->sd3_tun_state == TUNING_ONGOING) ||
  2216. (sd->sd3_tun_state == TUNING_START_AFTER_DAT)) {
  2217. /* do nothing */
  2218. return FALSE;
  2219. }
  2220. if (sd->sd3_dat_state == DATA_TRANSFER_IDLE) {
  2221. sd->sd3_tun_state = TUNING_START; /* tuning to be started by the tasklet */
  2222. return TRUE;
  2223. } else {
  2224. /* tuning to be started after finishing the existing data transfer */
  2225. sd->sd3_tun_state = TUNING_START_AFTER_DAT;
  2226. }
  2227. return FALSE;
  2228. }
  2229. int sdstd_3_get_data_state(sdioh_info_t *sd)
  2230. {
  2231. return sd->sd3_dat_state;
  2232. }
  2233. void sdstd_3_set_data_state(sdioh_info_t *sd, int state)
  2234. {
  2235. sd->sd3_dat_state = state;
  2236. }
  2237. int sdstd_3_get_tune_state(sdioh_info_t *sd)
  2238. {
  2239. return sd->sd3_tun_state;
  2240. }
  2241. void sdstd_3_set_tune_state(sdioh_info_t *sd, int state)
  2242. {
  2243. sd->sd3_tun_state = state;
  2244. }
  2245. uint8 sdstd_3_get_tuning_exp(sdioh_info_t *sd)
  2246. {
  2247. if (sd_tuning_period == CAP3_RETUNING_TC_OTHER) {
  2248. return GFIELD(sd->caps3, CAP3_RETUNING_TC);
  2249. } else {
  2250. return (uint8)sd_tuning_period;
  2251. }
  2252. }
  2253. uint32 sdstd_3_get_uhsi_clkmode(sdioh_info_t *sd)
  2254. {
  2255. return sd_uhsimode;
  2256. }
  2257. /* check, to see if the card supports driver_type corr to the driver_type
  2258. in preset value, which will be selected by requested UHSI mode
  2259. input:
  2260. clk mode: valid values: SD3CLKMODE_2_SDR50, SD3CLKMODE_3_SDR104,
  2261. SD3CLKMODE_4_DDR50, SD3CLKMODE_AUTO
  2262. outputs:
  2263. return_val: TRUE; if a matching drvstrn for the given clkmode is
  2264. found in both HC and card. otherwise, FALSE.
  2265. [other outputs below valid ONLY if return_val is TRUE]
  2266. drvstrn : driver strength read from CCCR.
  2267. presetval: value of preset reg, corr to the clkmode.
  2268. */
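/* A note on the preset-value lookup used below (illustrative, assuming the
 * SD3CLKMODE_x constants carry the numeric value in their names): the host's
 * preset registers are 16-bit entries starting at SD3_PresetValStart, and
 * (2*sd3_requested_clkmode + 6) selects the entry for the requested UHS mode,
 * e.g. SDR50 (mode 2) would read the 16-bit preset at SD3_PresetValStart + 10.
 */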
  2269. static bool
  2270. sdstd_3_get_matching_drvstrn(sdioh_info_t *sd, int sd3_requested_clkmode,
  2271. uint32 *drvstrn, uint16 *presetval)
  2272. {
  2273. int status;
  2274. uint8 presetreg;
  2275. uint8 cccr_reqd_dtype_mask = 1;
  2276. sd3_trace(("sd3: %s:\n", __FUNCTION__));
  2277. if (sd3_requested_clkmode != SD3CLKMODE_AUTO) {
  2278. /* CARD: get the card driver strength from cccr */
  2279. if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH,
  2280. 1, drvstrn)) != BCME_OK) {
  2281. sd_err(("%s:Reading SDIOD_CCCR_DRIVER_STRENGTH from card"
  2282. "Failed!\n", __FUNCTION__));
  2283. return FALSE;
  2284. }
  2285. if (TRUE != sd3_sw_override1) {
  2286. /* HOSTC: get the addr of preset register indexed by the clkmode */
  2287. presetreg = SD3_PresetValStart +
  2288. (2*sd3_requested_clkmode + 6);
  2289. *presetval = sdstd_rreg16(sd, presetreg);
  2290. } else {
  2291. /* Note: +3 for mapping between SD3CLKMODE_xxx and presetval_sw_table */
  2292. *presetval = presetval_sw_table[sd3_requested_clkmode + 3];
  2293. }
  2294. sd_err(("%s:reqCLK: %d, presetval: 0x%x\n",
  2295. __FUNCTION__, sd3_requested_clkmode, *presetval));
  2296. cccr_reqd_dtype_mask <<= GFIELD(*presetval, PRESET_DRIVR_SELECT);
  2297. /* compare/match */
  2298. if (!(cccr_reqd_dtype_mask & GFIELD(*drvstrn, SDIO_BUS_DRVR_TYPE_CAP))) {
  2299. sd_err(("%s:cccr_reqd_dtype_mask and SDIO_BUS_DRVR_TYPE_CAP"
  2300. "not matching!:reqd:0x%x, cap:0x%x\n", __FUNCTION__,
  2301. cccr_reqd_dtype_mask, GFIELD(*drvstrn, SDIO_BUS_DRVR_TYPE_CAP)));
  2302. return FALSE;
  2303. } else {
2304. /* check if drive strength override is required. If so, set it first */
  2305. if (*dhd_sdiod_uhsi_ds_override != DRVSTRN_IGNORE_CHAR) {
  2306. int ds_offset = 0;
  2307. uint32 temp = 0;
  2308. /* drvstrn to reflect the preset val: this is default */
  2309. *drvstrn = GFIELD(*presetval, PRESET_DRIVR_SELECT);
  2310. /* now check override */
  2311. ds_offset = (((int)DRVSTRN_MAX_CHAR -
  2312. (int)(*dhd_sdiod_uhsi_ds_override)));
  2313. if ((ds_offset >= 0) && (ds_offset <= MAX_DTS_INDEX)) {
  2314. ds_offset = MAX_DTS_INDEX - ds_offset;
  2315. sd_err(("%s:Drive strength override: %c, offset: "
  2316. "%d, val: %d\n", __FUNCTION__,
  2317. *dhd_sdiod_uhsi_ds_override,
  2318. ds_offset, DTS_vals[ds_offset]));
  2319. temp = SFIELD(*drvstrn, SDIO_BUS_DRVR_TYPE_SEL,
  2320. DTS_vals[ds_offset]);
  2321. sd_err(("%s:DrvStrn orig: 0x%x, modif: 0x%x\n",
  2322. __FUNCTION__, *drvstrn, temp));
  2323. *drvstrn = temp;
  2324. } else {
  2325. /* else case is default: use preset val */
  2326. sd_err(("%s:override invalid: DrvStrn is from "
  2327. "preset: 0x%x\n",
  2328. __FUNCTION__, *drvstrn));
  2329. }
  2330. } else {
  2331. sd_err(("%s:DrvStrn is from preset: 0x%x\n",
  2332. __FUNCTION__, *drvstrn));
  2333. }
  2334. }
  2335. } else {
  2336. /* TBD check for sd3_requested_clkmode : -1 also. */
  2337. sd_err(("%s: Automode not supported!\n", __FUNCTION__));
  2338. return FALSE;
  2339. }
  2340. return TRUE;
  2341. }
2342. /* Returns a matching UHSI clk speed if one is found; otherwise returns -1.
2343. Also, if sd3_requested_clkmode is -1, finds the closest maximum matching clk and returns it.
2344. */
  2345. static int
  2346. sdstd_3_get_matching_uhsi_clkmode(sdioh_info_t *sd, int sd3_requested_clkmode)
  2347. {
  2348. uint32 card_val_uhsisupp;
  2349. uint8 speedmask = 1;
  2350. uint32 drvstrn;
  2351. uint16 presetval;
  2352. int status;
  2353. sd3_trace(("sd3: %s:\n", __FUNCTION__));
  2354. sd->global_UHSI_Supp = HOST_SDR_UNSUPP;
  2355. /* for legacy/25MHz/50MHz bus speeds, no checks done here */
  2356. if ((sd3_requested_clkmode == SD3CLKMODE_0_SDR12) ||
  2357. (sd3_requested_clkmode == SD3CLKMODE_1_SDR25)) {
  2358. sd->global_UHSI_Supp = HOST_SDR_12_25;
  2359. return sd3_requested_clkmode;
  2360. }
  2361. /* get cap of card */
  2362. if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_UHSI_SUPPORT,
  2363. 1, &card_val_uhsisupp)) != BCME_OK) {
  2364. sd_err(("%s:SDIOD_CCCR_UHSI_SUPPORT query failed!\n", __FUNCTION__));
  2365. return -1;
  2366. }
  2367. sd_info(("%s:Read %08x from Card at %08x\n", __FUNCTION__,
  2368. card_val_uhsisupp, SDIOD_CCCR_UHSI_SUPPORT));
  2369. if (sd3_requested_clkmode != SD3CLKMODE_AUTO) {
  2370. /* Note: it is assumed that, following are executed when (sd3ClkMode >= 2) */
  2371. speedmask <<= (sd3_requested_clkmode - SD3CLKMODE_2_SDR50);
  2372. /* check first about 3.0 HS CLK modes */
  2373. if (!(GFIELD(sd->caps3, CAP3_30CLKCAP) & speedmask)) {
  2374. sd_err(("%s:HC does not support req 3.0 UHSI mode."
  2375. "requested:%d; capable:0x%x\n", __FUNCTION__,
  2376. sd3_requested_clkmode, GFIELD(sd->caps3, CAP3_30CLKCAP)));
  2377. return -1;
  2378. }
  2379. /* check first about 3.0 CARD CLK modes */
  2380. if (!(GFIELD(card_val_uhsisupp, SDIO_BUS_SPEED_UHSICAP) & speedmask)) {
  2381. sd_err(("%s:Card does not support req 3.0 UHSI mode. requested:%d;"
  2382. "capable:0x%x\n", __FUNCTION__, sd3_requested_clkmode,
  2383. GFIELD(card_val_uhsisupp, SDIO_BUS_SPEED_UHSICAP)));
  2384. return -1;
  2385. }
  2386. /* check, to see if the card supports driver_type corr to the
  2387. driver_type in preset value, which will be selected by
  2388. requested UHSI mode
  2389. */
  2390. if (!sdstd_3_get_matching_drvstrn(sd, sd3_requested_clkmode,
  2391. &drvstrn, &presetval)) {
  2392. sd_err(("%s:DRVStrn mismatch!: card strn:0x%x; HC preset"
  2393. "val:0x%x\n", __FUNCTION__, drvstrn, presetval));
  2394. return -1;
  2395. }
  2396. /* success path. change the support variable accordingly */
  2397. sd->global_UHSI_Supp = HOST_SDR_50_104_DDR;
  2398. return sd3_requested_clkmode;
  2399. } else {
  2400. /* auto clk selection: get the highest clock capable by both card and HC */
  2401. /* TBD TOBE DONE */
  2402. /* sd->global_UHSI_Supp = TRUE; on success */
  2403. return -1;
  2404. }
  2405. }
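/*
 * 1.8V signal-voltage switch below (descriptive note): issue CMD11 and check
 * the R1 response, gate the SD clock off, verify DAT[3:0] are driven low, set
 * the 1.8V enable in HostCntrl2 and wait ~5ms, confirm the bit stayed set,
 * re-enable the clock, wait ~1ms, and finally verify DAT[3:0] read back as
 * 0b1111.
 */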
  2406. static int
  2407. sdstd_3_sigvoltswitch_proc(sdioh_info_t *sd)
  2408. {
  2409. int status;
  2410. uint32 cmd_rsp = 0, presst;
  2411. uint16 val1 = 0;
  2412. sd3_trace(("sd3: %s:\n", __FUNCTION__));
  2413. /* Issue cmd11 */
  2414. if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_11, 0))
  2415. != SUCCESS) {
  2416. sd_err(("%s: CMD11 failed\n", __FUNCTION__));
  2417. return status;
  2418. }
  2419. /* check response */
  2420. sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
  2421. if (
  2422. GFIELD(cmd_rsp, RSP1_ERROR) || /* bit 19 */
  2423. GFIELD(cmd_rsp, RSP1_ILLEGAL_CMD) || /* bit 22 */
  2424. GFIELD(cmd_rsp, RSP1_COM_CRC_ERROR) || /* bit 23 */
  2425. GFIELD(cmd_rsp, RSP1_CARD_LOCKED) /* bit 25 */ ) {
  2426. sd_err(("%s: FAIL:CMD11: cmd_resp:0x%x\n", __FUNCTION__, cmd_rsp));
  2427. return ERROR;
  2428. }
  2429. /* SD Clock Enable = 0 */
  2430. sdstd_wreg16(sd, SD_ClockCntrl,
  2431. sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4));
  2432. /* check DAT[3..0] using Present State Reg. If not 0, error */
  2433. presst = sdstd_rreg(sd, SD_PresentState);
  2434. if (0 != GFIELD(presst, PRES_DAT_SIGNAL)) {
  2435. sd_err(("%s: FAIL: PRESTT:0x%x\n", __FUNCTION__, presst));
  2436. return ERROR;
  2437. }
  2438. /* turn 1.8V sig enable in HC2 */
  2439. val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
  2440. val1 = SFIELD(val1, HOSTCtrl2_1_8SIG_EN, 1);
  2441. sdstd_wreg16(sd, SD3_HostCntrl2, val1);
  2442. /* wait 5ms */
  2443. OSL_DELAY(5000);
  2444. /* check 1.8V sig enable in HC2. if cleared, error */
  2445. val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
  2446. if (!GFIELD(val1, HOSTCtrl2_1_8SIG_EN)) {
  2447. sd_err(("%s: FAIL: HC2:1.8V_En:0x%x\n", __FUNCTION__, val1));
  2448. return ERROR;
  2449. }
  2450. /* SD Clock Enable = 1 */
  2451. val1 = sdstd_rreg16(sd, SD_ClockCntrl);
  2452. sdstd_wreg16(sd, SD_ClockCntrl, val1 | 0x4);
  2453. /* wait 1ms */
  2454. OSL_DELAY(1000);
  2455. /* check DAT[3..0] using Present State Reg. If not 0b1111, error */
  2456. presst = sdstd_rreg(sd, SD_PresentState);
  2457. if (0xf != GFIELD(presst, PRES_DAT_SIGNAL)) {
  2458. sd_err(("%s: FAIL: PRESTT_FINAL:0x%x\n", __FUNCTION__, presst));
  2459. return ERROR;
  2460. }
  2461. return (SUCCESS);
  2462. }
  2463. static int
  2464. sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode)
  2465. {
  2466. uint32 regdata;
  2467. int status;
  2468. uint8 reg8;
  2469. uint32 drvstrn;
  2470. reg8 = sdstd_rreg8(sd, SD_HostCntrl);
  2471. if (HSMode == TRUE) {
  2472. if (sd_hiok && (GFIELD(sd->caps, CAP_HIGHSPEED)) == 0) {
  2473. sd_err(("Host Controller does not support hi-speed mode.\n"));
  2474. return BCME_ERROR;
  2475. }
  2476. sd_info(("Attempting to enable High-Speed mode.\n"));
  2477. if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
  2478. 1, &regdata)) != SUCCESS) {
  2479. return BCME_SDIO_ERROR;
  2480. }
  2481. if (regdata & SDIO_SPEED_SHS) {
  2482. sd_info(("Device supports High-Speed mode.\n"));
  2483. regdata |= SDIO_SPEED_EHS;
  2484. sd_info(("Writing %08x to Card at %08x\n",
  2485. regdata, SDIOD_CCCR_SPEED_CONTROL));
  2486. if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
  2487. 1, regdata)) != BCME_OK) {
  2488. return BCME_SDIO_ERROR;
  2489. }
  2490. if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
  2491. 1, &regdata)) != BCME_OK) {
  2492. return BCME_SDIO_ERROR;
  2493. }
  2494. sd_info(("Read %08x to Card at %08x\n", regdata, SDIOD_CCCR_SPEED_CONTROL));
  2495. reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 1);
  2496. sd_err(("High-speed clocking mode enabled.\n"));
  2497. }
  2498. else {
  2499. sd_err(("Device does not support High-Speed Mode.\n"));
  2500. reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0);
  2501. }
  2502. } else {
  2503. /* Force off device bit */
  2504. if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
  2505. 1, &regdata)) != BCME_OK) {
  2506. return status;
  2507. }
  2508. if (regdata & SDIO_SPEED_EHS) {
  2509. regdata &= ~SDIO_SPEED_EHS;
  2510. if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
  2511. 1, regdata)) != BCME_OK) {
  2512. return status;
  2513. }
  2514. }
  2515. sd_err(("High-speed clocking mode disabled.\n"));
  2516. reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0);
  2517. }
  2518. if ((sd->host_UHSISupported) && (sd->card_UHSI_voltage_Supported)) {
2519. /* Also set the default driver strength in the card/HC. This is required because,
2520. if a different drv_strn was selected earlier, it needs to be reset.
2521. */
  2522. /* get the card driver strength from cccr */
  2523. if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH,
  2524. 1, &drvstrn)) != BCME_OK) {
2525. sd_err(("%s: Reading SDIOD_CCCR_DRIVER_STRENGTH from card "
2526. "failed!\n", __FUNCTION__));
  2527. return BCME_SDIO_ERROR;
  2528. }
  2529. /* reset card drv strn */
  2530. drvstrn = SFIELD(drvstrn, SDIO_BUS_DRVR_TYPE_SEL, 0);
  2531. /* set card drv strn */
  2532. if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_DRIVER_STRENGTH,
  2533. 1, drvstrn)) != BCME_OK) {
2534. sd_err(("%s: Setting SDIOD_CCCR_DRIVER_STRENGTH in "
2535. "card failed!\n", __FUNCTION__));
  2536. return BCME_SDIO_ERROR;
  2537. }
  2538. }
  2539. sdstd_wreg8(sd, SD_HostCntrl, reg8);
  2540. return BCME_OK;
  2541. }
  2542. /* Select DMA Mode:
  2543. * If dma_mode == DMA_MODE_AUTO, pick the "best" mode.
  2544. * Otherwise, pick the selected mode if supported.
  2545. * If not supported, use PIO mode.
  2546. */
  2547. static int
  2548. sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode)
  2549. {
  2550. uint8 reg8, dma_sel_bits = SDIOH_SDMA_MODE;
  2551. int8 prev_dma_mode = sd->sd_dma_mode;
  2552. switch (prev_dma_mode) {
  2553. case DMA_MODE_AUTO:
  2554. sd_dma(("%s: Selecting best DMA mode supported by controller.\n",
  2555. __FUNCTION__));
  2556. if (GFIELD(sd->caps, CAP_ADMA2)) {
  2557. sd->sd_dma_mode = DMA_MODE_ADMA2;
  2558. dma_sel_bits = SDIOH_ADMA2_MODE;
  2559. } else if (GFIELD(sd->caps, CAP_ADMA1)) {
  2560. sd->sd_dma_mode = DMA_MODE_ADMA1;
  2561. dma_sel_bits = SDIOH_ADMA1_MODE;
  2562. } else if (GFIELD(sd->caps, CAP_DMA)) {
  2563. sd->sd_dma_mode = DMA_MODE_SDMA;
  2564. } else {
  2565. sd->sd_dma_mode = DMA_MODE_NONE;
  2566. }
  2567. break;
  2568. case DMA_MODE_NONE:
  2569. sd->sd_dma_mode = DMA_MODE_NONE;
  2570. break;
  2571. case DMA_MODE_SDMA:
  2572. if (GFIELD(sd->caps, CAP_DMA)) {
  2573. sd->sd_dma_mode = DMA_MODE_SDMA;
  2574. } else {
  2575. sd_err(("%s: SDMA not supported by controller.\n", __FUNCTION__));
  2576. sd->sd_dma_mode = DMA_MODE_NONE;
  2577. }
  2578. break;
  2579. case DMA_MODE_ADMA1:
  2580. if (GFIELD(sd->caps, CAP_ADMA1)) {
  2581. sd->sd_dma_mode = DMA_MODE_ADMA1;
  2582. dma_sel_bits = SDIOH_ADMA1_MODE;
  2583. } else {
  2584. sd_err(("%s: ADMA1 not supported by controller.\n", __FUNCTION__));
  2585. sd->sd_dma_mode = DMA_MODE_NONE;
  2586. }
  2587. break;
  2588. case DMA_MODE_ADMA2:
  2589. if (GFIELD(sd->caps, CAP_ADMA2)) {
  2590. sd->sd_dma_mode = DMA_MODE_ADMA2;
  2591. dma_sel_bits = SDIOH_ADMA2_MODE;
  2592. } else {
  2593. sd_err(("%s: ADMA2 not supported by controller.\n", __FUNCTION__));
  2594. sd->sd_dma_mode = DMA_MODE_NONE;
  2595. }
  2596. break;
  2597. case DMA_MODE_ADMA2_64:
  2598. sd_err(("%s: 64b ADMA2 not supported by driver.\n", __FUNCTION__));
  2599. sd->sd_dma_mode = DMA_MODE_NONE;
  2600. break;
  2601. default:
  2602. sd_err(("%s: Unsupported DMA Mode %d requested.\n", __FUNCTION__,
  2603. prev_dma_mode));
  2604. sd->sd_dma_mode = DMA_MODE_NONE;
  2605. break;
  2606. }
  2607. /* clear SysAddr, only used for SDMA */
  2608. sdstd_wreg(sd, SD_SysAddr, 0);
  2609. sd_err(("%s: %s mode selected.\n", __FUNCTION__, dma_mode_description[sd->sd_dma_mode]));
  2610. reg8 = sdstd_rreg8(sd, SD_HostCntrl);
  2611. reg8 = SFIELD(reg8, HOST_DMA_SEL, dma_sel_bits);
  2612. sdstd_wreg8(sd, SD_HostCntrl, reg8);
  2613. sd_dma(("%s: SD_HostCntrl=0x%02x\n", __FUNCTION__, reg8));
  2614. return BCME_OK;
  2615. }
  2616. bool
  2617. sdstd_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor)
  2618. {
  2619. uint rc, count;
  2620. uint16 divisor;
  2621. uint16 regdata;
  2622. uint16 val1;
  2623. sd3_trace(("%s: starting clk\n", __FUNCTION__));
  2624. /* turn off HC clock */
  2625. sdstd_wreg16(sd, SD_ClockCntrl,
  2626. sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4)); /* Disable the HC clock */
  2627. /* Set divisor */
  2628. if (sd->host_UHSISupported) {
  2629. divisor = (new_sd_divisor >> 1);
  2630. } else
  2631. {
  2632. /* new logic: if divisor > 256, restrict to 256 */
  2633. if (new_sd_divisor > 256)
  2634. new_sd_divisor = 256;
  2635. divisor = (new_sd_divisor >> 1) << 8;
  2636. }
  2637. sd_info(("Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl)));
  2638. if (sd->host_UHSISupported) {
2639. /* Get the preset value and shift it so that
2640. * divisor bits 0-7 land in clkctrl bits 15-8 and bits 9-8 land in clkctrl bits 7-6
2641. */
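/*
 * Worked example of the encoding below (illustration only): for a 10-bit
 * divisor of 0x140, (0x140 << 2) & 0x0ffc = 0x500, OR-ing in (0x140 >> 8)
 * gives 0x501, and shifting left by 6 (truncated to 16 bits) yields 0x4040,
 * i.e. divisor[7:0] = 0x40 in clkctrl[15:8] and divisor[9:8] = 0b01 in
 * clkctrl[7:6].
 */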
  2642. val1 = divisor << 2;
  2643. val1 &= 0x0ffc;
  2644. val1 |= divisor >> 8;
  2645. val1 <<= 6;
2646. sd_info(("divisor:0x%x val1:0x%x\n", divisor, val1));
  2647. sdstd_mod_reg16(sd, SD_ClockCntrl, 0xffC0, val1);
  2648. } else
  2649. {
  2650. sdstd_mod_reg16(sd, SD_ClockCntrl, 0xff00, divisor);
  2651. }
  2652. sd_err(("%s: Using clock divisor of %d (regval 0x%04x)\n", __FUNCTION__,
  2653. new_sd_divisor, divisor));
  2654. if (new_sd_divisor > 0)
2655. sd_err(("%s: divided clock is now %d Hz\n",
  2656. __FUNCTION__, GFIELD(sd->caps, CAP_BASECLK)*1000000/new_sd_divisor));
  2657. else
  2658. sd_err(("Using Primary Clock Freq of %d MHz\n", GFIELD(sd->caps, CAP_BASECLK)));
  2659. sd_info(("Primary Clock Freq = %d MHz\n", GFIELD(sd->caps, CAP_BASECLK)));
  2660. if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 50) {
  2661. sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
  2662. ((50 % new_sd_divisor) ? (50000 / new_sd_divisor) : (50 / new_sd_divisor)),
  2663. ((50 % new_sd_divisor) ? "KHz" : "MHz")));
  2664. } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 48) {
  2665. sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
  2666. ((48 % new_sd_divisor) ? (48000 / new_sd_divisor) : (48 / new_sd_divisor)),
  2667. ((48 % new_sd_divisor) ? "KHz" : "MHz")));
  2668. } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 33) {
  2669. sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
  2670. ((33 % new_sd_divisor) ? (33000 / new_sd_divisor) : (33 / new_sd_divisor)),
  2671. ((33 % new_sd_divisor) ? "KHz" : "MHz")));
  2672. } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 31) {
  2673. sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
  2674. ((31 % new_sd_divisor) ? (31000 / new_sd_divisor) : (31 / new_sd_divisor)),
  2675. ((31 % new_sd_divisor) ? "KHz" : "MHz")));
  2676. } else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 8) {
  2677. sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
  2678. ((8 % new_sd_divisor) ? (8000 / new_sd_divisor) : (8 / new_sd_divisor)),
  2679. ((8 % new_sd_divisor) ? "KHz" : "MHz")));
  2680. } else if (sd->controller_type == SDIOH_TYPE_BCM27XX) {
  2681. } else {
  2682. sd_err(("Need to determine divisor for %d MHz clocks\n",
  2683. GFIELD(sd->caps, CAP_BASECLK)));
  2684. sd_err(("Consult SD Host Controller Spec: Clock Control Register\n"));
  2685. return (FALSE);
  2686. }
  2687. sdstd_or_reg16(sd, SD_ClockCntrl, 0x1); /* Enable the clock */
  2688. /* Wait for clock to stabilize */
  2689. rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2);
  2690. count = 0;
  2691. while (!rc) {
  2692. OSL_DELAY(1);
  2693. sd_info(("Waiting for clock to become stable 0x%x\n", rc));
  2694. rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2);
  2695. count++;
  2696. if (count > 10000) {
  2697. sd_err(("%s:Clocks failed to stabilize after %u attempts\n",
  2698. __FUNCTION__, count));
  2699. return (FALSE);
  2700. }
  2701. }
  2702. /* Turn on clock */
  2703. sdstd_or_reg16(sd, SD_ClockCntrl, 0x4);
  2704. OSL_DELAY(20);
  2705. /* Set timeout control (adjust default value based on divisor).
  2706. * Disabling timeout interrupts during setting is advised by host spec.
  2707. */
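/*
 * Descriptive note: the loop below derives the timeout exponent from sd_toctl
 * by subtracting one from toval for every trailing zero bit (factor of two)
 * in the divisor, presumably so the data timeout scales with the divided SD
 * clock rather than the base clock; consult the host controller spec for the
 * exact TimeoutCntrl semantics.
 */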
  2708. {
  2709. uint toval;
  2710. toval = sd_toctl;
  2711. divisor = new_sd_divisor;
  2712. while (toval && !(divisor & 1)) {
  2713. toval -= 1;
  2714. divisor >>= 1;
  2715. }
  2716. regdata = sdstd_rreg16(sd, SD_ErrorIntrStatusEnable);
  2717. sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, (regdata & ~ERRINT_DATA_TIMEOUT_BIT));
  2718. sdstd_wreg8(sd, SD_TimeoutCntrl, (uint8)toval);
  2719. sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, regdata);
  2720. }
  2721. OSL_DELAY(2);
  2722. sd_info(("Final Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl)));
  2723. return TRUE;
  2724. }
  2725. uint16
  2726. sdstd_start_power(sdioh_info_t *sd, int volts_req)
  2727. {
  2728. char *s;
  2729. uint32 cmd_arg;
  2730. uint32 cmd_rsp;
  2731. uint8 pwr = 0;
  2732. int volts = 0;
  2733. uint16 val1;
  2734. uint16 init_divider = 0;
  2735. uint8 baseclk = 0;
  2736. bool selhighest = (volts_req == 0) ? TRUE : FALSE;
  2737. /* reset the card uhsi volt support to false */
  2738. sd->card_UHSI_voltage_Supported = FALSE;
  2739. /* Ensure a power on reset by turning off bus power in case it happened to
2740. * be on already. (This might happen if the driver doesn't unload/clean up correctly,
2741. * crashes, etc.) Leave off for 100ms to make sure the power off isn't
  2742. * ignored/filtered by the device. Note we can't skip this step if the power is
  2743. * off already since we don't know how long it has been off before starting
  2744. * the driver.
  2745. */
  2746. sdstd_wreg8(sd, SD_PwrCntrl, 0);
  2747. sd_info(("Turning off VDD/bus power briefly (100ms) to ensure reset\n"));
  2748. OSL_DELAY(100000);
  2749. /* For selecting highest available voltage, start from lowest and iterate */
  2750. if (!volts_req)
  2751. volts_req = 1;
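/*
 * Note on the ladder below: the PWR_VOLTS codes used are 5 = 1.8V, 6 = 3.0V
 * and 7 = 3.3V, as written into the Power Control register via
 * SFIELD(pwr, PWR_VOLTS, volts) at the voltsel label.
 */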
  2752. s = NULL;
  2753. if (volts_req == 1) {
  2754. if (GFIELD(sd->caps, CAP_VOLT_1_8)) {
  2755. volts = 5;
  2756. s = "1.8";
  2757. if (FALSE == selhighest)
  2758. goto voltsel;
  2759. else
  2760. volts_req++;
  2761. } else {
2762. sd_err(("HC doesn't support this voltage! trying a higher voltage: %d\n", volts));
  2763. volts_req++;
  2764. }
  2765. }
  2766. if (volts_req == 2) {
  2767. if (GFIELD(sd->caps, CAP_VOLT_3_0)) {
  2768. volts = 6;
  2769. s = "3.0";
  2770. if (FALSE == selhighest)
  2771. goto voltsel;
  2772. else volts_req++;
  2773. } else {
2774. sd_err(("HC doesn't support this voltage! trying a higher voltage: %d\n", volts));
  2775. volts_req++;
  2776. }
  2777. }
  2778. if (volts_req == 3) {
  2779. if (GFIELD(sd->caps, CAP_VOLT_3_3)) {
  2780. volts = 7;
  2781. s = "3.3";
  2782. } else {
  2783. if ((FALSE == selhighest) || (volts == 0)) {
  2784. sd_err(("HC doesn't support any voltage! error!\n"));
  2785. return FALSE;
  2786. }
  2787. }
  2788. }
  2789. voltsel:
  2790. pwr = SFIELD(pwr, PWR_VOLTS, volts);
  2791. pwr = SFIELD(pwr, PWR_BUS_EN, 1);
  2792. sdstd_wreg8(sd, SD_PwrCntrl, pwr); /* Set Voltage level */
  2793. sd_info(("Setting Bus Power to %s Volts\n", s));
  2794. BCM_REFERENCE(s);
  2795. if ((sd->version == HOST_CONTR_VER_3) && (volts == 5)) {
  2796. val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
  2797. val1 = SFIELD(val1, HOSTCtrl2_1_8SIG_EN, 1);
  2798. sdstd_wreg16(sd, SD3_HostCntrl2, val1);
  2799. }
  2800. /* Wait for 500ms for power to stabilize. Some designs have reset IC's
  2801. * which can hold reset low for close to 300ms. In addition there can
  2802. * be ramp time for VDD and/or VDDIO which might be provided from a LDO.
  2803. * For these reasons we need a pretty conservative delay here to have
  2804. * predictable reset behavior in the face of an unknown design.
  2805. */
  2806. OSL_DELAY(500000);
  2807. baseclk = GFIELD(sd->caps, CAP_BASECLK);
  2808. sd_info(("%s:baseclk: %d MHz\n", __FUNCTION__, baseclk));
  2809. /* for 3.0, find divisor */
  2810. if (sd->host_UHSISupported) {
  2811. /* ToDo : Dynamic modification of preset value table based on base clk */
  2812. sd3_trace(("sd3: %s: checking divisor\n", __FUNCTION__));
  2813. if (GFIELD(sd->caps3, CAP3_CLK_MULT) != 0) {
2814. sd_err(("%s: Possible error: CLK Mult clocking NOT supported!\n",
  2815. __FUNCTION__));
  2816. return FALSE;
  2817. } else {
2818. /* calculate divisor, which leads to 400KHz. */
  2819. init_divider = baseclk*10/4; /* baseclk*1000000/(400000); */
  2820. /* make it a multiple of 2. */
  2821. init_divider += (init_divider & 0x1);
  2822. sd_err(("%s:divider used for init:%d\n",
  2823. __FUNCTION__, init_divider));
  2824. }
  2825. } else {
  2826. /* Note: sd_divisor assumes that SDIO Base CLK is 50MHz. */
  2827. int final_freq_based_on_div = 50/sd_divisor;
  2828. if (baseclk > 50)
  2829. sd_divisor = baseclk/final_freq_based_on_div;
  2830. /* TBD: merge both SDIO 2.0 and 3.0 to share same divider logic */
  2831. init_divider = baseclk*10/4; /* baseclk*1000000/(400000); */
  2832. /* find next power of 2 */
  2833. NEXT_POW2(init_divider);
  2834. sd_err(("%s:NONUHSI: divider used for init:%d\n",
  2835. __FUNCTION__, init_divider));
  2836. }
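/*
 * Example (illustration only, assuming NEXT_POW2 rounds up to the next power
 * of two as its name and the comment above suggest): with a 50MHz base clock,
 * baseclk*10/4 = 125; the UHSI path rounds this to the even value 126, while
 * the non-UHSI path rounds it up to 128, both yielding an initialization
 * clock of roughly 400KHz.
 */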
  2837. /* Start at ~400KHz clock rate for initialization */
  2838. if (!sdstd_start_clock(sd, init_divider)) {
  2839. sd_err(("%s: sdstd_start_clock failed\n", __FUNCTION__));
  2840. return FALSE;
  2841. }
  2842. /* Get the Card's Operation Condition. Occasionally the board
  2843. * takes a while to become ready
  2844. */
  2845. cmd_arg = 0;
  2846. cmd_rsp = 0;
  2847. if (get_ocr(sd, &cmd_arg, &cmd_rsp) != SUCCESS) {
2848. sd_err(("%s: Failed to get OCR; bailing\n", __FUNCTION__));
  2849. /* No need to reset as not sure in what state the card is. */
  2850. return SDIO_OCR_READ_FAIL;
  2851. }
  2852. sd_info(("cmd_rsp = 0x%x\n", cmd_rsp));
  2853. sd_info(("mem_present = %d\n", GFIELD(cmd_rsp, RSP4_MEM_PRESENT)));
  2854. sd_info(("num_funcs = %d\n", GFIELD(cmd_rsp, RSP4_NUM_FUNCS)));
  2855. sd_info(("card_ready = %d\n", GFIELD(cmd_rsp, RSP4_CARD_READY)));
  2856. sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)));
  2857. /* Verify that the card supports I/O mode */
  2858. if (GFIELD(cmd_rsp, RSP4_NUM_FUNCS) == 0) {
  2859. sd_err(("%s: Card does not support I/O\n", __FUNCTION__));
  2860. return ERROR;
  2861. }
  2862. sd->num_funcs = GFIELD(cmd_rsp, RSP4_NUM_FUNCS);
  2863. /* Examine voltage: Arasan only supports 3.3 volts,
  2864. * so look for 3.2-3.3 Volts and also 3.3-3.4 volts.
  2865. */
  2866. if ((GFIELD(cmd_rsp, RSP4_IO_OCR) & (0x3 << 20)) == 0) {
  2867. sd_err(("This client does not support 3.3 volts!\n"));
  2868. return ERROR;
  2869. }
  2870. sd_info(("Leaving bus power at 3.3 Volts\n"));
  2871. cmd_arg = SFIELD(0, CMD5_OCR, 0xfff000);
  2872. /* if HC uhsi supported and card voltage set is 3.3V then switch to 1.8V */
  2873. if ((sd->host_UHSISupported) && (volts == 5)) {
  2874. /* set S18R also */
  2875. cmd_arg = SFIELD(cmd_arg, CMD5_S18R, 1);
  2876. }
  2877. cmd_rsp = 0;
  2878. get_ocr(sd, &cmd_arg, &cmd_rsp);
  2879. sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)));
  2880. if ((sd->host_UHSISupported)) {
2881. /* card responded with S18A => card supports sdio3.0; do the tuning procedure */
  2882. if (GFIELD(cmd_rsp, RSP4_S18A) == 1) {
  2883. if (sdstd_3_sigvoltswitch_proc(sd)) {
  2884. /* continue with legacy way of working */
2885. sd_err(("%s: voltage switch not done; error, stopping\n",
2886. __FUNCTION__));
2887. /* How to gracefully proceed here? */
  2888. return FALSE;
  2889. } else {
  2890. sd->card_UHSI_voltage_Supported = TRUE;
  2891. sd_err(("%s: voltage switch SUCCESS!\n", __FUNCTION__));
  2892. }
  2893. } else {
  2894. /* This could happen for 2 cases.
  2895. * 1) means card is NOT sdio3.0 . Note that
  2896. * card_UHSI_voltage_Supported is already false.
  2897. * 2) card is sdio3.0 but it is already in 1.8V.
  2898. * But now, how to change host controller's voltage?
  2899. * In this case we need to do the following.
  2900. * sd->card_UHSI_voltage_Supported = TRUE;
  2901. * turn 1.8V sig enable in HC2
  2902. * val1 = sdstd_rreg16(sd, SD3_HostCntrl2);
  2903. * val1 = SFIELD(val1, HOSTCtrl2_1_8SIG_EN, 1);
  2904. * sdstd_wreg16(sd, SD3_HostCntrl2, val1);
  2905. */
  2906. sd_info(("%s: Not sdio3.0: host_UHSISupported: %d; HC volts=%d\n",
  2907. __FUNCTION__, sd->host_UHSISupported, volts));
  2908. }
  2909. } else {
  2910. sd_info(("%s: Legacy [non sdio3.0] HC\n", __FUNCTION__));
  2911. }
  2912. return TRUE;
  2913. }
  2914. bool
  2915. sdstd_bus_width(sdioh_info_t *sd, int new_mode)
  2916. {
  2917. uint32 regdata;
  2918. int status;
  2919. uint8 reg8;
  2920. sd_trace(("%s\n", __FUNCTION__));
  2921. if (sd->sd_mode == new_mode) {
  2922. sd_info(("%s: Already at width %d\n", __FUNCTION__, new_mode));
  2923. /* Could exit, but continue just in case... */
  2924. }
  2925. /* Set client side via reg 0x7 in CCCR */
  2926. if ((status = sdstd_card_regread (sd, 0, SDIOD_CCCR_BICTRL, 1, &regdata)) != SUCCESS)
  2927. return (bool)status;
  2928. regdata &= ~BUS_SD_DATA_WIDTH_MASK;
  2929. if (new_mode == SDIOH_MODE_SD4) {
  2930. sd_info(("Changing to SD4 Mode\n"));
  2931. regdata |= SD4_MODE;
  2932. } else if (new_mode == SDIOH_MODE_SD1) {
  2933. sd_info(("Changing to SD1 Mode\n"));
  2934. } else {
  2935. sd_err(("SPI Mode not supported by Standard Host Controller\n"));
  2936. }
  2937. if ((status = sdstd_card_regwrite (sd, 0, SDIOD_CCCR_BICTRL, 1, regdata)) != SUCCESS)
  2938. return (bool)status;
  2939. if (sd->host_UHSISupported) {
  2940. uint32 card_asyncint = 0;
  2941. uint16 host_asyncint = 0;
  2942. if ((status = sdstd_card_regread (sd, 0, SDIOD_CCCR_INTR_EXTN, 1,
  2943. &card_asyncint)) != SUCCESS) {
2944. sd_err(("%s: INTR EXT read failed, ignoring\n", __FUNCTION__));
  2945. } else {
  2946. host_asyncint = sdstd_rreg16(sd, SD3_HostCntrl2);
  2947. /* check if supported by host and card */
  2948. if ((regdata & SD4_MODE) &&
  2949. (GFIELD(card_asyncint, SDIO_BUS_ASYNCINT_CAP)) &&
  2950. (GFIELD(sd->caps, CAP_ASYNCINT_SUP))) {
  2951. /* set enable async int in card */
  2952. card_asyncint = SFIELD(card_asyncint, SDIO_BUS_ASYNCINT_SEL, 1);
  2953. if ((status = sdstd_card_regwrite (sd, 0,
  2954. SDIOD_CCCR_INTR_EXTN, 1, card_asyncint)) != SUCCESS)
2955. sd_err(("%s: INTR EXT write failed, ignoring\n",
  2956. __FUNCTION__));
  2957. else {
  2958. /* set enable async int in host */
  2959. host_asyncint = SFIELD(host_asyncint,
  2960. HOSTCtrl2_ASYINT_EN, 1);
  2961. sdstd_wreg16(sd, SD3_HostCntrl2, host_asyncint);
  2962. }
  2963. } else {
2964. sd_err(("%s: INTR EXT NOT supported by either host or "
2965. "card, ignoring\n", __FUNCTION__));
  2966. }
  2967. }
  2968. }
  2969. /* Set host side via Host reg */
  2970. reg8 = sdstd_rreg8(sd, SD_HostCntrl) & ~SD4_MODE;
  2971. if (new_mode == SDIOH_MODE_SD4)
  2972. reg8 |= SD4_MODE;
  2973. sdstd_wreg8(sd, SD_HostCntrl, reg8);
  2974. sd->sd_mode = new_mode;
  2975. return TRUE;
  2976. }
  2977. static int
  2978. sdstd_driver_init(sdioh_info_t *sd)
  2979. {
  2980. sd_trace(("%s\n", __FUNCTION__));
  2981. sd->sd3_tuning_reqd = FALSE;
  2982. sd->sd3_tuning_disable = FALSE;
  2983. if ((sdstd_host_init(sd)) != SUCCESS) {
  2984. return ERROR;
  2985. }
  2986. /* Give WL_reset before sending CMD5 to dongle for Revx SDIO3 HC's */
  2987. if ((sd->controller_type == SDIOH_TYPE_RICOH_R5C822) && (sd->version == HOST_CONTR_VER_3))
  2988. {
  2989. sdstd_wreg16(sd, SD3_WL_BT_reset_register, 0x8);
  2990. OSL_DELAY(sd_delay_value);
  2991. sdstd_wreg16(sd, SD3_WL_BT_reset_register, 0x0);
  2992. OSL_DELAY(500000);
  2993. }
  2994. if (sdstd_client_init(sd) != SUCCESS) {
  2995. return ERROR;
  2996. }
2997. /* If the global caps matched and the mode is SDR104, or SDR50 where tuning is required, enable tuning. */
  2998. if ((TRUE != sd3_sw_override1) && SD3_TUNING_REQD(sd, sd_uhsimode)) {
  2999. sd->sd3_tuning_reqd = TRUE;
  3000. /* init OS structs for tuning */
  3001. sdstd_3_osinit_tuning(sd);
  3002. /* enable HC tuning interrupt OR timer based on tuning method */
  3003. if (GFIELD(sd->caps3, CAP3_RETUNING_MODES)) {
  3004. /* enable both RTReq and timer */
  3005. sd->intmask |= HC_INTR_RETUNING;
  3006. sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
  3007. #ifdef BCMSDYIELD
  3008. if (sd_forcerb)
  3009. sdstd_rreg16(sd, SD_IntrSignalEnable); /* Sync readback */
  3010. #endif /* BCMSDYIELD */
  3011. }
  3012. }
  3013. return SUCCESS;
  3014. }
  3015. static int
  3016. sdstd_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
  3017. {
  3018. /* read 24 bits and return valid 17 bit addr */
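/*
 * The three successive CMD52 reads below land in 'scratch' byte by byte via
 * 'ptr', low byte first (CISPTR_0 is the LSB); ltoh32() then normalizes the
 * host byte order and the mask keeps only the valid 17-bit CIS pointer.
 */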
  3019. int i;
  3020. uint32 scratch, regdata;
  3021. uint8 *ptr = (uint8 *)&scratch;
  3022. for (i = 0; i < 3; i++) {
  3023. if ((sdstd_card_regread (sd, 0, regaddr, 1, &regdata)) != SUCCESS)
  3024. sd_err(("%s: Can't read!\n", __FUNCTION__));
  3025. *ptr++ = (uint8) regdata;
  3026. regaddr++;
  3027. }
  3028. /* Only the lower 17-bits are valid */
  3029. scratch = ltoh32(scratch);
  3030. scratch &= 0x0001FFFF;
  3031. return (scratch);
  3032. }
  3033. static int
  3034. sdstd_card_enablefuncs(sdioh_info_t *sd)
  3035. {
  3036. int status;
  3037. uint32 regdata;
  3038. uint32 fbraddr;
  3039. uint8 func;
  3040. sd_trace(("%s\n", __FUNCTION__));
  3041. /* Get the Card's common CIS address */
  3042. sd->com_cis_ptr = sdstd_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
  3043. sd->func_cis_ptr[0] = sd->com_cis_ptr;
  3044. sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));
  3045. /* Get the Card's function CIS (for each function) */
  3046. for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
  3047. func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
  3048. sd->func_cis_ptr[func] = sdstd_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
  3049. sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
  3050. __FUNCTION__, func, sd->func_cis_ptr[func]));
  3051. }
  3052. /* Enable function 1 on the card */
  3053. regdata = SDIO_FUNC_ENABLE_1;
  3054. if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOEN, 1, regdata)) != SUCCESS)
  3055. return status;
  3056. return SUCCESS;
  3057. }
  3058. /* Read client card reg */
  3059. static int
  3060. sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
  3061. {
  3062. int status;
  3063. uint32 cmd_arg;
  3064. uint32 rsp5;
  3065. cmd_arg = 0;
  3066. if ((func == 0) || (regsize == 1)) {
  3067. cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
  3068. cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
  3069. cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_READ);
  3070. cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
  3071. cmd_arg = SFIELD(cmd_arg, CMD52_DATA, 0);
  3072. if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg))
  3073. != SUCCESS)
  3074. return status;
  3075. sdstd_cmd_getrsp(sd, &rsp5, 1);
  3076. if (sdstd_rreg16(sd, SD_ErrorIntrStatus) != 0) {
  3077. sd_err(("%s: 1: ErrorintrStatus 0x%x\n",
  3078. __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus)));
  3079. }
  3080. if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
  3081. sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
  3082. __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
  3083. if (GFIELD(rsp5, RSP5_STUFF))
  3084. sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
  3085. __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
  3086. *data = GFIELD(rsp5, RSP5_DATA);
  3087. sd_data(("%s: Resp data(0x%x)\n", __FUNCTION__, *data));
  3088. } else {
  3089. cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
  3090. cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
  3091. cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
  3092. cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
  3093. cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
  3094. cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
  3095. sd->data_xfer_count = regsize;
  3096. /* sdstd_cmd_issue() returns with the command complete bit
  3097. * in the ISR already cleared
  3098. */
  3099. if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg))
  3100. != SUCCESS)
  3101. return status;
  3102. sdstd_cmd_getrsp(sd, &rsp5, 1);
  3103. if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
  3104. sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
  3105. __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));
  3106. if (GFIELD(rsp5, RSP5_STUFF))
  3107. sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
  3108. __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
  3109. if (sd->polled_mode) {
  3110. volatile uint16 int_reg;
  3111. int retries = RETRIES_LARGE;
  3112. /* Wait for Read Buffer to become ready */
  3113. do {
  3114. sdstd_os_yield(sd);
  3115. int_reg = sdstd_rreg16(sd, SD_IntrStatus);
  3116. } while (--retries && (GFIELD(int_reg, INTSTAT_BUF_READ_READY) == 0));
  3117. if (!retries) {
  3118. sd_err(("%s: Timeout on Buf_Read_Ready: "
  3119. "intStat: 0x%x errint: 0x%x PresentState 0x%x\n",
  3120. __FUNCTION__, int_reg,
  3121. sdstd_rreg16(sd, SD_ErrorIntrStatus),
  3122. sdstd_rreg(sd, SD_PresentState)));
  3123. sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
  3124. return (ERROR);
  3125. }
  3126. /* Have Buffer Ready, so clear it and read the data */
  3127. sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_BUF_READ_READY, 1));
  3128. if (regsize == 2)
  3129. *data = sdstd_rreg16(sd, SD_BufferDataPort0);
  3130. else
  3131. *data = sdstd_rreg(sd, SD_BufferDataPort0);
  3132. sd_data(("%s: Resp data(0x%x)\n", __FUNCTION__, *data));
  3133. /* Check Status.
  3134. * After the data is read, the Transfer Complete bit should be on
  3135. */
  3136. retries = RETRIES_LARGE;
  3137. do {
  3138. int_reg = sdstd_rreg16(sd, SD_IntrStatus);
  3139. } while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0));
  3140. /* Check for any errors from the data phase */
  3141. if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
  3142. return ERROR;
  3143. if (!retries) {
  3144. sd_err(("%s: Timeout on xfer complete: "
  3145. "intr 0x%04x err 0x%04x state 0x%08x\n",
  3146. __FUNCTION__, int_reg,
  3147. sdstd_rreg16(sd, SD_ErrorIntrStatus),
  3148. sdstd_rreg(sd, SD_PresentState)));
  3149. return (ERROR);
  3150. }
  3151. sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_XFER_COMPLETE, 1));
  3152. }
  3153. }
  3154. if (sd->polled_mode) {
  3155. if (regsize == 2)
  3156. *data &= 0xffff;
  3157. }
  3158. return SUCCESS;
  3159. }
  3160. bool
  3161. check_client_intr(sdioh_info_t *sd)
  3162. {
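/*
 * Returns TRUE if this controller raised the interrupt. For card interrupts,
 * the INTSTAT_CARD_INT enable in IntrStatusEnable is masked around the client
 * handler call and then restored; for local (host) interrupts, the signal
 * enables are cleared and the status is latched in sd->last_intrstatus /
 * sd->got_hcint for later processing.
 */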
  3163. uint16 raw_int, cur_int, old_int;
  3164. raw_int = sdstd_rreg16(sd, SD_IntrStatus);
  3165. cur_int = raw_int & sd->intmask;
  3166. if (!cur_int) {
  3167. /* Not an error -- might share interrupts... */
  3168. return FALSE;
  3169. }
  3170. if (GFIELD(cur_int, INTSTAT_CARD_INT)) {
  3171. unsigned long flags;
  3172. sdstd_os_lock_irqsave(sd, &flags);
  3173. old_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
  3174. sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(old_int, INTSTAT_CARD_INT, 0));
  3175. sdstd_os_unlock_irqrestore(sd, &flags);
  3176. if (sd->client_intr_enabled && sd->use_client_ints) {
  3177. sd->intrcount++;
  3178. ASSERT(sd->intr_handler);
  3179. ASSERT(sd->intr_handler_arg);
  3180. (sd->intr_handler)(sd->intr_handler_arg);
  3181. } else {
  3182. sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
  3183. __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
  3184. }
  3185. sdstd_os_lock_irqsave(sd, &flags);
  3186. old_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
  3187. sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(old_int, INTSTAT_CARD_INT, 1));
  3188. sdstd_os_unlock_irqrestore(sd, &flags);
  3189. } else {
  3190. /* Local interrupt: disable, set flag, and save intrstatus */
  3191. sdstd_wreg16(sd, SD_IntrSignalEnable, 0);
  3192. sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0);
  3193. sd->local_intrcount++;
  3194. sd->got_hcint = TRUE;
  3195. sd->last_intrstatus = cur_int;
  3196. }
  3197. return TRUE;
  3198. }
  3199. void
  3200. sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err)
  3201. {
  3202. uint16 int_reg, err_reg;
  3203. int retries = RETRIES_LARGE;
  3204. do {
  3205. int_reg = sdstd_rreg16(sd, SD_IntrStatus);
  3206. err_reg = sdstd_rreg16(sd, SD_ErrorIntrStatus);
  3207. } while (--retries && !(int_reg & norm) && !(err_reg & err));
  3208. norm |= sd->intmask;
  3209. if (err_reg & err)
  3210. norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
  3211. sd->last_intrstatus = int_reg & norm;
  3212. }
  3213. /* write a client register */
  3214. static int
  3215. sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
  3216. {
  3217. int status;
  3218. uint32 cmd_arg, rsp5, flags;
  3219. cmd_arg = 0;
  3220. if ((func == 0) || (regsize == 1)) {
  3221. cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
  3222. cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
  3223. cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
  3224. cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
  3225. cmd_arg = SFIELD(cmd_arg, CMD52_DATA, data & 0xff);
  3226. if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg))
  3227. != SUCCESS)
  3228. return status;
  3229. sdstd_cmd_getrsp(sd, &rsp5, 1);
  3230. flags = GFIELD(rsp5, RSP5_FLAGS);
  3231. if (flags && (flags != 0x10))
3232. sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10\n",
  3233. __FUNCTION__, flags));
  3234. }
  3235. else {
  3236. cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
  3237. cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
  3238. cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
  3239. cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
  3240. cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
  3241. cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
  3242. sd->data_xfer_count = regsize;
  3243. /* sdstd_cmd_issue() returns with the command complete bit
  3244. * in the ISR already cleared
  3245. */
  3246. if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg))
  3247. != SUCCESS)
  3248. return status;
  3249. sdstd_cmd_getrsp(sd, &rsp5, 1);
  3250. if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
  3251. sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10\n",
  3252. __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS)));
  3253. if (GFIELD(rsp5, RSP5_STUFF))
  3254. sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n",
  3255. __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
  3256. if (sd->polled_mode) {
  3257. uint16 int_reg;
  3258. int retries = RETRIES_LARGE;
  3259. /* Wait for Write Buffer to become ready */
  3260. do {
  3261. int_reg = sdstd_rreg16(sd, SD_IntrStatus);
  3262. } while (--retries && (GFIELD(int_reg, INTSTAT_BUF_WRITE_READY) == 0));
  3263. if (!retries) {
  3264. sd_err(("%s: Timeout on Buf_Write_Ready: intStat: 0x%x "
  3265. "errint: 0x%x PresentState 0x%x\n",
  3266. __FUNCTION__, int_reg,
  3267. sdstd_rreg16(sd, SD_ErrorIntrStatus),
  3268. sdstd_rreg(sd, SD_PresentState)));
  3269. sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
  3270. return (ERROR);
  3271. }
  3272. /* Clear Write Buf Ready bit */
  3273. int_reg = 0;
  3274. int_reg = SFIELD(int_reg, INTSTAT_BUF_WRITE_READY, 1);
  3275. sdstd_wreg16(sd, SD_IntrStatus, int_reg);
  3276. /* At this point we have Buffer Ready, so write the data */
  3277. if (regsize == 2)
  3278. sdstd_wreg16(sd, SD_BufferDataPort0, (uint16) data);
  3279. else
  3280. sdstd_wreg(sd, SD_BufferDataPort0, data);
  3281. /* Wait for Transfer Complete */
  3282. retries = RETRIES_LARGE;
  3283. do {
  3284. int_reg = sdstd_rreg16(sd, SD_IntrStatus);
  3285. } while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0));
  3286. /* Check for any errors from the data phase */
  3287. if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
  3288. return ERROR;
  3289. if (retries == 0) {
  3290. sd_err(("%s: Timeout for xfer complete; State = 0x%x, "
  3291. "intr state=0x%x, Errintstatus 0x%x rcnt %d, tcnt %d\n",
  3292. __FUNCTION__, sdstd_rreg(sd, SD_PresentState),
  3293. int_reg, sdstd_rreg16(sd, SD_ErrorIntrStatus),
  3294. sd->r_cnt, sd->t_cnt));
  3295. }
  3296. /* Clear the status bits */
  3297. sdstd_wreg16(sd, SD_IntrStatus, SFIELD(int_reg, INTSTAT_CARD_INT, 0));
  3298. }
  3299. }
  3300. return SUCCESS;
  3301. }
  3302. void
  3303. sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count /* num 32 bit words */)
  3304. {
  3305. int rsp_count;
  3306. int respaddr = SD_Response0;
  3307. if (count > 4)
  3308. count = 4;
  3309. for (rsp_count = 0; rsp_count < count; rsp_count++) {
  3310. *rsp_buffer++ = sdstd_rreg(sd, respaddr);
  3311. respaddr += 4;
  3312. }
  3313. }
  3314. /*
  3315. Note: options: 0 - default
3316. 1 - tuning option: means that this command is issued as part
3317. of tuning, so there is no need to check the start-tuning function.
  3318. */
  3319. static int
  3320. sdstd_cmd_issue(sdioh_info_t *sdioh_info, bool use_dma, uint32 cmd, uint32 arg)
  3321. {
  3322. uint16 cmd_reg;
  3323. int retries;
  3324. uint32 cmd_arg;
  3325. uint16 xfer_reg = 0;
  3326. if ((sdioh_info->sd_mode == SDIOH_MODE_SPI) &&
  3327. ((cmd == SDIOH_CMD_3) || (cmd == SDIOH_CMD_7) || (cmd == SDIOH_CMD_15))) {
  3328. sd_err(("%s: Cmd %d is not for SPI\n", __FUNCTION__, cmd));
  3329. return ERROR;
  3330. }
  3331. retries = RETRIES_SMALL;
  3332. while ((GFIELD(sdstd_rreg(sdioh_info, SD_PresentState), PRES_CMD_INHIBIT)) && --retries) {
  3333. if (retries == RETRIES_SMALL)
  3334. sd_err(("%s: Waiting for Command Inhibit cmd = %d 0x%x\n",
  3335. __FUNCTION__, cmd, sdstd_rreg(sdioh_info, SD_PresentState)));
  3336. }
  3337. if (!retries) {
  3338. sd_err(("%s: Command Inhibit timeout\n", __FUNCTION__));
  3339. if (trap_errs)
  3340. ASSERT(0);
  3341. return ERROR;
  3342. }
  3343. cmd_reg = 0;
  3344. switch (cmd) {
  3345. case SDIOH_CMD_0: /* Set Card to Idle State - No Response */
  3346. sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
  3347. cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE);
  3348. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
  3349. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
  3350. cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
  3351. cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
  3352. cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
  3353. break;
  3354. case SDIOH_CMD_3: /* Ask card to send RCA - Response R6 */
  3355. sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
  3356. cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
  3357. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
  3358. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
  3359. cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
  3360. cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
  3361. cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
  3362. break;
  3363. case SDIOH_CMD_5: /* Send Operation condition - Response R4 */
  3364. sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
  3365. cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
  3366. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
  3367. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
  3368. cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
  3369. cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
  3370. cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
  3371. break;
  3372. case SDIOH_CMD_7: /* Select card - Response R1 */
  3373. sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
  3374. cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
  3375. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
  3376. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
  3377. cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
  3378. cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
  3379. cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
  3380. break;
  3381. case SDIOH_CMD_14: /* eSD Sleep - Response R1 */
  3382. case SDIOH_CMD_11: /* Select card - Response R1 */
  3383. sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
  3384. cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
  3385. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
  3386. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
  3387. cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
  3388. cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
  3389. cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
  3390. break;
  3391. case SDIOH_CMD_15: /* Set card to inactive state - Response None */
  3392. sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
  3393. cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE);
  3394. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
  3395. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
  3396. cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
  3397. cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
  3398. cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
  3399. break;
  3400. case SDIOH_CMD_19: /* clock tuning - Response R1 */
  3401. sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
  3402. cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
  3403. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
  3404. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
  3405. cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 1);
  3406. cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
  3407. cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
  3408. /* Host controller reads 64 byte magic pattern from card
  3409. * Hence Direction = 1 ( READ )
  3410. */
  3411. xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
  3412. break;
  3413. case SDIOH_CMD_52: /* IO R/W Direct (single byte) - Response R5 */
  3414. sd_data(("%s: CMD52 func(%d) addr(0x%x) %s data(0x%x)\n",
  3415. __FUNCTION__,
  3416. GFIELD(arg, CMD52_FUNCTION),
  3417. GFIELD(arg, CMD52_REG_ADDR),
  3418. GFIELD(arg, CMD52_RW_FLAG) ? "W" : "R",
  3419. GFIELD(arg, CMD52_DATA)));
  3420. cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
  3421. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
  3422. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
  3423. cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
  3424. cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
  3425. cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
  3426. break;
  3427. case SDIOH_CMD_53: /* IO R/W Extended (multiple bytes/blocks) */
  3428. sd_data(("%s: CMD53 func(%d) addr(0x%x) %s mode(%s) cnt(%d), %s\n",
  3429. __FUNCTION__,
  3430. GFIELD(arg, CMD53_FUNCTION),
  3431. GFIELD(arg, CMD53_REG_ADDR),
  3432. GFIELD(arg, CMD53_RW_FLAG) ? "W" : "R",
  3433. GFIELD(arg, CMD53_BLK_MODE) ? "Block" : "Byte",
  3434. GFIELD(arg, CMD53_BYTE_BLK_CNT),
  3435. GFIELD(arg, CMD53_OP_CODE) ? "Incrementing addr" : "Single addr"));
  3436. cmd_arg = arg;
  3437. xfer_reg = 0;
  3438. cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
  3439. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
  3440. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
  3441. cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 1);
  3442. cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
  3443. cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
  3444. use_dma = USE_DMA(sdioh_info) && GFIELD(cmd_arg, CMD53_BLK_MODE);
  3445. if (GFIELD(cmd_arg, CMD53_BLK_MODE)) {
  3446. uint16 blocksize;
  3447. uint16 blockcount;
  3448. int func;
  3449. ASSERT(sdioh_info->sd_blockmode);
  3450. func = GFIELD(cmd_arg, CMD53_FUNCTION);
  3451. blocksize = MIN((int)sdioh_info->data_xfer_count,
  3452. sdioh_info->client_block_size[func]);
  3453. blockcount = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT);
  3454. /* data_xfer_cnt is already setup so that for multiblock mode,
  3455. * it is the entire buffer length. For non-block or single block,
  3456. * it is < 64 bytes
  3457. */
  3458. if (use_dma) {
  3459. switch (sdioh_info->sd_dma_mode) {
  3460. case DMA_MODE_SDMA:
  3461. sd_dma(("%s: SDMA: SysAddr reg was 0x%x now 0x%x\n",
  3462. __FUNCTION__, sdstd_rreg(sdioh_info, SD_SysAddr),
  3463. (uint32)sdioh_info->dma_phys));
  3464. sdstd_wreg(sdioh_info, SD_SysAddr, sdioh_info->dma_phys);
  3465. break;
  3466. case DMA_MODE_ADMA1:
  3467. case DMA_MODE_ADMA2:
  3468. sd_dma(("%s: ADMA: Using ADMA\n", __FUNCTION__));
  3469. #ifdef BCMSDIOH_TXGLOM
  3470. /* multi-descriptor is currently used only for hc3 */
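/* All but the last glom buffer get VALID|ACT_TRAN descriptors; the final
 * descriptor additionally sets END and INT to terminate the chain and raise
 * the DMA interrupt (see the two sd_create_adma_descriptor() calls below).
 */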
  3471. if ((sdioh_info->glom_info.count != 0) &&
  3472. (sdioh_info->txglom_mode == SDPCM_TXGLOM_MDESC)) {
  3473. uint32 i = 0;
  3474. for (i = 0;
  3475. i < sdioh_info->glom_info.count-1;
  3476. i++) {
  3477. glom_buf_t *glom_info;
  3478. glom_info = &(sdioh_info->glom_info);
  3479. sd_create_adma_descriptor(sdioh_info,
  3480. i,
  3481. glom_info->dma_phys_arr[i],
  3482. glom_info->nbytes[i],
  3483. ADMA2_ATTRIBUTE_VALID |
  3484. ADMA2_ATTRIBUTE_ACT_TRAN);
  3485. }
  3486. sd_create_adma_descriptor(sdioh_info,
  3487. i,
  3488. sdioh_info->glom_info.dma_phys_arr[i],
  3489. sdioh_info->glom_info.nbytes[i],
  3490. ADMA2_ATTRIBUTE_VALID |
  3491. ADMA2_ATTRIBUTE_END |
  3492. ADMA2_ATTRIBUTE_INT |
  3493. ADMA2_ATTRIBUTE_ACT_TRAN);
  3494. } else
  3495. #endif /* BCMSDIOH_TXGLOM */
  3496. {
  3497. sd_create_adma_descriptor(sdioh_info, 0,
  3498. sdioh_info->dma_phys, blockcount*blocksize,
  3499. ADMA2_ATTRIBUTE_VALID | ADMA2_ATTRIBUTE_END |
  3500. ADMA2_ATTRIBUTE_INT | ADMA2_ATTRIBUTE_ACT_TRAN);
  3501. }
  3502. /* Dump descriptor if DMA debugging is enabled. */
  3503. if (sd_msglevel & SDH_DMA_VAL) {
  3504. sd_dump_adma_dscr(sdioh_info);
  3505. }
  3506. sdstd_wreg(sdioh_info, SD_ADMA_SysAddr,
  3507. sdioh_info->adma2_dscr_phys);
  3508. break;
  3509. default:
  3510. sd_err(("%s: unsupported DMA mode %d.\n",
  3511. __FUNCTION__, sdioh_info->sd_dma_mode));
  3512. break;
  3513. }
  3514. }
  3515. sd_trace(("%s: Setting block count %d, block size %d bytes\n",
  3516. __FUNCTION__, blockcount, blocksize));
  3517. sdstd_wreg16(sdioh_info, SD_BlockSize, blocksize);
  3518. sdstd_wreg16(sdioh_info, SD_BlockCount, blockcount);
  3519. xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, use_dma);
  3520. if (sdioh_info->client_block_size[func] != blocksize)
  3521. set_client_block_size(sdioh_info, func, blocksize);
  3522. if (blockcount > 1) {
  3523. xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 1);
  3524. xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 1);
  3525. xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
  3526. } else {
  3527. xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0);
  3528. xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0);
  3529. xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
  3530. }
  3531. if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ)
  3532. xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
  3533. else
  3534. xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0);
  3535. retries = RETRIES_SMALL;
  3536. while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState),
  3537. PRES_DAT_INHIBIT) && --retries)
  3538. sd_err(("%s: Waiting for Data Inhibit cmd = %d\n",
  3539. __FUNCTION__, cmd));
  3540. if (!retries) {
  3541. sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__));
  3542. if (trap_errs)
  3543. ASSERT(0);
  3544. return ERROR;
  3545. }
  3546. /* Consider deferring this write to the comment below "Deferred Write" */
  3547. sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
  3548. } else { /* Non block mode */
  3549. uint16 bytes = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT);
  3550. /* The byte/block count field only has 9 bits,
  3551. * so, to do a 512-byte bytemode transfer, this
  3552. * field will contain 0, but we need to tell the
  3553. * controller we're transferring 512 bytes.
  3554. */
  3555. if (bytes == 0) bytes = 512;
  3556. if (use_dma)
  3557. sdstd_wreg(sdioh_info, SD_SysAddr, sdioh_info->dma_phys);
  3558. /* PCI: Transfer Mode register 0x0c */
  3559. xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, bytes <= 4 ? 0 : use_dma);
  3560. xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
  3561. if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ)
  3562. xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
  3563. else
  3564. xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0);
  3565. /* See table 2-8 Host Controller spec ver 1.00 */
3566. xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0); /* Don't care */
  3567. xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0);
  3568. sdstd_wreg16(sdioh_info, SD_BlockSize, bytes);
  3569. sdstd_wreg16(sdioh_info, SD_BlockCount, 1);
  3570. retries = RETRIES_SMALL;
  3571. while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState),
  3572. PRES_DAT_INHIBIT) && --retries)
  3573. sd_err(("%s: Waiting for Data Inhibit cmd = %d\n",
  3574. __FUNCTION__, cmd));
  3575. if (!retries) {
  3576. sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__));
  3577. if (trap_errs)
  3578. ASSERT(0);
  3579. return ERROR;
  3580. }
  3581. /* Consider deferring this write to the comment below "Deferred Write" */
  3582. sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
  3583. }
  3584. break;
  3585. default:
  3586. sd_err(("%s: Unknown command\n", __FUNCTION__));
  3587. return ERROR;
  3588. }
  3589. if (sdioh_info->sd_mode == SDIOH_MODE_SPI) {
  3590. cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
  3591. cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
  3592. }
  3593. /* Setup and issue the SDIO command */
  3594. sdstd_wreg(sdioh_info, SD_Arg0, arg);
  3595. /* Deferred Write
  3596. * Consider deferring the two writes above until this point in the code.
  3597. * The following would do one 32 bit write.
  3598. *
  3599. * {
  3600. * uint32 tmp32 = cmd_reg << 16;
  3601. * tmp32 |= xfer_reg;
  3602. * sdstd_wreg(sdioh_info, SD_TransferMode, tmp32);
  3603. * }
  3604. */
  3605. /* Alternate to Deferred Write START */
  3606. /* In response to CMD19 card sends 64 byte magic pattern.
  3607. * So SD_BlockSize = 64 & SD_BlockCount = 1
  3608. */
  3609. if (GFIELD(cmd_reg, CMD_INDEX) == SDIOH_CMD_19) {
  3610. sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
  3611. sdstd_wreg16(sdioh_info, SD_BlockSize, 64);
  3612. sdstd_wreg16(sdioh_info, SD_BlockCount, 1);
  3613. }
  3614. sdstd_wreg16(sdioh_info, SD_Command, cmd_reg);
  3615. /* Alternate to Deferred Write END */
  3616. /* If we are in polled mode, wait for the command to complete.
  3617. * In interrupt mode, return immediately. The calling function will
  3618. * know that the command has completed when the CMDATDONE interrupt
  3619. * is asserted
  3620. */
  3621. if (sdioh_info->polled_mode) {
  3622. uint16 int_reg = 0;
  3623. retries = RETRIES_LARGE;
  3624. /* For CMD19 no need to wait for cmd completion */
  3625. if (GFIELD(cmd_reg, CMD_INDEX) == SDIOH_CMD_19)
  3626. return SUCCESS;
  3627. do {
  3628. int_reg = sdstd_rreg16(sdioh_info, SD_IntrStatus);
  3629. sdstd_os_yield(sdioh_info);
  3630. } while (--retries &&
  3631. (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) &&
  3632. (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0));
  3633. if (!retries) {
  3634. sd_err(("%s: CMD_COMPLETE timeout: intrStatus: 0x%x "
  3635. "error stat 0x%x state 0x%x\n",
  3636. __FUNCTION__, int_reg,
  3637. sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus),
  3638. sdstd_rreg(sdioh_info, SD_PresentState)));
  3639. /* Attempt to reset CMD line when we get a CMD timeout */
  3640. sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
  3641. retries = RETRIES_LARGE;
  3642. do {
  3643. sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__));
  3644. } while ((GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset),
  3645. SW_RESET_CMD)) && retries--);
  3646. if (!retries) {
  3647. sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__));
  3648. }
  3649. if (trap_errs)
  3650. ASSERT(0);
  3651. return (ERROR);
  3652. }
  3653. /* Clear Command Complete interrupt */
  3654. int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1);
  3655. sdstd_wreg16(sdioh_info, SD_IntrStatus, int_reg);
  3656. /* Check for Errors */
  3657. if (sdstd_check_errs(sdioh_info, cmd, arg)) {
  3658. if (trap_errs)
  3659. ASSERT(0);
  3660. return ERROR;
  3661. }
  3662. }
  3663. return SUCCESS;
  3664. }
  3665. static int
  3666. sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, uint32 addr, int nbytes, uint32 *data)
  3667. {
  3668. int status;
  3669. uint32 cmd_arg;
  3670. uint32 rsp5;
  3671. uint16 int_reg, int_bit;
  3672. uint flags;
  3673. int num_blocks, blocksize;
  3674. bool local_blockmode, local_dma;
  3675. bool read = rw == SDIOH_READ ? 1 : 0;
  3676. bool local_yield = FALSE;
  3677. #ifdef BCMSDIOH_TXGLOM
  3678. uint32 i;
  3679. uint8 *localbuf = NULL;
  3680. #endif // endif
  3681. ASSERT(nbytes);
  3682. cmd_arg = 0;
  3683. sd_data(("%s: %s 53 addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
  3684. __FUNCTION__, read ? "Rd" : "Wr", addr, nbytes, sd->r_cnt, sd->t_cnt));
  3685. if (read) sd->r_cnt++; else sd->t_cnt++;
  3686. local_blockmode = sd->sd_blockmode;
  3687. local_dma = USE_DMA(sd);
  3688. #ifdef BCMSDIOH_TXGLOM
  3689. /* If multiple buffers are there, then calculate the nbytes from that */
  3690. if (!read && (func == SDIO_FUNC_2) && (sd->glom_info.count != 0)) {
  3691. uint32 ii;
  3692. nbytes = 0;
  3693. for (ii = 0; ii < sd->glom_info.count; ii++) {
  3694. nbytes += sd->glom_info.nbytes[ii];
  3695. }
  3696. ASSERT(nbytes <= sd->alloced_dma_size);
  3697. }
  3698. #endif // endif
  3699. /* Don't bother with block mode on small xfers */
  3700. if (nbytes < sd->client_block_size[func]) {
3701. sd_data(("setting local blockmode to false: nbytes (%d) < block_size (%d)\n",
  3702. nbytes, sd->client_block_size[func]));
  3703. local_blockmode = FALSE;
  3704. local_dma = FALSE;
  3705. #ifdef BCMSDIOH_TXGLOM
  3706. /* In glommed case, create a single pkt from multiple pkts */
  3707. if (!read && (func == SDIO_FUNC_2) && (sd->glom_info.count != 0)) {
  3708. uint32 offset = 0;
3709. localbuf = (uint8 *)MALLOC(sd->osh, nbytes);
if (localbuf == NULL)
return ERROR; /* bail out if the glom copy buffer cannot be allocated */
3710. data = (uint32 *)localbuf;
  3711. for (i = 0; i < sd->glom_info.count; i++) {
  3712. bcopy(sd->glom_info.dma_buf_arr[i],
  3713. ((uint8 *)data + offset),
  3714. sd->glom_info.nbytes[i]);
  3715. offset += sd->glom_info.nbytes[i];
  3716. }
  3717. }
  3718. #endif // endif
  3719. }
  3720. if (local_blockmode) {
  3721. blocksize = MIN(sd->client_block_size[func], nbytes);
  3722. num_blocks = nbytes/blocksize;
  3723. cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, num_blocks);
  3724. cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 1);
  3725. } else {
  3726. num_blocks = 1;
  3727. blocksize = nbytes;
  3728. cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, nbytes);
  3729. cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
  3730. }
  3731. if (local_dma && !read) {
  3732. #ifdef BCMSDIOH_TXGLOM
  3733. if ((func == SDIO_FUNC_2) && (sd->glom_info.count != 0)) {
3734. /* For host controller ver 2, DMA_MAP may not work properly due to 4K alignment
3735. * requirements, so copy the pkts into the 4K-aligned pre-allocated buffer.
3736. * The total length must not exceed the pre-allocated memory size.
  3737. */
  3738. if (sd->txglom_mode == SDPCM_TXGLOM_CPY) {
  3739. uint32 total_bytes = 0;
  3740. for (i = 0; i < sd->glom_info.count; i++) {
  3741. bcopy(sd->glom_info.dma_buf_arr[i],
  3742. (uint8 *)sd->dma_buf + total_bytes,
  3743. sd->glom_info.nbytes[i]);
  3744. total_bytes += sd->glom_info.nbytes[i];
  3745. }
  3746. sd_sync_dma(sd, read, total_bytes);
  3747. }
  3748. } else
  3749. #endif /* BCMSDIOH_TXGLOM */
  3750. {
  3751. bcopy(data, sd->dma_buf, nbytes);
  3752. sd_sync_dma(sd, read, nbytes);
  3753. }
  3754. }
  3755. if (fifo)
  3756. cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 0);
  3757. else
  3758. cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
  3759. cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
  3760. cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, addr);
  3761. if (read)
  3762. cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
  3763. else
  3764. cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
  3765. sd->data_xfer_count = nbytes;
  3766. /* sdstd_cmd_issue() returns with the command complete bit
  3767. * in the ISR already cleared
  3768. */
  3769. if ((status = sdstd_cmd_issue(sd, local_dma, SDIOH_CMD_53, cmd_arg)) != SUCCESS) {
  3770. sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__, (read ? "read" : "write")));
  3771. return status;
  3772. }
  3773. sdstd_cmd_getrsp(sd, &rsp5, 1);
  3774. if ((flags = GFIELD(rsp5, RSP5_FLAGS)) != 0x10) {
  3775. sd_err(("%s: Rsp5: nbytes %d, dma %d blockmode %d, read %d "
  3776. "numblocks %d, blocksize %d\n",
3777. __FUNCTION__, nbytes, local_dma, local_blockmode, read, num_blocks, blocksize));
  3778. if (flags & 1)
  3779. sd_err(("%s: rsp5: Command not accepted: arg out of range 0x%x, "
  3780. "bytes %d dma %d\n",
  3781. __FUNCTION__, flags, GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT),
  3782. GFIELD(cmd_arg, CMD53_BLK_MODE)));
  3783. if (flags & 0x8)
  3784. sd_err(("%s: Rsp5: General Error\n", __FUNCTION__));
  3785. sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10 returning error\n",
  3786. __FUNCTION__, flags));
  3787. if (trap_errs)
  3788. ASSERT(0);
  3789. return ERROR;
  3790. }
  3791. if (GFIELD(rsp5, RSP5_STUFF))
  3792. sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n",
  3793. __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
  3794. #ifdef BCMSDYIELD
  3795. local_yield = sd_yieldcpu && ((uint)nbytes >= sd_minyield);
  3796. #endif // endif
  3797. if (!local_dma) {
  3798. int bytes, ii;
  3799. uint32 tmp;
  3800. for (ii = 0; ii < num_blocks; ii++) {
  3801. int words;
  3802. /* Decide which status bit we're waiting for */
  3803. if (read)
  3804. int_bit = SFIELD(0, INTSTAT_BUF_READ_READY, 1);
  3805. else
  3806. int_bit = SFIELD(0, INTSTAT_BUF_WRITE_READY, 1);
  3807. /* If not on, wait for it (or for xfer error) */
  3808. int_reg = sdstd_rreg16(sd, SD_IntrStatus);
  3809. if (!(int_reg & int_bit)) {
  3810. status = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS,
  3811. local_yield, &int_reg);
  3812. switch (status) {
  3813. case -1:
  3814. sd_err(("%s: pio interrupted\n", __FUNCTION__));
  3815. return ERROR;
  3816. case -2:
  3817. sd_err(("%s: pio timeout waiting for interrupt\n",
  3818. __FUNCTION__));
  3819. return ERROR;
  3820. }
  3821. }
  3822. /* Confirm we got the bit w/o error */
  3823. if (!(int_reg & int_bit) || GFIELD(int_reg, INTSTAT_ERROR_INT)) {
  3824. sd_err(("%s: Error or timeout for Buf_%s_Ready: intStat: 0x%x "
  3825. "errint: 0x%x PresentState 0x%x\n",
  3826. __FUNCTION__, read ? "Read" : "Write", int_reg,
  3827. sdstd_rreg16(sd, SD_ErrorIntrStatus),
  3828. sdstd_rreg(sd, SD_PresentState)));
  3829. sdstd_dumpregs(sd);
  3830. sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
  3831. return (ERROR);
  3832. }
  3833. /* Clear Buf Ready bit */
  3834. sdstd_wreg16(sd, SD_IntrStatus, int_bit);
  3835. /* At this point we have Buffer Ready, write the data 4 bytes at a time */
  3836. for (words = blocksize/4; words; words--) {
  3837. if (read)
  3838. *data = sdstd_rreg(sd, SD_BufferDataPort0);
  3839. else
  3840. sdstd_wreg(sd, SD_BufferDataPort0, *data);
  3841. data++;
  3842. }
  3843. bytes = blocksize % 4;
  3844. /* If no leftover bytes, go to next block */
  3845. if (!bytes)
  3846. continue;
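/* Handle the 1-3 trailing bytes that do not fill a 32-bit word: the 8- and
 * 16-bit accesses use SD_BufferDataPort0; the 3-byte case additionally uses
 * SD_BufferDataPort1 for the high byte.
 */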
  3847. switch (bytes) {
  3848. case 1:
  3849. /* R/W 8 bits */
  3850. if (read)
  3851. *(data++) = (uint32)(sdstd_rreg8(sd, SD_BufferDataPort0));
  3852. else
  3853. sdstd_wreg8(sd, SD_BufferDataPort0,
  3854. (uint8)(*(data++) & 0xff));
  3855. break;
  3856. case 2:
  3857. /* R/W 16 bits */
  3858. if (read)
  3859. *(data++) = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0);
  3860. else
  3861. sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)(*(data++)));
  3862. break;
  3863. case 3:
  3864. /* R/W 24 bits:
  3865. * SD_BufferDataPort0[0-15] | SD_BufferDataPort1[16-23]
  3866. */
  3867. if (read) {
  3868. tmp = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0);
  3869. tmp |= ((uint32)(sdstd_rreg8(sd,
  3870. SD_BufferDataPort1)) << 16);
  3871. *(data++) = tmp;
  3872. } else {
  3873. tmp = *(data++);
  3874. sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)tmp & 0xffff);
  3875. sdstd_wreg8(sd, SD_BufferDataPort1,
  3876. (uint8)((tmp >> 16) & 0xff));
  3877. }
  3878. break;
  3879. default:
  3880. sd_err(("%s: Unexpected bytes leftover %d\n",
  3881. __FUNCTION__, bytes));
  3882. ASSERT(0);
  3883. break;
  3884. }
  3885. }
  3886. } /* End PIO processing */
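
	/*
	 * Worked example (illustrative): with a blocksize of 63 the PIO loop
	 * above moves 63/4 = 15 whole words through SD_BufferDataPort0 and then
	 * handles the 63 % 4 = 3 leftover bytes as one 16-bit access on
	 * SD_BufferDataPort0 plus one 8-bit access on SD_BufferDataPort1.
	 */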

	/* Wait for Transfer Complete or Transfer Error */
	int_bit = SFIELD(0, INTSTAT_XFER_COMPLETE, 1);

	/* If not on, wait for it (or for xfer error) */
	int_reg = sdstd_rreg16(sd, SD_IntrStatus);
	if (!(int_reg & int_bit)) {
		status = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS, local_yield, &int_reg);
		switch (status) {
		case -1:
			sd_err(("%s: interrupted\n", __FUNCTION__));
			return ERROR;
		case -2:
			sd_err(("%s: timeout waiting for interrupt\n", __FUNCTION__));
			return ERROR;
		}
	}

	/* Check for any errors from the data phase */
	if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
		return ERROR;

	/* May have gotten a software timeout if not blocking? */
	int_reg = sdstd_rreg16(sd, SD_IntrStatus);
	if (!(int_reg & int_bit)) {
		sd_err(("%s: Error or Timeout for xfer complete; %s, dma %d, State 0x%08x, "
		        "intr 0x%04x, Err 0x%04x, len = %d, rcnt %d, tcnt %d\n",
		        __FUNCTION__, read ? "R" : "W", local_dma,
		        sdstd_rreg(sd, SD_PresentState), int_reg,
		        sdstd_rreg16(sd, SD_ErrorIntrStatus), nbytes,
		        sd->r_cnt, sd->t_cnt));
		sdstd_dumpregs(sd);
		return ERROR;
	}

	/* Clear the status bits */
	int_reg = int_bit;
	if (local_dma) {
		/* DMA Complete */
		/* Reads in particular don't have DMA_COMPLETE set */
		int_reg = SFIELD(int_reg, INTSTAT_DMA_INT, 1);
	}
	sdstd_wreg16(sd, SD_IntrStatus, int_reg);

	/* Fetch data */
	if (local_dma && read) {
		sd_sync_dma(sd, read, nbytes);
		bcopy(sd->dma_buf, data, nbytes);
	}

#ifdef BCMSDIOH_TXGLOM
	if (localbuf)
		MFREE(sd->osh, localbuf, nbytes);
#endif /* BCMSDIOH_TXGLOM */

	return SUCCESS;
}
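
/*
 * Illustrative sketch (not part of the driver build): how a block-mode CMD53
 * argument that satisfies the checks above might be assembled.  Only
 * CMD53_BYTE_BLK_CNT and CMD53_BLK_MODE are taken from the code above; the
 * remaining field names (CMD53_RW_FLAG, CMD53_FUNCTION, CMD53_REG_ADDR,
 * CMD53_OP_CODE) are assumed to follow the same SFIELD() convention used
 * elsewhere in this driver, so treat them as placeholders.
 */
#if 0
static uint32
example_cmd53_blockmode_arg(uint func, uint32 addr, int num_blocks, bool write)
{
	uint32 arg = 0;

	arg = SFIELD(arg, CMD53_BYTE_BLK_CNT, num_blocks);	/* block count, not bytes */
	arg = SFIELD(arg, CMD53_BLK_MODE, 1);			/* block mode transfer */
	arg = SFIELD(arg, CMD53_OP_CODE, 1);			/* incrementing address */
	arg = SFIELD(arg, CMD53_REG_ADDR, addr);
	arg = SFIELD(arg, CMD53_FUNCTION, func);
	arg = SFIELD(arg, CMD53_RW_FLAG, write ? 1 : 0);
	return arg;
}
#endif /* 0 */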

static int
set_client_block_size(sdioh_info_t *sd, int func, int block_size)
{
	int base;
	int err = 0;

	if (func == 1)
		sd_f1_blocksize = block_size;

	sd_err(("%s: Setting block size %d, func %d\n", __FUNCTION__, block_size, func));
	sd->client_block_size[func] = block_size;

	/* Set the block size in the SDIO Card register */
	base = func * SDIOD_FBR_SIZE;
	err = sdstd_card_regwrite(sd, 0, base + SDIOD_CCCR_BLKSIZE_0, 1, block_size & 0xff);
	if (!err) {
		err = sdstd_card_regwrite(sd, 0, base + SDIOD_CCCR_BLKSIZE_1, 1,
		                          (block_size >> 8) & 0xff);
	}

	/* Do not set the block size in the SDIO Host register; that
	 * is func dependent and will get done on an individual
	 * transaction basis.
	 */
	return (err ? BCME_SDIO_ERROR : 0);
}
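
/*
 * Worked example (illustrative): for func 2 with block_size 512, base is
 * 2 * SDIOD_FBR_SIZE, and the two register writes above store
 * 512 & 0xff = 0x00 into SDIOD_CCCR_BLKSIZE_0 and (512 >> 8) & 0xff = 0x02
 * into SDIOD_CCCR_BLKSIZE_1 of that function's FBR window.
 */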

/* Reset and re-initialize the device */
int
sdioh_sdio_reset(sdioh_info_t *si)
{
	uint8 hreg;

	/* Reset the attached device (use slower clock for safety) */
	if (!sdstd_start_clock(si, 128)) {
		sd_err(("set clock failed!\n"));
		return ERROR;
	}
	sdstd_reset(si, 0, 1);

	/* Reset portions of the host state accordingly */
	hreg = sdstd_rreg8(si, SD_HostCntrl);
	hreg = SFIELD(hreg, HOST_HI_SPEED_EN, 0);
	hreg = SFIELD(hreg, HOST_DATA_WIDTH, 0);
	si->sd_mode = SDIOH_MODE_SD1;

	/* Reinitialize the card */
	si->card_init_done = FALSE;
	return sdstd_client_init(si);
}

static void
sd_map_dma(sdioh_info_t *sd)
{
	int alloced;
	void *va;
	uint dma_buf_size = SD_PAGE;

#ifdef BCMSDIOH_TXGLOM
	/* There is no alignment requirement for HC3 */
	if ((sd->version == HOST_CONTR_VER_3) && sd_txglom) {
		/* Max glom packet length is 64KB */
		dma_buf_size = SD_PAGE * 16;
	}
#endif /* BCMSDIOH_TXGLOM */

	alloced = 0;
	if ((va = DMA_ALLOC_CONSISTENT(sd->osh, dma_buf_size, SD_PAGE_BITS, &alloced,
	                               &sd->dma_start_phys, 0x12)) == NULL) {
		sd->sd_dma_mode = DMA_MODE_NONE;
		sd->dma_start_buf = 0;
		sd->dma_buf = (void *)0;
		sd->dma_phys = 0;
		sd->alloced_dma_size = 0;
		sd_err(("%s: DMA_ALLOC failed. Disabling DMA support.\n", __FUNCTION__));
	} else {
		sd->dma_start_buf = va;
		sd->dma_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE);
		sd->dma_phys = ROUNDUP((sd->dma_start_phys), SD_PAGE);
		sd->alloced_dma_size = alloced;
		sd_err(("%s: Mapped DMA Buffer %dbytes @virt/phys: %p/0x%lx\n",
		        __FUNCTION__, sd->alloced_dma_size, sd->dma_buf, sd->dma_phys));
		sd_fill_dma_data_buf(sd, 0xA5);
	}

	if ((va = DMA_ALLOC_CONSISTENT(sd->osh, SD_PAGE, SD_PAGE_BITS, &alloced,
	                               &sd->adma2_dscr_start_phys, 0x12)) == NULL) {
		sd->sd_dma_mode = DMA_MODE_NONE;
		sd->adma2_dscr_start_buf = 0;
		sd->adma2_dscr_buf = (void *)0;
		sd->adma2_dscr_phys = 0;
		sd->alloced_adma2_dscr_size = 0;
		sd_err(("%s: DMA_ALLOC failed for descriptor buffer. "
		        "Disabling DMA support.\n", __FUNCTION__));
	} else {
		sd->adma2_dscr_start_buf = va;
		sd->adma2_dscr_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE);
		sd->adma2_dscr_phys = ROUNDUP((sd->adma2_dscr_start_phys), SD_PAGE);
		sd->alloced_adma2_dscr_size = alloced;
		sd_err(("%s: Mapped ADMA2 Descriptor Buffer %dbytes @virt/phys: %p/0x%lx\n",
		        __FUNCTION__, sd->alloced_adma2_dscr_size, sd->adma2_dscr_buf,
		        sd->adma2_dscr_phys));
		sd_clear_adma_dscr_buf(sd);
	}
}
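
/*
 * Alignment sketch (illustrative): ROUNDUP(addr, SD_PAGE) rounds addr up to
 * the next SD_PAGE boundary, so both dma_buf and dma_phys above start on a
 * page-aligned address within the allocation.  Assuming SD_PAGE is 4096,
 * ROUNDUP(0x12345678, 4096) == 0x12346000, while an already-aligned address
 * is returned unchanged.
 */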

static void
sd_unmap_dma(sdioh_info_t *sd)
{
	if (sd->dma_start_buf) {
		DMA_FREE_CONSISTENT(sd->osh, sd->dma_start_buf, sd->alloced_dma_size,
		                    sd->dma_start_phys, 0x12);
	}

	if (sd->adma2_dscr_start_buf) {
		DMA_FREE_CONSISTENT(sd->osh, sd->adma2_dscr_start_buf, sd->alloced_adma2_dscr_size,
		                    sd->adma2_dscr_start_phys, 0x12);
	}
}

static void
sd_clear_adma_dscr_buf(sdioh_info_t *sd)
{
	bzero((char *)sd->adma2_dscr_buf, SD_PAGE);
	sd_dump_adma_dscr(sd);
}

static void
sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data)
{
	memset((char *)sd->dma_buf, data, SD_PAGE);
}

static void
sd_create_adma_descriptor(sdioh_info_t *sd, uint32 index,
                          uint32 addr_phys, uint16 length, uint16 flags)
{
	adma2_dscr_32b_t *adma2_dscr_table;
	adma1_dscr_t *adma1_dscr_table;

	adma2_dscr_table = sd->adma2_dscr_buf;
	adma1_dscr_table = sd->adma2_dscr_buf;

	switch (sd->sd_dma_mode) {
	case DMA_MODE_ADMA2:
		sd_dma(("%s: creating ADMA2 descriptor for index %d\n",
		        __FUNCTION__, index));
		adma2_dscr_table[index].phys_addr = addr_phys;
		adma2_dscr_table[index].len_attr = length << 16;
		adma2_dscr_table[index].len_attr |= flags;
		break;
	case DMA_MODE_ADMA1:
		/* ADMA1 requires two descriptors, one for len
		 * and the other for data transfer
		 */
		index <<= 1;
		sd_dma(("%s: creating ADMA1 descriptor for index %d\n",
		        __FUNCTION__, index));
		adma1_dscr_table[index].phys_addr_attr = length << 12;
		adma1_dscr_table[index].phys_addr_attr |= (ADMA1_ATTRIBUTE_ACT_SET |
		                                           ADMA2_ATTRIBUTE_VALID);
		adma1_dscr_table[index + 1].phys_addr_attr = addr_phys & 0xFFFFF000;
		adma1_dscr_table[index + 1].phys_addr_attr |= (flags & 0x3f);
		break;
	default:
		sd_err(("%s: cannot create ADMA descriptor for DMA mode %d\n",
		        __FUNCTION__, sd->sd_dma_mode));
		break;
	}
}
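
/*
 * Illustrative sketch (not part of the driver build): building a two-entry
 * ADMA2 table with sd_create_adma_descriptor().  Each entry transfers one
 * buffer (ACT_TRAN | VALID); the last entry also carries END so the host
 * controller stops there.  The buffer addresses and lengths below are
 * hypothetical values chosen only for the example.
 */
#if 0
static void
example_build_adma2_table(sdioh_info_t *sd)
{
	/* entry 0: first 4KB chunk */
	sd_create_adma_descriptor(sd, 0, 0x10000000, 0x1000,
	                          ADMA2_ATTRIBUTE_ACT_TRAN | ADMA2_ATTRIBUTE_VALID);
	/* entry 1: final 512-byte chunk, marked END */
	sd_create_adma_descriptor(sd, 1, 0x10001000, 0x200,
	                          ADMA2_ATTRIBUTE_ACT_TRAN | ADMA2_ATTRIBUTE_END |
	                          ADMA2_ATTRIBUTE_VALID);
	sd_dump_adma_dscr(sd);
}
#endif /* 0 */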

static void
sd_dump_adma_dscr(sdioh_info_t *sd)
{
	adma2_dscr_32b_t *adma2_dscr_table;
	adma1_dscr_t *adma1_dscr_table;
	uint32 i = 0;
	uint16 flags;
	char flags_str[32];

	ASSERT(sd->adma2_dscr_buf != NULL);

	adma2_dscr_table = sd->adma2_dscr_buf;
	adma1_dscr_table = sd->adma2_dscr_buf;

	switch (sd->sd_dma_mode) {
	case DMA_MODE_ADMA2:
		sd_err(("ADMA2 Descriptor Table (%dbytes) @virt/phys: %p/0x%lx\n",
		        SD_PAGE, sd->adma2_dscr_buf, sd->adma2_dscr_phys));
		sd_err((" #[Descr VA ] Buffer PA | Len | Flags (5:4 2 1 0)"
		        " |\n"));
		while (adma2_dscr_table->len_attr & ADMA2_ATTRIBUTE_VALID) {
			flags = adma2_dscr_table->len_attr & 0xFFFF;
			sprintf(flags_str, "%s%s%s%s",
			        ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
			         ADMA2_ATTRIBUTE_ACT_LINK) ? "LINK " :
			        ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
			         ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " :
			        ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
			         ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP " : "RSV ",
			        (flags & ADMA2_ATTRIBUTE_INT ? "INT " : " "),
			        (flags & ADMA2_ATTRIBUTE_END ? "END " : " "),
			        (flags & ADMA2_ATTRIBUTE_VALID ? "VALID" : ""));
			sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | 0x%04x (%s) |\n",
			        i, adma2_dscr_table, adma2_dscr_table->phys_addr,
			        adma2_dscr_table->len_attr >> 16, flags, flags_str));
			i++;

			/* Follow LINK descriptors or skip to next. */
			if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
			    ADMA2_ATTRIBUTE_ACT_LINK) {
				adma2_dscr_table = phys_to_virt(
				        adma2_dscr_table->phys_addr);
			} else {
				adma2_dscr_table++;
			}
		}
		break;
	case DMA_MODE_ADMA1:
		sd_err(("ADMA1 Descriptor Table (%dbytes) @virt/phys: %p/0x%lx\n",
		        SD_PAGE, sd->adma2_dscr_buf, sd->adma2_dscr_phys));
		sd_err((" #[Descr VA ] Buffer PA | Flags (5:4 2 1 0) |\n"));

		for (i = 0; adma1_dscr_table->phys_addr_attr & ADMA2_ATTRIBUTE_VALID; i++) {
			flags = adma1_dscr_table->phys_addr_attr & 0x3F;
			sprintf(flags_str, "%s%s%s%s",
			        ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
			         ADMA2_ATTRIBUTE_ACT_LINK) ? "LINK " :
			        ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
			         ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " :
			        ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
			         ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP " : "SET ",
			        (flags & ADMA2_ATTRIBUTE_INT ? "INT " : " "),
			        (flags & ADMA2_ATTRIBUTE_END ? "END " : " "),
			        (flags & ADMA2_ATTRIBUTE_VALID ? "VALID" : ""));
			sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | (%s) |\n",
			        i, adma1_dscr_table,
			        adma1_dscr_table->phys_addr_attr & 0xFFFFF000,
			        flags, flags_str));

			/* Follow LINK descriptors or skip to next. */
			if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) ==
			    ADMA2_ATTRIBUTE_ACT_LINK) {
				adma1_dscr_table = phys_to_virt(
				        adma1_dscr_table->phys_addr_attr & 0xFFFFF000);
			} else {
				adma1_dscr_table++;
			}
		}
		break;
	default:
		sd_err(("Unknown DMA Descriptor Table Format.\n"));
		break;
	}
}
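
/*
 * Worked example (illustrative): using the attribute layout shown in the
 * dump's column header above (bits 5:4 = Act, bit 2 = Int, bit 1 = End,
 * bit 0 = Valid), an ADMA2 len_attr of 0x02000023 decodes as length 0x0200
 * (512 bytes) and flags 0x0023, i.e. Act = 2 (TRAN) with End and Valid set
 * and Int clear, which the loop above prints as "TRAN END VALID".
 */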

static void
sdstd_dumpregs(sdioh_info_t *sd)
{
	sd_err(("IntrStatus: 0x%04x ErrorIntrStatus 0x%04x\n",
	        sdstd_rreg16(sd, SD_IntrStatus),
	        sdstd_rreg16(sd, SD_ErrorIntrStatus)));
	sd_err(("IntrStatusEnable: 0x%04x ErrorIntrStatusEnable 0x%04x\n",
	        sdstd_rreg16(sd, SD_IntrStatusEnable),
	        sdstd_rreg16(sd, SD_ErrorIntrStatusEnable)));
	sd_err(("IntrSignalEnable: 0x%04x ErrorIntrSignalEnable 0x%04x\n",
	        sdstd_rreg16(sd, SD_IntrSignalEnable),
	        sdstd_rreg16(sd, SD_ErrorIntrSignalEnable)));
}