t4_hw.c 291 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
78778877987808781878287838784878587868787878887898790879187928793879487958796879787988799880088018802880388048805880688078808880988108811881288138814881588168817881888198820882188228823882488258826882788288829883088318832883388348835883688378838883988408841884288438844884588468847884888498850885188528853885488558856885788588859886088618862886388648865886688678868886988708871887288738874887588768877887888798880888188828883888488858886888788888889889088918892889388948895889688978898889989008901890289038904890589068907890889098910891189128913891489158916891789188919892089218922892389248925892689278928892989308931893289338934893589368937893889398940894189428943894489458946894789488949895089518952895389548955895689578958895989608961896289638964896589668967896889698970897189728973897489758976897789788979898089818982898389848985898689878988898989908991899289938994899589968997899889999000900190029003900490059006900790089009901090119012901390149015901690179018901990209021902290239024902590269027902890299030903190329033903490359036903790389039904090419042904390449045904690479048904990509051905290539054905590569057905890599060906190629063906490659066906790689069907090719072907390749075907690779078907990809081908290839084908590869087908890899090909190929093909490959096909790989099910091019102910391049105910691079108910991109111911291139114911591169117911891199120912191229123912491259126912791289129913091319132913391349135913691379138913991409141914291439144914591469147914891499150915191529153915491559156915791589159916091619162916391649165916691679168916991709171917291739174917591769177917891799180918191829183918491859186918791889189919091919192919391949195919691979198919992009201920292039204920592069207920892099210921192129213921492159216921792189219922092219222922392249225922692279228922992309231923292339234923592369237923892399240924192429243924492459246924792489249925092519252925392549255925692579258925992609261926292639264926592669267926892699270927192729273927492759276927
79278927992809281928292839284928592869287928892899290929192929293929492959296929792989299930093019302930393049305930693079308930993109311931293139314931593169317931893199320932193229323932493259326932793289329933093319332933393349335933693379338933993409341934293439344934593469347934893499350935193529353935493559356935793589359936093619362936393649365936693679368936993709371937293739374937593769377937893799380938193829383938493859386938793889389939093919392939393949395939693979398939994009401940294039404940594069407940894099410941194129413941494159416941794189419942094219422942394249425942694279428942994309431943294339434943594369437943894399440944194429443944494459446944794489449945094519452945394549455945694579458945994609461946294639464946594669467946894699470947194729473947494759476947794789479948094819482948394849485948694879488948994909491949294939494949594969497949894999500950195029503950495059506950795089509951095119512951395149515951695179518951995209521952295239524952595269527952895299530953195329533953495359536953795389539954095419542954395449545954695479548954995509551955295539554955595569557955895599560956195629563956495659566956795689569957095719572957395749575957695779578957995809581958295839584958595869587958895899590959195929593959495959596959795989599960096019602960396049605960696079608960996109611961296139614961596169617961896199620962196229623962496259626962796289629963096319632963396349635963696379638963996409641964296439644964596469647964896499650965196529653965496559656965796589659966096619662966396649665966696679668966996709671967296739674967596769677967896799680968196829683968496859686968796889689969096919692969396949695969696979698969997009701970297039704970597069707970897099710971197129713971497159716971797189719972097219722972397249725972697279728972997309731973297339734973597369737973897399740974197429743974497459746974797489749975097519752975397549755975697579758975997609761976297639764976597669767976897699770977197729773977497759776977
79778977997809781978297839784978597869787978897899790979197929793979497959796979797989799980098019802980398049805980698079808980998109811981298139814981598169817981898199820982198229823982498259826982798289829983098319832983398349835983698379838983998409841984298439844984598469847984898499850985198529853985498559856985798589859986098619862986398649865986698679868986998709871987298739874987598769877987898799880988198829883988498859886988798889889989098919892989398949895989698979898989999009901990299039904990599069907990899099910991199129913991499159916991799189919992099219922992399249925992699279928992999309931993299339934993599369937993899399940994199429943994499459946994799489949995099519952995399549955995699579958995999609961996299639964996599669967996899699970997199729973997499759976997799789979998099819982998399849985998699879988998999909991999299939994999599969997999899991000010001100021000310004100051000610007100081000910010100111001210013100141001510016100171001810019100201002110022100231002410025100261002710028100291003010031100321003310034100351003610037100381003910040100411004210043100441004510046100471004810049100501005110052100531005410055100561005710058100591006010061100621006310064100651006610067100681006910070100711007210073100741007510076100771007810079100801008110082100831008410085100861008710088100891009010091100921009310094100951009610097100981009910100101011010210103101041010510106101071010810109101101011110112101131011410115101161011710118101191012010121101221012310124101251012610127101281012910130101311013210133101341013510136101371013810139101401014110142101431014410145101461014710148101491015010151101521015310154101551015610157101581015910160101611016210163101641016510166101671016810169101701017110172101731017410175101761017710178101791018010181101821018310184101851018610187101881018910190101911019210193101941019510196101971019810199102001020110202102031020410205102061020710208102091021010211102121021310214102151021610217102181021910220
  1. /*
  2. * This file is part of the Chelsio T4 Ethernet driver for Linux.
  3. *
  4. * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
  5. *
  6. * This software is available to you under a choice of one of two
  7. * licenses. You may choose to be licensed under the terms of the GNU
  8. * General Public License (GPL) Version 2, available from the file
  9. * COPYING in the main directory of this source tree, or the
  10. * OpenIB.org BSD license below:
  11. *
  12. * Redistribution and use in source and binary forms, with or
  13. * without modification, are permitted provided that the following
  14. * conditions are met:
  15. *
  16. * - Redistributions of source code must retain the above
  17. * copyright notice, this list of conditions and the following
  18. * disclaimer.
  19. *
  20. * - Redistributions in binary form must reproduce the above
  21. * copyright notice, this list of conditions and the following
  22. * disclaimer in the documentation and/or other materials
  23. * provided with the distribution.
  24. *
  25. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32. * SOFTWARE.
  33. */
  34. #include <linux/delay.h>
  35. #include "cxgb4.h"
  36. #include "t4_regs.h"
  37. #include "t4_values.h"
  38. #include "t4fw_api.h"
  39. #include "t4fw_version.h"
  40. /**
  41. * t4_wait_op_done_val - wait until an operation is completed
  42. * @adapter: the adapter performing the operation
  43. * @reg: the register to check for completion
  44. * @mask: a single-bit field within @reg that indicates completion
  45. * @polarity: the value of the field when the operation is completed
  46. * @attempts: number of check iterations
  47. * @delay: delay in usecs between iterations
  48. * @valp: where to store the value of the register at completion time
  49. *
  50. * Wait until an operation is completed by checking a bit in a register
  51. * up to @attempts times. If @valp is not NULL the value of the register
  52. * at the time it indicated completion is stored there. Returns 0 if the
  53. * operation completes and -EAGAIN otherwise.
  54. */
  55. static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
  56. int polarity, int attempts, int delay, u32 *valp)
  57. {
  58. while (1) {
  59. u32 val = t4_read_reg(adapter, reg);
  60. if (!!(val & mask) == polarity) {
  61. if (valp)
  62. *valp = val;
  63. return 0;
  64. }
  65. if (--attempts == 0)
  66. return -EAGAIN;
  67. if (delay)
  68. udelay(delay);
  69. }
  70. }
  71. static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
  72. int polarity, int attempts, int delay)
  73. {
  74. return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
  75. delay, NULL);
  76. }
  77. /**
  78. * t4_set_reg_field - set a register field to a value
  79. * @adapter: the adapter to program
  80. * @addr: the register address
  81. * @mask: specifies the portion of the register to modify
  82. * @val: the new value for the register field
  83. *
  84. * Sets a register field specified by the supplied mask to the
  85. * given value.
  86. */
  87. void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
  88. u32 val)
  89. {
  90. u32 v = t4_read_reg(adapter, addr) & ~mask;
  91. t4_write_reg(adapter, addr, v | val);
  92. (void) t4_read_reg(adapter, addr); /* flush */
  93. }
  94. /**
  95. * t4_read_indirect - read indirectly addressed registers
  96. * @adap: the adapter
  97. * @addr_reg: register holding the indirect address
  98. * @data_reg: register holding the value of the indirect register
  99. * @vals: where the read register values are stored
  100. * @nregs: how many indirect registers to read
  101. * @start_idx: index of first indirect register to read
  102. *
  103. * Reads registers that are accessed indirectly through an address/data
  104. * register pair.
  105. */
  106. void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
  107. unsigned int data_reg, u32 *vals,
  108. unsigned int nregs, unsigned int start_idx)
  109. {
  110. while (nregs--) {
  111. t4_write_reg(adap, addr_reg, start_idx);
  112. *vals++ = t4_read_reg(adap, data_reg);
  113. start_idx++;
  114. }
  115. }
  116. /**
  117. * t4_write_indirect - write indirectly addressed registers
  118. * @adap: the adapter
  119. * @addr_reg: register holding the indirect addresses
  120. * @data_reg: register holding the value for the indirect registers
  121. * @vals: values to write
  122. * @nregs: how many indirect registers to write
  123. * @start_idx: address of first indirect register to write
  124. *
  125. * Writes a sequential block of registers that are accessed indirectly
  126. * through an address/data register pair.
  127. */
  128. void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
  129. unsigned int data_reg, const u32 *vals,
  130. unsigned int nregs, unsigned int start_idx)
  131. {
  132. while (nregs--) {
  133. t4_write_reg(adap, addr_reg, start_idx++);
  134. t4_write_reg(adap, data_reg, *vals++);
  135. }
  136. }
  137. /*
  138. * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
  139. * mechanism. This guarantees that we get the real value even if we're
  140. * operating within a Virtual Machine and the Hypervisor is trapping our
  141. * Configuration Space accesses.
  142. */
  143. void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
  144. {
  145. u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
  146. if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
  147. req |= ENABLE_F;
  148. else
  149. req |= T6_ENABLE_F;
  150. if (is_t4(adap->params.chip))
  151. req |= LOCALCFG_F;
  152. t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
  153. *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
  154. /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
  155. * Configuration Space read. (None of the other fields matter when
  156. * ENABLE is 0 so a simple register write is easier than a
  157. * read-modify-write via t4_set_reg_field().)
  158. */
  159. t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
  160. }
  161. /*
  162. * t4_report_fw_error - report firmware error
  163. * @adap: the adapter
  164. *
  165. * The adapter firmware can indicate error conditions to the host.
  166. * If the firmware has indicated an error, print out the reason for
  167. * the firmware error.
  168. */
  169. static void t4_report_fw_error(struct adapter *adap)
  170. {
  171. static const char *const reason[] = {
  172. "Crash", /* PCIE_FW_EVAL_CRASH */
  173. "During Device Preparation", /* PCIE_FW_EVAL_PREP */
  174. "During Device Configuration", /* PCIE_FW_EVAL_CONF */
  175. "During Device Initialization", /* PCIE_FW_EVAL_INIT */
  176. "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
  177. "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
  178. "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
  179. "Reserved", /* reserved */
  180. };
  181. u32 pcie_fw;
  182. pcie_fw = t4_read_reg(adap, PCIE_FW_A);
  183. if (pcie_fw & PCIE_FW_ERR_F) {
  184. dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
  185. reason[PCIE_FW_EVAL_G(pcie_fw)]);
  186. adap->flags &= ~FW_OK;
  187. }
  188. }
  189. /*
  190. * Get the reply to a mailbox command and store it in @rpl in big-endian order.
  191. */
  192. static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
  193. u32 mbox_addr)
  194. {
  195. for ( ; nflit; nflit--, mbox_addr += 8)
  196. *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
  197. }
/*
 * fw_asrt - report a firmware assertion delivered via a mailbox
 * @adap: the adapter
 * @mbox_addr: base address of the mailbox holding the FW_DEBUG_CMD reply
 *
 * Handle a FW assertion reported in a mailbox: pull the fw_debug_cmd
 * payload out of the mailbox registers and log the firmware source file,
 * line number and the two assertion values at "alert" severity.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	/* The mailbox is read in 64-bit flits, hence sizeof(asrt) / 8 */
	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
		  be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
}
  210. /**
  211. * t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
  212. * @adapter: the adapter
  213. * @cmd: the Firmware Mailbox Command or Reply
  214. * @size: command length in bytes
  215. * @access: the time (ms) needed to access the Firmware Mailbox
  216. * @execute: the time (ms) the command spent being executed
  217. */
  218. static void t4_record_mbox(struct adapter *adapter,
  219. const __be64 *cmd, unsigned int size,
  220. int access, int execute)
  221. {
  222. struct mbox_cmd_log *log = adapter->mbox_log;
  223. struct mbox_cmd *entry;
  224. int i;
  225. entry = mbox_cmd_log_entry(log, log->cursor++);
  226. if (log->cursor == log->size)
  227. log->cursor = 0;
  228. for (i = 0; i < size / 8; i++)
  229. entry->cmd[i] = be64_to_cpu(cmd[i]);
  230. while (i < MBOX_LEN / 8)
  231. entry->cmd[i++] = 0;
  232. entry->timestamp = jiffies;
  233. entry->seqno = log->seqno++;
  234. entry->access = access;
  235. entry->execute = execute;
  236. }
/**
 *	t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *	@timeout: time to wait for command to finish before timing out
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 *	to respond.  @sleep_ok determines whether we may sleep while awaiting
 *	the response.  If sleeping is allowed we use progressive backoff
 *	otherwise we spin.
 *	Note that passing a negative @timeout also implies "cannot sleep";
 *	the absolute value is used as the timeout in that case.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	/* Progressive backoff schedule (ms); the last entry repeats */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	struct mbox_list entry;
	u16 access = 0;
	u16 execute = 0;
	u32 v;
	u64 res;
	int i, ms, delay_idx, ret;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
	__be64 cmd_rpl[MBOX_LEN / 8];
	u32 pcie_fw;

	/* Commands must be a multiple of 16 bytes and fit in the mailbox */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	/* If we have a negative timeout, that implies that we can't sleep. */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

	/* Queue ourselves onto the mailbox access list.  When our entry is at
	 * the front of the list, we have rights to access the mailbox.  So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	spin_lock_bh(&adap->mbox_lock);
	list_add_tail(&entry.list, &adap->mlist.list);
	spin_unlock_bh(&adap->mbox_lock);

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; ; i += ms) {
		/* If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start.  We very rarely
		 * contend on access to the mailbox ...  Also check for a
		 * firmware error which would make further waiting pointless.
		 */
		pcie_fw = t4_read_reg(adap, PCIE_FW_A);
		if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
			spin_lock_bh(&adap->mbox_lock);
			list_del(&entry.list);
			spin_unlock_bh(&adap->mbox_lock);
			ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
			t4_record_mbox(adap, cmd, size, access, ret);
			return ret;
		}

		/* If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (list_first_entry(&adap->mlist.list, struct mbox_list,
				     list) == &entry)
			break;

		/* Delay for a bit before checking again ... */
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			mdelay(ms);
		}
	}

	/* Loop trying to get ownership of the mailbox.  Return an error
	 * if we can't gain ownership.
	 */
	v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
	if (v != MBOX_OWNER_DRV) {
		spin_lock_bh(&adap->mbox_lock);
		list_del(&entry.list);
		spin_unlock_bh(&adap->mbox_lock);
		ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
		t4_record_mbox(adap, cmd, size, access, ret);
		return ret;
	}

	/* Copy in the new mailbox command and send it on its way ... */
	t4_record_mbox(adap, cmd, size, access, 0);
	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	/* Hand the mailbox over to the firmware and flush the doorbell */
	t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	/* Poll for the reply until @timeout expires or firmware dies */
	for (i = 0;
	     !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) &&
	     i < timeout;
	     i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
			/* Ownership came back without a valid message:
			 * clear the control register and keep polling.
			 */
			if (!(v & MBMSGVALID_F)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg);
			res = be64_to_cpu(cmd_rpl[0]);

			/* A FW_DEBUG_CMD reply carries an assertion report
			 * rather than the answer to our command.
			 */
			if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL_V(EIO);
			} else if (rpl) {
				memcpy(rpl, cmd_rpl, size);
			}

			t4_write_reg(adap, ctl_reg, 0);

			execute = i + ms;
			t4_record_mbox(adap, cmd_rpl,
				       MBOX_LEN, access, execute);
			spin_lock_bh(&adap->mbox_lock);
			list_del(&entry.list);
			spin_unlock_bh(&adap->mbox_lock);
			return -FW_CMD_RETVAL_G((int)res);
		}
	}

	/* Timed out (or firmware error): log, report, drop our list entry
	 * and declare the adapter dead.
	 */
	ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
	t4_record_mbox(adap, cmd, size, access, ret);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	t4_report_fw_error(adap);
	spin_lock_bh(&adap->mbox_lock);
	list_del(&entry.list);
	spin_unlock_bh(&adap->mbox_lock);
	t4_fatal_err(adap);
	return ret;
}
  399. int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
  400. void *rpl, bool sleep_ok)
  401. {
  402. return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
  403. FW_CMD_MAX_TIMEOUT);
  404. }
  405. static int t4_edc_err_read(struct adapter *adap, int idx)
  406. {
  407. u32 edc_ecc_err_addr_reg;
  408. u32 rdata_reg;
  409. if (is_t4(adap->params.chip)) {
  410. CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
  411. return 0;
  412. }
  413. if (idx != 0 && idx != 1) {
  414. CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
  415. return 0;
  416. }
  417. edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
  418. rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);
  419. CH_WARN(adap,
  420. "edc%d err addr 0x%x: 0x%x.\n",
  421. idx, edc_ecc_err_addr_reg,
  422. t4_read_reg(adap, edc_ecc_err_addr_reg));
  423. CH_WARN(adap,
  424. "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
  425. rdata_reg,
  426. (unsigned long long)t4_read_reg64(adap, rdata_reg),
  427. (unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
  428. (unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
  429. (unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
  430. (unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
  431. (unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
  432. (unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
  433. (unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
  434. (unsigned long long)t4_read_reg64(adap, rdata_reg + 64));
  435. return 0;
  436. }
  437. /**
  438. * t4_memory_rw_init - Get memory window relative offset, base, and size.
  439. * @adap: the adapter
  440. * @win: PCI-E Memory Window to use
  441. * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_HMA or MEM_MC
  442. * @mem_off: memory relative offset with respect to @mtype.
  443. * @mem_base: configured memory base address.
  444. * @mem_aperture: configured memory window aperture.
  445. *
  446. * Get the configured memory window's relative offset, base, and size.
  447. */
  448. int t4_memory_rw_init(struct adapter *adap, int win, int mtype, u32 *mem_off,
  449. u32 *mem_base, u32 *mem_aperture)
  450. {
  451. u32 edc_size, mc_size, mem_reg;
  452. /* Offset into the region of memory which is being accessed
  453. * MEM_EDC0 = 0
  454. * MEM_EDC1 = 1
  455. * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
  456. * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
  457. * MEM_HMA = 4
  458. */
  459. edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
  460. if (mtype == MEM_HMA) {
  461. *mem_off = 2 * (edc_size * 1024 * 1024);
  462. } else if (mtype != MEM_MC1) {
  463. *mem_off = (mtype * (edc_size * 1024 * 1024));
  464. } else {
  465. mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
  466. MA_EXT_MEMORY0_BAR_A));
  467. *mem_off = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
  468. }
  469. /* Each PCI-E Memory Window is programmed with a window size -- or
  470. * "aperture" -- which controls the granularity of its mapping onto
  471. * adapter memory. We need to grab that aperture in order to know
  472. * how to use the specified window. The window is also programmed
  473. * with the base address of the Memory Window in BAR0's address
  474. * space. For T4 this is an absolute PCI-E Bus Address. For T5
  475. * the address is relative to BAR0.
  476. */
  477. mem_reg = t4_read_reg(adap,
  478. PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
  479. win));
  480. /* a dead adapter will return 0xffffffff for PIO reads */
  481. if (mem_reg == 0xffffffff)
  482. return -ENXIO;
  483. *mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
  484. *mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
  485. if (is_t4(adap->params.chip))
  486. *mem_base -= adap->t4_bar0;
  487. return 0;
  488. }
  489. /**
  490. * t4_memory_update_win - Move memory window to specified address.
  491. * @adap: the adapter
  492. * @win: PCI-E Memory Window to use
  493. * @addr: location to move.
  494. *
  495. * Move memory window to specified address.
  496. */
  497. void t4_memory_update_win(struct adapter *adap, int win, u32 addr)
  498. {
  499. t4_write_reg(adap,
  500. PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
  501. addr);
  502. /* Read it back to ensure that changes propagate before we
  503. * attempt to use the new value.
  504. */
  505. t4_read_reg(adap,
  506. PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
  507. }
/**
 *	t4_memory_rw_residual - Read/Write residual data.
 *	@adap: the adapter
 *	@off: relative offset within residual to start read/write.
 *	@addr: address within indicated memory type.
 *	@buf: host memory buffer
 *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
 *
 *	Read/Write residual data less than 32-bits.
 */
void t4_memory_rw_residual(struct adapter *adap, u32 off, u32 addr, u8 *buf,
			   int dir)
{
	/* Staging union so the 32-bit register value can be addressed
	 * byte by byte.
	 */
	union {
		u32 word;
		char byte[4];
	} last;
	unsigned char *bp;
	int i;

	if (dir == T4_MEMORY_READ) {
		/* Fetch the full 32-bit word (undoing the PCI-E link's
		 * little-endian swizzle) and copy bytes @off..3 into the
		 * host buffer at the same byte positions.
		 * NOTE(review): the copy starts at index @off, i.e. the
		 * *trailing* 4 - @off bytes of the word are transferred --
		 * confirm this matches the caller's residual layout.
		 */
		last.word = le32_to_cpu((__force __le32)
					t4_read_reg(adap, addr));
		for (bp = (unsigned char *)buf, i = off; i < 4; i++)
			bp[i] = last.byte[i];
	} else {
		/* NOTE(review): *buf is a single u8 read, so only buf[0]
		 * (zero-extended) lands in last.word; any host bytes at
		 * buf[1..@off-1] are never written to the adapter.
		 * Presumably intentional for the existing callers --
		 * verify against upstream before relying on it.
		 */
		last.word = *buf;
		for (i = off; i < 4; i++)
			last.byte[i] = 0;
		t4_write_reg(adap, addr,
			     (__force u32)cpu_to_le32(last.word));
	}
}
  540. /**
  541. * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
  542. * @adap: the adapter
  543. * @win: PCI-E Memory Window to use
  544. * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
  545. * @addr: address within indicated memory type
  546. * @len: amount of memory to transfer
  547. * @hbuf: host memory buffer
  548. * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
  549. *
  550. * Reads/writes an [almost] arbitrary memory region in the firmware: the
  551. * firmware memory address and host buffer must be aligned on 32-bit
  552. * boudaries; the length may be arbitrary. The memory is transferred as
  553. * a raw byte sequence from/to the firmware's memory. If this memory
  554. * contains data structures which contain multi-byte integers, it's the
  555. * caller's responsibility to perform appropriate byte order conversions.
  556. */
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
		 u32 len, void *hbuf, int dir)
{
	u32 pos, offset, resid, memoffset;
	u32 win_pf, mem_aperture, mem_base;
	u32 *buf;
	int ret;

	/* Argument sanity checks: both the adapter address and the host
	 * buffer must be 32-bit aligned (see the function comment above).
	 */
	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
		return -EINVAL;
	buf = (u32 *)hbuf;

	/* It's convenient to be able to handle lengths which aren't a
	 * multiple of 32-bits because we often end up transferring files to
	 * the firmware. So we'll handle that by normalizing the length here
	 * and then handling any residual transfer at the end.
	 */
	resid = len & 0x3;
	len -= resid;

	/* Look up the memory window parameters (base/aperture) and the
	 * offset of the requested memory type within adapter memory.
	 */
	ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base,
				&mem_aperture);
	if (ret)
		return ret;

	/* Determine the PCIE_MEM_ACCESS_OFFSET */
	addr = addr + memoffset;

	/* T5+ windows are qualified by the accessing PF; T4 has no such
	 * qualification.
	 */
	win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);

	/* Calculate our initial PCI-E Memory Window Position and Offset into
	 * that Window.  The window position must be aperture-aligned.
	 */
	pos = addr & ~(mem_aperture - 1);
	offset = addr - pos;

	/* Set up initial PCI-E Memory Window to cover the start of our
	 * transfer.
	 */
	t4_memory_update_win(adap, win, pos | win_pf);

	/* Transfer data to/from the adapter as long as there's an integral
	 * number of 32-bit transfers to complete.
	 *
	 * A note on Endianness issues:
	 *
	 * The "register" reads and writes below from/to the PCI-E Memory
	 * Window invoke the standard adapter Big-Endian to PCI-E Link
	 * Little-Endian "swizzel."  As a result, if we have the following
	 * data in adapter memory:
	 *
	 *     Memory:  ... | b0 | b1 | b2 | b3 | ...
	 *     Address:      i+0  i+1  i+2  i+3
	 *
	 * Then a read of the adapter memory via the PCI-E Memory Window
	 * will yield:
	 *
	 *     x = readl(i)
	 *         31                  0
	 *         [ b3 | b2 | b1 | b0 ]
	 *
	 * If this value is stored into local memory on a Little-Endian system
	 * it will show up correctly in local memory as:
	 *
	 *     ( ..., b0, b1, b2, b3, ... )
	 *
	 * But on a Big-Endian system, the store will show up in memory
	 * incorrectly swizzled as:
	 *
	 *     ( ..., b3, b2, b1, b0, ... )
	 *
	 * So we need to account for this in the reads and writes to the
	 * PCI-E Memory Window below by undoing the register read/write
	 * swizzels.
	 */
	while (len > 0) {
		if (dir == T4_MEMORY_READ)
			*buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
						mem_base + offset));
		else
			t4_write_reg(adap, mem_base + offset,
				     (__force u32)cpu_to_le32(*buf++));
		offset += sizeof(__be32);
		len -= sizeof(__be32);

		/* If we've reached the end of our current window aperture,
		 * move the PCI-E Memory Window on to the next.  Note that
		 * doing this here after "len" may be 0 allows us to set up
		 * the PCI-E Memory Window for a possible final residual
		 * transfer below ...
		 */
		if (offset == mem_aperture) {
			pos += mem_aperture;
			offset = 0;
			t4_memory_update_win(adap, win, pos | win_pf);
		}
	}

	/* If the original transfer had a length which wasn't a multiple of
	 * 32-bits, now's where we need to finish off the transfer of the
	 * residual amount.  The PCI-E Memory Window has already been moved
	 * above (if necessary) to cover this final transfer.
	 */
	if (resid)
		t4_memory_rw_residual(adap, resid, mem_base + offset,
				      (u8 *)buf, dir);

	return 0;
}
  657. /* Return the specified PCI-E Configuration Space register from our Physical
  658. * Function. We try first via a Firmware LDST Command since we prefer to let
  659. * the firmware own all of these registers, but if that fails we go for it
  660. * directly ourselves.
  661. */
  662. u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
  663. {
  664. u32 val, ldst_addrspace;
  665. /* If fw_attach != 0, construct and send the Firmware LDST Command to
  666. * retrieve the specified PCI-E Configuration Space register.
  667. */
  668. struct fw_ldst_cmd ldst_cmd;
  669. int ret;
  670. memset(&ldst_cmd, 0, sizeof(ldst_cmd));
  671. ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
  672. ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
  673. FW_CMD_REQUEST_F |
  674. FW_CMD_READ_F |
  675. ldst_addrspace);
  676. ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
  677. ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
  678. ldst_cmd.u.pcie.ctrl_to_fn =
  679. (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
  680. ldst_cmd.u.pcie.r = reg;
  681. /* If the LDST Command succeeds, return the result, otherwise
  682. * fall through to reading it directly ourselves ...
  683. */
  684. ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
  685. &ldst_cmd);
  686. if (ret == 0)
  687. val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
  688. else
  689. /* Read the desired Configuration Space register via the PCI-E
  690. * Backdoor mechanism.
  691. */
  692. t4_hw_pci_read_cfg4(adap, reg, &val);
  693. return val;
  694. }
  695. /* Get the window based on base passed to it.
  696. * Window aperture is currently unhandled, but there is no use case for it
  697. * right now
  698. */
  699. static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
  700. u32 memwin_base)
  701. {
  702. u32 ret;
  703. if (is_t4(adap->params.chip)) {
  704. u32 bar0;
  705. /* Truncation intentional: we only read the bottom 32-bits of
  706. * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
  707. * mechanism to read BAR0 instead of using
  708. * pci_resource_start() because we could be operating from
  709. * within a Virtual Machine which is trapping our accesses to
  710. * our Configuration Space and we need to set up the PCI-E
  711. * Memory Window decoders with the actual addresses which will
  712. * be coming across the PCI-E link.
  713. */
  714. bar0 = t4_read_pcie_cfg4(adap, pci_base);
  715. bar0 &= pci_mask;
  716. adap->t4_bar0 = bar0;
  717. ret = bar0 + memwin_base;
  718. } else {
  719. /* For T5, only relative offset inside the PCIe BAR is passed */
  720. ret = memwin_base;
  721. }
  722. return ret;
  723. }
  724. /* Get the default utility window (win0) used by everyone */
  725. u32 t4_get_util_window(struct adapter *adap)
  726. {
  727. return t4_get_window(adap, PCI_BASE_ADDRESS_0,
  728. PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
  729. }
  730. /* Set up memory window for accessing adapter memory ranges. (Read
  731. * back MA register to ensure that changes propagate before we attempt
  732. * to use the new values.)
  733. */
  734. void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
  735. {
  736. t4_write_reg(adap,
  737. PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
  738. memwin_base | BIR_V(0) |
  739. WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
  740. t4_read_reg(adap,
  741. PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
  742. }
  743. /**
  744. * t4_get_regs_len - return the size of the chips register set
  745. * @adapter: the adapter
  746. *
  747. * Returns the size of the chip's BAR0 register space.
  748. */
  749. unsigned int t4_get_regs_len(struct adapter *adapter)
  750. {
  751. unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
  752. switch (chip_version) {
  753. case CHELSIO_T4:
  754. return T4_REGMAP_SIZE;
  755. case CHELSIO_T5:
  756. case CHELSIO_T6:
  757. return T5_REGMAP_SIZE;
  758. }
  759. dev_err(adapter->pdev_dev,
  760. "Unsupported chip version %d\n", chip_version);
  761. return 0;
  762. }
  763. /**
  764. * t4_get_regs - read chip registers into provided buffer
  765. * @adap: the adapter
  766. * @buf: register buffer
  767. * @buf_size: size (in bytes) of register buffer
  768. *
  769. * If the provided register buffer isn't large enough for the chip's
  770. * full register range, the register dump will be truncated to the
  771. * register buffer's size.
  772. */
  773. void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
  774. {
  775. static const unsigned int t4_reg_ranges[] = {
  776. 0x1008, 0x1108,
  777. 0x1180, 0x1184,
  778. 0x1190, 0x1194,
  779. 0x11a0, 0x11a4,
  780. 0x11b0, 0x11b4,
  781. 0x11fc, 0x123c,
  782. 0x1300, 0x173c,
  783. 0x1800, 0x18fc,
  784. 0x3000, 0x30d8,
  785. 0x30e0, 0x30e4,
  786. 0x30ec, 0x5910,
  787. 0x5920, 0x5924,
  788. 0x5960, 0x5960,
  789. 0x5968, 0x5968,
  790. 0x5970, 0x5970,
  791. 0x5978, 0x5978,
  792. 0x5980, 0x5980,
  793. 0x5988, 0x5988,
  794. 0x5990, 0x5990,
  795. 0x5998, 0x5998,
  796. 0x59a0, 0x59d4,
  797. 0x5a00, 0x5ae0,
  798. 0x5ae8, 0x5ae8,
  799. 0x5af0, 0x5af0,
  800. 0x5af8, 0x5af8,
  801. 0x6000, 0x6098,
  802. 0x6100, 0x6150,
  803. 0x6200, 0x6208,
  804. 0x6240, 0x6248,
  805. 0x6280, 0x62b0,
  806. 0x62c0, 0x6338,
  807. 0x6370, 0x638c,
  808. 0x6400, 0x643c,
  809. 0x6500, 0x6524,
  810. 0x6a00, 0x6a04,
  811. 0x6a14, 0x6a38,
  812. 0x6a60, 0x6a70,
  813. 0x6a78, 0x6a78,
  814. 0x6b00, 0x6b0c,
  815. 0x6b1c, 0x6b84,
  816. 0x6bf0, 0x6bf8,
  817. 0x6c00, 0x6c0c,
  818. 0x6c1c, 0x6c84,
  819. 0x6cf0, 0x6cf8,
  820. 0x6d00, 0x6d0c,
  821. 0x6d1c, 0x6d84,
  822. 0x6df0, 0x6df8,
  823. 0x6e00, 0x6e0c,
  824. 0x6e1c, 0x6e84,
  825. 0x6ef0, 0x6ef8,
  826. 0x6f00, 0x6f0c,
  827. 0x6f1c, 0x6f84,
  828. 0x6ff0, 0x6ff8,
  829. 0x7000, 0x700c,
  830. 0x701c, 0x7084,
  831. 0x70f0, 0x70f8,
  832. 0x7100, 0x710c,
  833. 0x711c, 0x7184,
  834. 0x71f0, 0x71f8,
  835. 0x7200, 0x720c,
  836. 0x721c, 0x7284,
  837. 0x72f0, 0x72f8,
  838. 0x7300, 0x730c,
  839. 0x731c, 0x7384,
  840. 0x73f0, 0x73f8,
  841. 0x7400, 0x7450,
  842. 0x7500, 0x7530,
  843. 0x7600, 0x760c,
  844. 0x7614, 0x761c,
  845. 0x7680, 0x76cc,
  846. 0x7700, 0x7798,
  847. 0x77c0, 0x77fc,
  848. 0x7900, 0x79fc,
  849. 0x7b00, 0x7b58,
  850. 0x7b60, 0x7b84,
  851. 0x7b8c, 0x7c38,
  852. 0x7d00, 0x7d38,
  853. 0x7d40, 0x7d80,
  854. 0x7d8c, 0x7ddc,
  855. 0x7de4, 0x7e04,
  856. 0x7e10, 0x7e1c,
  857. 0x7e24, 0x7e38,
  858. 0x7e40, 0x7e44,
  859. 0x7e4c, 0x7e78,
  860. 0x7e80, 0x7ea4,
  861. 0x7eac, 0x7edc,
  862. 0x7ee8, 0x7efc,
  863. 0x8dc0, 0x8e04,
  864. 0x8e10, 0x8e1c,
  865. 0x8e30, 0x8e78,
  866. 0x8ea0, 0x8eb8,
  867. 0x8ec0, 0x8f6c,
  868. 0x8fc0, 0x9008,
  869. 0x9010, 0x9058,
  870. 0x9060, 0x9060,
  871. 0x9068, 0x9074,
  872. 0x90fc, 0x90fc,
  873. 0x9400, 0x9408,
  874. 0x9410, 0x9458,
  875. 0x9600, 0x9600,
  876. 0x9608, 0x9638,
  877. 0x9640, 0x96bc,
  878. 0x9800, 0x9808,
  879. 0x9820, 0x983c,
  880. 0x9850, 0x9864,
  881. 0x9c00, 0x9c6c,
  882. 0x9c80, 0x9cec,
  883. 0x9d00, 0x9d6c,
  884. 0x9d80, 0x9dec,
  885. 0x9e00, 0x9e6c,
  886. 0x9e80, 0x9eec,
  887. 0x9f00, 0x9f6c,
  888. 0x9f80, 0x9fec,
  889. 0xd004, 0xd004,
  890. 0xd010, 0xd03c,
  891. 0xdfc0, 0xdfe0,
  892. 0xe000, 0xea7c,
  893. 0xf000, 0x11110,
  894. 0x11118, 0x11190,
  895. 0x19040, 0x1906c,
  896. 0x19078, 0x19080,
  897. 0x1908c, 0x190e4,
  898. 0x190f0, 0x190f8,
  899. 0x19100, 0x19110,
  900. 0x19120, 0x19124,
  901. 0x19150, 0x19194,
  902. 0x1919c, 0x191b0,
  903. 0x191d0, 0x191e8,
  904. 0x19238, 0x1924c,
  905. 0x193f8, 0x1943c,
  906. 0x1944c, 0x19474,
  907. 0x19490, 0x194e0,
  908. 0x194f0, 0x194f8,
  909. 0x19800, 0x19c08,
  910. 0x19c10, 0x19c90,
  911. 0x19ca0, 0x19ce4,
  912. 0x19cf0, 0x19d40,
  913. 0x19d50, 0x19d94,
  914. 0x19da0, 0x19de8,
  915. 0x19df0, 0x19e40,
  916. 0x19e50, 0x19e90,
  917. 0x19ea0, 0x19f4c,
  918. 0x1a000, 0x1a004,
  919. 0x1a010, 0x1a06c,
  920. 0x1a0b0, 0x1a0e4,
  921. 0x1a0ec, 0x1a0f4,
  922. 0x1a100, 0x1a108,
  923. 0x1a114, 0x1a120,
  924. 0x1a128, 0x1a130,
  925. 0x1a138, 0x1a138,
  926. 0x1a190, 0x1a1c4,
  927. 0x1a1fc, 0x1a1fc,
  928. 0x1e040, 0x1e04c,
  929. 0x1e284, 0x1e28c,
  930. 0x1e2c0, 0x1e2c0,
  931. 0x1e2e0, 0x1e2e0,
  932. 0x1e300, 0x1e384,
  933. 0x1e3c0, 0x1e3c8,
  934. 0x1e440, 0x1e44c,
  935. 0x1e684, 0x1e68c,
  936. 0x1e6c0, 0x1e6c0,
  937. 0x1e6e0, 0x1e6e0,
  938. 0x1e700, 0x1e784,
  939. 0x1e7c0, 0x1e7c8,
  940. 0x1e840, 0x1e84c,
  941. 0x1ea84, 0x1ea8c,
  942. 0x1eac0, 0x1eac0,
  943. 0x1eae0, 0x1eae0,
  944. 0x1eb00, 0x1eb84,
  945. 0x1ebc0, 0x1ebc8,
  946. 0x1ec40, 0x1ec4c,
  947. 0x1ee84, 0x1ee8c,
  948. 0x1eec0, 0x1eec0,
  949. 0x1eee0, 0x1eee0,
  950. 0x1ef00, 0x1ef84,
  951. 0x1efc0, 0x1efc8,
  952. 0x1f040, 0x1f04c,
  953. 0x1f284, 0x1f28c,
  954. 0x1f2c0, 0x1f2c0,
  955. 0x1f2e0, 0x1f2e0,
  956. 0x1f300, 0x1f384,
  957. 0x1f3c0, 0x1f3c8,
  958. 0x1f440, 0x1f44c,
  959. 0x1f684, 0x1f68c,
  960. 0x1f6c0, 0x1f6c0,
  961. 0x1f6e0, 0x1f6e0,
  962. 0x1f700, 0x1f784,
  963. 0x1f7c0, 0x1f7c8,
  964. 0x1f840, 0x1f84c,
  965. 0x1fa84, 0x1fa8c,
  966. 0x1fac0, 0x1fac0,
  967. 0x1fae0, 0x1fae0,
  968. 0x1fb00, 0x1fb84,
  969. 0x1fbc0, 0x1fbc8,
  970. 0x1fc40, 0x1fc4c,
  971. 0x1fe84, 0x1fe8c,
  972. 0x1fec0, 0x1fec0,
  973. 0x1fee0, 0x1fee0,
  974. 0x1ff00, 0x1ff84,
  975. 0x1ffc0, 0x1ffc8,
  976. 0x20000, 0x2002c,
  977. 0x20100, 0x2013c,
  978. 0x20190, 0x201a0,
  979. 0x201a8, 0x201b8,
  980. 0x201c4, 0x201c8,
  981. 0x20200, 0x20318,
  982. 0x20400, 0x204b4,
  983. 0x204c0, 0x20528,
  984. 0x20540, 0x20614,
  985. 0x21000, 0x21040,
  986. 0x2104c, 0x21060,
  987. 0x210c0, 0x210ec,
  988. 0x21200, 0x21268,
  989. 0x21270, 0x21284,
  990. 0x212fc, 0x21388,
  991. 0x21400, 0x21404,
  992. 0x21500, 0x21500,
  993. 0x21510, 0x21518,
  994. 0x2152c, 0x21530,
  995. 0x2153c, 0x2153c,
  996. 0x21550, 0x21554,
  997. 0x21600, 0x21600,
  998. 0x21608, 0x2161c,
  999. 0x21624, 0x21628,
  1000. 0x21630, 0x21634,
  1001. 0x2163c, 0x2163c,
  1002. 0x21700, 0x2171c,
  1003. 0x21780, 0x2178c,
  1004. 0x21800, 0x21818,
  1005. 0x21820, 0x21828,
  1006. 0x21830, 0x21848,
  1007. 0x21850, 0x21854,
  1008. 0x21860, 0x21868,
  1009. 0x21870, 0x21870,
  1010. 0x21878, 0x21898,
  1011. 0x218a0, 0x218a8,
  1012. 0x218b0, 0x218c8,
  1013. 0x218d0, 0x218d4,
  1014. 0x218e0, 0x218e8,
  1015. 0x218f0, 0x218f0,
  1016. 0x218f8, 0x21a18,
  1017. 0x21a20, 0x21a28,
  1018. 0x21a30, 0x21a48,
  1019. 0x21a50, 0x21a54,
  1020. 0x21a60, 0x21a68,
  1021. 0x21a70, 0x21a70,
  1022. 0x21a78, 0x21a98,
  1023. 0x21aa0, 0x21aa8,
  1024. 0x21ab0, 0x21ac8,
  1025. 0x21ad0, 0x21ad4,
  1026. 0x21ae0, 0x21ae8,
  1027. 0x21af0, 0x21af0,
  1028. 0x21af8, 0x21c18,
  1029. 0x21c20, 0x21c20,
  1030. 0x21c28, 0x21c30,
  1031. 0x21c38, 0x21c38,
  1032. 0x21c80, 0x21c98,
  1033. 0x21ca0, 0x21ca8,
  1034. 0x21cb0, 0x21cc8,
  1035. 0x21cd0, 0x21cd4,
  1036. 0x21ce0, 0x21ce8,
  1037. 0x21cf0, 0x21cf0,
  1038. 0x21cf8, 0x21d7c,
  1039. 0x21e00, 0x21e04,
  1040. 0x22000, 0x2202c,
  1041. 0x22100, 0x2213c,
  1042. 0x22190, 0x221a0,
  1043. 0x221a8, 0x221b8,
  1044. 0x221c4, 0x221c8,
  1045. 0x22200, 0x22318,
  1046. 0x22400, 0x224b4,
  1047. 0x224c0, 0x22528,
  1048. 0x22540, 0x22614,
  1049. 0x23000, 0x23040,
  1050. 0x2304c, 0x23060,
  1051. 0x230c0, 0x230ec,
  1052. 0x23200, 0x23268,
  1053. 0x23270, 0x23284,
  1054. 0x232fc, 0x23388,
  1055. 0x23400, 0x23404,
  1056. 0x23500, 0x23500,
  1057. 0x23510, 0x23518,
  1058. 0x2352c, 0x23530,
  1059. 0x2353c, 0x2353c,
  1060. 0x23550, 0x23554,
  1061. 0x23600, 0x23600,
  1062. 0x23608, 0x2361c,
  1063. 0x23624, 0x23628,
  1064. 0x23630, 0x23634,
  1065. 0x2363c, 0x2363c,
  1066. 0x23700, 0x2371c,
  1067. 0x23780, 0x2378c,
  1068. 0x23800, 0x23818,
  1069. 0x23820, 0x23828,
  1070. 0x23830, 0x23848,
  1071. 0x23850, 0x23854,
  1072. 0x23860, 0x23868,
  1073. 0x23870, 0x23870,
  1074. 0x23878, 0x23898,
  1075. 0x238a0, 0x238a8,
  1076. 0x238b0, 0x238c8,
  1077. 0x238d0, 0x238d4,
  1078. 0x238e0, 0x238e8,
  1079. 0x238f0, 0x238f0,
  1080. 0x238f8, 0x23a18,
  1081. 0x23a20, 0x23a28,
  1082. 0x23a30, 0x23a48,
  1083. 0x23a50, 0x23a54,
  1084. 0x23a60, 0x23a68,
  1085. 0x23a70, 0x23a70,
  1086. 0x23a78, 0x23a98,
  1087. 0x23aa0, 0x23aa8,
  1088. 0x23ab0, 0x23ac8,
  1089. 0x23ad0, 0x23ad4,
  1090. 0x23ae0, 0x23ae8,
  1091. 0x23af0, 0x23af0,
  1092. 0x23af8, 0x23c18,
  1093. 0x23c20, 0x23c20,
  1094. 0x23c28, 0x23c30,
  1095. 0x23c38, 0x23c38,
  1096. 0x23c80, 0x23c98,
  1097. 0x23ca0, 0x23ca8,
  1098. 0x23cb0, 0x23cc8,
  1099. 0x23cd0, 0x23cd4,
  1100. 0x23ce0, 0x23ce8,
  1101. 0x23cf0, 0x23cf0,
  1102. 0x23cf8, 0x23d7c,
  1103. 0x23e00, 0x23e04,
  1104. 0x24000, 0x2402c,
  1105. 0x24100, 0x2413c,
  1106. 0x24190, 0x241a0,
  1107. 0x241a8, 0x241b8,
  1108. 0x241c4, 0x241c8,
  1109. 0x24200, 0x24318,
  1110. 0x24400, 0x244b4,
  1111. 0x244c0, 0x24528,
  1112. 0x24540, 0x24614,
  1113. 0x25000, 0x25040,
  1114. 0x2504c, 0x25060,
  1115. 0x250c0, 0x250ec,
  1116. 0x25200, 0x25268,
  1117. 0x25270, 0x25284,
  1118. 0x252fc, 0x25388,
  1119. 0x25400, 0x25404,
  1120. 0x25500, 0x25500,
  1121. 0x25510, 0x25518,
  1122. 0x2552c, 0x25530,
  1123. 0x2553c, 0x2553c,
  1124. 0x25550, 0x25554,
  1125. 0x25600, 0x25600,
  1126. 0x25608, 0x2561c,
  1127. 0x25624, 0x25628,
  1128. 0x25630, 0x25634,
  1129. 0x2563c, 0x2563c,
  1130. 0x25700, 0x2571c,
  1131. 0x25780, 0x2578c,
  1132. 0x25800, 0x25818,
  1133. 0x25820, 0x25828,
  1134. 0x25830, 0x25848,
  1135. 0x25850, 0x25854,
  1136. 0x25860, 0x25868,
  1137. 0x25870, 0x25870,
  1138. 0x25878, 0x25898,
  1139. 0x258a0, 0x258a8,
  1140. 0x258b0, 0x258c8,
  1141. 0x258d0, 0x258d4,
  1142. 0x258e0, 0x258e8,
  1143. 0x258f0, 0x258f0,
  1144. 0x258f8, 0x25a18,
  1145. 0x25a20, 0x25a28,
  1146. 0x25a30, 0x25a48,
  1147. 0x25a50, 0x25a54,
  1148. 0x25a60, 0x25a68,
  1149. 0x25a70, 0x25a70,
  1150. 0x25a78, 0x25a98,
  1151. 0x25aa0, 0x25aa8,
  1152. 0x25ab0, 0x25ac8,
  1153. 0x25ad0, 0x25ad4,
  1154. 0x25ae0, 0x25ae8,
  1155. 0x25af0, 0x25af0,
  1156. 0x25af8, 0x25c18,
  1157. 0x25c20, 0x25c20,
  1158. 0x25c28, 0x25c30,
  1159. 0x25c38, 0x25c38,
  1160. 0x25c80, 0x25c98,
  1161. 0x25ca0, 0x25ca8,
  1162. 0x25cb0, 0x25cc8,
  1163. 0x25cd0, 0x25cd4,
  1164. 0x25ce0, 0x25ce8,
  1165. 0x25cf0, 0x25cf0,
  1166. 0x25cf8, 0x25d7c,
  1167. 0x25e00, 0x25e04,
  1168. 0x26000, 0x2602c,
  1169. 0x26100, 0x2613c,
  1170. 0x26190, 0x261a0,
  1171. 0x261a8, 0x261b8,
  1172. 0x261c4, 0x261c8,
  1173. 0x26200, 0x26318,
  1174. 0x26400, 0x264b4,
  1175. 0x264c0, 0x26528,
  1176. 0x26540, 0x26614,
  1177. 0x27000, 0x27040,
  1178. 0x2704c, 0x27060,
  1179. 0x270c0, 0x270ec,
  1180. 0x27200, 0x27268,
  1181. 0x27270, 0x27284,
  1182. 0x272fc, 0x27388,
  1183. 0x27400, 0x27404,
  1184. 0x27500, 0x27500,
  1185. 0x27510, 0x27518,
  1186. 0x2752c, 0x27530,
  1187. 0x2753c, 0x2753c,
  1188. 0x27550, 0x27554,
  1189. 0x27600, 0x27600,
  1190. 0x27608, 0x2761c,
  1191. 0x27624, 0x27628,
  1192. 0x27630, 0x27634,
  1193. 0x2763c, 0x2763c,
  1194. 0x27700, 0x2771c,
  1195. 0x27780, 0x2778c,
  1196. 0x27800, 0x27818,
  1197. 0x27820, 0x27828,
  1198. 0x27830, 0x27848,
  1199. 0x27850, 0x27854,
  1200. 0x27860, 0x27868,
  1201. 0x27870, 0x27870,
  1202. 0x27878, 0x27898,
  1203. 0x278a0, 0x278a8,
  1204. 0x278b0, 0x278c8,
  1205. 0x278d0, 0x278d4,
  1206. 0x278e0, 0x278e8,
  1207. 0x278f0, 0x278f0,
  1208. 0x278f8, 0x27a18,
  1209. 0x27a20, 0x27a28,
  1210. 0x27a30, 0x27a48,
  1211. 0x27a50, 0x27a54,
  1212. 0x27a60, 0x27a68,
  1213. 0x27a70, 0x27a70,
  1214. 0x27a78, 0x27a98,
  1215. 0x27aa0, 0x27aa8,
  1216. 0x27ab0, 0x27ac8,
  1217. 0x27ad0, 0x27ad4,
  1218. 0x27ae0, 0x27ae8,
  1219. 0x27af0, 0x27af0,
  1220. 0x27af8, 0x27c18,
  1221. 0x27c20, 0x27c20,
  1222. 0x27c28, 0x27c30,
  1223. 0x27c38, 0x27c38,
  1224. 0x27c80, 0x27c98,
  1225. 0x27ca0, 0x27ca8,
  1226. 0x27cb0, 0x27cc8,
  1227. 0x27cd0, 0x27cd4,
  1228. 0x27ce0, 0x27ce8,
  1229. 0x27cf0, 0x27cf0,
  1230. 0x27cf8, 0x27d7c,
  1231. 0x27e00, 0x27e04,
  1232. };
  1233. static const unsigned int t5_reg_ranges[] = {
  1234. 0x1008, 0x10c0,
  1235. 0x10cc, 0x10f8,
  1236. 0x1100, 0x1100,
  1237. 0x110c, 0x1148,
  1238. 0x1180, 0x1184,
  1239. 0x1190, 0x1194,
  1240. 0x11a0, 0x11a4,
  1241. 0x11b0, 0x11b4,
  1242. 0x11fc, 0x123c,
  1243. 0x1280, 0x173c,
  1244. 0x1800, 0x18fc,
  1245. 0x3000, 0x3028,
  1246. 0x3060, 0x30b0,
  1247. 0x30b8, 0x30d8,
  1248. 0x30e0, 0x30fc,
  1249. 0x3140, 0x357c,
  1250. 0x35a8, 0x35cc,
  1251. 0x35ec, 0x35ec,
  1252. 0x3600, 0x5624,
  1253. 0x56cc, 0x56ec,
  1254. 0x56f4, 0x5720,
  1255. 0x5728, 0x575c,
  1256. 0x580c, 0x5814,
  1257. 0x5890, 0x589c,
  1258. 0x58a4, 0x58ac,
  1259. 0x58b8, 0x58bc,
  1260. 0x5940, 0x59c8,
  1261. 0x59d0, 0x59dc,
  1262. 0x59fc, 0x5a18,
  1263. 0x5a60, 0x5a70,
  1264. 0x5a80, 0x5a9c,
  1265. 0x5b94, 0x5bfc,
  1266. 0x6000, 0x6020,
  1267. 0x6028, 0x6040,
  1268. 0x6058, 0x609c,
  1269. 0x60a8, 0x614c,
  1270. 0x7700, 0x7798,
  1271. 0x77c0, 0x78fc,
  1272. 0x7b00, 0x7b58,
  1273. 0x7b60, 0x7b84,
  1274. 0x7b8c, 0x7c54,
  1275. 0x7d00, 0x7d38,
  1276. 0x7d40, 0x7d80,
  1277. 0x7d8c, 0x7ddc,
  1278. 0x7de4, 0x7e04,
  1279. 0x7e10, 0x7e1c,
  1280. 0x7e24, 0x7e38,
  1281. 0x7e40, 0x7e44,
  1282. 0x7e4c, 0x7e78,
  1283. 0x7e80, 0x7edc,
  1284. 0x7ee8, 0x7efc,
  1285. 0x8dc0, 0x8de0,
  1286. 0x8df8, 0x8e04,
  1287. 0x8e10, 0x8e84,
  1288. 0x8ea0, 0x8f84,
  1289. 0x8fc0, 0x9058,
  1290. 0x9060, 0x9060,
  1291. 0x9068, 0x90f8,
  1292. 0x9400, 0x9408,
  1293. 0x9410, 0x9470,
  1294. 0x9600, 0x9600,
  1295. 0x9608, 0x9638,
  1296. 0x9640, 0x96f4,
  1297. 0x9800, 0x9808,
  1298. 0x9820, 0x983c,
  1299. 0x9850, 0x9864,
  1300. 0x9c00, 0x9c6c,
  1301. 0x9c80, 0x9cec,
  1302. 0x9d00, 0x9d6c,
  1303. 0x9d80, 0x9dec,
  1304. 0x9e00, 0x9e6c,
  1305. 0x9e80, 0x9eec,
  1306. 0x9f00, 0x9f6c,
  1307. 0x9f80, 0xa020,
  1308. 0xd004, 0xd004,
  1309. 0xd010, 0xd03c,
  1310. 0xdfc0, 0xdfe0,
  1311. 0xe000, 0x1106c,
  1312. 0x11074, 0x11088,
  1313. 0x1109c, 0x1117c,
  1314. 0x11190, 0x11204,
  1315. 0x19040, 0x1906c,
  1316. 0x19078, 0x19080,
  1317. 0x1908c, 0x190e8,
  1318. 0x190f0, 0x190f8,
  1319. 0x19100, 0x19110,
  1320. 0x19120, 0x19124,
  1321. 0x19150, 0x19194,
  1322. 0x1919c, 0x191b0,
  1323. 0x191d0, 0x191e8,
  1324. 0x19238, 0x19290,
  1325. 0x193f8, 0x19428,
  1326. 0x19430, 0x19444,
  1327. 0x1944c, 0x1946c,
  1328. 0x19474, 0x19474,
  1329. 0x19490, 0x194cc,
  1330. 0x194f0, 0x194f8,
  1331. 0x19c00, 0x19c08,
  1332. 0x19c10, 0x19c60,
  1333. 0x19c94, 0x19ce4,
  1334. 0x19cf0, 0x19d40,
  1335. 0x19d50, 0x19d94,
  1336. 0x19da0, 0x19de8,
  1337. 0x19df0, 0x19e10,
  1338. 0x19e50, 0x19e90,
  1339. 0x19ea0, 0x19f24,
  1340. 0x19f34, 0x19f34,
  1341. 0x19f40, 0x19f50,
  1342. 0x19f90, 0x19fb4,
  1343. 0x19fc4, 0x19fe4,
  1344. 0x1a000, 0x1a004,
  1345. 0x1a010, 0x1a06c,
  1346. 0x1a0b0, 0x1a0e4,
  1347. 0x1a0ec, 0x1a0f8,
  1348. 0x1a100, 0x1a108,
  1349. 0x1a114, 0x1a120,
  1350. 0x1a128, 0x1a130,
  1351. 0x1a138, 0x1a138,
  1352. 0x1a190, 0x1a1c4,
  1353. 0x1a1fc, 0x1a1fc,
  1354. 0x1e008, 0x1e00c,
  1355. 0x1e040, 0x1e044,
  1356. 0x1e04c, 0x1e04c,
  1357. 0x1e284, 0x1e290,
  1358. 0x1e2c0, 0x1e2c0,
  1359. 0x1e2e0, 0x1e2e0,
  1360. 0x1e300, 0x1e384,
  1361. 0x1e3c0, 0x1e3c8,
  1362. 0x1e408, 0x1e40c,
  1363. 0x1e440, 0x1e444,
  1364. 0x1e44c, 0x1e44c,
  1365. 0x1e684, 0x1e690,
  1366. 0x1e6c0, 0x1e6c0,
  1367. 0x1e6e0, 0x1e6e0,
  1368. 0x1e700, 0x1e784,
  1369. 0x1e7c0, 0x1e7c8,
  1370. 0x1e808, 0x1e80c,
  1371. 0x1e840, 0x1e844,
  1372. 0x1e84c, 0x1e84c,
  1373. 0x1ea84, 0x1ea90,
  1374. 0x1eac0, 0x1eac0,
  1375. 0x1eae0, 0x1eae0,
  1376. 0x1eb00, 0x1eb84,
  1377. 0x1ebc0, 0x1ebc8,
  1378. 0x1ec08, 0x1ec0c,
  1379. 0x1ec40, 0x1ec44,
  1380. 0x1ec4c, 0x1ec4c,
  1381. 0x1ee84, 0x1ee90,
  1382. 0x1eec0, 0x1eec0,
  1383. 0x1eee0, 0x1eee0,
  1384. 0x1ef00, 0x1ef84,
  1385. 0x1efc0, 0x1efc8,
  1386. 0x1f008, 0x1f00c,
  1387. 0x1f040, 0x1f044,
  1388. 0x1f04c, 0x1f04c,
  1389. 0x1f284, 0x1f290,
  1390. 0x1f2c0, 0x1f2c0,
  1391. 0x1f2e0, 0x1f2e0,
  1392. 0x1f300, 0x1f384,
  1393. 0x1f3c0, 0x1f3c8,
  1394. 0x1f408, 0x1f40c,
  1395. 0x1f440, 0x1f444,
  1396. 0x1f44c, 0x1f44c,
  1397. 0x1f684, 0x1f690,
  1398. 0x1f6c0, 0x1f6c0,
  1399. 0x1f6e0, 0x1f6e0,
  1400. 0x1f700, 0x1f784,
  1401. 0x1f7c0, 0x1f7c8,
  1402. 0x1f808, 0x1f80c,
  1403. 0x1f840, 0x1f844,
  1404. 0x1f84c, 0x1f84c,
  1405. 0x1fa84, 0x1fa90,
  1406. 0x1fac0, 0x1fac0,
  1407. 0x1fae0, 0x1fae0,
  1408. 0x1fb00, 0x1fb84,
  1409. 0x1fbc0, 0x1fbc8,
  1410. 0x1fc08, 0x1fc0c,
  1411. 0x1fc40, 0x1fc44,
  1412. 0x1fc4c, 0x1fc4c,
  1413. 0x1fe84, 0x1fe90,
  1414. 0x1fec0, 0x1fec0,
  1415. 0x1fee0, 0x1fee0,
  1416. 0x1ff00, 0x1ff84,
  1417. 0x1ffc0, 0x1ffc8,
  1418. 0x30000, 0x30030,
  1419. 0x30100, 0x30144,
  1420. 0x30190, 0x301a0,
  1421. 0x301a8, 0x301b8,
  1422. 0x301c4, 0x301c8,
  1423. 0x301d0, 0x301d0,
  1424. 0x30200, 0x30318,
  1425. 0x30400, 0x304b4,
  1426. 0x304c0, 0x3052c,
  1427. 0x30540, 0x3061c,
  1428. 0x30800, 0x30828,
  1429. 0x30834, 0x30834,
  1430. 0x308c0, 0x30908,
  1431. 0x30910, 0x309ac,
  1432. 0x30a00, 0x30a14,
  1433. 0x30a1c, 0x30a2c,
  1434. 0x30a44, 0x30a50,
  1435. 0x30a74, 0x30a74,
  1436. 0x30a7c, 0x30afc,
  1437. 0x30b08, 0x30c24,
  1438. 0x30d00, 0x30d00,
  1439. 0x30d08, 0x30d14,
  1440. 0x30d1c, 0x30d20,
  1441. 0x30d3c, 0x30d3c,
  1442. 0x30d48, 0x30d50,
  1443. 0x31200, 0x3120c,
  1444. 0x31220, 0x31220,
  1445. 0x31240, 0x31240,
  1446. 0x31600, 0x3160c,
  1447. 0x31a00, 0x31a1c,
  1448. 0x31e00, 0x31e20,
  1449. 0x31e38, 0x31e3c,
  1450. 0x31e80, 0x31e80,
  1451. 0x31e88, 0x31ea8,
  1452. 0x31eb0, 0x31eb4,
  1453. 0x31ec8, 0x31ed4,
  1454. 0x31fb8, 0x32004,
  1455. 0x32200, 0x32200,
  1456. 0x32208, 0x32240,
  1457. 0x32248, 0x32280,
  1458. 0x32288, 0x322c0,
  1459. 0x322c8, 0x322fc,
  1460. 0x32600, 0x32630,
  1461. 0x32a00, 0x32abc,
  1462. 0x32b00, 0x32b10,
  1463. 0x32b20, 0x32b30,
  1464. 0x32b40, 0x32b50,
  1465. 0x32b60, 0x32b70,
  1466. 0x33000, 0x33028,
  1467. 0x33030, 0x33048,
  1468. 0x33060, 0x33068,
  1469. 0x33070, 0x3309c,
  1470. 0x330f0, 0x33128,
  1471. 0x33130, 0x33148,
  1472. 0x33160, 0x33168,
  1473. 0x33170, 0x3319c,
  1474. 0x331f0, 0x33238,
  1475. 0x33240, 0x33240,
  1476. 0x33248, 0x33250,
  1477. 0x3325c, 0x33264,
  1478. 0x33270, 0x332b8,
  1479. 0x332c0, 0x332e4,
  1480. 0x332f8, 0x33338,
  1481. 0x33340, 0x33340,
  1482. 0x33348, 0x33350,
  1483. 0x3335c, 0x33364,
  1484. 0x33370, 0x333b8,
  1485. 0x333c0, 0x333e4,
  1486. 0x333f8, 0x33428,
  1487. 0x33430, 0x33448,
  1488. 0x33460, 0x33468,
  1489. 0x33470, 0x3349c,
  1490. 0x334f0, 0x33528,
  1491. 0x33530, 0x33548,
  1492. 0x33560, 0x33568,
  1493. 0x33570, 0x3359c,
  1494. 0x335f0, 0x33638,
  1495. 0x33640, 0x33640,
  1496. 0x33648, 0x33650,
  1497. 0x3365c, 0x33664,
  1498. 0x33670, 0x336b8,
  1499. 0x336c0, 0x336e4,
  1500. 0x336f8, 0x33738,
  1501. 0x33740, 0x33740,
  1502. 0x33748, 0x33750,
  1503. 0x3375c, 0x33764,
  1504. 0x33770, 0x337b8,
  1505. 0x337c0, 0x337e4,
  1506. 0x337f8, 0x337fc,
  1507. 0x33814, 0x33814,
  1508. 0x3382c, 0x3382c,
  1509. 0x33880, 0x3388c,
  1510. 0x338e8, 0x338ec,
  1511. 0x33900, 0x33928,
  1512. 0x33930, 0x33948,
  1513. 0x33960, 0x33968,
  1514. 0x33970, 0x3399c,
  1515. 0x339f0, 0x33a38,
  1516. 0x33a40, 0x33a40,
  1517. 0x33a48, 0x33a50,
  1518. 0x33a5c, 0x33a64,
  1519. 0x33a70, 0x33ab8,
  1520. 0x33ac0, 0x33ae4,
  1521. 0x33af8, 0x33b10,
  1522. 0x33b28, 0x33b28,
  1523. 0x33b3c, 0x33b50,
  1524. 0x33bf0, 0x33c10,
  1525. 0x33c28, 0x33c28,
  1526. 0x33c3c, 0x33c50,
  1527. 0x33cf0, 0x33cfc,
  1528. 0x34000, 0x34030,
  1529. 0x34100, 0x34144,
  1530. 0x34190, 0x341a0,
  1531. 0x341a8, 0x341b8,
  1532. 0x341c4, 0x341c8,
  1533. 0x341d0, 0x341d0,
  1534. 0x34200, 0x34318,
  1535. 0x34400, 0x344b4,
  1536. 0x344c0, 0x3452c,
  1537. 0x34540, 0x3461c,
  1538. 0x34800, 0x34828,
  1539. 0x34834, 0x34834,
  1540. 0x348c0, 0x34908,
  1541. 0x34910, 0x349ac,
  1542. 0x34a00, 0x34a14,
  1543. 0x34a1c, 0x34a2c,
  1544. 0x34a44, 0x34a50,
  1545. 0x34a74, 0x34a74,
  1546. 0x34a7c, 0x34afc,
  1547. 0x34b08, 0x34c24,
  1548. 0x34d00, 0x34d00,
  1549. 0x34d08, 0x34d14,
  1550. 0x34d1c, 0x34d20,
  1551. 0x34d3c, 0x34d3c,
  1552. 0x34d48, 0x34d50,
  1553. 0x35200, 0x3520c,
  1554. 0x35220, 0x35220,
  1555. 0x35240, 0x35240,
  1556. 0x35600, 0x3560c,
  1557. 0x35a00, 0x35a1c,
  1558. 0x35e00, 0x35e20,
  1559. 0x35e38, 0x35e3c,
  1560. 0x35e80, 0x35e80,
  1561. 0x35e88, 0x35ea8,
  1562. 0x35eb0, 0x35eb4,
  1563. 0x35ec8, 0x35ed4,
  1564. 0x35fb8, 0x36004,
  1565. 0x36200, 0x36200,
  1566. 0x36208, 0x36240,
  1567. 0x36248, 0x36280,
  1568. 0x36288, 0x362c0,
  1569. 0x362c8, 0x362fc,
  1570. 0x36600, 0x36630,
  1571. 0x36a00, 0x36abc,
  1572. 0x36b00, 0x36b10,
  1573. 0x36b20, 0x36b30,
  1574. 0x36b40, 0x36b50,
  1575. 0x36b60, 0x36b70,
  1576. 0x37000, 0x37028,
  1577. 0x37030, 0x37048,
  1578. 0x37060, 0x37068,
  1579. 0x37070, 0x3709c,
  1580. 0x370f0, 0x37128,
  1581. 0x37130, 0x37148,
  1582. 0x37160, 0x37168,
  1583. 0x37170, 0x3719c,
  1584. 0x371f0, 0x37238,
  1585. 0x37240, 0x37240,
  1586. 0x37248, 0x37250,
  1587. 0x3725c, 0x37264,
  1588. 0x37270, 0x372b8,
  1589. 0x372c0, 0x372e4,
  1590. 0x372f8, 0x37338,
  1591. 0x37340, 0x37340,
  1592. 0x37348, 0x37350,
  1593. 0x3735c, 0x37364,
  1594. 0x37370, 0x373b8,
  1595. 0x373c0, 0x373e4,
  1596. 0x373f8, 0x37428,
  1597. 0x37430, 0x37448,
  1598. 0x37460, 0x37468,
  1599. 0x37470, 0x3749c,
  1600. 0x374f0, 0x37528,
  1601. 0x37530, 0x37548,
  1602. 0x37560, 0x37568,
  1603. 0x37570, 0x3759c,
  1604. 0x375f0, 0x37638,
  1605. 0x37640, 0x37640,
  1606. 0x37648, 0x37650,
  1607. 0x3765c, 0x37664,
  1608. 0x37670, 0x376b8,
  1609. 0x376c0, 0x376e4,
  1610. 0x376f8, 0x37738,
  1611. 0x37740, 0x37740,
  1612. 0x37748, 0x37750,
  1613. 0x3775c, 0x37764,
  1614. 0x37770, 0x377b8,
  1615. 0x377c0, 0x377e4,
  1616. 0x377f8, 0x377fc,
  1617. 0x37814, 0x37814,
  1618. 0x3782c, 0x3782c,
  1619. 0x37880, 0x3788c,
  1620. 0x378e8, 0x378ec,
  1621. 0x37900, 0x37928,
  1622. 0x37930, 0x37948,
  1623. 0x37960, 0x37968,
  1624. 0x37970, 0x3799c,
  1625. 0x379f0, 0x37a38,
  1626. 0x37a40, 0x37a40,
  1627. 0x37a48, 0x37a50,
  1628. 0x37a5c, 0x37a64,
  1629. 0x37a70, 0x37ab8,
  1630. 0x37ac0, 0x37ae4,
  1631. 0x37af8, 0x37b10,
  1632. 0x37b28, 0x37b28,
  1633. 0x37b3c, 0x37b50,
  1634. 0x37bf0, 0x37c10,
  1635. 0x37c28, 0x37c28,
  1636. 0x37c3c, 0x37c50,
  1637. 0x37cf0, 0x37cfc,
  1638. 0x38000, 0x38030,
  1639. 0x38100, 0x38144,
  1640. 0x38190, 0x381a0,
  1641. 0x381a8, 0x381b8,
  1642. 0x381c4, 0x381c8,
  1643. 0x381d0, 0x381d0,
  1644. 0x38200, 0x38318,
  1645. 0x38400, 0x384b4,
  1646. 0x384c0, 0x3852c,
  1647. 0x38540, 0x3861c,
  1648. 0x38800, 0x38828,
  1649. 0x38834, 0x38834,
  1650. 0x388c0, 0x38908,
  1651. 0x38910, 0x389ac,
  1652. 0x38a00, 0x38a14,
  1653. 0x38a1c, 0x38a2c,
  1654. 0x38a44, 0x38a50,
  1655. 0x38a74, 0x38a74,
  1656. 0x38a7c, 0x38afc,
  1657. 0x38b08, 0x38c24,
  1658. 0x38d00, 0x38d00,
  1659. 0x38d08, 0x38d14,
  1660. 0x38d1c, 0x38d20,
  1661. 0x38d3c, 0x38d3c,
  1662. 0x38d48, 0x38d50,
  1663. 0x39200, 0x3920c,
  1664. 0x39220, 0x39220,
  1665. 0x39240, 0x39240,
  1666. 0x39600, 0x3960c,
  1667. 0x39a00, 0x39a1c,
  1668. 0x39e00, 0x39e20,
  1669. 0x39e38, 0x39e3c,
  1670. 0x39e80, 0x39e80,
  1671. 0x39e88, 0x39ea8,
  1672. 0x39eb0, 0x39eb4,
  1673. 0x39ec8, 0x39ed4,
  1674. 0x39fb8, 0x3a004,
  1675. 0x3a200, 0x3a200,
  1676. 0x3a208, 0x3a240,
  1677. 0x3a248, 0x3a280,
  1678. 0x3a288, 0x3a2c0,
  1679. 0x3a2c8, 0x3a2fc,
  1680. 0x3a600, 0x3a630,
  1681. 0x3aa00, 0x3aabc,
  1682. 0x3ab00, 0x3ab10,
  1683. 0x3ab20, 0x3ab30,
  1684. 0x3ab40, 0x3ab50,
  1685. 0x3ab60, 0x3ab70,
  1686. 0x3b000, 0x3b028,
  1687. 0x3b030, 0x3b048,
  1688. 0x3b060, 0x3b068,
  1689. 0x3b070, 0x3b09c,
  1690. 0x3b0f0, 0x3b128,
  1691. 0x3b130, 0x3b148,
  1692. 0x3b160, 0x3b168,
  1693. 0x3b170, 0x3b19c,
  1694. 0x3b1f0, 0x3b238,
  1695. 0x3b240, 0x3b240,
  1696. 0x3b248, 0x3b250,
  1697. 0x3b25c, 0x3b264,
  1698. 0x3b270, 0x3b2b8,
  1699. 0x3b2c0, 0x3b2e4,
  1700. 0x3b2f8, 0x3b338,
  1701. 0x3b340, 0x3b340,
  1702. 0x3b348, 0x3b350,
  1703. 0x3b35c, 0x3b364,
  1704. 0x3b370, 0x3b3b8,
  1705. 0x3b3c0, 0x3b3e4,
  1706. 0x3b3f8, 0x3b428,
  1707. 0x3b430, 0x3b448,
  1708. 0x3b460, 0x3b468,
  1709. 0x3b470, 0x3b49c,
  1710. 0x3b4f0, 0x3b528,
  1711. 0x3b530, 0x3b548,
  1712. 0x3b560, 0x3b568,
  1713. 0x3b570, 0x3b59c,
  1714. 0x3b5f0, 0x3b638,
  1715. 0x3b640, 0x3b640,
  1716. 0x3b648, 0x3b650,
  1717. 0x3b65c, 0x3b664,
  1718. 0x3b670, 0x3b6b8,
  1719. 0x3b6c0, 0x3b6e4,
  1720. 0x3b6f8, 0x3b738,
  1721. 0x3b740, 0x3b740,
  1722. 0x3b748, 0x3b750,
  1723. 0x3b75c, 0x3b764,
  1724. 0x3b770, 0x3b7b8,
  1725. 0x3b7c0, 0x3b7e4,
  1726. 0x3b7f8, 0x3b7fc,
  1727. 0x3b814, 0x3b814,
  1728. 0x3b82c, 0x3b82c,
  1729. 0x3b880, 0x3b88c,
  1730. 0x3b8e8, 0x3b8ec,
  1731. 0x3b900, 0x3b928,
  1732. 0x3b930, 0x3b948,
  1733. 0x3b960, 0x3b968,
  1734. 0x3b970, 0x3b99c,
  1735. 0x3b9f0, 0x3ba38,
  1736. 0x3ba40, 0x3ba40,
  1737. 0x3ba48, 0x3ba50,
  1738. 0x3ba5c, 0x3ba64,
  1739. 0x3ba70, 0x3bab8,
  1740. 0x3bac0, 0x3bae4,
  1741. 0x3baf8, 0x3bb10,
  1742. 0x3bb28, 0x3bb28,
  1743. 0x3bb3c, 0x3bb50,
  1744. 0x3bbf0, 0x3bc10,
  1745. 0x3bc28, 0x3bc28,
  1746. 0x3bc3c, 0x3bc50,
  1747. 0x3bcf0, 0x3bcfc,
  1748. 0x3c000, 0x3c030,
  1749. 0x3c100, 0x3c144,
  1750. 0x3c190, 0x3c1a0,
  1751. 0x3c1a8, 0x3c1b8,
  1752. 0x3c1c4, 0x3c1c8,
  1753. 0x3c1d0, 0x3c1d0,
  1754. 0x3c200, 0x3c318,
  1755. 0x3c400, 0x3c4b4,
  1756. 0x3c4c0, 0x3c52c,
  1757. 0x3c540, 0x3c61c,
  1758. 0x3c800, 0x3c828,
  1759. 0x3c834, 0x3c834,
  1760. 0x3c8c0, 0x3c908,
  1761. 0x3c910, 0x3c9ac,
  1762. 0x3ca00, 0x3ca14,
  1763. 0x3ca1c, 0x3ca2c,
  1764. 0x3ca44, 0x3ca50,
  1765. 0x3ca74, 0x3ca74,
  1766. 0x3ca7c, 0x3cafc,
  1767. 0x3cb08, 0x3cc24,
  1768. 0x3cd00, 0x3cd00,
  1769. 0x3cd08, 0x3cd14,
  1770. 0x3cd1c, 0x3cd20,
  1771. 0x3cd3c, 0x3cd3c,
  1772. 0x3cd48, 0x3cd50,
  1773. 0x3d200, 0x3d20c,
  1774. 0x3d220, 0x3d220,
  1775. 0x3d240, 0x3d240,
  1776. 0x3d600, 0x3d60c,
  1777. 0x3da00, 0x3da1c,
  1778. 0x3de00, 0x3de20,
  1779. 0x3de38, 0x3de3c,
  1780. 0x3de80, 0x3de80,
  1781. 0x3de88, 0x3dea8,
  1782. 0x3deb0, 0x3deb4,
  1783. 0x3dec8, 0x3ded4,
  1784. 0x3dfb8, 0x3e004,
  1785. 0x3e200, 0x3e200,
  1786. 0x3e208, 0x3e240,
  1787. 0x3e248, 0x3e280,
  1788. 0x3e288, 0x3e2c0,
  1789. 0x3e2c8, 0x3e2fc,
  1790. 0x3e600, 0x3e630,
  1791. 0x3ea00, 0x3eabc,
  1792. 0x3eb00, 0x3eb10,
  1793. 0x3eb20, 0x3eb30,
  1794. 0x3eb40, 0x3eb50,
  1795. 0x3eb60, 0x3eb70,
  1796. 0x3f000, 0x3f028,
  1797. 0x3f030, 0x3f048,
  1798. 0x3f060, 0x3f068,
  1799. 0x3f070, 0x3f09c,
  1800. 0x3f0f0, 0x3f128,
  1801. 0x3f130, 0x3f148,
  1802. 0x3f160, 0x3f168,
  1803. 0x3f170, 0x3f19c,
  1804. 0x3f1f0, 0x3f238,
  1805. 0x3f240, 0x3f240,
  1806. 0x3f248, 0x3f250,
  1807. 0x3f25c, 0x3f264,
  1808. 0x3f270, 0x3f2b8,
  1809. 0x3f2c0, 0x3f2e4,
  1810. 0x3f2f8, 0x3f338,
  1811. 0x3f340, 0x3f340,
  1812. 0x3f348, 0x3f350,
  1813. 0x3f35c, 0x3f364,
  1814. 0x3f370, 0x3f3b8,
  1815. 0x3f3c0, 0x3f3e4,
  1816. 0x3f3f8, 0x3f428,
  1817. 0x3f430, 0x3f448,
  1818. 0x3f460, 0x3f468,
  1819. 0x3f470, 0x3f49c,
  1820. 0x3f4f0, 0x3f528,
  1821. 0x3f530, 0x3f548,
  1822. 0x3f560, 0x3f568,
  1823. 0x3f570, 0x3f59c,
  1824. 0x3f5f0, 0x3f638,
  1825. 0x3f640, 0x3f640,
  1826. 0x3f648, 0x3f650,
  1827. 0x3f65c, 0x3f664,
  1828. 0x3f670, 0x3f6b8,
  1829. 0x3f6c0, 0x3f6e4,
  1830. 0x3f6f8, 0x3f738,
  1831. 0x3f740, 0x3f740,
  1832. 0x3f748, 0x3f750,
  1833. 0x3f75c, 0x3f764,
  1834. 0x3f770, 0x3f7b8,
  1835. 0x3f7c0, 0x3f7e4,
  1836. 0x3f7f8, 0x3f7fc,
  1837. 0x3f814, 0x3f814,
  1838. 0x3f82c, 0x3f82c,
  1839. 0x3f880, 0x3f88c,
  1840. 0x3f8e8, 0x3f8ec,
  1841. 0x3f900, 0x3f928,
  1842. 0x3f930, 0x3f948,
  1843. 0x3f960, 0x3f968,
  1844. 0x3f970, 0x3f99c,
  1845. 0x3f9f0, 0x3fa38,
  1846. 0x3fa40, 0x3fa40,
  1847. 0x3fa48, 0x3fa50,
  1848. 0x3fa5c, 0x3fa64,
  1849. 0x3fa70, 0x3fab8,
  1850. 0x3fac0, 0x3fae4,
  1851. 0x3faf8, 0x3fb10,
  1852. 0x3fb28, 0x3fb28,
  1853. 0x3fb3c, 0x3fb50,
  1854. 0x3fbf0, 0x3fc10,
  1855. 0x3fc28, 0x3fc28,
  1856. 0x3fc3c, 0x3fc50,
  1857. 0x3fcf0, 0x3fcfc,
  1858. 0x40000, 0x4000c,
  1859. 0x40040, 0x40050,
  1860. 0x40060, 0x40068,
  1861. 0x4007c, 0x4008c,
  1862. 0x40094, 0x400b0,
  1863. 0x400c0, 0x40144,
  1864. 0x40180, 0x4018c,
  1865. 0x40200, 0x40254,
  1866. 0x40260, 0x40264,
  1867. 0x40270, 0x40288,
  1868. 0x40290, 0x40298,
  1869. 0x402ac, 0x402c8,
  1870. 0x402d0, 0x402e0,
  1871. 0x402f0, 0x402f0,
  1872. 0x40300, 0x4033c,
  1873. 0x403f8, 0x403fc,
  1874. 0x41304, 0x413c4,
  1875. 0x41400, 0x4140c,
  1876. 0x41414, 0x4141c,
  1877. 0x41480, 0x414d0,
  1878. 0x44000, 0x44054,
  1879. 0x4405c, 0x44078,
  1880. 0x440c0, 0x44174,
  1881. 0x44180, 0x441ac,
  1882. 0x441b4, 0x441b8,
  1883. 0x441c0, 0x44254,
  1884. 0x4425c, 0x44278,
  1885. 0x442c0, 0x44374,
  1886. 0x44380, 0x443ac,
  1887. 0x443b4, 0x443b8,
  1888. 0x443c0, 0x44454,
  1889. 0x4445c, 0x44478,
  1890. 0x444c0, 0x44574,
  1891. 0x44580, 0x445ac,
  1892. 0x445b4, 0x445b8,
  1893. 0x445c0, 0x44654,
  1894. 0x4465c, 0x44678,
  1895. 0x446c0, 0x44774,
  1896. 0x44780, 0x447ac,
  1897. 0x447b4, 0x447b8,
  1898. 0x447c0, 0x44854,
  1899. 0x4485c, 0x44878,
  1900. 0x448c0, 0x44974,
  1901. 0x44980, 0x449ac,
  1902. 0x449b4, 0x449b8,
  1903. 0x449c0, 0x449fc,
  1904. 0x45000, 0x45004,
  1905. 0x45010, 0x45030,
  1906. 0x45040, 0x45060,
  1907. 0x45068, 0x45068,
  1908. 0x45080, 0x45084,
  1909. 0x450a0, 0x450b0,
  1910. 0x45200, 0x45204,
  1911. 0x45210, 0x45230,
  1912. 0x45240, 0x45260,
  1913. 0x45268, 0x45268,
  1914. 0x45280, 0x45284,
  1915. 0x452a0, 0x452b0,
  1916. 0x460c0, 0x460e4,
  1917. 0x47000, 0x4703c,
  1918. 0x47044, 0x4708c,
  1919. 0x47200, 0x47250,
  1920. 0x47400, 0x47408,
  1921. 0x47414, 0x47420,
  1922. 0x47600, 0x47618,
  1923. 0x47800, 0x47814,
  1924. 0x48000, 0x4800c,
  1925. 0x48040, 0x48050,
  1926. 0x48060, 0x48068,
  1927. 0x4807c, 0x4808c,
  1928. 0x48094, 0x480b0,
  1929. 0x480c0, 0x48144,
  1930. 0x48180, 0x4818c,
  1931. 0x48200, 0x48254,
  1932. 0x48260, 0x48264,
  1933. 0x48270, 0x48288,
  1934. 0x48290, 0x48298,
  1935. 0x482ac, 0x482c8,
  1936. 0x482d0, 0x482e0,
  1937. 0x482f0, 0x482f0,
  1938. 0x48300, 0x4833c,
  1939. 0x483f8, 0x483fc,
  1940. 0x49304, 0x493c4,
  1941. 0x49400, 0x4940c,
  1942. 0x49414, 0x4941c,
  1943. 0x49480, 0x494d0,
  1944. 0x4c000, 0x4c054,
  1945. 0x4c05c, 0x4c078,
  1946. 0x4c0c0, 0x4c174,
  1947. 0x4c180, 0x4c1ac,
  1948. 0x4c1b4, 0x4c1b8,
  1949. 0x4c1c0, 0x4c254,
  1950. 0x4c25c, 0x4c278,
  1951. 0x4c2c0, 0x4c374,
  1952. 0x4c380, 0x4c3ac,
  1953. 0x4c3b4, 0x4c3b8,
  1954. 0x4c3c0, 0x4c454,
  1955. 0x4c45c, 0x4c478,
  1956. 0x4c4c0, 0x4c574,
  1957. 0x4c580, 0x4c5ac,
  1958. 0x4c5b4, 0x4c5b8,
  1959. 0x4c5c0, 0x4c654,
  1960. 0x4c65c, 0x4c678,
  1961. 0x4c6c0, 0x4c774,
  1962. 0x4c780, 0x4c7ac,
  1963. 0x4c7b4, 0x4c7b8,
  1964. 0x4c7c0, 0x4c854,
  1965. 0x4c85c, 0x4c878,
  1966. 0x4c8c0, 0x4c974,
  1967. 0x4c980, 0x4c9ac,
  1968. 0x4c9b4, 0x4c9b8,
  1969. 0x4c9c0, 0x4c9fc,
  1970. 0x4d000, 0x4d004,
  1971. 0x4d010, 0x4d030,
  1972. 0x4d040, 0x4d060,
  1973. 0x4d068, 0x4d068,
  1974. 0x4d080, 0x4d084,
  1975. 0x4d0a0, 0x4d0b0,
  1976. 0x4d200, 0x4d204,
  1977. 0x4d210, 0x4d230,
  1978. 0x4d240, 0x4d260,
  1979. 0x4d268, 0x4d268,
  1980. 0x4d280, 0x4d284,
  1981. 0x4d2a0, 0x4d2b0,
  1982. 0x4e0c0, 0x4e0e4,
  1983. 0x4f000, 0x4f03c,
  1984. 0x4f044, 0x4f08c,
  1985. 0x4f200, 0x4f250,
  1986. 0x4f400, 0x4f408,
  1987. 0x4f414, 0x4f420,
  1988. 0x4f600, 0x4f618,
  1989. 0x4f800, 0x4f814,
  1990. 0x50000, 0x50084,
  1991. 0x50090, 0x500cc,
  1992. 0x50400, 0x50400,
  1993. 0x50800, 0x50884,
  1994. 0x50890, 0x508cc,
  1995. 0x50c00, 0x50c00,
  1996. 0x51000, 0x5101c,
  1997. 0x51300, 0x51308,
  1998. };
  1999. static const unsigned int t6_reg_ranges[] = {
  2000. 0x1008, 0x101c,
  2001. 0x1024, 0x10a8,
  2002. 0x10b4, 0x10f8,
  2003. 0x1100, 0x1114,
  2004. 0x111c, 0x112c,
  2005. 0x1138, 0x113c,
  2006. 0x1144, 0x114c,
  2007. 0x1180, 0x1184,
  2008. 0x1190, 0x1194,
  2009. 0x11a0, 0x11a4,
  2010. 0x11b0, 0x11b4,
  2011. 0x11fc, 0x123c,
  2012. 0x1254, 0x1274,
  2013. 0x1280, 0x133c,
  2014. 0x1800, 0x18fc,
  2015. 0x3000, 0x302c,
  2016. 0x3060, 0x30b0,
  2017. 0x30b8, 0x30d8,
  2018. 0x30e0, 0x30fc,
  2019. 0x3140, 0x357c,
  2020. 0x35a8, 0x35cc,
  2021. 0x35ec, 0x35ec,
  2022. 0x3600, 0x5624,
  2023. 0x56cc, 0x56ec,
  2024. 0x56f4, 0x5720,
  2025. 0x5728, 0x575c,
  2026. 0x580c, 0x5814,
  2027. 0x5890, 0x589c,
  2028. 0x58a4, 0x58ac,
  2029. 0x58b8, 0x58bc,
  2030. 0x5940, 0x595c,
  2031. 0x5980, 0x598c,
  2032. 0x59b0, 0x59c8,
  2033. 0x59d0, 0x59dc,
  2034. 0x59fc, 0x5a18,
  2035. 0x5a60, 0x5a6c,
  2036. 0x5a80, 0x5a8c,
  2037. 0x5a94, 0x5a9c,
  2038. 0x5b94, 0x5bfc,
  2039. 0x5c10, 0x5e48,
  2040. 0x5e50, 0x5e94,
  2041. 0x5ea0, 0x5eb0,
  2042. 0x5ec0, 0x5ec0,
  2043. 0x5ec8, 0x5ed0,
  2044. 0x5ee0, 0x5ee0,
  2045. 0x5ef0, 0x5ef0,
  2046. 0x5f00, 0x5f00,
  2047. 0x6000, 0x6020,
  2048. 0x6028, 0x6040,
  2049. 0x6058, 0x609c,
  2050. 0x60a8, 0x619c,
  2051. 0x7700, 0x7798,
  2052. 0x77c0, 0x7880,
  2053. 0x78cc, 0x78fc,
  2054. 0x7b00, 0x7b58,
  2055. 0x7b60, 0x7b84,
  2056. 0x7b8c, 0x7c54,
  2057. 0x7d00, 0x7d38,
  2058. 0x7d40, 0x7d84,
  2059. 0x7d8c, 0x7ddc,
  2060. 0x7de4, 0x7e04,
  2061. 0x7e10, 0x7e1c,
  2062. 0x7e24, 0x7e38,
  2063. 0x7e40, 0x7e44,
  2064. 0x7e4c, 0x7e78,
  2065. 0x7e80, 0x7edc,
  2066. 0x7ee8, 0x7efc,
  2067. 0x8dc0, 0x8de4,
  2068. 0x8df8, 0x8e04,
  2069. 0x8e10, 0x8e84,
  2070. 0x8ea0, 0x8f88,
  2071. 0x8fb8, 0x9058,
  2072. 0x9060, 0x9060,
  2073. 0x9068, 0x90f8,
  2074. 0x9100, 0x9124,
  2075. 0x9400, 0x9470,
  2076. 0x9600, 0x9600,
  2077. 0x9608, 0x9638,
  2078. 0x9640, 0x9704,
  2079. 0x9710, 0x971c,
  2080. 0x9800, 0x9808,
  2081. 0x9820, 0x983c,
  2082. 0x9850, 0x9864,
  2083. 0x9c00, 0x9c6c,
  2084. 0x9c80, 0x9cec,
  2085. 0x9d00, 0x9d6c,
  2086. 0x9d80, 0x9dec,
  2087. 0x9e00, 0x9e6c,
  2088. 0x9e80, 0x9eec,
  2089. 0x9f00, 0x9f6c,
  2090. 0x9f80, 0xa020,
  2091. 0xd004, 0xd03c,
  2092. 0xd100, 0xd118,
  2093. 0xd200, 0xd214,
  2094. 0xd220, 0xd234,
  2095. 0xd240, 0xd254,
  2096. 0xd260, 0xd274,
  2097. 0xd280, 0xd294,
  2098. 0xd2a0, 0xd2b4,
  2099. 0xd2c0, 0xd2d4,
  2100. 0xd2e0, 0xd2f4,
  2101. 0xd300, 0xd31c,
  2102. 0xdfc0, 0xdfe0,
  2103. 0xe000, 0xf008,
  2104. 0xf010, 0xf018,
  2105. 0xf020, 0xf028,
  2106. 0x11000, 0x11014,
  2107. 0x11048, 0x1106c,
  2108. 0x11074, 0x11088,
  2109. 0x11098, 0x11120,
  2110. 0x1112c, 0x1117c,
  2111. 0x11190, 0x112e0,
  2112. 0x11300, 0x1130c,
  2113. 0x12000, 0x1206c,
  2114. 0x19040, 0x1906c,
  2115. 0x19078, 0x19080,
  2116. 0x1908c, 0x190e8,
  2117. 0x190f0, 0x190f8,
  2118. 0x19100, 0x19110,
  2119. 0x19120, 0x19124,
  2120. 0x19150, 0x19194,
  2121. 0x1919c, 0x191b0,
  2122. 0x191d0, 0x191e8,
  2123. 0x19238, 0x19290,
  2124. 0x192a4, 0x192b0,
  2125. 0x192bc, 0x192bc,
  2126. 0x19348, 0x1934c,
  2127. 0x193f8, 0x19418,
  2128. 0x19420, 0x19428,
  2129. 0x19430, 0x19444,
  2130. 0x1944c, 0x1946c,
  2131. 0x19474, 0x19474,
  2132. 0x19490, 0x194cc,
  2133. 0x194f0, 0x194f8,
  2134. 0x19c00, 0x19c48,
  2135. 0x19c50, 0x19c80,
  2136. 0x19c94, 0x19c98,
  2137. 0x19ca0, 0x19cbc,
  2138. 0x19ce4, 0x19ce4,
  2139. 0x19cf0, 0x19cf8,
  2140. 0x19d00, 0x19d28,
  2141. 0x19d50, 0x19d78,
  2142. 0x19d94, 0x19d98,
  2143. 0x19da0, 0x19dc8,
  2144. 0x19df0, 0x19e10,
  2145. 0x19e50, 0x19e6c,
  2146. 0x19ea0, 0x19ebc,
  2147. 0x19ec4, 0x19ef4,
  2148. 0x19f04, 0x19f2c,
  2149. 0x19f34, 0x19f34,
  2150. 0x19f40, 0x19f50,
  2151. 0x19f90, 0x19fac,
  2152. 0x19fc4, 0x19fc8,
  2153. 0x19fd0, 0x19fe4,
  2154. 0x1a000, 0x1a004,
  2155. 0x1a010, 0x1a06c,
  2156. 0x1a0b0, 0x1a0e4,
  2157. 0x1a0ec, 0x1a0f8,
  2158. 0x1a100, 0x1a108,
  2159. 0x1a114, 0x1a120,
  2160. 0x1a128, 0x1a130,
  2161. 0x1a138, 0x1a138,
  2162. 0x1a190, 0x1a1c4,
  2163. 0x1a1fc, 0x1a1fc,
  2164. 0x1e008, 0x1e00c,
  2165. 0x1e040, 0x1e044,
  2166. 0x1e04c, 0x1e04c,
  2167. 0x1e284, 0x1e290,
  2168. 0x1e2c0, 0x1e2c0,
  2169. 0x1e2e0, 0x1e2e0,
  2170. 0x1e300, 0x1e384,
  2171. 0x1e3c0, 0x1e3c8,
  2172. 0x1e408, 0x1e40c,
  2173. 0x1e440, 0x1e444,
  2174. 0x1e44c, 0x1e44c,
  2175. 0x1e684, 0x1e690,
  2176. 0x1e6c0, 0x1e6c0,
  2177. 0x1e6e0, 0x1e6e0,
  2178. 0x1e700, 0x1e784,
  2179. 0x1e7c0, 0x1e7c8,
  2180. 0x1e808, 0x1e80c,
  2181. 0x1e840, 0x1e844,
  2182. 0x1e84c, 0x1e84c,
  2183. 0x1ea84, 0x1ea90,
  2184. 0x1eac0, 0x1eac0,
  2185. 0x1eae0, 0x1eae0,
  2186. 0x1eb00, 0x1eb84,
  2187. 0x1ebc0, 0x1ebc8,
  2188. 0x1ec08, 0x1ec0c,
  2189. 0x1ec40, 0x1ec44,
  2190. 0x1ec4c, 0x1ec4c,
  2191. 0x1ee84, 0x1ee90,
  2192. 0x1eec0, 0x1eec0,
  2193. 0x1eee0, 0x1eee0,
  2194. 0x1ef00, 0x1ef84,
  2195. 0x1efc0, 0x1efc8,
  2196. 0x1f008, 0x1f00c,
  2197. 0x1f040, 0x1f044,
  2198. 0x1f04c, 0x1f04c,
  2199. 0x1f284, 0x1f290,
  2200. 0x1f2c0, 0x1f2c0,
  2201. 0x1f2e0, 0x1f2e0,
  2202. 0x1f300, 0x1f384,
  2203. 0x1f3c0, 0x1f3c8,
  2204. 0x1f408, 0x1f40c,
  2205. 0x1f440, 0x1f444,
  2206. 0x1f44c, 0x1f44c,
  2207. 0x1f684, 0x1f690,
  2208. 0x1f6c0, 0x1f6c0,
  2209. 0x1f6e0, 0x1f6e0,
  2210. 0x1f700, 0x1f784,
  2211. 0x1f7c0, 0x1f7c8,
  2212. 0x1f808, 0x1f80c,
  2213. 0x1f840, 0x1f844,
  2214. 0x1f84c, 0x1f84c,
  2215. 0x1fa84, 0x1fa90,
  2216. 0x1fac0, 0x1fac0,
  2217. 0x1fae0, 0x1fae0,
  2218. 0x1fb00, 0x1fb84,
  2219. 0x1fbc0, 0x1fbc8,
  2220. 0x1fc08, 0x1fc0c,
  2221. 0x1fc40, 0x1fc44,
  2222. 0x1fc4c, 0x1fc4c,
  2223. 0x1fe84, 0x1fe90,
  2224. 0x1fec0, 0x1fec0,
  2225. 0x1fee0, 0x1fee0,
  2226. 0x1ff00, 0x1ff84,
  2227. 0x1ffc0, 0x1ffc8,
  2228. 0x30000, 0x30030,
  2229. 0x30100, 0x30168,
  2230. 0x30190, 0x301a0,
  2231. 0x301a8, 0x301b8,
  2232. 0x301c4, 0x301c8,
  2233. 0x301d0, 0x301d0,
  2234. 0x30200, 0x30320,
  2235. 0x30400, 0x304b4,
  2236. 0x304c0, 0x3052c,
  2237. 0x30540, 0x3061c,
  2238. 0x30800, 0x308a0,
  2239. 0x308c0, 0x30908,
  2240. 0x30910, 0x309b8,
  2241. 0x30a00, 0x30a04,
  2242. 0x30a0c, 0x30a14,
  2243. 0x30a1c, 0x30a2c,
  2244. 0x30a44, 0x30a50,
  2245. 0x30a74, 0x30a74,
  2246. 0x30a7c, 0x30afc,
  2247. 0x30b08, 0x30c24,
  2248. 0x30d00, 0x30d14,
  2249. 0x30d1c, 0x30d3c,
  2250. 0x30d44, 0x30d4c,
  2251. 0x30d54, 0x30d74,
  2252. 0x30d7c, 0x30d7c,
  2253. 0x30de0, 0x30de0,
  2254. 0x30e00, 0x30ed4,
  2255. 0x30f00, 0x30fa4,
  2256. 0x30fc0, 0x30fc4,
  2257. 0x31000, 0x31004,
  2258. 0x31080, 0x310fc,
  2259. 0x31208, 0x31220,
  2260. 0x3123c, 0x31254,
  2261. 0x31300, 0x31300,
  2262. 0x31308, 0x3131c,
  2263. 0x31338, 0x3133c,
  2264. 0x31380, 0x31380,
  2265. 0x31388, 0x313a8,
  2266. 0x313b4, 0x313b4,
  2267. 0x31400, 0x31420,
  2268. 0x31438, 0x3143c,
  2269. 0x31480, 0x31480,
  2270. 0x314a8, 0x314a8,
  2271. 0x314b0, 0x314b4,
  2272. 0x314c8, 0x314d4,
  2273. 0x31a40, 0x31a4c,
  2274. 0x31af0, 0x31b20,
  2275. 0x31b38, 0x31b3c,
  2276. 0x31b80, 0x31b80,
  2277. 0x31ba8, 0x31ba8,
  2278. 0x31bb0, 0x31bb4,
  2279. 0x31bc8, 0x31bd4,
  2280. 0x32140, 0x3218c,
  2281. 0x321f0, 0x321f4,
  2282. 0x32200, 0x32200,
  2283. 0x32218, 0x32218,
  2284. 0x32400, 0x32400,
  2285. 0x32408, 0x3241c,
  2286. 0x32618, 0x32620,
  2287. 0x32664, 0x32664,
  2288. 0x326a8, 0x326a8,
  2289. 0x326ec, 0x326ec,
  2290. 0x32a00, 0x32abc,
  2291. 0x32b00, 0x32b18,
  2292. 0x32b20, 0x32b38,
  2293. 0x32b40, 0x32b58,
  2294. 0x32b60, 0x32b78,
  2295. 0x32c00, 0x32c00,
  2296. 0x32c08, 0x32c3c,
  2297. 0x33000, 0x3302c,
  2298. 0x33034, 0x33050,
  2299. 0x33058, 0x33058,
  2300. 0x33060, 0x3308c,
  2301. 0x3309c, 0x330ac,
  2302. 0x330c0, 0x330c0,
  2303. 0x330c8, 0x330d0,
  2304. 0x330d8, 0x330e0,
  2305. 0x330ec, 0x3312c,
  2306. 0x33134, 0x33150,
  2307. 0x33158, 0x33158,
  2308. 0x33160, 0x3318c,
  2309. 0x3319c, 0x331ac,
  2310. 0x331c0, 0x331c0,
  2311. 0x331c8, 0x331d0,
  2312. 0x331d8, 0x331e0,
  2313. 0x331ec, 0x33290,
  2314. 0x33298, 0x332c4,
  2315. 0x332e4, 0x33390,
  2316. 0x33398, 0x333c4,
  2317. 0x333e4, 0x3342c,
  2318. 0x33434, 0x33450,
  2319. 0x33458, 0x33458,
  2320. 0x33460, 0x3348c,
  2321. 0x3349c, 0x334ac,
  2322. 0x334c0, 0x334c0,
  2323. 0x334c8, 0x334d0,
  2324. 0x334d8, 0x334e0,
  2325. 0x334ec, 0x3352c,
  2326. 0x33534, 0x33550,
  2327. 0x33558, 0x33558,
  2328. 0x33560, 0x3358c,
  2329. 0x3359c, 0x335ac,
  2330. 0x335c0, 0x335c0,
  2331. 0x335c8, 0x335d0,
  2332. 0x335d8, 0x335e0,
  2333. 0x335ec, 0x33690,
  2334. 0x33698, 0x336c4,
  2335. 0x336e4, 0x33790,
  2336. 0x33798, 0x337c4,
  2337. 0x337e4, 0x337fc,
  2338. 0x33814, 0x33814,
  2339. 0x33854, 0x33868,
  2340. 0x33880, 0x3388c,
  2341. 0x338c0, 0x338d0,
  2342. 0x338e8, 0x338ec,
  2343. 0x33900, 0x3392c,
  2344. 0x33934, 0x33950,
  2345. 0x33958, 0x33958,
  2346. 0x33960, 0x3398c,
  2347. 0x3399c, 0x339ac,
  2348. 0x339c0, 0x339c0,
  2349. 0x339c8, 0x339d0,
  2350. 0x339d8, 0x339e0,
  2351. 0x339ec, 0x33a90,
  2352. 0x33a98, 0x33ac4,
  2353. 0x33ae4, 0x33b10,
  2354. 0x33b24, 0x33b28,
  2355. 0x33b38, 0x33b50,
  2356. 0x33bf0, 0x33c10,
  2357. 0x33c24, 0x33c28,
  2358. 0x33c38, 0x33c50,
  2359. 0x33cf0, 0x33cfc,
  2360. 0x34000, 0x34030,
  2361. 0x34100, 0x34168,
  2362. 0x34190, 0x341a0,
  2363. 0x341a8, 0x341b8,
  2364. 0x341c4, 0x341c8,
  2365. 0x341d0, 0x341d0,
  2366. 0x34200, 0x34320,
  2367. 0x34400, 0x344b4,
  2368. 0x344c0, 0x3452c,
  2369. 0x34540, 0x3461c,
  2370. 0x34800, 0x348a0,
  2371. 0x348c0, 0x34908,
  2372. 0x34910, 0x349b8,
  2373. 0x34a00, 0x34a04,
  2374. 0x34a0c, 0x34a14,
  2375. 0x34a1c, 0x34a2c,
  2376. 0x34a44, 0x34a50,
  2377. 0x34a74, 0x34a74,
  2378. 0x34a7c, 0x34afc,
  2379. 0x34b08, 0x34c24,
  2380. 0x34d00, 0x34d14,
  2381. 0x34d1c, 0x34d3c,
  2382. 0x34d44, 0x34d4c,
  2383. 0x34d54, 0x34d74,
  2384. 0x34d7c, 0x34d7c,
  2385. 0x34de0, 0x34de0,
  2386. 0x34e00, 0x34ed4,
  2387. 0x34f00, 0x34fa4,
  2388. 0x34fc0, 0x34fc4,
  2389. 0x35000, 0x35004,
  2390. 0x35080, 0x350fc,
  2391. 0x35208, 0x35220,
  2392. 0x3523c, 0x35254,
  2393. 0x35300, 0x35300,
  2394. 0x35308, 0x3531c,
  2395. 0x35338, 0x3533c,
  2396. 0x35380, 0x35380,
  2397. 0x35388, 0x353a8,
  2398. 0x353b4, 0x353b4,
  2399. 0x35400, 0x35420,
  2400. 0x35438, 0x3543c,
  2401. 0x35480, 0x35480,
  2402. 0x354a8, 0x354a8,
  2403. 0x354b0, 0x354b4,
  2404. 0x354c8, 0x354d4,
  2405. 0x35a40, 0x35a4c,
  2406. 0x35af0, 0x35b20,
  2407. 0x35b38, 0x35b3c,
  2408. 0x35b80, 0x35b80,
  2409. 0x35ba8, 0x35ba8,
  2410. 0x35bb0, 0x35bb4,
  2411. 0x35bc8, 0x35bd4,
  2412. 0x36140, 0x3618c,
  2413. 0x361f0, 0x361f4,
  2414. 0x36200, 0x36200,
  2415. 0x36218, 0x36218,
  2416. 0x36400, 0x36400,
  2417. 0x36408, 0x3641c,
  2418. 0x36618, 0x36620,
  2419. 0x36664, 0x36664,
  2420. 0x366a8, 0x366a8,
  2421. 0x366ec, 0x366ec,
  2422. 0x36a00, 0x36abc,
  2423. 0x36b00, 0x36b18,
  2424. 0x36b20, 0x36b38,
  2425. 0x36b40, 0x36b58,
  2426. 0x36b60, 0x36b78,
  2427. 0x36c00, 0x36c00,
  2428. 0x36c08, 0x36c3c,
  2429. 0x37000, 0x3702c,
  2430. 0x37034, 0x37050,
  2431. 0x37058, 0x37058,
  2432. 0x37060, 0x3708c,
  2433. 0x3709c, 0x370ac,
  2434. 0x370c0, 0x370c0,
  2435. 0x370c8, 0x370d0,
  2436. 0x370d8, 0x370e0,
  2437. 0x370ec, 0x3712c,
  2438. 0x37134, 0x37150,
  2439. 0x37158, 0x37158,
  2440. 0x37160, 0x3718c,
  2441. 0x3719c, 0x371ac,
  2442. 0x371c0, 0x371c0,
  2443. 0x371c8, 0x371d0,
  2444. 0x371d8, 0x371e0,
  2445. 0x371ec, 0x37290,
  2446. 0x37298, 0x372c4,
  2447. 0x372e4, 0x37390,
  2448. 0x37398, 0x373c4,
  2449. 0x373e4, 0x3742c,
  2450. 0x37434, 0x37450,
  2451. 0x37458, 0x37458,
  2452. 0x37460, 0x3748c,
  2453. 0x3749c, 0x374ac,
  2454. 0x374c0, 0x374c0,
  2455. 0x374c8, 0x374d0,
  2456. 0x374d8, 0x374e0,
  2457. 0x374ec, 0x3752c,
  2458. 0x37534, 0x37550,
  2459. 0x37558, 0x37558,
  2460. 0x37560, 0x3758c,
  2461. 0x3759c, 0x375ac,
  2462. 0x375c0, 0x375c0,
  2463. 0x375c8, 0x375d0,
  2464. 0x375d8, 0x375e0,
  2465. 0x375ec, 0x37690,
  2466. 0x37698, 0x376c4,
  2467. 0x376e4, 0x37790,
  2468. 0x37798, 0x377c4,
  2469. 0x377e4, 0x377fc,
  2470. 0x37814, 0x37814,
  2471. 0x37854, 0x37868,
  2472. 0x37880, 0x3788c,
  2473. 0x378c0, 0x378d0,
  2474. 0x378e8, 0x378ec,
  2475. 0x37900, 0x3792c,
  2476. 0x37934, 0x37950,
  2477. 0x37958, 0x37958,
  2478. 0x37960, 0x3798c,
  2479. 0x3799c, 0x379ac,
  2480. 0x379c0, 0x379c0,
  2481. 0x379c8, 0x379d0,
  2482. 0x379d8, 0x379e0,
  2483. 0x379ec, 0x37a90,
  2484. 0x37a98, 0x37ac4,
  2485. 0x37ae4, 0x37b10,
  2486. 0x37b24, 0x37b28,
  2487. 0x37b38, 0x37b50,
  2488. 0x37bf0, 0x37c10,
  2489. 0x37c24, 0x37c28,
  2490. 0x37c38, 0x37c50,
  2491. 0x37cf0, 0x37cfc,
  2492. 0x40040, 0x40040,
  2493. 0x40080, 0x40084,
  2494. 0x40100, 0x40100,
  2495. 0x40140, 0x401bc,
  2496. 0x40200, 0x40214,
  2497. 0x40228, 0x40228,
  2498. 0x40240, 0x40258,
  2499. 0x40280, 0x40280,
  2500. 0x40304, 0x40304,
  2501. 0x40330, 0x4033c,
  2502. 0x41304, 0x413c8,
  2503. 0x413d0, 0x413dc,
  2504. 0x413f0, 0x413f0,
  2505. 0x41400, 0x4140c,
  2506. 0x41414, 0x4141c,
  2507. 0x41480, 0x414d0,
  2508. 0x44000, 0x4407c,
  2509. 0x440c0, 0x441ac,
  2510. 0x441b4, 0x4427c,
  2511. 0x442c0, 0x443ac,
  2512. 0x443b4, 0x4447c,
  2513. 0x444c0, 0x445ac,
  2514. 0x445b4, 0x4467c,
  2515. 0x446c0, 0x447ac,
  2516. 0x447b4, 0x4487c,
  2517. 0x448c0, 0x449ac,
  2518. 0x449b4, 0x44a7c,
  2519. 0x44ac0, 0x44bac,
  2520. 0x44bb4, 0x44c7c,
  2521. 0x44cc0, 0x44dac,
  2522. 0x44db4, 0x44e7c,
  2523. 0x44ec0, 0x44fac,
  2524. 0x44fb4, 0x4507c,
  2525. 0x450c0, 0x451ac,
  2526. 0x451b4, 0x451fc,
  2527. 0x45800, 0x45804,
  2528. 0x45810, 0x45830,
  2529. 0x45840, 0x45860,
  2530. 0x45868, 0x45868,
  2531. 0x45880, 0x45884,
  2532. 0x458a0, 0x458b0,
  2533. 0x45a00, 0x45a04,
  2534. 0x45a10, 0x45a30,
  2535. 0x45a40, 0x45a60,
  2536. 0x45a68, 0x45a68,
  2537. 0x45a80, 0x45a84,
  2538. 0x45aa0, 0x45ab0,
  2539. 0x460c0, 0x460e4,
  2540. 0x47000, 0x4703c,
  2541. 0x47044, 0x4708c,
  2542. 0x47200, 0x47250,
  2543. 0x47400, 0x47408,
  2544. 0x47414, 0x47420,
  2545. 0x47600, 0x47618,
  2546. 0x47800, 0x47814,
  2547. 0x47820, 0x4782c,
  2548. 0x50000, 0x50084,
  2549. 0x50090, 0x500cc,
  2550. 0x50300, 0x50384,
  2551. 0x50400, 0x50400,
  2552. 0x50800, 0x50884,
  2553. 0x50890, 0x508cc,
  2554. 0x50b00, 0x50b84,
  2555. 0x50c00, 0x50c00,
  2556. 0x51000, 0x51020,
  2557. 0x51028, 0x510b0,
  2558. 0x51300, 0x51324,
  2559. };
  2560. u32 *buf_end = (u32 *)((char *)buf + buf_size);
  2561. const unsigned int *reg_ranges;
  2562. int reg_ranges_size, range;
  2563. unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
  2564. /* Select the right set of register ranges to dump depending on the
  2565. * adapter chip type.
  2566. */
  2567. switch (chip_version) {
  2568. case CHELSIO_T4:
  2569. reg_ranges = t4_reg_ranges;
  2570. reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
  2571. break;
  2572. case CHELSIO_T5:
  2573. reg_ranges = t5_reg_ranges;
  2574. reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
  2575. break;
  2576. case CHELSIO_T6:
  2577. reg_ranges = t6_reg_ranges;
  2578. reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
  2579. break;
  2580. default:
  2581. dev_err(adap->pdev_dev,
  2582. "Unsupported chip version %d\n", chip_version);
  2583. return;
  2584. }
  2585. /* Clear the register buffer and insert the appropriate register
  2586. * values selected by the above register ranges.
  2587. */
  2588. memset(buf, 0, buf_size);
  2589. for (range = 0; range < reg_ranges_size; range += 2) {
  2590. unsigned int reg = reg_ranges[range];
  2591. unsigned int last_reg = reg_ranges[range + 1];
  2592. u32 *bufp = (u32 *)((char *)buf + reg);
  2593. /* Iterate across the register range filling in the register
  2594. * buffer but don't write past the end of the register buffer.
  2595. */
  2596. while (reg <= last_reg && bufp < buf_end) {
  2597. *bufp++ = t4_read_reg(adap, reg);
  2598. reg += sizeof(u32);
  2599. }
  2600. }
  2601. }
  2602. #define EEPROM_STAT_ADDR 0x7bfc
  2603. #define VPD_BASE 0x400
  2604. #define VPD_BASE_OLD 0
  2605. #define VPD_LEN 1024
  2606. #define CHELSIO_VPD_UNIQUE_ID 0x82
  2607. /**
  2608. * t4_eeprom_ptov - translate a physical EEPROM address to virtual
  2609. * @phys_addr: the physical EEPROM address
  2610. * @fn: the PCI function number
  2611. * @sz: size of function-specific area
  2612. *
  2613. * Translate a physical EEPROM address to virtual. The first 1K is
  2614. * accessed through virtual addresses starting at 31K, the rest is
  2615. * accessed through virtual addresses starting at 0.
  2616. *
  2617. * The mapping is as follows:
  2618. * [0..1K) -> [31K..32K)
  2619. * [1K..1K+A) -> [31K-A..31K)
  2620. * [1K+A..ES) -> [0..ES-A-1K)
  2621. *
  2622. * where A = @fn * @sz, and ES = EEPROM size.
  2623. */
  2624. int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
  2625. {
  2626. fn *= sz;
  2627. if (phys_addr < 1024)
  2628. return phys_addr + (31 << 10);
  2629. if (phys_addr < 1024 + fn)
  2630. return 31744 - fn + phys_addr - 1024;
  2631. if (phys_addr < EEPROMSIZE)
  2632. return phys_addr - 1024 - fn;
  2633. return -EINVAL;
  2634. }
  2635. /**
  2636. * t4_seeprom_wp - enable/disable EEPROM write protection
  2637. * @adapter: the adapter
  2638. * @enable: whether to enable or disable write protection
  2639. *
  2640. * Enables or disables write protection on the serial EEPROM.
  2641. */
  2642. int t4_seeprom_wp(struct adapter *adapter, bool enable)
  2643. {
  2644. unsigned int v = enable ? 0xc : 0;
  2645. int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
  2646. return ret < 0 ? ret : 0;
  2647. }
  2648. /**
  2649. * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
  2650. * @adapter: adapter to read
  2651. * @p: where to store the parameters
  2652. *
  2653. * Reads card parameters stored in VPD EEPROM.
  2654. */
  2655. int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
  2656. {
  2657. int i, ret = 0, addr;
  2658. int ec, sn, pn, na;
  2659. u8 *vpd, csum;
  2660. unsigned int vpdr_len, kw_offset, id_len;
  2661. vpd = vmalloc(VPD_LEN);
  2662. if (!vpd)
  2663. return -ENOMEM;
  2664. /* Card information normally starts at VPD_BASE but early cards had
  2665. * it at 0.
  2666. */
  2667. ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
  2668. if (ret < 0)
  2669. goto out;
  2670. /* The VPD shall have a unique identifier specified by the PCI SIG.
  2671. * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
  2672. * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
  2673. * is expected to automatically put this entry at the
  2674. * beginning of the VPD.
  2675. */
  2676. addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
  2677. ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
  2678. if (ret < 0)
  2679. goto out;
  2680. if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
  2681. dev_err(adapter->pdev_dev, "missing VPD ID string\n");
  2682. ret = -EINVAL;
  2683. goto out;
  2684. }
  2685. id_len = pci_vpd_lrdt_size(vpd);
  2686. if (id_len > ID_LEN)
  2687. id_len = ID_LEN;
  2688. i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
  2689. if (i < 0) {
  2690. dev_err(adapter->pdev_dev, "missing VPD-R section\n");
  2691. ret = -EINVAL;
  2692. goto out;
  2693. }
  2694. vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
  2695. kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
  2696. if (vpdr_len + kw_offset > VPD_LEN) {
  2697. dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
  2698. ret = -EINVAL;
  2699. goto out;
  2700. }
  2701. #define FIND_VPD_KW(var, name) do { \
  2702. var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
  2703. if (var < 0) { \
  2704. dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
  2705. ret = -EINVAL; \
  2706. goto out; \
  2707. } \
  2708. var += PCI_VPD_INFO_FLD_HDR_SIZE; \
  2709. } while (0)
  2710. FIND_VPD_KW(i, "RV");
  2711. for (csum = 0; i >= 0; i--)
  2712. csum += vpd[i];
  2713. if (csum) {
  2714. dev_err(adapter->pdev_dev,
  2715. "corrupted VPD EEPROM, actual csum %u\n", csum);
  2716. ret = -EINVAL;
  2717. goto out;
  2718. }
  2719. FIND_VPD_KW(ec, "EC");
  2720. FIND_VPD_KW(sn, "SN");
  2721. FIND_VPD_KW(pn, "PN");
  2722. FIND_VPD_KW(na, "NA");
  2723. #undef FIND_VPD_KW
  2724. memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
  2725. strim(p->id);
  2726. memcpy(p->ec, vpd + ec, EC_LEN);
  2727. strim(p->ec);
  2728. i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
  2729. memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
  2730. strim(p->sn);
  2731. i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
  2732. memcpy(p->pn, vpd + pn, min(i, PN_LEN));
  2733. strim(p->pn);
  2734. memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
  2735. strim((char *)p->na);
  2736. out:
  2737. vfree(vpd);
  2738. return ret < 0 ? ret : 0;
  2739. }
  2740. /**
  2741. * t4_get_vpd_params - read VPD parameters & retrieve Core Clock
  2742. * @adapter: adapter to read
  2743. * @p: where to store the parameters
  2744. *
  2745. * Reads card parameters stored in VPD EEPROM and retrieves the Core
  2746. * Clock. This can only be called after a connection to the firmware
  2747. * is established.
  2748. */
  2749. int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
  2750. {
  2751. u32 cclk_param, cclk_val;
  2752. int ret;
  2753. /* Grab the raw VPD parameters.
  2754. */
  2755. ret = t4_get_raw_vpd_params(adapter, p);
  2756. if (ret)
  2757. return ret;
  2758. /* Ask firmware for the Core Clock since it knows how to translate the
  2759. * Reference Clock ('V2') VPD field into a Core Clock value ...
  2760. */
  2761. cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
  2762. FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
  2763. ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
  2764. 1, &cclk_param, &cclk_val);
  2765. if (ret)
  2766. return ret;
  2767. p->cclk = cclk_val;
  2768. return 0;
  2769. }
  2770. /**
  2771. * t4_get_pfres - retrieve VF resource limits
  2772. * @adapter: the adapter
  2773. *
  2774. * Retrieves configured resource limits and capabilities for a physical
  2775. * function. The results are stored in @adapter->pfres.
  2776. */
  2777. int t4_get_pfres(struct adapter *adapter)
  2778. {
  2779. struct pf_resources *pfres = &adapter->params.pfres;
  2780. struct fw_pfvf_cmd cmd, rpl;
  2781. int v;
  2782. u32 word;
  2783. /* Execute PFVF Read command to get VF resource limits; bail out early
  2784. * with error on command failure.
  2785. */
  2786. memset(&cmd, 0, sizeof(cmd));
  2787. cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
  2788. FW_CMD_REQUEST_F |
  2789. FW_CMD_READ_F |
  2790. FW_PFVF_CMD_PFN_V(adapter->pf) |
  2791. FW_PFVF_CMD_VFN_V(0));
  2792. cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
  2793. v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
  2794. if (v != FW_SUCCESS)
  2795. return v;
  2796. /* Extract PF resource limits and return success.
  2797. */
  2798. word = be32_to_cpu(rpl.niqflint_niq);
  2799. pfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
  2800. pfres->niq = FW_PFVF_CMD_NIQ_G(word);
  2801. word = be32_to_cpu(rpl.type_to_neq);
  2802. pfres->neq = FW_PFVF_CMD_NEQ_G(word);
  2803. pfres->pmask = FW_PFVF_CMD_PMASK_G(word);
  2804. word = be32_to_cpu(rpl.tc_to_nexactf);
  2805. pfres->tc = FW_PFVF_CMD_TC_G(word);
  2806. pfres->nvi = FW_PFVF_CMD_NVI_G(word);
  2807. pfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
  2808. word = be32_to_cpu(rpl.r_caps_to_nethctrl);
  2809. pfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
  2810. pfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
  2811. pfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
  2812. return 0;
  2813. }
/* serial flash and firmware constants; the opcodes below are standard
 * SPI serial-flash commands issued through the SF_OP/SF_DATA registers.
 */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
};
  2826. /**
  2827. * sf1_read - read data from the serial flash
  2828. * @adapter: the adapter
  2829. * @byte_cnt: number of bytes to read
  2830. * @cont: whether another operation will be chained
  2831. * @lock: whether to lock SF for PL access only
  2832. * @valp: where to store the read data
  2833. *
  2834. * Reads up to 4 bytes of data from the serial flash. The location of
  2835. * the read needs to be specified prior to calling this by issuing the
  2836. * appropriate commands to the serial flash.
  2837. */
  2838. static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
  2839. int lock, u32 *valp)
  2840. {
  2841. int ret;
  2842. if (!byte_cnt || byte_cnt > 4)
  2843. return -EINVAL;
  2844. if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
  2845. return -EBUSY;
  2846. t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
  2847. SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
  2848. ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
  2849. if (!ret)
  2850. *valp = t4_read_reg(adapter, SF_DATA_A);
  2851. return ret;
  2852. }
/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write (1-4)
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	/* The SF controller can only run one operation at a time. */
	if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
		return -EBUSY;
	/* Data must be staged in SF_DATA before the operation is started.
	 * OP_V(1) selects a write; BYTECNT is encoded as (count - 1).
	 */
	t4_write_reg(adapter, SF_DATA_A, val);
	t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
		     SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
	return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
}
  2877. /**
  2878. * flash_wait_op - wait for a flash operation to complete
  2879. * @adapter: the adapter
  2880. * @attempts: max number of polls of the status register
  2881. * @delay: delay between polls in ms
  2882. *
  2883. * Wait for a flash operation to complete by polling the status register.
  2884. */
  2885. static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
  2886. {
  2887. int ret;
  2888. u32 status;
  2889. while (1) {
  2890. if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
  2891. (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
  2892. return ret;
  2893. if (!(status & 1))
  2894. return 0;
  2895. if (--attempts == 0)
  2896. return -EAGAIN;
  2897. if (delay)
  2898. msleep(delay);
  2899. }
  2900. }
/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read (must be 32-bit aligned)
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
int t4_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	/* Byte-swap packs the 24-bit address big-endian with the fast-read
	 * opcode in the low byte, matching the wire order the flash expects.
	 */
	addr = swab32(addr) | SF_RD_DATA_FAST;

	/* Send opcode + address, then clock one throwaway byte (the
	 * fast-read command's dummy cycle); *data is overwritten below.
	 */
	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		/* Keep the transfer chained until the last word, and take
		 * the SF lock on the final read so we can release it below.
		 */
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP_A, 0);	/* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = (__force __u32)(cpu_to_be32(*data));
	}
	return 0;
}
/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page,
 *	and the page is read back afterwards to verify the write succeeded.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];			/* one SF_PAGE_SIZE page for verify */
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* Reject writes past the end of flash or spanning a page boundary. */
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	/* 24-bit address in wire (big-endian) order + PAGE PROGRAM opcode */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Stream the payload 4 bytes at a time, big-endian within each
	 * word; the final chunk (c == left) ends the chained transfer.
	 */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP_A, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	/* @data was advanced by @n in the loop above; back it up to
	 * compare the caller's buffer with what is now in flash.
	 */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP_A, 0);	/* unlock SF */
	return ret;
}
  2984. /**
  2985. * t4_get_fw_version - read the firmware version
  2986. * @adapter: the adapter
  2987. * @vers: where to place the version
  2988. *
  2989. * Reads the FW version from flash.
  2990. */
  2991. int t4_get_fw_version(struct adapter *adapter, u32 *vers)
  2992. {
  2993. return t4_read_flash(adapter, FLASH_FW_START +
  2994. offsetof(struct fw_hdr, fw_ver), 1,
  2995. vers, 0);
  2996. }
  2997. /**
  2998. * t4_get_bs_version - read the firmware bootstrap version
  2999. * @adapter: the adapter
  3000. * @vers: where to place the version
  3001. *
  3002. * Reads the FW Bootstrap version from flash.
  3003. */
  3004. int t4_get_bs_version(struct adapter *adapter, u32 *vers)
  3005. {
  3006. return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
  3007. offsetof(struct fw_hdr, fw_ver), 1,
  3008. vers, 0);
  3009. }
  3010. /**
  3011. * t4_get_tp_version - read the TP microcode version
  3012. * @adapter: the adapter
  3013. * @vers: where to place the version
  3014. *
  3015. * Reads the TP microcode version from flash.
  3016. */
  3017. int t4_get_tp_version(struct adapter *adapter, u32 *vers)
  3018. {
  3019. return t4_read_flash(adapter, FLASH_FW_START +
  3020. offsetof(struct fw_hdr, tp_microcode_ver),
  3021. 1, vers, 0);
  3022. }
  3023. /**
  3024. * t4_get_exprom_version - return the Expansion ROM version (if any)
  3025. * @adapter: the adapter
  3026. * @vers: where to place the version
  3027. *
  3028. * Reads the Expansion ROM header from FLASH and returns the version
  3029. * number (if present) through the @vers return value pointer. We return
  3030. * this in the Firmware Version Format since it's convenient. Return
  3031. * 0 on success, -ENOENT if no Expansion ROM is present.
  3032. */
  3033. int t4_get_exprom_version(struct adapter *adap, u32 *vers)
  3034. {
  3035. struct exprom_header {
  3036. unsigned char hdr_arr[16]; /* must start with 0x55aa */
  3037. unsigned char hdr_ver[4]; /* Expansion ROM version */
  3038. } *hdr;
  3039. u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
  3040. sizeof(u32))];
  3041. int ret;
  3042. ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
  3043. ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
  3044. 0);
  3045. if (ret)
  3046. return ret;
  3047. hdr = (struct exprom_header *)exprom_header_buf;
  3048. if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
  3049. return -ENOENT;
  3050. *vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
  3051. FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
  3052. FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
  3053. FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
  3054. return 0;
  3055. }
  3056. /**
  3057. * t4_get_vpd_version - return the VPD version
  3058. * @adapter: the adapter
  3059. * @vers: where to place the version
  3060. *
  3061. * Reads the VPD via the Firmware interface (thus this can only be called
  3062. * once we're ready to issue Firmware commands). The format of the
  3063. * VPD version is adapter specific. Returns 0 on success, an error on
  3064. * failure.
  3065. *
  3066. * Note that early versions of the Firmware didn't include the ability
  3067. * to retrieve the VPD version, so we zero-out the return-value parameter
  3068. * in that case to avoid leaving it with garbage in it.
  3069. *
  3070. * Also note that the Firmware will return its cached copy of the VPD
  3071. * Revision ID, not the actual Revision ID as written in the Serial
  3072. * EEPROM. This is only an issue if a new VPD has been written and the
  3073. * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
  3074. * to defer calling this routine till after a FW_RESET_CMD has been issued
  3075. * if the Host Driver will be performing a full adapter initialization.
  3076. */
  3077. int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
  3078. {
  3079. u32 vpdrev_param;
  3080. int ret;
  3081. vpdrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
  3082. FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_VPDREV));
  3083. ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
  3084. 1, &vpdrev_param, vers);
  3085. if (ret)
  3086. *vers = 0;
  3087. return ret;
  3088. }
  3089. /**
  3090. * t4_get_scfg_version - return the Serial Configuration version
  3091. * @adapter: the adapter
  3092. * @vers: where to place the version
  3093. *
  3094. * Reads the Serial Configuration Version via the Firmware interface
  3095. * (thus this can only be called once we're ready to issue Firmware
  3096. * commands). The format of the Serial Configuration version is
  3097. * adapter specific. Returns 0 on success, an error on failure.
  3098. *
  3099. * Note that early versions of the Firmware didn't include the ability
  3100. * to retrieve the Serial Configuration version, so we zero-out the
  3101. * return-value parameter in that case to avoid leaving it with
  3102. * garbage in it.
  3103. *
  3104. * Also note that the Firmware will return its cached copy of the Serial
  3105. * Initialization Revision ID, not the actual Revision ID as written in
  3106. * the Serial EEPROM. This is only an issue if a new VPD has been written
  3107. * and the Firmware/Chip haven't yet gone through a RESET sequence. So
  3108. * it's best to defer calling this routine till after a FW_RESET_CMD has
  3109. * been issued if the Host Driver will be performing a full adapter
  3110. * initialization.
  3111. */
  3112. int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
  3113. {
  3114. u32 scfgrev_param;
  3115. int ret;
  3116. scfgrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
  3117. FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_SCFGREV));
  3118. ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
  3119. 1, &scfgrev_param, vers);
  3120. if (ret)
  3121. *vers = 0;
  3122. return ret;
  3123. }
  3124. /**
  3125. * t4_get_version_info - extract various chip/firmware version information
  3126. * @adapter: the adapter
  3127. *
  3128. * Reads various chip/firmware version numbers and stores them into the
  3129. * adapter Adapter Parameters structure. If any of the efforts fails
  3130. * the first failure will be returned, but all of the version numbers
  3131. * will be read.
  3132. */
  3133. int t4_get_version_info(struct adapter *adapter)
  3134. {
  3135. int ret = 0;
  3136. #define FIRST_RET(__getvinfo) \
  3137. do { \
  3138. int __ret = __getvinfo; \
  3139. if (__ret && !ret) \
  3140. ret = __ret; \
  3141. } while (0)
  3142. FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
  3143. FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
  3144. FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
  3145. FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
  3146. FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
  3147. FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
  3148. #undef FIRST_RET
  3149. return ret;
  3150. }
/**
 *	t4_dump_version_info - dump all of the adapter configuration IDs
 *	@adapter: the adapter
 *
 *	Dumps all of the various bits of adapter configuration version/revision
 *	IDs information.  This is typically called at some point after
 *	t4_get_version_info() has been called.
 */
void t4_dump_version_info(struct adapter *adapter)
{
	/* Device information */
	dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
		 adapter->params.vpd.id,
		 CHELSIO_CHIP_RELEASE(adapter->params.chip));
	dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
		 adapter->params.vpd.sn, adapter->params.vpd.pn);

	/* Firmware Version (a zero version word means none was found) */
	if (!adapter->params.fw_vers)
		dev_warn(adapter->pdev_dev, "No firmware loaded\n");
	else
		dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));

	/* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
	 * Firmware, so dev_info() is more appropriate here.)
	 */
	if (!adapter->params.bs_vers)
		dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
	else
		dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));

	/* TP Microcode Version */
	if (!adapter->params.tp_vers)
		dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
	else
		dev_info(adapter->pdev_dev,
			 "TP Microcode version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));

	/* Expansion ROM version */
	if (!adapter->params.er_vers)
		dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
	else
		dev_info(adapter->pdev_dev,
			 "Expansion ROM version: %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
			 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));

	/* Serial Configuration version */
	dev_info(adapter->pdev_dev, "Serial Configuration version: %#x\n",
		 adapter->params.scfg_vers);

	/* VPD Version */
	dev_info(adapter->pdev_dev, "VPD version: %#x\n",
		 adapter->params.vpd_vers);
}
  3214. /**
  3215. * t4_check_fw_version - check if the FW is supported with this driver
  3216. * @adap: the adapter
  3217. *
  3218. * Checks if an adapter's FW is compatible with the driver. Returns 0
  3219. * if there's exact match, a negative error if the version could not be
  3220. * read or there's a major version mismatch
  3221. */
  3222. int t4_check_fw_version(struct adapter *adap)
  3223. {
  3224. int i, ret, major, minor, micro;
  3225. int exp_major, exp_minor, exp_micro;
  3226. unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
  3227. ret = t4_get_fw_version(adap, &adap->params.fw_vers);
  3228. /* Try multiple times before returning error */
  3229. for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
  3230. ret = t4_get_fw_version(adap, &adap->params.fw_vers);
  3231. if (ret)
  3232. return ret;
  3233. major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
  3234. minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
  3235. micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);
  3236. switch (chip_version) {
  3237. case CHELSIO_T4:
  3238. exp_major = T4FW_MIN_VERSION_MAJOR;
  3239. exp_minor = T4FW_MIN_VERSION_MINOR;
  3240. exp_micro = T4FW_MIN_VERSION_MICRO;
  3241. break;
  3242. case CHELSIO_T5:
  3243. exp_major = T5FW_MIN_VERSION_MAJOR;
  3244. exp_minor = T5FW_MIN_VERSION_MINOR;
  3245. exp_micro = T5FW_MIN_VERSION_MICRO;
  3246. break;
  3247. case CHELSIO_T6:
  3248. exp_major = T6FW_MIN_VERSION_MAJOR;
  3249. exp_minor = T6FW_MIN_VERSION_MINOR;
  3250. exp_micro = T6FW_MIN_VERSION_MICRO;
  3251. break;
  3252. default:
  3253. dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
  3254. adap->chip);
  3255. return -EINVAL;
  3256. }
  3257. if (major < exp_major || (major == exp_major && minor < exp_minor) ||
  3258. (major == exp_major && minor == exp_minor && micro < exp_micro)) {
  3259. dev_err(adap->pdev_dev,
  3260. "Card has firmware version %u.%u.%u, minimum "
  3261. "supported firmware is %u.%u.%u.\n", major, minor,
  3262. micro, exp_major, exp_minor, exp_micro);
  3263. return -EFAULT;
  3264. }
  3265. return 0;
  3266. }
  3267. /* Is the given firmware API compatible with the one the driver was compiled
  3268. * with?
  3269. */
  3270. static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
  3271. {
  3272. /* short circuit if it's the exact same firmware version */
  3273. if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
  3274. return 1;
  3275. #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
  3276. if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
  3277. SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
  3278. return 1;
  3279. #undef SAME_INTF
  3280. return 0;
  3281. }
  3282. /* The firmware in the filesystem is usable, but should it be installed?
  3283. * This routine explains itself in detail if it indicates the filesystem
  3284. * firmware should be installed.
  3285. */
  3286. static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
  3287. int k, int c)
  3288. {
  3289. const char *reason;
  3290. if (!card_fw_usable) {
  3291. reason = "incompatible or unusable";
  3292. goto install;
  3293. }
  3294. if (k > c) {
  3295. reason = "older than the version supported with this driver";
  3296. goto install;
  3297. }
  3298. return 0;
  3299. install:
  3300. dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
  3301. "installing firmware %u.%u.%u.%u on card.\n",
  3302. FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
  3303. FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
  3304. FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
  3305. FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
  3306. return 1;
  3307. }
/**
 *	t4_prep_fw - decide which firmware to run and install it if needed
 *	@adap: the adapter
 *	@fw_info: driver-compiled firmware description (expected header)
 *	@fw_data: filesystem firmware image, or NULL if none was loaded
 *	@fw_size: size of @fw_data in bytes
 *	@card_fw: caller-supplied buffer; filled with the header of the
 *		firmware currently on the card
 *	@state: current device state (install only when DEV_STATE_UNINIT)
 *	@reset: in/out; cleared if a firmware upgrade already reset the chip
 *
 *	Compares the driver's compiled-in firmware header, the header read
 *	from flash and the (optional) filesystem image, upgrades the card
 *	when appropriate, and records the resulting firmware/TP versions in
 *	the adapter parameters.  Returns 0 on success or a negative errno.
 */
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
	       const u8 *fw_data, unsigned int fw_size,
	       struct fw_hdr *card_fw, enum dev_state state,
	       int *reset)
{
	int ret, card_fw_usable, fs_fw_usable;
	const struct fw_hdr *fs_fw;
	const struct fw_hdr *drv_fw;

	drv_fw = &fw_info->fw_hdr;

	/* Read the header of the firmware on the card */
	ret = t4_read_flash(adap, FLASH_FW_START,
			    sizeof(*card_fw) / sizeof(uint32_t),
			    (uint32_t *)card_fw, 1);
	if (ret == 0) {
		card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
	} else {
		dev_err(adap->pdev_dev,
			"Unable to read card's firmware header: %d\n", ret);
		card_fw_usable = 0;
	}

	/* Check whether the filesystem image (if any) is usable too. */
	if (fw_data != NULL) {
		fs_fw = (const void *)fw_data;
		fs_fw_usable = fw_compatible(drv_fw, fs_fw);
	} else {
		fs_fw = NULL;
		fs_fw_usable = 0;
	}

	if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
	    (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
		/* Common case: the firmware on the card is an exact match and
		 * the filesystem one is an exact match too, or the filesystem
		 * one is absent/incompatible.
		 */
	} else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
		   should_install_fs_fw(adap, card_fw_usable,
					be32_to_cpu(fs_fw->fw_ver),
					be32_to_cpu(card_fw->fw_ver))) {
		ret = t4_fw_upgrade(adap, adap->mbox, fw_data,
				    fw_size, 0);
		if (ret != 0) {
			dev_err(adap->pdev_dev,
				"failed to install firmware: %d\n", ret);
			goto bye;
		}

		/* Installed successfully, update the cached header too. */
		*card_fw = *fs_fw;
		card_fw_usable = 1;
		*reset = 0;	/* already reset as part of load_fw */
	}

	if (!card_fw_usable) {
		uint32_t d, c, k;

		d = be32_to_cpu(drv_fw->fw_ver);
		c = be32_to_cpu(card_fw->fw_ver);
		k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;

		dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
			"chip state %d, "
			"driver compiled with %d.%d.%d.%d, "
			"card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
			state,
			FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
			FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
			FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
			FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
			FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
			FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
		ret = -EINVAL;
		goto bye;
	}

	/* We're using whatever's on the card and it's known to be good. */
	adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
	adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);

bye:
	return ret;
}
  3382. /**
  3383. * t4_flash_erase_sectors - erase a range of flash sectors
  3384. * @adapter: the adapter
  3385. * @start: the first sector to erase
  3386. * @end: the last sector to erase
  3387. *
  3388. * Erases the sectors in the given inclusive range.
  3389. */
  3390. static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
  3391. {
  3392. int ret = 0;
  3393. if (end >= adapter->params.sf_nsec)
  3394. return -EINVAL;
  3395. while (start <= end) {
  3396. if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
  3397. (ret = sf1_write(adapter, 4, 0, 1,
  3398. SF_ERASE_SECTOR | (start << 8))) != 0 ||
  3399. (ret = flash_wait_op(adapter, 14, 500)) != 0) {
  3400. dev_err(adapter->pdev_dev,
  3401. "erase of flash sector %d failed, error %d\n",
  3402. start, ret);
  3403. break;
  3404. }
  3405. start++;
  3406. }
  3407. t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
  3408. return ret;
  3409. }
  3410. /**
  3411. * t4_flash_cfg_addr - return the address of the flash configuration file
  3412. * @adapter: the adapter
  3413. *
  3414. * Return the address within the flash where the Firmware Configuration
  3415. * File is stored.
  3416. */
  3417. unsigned int t4_flash_cfg_addr(struct adapter *adapter)
  3418. {
  3419. if (adapter->params.sf_size == 0x100000)
  3420. return FLASH_FPGA_CFG_START;
  3421. else
  3422. return FLASH_CFG_START;
  3423. }
  3424. /* Return TRUE if the specified firmware matches the adapter. I.e. T4
  3425. * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
  3426. * and emit an error message for mismatched firmware to save our caller the
  3427. * effort ...
  3428. */
  3429. static bool t4_fw_matches_chip(const struct adapter *adap,
  3430. const struct fw_hdr *hdr)
  3431. {
  3432. /* The expression below will return FALSE for any unsupported adapter
  3433. * which will keep us "honest" in the future ...
  3434. */
  3435. if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
  3436. (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
  3437. (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
  3438. return true;
  3439. dev_err(adap->pdev_dev,
  3440. "FW image (%d) is not suitable for this adapter (%d)\n",
  3441. hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
  3442. return false;
  3443. }
/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size in bytes
 *
 *	Write the supplied firmware image to the card's serial flash after
 *	validating its size, header and checksum.  The real version word is
 *	written last so a partially-completed download is detectable.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_start_sec = FLASH_FW_START_SEC;
	unsigned int fw_size = FLASH_FW_MAX_SIZE;
	unsigned int fw_start = FLASH_FW_START;

	/* Sanity-check the image before touching the flash. */
	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > fw_size) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			fw_size);
		return -EFBIG;
	}
	if (!t4_fw_matches_chip(adap, hdr))
		return -EINVAL;

	/* The 32-bit words of a valid image sum to 0xffffffff. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += be32_to_cpu(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
	ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	/* Write the remaining pages of the image one page at a time. */
	addr = fw_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	/* Finally patch in the real version word to mark the image valid. */
	ret = t4_write_flash(adap,
			     fw_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	else
		ret = t4_get_fw_version(adap, &adap->params.fw_vers);
	return ret;
}
  3525. /**
  3526. * t4_phy_fw_ver - return current PHY firmware version
  3527. * @adap: the adapter
  3528. * @phy_fw_ver: return value buffer for PHY firmware version
  3529. *
  3530. * Returns the current version of external PHY firmware on the
  3531. * adapter.
  3532. */
  3533. int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
  3534. {
  3535. u32 param, val;
  3536. int ret;
  3537. param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
  3538. FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
  3539. FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
  3540. FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
  3541. ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
  3542. &param, &val);
  3543. if (ret)
  3544. return ret;
  3545. *phy_fw_ver = val;
  3546. return 0;
  3547. }
  3548. /**
  3549. * t4_load_phy_fw - download port PHY firmware
  3550. * @adap: the adapter
  3551. * @win: the PCI-E Memory Window index to use for t4_memory_rw()
  3552. * @win_lock: the lock to use to guard the memory copy
  3553. * @phy_fw_version: function to check PHY firmware versions
  3554. * @phy_fw_data: the PHY firmware image to write
  3555. * @phy_fw_size: image size
  3556. *
  3557. * Transfer the specified PHY firmware to the adapter. If a non-NULL
  3558. * @phy_fw_version is supplied, then it will be used to determine if
  3559. * it's necessary to perform the transfer by comparing the version
  3560. * of any existing adapter PHY firmware with that of the passed in
  3561. * PHY firmware image. If @win_lock is non-NULL then it will be used
  3562. * around the call to t4_memory_rw() which transfers the PHY firmware
  3563. * to the adapter.
  3564. *
  3565. * A negative error number will be returned if an error occurs. If
  3566. * version number support is available and there's no need to upgrade
  3567. * the firmware, 0 will be returned. If firmware is successfully
  3568. * transferred to the adapter, 1 will be retured.
  3569. *
  3570. * NOTE: some adapters only have local RAM to store the PHY firmware. As
  3571. * a result, a RESET of the adapter would cause that RAM to lose its
  3572. * contents. Thus, loading PHY firmware on such adapters must happen
  3573. * after any FW_RESET_CMDs ...
  3574. */
  3575. int t4_load_phy_fw(struct adapter *adap,
  3576. int win, spinlock_t *win_lock,
  3577. int (*phy_fw_version)(const u8 *, size_t),
  3578. const u8 *phy_fw_data, size_t phy_fw_size)
  3579. {
  3580. unsigned long mtype = 0, maddr = 0;
  3581. u32 param, val;
  3582. int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
  3583. int ret;
  3584. /* If we have version number support, then check to see if the adapter
  3585. * already has up-to-date PHY firmware loaded.
  3586. */
  3587. if (phy_fw_version) {
  3588. new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
  3589. ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
  3590. if (ret < 0)
  3591. return ret;
  3592. if (cur_phy_fw_ver >= new_phy_fw_vers) {
  3593. CH_WARN(adap, "PHY Firmware already up-to-date, "
  3594. "version %#x\n", cur_phy_fw_ver);
  3595. return 0;
  3596. }
  3597. }
  3598. /* Ask the firmware where it wants us to copy the PHY firmware image.
  3599. * The size of the file requires a special version of the READ coommand
  3600. * which will pass the file size via the values field in PARAMS_CMD and
  3601. * retrieve the return value from firmware and place it in the same
  3602. * buffer values
  3603. */
  3604. param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
  3605. FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
  3606. FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
  3607. FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
  3608. val = phy_fw_size;
  3609. ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
  3610. &param, &val, 1, true);
  3611. if (ret < 0)
  3612. return ret;
  3613. mtype = val >> 8;
  3614. maddr = (val & 0xff) << 16;
  3615. /* Copy the supplied PHY Firmware image to the adapter memory location
  3616. * allocated by the adapter firmware.
  3617. */
  3618. if (win_lock)
  3619. spin_lock_bh(win_lock);
  3620. ret = t4_memory_rw(adap, win, mtype, maddr,
  3621. phy_fw_size, (__be32 *)phy_fw_data,
  3622. T4_MEMORY_WRITE);
  3623. if (win_lock)
  3624. spin_unlock_bh(win_lock);
  3625. if (ret)
  3626. return ret;
  3627. /* Tell the firmware that the PHY firmware image has been written to
  3628. * RAM and it can now start copying it over to the PHYs. The chip
  3629. * firmware will RESET the affected PHYs as part of this operation
  3630. * leaving them running the new PHY firmware image.
  3631. */
  3632. param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
  3633. FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
  3634. FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
  3635. FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
  3636. ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
  3637. &param, &val, 30000);
  3638. /* If we have version number support, then check to see that the new
  3639. * firmware got loaded properly.
  3640. */
  3641. if (phy_fw_version) {
  3642. ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
  3643. if (ret < 0)
  3644. return ret;
  3645. if (cur_phy_fw_ver != new_phy_fw_vers) {
  3646. CH_WARN(adap, "PHY Firmware did not update: "
  3647. "version on adapter %#x, "
  3648. "version flashed %#x\n",
  3649. cur_phy_fw_ver, new_phy_fw_vers);
  3650. return -ENXIO;
  3651. }
  3652. }
  3653. return 1;
  3654. }
  3655. /**
  3656. * t4_fwcache - firmware cache operation
  3657. * @adap: the adapter
  3658. * @op : the operation (flush or flush and invalidate)
  3659. */
  3660. int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
  3661. {
  3662. struct fw_params_cmd c;
  3663. memset(&c, 0, sizeof(c));
  3664. c.op_to_vfn =
  3665. cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
  3666. FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
  3667. FW_PARAMS_CMD_PFN_V(adap->pf) |
  3668. FW_PARAMS_CMD_VFN_V(0));
  3669. c.retval_len16 = cpu_to_be32(FW_LEN16(c));
  3670. c.param[0].mnem =
  3671. cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
  3672. FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
  3673. c.param[0].val = cpu_to_be32(op);
  3674. return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
  3675. }
/**
 *	t4_cim_read_pif_la - read the CIM PIF logic analyzer
 *	@adap: the adapter
 *	@pif_req: where to copy the request half of the PIF LA entries
 *	@pif_rsp: where to copy the response half of the PIF LA entries
 *	@pif_req_wrptr: where to store the request write pointer (may be NULL)
 *	@pif_rsp_wrptr: where to store the response write pointer (may be NULL)
 *
 *	Dumps the CIM PIF logic analyzer contents into the supplied buffers.
 *	LA capture (LADBGEN) is temporarily disabled while the entries are
 *	read out and the original debug configuration is restored afterwards.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	/* Disable LA capture while we read it out; restored at the end. */
	cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
	if (cfg & LADBGEN_F)
		t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);

	/* Start reading from the current request/response write pointers. */
	val = t4_read_reg(adap, CIM_DEBUGSTS_A);
	req = POLADBGWRPTR_G(val);
	rsp = PILADBGWRPTR_G(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			/* Select the entry, then read the PO (request) and
			 * PI (response) debug data for it.
			 */
			t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
				     PILADBGRDPTR_V(rsp));
			*pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
			*pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
			req++;
			rsp++;
		}
		/* Six entries are read per group; advance past the remaining
		 * two slots of each group of eight (pointers wrap via mask).
		 */
		req = (req + 2) & POLADBGRDPTR_M;
		rsp = (rsp + 2) & PILADBGRDPTR_M;
	}

	/* Restore the original debug configuration. */
	t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
}
/**
 *	t4_cim_read_ma_la - read the CIM MA logic analyzer
 *	@adap: the adapter
 *	@ma_req: where to copy the request half of the MA LA entries
 *	@ma_rsp: where to copy the response half of the MA LA entries
 *
 *	Dumps the CIM MA logic analyzer contents into the supplied buffers.
 *	LA capture (LADBGEN) is temporarily disabled while the entries are
 *	read out and the original debug configuration is restored afterwards.
 */
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
{
	u32 cfg;
	int i, j, idx;

	/* Disable LA capture while we read it out; restored at the end. */
	cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
	if (cfg & LADBGEN_F)
		t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);

	for (i = 0; i < CIM_MALA_SIZE; i++) {
		for (j = 0; j < 5; j++) {
			/* Five entries are read from each group of eight. */
			idx = 8 * i + j;
			t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
				     PILADBGRDPTR_V(idx));
			*ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
			*ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
		}
	}

	/* Restore the original debug configuration. */
	t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
}
/**
 *	t4_ulprx_read_la - read the ULP RX logic analyzer
 *	@adap: the adapter
 *	@la_buf: where to store the LA contents
 *
 *	Reads the 8 ULP RX LA buffers into @la_buf.  The data is stored
 *	interleaved: word j of buffer i lands at la_buf[8 * j + i], since
 *	the destination pointer starts at la_buf + i and advances by 8
 *	per read.
 */
void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
{
	unsigned int i, j;

	for (i = 0; i < 8; i++) {
		u32 *p = la_buf + i;

		/* Select LA buffer i and rewind its read pointer to the
		 * current write pointer before dumping ULPRX_LA_SIZE words.
		 */
		t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
		j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
		t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
		for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
			*p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
	}
}
/* Port Capability bits relevant to link advertisement: all of the speed
 * bits plus the autonegotiation bit.
 */
#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
		     FW_PORT_CAP32_ANEG)
  3738. /**
  3739. * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
  3740. * @caps16: a 16-bit Port Capabilities value
  3741. *
  3742. * Returns the equivalent 32-bit Port Capabilities value.
  3743. */
  3744. static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
  3745. {
  3746. fw_port_cap32_t caps32 = 0;
  3747. #define CAP16_TO_CAP32(__cap) \
  3748. do { \
  3749. if (caps16 & FW_PORT_CAP_##__cap) \
  3750. caps32 |= FW_PORT_CAP32_##__cap; \
  3751. } while (0)
  3752. CAP16_TO_CAP32(SPEED_100M);
  3753. CAP16_TO_CAP32(SPEED_1G);
  3754. CAP16_TO_CAP32(SPEED_25G);
  3755. CAP16_TO_CAP32(SPEED_10G);
  3756. CAP16_TO_CAP32(SPEED_40G);
  3757. CAP16_TO_CAP32(SPEED_100G);
  3758. CAP16_TO_CAP32(FC_RX);
  3759. CAP16_TO_CAP32(FC_TX);
  3760. CAP16_TO_CAP32(ANEG);
  3761. CAP16_TO_CAP32(FORCE_PAUSE);
  3762. CAP16_TO_CAP32(MDIAUTO);
  3763. CAP16_TO_CAP32(MDISTRAIGHT);
  3764. CAP16_TO_CAP32(FEC_RS);
  3765. CAP16_TO_CAP32(FEC_BASER_RS);
  3766. CAP16_TO_CAP32(802_3_PAUSE);
  3767. CAP16_TO_CAP32(802_3_ASM_DIR);
  3768. #undef CAP16_TO_CAP32
  3769. return caps32;
  3770. }
  3771. /**
  3772. * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
  3773. * @caps32: a 32-bit Port Capabilities value
  3774. *
  3775. * Returns the equivalent 16-bit Port Capabilities value. Note that
  3776. * not all 32-bit Port Capabilities can be represented in the 16-bit
  3777. * Port Capabilities and some fields/values may not make it.
  3778. */
  3779. static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
  3780. {
  3781. fw_port_cap16_t caps16 = 0;
  3782. #define CAP32_TO_CAP16(__cap) \
  3783. do { \
  3784. if (caps32 & FW_PORT_CAP32_##__cap) \
  3785. caps16 |= FW_PORT_CAP_##__cap; \
  3786. } while (0)
  3787. CAP32_TO_CAP16(SPEED_100M);
  3788. CAP32_TO_CAP16(SPEED_1G);
  3789. CAP32_TO_CAP16(SPEED_10G);
  3790. CAP32_TO_CAP16(SPEED_25G);
  3791. CAP32_TO_CAP16(SPEED_40G);
  3792. CAP32_TO_CAP16(SPEED_100G);
  3793. CAP32_TO_CAP16(FC_RX);
  3794. CAP32_TO_CAP16(FC_TX);
  3795. CAP32_TO_CAP16(802_3_PAUSE);
  3796. CAP32_TO_CAP16(802_3_ASM_DIR);
  3797. CAP32_TO_CAP16(ANEG);
  3798. CAP32_TO_CAP16(FORCE_PAUSE);
  3799. CAP32_TO_CAP16(MDIAUTO);
  3800. CAP32_TO_CAP16(MDISTRAIGHT);
  3801. CAP32_TO_CAP16(FEC_RS);
  3802. CAP32_TO_CAP16(FEC_BASER_RS);
  3803. #undef CAP32_TO_CAP16
  3804. return caps16;
  3805. }
  3806. /* Translate Firmware Port Capabilities Pause specification to Common Code */
  3807. static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
  3808. {
  3809. enum cc_pause cc_pause = 0;
  3810. if (fw_pause & FW_PORT_CAP32_FC_RX)
  3811. cc_pause |= PAUSE_RX;
  3812. if (fw_pause & FW_PORT_CAP32_FC_TX)
  3813. cc_pause |= PAUSE_TX;
  3814. return cc_pause;
  3815. }
  3816. /* Translate Common Code Pause specification into Firmware Port Capabilities */
  3817. static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
  3818. {
  3819. fw_port_cap32_t fw_pause = 0;
  3820. if (cc_pause & PAUSE_RX)
  3821. fw_pause |= FW_PORT_CAP32_FC_RX;
  3822. if (cc_pause & PAUSE_TX)
  3823. fw_pause |= FW_PORT_CAP32_FC_TX;
  3824. if (!(cc_pause & PAUSE_AUTONEG))
  3825. fw_pause |= FW_PORT_CAP32_FORCE_PAUSE;
  3826. return fw_pause;
  3827. }
  3828. /* Translate Firmware Forward Error Correction specification to Common Code */
  3829. static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
  3830. {
  3831. enum cc_fec cc_fec = 0;
  3832. if (fw_fec & FW_PORT_CAP32_FEC_RS)
  3833. cc_fec |= FEC_RS;
  3834. if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
  3835. cc_fec |= FEC_BASER_RS;
  3836. return cc_fec;
  3837. }
  3838. /* Translate Common Code Forward Error Correction specification to Firmware */
  3839. static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
  3840. {
  3841. fw_port_cap32_t fw_fec = 0;
  3842. if (cc_fec & FEC_RS)
  3843. fw_fec |= FW_PORT_CAP32_FEC_RS;
  3844. if (cc_fec & FEC_BASER_RS)
  3845. fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
  3846. return fw_fec;
  3847. }
  3848. /**
  3849. * t4_link_l1cfg - apply link configuration to MAC/PHY
  3850. * @adapter: the adapter
  3851. * @mbox: the Firmware Mailbox to use
  3852. * @port: the Port ID
  3853. * @lc: the Port's Link Configuration
  3854. *
  3855. * Set up a port's MAC and PHY according to a desired link configuration.
  3856. * - If the PHY can auto-negotiate first decide what to advertise, then
  3857. * enable/disable auto-negotiation as desired, and reset.
  3858. * - If the PHY does not auto-negotiate just reset it.
  3859. * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
  3860. * otherwise do it later based on the outcome of auto-negotiation.
  3861. */
  3862. int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
  3863. unsigned int port, struct link_config *lc,
  3864. bool sleep_ok, int timeout)
  3865. {
  3866. unsigned int fw_caps = adapter->params.fw_caps_support;
  3867. fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap;
  3868. struct fw_port_cmd cmd;
  3869. unsigned int fw_mdi;
  3870. int ret;
  3871. fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps);
  3872. /* Convert driver coding of Pause Frame Flow Control settings into the
  3873. * Firmware's API.
  3874. */
  3875. fw_fc = cc_to_fwcap_pause(lc->requested_fc);
  3876. /* Convert Common Code Forward Error Control settings into the
  3877. * Firmware's API. If the current Requested FEC has "Automatic"
  3878. * (IEEE 802.3) specified, then we use whatever the Firmware
  3879. * sent us as part of it's IEEE 802.3-based interpratation of
  3880. * the Transceiver Module EPROM FEC parameters. Otherwise we
  3881. * use whatever is in the current Requested FEC settings.
  3882. */
  3883. if (lc->requested_fec & FEC_AUTO)
  3884. cc_fec = fwcap_to_cc_fec(lc->def_acaps);
  3885. else
  3886. cc_fec = lc->requested_fec;
  3887. fw_fec = cc_to_fwcap_fec(cc_fec);
  3888. /* Figure out what our Requested Port Capabilities are going to be.
  3889. */
  3890. if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
  3891. rcap = lc->acaps | fw_fc | fw_fec;
  3892. lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
  3893. lc->fec = cc_fec;
  3894. } else if (lc->autoneg == AUTONEG_DISABLE) {
  3895. rcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
  3896. lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
  3897. lc->fec = cc_fec;
  3898. } else {
  3899. rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
  3900. }
  3901. /* Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so
  3902. * we need to exclude this from this check in order to maintain
  3903. * compatibility ...
  3904. */
  3905. if ((rcap & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
  3906. dev_err(adapter->pdev_dev,
  3907. "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n",
  3908. rcap, lc->pcaps);
  3909. return -EINVAL;
  3910. }
  3911. /* And send that on to the Firmware ...
  3912. */
  3913. memset(&cmd, 0, sizeof(cmd));
  3914. cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
  3915. FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
  3916. FW_PORT_CMD_PORTID_V(port));
  3917. cmd.action_to_len16 =
  3918. cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
  3919. ? FW_PORT_ACTION_L1_CFG
  3920. : FW_PORT_ACTION_L1_CFG32) |
  3921. FW_LEN16(cmd));
  3922. if (fw_caps == FW_CAPS16)
  3923. cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
  3924. else
  3925. cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
  3926. ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL,
  3927. sleep_ok, timeout);
  3928. if (ret) {
  3929. dev_err(adapter->pdev_dev,
  3930. "Requested Port Capabilities %#x rejected, error %d\n",
  3931. rcap, -ret);
  3932. return ret;
  3933. }
  3934. return ret;
  3935. }
  3936. /**
  3937. * t4_restart_aneg - restart autonegotiation
  3938. * @adap: the adapter
  3939. * @mbox: mbox to use for the FW command
  3940. * @port: the port id
  3941. *
  3942. * Restarts autonegotiation for the selected port.
  3943. */
  3944. int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
  3945. {
  3946. struct fw_port_cmd c;
  3947. memset(&c, 0, sizeof(c));
  3948. c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
  3949. FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
  3950. FW_PORT_CMD_PORTID_V(port));
  3951. c.action_to_len16 =
  3952. cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
  3953. FW_LEN16(c));
  3954. c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP32_ANEG);
  3955. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  3956. }
/* Platform-specific handler invoked when an intr_info entry matches. */
typedef void (*int_handler_t)(struct adapter *adap);

/* One entry of an interrupt-cause dispatch table; a table is terminated
 * by an entry whose mask is 0.
 */
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};
  3965. /**
  3966. * t4_handle_intr_status - table driven interrupt handler
  3967. * @adapter: the adapter that generated the interrupt
  3968. * @reg: the interrupt status register to process
  3969. * @acts: table of interrupt actions
  3970. *
  3971. * A table driven interrupt handler that applies a set of masks to an
  3972. * interrupt status word and performs the corresponding actions if the
  3973. * interrupts described by the mask have occurred. The actions include
  3974. * optionally emitting a warning or alert message. The table is terminated
  3975. * by an entry specifying mask 0. Returns the number of fatal interrupt
  3976. * conditions.
  3977. */
  3978. static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
  3979. const struct intr_info *acts)
  3980. {
  3981. int fatal = 0;
  3982. unsigned int mask = 0;
  3983. unsigned int status = t4_read_reg(adapter, reg);
  3984. for ( ; acts->mask; ++acts) {
  3985. if (!(status & acts->mask))
  3986. continue;
  3987. if (acts->fatal) {
  3988. fatal++;
  3989. dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
  3990. status & acts->mask);
  3991. } else if (acts->msg && printk_ratelimit())
  3992. dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
  3993. status & acts->mask);
  3994. if (acts->int_handler)
  3995. acts->int_handler(adapter);
  3996. mask |= acts->mask;
  3997. }
  3998. status &= mask;
  3999. if (status) /* clear processed interrupts */
  4000. t4_write_reg(adapter, reg, status);
  4001. return fatal;
  4002. }
  4003. /*
  4004. * Interrupt handler for the PCIE module.
  4005. */
  4006. static void pcie_intr_handler(struct adapter *adapter)
  4007. {
  4008. static const struct intr_info sysbus_intr_info[] = {
  4009. { RNPP_F, "RXNP array parity error", -1, 1 },
  4010. { RPCP_F, "RXPC array parity error", -1, 1 },
  4011. { RCIP_F, "RXCIF array parity error", -1, 1 },
  4012. { RCCP_F, "Rx completions control array parity error", -1, 1 },
  4013. { RFTP_F, "RXFT array parity error", -1, 1 },
  4014. { 0 }
  4015. };
  4016. static const struct intr_info pcie_port_intr_info[] = {
  4017. { TPCP_F, "TXPC array parity error", -1, 1 },
  4018. { TNPP_F, "TXNP array parity error", -1, 1 },
  4019. { TFTP_F, "TXFT array parity error", -1, 1 },
  4020. { TCAP_F, "TXCA array parity error", -1, 1 },
  4021. { TCIP_F, "TXCIF array parity error", -1, 1 },
  4022. { RCAP_F, "RXCA array parity error", -1, 1 },
  4023. { OTDD_F, "outbound request TLP discarded", -1, 1 },
  4024. { RDPE_F, "Rx data parity error", -1, 1 },
  4025. { TDUE_F, "Tx uncorrectable data error", -1, 1 },
  4026. { 0 }
  4027. };
  4028. static const struct intr_info pcie_intr_info[] = {
  4029. { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
  4030. { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
  4031. { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
  4032. { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
  4033. { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
  4034. { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
  4035. { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
  4036. { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
  4037. { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
  4038. { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
  4039. { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
  4040. { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
  4041. { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
  4042. { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
  4043. { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
  4044. { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
  4045. { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
  4046. { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
  4047. { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
  4048. { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
  4049. { FIDPERR_F, "PCI FID parity error", -1, 1 },
  4050. { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
  4051. { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
  4052. { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
  4053. { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
  4054. { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
  4055. { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
  4056. { PCIESINT_F, "PCI core secondary fault", -1, 1 },
  4057. { PCIEPINT_F, "PCI core primary fault", -1, 1 },
  4058. { UNXSPLCPLERR_F, "PCI unexpected split completion error",
  4059. -1, 0 },
  4060. { 0 }
  4061. };
  4062. static struct intr_info t5_pcie_intr_info[] = {
  4063. { MSTGRPPERR_F, "Master Response Read Queue parity error",
  4064. -1, 1 },
  4065. { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
  4066. { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
  4067. { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
  4068. { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
  4069. { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
  4070. { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
  4071. { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
  4072. -1, 1 },
  4073. { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
  4074. -1, 1 },
  4075. { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
  4076. { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
  4077. { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
  4078. { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
  4079. { DREQWRPERR_F, "PCI DMA channel write request parity error",
  4080. -1, 1 },
  4081. { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
  4082. { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
  4083. { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
  4084. { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
  4085. { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
  4086. { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
  4087. { FIDPERR_F, "PCI FID parity error", -1, 1 },
  4088. { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
  4089. { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
  4090. { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
  4091. { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
  4092. -1, 1 },
  4093. { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
  4094. -1, 1 },
  4095. { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
  4096. { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
  4097. { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
  4098. { READRSPERR_F, "Outbound read error", -1, 0 },
  4099. { 0 }
  4100. };
  4101. int fat;
  4102. if (is_t4(adapter->params.chip))
  4103. fat = t4_handle_intr_status(adapter,
  4104. PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
  4105. sysbus_intr_info) +
  4106. t4_handle_intr_status(adapter,
  4107. PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
  4108. pcie_port_intr_info) +
  4109. t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
  4110. pcie_intr_info);
  4111. else
  4112. fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
  4113. t5_pcie_intr_info);
  4114. if (fat)
  4115. t4_fatal_err(adapter);
  4116. }
  4117. /*
  4118. * TP interrupt handler.
  4119. */
  4120. static void tp_intr_handler(struct adapter *adapter)
  4121. {
  4122. static const struct intr_info tp_intr_info[] = {
  4123. { 0x3fffffff, "TP parity error", -1, 1 },
  4124. { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
  4125. { 0 }
  4126. };
  4127. if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
  4128. t4_fatal_err(adapter);
  4129. }
  4130. /*
  4131. * SGE interrupt handler.
  4132. */
  4133. static void sge_intr_handler(struct adapter *adapter)
  4134. {
  4135. u64 v;
  4136. u32 err;
  4137. static const struct intr_info sge_intr_info[] = {
  4138. { ERR_CPL_EXCEED_IQE_SIZE_F,
  4139. "SGE received CPL exceeding IQE size", -1, 1 },
  4140. { ERR_INVALID_CIDX_INC_F,
  4141. "SGE GTS CIDX increment too large", -1, 0 },
  4142. { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
  4143. { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
  4144. { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
  4145. "SGE IQID > 1023 received CPL for FL", -1, 0 },
  4146. { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
  4147. 0 },
  4148. { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
  4149. 0 },
  4150. { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
  4151. 0 },
  4152. { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
  4153. 0 },
  4154. { ERR_ING_CTXT_PRIO_F,
  4155. "SGE too many priority ingress contexts", -1, 0 },
  4156. { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
  4157. { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
  4158. { 0 }
  4159. };
  4160. static struct intr_info t4t5_sge_intr_info[] = {
  4161. { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
  4162. { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
  4163. { ERR_EGR_CTXT_PRIO_F,
  4164. "SGE too many priority egress contexts", -1, 0 },
  4165. { 0 }
  4166. };
  4167. v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
  4168. ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
  4169. if (v) {
  4170. dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
  4171. (unsigned long long)v);
  4172. t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
  4173. t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
  4174. }
  4175. v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
  4176. if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
  4177. v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
  4178. t4t5_sge_intr_info);
  4179. err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
  4180. if (err & ERROR_QID_VALID_F) {
  4181. dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
  4182. ERROR_QID_G(err));
  4183. if (err & UNCAPTURED_ERROR_F)
  4184. dev_err(adapter->pdev_dev,
  4185. "SGE UNCAPTURED_ERROR set (clearing)\n");
  4186. t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
  4187. UNCAPTURED_ERROR_F);
  4188. }
  4189. if (v != 0)
  4190. t4_fatal_err(adapter);
  4191. }
/* Aggregate masks for all CIM outbound/inbound queue parity errors. */
#define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
		      OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
#define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
		      IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{ PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
		{ CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
		{ CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
		{ TIMER0INT_F, "CIM TIMER0 interrupt", -1, 1 },
		{ 0 }
	};
	static const struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT_F, "CIM illegal write", -1, 1 },
		{ ILLRDINT_F, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};
	u32 val, fw_err;
	int fat;

	/* Report a Firmware error, if any, before processing CIM causes. */
	fw_err = t4_read_reg(adapter, PCIE_FW_A);
	if (fw_err & PCIE_FW_ERR_F)
		t4_report_fw_error(adapter);

	/* When the Firmware detects an internal error which normally
	 * wouldn't raise a Host Interrupt, it forces a CIM Timer0 interrupt
	 * in order to make sure the Host sees the Firmware Crash.  So
	 * if we have a Timer0 interrupt and don't see a Firmware Crash,
	 * ignore the Timer0 interrupt.
	 */
	val = t4_read_reg(adapter, CIM_HOST_INT_CAUSE_A);
	if (val & TIMER0INT_F)
		if (!(fw_err & PCIE_FW_ERR_F) ||
		    (PCIE_FW_EVAL_G(fw_err) != PCIE_FW_EVAL_CRASH))
			t4_write_reg(adapter, CIM_HOST_INT_CAUSE_A,
				     TIMER0INT_F);

	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}
  4267. /*
  4268. * ULP RX interrupt handler.
  4269. */
  4270. static void ulprx_intr_handler(struct adapter *adapter)
  4271. {
  4272. static const struct intr_info ulprx_intr_info[] = {
  4273. { 0x1800000, "ULPRX context error", -1, 1 },
  4274. { 0x7fffff, "ULPRX parity error", -1, 1 },
  4275. { 0 }
  4276. };
  4277. if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
  4278. t4_fatal_err(adapter);
  4279. }
  4280. /*
  4281. * ULP TX interrupt handler.
  4282. */
  4283. static void ulptx_intr_handler(struct adapter *adapter)
  4284. {
  4285. static const struct intr_info ulptx_intr_info[] = {
  4286. { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
  4287. 0 },
  4288. { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
  4289. 0 },
  4290. { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
  4291. 0 },
  4292. { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
  4293. 0 },
  4294. { 0xfffffff, "ULPTX parity error", -1, 1 },
  4295. { 0 }
  4296. };
  4297. if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
  4298. t4_fatal_err(adapter);
  4299. }
  4300. /*
  4301. * PM TX interrupt handler.
  4302. */
  4303. static void pmtx_intr_handler(struct adapter *adapter)
  4304. {
  4305. static const struct intr_info pmtx_intr_info[] = {
  4306. { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
  4307. { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
  4308. { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
  4309. { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
  4310. { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
  4311. { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
  4312. { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
  4313. -1, 1 },
  4314. { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
  4315. { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
  4316. { 0 }
  4317. };
  4318. if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
  4319. t4_fatal_err(adapter);
  4320. }
  4321. /*
  4322. * PM RX interrupt handler.
  4323. */
  4324. static void pmrx_intr_handler(struct adapter *adapter)
  4325. {
  4326. static const struct intr_info pmrx_intr_info[] = {
  4327. { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
  4328. { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
  4329. { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
  4330. { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
  4331. -1, 1 },
  4332. { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
  4333. { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
  4334. { 0 }
  4335. };
  4336. if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
  4337. t4_fatal_err(adapter);
  4338. }
  4339. /*
  4340. * CPL switch interrupt handler.
  4341. */
  4342. static void cplsw_intr_handler(struct adapter *adapter)
  4343. {
  4344. static const struct intr_info cplsw_intr_info[] = {
  4345. { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
  4346. { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
  4347. { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
  4348. { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
  4349. { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
  4350. { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
  4351. { 0 }
  4352. };
  4353. if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
  4354. t4_fatal_err(adapter);
  4355. }
  4356. /*
  4357. * LE interrupt handler.
  4358. */
  4359. static void le_intr_handler(struct adapter *adap)
  4360. {
  4361. enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
  4362. static const struct intr_info le_intr_info[] = {
  4363. { LIPMISS_F, "LE LIP miss", -1, 0 },
  4364. { LIP0_F, "LE 0 LIP error", -1, 0 },
  4365. { PARITYERR_F, "LE parity error", -1, 1 },
  4366. { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
  4367. { REQQPARERR_F, "LE request queue parity error", -1, 1 },
  4368. { 0 }
  4369. };
  4370. static struct intr_info t6_le_intr_info[] = {
  4371. { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
  4372. { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
  4373. { TCAMINTPERR_F, "LE parity error", -1, 1 },
  4374. { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
  4375. { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
  4376. { 0 }
  4377. };
  4378. if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
  4379. (chip <= CHELSIO_T5) ?
  4380. le_intr_info : t6_le_intr_info))
  4381. t4_fatal_err(adap);
  4382. }
/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	/* Any set bit in the Rx parity cause field is fatal; they are
	 * reported collectively under one description.
	 */
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ BUBBLE_F, "MPS Tx underflow", -1, 1 },
		{ SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR_F, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	/* Same as mps_tx_intr_info but without BUBBLE_F. */
	static const struct intr_info t6_mps_tx_intr_info[] = {
		{ TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		/* MPS Tx Bubble is normal for T6 */
		{ SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR_F, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
		  -1, 1 },
		{ MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	/* Accumulate fatal indications from each MPS sub-block; the T6 Tx
	 * table is selected at runtime since BUBBLE_F is benign there.
	 */
	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
				    is_t6(adapter->params.chip)
				    ? t6_mps_tx_intr_info
				    : mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
				    mps_cls_intr_info);

	/* Clear the top-level MPS cause register and flush the write. */
	t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
	t4_read_reg(adapter, MPS_INT_CAUSE_A);                    /* flush */
	if (fat)
		t4_fatal_err(adapter);
}
/* The memory interrupt causes we act on: parity, correctable ECC and
 * uncorrectable ECC errors.
 */
#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
		      ECC_UE_INT_CAUSE_F)

/*
 * EDC/MC interrupt handler.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	/* Indexed by @idx: MEM_EDC0, MEM_EDC1, MEM_MC (aka MC0), MEM_MC1. */
	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };

	unsigned int addr, cnt_addr, v;

	/* Select the cause / ECC-status register pair for this memory. */
	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE_A, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
	} else if (idx == MEM_MC) {
		if (is_t4(adapter->params.chip)) {
			addr = MC_INT_CAUSE_A;
			cnt_addr = MC_ECC_STATUS_A;
		} else {
			/* T5+ use the per-controller MC_P register block. */
			addr = MC_P_INT_CAUSE_A;
			cnt_addr = MC_P_ECC_STATUS_A;
		}
	} else {
		/* MEM_MC1: second memory controller instance. */
		addr = MC_REG(MC_P_INT_CAUSE_A, 1);
		cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE_F)
		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
			  name[idx]);
	if (v & ECC_CE_INT_CAUSE_F) {
		u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));

		t4_edc_err_read(adapter, idx);
		/* Write the maximum CECNT value back; presumably this
		 * resets the correctable-error counter — NOTE(review):
		 * confirm against the hardware documentation.
		 */
		t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
		if (printk_ratelimit())
			dev_warn(adapter->pdev_dev,
				 "%u %s correctable ECC data error%s\n",
				 cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE_F)
		dev_alert(adapter->pdev_dev,
			  "%s uncorrectable ECC data error\n", name[idx]);

	/* Acknowledge the causes we handled. */
	t4_write_reg(adapter, addr, v);
	/* Parity and uncorrectable ECC errors are fatal to the adapter. */
	if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
		t4_fatal_err(adapter);
}
  4507. /*
  4508. * MA interrupt handler.
  4509. */
  4510. static void ma_intr_handler(struct adapter *adap)
  4511. {
  4512. u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
  4513. if (status & MEM_PERR_INT_CAUSE_F) {
  4514. dev_alert(adap->pdev_dev,
  4515. "MA parity error, parity status %#x\n",
  4516. t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
  4517. if (is_t5(adap->params.chip))
  4518. dev_alert(adap->pdev_dev,
  4519. "MA parity error, parity status %#x\n",
  4520. t4_read_reg(adap,
  4521. MA_PARITY_ERROR_STATUS2_A));
  4522. }
  4523. if (status & MEM_WRAP_INT_CAUSE_F) {
  4524. v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
  4525. dev_alert(adap->pdev_dev, "MA address wrap-around error by "
  4526. "client %u to address %#x\n",
  4527. MEM_WRAP_CLIENT_NUM_G(v),
  4528. MEM_WRAP_ADDRESS_G(v) << 4);
  4529. }
  4530. t4_write_reg(adap, MA_INT_CAUSE_A, status);
  4531. t4_fatal_err(adap);
  4532. }
  4533. /*
  4534. * SMB interrupt handler.
  4535. */
  4536. static void smb_intr_handler(struct adapter *adap)
  4537. {
  4538. static const struct intr_info smb_intr_info[] = {
  4539. { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
  4540. { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
  4541. { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
  4542. { 0 }
  4543. };
  4544. if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
  4545. t4_fatal_err(adap);
  4546. }
  4547. /*
  4548. * NC-SI interrupt handler.
  4549. */
  4550. static void ncsi_intr_handler(struct adapter *adap)
  4551. {
  4552. static const struct intr_info ncsi_intr_info[] = {
  4553. { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
  4554. { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
  4555. { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
  4556. { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
  4557. { 0 }
  4558. };
  4559. if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
  4560. t4_fatal_err(adap);
  4561. }
  4562. /*
  4563. * XGMAC interrupt handler.
  4564. */
  4565. static void xgmac_intr_handler(struct adapter *adap, int port)
  4566. {
  4567. u32 v, int_cause_reg;
  4568. if (is_t4(adap->params.chip))
  4569. int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
  4570. else
  4571. int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
  4572. v = t4_read_reg(adap, int_cause_reg);
  4573. v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
  4574. if (!v)
  4575. return;
  4576. if (v & TXFIFO_PRTY_ERR_F)
  4577. dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
  4578. port);
  4579. if (v & RXFIFO_PRTY_ERR_F)
  4580. dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
  4581. port);
  4582. t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
  4583. t4_fatal_err(adap);
  4584. }
  4585. /*
  4586. * PL interrupt handler.
  4587. */
  4588. static void pl_intr_handler(struct adapter *adap)
  4589. {
  4590. static const struct intr_info pl_intr_info[] = {
  4591. { FATALPERR_F, "T4 fatal parity error", -1, 1 },
  4592. { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
  4593. { 0 }
  4594. };
  4595. if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
  4596. t4_fatal_err(adap);
  4597. }
/* PF-local interrupt causes we enable. */
#define PF_INTR_MASK (PFSW_F)
/* Global (adapter-wide) causes this driver handles and is allowed to clear. */
#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
		EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
		CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)

/**
 *	t4_slow_intr_handler - control path interrupt handler
 *	@adapter: the adapter
 *
 *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
 *	The designation 'slow' is because it involves register reads, while
 *	data interrupts typically don't involve any MMIOs.
 *
 *	Returns 1 if any cause we own was pending and handled, 0 otherwise.
 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);

	/* Nothing to do unless at least one cause we master is pending. */
	if (!(cause & GLBL_INTR_MASK))
		return 0;
	/* Dispatch each pending cause bit to its module handler. */
	if (cause & CIM_F)
		cim_intr_handler(adapter);
	if (cause & MPS_F)
		mps_intr_handler(adapter);
	if (cause & NCSI_F)
		ncsi_intr_handler(adapter);
	if (cause & PL_F)
		pl_intr_handler(adapter);
	if (cause & SMB_F)
		smb_intr_handler(adapter);
	if (cause & XGMAC0_F)
		xgmac_intr_handler(adapter, 0);
	if (cause & XGMAC1_F)
		xgmac_intr_handler(adapter, 1);
	if (cause & XGMAC_KR0_F)
		xgmac_intr_handler(adapter, 2);
	if (cause & XGMAC_KR1_F)
		xgmac_intr_handler(adapter, 3);
	if (cause & PCIE_F)
		pcie_intr_handler(adapter);
	if (cause & MC_F)
		mem_intr_handler(adapter, MEM_MC);
	/* MC1 is only checked on T5 chips. */
	if (is_t5(adapter->params.chip) && (cause & MC1_F))
		mem_intr_handler(adapter, MEM_MC1);
	if (cause & EDC0_F)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & EDC1_F)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & LE_F)
		le_intr_handler(adapter);
	if (cause & TP_F)
		tp_intr_handler(adapter);
	if (cause & MA_F)
		ma_intr_handler(adapter);
	if (cause & PM_TX_F)
		pmtx_intr_handler(adapter);
	if (cause & PM_RX_F)
		pmrx_intr_handler(adapter);
	if (cause & ULP_RX_F)
		ulprx_intr_handler(adapter);
	if (cause & CPL_SWITCH_F)
		cplsw_intr_handler(adapter);
	if (cause & SGE_F)
		sge_intr_handler(adapter);
	if (cause & ULP_TX_F)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
	(void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
	return 1;
}
/**
 *	t4_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable PF-specific interrupts for the calling function and the top-level
 *	interrupt concentrator for global interrupts.  Interrupts are already
 *	enabled at each module, here we just enable the roots of the interrupt
 *	hierarchies.
 *
 *	Note: this function should be called only when the driver manages
 *	non PF-specific interrupts from the various HW modules.  Only one PCI
 *	function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
	u32 val = 0;
	u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
	/* T6 changed the SOURCEPF field layout within PL_WHOAMI. */
	u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);

	/* These extra SGE error causes are only enabled on pre-T6 chips. */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
	t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
		     ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
		     ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
		     ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
		     ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
		     ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
		     DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
	/* Route global interrupts to this PF by setting its map bit. */
	t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
}
  4697. /**
  4698. * t4_intr_disable - disable interrupts
  4699. * @adapter: the adapter whose interrupts should be disabled
  4700. *
  4701. * Disable interrupts. We only disable the top-level interrupt
  4702. * concentrators. The caller must be a PCI function managing global
  4703. * interrupts.
  4704. */
  4705. void t4_intr_disable(struct adapter *adapter)
  4706. {
  4707. u32 whoami, pf;
  4708. if (pci_channel_offline(adapter->pdev))
  4709. return;
  4710. whoami = t4_read_reg(adapter, PL_WHOAMI_A);
  4711. pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
  4712. SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
  4713. t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
  4714. t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
  4715. }
  4716. unsigned int t4_chip_rss_size(struct adapter *adap)
  4717. {
  4718. if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
  4719. return RSS_NENTRIES;
  4720. else
  4721. return T6_RSS_NENTRIES;
  4722. }
/**
 *	t4_config_rss_range - configure a portion of the RSS mapping table
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@viid: virtual interface whose RSS subtable is to be written
 *	@start: start entry in the table to write
 *	@n: how many table entries to write
 *	@rspq: values for the response queue lookup table
 *	@nrspq: number of values in @rspq
 *
 *	Programs the selected part of the VI's RSS mapping table with the
 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
 *	until the full table range is populated.
 *
 *	The caller must ensure the values in @rspq are in the range allowed for
 *	@viid.
 *
 *	Returns 0 on success or a negative error from the mailbox command.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;	/* one past the last entry */
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
				     FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
				     FW_RSS_IND_TBL_CMD_VIID_V(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
	while (n > 0) {
		int nq = min(n, 32);
		__be32 *qp = &cmd.iq0_to_iq2;

		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		start += nq;
		n -= nq;

		/* Pack three queue ids per 32-bit command word, wrapping
		 * around @rspq when it has fewer values than entries to
		 * write.  The last word may be filled past @nq; presumably
		 * the firmware ignores the excess since niqid bounds the
		 * count — NOTE(review): confirm against the FW interface.
		 */
		while (nq > 0) {
			unsigned int v;

			v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			*qp++ = cpu_to_be32(v);
			nq -= 3;
		}

		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}
  4780. /**
  4781. * t4_config_glbl_rss - configure the global RSS mode
  4782. * @adapter: the adapter
  4783. * @mbox: mbox to use for the FW command
  4784. * @mode: global RSS mode
  4785. * @flags: mode-specific flags
  4786. *
  4787. * Sets the global RSS mode.
  4788. */
  4789. int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
  4790. unsigned int flags)
  4791. {
  4792. struct fw_rss_glb_config_cmd c;
  4793. memset(&c, 0, sizeof(c));
  4794. c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
  4795. FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
  4796. c.retval_len16 = cpu_to_be32(FW_LEN16(c));
  4797. if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
  4798. c.u.manual.mode_pkd =
  4799. cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
  4800. } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
  4801. c.u.basicvirtual.mode_pkd =
  4802. cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
  4803. c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
  4804. } else
  4805. return -EINVAL;
  4806. return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
  4807. }
  4808. /**
  4809. * t4_config_vi_rss - configure per VI RSS settings
  4810. * @adapter: the adapter
  4811. * @mbox: mbox to use for the FW command
  4812. * @viid: the VI id
  4813. * @flags: RSS flags
  4814. * @defq: id of the default RSS queue for the VI.
  4815. *
  4816. * Configures VI-specific RSS properties.
  4817. */
  4818. int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
  4819. unsigned int flags, unsigned int defq)
  4820. {
  4821. struct fw_rss_vi_config_cmd c;
  4822. memset(&c, 0, sizeof(c));
  4823. c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
  4824. FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
  4825. FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
  4826. c.retval_len16 = cpu_to_be32(FW_LEN16(c));
  4827. c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
  4828. FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
  4829. return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
  4830. }
/* Read an RSS table row: request the row via TP_RSS_LKP_TABLE, then poll
 * for LKPTBLROWVLD_F before returning the row contents in *val.
 * NOTE(review): 0xfff00000 appears to be the read-request pattern for the
 * upper bits — confirm against the hardware documentation.
 */
static int rd_rss_row(struct adapter *adap, int row, u32 *val)
{
	t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
	return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
				   5, 0, val);
}
  4838. /**
  4839. * t4_read_rss - read the contents of the RSS mapping table
  4840. * @adapter: the adapter
  4841. * @map: holds the contents of the RSS mapping table
  4842. *
  4843. * Reads the contents of the RSS hash->queue mapping table.
  4844. */
  4845. int t4_read_rss(struct adapter *adapter, u16 *map)
  4846. {
  4847. int i, ret, nentries;
  4848. u32 val;
  4849. nentries = t4_chip_rss_size(adapter);
  4850. for (i = 0; i < nentries / 2; ++i) {
  4851. ret = rd_rss_row(adapter, i, &val);
  4852. if (ret)
  4853. return ret;
  4854. *map++ = LKPTBLQUEUE0_G(val);
  4855. *map++ = LKPTBLQUEUE1_G(val);
  4856. }
  4857. return 0;
  4858. }
  4859. static unsigned int t4_use_ldst(struct adapter *adap)
  4860. {
  4861. return (adap->flags & FW_OK) && !adap->use_bd;
  4862. }
/**
 *	t4_tp_fw_ldst_rw - Access TP indirect register through LDST
 *	@adap: the adapter
 *	@cmd: TP fw ldst address space type
 *	@vals: where the indirect register values are stored/written
 *	@nregs: how many indirect registers to read/write
 *	@start_index: index of first indirect register to read/write
 *	@rw: Read (1) or Write (0)
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Access TP indirect registers through LDST.  One firmware mailbox
 *	command is issued per register; stops and returns the error on the
 *	first failed command.
 */
static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
			    unsigned int nregs, unsigned int start_index,
			    unsigned int rw, bool sleep_ok)
{
	int ret = 0;
	unsigned int i;
	struct fw_ldst_cmd c;

	for (i = 0; i < nregs; i++) {
		memset(&c, 0, sizeof(c));
		c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
						FW_CMD_REQUEST_F |
						(rw ? FW_CMD_READ_F :
						      FW_CMD_WRITE_F) |
						FW_LDST_CMD_ADDRSPACE_V(cmd));
		c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));

		c.u.addrval.addr = cpu_to_be32(start_index + i);
		/* For reads the value field is left zero and filled in by
		 * the firmware reply.
		 */
		c.u.addrval.val  = rw ? 0 : cpu_to_be32(vals[i]);
		ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
				      sleep_ok);
		if (ret)
			return ret;

		if (rw)
			vals[i] = be32_to_cpu(c.u.addrval.val);
	}
	return 0;
}
/**
 *	t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
 *	@adap: the adapter
 *	@reg_addr: Address Register
 *	@reg_data: Data register
 *	@buff: where the indirect register values are stored/written
 *	@nregs: how many indirect registers to read/write
 *	@start_index: index of first indirect register to read/write
 *	@rw: READ(1) or WRITE(0)
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Read/Write TP indirect registers through LDST if possible.
 *	Else, use backdoor access
 **/
static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
			      u32 *buff, u32 nregs, u32 start_index, int rw,
			      bool sleep_ok)
{
	int rc = -EINVAL;	/* stays non-zero unless LDST succeeds */
	int cmd;

	/* Map the indirect address register onto its FW LDST address space. */
	switch (reg_addr) {
	case TP_PIO_ADDR_A:
		cmd = FW_LDST_ADDRSPC_TP_PIO;
		break;
	case TP_TM_PIO_ADDR_A:
		cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
		break;
	case TP_MIB_INDEX_A:
		cmd = FW_LDST_ADDRSPC_TP_MIB;
		break;
	default:
		/* No LDST address space for this register: go backdoor. */
		goto indirect_access;
	}

	if (t4_use_ldst(adap))
		rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
				      sleep_ok);

indirect_access:

	/* Fall back to direct indirect-register access when LDST was not
	 * attempted (rc still -EINVAL) or the LDST command failed.
	 */
	if (rc) {
		if (rw)
			t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
					 start_index);
		else
			t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
					  start_index);
	}
}
/**
 *	t4_tp_pio_read - Read TP PIO registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are written
 *	@nregs: how many indirect registers to read
 *	@start_index: index of first indirect register to read
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Read TP PIO Registers.  Thin wrapper around t4_tp_indirect_rw()
 *	with rw = 1 (read).
 **/
void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		    u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
			  start_index, 1, sleep_ok);
}

/**
 *	t4_tp_pio_write - Write TP PIO registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are stored
 *	@nregs: how many indirect registers to write
 *	@start_index: index of first indirect register to write
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Write TP PIO Registers.  Thin wrapper around t4_tp_indirect_rw()
 *	with rw = 0 (write).
 **/
static void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
			    u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
			  start_index, 0, sleep_ok);
}

/**
 *	t4_tp_tm_pio_read - Read TP TM PIO registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are written
 *	@nregs: how many indirect registers to read
 *	@start_index: index of first indirect register to read
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Read TP TM PIO Registers.  Thin wrapper around t4_tp_indirect_rw()
 *	with rw = 1 (read).
 **/
void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
		       u32 start_index, bool sleep_ok)
{
	t4_tp_indirect_rw(adap, TP_TM_PIO_ADDR_A, TP_TM_PIO_DATA_A, buff,
			  nregs, start_index, 1, sleep_ok);
}

/**
 *	t4_tp_mib_read - Read TP MIB registers
 *	@adap: the adapter
 *	@buff: where the indirect register values are written
 *	@nregs: how many indirect registers to read
 *	@start_index: index of first indirect register to read
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Read TP MIB Registers.  Thin wrapper around t4_tp_indirect_rw()
 *	with rw = 1 (read).
 **/
void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
		    bool sleep_ok)
{
	t4_tp_indirect_rw(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, buff, nregs,
			  start_index, 1, sleep_ok);
}
/**
 *	t4_read_rss_key - read the global RSS key
 *	@adap: the adapter
 *	@key: 10-entry array holding the 320-bit RSS key
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the global 320-bit RSS key (ten consecutive 32-bit TP PIO
 *	registers starting at TP_RSS_SECRET_KEY0).
 */
void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
{
	t4_tp_pio_read(adap, key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
}
/**
 *	t4_write_rss_key - program one of the RSS keys
 *	@adap: the adapter
 *	@key: 10-entry array holding the 320-bit RSS key
 *	@idx: which RSS key to write
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Writes one of the RSS keys with the given 320-bit value.  If @idx is
 *	0..15 the corresponding entry in the RSS key table is written,
 *	otherwise the global RSS key is written.
 */
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
		      bool sleep_ok)
{
	u8 rss_key_addr_cnt = 16;
	u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);

	/* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
	 * allows access to key addresses 16-63 by using KeyWrAddrX
	 * as index[5:4](upper 2) into key table
	 */
	if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
	    (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
		rss_key_addr_cnt = 32;

	/* Write the key value itself, then latch it into the selected
	 * table slot via the VRT register below.
	 */
	t4_tp_pio_write(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);

	if (idx >= 0 && idx < rss_key_addr_cnt) {
		if (rss_key_addr_cnt > 16)
			/* Extended addressing: split @idx across the
			 * KeyWrAddrX and VFWRADDR fields.
			 */
			t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
				     KEYWRADDRX_V(idx >> 4) |
				     T6_VFWRADDR_V(idx) | KEYWREN_F);
		else
			t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
				     KEYWRADDR_V(idx) | KEYWREN_F);
	}
}
/**
 *	t4_read_rss_pf_config - read PF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the PF RSS table to read
 *	@valp: where to store the returned value
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the PF RSS Configuration Table at the specified index and returns
 *	the value found there.  The per-PF entries are consecutive TP PIO
 *	registers starting at TP_RSS_PF0_CONFIG.
 */
void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
			   u32 *valp, bool sleep_ok)
{
	t4_tp_pio_read(adapter, valp, 1, TP_RSS_PF0_CONFIG_A + index, sleep_ok);
}
/**
 *	t4_read_rss_vf_config - read VF RSS Configuration Table
 *	@adapter: the adapter
 *	@index: the entry in the VF RSS table to read
 *	@vfl: where to store the returned VFL
 *	@vfh: where to store the returned VFH
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Reads the VF RSS Configuration Table at the specified index and returns
 *	the (VFL, VFH) values found there.
 */
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
			   u32 *vfl, u32 *vfh, bool sleep_ok)
{
	u32 vrt, mask, data;

	/* T6 widened/moved the VFWRADDR field, so pick the field macros
	 * by chip version.
	 */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
		mask = VFWRADDR_V(VFWRADDR_M);
		data = VFWRADDR_V(index);
	} else {
		mask =  T6_VFWRADDR_V(T6_VFWRADDR_M);
		data = T6_VFWRADDR_V(index);
	}

	/* Request that the index'th VF Table values be read into VFL/VFH.
	 * Read-modify-write of VRT: clear any stale write-enable and
	 * address bits before setting the read request.
	 */
	vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
	vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
	vrt |= data | VFRDEN_F;
	t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);

	/* Grab the VFL/VFH values ...
	 */
	t4_tp_pio_read(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, sleep_ok);
	t4_tp_pio_read(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, sleep_ok);
}
  5105. /**
  5106. * t4_read_rss_pf_map - read PF RSS Map
  5107. * @adapter: the adapter
  5108. * @sleep_ok: if true we may sleep while awaiting command completion
  5109. *
  5110. * Reads the PF RSS Map register and returns its value.
  5111. */
  5112. u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
  5113. {
  5114. u32 pfmap;
  5115. t4_tp_pio_read(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, sleep_ok);
  5116. return pfmap;
  5117. }
  5118. /**
  5119. * t4_read_rss_pf_mask - read PF RSS Mask
  5120. * @adapter: the adapter
  5121. * @sleep_ok: if true we may sleep while awaiting command completion
  5122. *
  5123. * Reads the PF RSS Mask register and returns its value.
  5124. */
  5125. u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
  5126. {
  5127. u32 pfmask;
  5128. t4_tp_pio_read(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, sleep_ok);
  5129. return pfmask;
  5130. }
/**
 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
 *	@adap: the adapter
 *	@v4: holds the TCP/IP counter values
 *	@v6: holds the TCP/IPv6 counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6, bool sleep_ok)
{
	/* Scratch window sized to cover the contiguous MIB range from
	 * OUT_RST up to RXT_SEG_LO; the same window is reused for the
	 * IPv4 and IPv6 blocks by changing the start index.
	 */
	u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];

/* Index of counter x within the window, and accessors for 32- and
 * 64-bit (HI/LO pair) counters.
 */
#define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
#define STAT(x)     STAT_IDX(x)
#undef STAT
#define STAT(x) val[STAT_IDX(x)]
#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       TP_MIB_TCP_OUT_RST_A, sleep_ok);
		v4->tcp_out_rsts = STAT(OUT_RST);
		v4->tcp_in_segs  = STAT64(IN_SEG);
		v4->tcp_out_segs = STAT64(OUT_SEG);
		v4->tcp_retrans_segs = STAT64(RXT_SEG);
	}
	if (v6) {
		t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
			       TP_MIB_TCP_V6OUT_RST_A, sleep_ok);
		v6->tcp_out_rsts = STAT(OUT_RST);
		v6->tcp_in_segs  = STAT64(IN_SEG);
		v6->tcp_out_segs = STAT64(OUT_SEG);
		v6->tcp_retrans_segs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}
/**
 *	t4_tp_get_err_stats - read TP's error MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's error counters.  Each read below fills a
 *	per-channel array of @nchan entries, except the final read which
 *	fetches two adjacent counters starting at ofld_no_neigh (relies on
 *	the struct layout placing them consecutively).
 */
void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
			 bool sleep_ok)
{
	int nchan = adap->params.arch.nchan;

	t4_tp_mib_read(adap, st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A,
		       sleep_ok);
	t4_tp_mib_read(adap, st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A,
		       sleep_ok);
	t4_tp_mib_read(adap, st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A,
		       sleep_ok);
	t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
		       TP_MIB_TNL_CNG_DROP_0_A, sleep_ok);
	t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
		       TP_MIB_OFD_CHN_DROP_0_A, sleep_ok);
	t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A,
		       sleep_ok);
	t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
		       TP_MIB_OFD_VLN_DROP_0_A, sleep_ok);
	t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
		       TP_MIB_TCP_V6IN_ERR_0_A, sleep_ok);
	t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A,
		       sleep_ok);
}
/**
 *	t4_tp_get_cpl_stats - read TP's CPL MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's CPL counters: per-channel incoming
 *	request and outgoing response counts.
 */
void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
			 bool sleep_ok)
{
	int nchan = adap->params.arch.nchan;

	t4_tp_mib_read(adap, st->req, nchan, TP_MIB_CPL_IN_REQ_0_A, sleep_ok);
	t4_tp_mib_read(adap, st->rsp, nchan, TP_MIB_CPL_OUT_RSP_0_A, sleep_ok);
}
/**
 *	t4_tp_get_rdma_stats - read TP's RDMA MIB counters
 *	@adap: the adapter
 *	@st: holds the counter values
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Returns the values of TP's RDMA counters.  Reads two adjacent MIB
 *	counters starting at rqe_dfr_pkt (relies on the struct layout
 *	placing the second counter immediately after it).
 */
void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
			  bool sleep_ok)
{
	t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, TP_MIB_RQE_DFR_PKT_A,
		       sleep_ok);
}
  5228. /**
  5229. * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
  5230. * @adap: the adapter
  5231. * @idx: the port index
  5232. * @st: holds the counter values
  5233. * @sleep_ok: if true we may sleep while awaiting command completion
  5234. *
  5235. * Returns the values of TP's FCoE counters for the selected port.
  5236. */
  5237. void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
  5238. struct tp_fcoe_stats *st, bool sleep_ok)
  5239. {
  5240. u32 val[2];
  5241. t4_tp_mib_read(adap, &st->frames_ddp, 1, TP_MIB_FCOE_DDP_0_A + idx,
  5242. sleep_ok);
  5243. t4_tp_mib_read(adap, &st->frames_drop, 1,
  5244. TP_MIB_FCOE_DROP_0_A + idx, sleep_ok);
  5245. t4_tp_mib_read(adap, val, 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx,
  5246. sleep_ok);
  5247. st->octets_ddp = ((u64)val[0] << 32) | val[1];
  5248. }
  5249. /**
  5250. * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
  5251. * @adap: the adapter
  5252. * @st: holds the counter values
  5253. * @sleep_ok: if true we may sleep while awaiting command completion
  5254. *
  5255. * Returns the values of TP's counters for non-TCP directly-placed packets.
  5256. */
  5257. void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
  5258. bool sleep_ok)
  5259. {
  5260. u32 val[4];
  5261. t4_tp_mib_read(adap, val, 4, TP_MIB_USM_PKTS_A, sleep_ok);
  5262. st->frames = val[0];
  5263. st->drops = val[1];
  5264. st->octets = ((u64)val[2] << 32) | val[3];
  5265. }
  5266. /**
  5267. * t4_read_mtu_tbl - returns the values in the HW path MTU table
  5268. * @adap: the adapter
  5269. * @mtus: where to store the MTU values
  5270. * @mtu_log: where to store the MTU base-2 log (may be %NULL)
  5271. *
  5272. * Reads the HW path MTU table.
  5273. */
  5274. void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
  5275. {
  5276. u32 v;
  5277. int i;
  5278. for (i = 0; i < NMTUS; ++i) {
  5279. t4_write_reg(adap, TP_MTU_TABLE_A,
  5280. MTUINDEX_V(0xff) | MTUVALUE_V(i));
  5281. v = t4_read_reg(adap, TP_MTU_TABLE_A);
  5282. mtus[i] = MTUVALUE_G(v);
  5283. if (mtu_log)
  5284. mtu_log[i] = MTUWIDTH_G(v);
  5285. }
  5286. }
  5287. /**
  5288. * t4_read_cong_tbl - reads the congestion control table
  5289. * @adap: the adapter
  5290. * @incr: where to store the alpha values
  5291. *
  5292. * Reads the additive increments programmed into the HW congestion
  5293. * control table.
  5294. */
  5295. void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
  5296. {
  5297. unsigned int mtu, w;
  5298. for (mtu = 0; mtu < NMTUS; ++mtu)
  5299. for (w = 0; w < NCCTRL_WIN; ++w) {
  5300. t4_write_reg(adap, TP_CCTRL_TABLE_A,
  5301. ROWINDEX_V(0xffff) | (mtu << 5) | w);
  5302. incr[mtu][w] = (u16)t4_read_reg(adap,
  5303. TP_CCTRL_TABLE_A) & 0x1fff;
  5304. }
  5305. }
  5306. /**
  5307. * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
  5308. * @adap: the adapter
  5309. * @addr: the indirect TP register address
  5310. * @mask: specifies the field within the register to modify
  5311. * @val: new value for the field
  5312. *
  5313. * Sets a field of an indirect TP register to the given value.
  5314. */
  5315. void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
  5316. unsigned int mask, unsigned int val)
  5317. {
  5318. t4_write_reg(adap, TP_PIO_ADDR_A, addr);
  5319. val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
  5320. t4_write_reg(adap, TP_PIO_DATA_A, val);
  5321. }
  5322. /**
  5323. * init_cong_ctrl - initialize congestion control parameters
  5324. * @a: the alpha values for congestion control
  5325. * @b: the beta values for congestion control
  5326. *
  5327. * Initialize the congestion control parameters.
  5328. */
  5329. static void init_cong_ctrl(unsigned short *a, unsigned short *b)
  5330. {
  5331. a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
  5332. a[9] = 2;
  5333. a[10] = 3;
  5334. a[11] = 4;
  5335. a[12] = 5;
  5336. a[13] = 6;
  5337. a[14] = 7;
  5338. a[15] = 8;
  5339. a[16] = 9;
  5340. a[17] = 10;
  5341. a[18] = 14;
  5342. a[19] = 17;
  5343. a[20] = 21;
  5344. a[21] = 25;
  5345. a[22] = 30;
  5346. a[23] = 35;
  5347. a[24] = 45;
  5348. a[25] = 60;
  5349. a[26] = 80;
  5350. a[27] = 100;
  5351. a[28] = 200;
  5352. a[29] = 300;
  5353. a[30] = 400;
  5354. a[31] = 500;
  5355. b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
  5356. b[9] = b[10] = 1;
  5357. b[11] = b[12] = 2;
  5358. b[13] = b[14] = b[15] = b[16] = 3;
  5359. b[17] = b[18] = b[19] = b[20] = b[21] = 4;
  5360. b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
  5361. b[28] = b[29] = 6;
  5362. b[30] = b[31] = 7;
  5363. }
/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 * t4_load_mtus - write the MTU and congestion control HW tables
 * @adap: the adapter
 * @mtus: the values for the MTU table
 * @alpha: the values for the congestion control alpha parameter
 * @beta: the values for the congestion control beta parameter
 *
 * Write the HW MTU table with the supplied MTUs and the high-speed
 * congestion control table with the supplied alpha, beta, and MTUs.
 * We write the two tables together because the additive increments
 * depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* Expected average packets outstanding per congestion-control
	 * window; used below to scale the per-window increment.
	 */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		/* fls() gives the MSB position; drop it by one unless the
		 * bit two below the MSB is set, i.e. round the width.
		 */
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
			     MTUWIDTH_V(log2) | MTUVALUE_V(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* Increment scales the payload (MTU minus 40-byte
			 * TCP/IP headers) by alpha over the window's average
			 * packet count, floored at CC_MIN_INCR.
			 */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			/* Table entry encodes MTU index, window, beta and
			 * the computed increment in one register write.
			 */
			t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
  5403. /* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
  5404. * clocks. The formula is
  5405. *
  5406. * bytes/s = bytes256 * 256 * ClkFreq / 4096
  5407. *
  5408. * which is equivalent to
  5409. *
  5410. * bytes/s = 62.5 * bytes256 * ClkFreq_ms
  5411. */
  5412. static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
  5413. {
  5414. u64 v = bytes256 * adap->params.vpd.cclk;
  5415. return v * 62 + v / 2;
  5416. }
  5417. /**
  5418. * t4_get_chan_txrate - get the current per channel Tx rates
  5419. * @adap: the adapter
  5420. * @nic_rate: rates for NIC traffic
  5421. * @ofld_rate: rates for offloaded traffic
  5422. *
  5423. * Return the current Tx rates in bytes/s for NIC and offloaded traffic
  5424. * for each channel.
  5425. */
  5426. void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
  5427. {
  5428. u32 v;
  5429. v = t4_read_reg(adap, TP_TX_TRATE_A);
  5430. nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
  5431. nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
  5432. if (adap->params.arch.nchan == NCHAN) {
  5433. nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
  5434. nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
  5435. }
  5436. v = t4_read_reg(adap, TP_TX_ORATE_A);
  5437. ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
  5438. ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
  5439. if (adap->params.arch.nchan == NCHAN) {
  5440. ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
  5441. ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
  5442. }
  5443. }
  5444. /**
  5445. * t4_set_trace_filter - configure one of the tracing filters
  5446. * @adap: the adapter
  5447. * @tp: the desired trace filter parameters
  5448. * @idx: which filter to configure
  5449. * @enable: whether to enable or disable the filter
  5450. *
  5451. * Configures one of the tracing filters available in HW. If @enable is
  5452. * %0 @tp is not examined and may be %NULL. The user is responsible to
  5453. * set the single/multiple trace mode by writing to MPS_TRC_CFG_A register
  5454. */
  5455. int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
  5456. int idx, int enable)
  5457. {
  5458. int i, ofst = idx * 4;
  5459. u32 data_reg, mask_reg, cfg;
  5460. u32 multitrc = TRCMULTIFILTER_F;
  5461. if (!enable) {
  5462. t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
  5463. return 0;
  5464. }
  5465. cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
  5466. if (cfg & TRCMULTIFILTER_F) {
  5467. /* If multiple tracers are enabled, then maximum
  5468. * capture size is 2.5KB (FIFO size of a single channel)
  5469. * minus 2 flits for CPL_TRACE_PKT header.
  5470. */
  5471. if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
  5472. return -EINVAL;
  5473. } else {
  5474. /* If multiple tracers are disabled, to avoid deadlocks
  5475. * maximum packet capture size of 9600 bytes is recommended.
  5476. * Also in this mode, only trace0 can be enabled and running.
  5477. */
  5478. multitrc = 0;
  5479. if (tp->snap_len > 9600 || idx)
  5480. return -EINVAL;
  5481. }
  5482. if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
  5483. tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
  5484. tp->min_len > TFMINPKTSIZE_M)
  5485. return -EINVAL;
  5486. /* stop the tracer we'll be changing */
  5487. t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
  5488. idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
  5489. data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
  5490. mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
  5491. for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
  5492. t4_write_reg(adap, data_reg, tp->data[i]);
  5493. t4_write_reg(adap, mask_reg, ~tp->mask[i]);
  5494. }
  5495. t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
  5496. TFCAPTUREMAX_V(tp->snap_len) |
  5497. TFMINPKTSIZE_V(tp->min_len));
  5498. t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
  5499. TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
  5500. (is_t4(adap->params.chip) ?
  5501. TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
  5502. T5_TFPORT_V(tp->port) | T5_TFEN_F |
  5503. T5_TFINVERTMATCH_V(tp->invert)));
  5504. return 0;
  5505. }
/**
 * t4_get_trace_filter - query one of the tracing filters
 * @adap: the adapter
 * @tp: the current trace filter parameters
 * @idx: which trace filter to query
 * @enabled: non-zero if the filter is enabled
 *
 * Returns the current settings of one of the HW tracing filters.
 */
void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
			 int *enabled)
{
	u32 ctla, ctlb;
	int i, ofst = idx * 4;
	u32 data_reg, mask_reg;

	ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
	ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);

	/* Enable/port/invert live at different bit positions on T4
	 * versus T5 and later parts.
	 */
	if (is_t4(adap->params.chip)) {
		*enabled = !!(ctla & TFEN_F);
		tp->port = TFPORT_G(ctla);
		tp->invert = !!(ctla & TFINVERTMATCH_F);
	} else {
		*enabled = !!(ctla & T5_TFEN_F);
		tp->port = T5_TFPORT_G(ctla);
		tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
	}
	tp->snap_len = TFCAPTUREMAX_G(ctlb);
	tp->min_len = TFMINPKTSIZE_G(ctlb);
	tp->skip_ofst = TFOFFSET_G(ctla);
	tp->skip_len = TFLENGTH_G(ctla);

	ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
	data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
	mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;

	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
		/* DONT_CARE holds the complement of the mask; the data
		 * words are trimmed to the mask on read-back.
		 */
		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
	}
}
  5544. /**
  5545. * t4_pmtx_get_stats - returns the HW stats from PMTX
  5546. * @adap: the adapter
  5547. * @cnt: where to store the count statistics
  5548. * @cycles: where to store the cycle statistics
  5549. *
  5550. * Returns performance statistics from PMTX.
  5551. */
  5552. void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
  5553. {
  5554. int i;
  5555. u32 data[2];
  5556. for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
  5557. t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
  5558. cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
  5559. if (is_t4(adap->params.chip)) {
  5560. cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
  5561. } else {
  5562. t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
  5563. PM_TX_DBG_DATA_A, data, 2,
  5564. PM_TX_DBG_STAT_MSB_A);
  5565. cycles[i] = (((u64)data[0] << 32) | data[1]);
  5566. }
  5567. }
  5568. }
  5569. /**
  5570. * t4_pmrx_get_stats - returns the HW stats from PMRX
  5571. * @adap: the adapter
  5572. * @cnt: where to store the count statistics
  5573. * @cycles: where to store the cycle statistics
  5574. *
  5575. * Returns performance statistics from PMRX.
  5576. */
  5577. void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
  5578. {
  5579. int i;
  5580. u32 data[2];
  5581. for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
  5582. t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
  5583. cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
  5584. if (is_t4(adap->params.chip)) {
  5585. cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
  5586. } else {
  5587. t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
  5588. PM_RX_DBG_DATA_A, data, 2,
  5589. PM_RX_DBG_STAT_MSB_A);
  5590. cycles[i] = (((u64)data[0] << 32) | data[1]);
  5591. }
  5592. }
  5593. }
  5594. /**
  5595. * compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
  5596. * @adap: the adapter
  5597. * @pidx: the port index
  5598. *
  5599. * Computes and returns a bitmap indicating which MPS buffer groups are
  5600. * associated with the given Port. Bit i is set if buffer group i is
  5601. * used by the Port.
  5602. */
  5603. static inline unsigned int compute_mps_bg_map(struct adapter *adapter,
  5604. int pidx)
  5605. {
  5606. unsigned int chip_version, nports;
  5607. chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
  5608. nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
  5609. switch (chip_version) {
  5610. case CHELSIO_T4:
  5611. case CHELSIO_T5:
  5612. switch (nports) {
  5613. case 1: return 0xf;
  5614. case 2: return 3 << (2 * pidx);
  5615. case 4: return 1 << pidx;
  5616. }
  5617. break;
  5618. case CHELSIO_T6:
  5619. switch (nports) {
  5620. case 2: return 1 << (2 * pidx);
  5621. }
  5622. break;
  5623. }
  5624. dev_err(adapter->pdev_dev, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
  5625. chip_version, nports);
  5626. return 0;
  5627. }
/**
 * t4_get_mps_bg_map - return the buffer groups associated with a port
 * @adapter: the adapter
 * @pidx: the port index
 *
 * Returns a bitmap indicating which MPS buffer groups are associated
 * with the given Port.  Bit i is set if buffer group i is used by the
 * Port.  Returns 0 for an out-of-range @pidx.
 */
unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
{
	u8 *mps_bg_map;
	unsigned int nports;

	nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
	if (pidx >= nports) {
		CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n",
			pidx, nports);
		return 0;
	}

	/* If we've already retrieved/computed this, just return the result.
	 * Note a cached value of 0 is treated as "not yet cached"; 0 only
	 * comes back from compute_mps_bg_map() on an error path.
	 */
	mps_bg_map = adapter->params.mps_bg_map;
	if (mps_bg_map[pidx])
		return mps_bg_map[pidx];

	/* Newer Firmware can tell us what the MPS Buffer Group Map is.
	 * If we're talking to such Firmware, let it tell us.  If the new
	 * API isn't supported, revert back to old hardcoded way.  The value
	 * obtained from Firmware is encoded in below format:
	 *
	 * val = (( MPSBGMAP[Port 3] << 24 ) |
	 *        ( MPSBGMAP[Port 2] << 16 ) |
	 *        ( MPSBGMAP[Port 1] <<  8 ) |
	 *        ( MPSBGMAP[Port 0] <<  0 ))
	 */
	if (adapter->flags & FW_OK) {
		u32 param, val;
		int ret;

		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_MPSBGMAP));
		ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
					 0, 1, &param, &val);
		if (!ret) {
			int p;

			/* Store the BG Map for all of the Ports in order to
			 * avoid more calls to the Firmware in the future.
			 */
			for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
				mps_bg_map[p] = val & 0xff;

			return mps_bg_map[pidx];
		}
	}

	/* Either we're not talking to the Firmware or we're dealing with
	 * older Firmware which doesn't support the new API to get the MPS
	 * Buffer Group Map.  Fall back to computing it ourselves.
	 */
	mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx);
	return mps_bg_map[pidx];
}
  5686. /**
  5687. * t4_get_tp_ch_map - return TP ingress channels associated with a port
  5688. * @adapter: the adapter
  5689. * @pidx: the port index
  5690. *
  5691. * Returns a bitmap indicating which TP Ingress Channels are associated
  5692. * with a given Port. Bit i is set if TP Ingress Channel i is used by
  5693. * the Port.
  5694. */
  5695. unsigned int t4_get_tp_ch_map(struct adapter *adap, int pidx)
  5696. {
  5697. unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
  5698. unsigned int nports = 1 << NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
  5699. if (pidx >= nports) {
  5700. dev_warn(adap->pdev_dev, "TP Port Index %d >= Nports %d\n",
  5701. pidx, nports);
  5702. return 0;
  5703. }
  5704. switch (chip_version) {
  5705. case CHELSIO_T4:
  5706. case CHELSIO_T5:
  5707. /* Note that this happens to be the same values as the MPS
  5708. * Buffer Group Map for these Chips. But we replicate the code
  5709. * here because they're really separate concepts.
  5710. */
  5711. switch (nports) {
  5712. case 1: return 0xf;
  5713. case 2: return 3 << (2 * pidx);
  5714. case 4: return 1 << pidx;
  5715. }
  5716. break;
  5717. case CHELSIO_T6:
  5718. switch (nports) {
  5719. case 1:
  5720. case 2: return 1 << pidx;
  5721. }
  5722. break;
  5723. }
  5724. dev_err(adap->pdev_dev, "Need TP Channel Map for Chip %0x, Nports %d\n",
  5725. chip_version, nports);
  5726. return 0;
  5727. }
  5728. /**
  5729. * t4_get_port_type_description - return Port Type string description
  5730. * @port_type: firmware Port Type enumeration
  5731. */
  5732. const char *t4_get_port_type_description(enum fw_port_type port_type)
  5733. {
  5734. static const char *const port_type_description[] = {
  5735. "Fiber_XFI",
  5736. "Fiber_XAUI",
  5737. "BT_SGMII",
  5738. "BT_XFI",
  5739. "BT_XAUI",
  5740. "KX4",
  5741. "CX4",
  5742. "KX",
  5743. "KR",
  5744. "SFP",
  5745. "BP_AP",
  5746. "BP4_AP",
  5747. "QSFP_10G",
  5748. "QSA",
  5749. "QSFP",
  5750. "BP40_BA",
  5751. "KR4_100G",
  5752. "CR4_QSFP",
  5753. "CR_QSFP",
  5754. "CR2_QSFP",
  5755. "SFP28",
  5756. "KR_SFP28",
  5757. "KR_XLAUI"
  5758. };
  5759. if (port_type < ARRAY_SIZE(port_type_description))
  5760. return port_type_description[port_type];
  5761. return "UNKNOWN";
  5762. }
  5763. /**
  5764. * t4_get_port_stats_offset - collect port stats relative to a previous
  5765. * snapshot
  5766. * @adap: The adapter
  5767. * @idx: The port
  5768. * @stats: Current stats to fill
  5769. * @offset: Previous stats snapshot
  5770. */
  5771. void t4_get_port_stats_offset(struct adapter *adap, int idx,
  5772. struct port_stats *stats,
  5773. struct port_stats *offset)
  5774. {
  5775. u64 *s, *o;
  5776. int i;
  5777. t4_get_port_stats(adap, idx, stats);
  5778. for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
  5779. i < (sizeof(struct port_stats) / sizeof(u64));
  5780. i++, s++, o++)
  5781. *s -= *o;
  5782. }
/**
 * t4_get_port_stats - collect port statistics
 * @adap: the adapter
 * @idx: the port index
 * @p: the stats structure to fill
 *
 * Collect statistics related to the given port from HW.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
	u32 bgmap = t4_get_mps_bg_map(adap, idx);
	u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A);

/* Per-port 64-bit statistic; the per-port register block layout differs
 * between T4 and T5+ parts.
 */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
	T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
/* Chip-common (not per-port) 64-bit statistic. */
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)

	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
	p->tx_drop             = GET_STAT(TX_PORT_DROP);
	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);

	/* On T5+, when the COUNTPAUSE* controls are set the hardware has
	 * folded Tx pause frames into these counters; back them out.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & COUNTPAUSESTATTX_F)
			p->tx_frames_64 -= p->tx_pause;
		if (stat_ctl & COUNTPAUSEMCTX_F)
			p->tx_mcast_frames -= p->tx_pause;
	}

	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);

	/* Same pause-frame adjustment for the Rx direction on T5+. */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
		if (stat_ctl & COUNTPAUSESTATRX_F)
			p->rx_frames_64 -= p->rx_pause;
		if (stat_ctl & COUNTPAUSEMCRX_F)
			p->rx_mcast_frames -= p->rx_pause;
	}

	/* Drop/truncate counters only exist for the buffer groups this
	 * port owns (per the bgmap); report 0 for the rest.
	 */
	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
/**
 * t4_get_lb_stats - collect loopback port statistics
 * @adap: the adapter
 * @idx: the loopback port index
 * @p: the stats structure to fill
 *
 * Return HW statistics for the given loopback port.
 */
void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
	u32 bgmap = t4_get_mps_bg_map(adap, idx);

/* Per-loopback-port 64-bit statistic; register layout differs between
 * T4 and T5+ parts.
 */
#define GET_STAT(name) \
	t4_read_reg64(adap, \
	(is_t4(adap->params.chip) ? \
	PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
	T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
/* Chip-common (not per-port) 64-bit statistic. */
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)

	p->octets           = GET_STAT(BYTES);
	p->frames           = GET_STAT(FRAMES);
	p->bcast_frames     = GET_STAT(BCAST);
	p->mcast_frames     = GET_STAT(MCAST);
	p->ucast_frames     = GET_STAT(UCAST);
	p->error_frames     = GET_STAT(ERROR);
	p->frames_64        = GET_STAT(64B);
	p->frames_65_127    = GET_STAT(65B_127B);
	p->frames_128_255   = GET_STAT(128B_255B);
	p->frames_256_511   = GET_STAT(256B_511B);
	p->frames_512_1023  = GET_STAT(512B_1023B);
	p->frames_1024_1518 = GET_STAT(1024B_1518B);
	p->frames_1519_max  = GET_STAT(1519B_MAX);
	p->drop             = GET_STAT(DROP_FRAMES);

	/* Drop/truncate counters only exist for the buffer groups this
	 * port owns (per the bgmap); report 0 for the rest.
	 */
	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}
  5915. /* t4_mk_filtdelwr - create a delete filter WR
  5916. * @ftid: the filter ID
  5917. * @wr: the filter work request to populate
  5918. * @qid: ingress queue to receive the delete notification
  5919. *
  5920. * Creates a filter work request to delete the supplied filter. If @qid is
  5921. * negative the delete notification is suppressed.
  5922. */
  5923. void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
  5924. {
  5925. memset(wr, 0, sizeof(*wr));
  5926. wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
  5927. wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
  5928. wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
  5929. FW_FILTER_WR_NOREPLY_V(qid < 0));
  5930. wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
  5931. if (qid >= 0)
  5932. wr->rx_chan_rx_rpl_iq =
  5933. cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
  5934. }
/* Initialize the common header of firmware command @var: the FW_<cmd>_CMD
 * opcode, the request flag, the READ/WRITE flag, and the command length
 * in 16-byte units.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
					FW_CMD_REQUEST_F | \
					FW_CMD_##rd_wr##_F); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
  5941. int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
  5942. u32 addr, u32 val)
  5943. {
  5944. u32 ldst_addrspace;
  5945. struct fw_ldst_cmd c;
  5946. memset(&c, 0, sizeof(c));
  5947. ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
  5948. c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
  5949. FW_CMD_REQUEST_F |
  5950. FW_CMD_WRITE_F |
  5951. ldst_addrspace);
  5952. c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
  5953. c.u.addrval.addr = cpu_to_be32(addr);
  5954. c.u.addrval.val = cpu_to_be32(val);
  5955. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  5956. }
  5957. /**
  5958. * t4_mdio_rd - read a PHY register through MDIO
  5959. * @adap: the adapter
  5960. * @mbox: mailbox to use for the FW command
  5961. * @phy_addr: the PHY address
  5962. * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
  5963. * @reg: the register to read
  5964. * @valp: where to store the value
  5965. *
  5966. * Issues a FW command through the given mailbox to read a PHY register.
  5967. */
  5968. int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
  5969. unsigned int mmd, unsigned int reg, u16 *valp)
  5970. {
  5971. int ret;
  5972. u32 ldst_addrspace;
  5973. struct fw_ldst_cmd c;
  5974. memset(&c, 0, sizeof(c));
  5975. ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
  5976. c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
  5977. FW_CMD_REQUEST_F | FW_CMD_READ_F |
  5978. ldst_addrspace);
  5979. c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
  5980. c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
  5981. FW_LDST_CMD_MMD_V(mmd));
  5982. c.u.mdio.raddr = cpu_to_be16(reg);
  5983. ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
  5984. if (ret == 0)
  5985. *valp = be16_to_cpu(c.u.mdio.rval);
  5986. return ret;
  5987. }
  5988. /**
  5989. * t4_mdio_wr - write a PHY register through MDIO
  5990. * @adap: the adapter
  5991. * @mbox: mailbox to use for the FW command
  5992. * @phy_addr: the PHY address
  5993. * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
  5994. * @reg: the register to write
  5995. * @valp: value to write
  5996. *
  5997. * Issues a FW command through the given mailbox to write a PHY register.
  5998. */
  5999. int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
  6000. unsigned int mmd, unsigned int reg, u16 val)
  6001. {
  6002. u32 ldst_addrspace;
  6003. struct fw_ldst_cmd c;
  6004. memset(&c, 0, sizeof(c));
  6005. ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
  6006. c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
  6007. FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
  6008. ldst_addrspace);
  6009. c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
  6010. c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
  6011. FW_LDST_CMD_MMD_V(mmd));
  6012. c.u.mdio.raddr = cpu_to_be16(reg);
  6013. c.u.mdio.rval = cpu_to_be16(val);
  6014. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  6015. }
  6016. /**
  6017. * t4_sge_decode_idma_state - decode the idma state
  6018. * @adap: the adapter
  6019. * @state: the state idma is stuck in
  6020. */
  6021. void t4_sge_decode_idma_state(struct adapter *adapter, int state)
  6022. {
  6023. static const char * const t4_decode[] = {
  6024. "IDMA_IDLE",
  6025. "IDMA_PUSH_MORE_CPL_FIFO",
  6026. "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
  6027. "Not used",
  6028. "IDMA_PHYSADDR_SEND_PCIEHDR",
  6029. "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
  6030. "IDMA_PHYSADDR_SEND_PAYLOAD",
  6031. "IDMA_SEND_FIFO_TO_IMSG",
  6032. "IDMA_FL_REQ_DATA_FL_PREP",
  6033. "IDMA_FL_REQ_DATA_FL",
  6034. "IDMA_FL_DROP",
  6035. "IDMA_FL_H_REQ_HEADER_FL",
  6036. "IDMA_FL_H_SEND_PCIEHDR",
  6037. "IDMA_FL_H_PUSH_CPL_FIFO",
  6038. "IDMA_FL_H_SEND_CPL",
  6039. "IDMA_FL_H_SEND_IP_HDR_FIRST",
  6040. "IDMA_FL_H_SEND_IP_HDR",
  6041. "IDMA_FL_H_REQ_NEXT_HEADER_FL",
  6042. "IDMA_FL_H_SEND_NEXT_PCIEHDR",
  6043. "IDMA_FL_H_SEND_IP_HDR_PADDING",
  6044. "IDMA_FL_D_SEND_PCIEHDR",
  6045. "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
  6046. "IDMA_FL_D_REQ_NEXT_DATA_FL",
  6047. "IDMA_FL_SEND_PCIEHDR",
  6048. "IDMA_FL_PUSH_CPL_FIFO",
  6049. "IDMA_FL_SEND_CPL",
  6050. "IDMA_FL_SEND_PAYLOAD_FIRST",
  6051. "IDMA_FL_SEND_PAYLOAD",
  6052. "IDMA_FL_REQ_NEXT_DATA_FL",
  6053. "IDMA_FL_SEND_NEXT_PCIEHDR",
  6054. "IDMA_FL_SEND_PADDING",
  6055. "IDMA_FL_SEND_COMPLETION_TO_IMSG",
  6056. "IDMA_FL_SEND_FIFO_TO_IMSG",
  6057. "IDMA_FL_REQ_DATAFL_DONE",
  6058. "IDMA_FL_REQ_HEADERFL_DONE",
  6059. };
  6060. static const char * const t5_decode[] = {
  6061. "IDMA_IDLE",
  6062. "IDMA_ALMOST_IDLE",
  6063. "IDMA_PUSH_MORE_CPL_FIFO",
  6064. "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
  6065. "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
  6066. "IDMA_PHYSADDR_SEND_PCIEHDR",
  6067. "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
  6068. "IDMA_PHYSADDR_SEND_PAYLOAD",
  6069. "IDMA_SEND_FIFO_TO_IMSG",
  6070. "IDMA_FL_REQ_DATA_FL",
  6071. "IDMA_FL_DROP",
  6072. "IDMA_FL_DROP_SEND_INC",
  6073. "IDMA_FL_H_REQ_HEADER_FL",
  6074. "IDMA_FL_H_SEND_PCIEHDR",
  6075. "IDMA_FL_H_PUSH_CPL_FIFO",
  6076. "IDMA_FL_H_SEND_CPL",
  6077. "IDMA_FL_H_SEND_IP_HDR_FIRST",
  6078. "IDMA_FL_H_SEND_IP_HDR",
  6079. "IDMA_FL_H_REQ_NEXT_HEADER_FL",
  6080. "IDMA_FL_H_SEND_NEXT_PCIEHDR",
  6081. "IDMA_FL_H_SEND_IP_HDR_PADDING",
  6082. "IDMA_FL_D_SEND_PCIEHDR",
  6083. "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
  6084. "IDMA_FL_D_REQ_NEXT_DATA_FL",
  6085. "IDMA_FL_SEND_PCIEHDR",
  6086. "IDMA_FL_PUSH_CPL_FIFO",
  6087. "IDMA_FL_SEND_CPL",
  6088. "IDMA_FL_SEND_PAYLOAD_FIRST",
  6089. "IDMA_FL_SEND_PAYLOAD",
  6090. "IDMA_FL_REQ_NEXT_DATA_FL",
  6091. "IDMA_FL_SEND_NEXT_PCIEHDR",
  6092. "IDMA_FL_SEND_PADDING",
  6093. "IDMA_FL_SEND_COMPLETION_TO_IMSG",
  6094. };
  6095. static const char * const t6_decode[] = {
  6096. "IDMA_IDLE",
  6097. "IDMA_PUSH_MORE_CPL_FIFO",
  6098. "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
  6099. "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
  6100. "IDMA_PHYSADDR_SEND_PCIEHDR",
  6101. "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
  6102. "IDMA_PHYSADDR_SEND_PAYLOAD",
  6103. "IDMA_FL_REQ_DATA_FL",
  6104. "IDMA_FL_DROP",
  6105. "IDMA_FL_DROP_SEND_INC",
  6106. "IDMA_FL_H_REQ_HEADER_FL",
  6107. "IDMA_FL_H_SEND_PCIEHDR",
  6108. "IDMA_FL_H_PUSH_CPL_FIFO",
  6109. "IDMA_FL_H_SEND_CPL",
  6110. "IDMA_FL_H_SEND_IP_HDR_FIRST",
  6111. "IDMA_FL_H_SEND_IP_HDR",
  6112. "IDMA_FL_H_REQ_NEXT_HEADER_FL",
  6113. "IDMA_FL_H_SEND_NEXT_PCIEHDR",
  6114. "IDMA_FL_H_SEND_IP_HDR_PADDING",
  6115. "IDMA_FL_D_SEND_PCIEHDR",
  6116. "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
  6117. "IDMA_FL_D_REQ_NEXT_DATA_FL",
  6118. "IDMA_FL_SEND_PCIEHDR",
  6119. "IDMA_FL_PUSH_CPL_FIFO",
  6120. "IDMA_FL_SEND_CPL",
  6121. "IDMA_FL_SEND_PAYLOAD_FIRST",
  6122. "IDMA_FL_SEND_PAYLOAD",
  6123. "IDMA_FL_REQ_NEXT_DATA_FL",
  6124. "IDMA_FL_SEND_NEXT_PCIEHDR",
  6125. "IDMA_FL_SEND_PADDING",
  6126. "IDMA_FL_SEND_COMPLETION_TO_IMSG",
  6127. };
  6128. static const u32 sge_regs[] = {
  6129. SGE_DEBUG_DATA_LOW_INDEX_2_A,
  6130. SGE_DEBUG_DATA_LOW_INDEX_3_A,
  6131. SGE_DEBUG_DATA_HIGH_INDEX_10_A,
  6132. };
  6133. const char **sge_idma_decode;
  6134. int sge_idma_decode_nstates;
  6135. int i;
  6136. unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
  6137. /* Select the right set of decode strings to dump depending on the
  6138. * adapter chip type.
  6139. */
  6140. switch (chip_version) {
  6141. case CHELSIO_T4:
  6142. sge_idma_decode = (const char **)t4_decode;
  6143. sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
  6144. break;
  6145. case CHELSIO_T5:
  6146. sge_idma_decode = (const char **)t5_decode;
  6147. sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
  6148. break;
  6149. case CHELSIO_T6:
  6150. sge_idma_decode = (const char **)t6_decode;
  6151. sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
  6152. break;
  6153. default:
  6154. dev_err(adapter->pdev_dev,
  6155. "Unsupported chip version %d\n", chip_version);
  6156. return;
  6157. }
  6158. if (is_t4(adapter->params.chip)) {
  6159. sge_idma_decode = (const char **)t4_decode;
  6160. sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
  6161. } else {
  6162. sge_idma_decode = (const char **)t5_decode;
  6163. sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
  6164. }
  6165. if (state < sge_idma_decode_nstates)
  6166. CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
  6167. else
  6168. CH_WARN(adapter, "idma state %d unknown\n", state);
  6169. for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
  6170. CH_WARN(adapter, "SGE register %#x value %#x\n",
  6171. sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
  6172. }
  6173. /**
  6174. * t4_sge_ctxt_flush - flush the SGE context cache
  6175. * @adap: the adapter
  6176. * @mbox: mailbox to use for the FW command
  6177. * @ctx_type: Egress or Ingress
  6178. *
  6179. * Issues a FW command through the given mailbox to flush the
  6180. * SGE context cache.
  6181. */
  6182. int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
  6183. {
  6184. int ret;
  6185. u32 ldst_addrspace;
  6186. struct fw_ldst_cmd c;
  6187. memset(&c, 0, sizeof(c));
  6188. ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(ctxt_type == CTXT_EGRESS ?
  6189. FW_LDST_ADDRSPC_SGE_EGRC :
  6190. FW_LDST_ADDRSPC_SGE_INGC);
  6191. c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
  6192. FW_CMD_REQUEST_F | FW_CMD_READ_F |
  6193. ldst_addrspace);
  6194. c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
  6195. c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
  6196. ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
  6197. return ret;
  6198. }
/**
 *	t4_fw_hello - establish communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@evt_mbox: mailbox to receive async FW events
 *	@master: specifies the caller's willingness to be the device master
 *	@state: returns the current device state (if non-NULL)
 *
 *	Issues a command to establish communication with FW.  Returns either
 *	an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_clearinit = cpu_to_be32(
		FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
		FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
		FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
					mbox : FW_HELLO_CMD_MBMASTER_M) |
		FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
		FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
		FW_HELLO_CMD_CLEARINIT_F);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so.
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret < 0) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
			t4_report_fw_error(adap);
		return ret;
	}

	/* Decode the firmware's reply: current Master PF and device state. */
	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
	if (state) {
		if (v & FW_HELLO_CMD_ERR_F)
			*state = DEV_STATE_ERR;
		else if (v & FW_HELLO_CMD_INIT_F)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * PCIE_FW_MASTER_M so the test below will work ...
	 */
	if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			/* Poll PCIE_FW every 50ms until the timeout budget
			 * is spent.
			 */
			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialialized are indicated
			 * by the firmware keep waiting till we exaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, PCIE_FW_A);
			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & PCIE_FW_ERR_F)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & PCIE_FW_INIT_F)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == PCIE_FW_MASTER_M &&
			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
				master_mbox = PCIE_FW_MASTER_G(pcie_fw);
			break;
		}
	}

	return master_mbox;
}
  6317. /**
  6318. * t4_fw_bye - end communication with FW
  6319. * @adap: the adapter
  6320. * @mbox: mailbox to use for the FW command
  6321. *
  6322. * Issues a command to terminate communication with FW.
  6323. */
  6324. int t4_fw_bye(struct adapter *adap, unsigned int mbox)
  6325. {
  6326. struct fw_bye_cmd c;
  6327. memset(&c, 0, sizeof(c));
  6328. INIT_CMD(c, BYE, WRITE);
  6329. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  6330. }
/**
 *	t4_early_init - ask FW to initialize the device
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to FW to partially initialize the device.  This
 *	performs initialization that generally doesn't depend on user input.
 */
int t4_early_init(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
  6346. /**
  6347. * t4_fw_reset - issue a reset to FW
  6348. * @adap: the adapter
  6349. * @mbox: mailbox to use for the FW command
  6350. * @reset: specifies the type of reset to perform
  6351. *
  6352. * Issues a reset command of the specified type to FW.
  6353. */
  6354. int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
  6355. {
  6356. struct fw_reset_cmd c;
  6357. memset(&c, 0, sizeof(c));
  6358. INIT_CMD(c, RESET, WRITE);
  6359. c.val = cpu_to_be32(reset);
  6360. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  6361. }
  6362. /**
  6363. * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
  6364. * @adap: the adapter
  6365. * @mbox: mailbox to use for the FW RESET command (if desired)
  6366. * @force: force uP into RESET even if FW RESET command fails
  6367. *
  6368. * Issues a RESET command to firmware (if desired) with a HALT indication
  6369. * and then puts the microprocessor into RESET state. The RESET command
  6370. * will only be issued if a legitimate mailbox is provided (mbox <=
  6371. * PCIE_FW_MASTER_M).
  6372. *
  6373. * This is generally used in order for the host to safely manipulate the
  6374. * adapter without fear of conflicting with whatever the firmware might
  6375. * be doing. The only way out of this state is to RESTART the firmware
  6376. * ...
  6377. */
  6378. static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
  6379. {
  6380. int ret = 0;
  6381. /*
  6382. * If a legitimate mailbox is provided, issue a RESET command
  6383. * with a HALT indication.
  6384. */
  6385. if (mbox <= PCIE_FW_MASTER_M) {
  6386. struct fw_reset_cmd c;
  6387. memset(&c, 0, sizeof(c));
  6388. INIT_CMD(c, RESET, WRITE);
  6389. c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
  6390. c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
  6391. ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  6392. }
  6393. /*
  6394. * Normally we won't complete the operation if the firmware RESET
  6395. * command fails but if our caller insists we'll go ahead and put the
  6396. * uP into RESET. This can be useful if the firmware is hung or even
  6397. * missing ... We'll have to take the risk of putting the uP into
  6398. * RESET without the cooperation of firmware in that case.
  6399. *
  6400. * We also force the firmware's HALT flag to be on in case we bypassed
  6401. * the firmware RESET command above or we're dealing with old firmware
  6402. * which doesn't have the HALT capability. This will serve as a flag
  6403. * for the incoming firmware to know that it's coming out of a HALT
  6404. * rather than a RESET ... if it's new enough to understand that ...
  6405. */
  6406. if (ret == 0 || force) {
  6407. t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
  6408. t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
  6409. PCIE_FW_HALT_F);
  6410. }
  6411. /*
  6412. * And we always return the result of the firmware RESET command
  6413. * even when we force the uP into RESET ...
  6414. */
  6415. return ret;
  6416. }
  6417. /**
  6418. * t4_fw_restart - restart the firmware by taking the uP out of RESET
  6419. * @adap: the adapter
  6420. * @reset: if we want to do a RESET to restart things
  6421. *
  6422. * Restart firmware previously halted by t4_fw_halt(). On successful
  6423. * return the previous PF Master remains as the new PF Master and there
  6424. * is no need to issue a new HELLO command, etc.
  6425. *
  6426. * We do this in two ways:
  6427. *
  6428. * 1. If we're dealing with newer firmware we'll simply want to take
  6429. * the chip's microprocessor out of RESET. This will cause the
  6430. * firmware to start up from its start vector. And then we'll loop
  6431. * until the firmware indicates it's started again (PCIE_FW.HALT
  6432. * reset to 0) or we timeout.
  6433. *
  6434. * 2. If we're dealing with older firmware then we'll need to RESET
  6435. * the chip since older firmware won't recognize the PCIE_FW.HALT
  6436. * flag and automatically RESET itself on startup.
  6437. */
  6438. static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
  6439. {
  6440. if (reset) {
  6441. /*
  6442. * Since we're directing the RESET instead of the firmware
  6443. * doing it automatically, we need to clear the PCIE_FW.HALT
  6444. * bit.
  6445. */
  6446. t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
  6447. /*
  6448. * If we've been given a valid mailbox, first try to get the
  6449. * firmware to do the RESET. If that works, great and we can
  6450. * return success. Otherwise, if we haven't been given a
  6451. * valid mailbox or the RESET command failed, fall back to
  6452. * hitting the chip with a hammer.
  6453. */
  6454. if (mbox <= PCIE_FW_MASTER_M) {
  6455. t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
  6456. msleep(100);
  6457. if (t4_fw_reset(adap, mbox,
  6458. PIORST_F | PIORSTMODE_F) == 0)
  6459. return 0;
  6460. }
  6461. t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
  6462. msleep(2000);
  6463. } else {
  6464. int ms;
  6465. t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
  6466. for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
  6467. if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
  6468. return 0;
  6469. msleep(100);
  6470. ms += 100;
  6471. }
  6472. return -ETIMEDOUT;
  6473. }
  6474. return 0;
  6475. }
/**
 *	t4_fw_upgrade - perform all of the steps necessary to upgrade FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@fw_data: the firmware image to write
 *	@size: image size
 *	@force: force upgrade even if firmware doesn't cooperate
 *
 *	Perform all of the steps necessary for upgrading an adapter's
 *	firmware image.  Normally this requires the cooperation of the
 *	existing firmware in order to halt all existing activities
 *	but if an invalid mailbox token is passed in we skip that step
 *	(though we'll still put the adapter microprocessor into RESET in
 *	that case).
 *
 *	On successful return the new firmware will have been loaded and
 *	the adapter will have been fully RESET losing all previous setup
 *	state.  On unsuccessful return the adapter may be completely hosed ...
 *	positive errno indicates that the adapter is ~probably~ intact, a
 *	negative errno indicates that things are looking bad ...
 */
int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
		  const u8 *fw_data, unsigned int size, int force)
{
	const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
	int reset, ret;

	/* Refuse images built for a different chip generation. */
	if (!t4_fw_matches_chip(adap, fw_hdr))
		return -EINVAL;

	/* Disable FW_OK flag so that mbox commands with FW_OK flag set
	 * wont be sent when we are flashing FW.
	 */
	adap->flags &= ~FW_OK;

	/* Halt the existing firmware (cooperatively if possible). */
	ret = t4_fw_halt(adap, mbox, force);
	if (ret < 0 && !force)
		goto out;

	/* Write the new image to FLASH. */
	ret = t4_load_fw(adap, fw_data, size);
	if (ret < 0)
		goto out;

	/*
	 * If there was a Firmware Configuration File stored in FLASH,
	 * there's a good chance that it won't be compatible with the new
	 * Firmware.  In order to prevent difficult to diagnose adapter
	 * initialization issues, we clear out the Firmware Configuration File
	 * portion of the FLASH .  The user will need to re-FLASH a new
	 * Firmware Configuration File which is compatible with the new
	 * Firmware if that's desired.
	 */
	(void)t4_load_cfg(adap, NULL, 0);

	/*
	 * Older versions of the firmware don't understand the new
	 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
	 * restart.  So for newly loaded older firmware we'll have to do the
	 * RESET for it so it starts up on a clean slate.  We can tell if
	 * the newly loaded firmware will handle this right by checking
	 * its header flags to see if it advertises the capability.
	 */
	reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
	ret = t4_fw_restart(adap, mbox, reset);

	/* Grab potentially new Firmware Device Log parameters so we can see
	 * how healthy the new Firmware is.  It's okay to contact the new
	 * Firmware for these parameters even though, as far as it's
	 * concerned, we've never said "HELLO" to it ...
	 */
	(void)t4_init_devlog_params(adap);

out:
	/* NOTE(review): FW_OK is restored even on the error paths above --
	 * this preserves the pre-upgrade mailbox behavior regardless of
	 * whether the flash succeeded.
	 */
	adap->flags |= FW_OK;
	return ret;
}
  6544. /**
  6545. * t4_fl_pkt_align - return the fl packet alignment
  6546. * @adap: the adapter
  6547. *
  6548. * T4 has a single field to specify the packing and padding boundary.
  6549. * T5 onwards has separate fields for this and hence the alignment for
  6550. * next packet offset is maximum of these two.
  6551. *
  6552. */
  6553. int t4_fl_pkt_align(struct adapter *adap)
  6554. {
  6555. u32 sge_control, sge_control2;
  6556. unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
  6557. sge_control = t4_read_reg(adap, SGE_CONTROL_A);
  6558. /* T4 uses a single control field to specify both the PCIe Padding and
  6559. * Packing Boundary. T5 introduced the ability to specify these
  6560. * separately. The actual Ingress Packet Data alignment boundary
  6561. * within Packed Buffer Mode is the maximum of these two
  6562. * specifications. (Note that it makes no real practical sense to
  6563. * have the Pading Boudary be larger than the Packing Boundary but you
  6564. * could set the chip up that way and, in fact, legacy T4 code would
  6565. * end doing this because it would initialize the Padding Boundary and
  6566. * leave the Packing Boundary initialized to 0 (16 bytes).)
  6567. * Padding Boundary values in T6 starts from 8B,
  6568. * where as it is 32B for T4 and T5.
  6569. */
  6570. if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
  6571. ingpad_shift = INGPADBOUNDARY_SHIFT_X;
  6572. else
  6573. ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
  6574. ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
  6575. fl_align = ingpadboundary;
  6576. if (!is_t4(adap->params.chip)) {
  6577. /* T5 has a weird interpretation of one of the PCIe Packing
  6578. * Boundary values. No idea why ...
  6579. */
  6580. sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
  6581. ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
  6582. if (ingpackboundary == INGPACKBOUNDARY_16B_X)
  6583. ingpackboundary = 16;
  6584. else
  6585. ingpackboundary = 1 << (ingpackboundary +
  6586. INGPACKBOUNDARY_SHIFT_X);
  6587. fl_align = max(ingpadboundary, ingpackboundary);
  6588. }
  6589. return fl_align;
  6590. }
/**
 *	t4_fixup_host_params - fix up host-dependent parameters
 *	@adap: the adapter
 *	@page_size: the host's Base Page Size
 *	@cache_line_size: the host's Cache Line Size
 *
 *	Various registers in T4 contain values which are dependent on the
 *	host's Base Page and Cache Line Sizes.  This function will fix all of
 *	those registers with the appropriate values as passed in ...
 */
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size)
{
	unsigned int page_shift = fls(page_size) - 1;
	/* SGE encodes host page size relative to 1KB (2^10). */
	unsigned int sge_hps = page_shift - 10;
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	/* Free-list alignment is at least 32 bytes. */
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = fls(fl_align) - 1;

	/* Program the same host page size for all eight PFs. */
	t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
		     HOSTPAGESIZEPF0_V(sge_hps) |
		     HOSTPAGESIZEPF1_V(sge_hps) |
		     HOSTPAGESIZEPF2_V(sge_hps) |
		     HOSTPAGESIZEPF3_V(sge_hps) |
		     HOSTPAGESIZEPF4_V(sge_hps) |
		     HOSTPAGESIZEPF5_V(sge_hps) |
		     HOSTPAGESIZEPF6_V(sge_hps) |
		     HOSTPAGESIZEPF7_V(sge_hps));

	if (is_t4(adap->params.chip)) {
		/* T4: a single field controls both padding and packing. */
		t4_set_reg_field(adap, SGE_CONTROL_A,
				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
				 EGRSTATUSPAGESIZE_F,
				 INGPADBOUNDARY_V(fl_align_log -
						  INGPADBOUNDARY_SHIFT_X) |
				 EGRSTATUSPAGESIZE_V(stat_len != 64));
	} else {
		unsigned int pack_align;
		unsigned int ingpad, ingpack;
		unsigned int pcie_cap;

		/* T5 introduced the separation of the Free List Padding and
		 * Packing Boundaries.  Thus, we can select a smaller Padding
		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
		 * Bandwidth, and use a Packing Boundary which is large enough
		 * to avoid false sharing between CPUs, etc.
		 *
		 * For the PCI Link, the smaller the Padding Boundary the
		 * better.  For the Memory Controller, a smaller Padding
		 * Boundary is better until we cross under the Memory Line
		 * Size (the minimum unit of transfer to/from Memory).  If we
		 * have a Padding Boundary which is smaller than the Memory
		 * Line Size, that'll involve a Read-Modify-Write cycle on the
		 * Memory Controller which is never good.
		 */

		/* We want the Packing Boundary to be based on the Cache Line
		 * Size in order to help avoid False Sharing performance
		 * issues between CPUs, etc.  We also want the Packing
		 * Boundary to incorporate the PCI-E Maximum Payload Size.  We
		 * get best performance when the Packing Boundary is a
		 * multiple of the Maximum Payload Size.
		 */
		pack_align = fl_align;
		pcie_cap = pci_find_capability(adap->pdev, PCI_CAP_ID_EXP);
		if (pcie_cap) {
			unsigned int mps, mps_log;
			u16 devctl;

			/* The PCIe Device Control Maximum Payload Size field
			 * [bits 7:5] encodes sizes as powers of 2 starting at
			 * 128 bytes.
			 */
			pci_read_config_word(adap->pdev,
					     pcie_cap + PCI_EXP_DEVCTL,
					     &devctl);
			mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
			mps = 1 << mps_log;
			if (mps > pack_align)
				pack_align = mps;
		}

		/* N.B. T5/T6 have a crazy special interpretation of the "0"
		 * value for the Packing Boundary.  This corresponds to 16
		 * bytes instead of the expected 32 bytes.  So if we want 32
		 * bytes, the best we can really do is 64 bytes ...
		 */
		if (pack_align <= 16) {
			ingpack = INGPACKBOUNDARY_16B_X;
			fl_align = 16;
		} else if (pack_align == 32) {
			ingpack = INGPACKBOUNDARY_64B_X;
			fl_align = 64;
		} else {
			unsigned int pack_align_log = fls(pack_align) - 1;

			ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
			fl_align = pack_align;
		}

		/* Use the smallest Ingress Padding which isn't smaller than
		 * the Memory Controller Read/Write Size.  We'll take that as
		 * being 8 bytes since we don't know of any system with a
		 * wider Memory Controller Bus Width.
		 */
		if (is_t5(adap->params.chip))
			ingpad = INGPADBOUNDARY_32B_X;
		else
			ingpad = T6_INGPADBOUNDARY_8B_X;

		t4_set_reg_field(adap, SGE_CONTROL_A,
				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
				 EGRSTATUSPAGESIZE_F,
				 INGPADBOUNDARY_V(ingpad) |
				 EGRSTATUSPAGESIZE_V(stat_len != 64));
		t4_set_reg_field(adap, SGE_CONTROL2_A,
				 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
				 INGPACKBOUNDARY_V(ingpack));
	}

	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * This is something of a crock since we're using fixed indices into
	 * the array which are also known by the sge.c code and the T4
	 * Firmware Configuration File.  We need to come up with a much better
	 * approach to managing this array.  For now, the first four entries
	 * are:
	 *
	 *   0: Host Page Size
	 *   1: 64KB
	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
	/* Round the 1500/9000-MTU buffer sizes up to the FL alignment. */
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
		     & ~(fl_align-1));
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
		     & ~(fl_align-1));

	/* ULP RX TDDP page size is encoded relative to 4KB (2^12). */
	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));

	return 0;
}
  6732. /**
  6733. * t4_fw_initialize - ask FW to initialize the device
  6734. * @adap: the adapter
  6735. * @mbox: mailbox to use for the FW command
  6736. *
  6737. * Issues a command to FW to partially initialize the device. This
  6738. * performs initialization that generally doesn't depend on user input.
  6739. */
  6740. int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
  6741. {
  6742. struct fw_initialize_cmd c;
  6743. memset(&c, 0, sizeof(c));
  6744. INIT_CMD(c, INITIALIZE, WRITE);
  6745. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  6746. }
  6747. /**
  6748. * t4_query_params_rw - query FW or device parameters
  6749. * @adap: the adapter
  6750. * @mbox: mailbox to use for the FW command
  6751. * @pf: the PF
  6752. * @vf: the VF
  6753. * @nparams: the number of parameters
  6754. * @params: the parameter names
  6755. * @val: the parameter values
  6756. * @rw: Write and read flag
  6757. * @sleep_ok: if true, we may sleep awaiting mbox cmd completion
  6758. *
  6759. * Reads the value of FW or device parameters. Up to 7 parameters can be
  6760. * queried at once.
  6761. */
  6762. int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
  6763. unsigned int vf, unsigned int nparams, const u32 *params,
  6764. u32 *val, int rw, bool sleep_ok)
  6765. {
  6766. int i, ret;
  6767. struct fw_params_cmd c;
  6768. __be32 *p = &c.param[0].mnem;
  6769. if (nparams > 7)
  6770. return -EINVAL;
  6771. memset(&c, 0, sizeof(c));
  6772. c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
  6773. FW_CMD_REQUEST_F | FW_CMD_READ_F |
  6774. FW_PARAMS_CMD_PFN_V(pf) |
  6775. FW_PARAMS_CMD_VFN_V(vf));
  6776. c.retval_len16 = cpu_to_be32(FW_LEN16(c));
  6777. for (i = 0; i < nparams; i++) {
  6778. *p++ = cpu_to_be32(*params++);
  6779. if (rw)
  6780. *p = cpu_to_be32(*(val + i));
  6781. p++;
  6782. }
  6783. ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
  6784. if (ret == 0)
  6785. for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
  6786. *val++ = be32_to_cpu(*p);
  6787. return ret;
  6788. }
/* Sleeping variant of t4_query_params_rw() with @rw = 0 (pure read). */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
				  true);
}
/* Non-sleeping variant of t4_query_params_rw() with @rw = 0 (pure read). */
int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
		       unsigned int vf, unsigned int nparams, const u32 *params,
		       u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
				  false);
}
  6803. /**
  6804. * t4_set_params_timeout - sets FW or device parameters
  6805. * @adap: the adapter
  6806. * @mbox: mailbox to use for the FW command
  6807. * @pf: the PF
  6808. * @vf: the VF
  6809. * @nparams: the number of parameters
  6810. * @params: the parameter names
  6811. * @val: the parameter values
  6812. * @timeout: the timeout time
  6813. *
  6814. * Sets the value of FW or device parameters. Up to 7 parameters can be
  6815. * specified at once.
  6816. */
  6817. int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
  6818. unsigned int pf, unsigned int vf,
  6819. unsigned int nparams, const u32 *params,
  6820. const u32 *val, int timeout)
  6821. {
  6822. struct fw_params_cmd c;
  6823. __be32 *p = &c.param[0].mnem;
  6824. if (nparams > 7)
  6825. return -EINVAL;
  6826. memset(&c, 0, sizeof(c));
  6827. c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
  6828. FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
  6829. FW_PARAMS_CMD_PFN_V(pf) |
  6830. FW_PARAMS_CMD_VFN_V(vf));
  6831. c.retval_len16 = cpu_to_be32(FW_LEN16(c));
  6832. while (nparams--) {
  6833. *p++ = cpu_to_be32(*params++);
  6834. *p++ = cpu_to_be32(*val++);
  6835. }
  6836. return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
  6837. }
  6838. /**
  6839. * t4_set_params - sets FW or device parameters
  6840. * @adap: the adapter
  6841. * @mbox: mailbox to use for the FW command
  6842. * @pf: the PF
  6843. * @vf: the VF
  6844. * @nparams: the number of parameters
  6845. * @params: the parameter names
  6846. * @val: the parameter values
  6847. *
  6848. * Sets the value of FW or device parameters. Up to 7 parameters can be
  6849. * specified at once.
  6850. */
  6851. int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
  6852. unsigned int vf, unsigned int nparams, const u32 *params,
  6853. const u32 *val)
  6854. {
  6855. return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
  6856. FW_CMD_MAX_TIMEOUT);
  6857. }
  6858. /**
  6859. * t4_cfg_pfvf - configure PF/VF resource limits
  6860. * @adap: the adapter
  6861. * @mbox: mailbox to use for the FW command
  6862. * @pf: the PF being configured
  6863. * @vf: the VF being configured
  6864. * @txq: the max number of egress queues
  6865. * @txq_eth_ctrl: the max number of egress Ethernet or control queues
  6866. * @rxqi: the max number of interrupt-capable ingress queues
  6867. * @rxq: the max number of interruptless ingress queues
  6868. * @tc: the PCI traffic class
  6869. * @vi: the max number of virtual interfaces
  6870. * @cmask: the channel access rights mask for the PF/VF
  6871. * @pmask: the port access rights mask for the PF/VF
  6872. * @nexact: the maximum number of exact MPS filters
  6873. * @rcaps: read capabilities
  6874. * @wxcaps: write/execute capabilities
  6875. *
  6876. * Configures resource limits and capabilities for a physical or virtual
  6877. * function.
  6878. */
  6879. int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
  6880. unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
  6881. unsigned int rxqi, unsigned int rxq, unsigned int tc,
  6882. unsigned int vi, unsigned int cmask, unsigned int pmask,
  6883. unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
  6884. {
  6885. struct fw_pfvf_cmd c;
  6886. memset(&c, 0, sizeof(c));
  6887. c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
  6888. FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
  6889. FW_PFVF_CMD_VFN_V(vf));
  6890. c.retval_len16 = cpu_to_be32(FW_LEN16(c));
  6891. c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
  6892. FW_PFVF_CMD_NIQ_V(rxq));
  6893. c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
  6894. FW_PFVF_CMD_PMASK_V(pmask) |
  6895. FW_PFVF_CMD_NEQ_V(txq));
  6896. c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
  6897. FW_PFVF_CMD_NVI_V(vi) |
  6898. FW_PFVF_CMD_NEXACTF_V(nexact));
  6899. c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
  6900. FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
  6901. FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
  6902. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  6903. }
/**
 * t4_alloc_vi - allocate a virtual interface
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @nmac: number of MAC addresses needed (1 to 5)
 * @mac: the MAC addresses of the VI
 * @rss_size: size of RSS table slice associated with this VI
 *
 * Allocates a virtual interface for the given physical port.  If @mac is
 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
 * @mac should be large enough to hold @nmac Ethernet addresses, they are
 * stored consecutively so the space needed is @nmac * 6 bytes.
 * Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
				  FW_CMD_WRITE_F | FW_CMD_EXEC_F |
				  FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
	c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
	c.portid_pkd = FW_VI_CMD_PORTID_V(port);
	/* Firmware encodes the MAC count as (count - 1). */
	c.nmac = nmac - 1;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		/* Copy the extra addresses firmware returned.  Deliberate
		 * fall-through: each case copies its own 6-byte slot and
		 * drops into the lower counts, so for nmac == N all of
		 * slots 1..N-1 are filled (slot 0 was copied above).
		 */
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* Fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* Fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* Fall through */
		case 2:
			memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
	/* The allocated VI id is returned in the command's type_viid field. */
	return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
}
  6957. /**
  6958. * t4_free_vi - free a virtual interface
  6959. * @adap: the adapter
  6960. * @mbox: mailbox to use for the FW command
  6961. * @pf: the PF owning the VI
  6962. * @vf: the VF owning the VI
  6963. * @viid: virtual interface identifiler
  6964. *
  6965. * Free a previously allocated virtual interface.
  6966. */
  6967. int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
  6968. unsigned int vf, unsigned int viid)
  6969. {
  6970. struct fw_vi_cmd c;
  6971. memset(&c, 0, sizeof(c));
  6972. c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
  6973. FW_CMD_REQUEST_F |
  6974. FW_CMD_EXEC_F |
  6975. FW_VI_CMD_PFN_V(pf) |
  6976. FW_VI_CMD_VFN_V(vf));
  6977. c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
  6978. c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
  6979. return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
  6980. }
  6981. /**
  6982. * t4_set_rxmode - set Rx properties of a virtual interface
  6983. * @adap: the adapter
  6984. * @mbox: mailbox to use for the FW command
  6985. * @viid: the VI id
  6986. * @mtu: the new MTU or -1
  6987. * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
  6988. * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
  6989. * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
  6990. * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
  6991. * @sleep_ok: if true we may sleep while awaiting command completion
  6992. *
  6993. * Sets Rx properties of a virtual interface.
  6994. */
  6995. int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
  6996. int mtu, int promisc, int all_multi, int bcast, int vlanex,
  6997. bool sleep_ok)
  6998. {
  6999. struct fw_vi_rxmode_cmd c;
  7000. /* convert to FW values */
  7001. if (mtu < 0)
  7002. mtu = FW_RXMODE_MTU_NO_CHG;
  7003. if (promisc < 0)
  7004. promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
  7005. if (all_multi < 0)
  7006. all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
  7007. if (bcast < 0)
  7008. bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
  7009. if (vlanex < 0)
  7010. vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
  7011. memset(&c, 0, sizeof(c));
  7012. c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
  7013. FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
  7014. FW_VI_RXMODE_CMD_VIID_V(viid));
  7015. c.retval_len16 = cpu_to_be32(FW_LEN16(c));
  7016. c.mtu_to_vlanexen =
  7017. cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
  7018. FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
  7019. FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
  7020. FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
  7021. FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
  7022. return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
  7023. }
  7024. /**
  7025. * t4_free_encap_mac_filt - frees MPS entry at given index
  7026. * @adap: the adapter
  7027. * @viid: the VI id
  7028. * @idx: index of MPS entry to be freed
  7029. * @sleep_ok: call is allowed to sleep
  7030. *
  7031. * Frees the MPS entry at supplied index
  7032. *
  7033. * Returns a negative error number or zero on success
  7034. */
  7035. int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
  7036. int idx, bool sleep_ok)
  7037. {
  7038. struct fw_vi_mac_exact *p;
  7039. u8 addr[] = {0, 0, 0, 0, 0, 0};
  7040. struct fw_vi_mac_cmd c;
  7041. int ret = 0;
  7042. u32 exact;
  7043. memset(&c, 0, sizeof(c));
  7044. c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
  7045. FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
  7046. FW_CMD_EXEC_V(0) |
  7047. FW_VI_MAC_CMD_VIID_V(viid));
  7048. exact = FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC);
  7049. c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
  7050. exact |
  7051. FW_CMD_LEN16_V(1));
  7052. p = c.u.exact;
  7053. p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
  7054. FW_VI_MAC_CMD_IDX_V(idx));
  7055. memcpy(p->macaddr, addr, sizeof(p->macaddr));
  7056. ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
  7057. return ret;
  7058. }
  7059. /**
  7060. * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
  7061. * @adap: the adapter
  7062. * @viid: the VI id
  7063. * @addr: the MAC address
  7064. * @mask: the mask
  7065. * @idx: index of the entry in mps tcam
  7066. * @lookup_type: MAC address for inner (1) or outer (0) header
  7067. * @port_id: the port index
  7068. * @sleep_ok: call is allowed to sleep
  7069. *
  7070. * Removes the mac entry at the specified index using raw mac interface.
  7071. *
  7072. * Returns a negative error number on failure.
  7073. */
  7074. int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
  7075. const u8 *addr, const u8 *mask, unsigned int idx,
  7076. u8 lookup_type, u8 port_id, bool sleep_ok)
  7077. {
  7078. struct fw_vi_mac_cmd c;
  7079. struct fw_vi_mac_raw *p = &c.u.raw;
  7080. u32 val;
  7081. memset(&c, 0, sizeof(c));
  7082. c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
  7083. FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
  7084. FW_CMD_EXEC_V(0) |
  7085. FW_VI_MAC_CMD_VIID_V(viid));
  7086. val = FW_CMD_LEN16_V(1) |
  7087. FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
  7088. c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
  7089. FW_CMD_LEN16_V(val));
  7090. p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) |
  7091. FW_VI_MAC_ID_BASED_FREE);
  7092. /* Lookup Type. Outer header: 0, Inner header: 1 */
  7093. p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
  7094. DATAPORTNUM_V(port_id));
  7095. /* Lookup mask and port mask */
  7096. p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
  7097. DATAPORTNUM_V(DATAPORTNUM_M));
  7098. /* Copy the address and the mask */
  7099. memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
  7100. memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
  7101. return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
  7102. }
/**
 * t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
 * @adap: the adapter
 * @viid: the VI id
 * @addr: the MAC address
 * @mask: the mask
 * @vni: the VNI id for the tunnel protocol
 * @vni_mask: mask for the VNI id
 * @dip_hit: to enable DIP match for the MPS entry
 * @lookup_type: MAC address for inner (1) or outer (0) header
 * @sleep_ok: call is allowed to sleep
 *
 * Allocates an MPS entry with specified MAC address and VNI value.
 *
 * Returns a negative error number or the allocated index for this mac.
 */
int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
			    const u8 *addr, const u8 *mask, unsigned int vni,
			    unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
			    bool sleep_ok)
{
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_vni *p = c.u.exact_vni;
	int ret = 0;
	u32 val;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
				   FW_VI_MAC_CMD_VIID_V(viid));
	val = FW_CMD_LEN16_V(1) |
	      FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC_VNI);
	c.freemacs_to_len16 = cpu_to_be32(val);
	/* FW_VI_MAC_ADD_MAC asks the firmware to pick a free index. */
	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
				      FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));
	memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));

	p->lookup_type_to_vni =
		cpu_to_be32(FW_VI_MAC_CMD_VNI_V(vni) |
			    FW_VI_MAC_CMD_DIP_HIT_V(dip_hit) |
			    FW_VI_MAC_CMD_LOOKUP_TYPE_V(lookup_type));
	p->vni_mask_pkd = cpu_to_be32(FW_VI_MAC_CMD_VNI_MASK_V(vni_mask));

	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret == 0)
		/* On success, the firmware wrote the allocated index back
		 * into the entry's valid_to_idx field.
		 */
		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
	return ret;
}
/**
 * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
 * @adap: the adapter
 * @viid: the VI id
 * @addr: the MAC address
 * @mask: the mask
 * @idx: index at which to add this entry
 * @lookup_type: MAC address for inner (1) or outer (0) header
 * @port_id: the port index
 * @sleep_ok: call is allowed to sleep
 *
 * Adds the mac entry at the specified index using raw mac interface.
 *
 * Returns a negative error number or the allocated index for this mac.
 */
int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
			  const u8 *addr, const u8 *mask, unsigned int idx,
			  u8 lookup_type, u8 port_id, bool sleep_ok)
{
	int ret = 0;
	struct fw_vi_mac_cmd c;
	struct fw_vi_mac_raw *p = &c.u.raw;
	u32 val;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
				   FW_VI_MAC_CMD_VIID_V(viid));
	val = FW_CMD_LEN16_V(1) |
	      FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
	c.freemacs_to_len16 = cpu_to_be32(val);

	/* Specify that this is an inner mac address */
	p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx));

	/* Lookup Type. Outer header: 0, Inner header: 1 */
	p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
				   DATAPORTNUM_V(port_id));
	/* Lookup mask and port mask */
	p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
				    DATAPORTNUM_V(DATAPORTNUM_M));

	/* Copy the address and the mask */
	memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
	memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);

	ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
	if (ret == 0) {
		/* Firmware echoes back the index it used; anything other
		 * than the requested @idx means the slot wasn't granted.
		 */
		ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd));
		if (ret != idx)
			ret = -ENOMEM;
	}
	return ret;
}
/**
 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @free: if true any existing filters for this VI id are first removed
 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
 * @addr: the MAC address(es)
 * @idx: where to store the index of each allocated filter
 * @hash: pointer to hash address filter bitmap
 * @sleep_ok: call is allowed to sleep
 *
 * Allocates an exact-match filter for each of the supplied addresses and
 * sets it to the corresponding address.  If @idx is not %NULL it should
 * have at least @naddr entries, each of which will be set to the index of
 * the filter allocated for the corresponding MAC address.  If a filter
 * could not be allocated for an address its index is set to 0xffff.
 * If @hash is not %NULL addresses that fail to allocate an exact filter
 * are hashed and update the hash filter bitmap pointed at by @hash.
 *
 * Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;
	unsigned int max_naddr = adap->params.arch.mps_tcam_size;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	/* Addresses are submitted in chunks of up to ARRAY_SIZE(c.u.exact)
	 * per mailbox command; @offset is advanced at the bottom of the
	 * loop body.
	 */
	for (offset = 0; offset < naddr ; /* advanced in body */) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
					 rem : ARRAY_SIZE(c.u.exact));
		/* LEN16 covers only the portion of the command actually
		 * occupied by this chunk's entries.
		 */
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
					   FW_CMD_REQUEST_F |
					   FW_CMD_WRITE_F |
					   FW_CMD_EXEC_V(free) |
					   FW_VI_MAC_CMD_VIID_V(viid));
		c.freemacs_to_len16 =
			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
				    FW_CMD_LEN16_V(len16));

		/* Ask the firmware to pick a free index for each address. */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx =
				cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
					    FW_VI_MAC_CMD_IDX_V(
						    FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset + i],
			       sizeof(p->macaddr));
		}

		/* It's okay if we run out of space in our MAC address arena.
		 * Some of the addresses we submit may get stored so we need
		 * to run through the reply to see what the results were ...
		 */
		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret && ret != -FW_ENOMEM)
			break;

		/* Per-entry results: a returned index >= max_naddr means no
		 * exact filter was granted for that address.
		 */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = FW_VI_MAC_CMD_IDX_G(
					be16_to_cpu(p->valid_to_idx));

			if (idx)
				idx[offset + i] = (index >= max_naddr ?
						   0xffff : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL <<
					  hash_mac_addr(addr[offset + i]));
		}

		/* Only clear existing filters on the first chunk. */
		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/* -FW_ENOMEM is best-effort success: report how many were stored. */
	if (ret == 0 || ret == -FW_ENOMEM)
		ret = nfilters;
	return ret;
}
/**
 * t4_free_mac_filt - frees exact-match filters of given MAC addresses
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
 * @addr: the MAC address(es)
 * @sleep_ok: call is allowed to sleep
 *
 * Frees the exact-match filter for each of the supplied addresses
 *
 * Returns a negative error number or the number of filters freed.
 */
int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
		     unsigned int viid, unsigned int naddr,
		     const u8 **addr, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;
	/* TCAM capacity differs between T4 and later chips. */
	unsigned int max_naddr = is_t4(adap->params.chip) ?
				 NUM_MPS_CLS_SRAM_L_INSTANCES :
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	/* Addresses are submitted in chunks of up to ARRAY_SIZE(c.u.exact)
	 * per mailbox command; @offset is advanced at the bottom of the
	 * loop body.
	 */
	for (offset = 0; offset < (int)naddr ; /* advanced in body */) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
					 ? rem
					 : ARRAY_SIZE(c.u.exact));
		/* LEN16 covers only the portion of the command actually
		 * occupied by this chunk's entries.
		 */
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
					   FW_CMD_REQUEST_F |
					   FW_CMD_WRITE_F |
					   FW_CMD_EXEC_V(0) |
					   FW_VI_MAC_CMD_VIID_V(viid));
		c.freemacs_to_len16 =
			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
				    FW_CMD_LEN16_V(len16));

		/* FW_VI_MAC_MAC_BASED_FREE asks the firmware to locate and
		 * free the entry matching each supplied address.
		 */
		for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
			p->valid_to_idx = cpu_to_be16(
				FW_VI_MAC_CMD_VALID_F |
				FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
		}

		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret)
			break;

		/* A returned index < max_naddr indicates the filter for
		 * that address was actually freed.
		 */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = FW_VI_MAC_CMD_IDX_G(
					be16_to_cpu(p->valid_to_idx));

			if (index < max_naddr)
				nfilters++;
		}

		offset += fw_naddr;
		rem -= fw_naddr;
	}

	if (ret == 0)
		ret = nfilters;
	return ret;
}
  7347. /**
  7348. * t4_change_mac - modifies the exact-match filter for a MAC address
  7349. * @adap: the adapter
  7350. * @mbox: mailbox to use for the FW command
  7351. * @viid: the VI id
  7352. * @idx: index of existing filter for old value of MAC address, or -1
  7353. * @addr: the new MAC address value
  7354. * @persist: whether a new MAC allocation should be persistent
  7355. * @add_smt: if true also add the address to the HW SMT
  7356. *
  7357. * Modifies an exact-match filter and sets it to the new MAC address.
  7358. * Note that in general it is not possible to modify the value of a given
  7359. * filter so the generic way to modify an address filter is to free the one
  7360. * being used by the old address value and allocate a new filter for the
  7361. * new address value. @idx can be -1 if the address is a new addition.
  7362. *
  7363. * Returns a negative error number or the index of the filter with the new
  7364. * MAC value.
  7365. */
  7366. int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
  7367. int idx, const u8 *addr, bool persist, bool add_smt)
  7368. {
  7369. int ret, mode;
  7370. struct fw_vi_mac_cmd c;
  7371. struct fw_vi_mac_exact *p = c.u.exact;
  7372. unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
  7373. if (idx < 0) /* new allocation */
  7374. idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
  7375. mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
  7376. memset(&c, 0, sizeof(c));
  7377. c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
  7378. FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
  7379. FW_VI_MAC_CMD_VIID_V(viid));
  7380. c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
  7381. p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
  7382. FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
  7383. FW_VI_MAC_CMD_IDX_V(idx));
  7384. memcpy(p->macaddr, addr, sizeof(p->macaddr));
  7385. ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
  7386. if (ret == 0) {
  7387. ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
  7388. if (ret >= max_mac_addr)
  7389. ret = -ENOMEM;
  7390. }
  7391. return ret;
  7392. }
  7393. /**
  7394. * t4_set_addr_hash - program the MAC inexact-match hash filter
  7395. * @adap: the adapter
  7396. * @mbox: mailbox to use for the FW command
  7397. * @viid: the VI id
  7398. * @ucast: whether the hash filter should also match unicast addresses
  7399. * @vec: the value to be written to the hash filter
  7400. * @sleep_ok: call is allowed to sleep
  7401. *
  7402. * Sets the 64-bit inexact-match hash filter for a virtual interface.
  7403. */
  7404. int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
  7405. bool ucast, u64 vec, bool sleep_ok)
  7406. {
  7407. struct fw_vi_mac_cmd c;
  7408. memset(&c, 0, sizeof(c));
  7409. c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
  7410. FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
  7411. FW_VI_ENABLE_CMD_VIID_V(viid));
  7412. c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
  7413. FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
  7414. FW_CMD_LEN16_V(1));
  7415. c.u.hash.hashvec = cpu_to_be64(vec);
  7416. return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
  7417. }
  7418. /**
  7419. * t4_enable_vi_params - enable/disable a virtual interface
  7420. * @adap: the adapter
  7421. * @mbox: mailbox to use for the FW command
  7422. * @viid: the VI id
  7423. * @rx_en: 1=enable Rx, 0=disable Rx
  7424. * @tx_en: 1=enable Tx, 0=disable Tx
  7425. * @dcb_en: 1=enable delivery of Data Center Bridging messages.
  7426. *
  7427. * Enables/disables a virtual interface. Note that setting DCB Enable
  7428. * only makes sense when enabling a Virtual Interface ...
  7429. */
  7430. int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
  7431. unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
  7432. {
  7433. struct fw_vi_enable_cmd c;
  7434. memset(&c, 0, sizeof(c));
  7435. c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
  7436. FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
  7437. FW_VI_ENABLE_CMD_VIID_V(viid));
  7438. c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
  7439. FW_VI_ENABLE_CMD_EEN_V(tx_en) |
  7440. FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
  7441. FW_LEN16(c));
  7442. return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
  7443. }
  7444. /**
  7445. * t4_enable_vi - enable/disable a virtual interface
  7446. * @adap: the adapter
  7447. * @mbox: mailbox to use for the FW command
  7448. * @viid: the VI id
  7449. * @rx_en: 1=enable Rx, 0=disable Rx
  7450. * @tx_en: 1=enable Tx, 0=disable Tx
  7451. *
  7452. * Enables/disables a virtual interface.
  7453. */
  7454. int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
  7455. bool rx_en, bool tx_en)
  7456. {
  7457. return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
  7458. }
  7459. /**
  7460. * t4_enable_pi_params - enable/disable a Port's Virtual Interface
  7461. * @adap: the adapter
  7462. * @mbox: mailbox to use for the FW command
  7463. * @pi: the Port Information structure
  7464. * @rx_en: 1=enable Rx, 0=disable Rx
  7465. * @tx_en: 1=enable Tx, 0=disable Tx
  7466. * @dcb_en: 1=enable delivery of Data Center Bridging messages.
  7467. *
  7468. * Enables/disables a Port's Virtual Interface. Note that setting DCB
  7469. * Enable only makes sense when enabling a Virtual Interface ...
  7470. * If the Virtual Interface enable/disable operation is successful,
  7471. * we notify the OS-specific code of a potential Link Status change
  7472. * via the OS Contract API t4_os_link_changed().
  7473. */
  7474. int t4_enable_pi_params(struct adapter *adap, unsigned int mbox,
  7475. struct port_info *pi,
  7476. bool rx_en, bool tx_en, bool dcb_en)
  7477. {
  7478. int ret = t4_enable_vi_params(adap, mbox, pi->viid,
  7479. rx_en, tx_en, dcb_en);
  7480. if (ret)
  7481. return ret;
  7482. t4_os_link_changed(adap, pi->port_id,
  7483. rx_en && tx_en && pi->link_cfg.link_ok);
  7484. return 0;
  7485. }
  7486. /**
  7487. * t4_identify_port - identify a VI's port by blinking its LED
  7488. * @adap: the adapter
  7489. * @mbox: mailbox to use for the FW command
  7490. * @viid: the VI id
  7491. * @nblinks: how many times to blink LED at 2.5 Hz
  7492. *
  7493. * Identifies a VI's port by blinking its LED.
  7494. */
  7495. int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
  7496. unsigned int nblinks)
  7497. {
  7498. struct fw_vi_enable_cmd c;
  7499. memset(&c, 0, sizeof(c));
  7500. c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
  7501. FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
  7502. FW_VI_ENABLE_CMD_VIID_V(viid));
  7503. c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
  7504. c.blinkdur = cpu_to_be16(nblinks);
  7505. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  7506. }
  7507. /**
  7508. * t4_iq_stop - stop an ingress queue and its FLs
  7509. * @adap: the adapter
  7510. * @mbox: mailbox to use for the FW command
  7511. * @pf: the PF owning the queues
  7512. * @vf: the VF owning the queues
  7513. * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
  7514. * @iqid: ingress queue id
  7515. * @fl0id: FL0 queue id or 0xffff if no attached FL0
  7516. * @fl1id: FL1 queue id or 0xffff if no attached FL1
  7517. *
  7518. * Stops an ingress queue and its associated FLs, if any. This causes
  7519. * any current or future data/messages destined for these queues to be
  7520. * tossed.
  7521. */
  7522. int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
  7523. unsigned int vf, unsigned int iqtype, unsigned int iqid,
  7524. unsigned int fl0id, unsigned int fl1id)
  7525. {
  7526. struct fw_iq_cmd c;
  7527. memset(&c, 0, sizeof(c));
  7528. c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
  7529. FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
  7530. FW_IQ_CMD_VFN_V(vf));
  7531. c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
  7532. c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
  7533. c.iqid = cpu_to_be16(iqid);
  7534. c.fl0id = cpu_to_be16(fl0id);
  7535. c.fl1id = cpu_to_be16(fl1id);
  7536. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  7537. }
  7538. /**
  7539. * t4_iq_free - free an ingress queue and its FLs
  7540. * @adap: the adapter
  7541. * @mbox: mailbox to use for the FW command
  7542. * @pf: the PF owning the queues
  7543. * @vf: the VF owning the queues
  7544. * @iqtype: the ingress queue type
  7545. * @iqid: ingress queue id
  7546. * @fl0id: FL0 queue id or 0xffff if no attached FL0
  7547. * @fl1id: FL1 queue id or 0xffff if no attached FL1
  7548. *
  7549. * Frees an ingress queue and its associated FLs, if any.
  7550. */
  7551. int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
  7552. unsigned int vf, unsigned int iqtype, unsigned int iqid,
  7553. unsigned int fl0id, unsigned int fl1id)
  7554. {
  7555. struct fw_iq_cmd c;
  7556. memset(&c, 0, sizeof(c));
  7557. c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
  7558. FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
  7559. FW_IQ_CMD_VFN_V(vf));
  7560. c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
  7561. c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
  7562. c.iqid = cpu_to_be16(iqid);
  7563. c.fl0id = cpu_to_be16(fl0id);
  7564. c.fl1id = cpu_to_be16(fl1id);
  7565. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  7566. }
  7567. /**
  7568. * t4_eth_eq_free - free an Ethernet egress queue
  7569. * @adap: the adapter
  7570. * @mbox: mailbox to use for the FW command
  7571. * @pf: the PF owning the queue
  7572. * @vf: the VF owning the queue
  7573. * @eqid: egress queue id
  7574. *
  7575. * Frees an Ethernet egress queue.
  7576. */
  7577. int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
  7578. unsigned int vf, unsigned int eqid)
  7579. {
  7580. struct fw_eq_eth_cmd c;
  7581. memset(&c, 0, sizeof(c));
  7582. c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
  7583. FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
  7584. FW_EQ_ETH_CMD_PFN_V(pf) |
  7585. FW_EQ_ETH_CMD_VFN_V(vf));
  7586. c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
  7587. c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
  7588. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  7589. }
  7590. /**
  7591. * t4_ctrl_eq_free - free a control egress queue
  7592. * @adap: the adapter
  7593. * @mbox: mailbox to use for the FW command
  7594. * @pf: the PF owning the queue
  7595. * @vf: the VF owning the queue
  7596. * @eqid: egress queue id
  7597. *
  7598. * Frees a control egress queue.
  7599. */
  7600. int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
  7601. unsigned int vf, unsigned int eqid)
  7602. {
  7603. struct fw_eq_ctrl_cmd c;
  7604. memset(&c, 0, sizeof(c));
  7605. c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
  7606. FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
  7607. FW_EQ_CTRL_CMD_PFN_V(pf) |
  7608. FW_EQ_CTRL_CMD_VFN_V(vf));
  7609. c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
  7610. c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
  7611. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  7612. }
/**
 * t4_ofld_eq_free - free an offload egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	/* FREE is an "execute" class command, hence FW_CMD_EXEC_F. */
	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
				  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
				  FW_EQ_OFLD_CMD_PFN_V(pf) |
				  FW_EQ_OFLD_CMD_VFN_V(vf));
	c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
	c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
 * t4_link_down_rc_str - return a string for a Link Down Reason Code
 * @link_down_rc: Link Down Reason Code
 *
 * Returns a string representation of the Link Down Reason Code.
 */
static const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	/* Indexed directly by the firmware's Link Down Reason Code. */
	static const char * const reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved",
	};

	/* Guard against codes from newer firmware that we don't know. */
	if (link_down_rc >= ARRAY_SIZE(reason))
		return "Bad Reason Code";

	return reason[link_down_rc];
}
  7659. /**
  7660. * Return the highest speed set in the port capabilities, in Mb/s.
  7661. */
  7662. static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
  7663. {
  7664. #define TEST_SPEED_RETURN(__caps_speed, __speed) \
  7665. do { \
  7666. if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
  7667. return __speed; \
  7668. } while (0)
  7669. TEST_SPEED_RETURN(400G, 400000);
  7670. TEST_SPEED_RETURN(200G, 200000);
  7671. TEST_SPEED_RETURN(100G, 100000);
  7672. TEST_SPEED_RETURN(50G, 50000);
  7673. TEST_SPEED_RETURN(40G, 40000);
  7674. TEST_SPEED_RETURN(25G, 25000);
  7675. TEST_SPEED_RETURN(10G, 10000);
  7676. TEST_SPEED_RETURN(1G, 1000);
  7677. TEST_SPEED_RETURN(100M, 100);
  7678. #undef TEST_SPEED_RETURN
  7679. return 0;
  7680. }
  7681. /**
  7682. * fwcap_to_fwspeed - return highest speed in Port Capabilities
  7683. * @acaps: advertised Port Capabilities
  7684. *
  7685. * Get the highest speed for the port from the advertised Port
  7686. * Capabilities. It will be either the highest speed from the list of
  7687. * speeds or whatever user has set using ethtool.
  7688. */
  7689. static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
  7690. {
  7691. #define TEST_SPEED_RETURN(__caps_speed) \
  7692. do { \
  7693. if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
  7694. return FW_PORT_CAP32_SPEED_##__caps_speed; \
  7695. } while (0)
  7696. TEST_SPEED_RETURN(400G);
  7697. TEST_SPEED_RETURN(200G);
  7698. TEST_SPEED_RETURN(100G);
  7699. TEST_SPEED_RETURN(50G);
  7700. TEST_SPEED_RETURN(40G);
  7701. TEST_SPEED_RETURN(25G);
  7702. TEST_SPEED_RETURN(10G);
  7703. TEST_SPEED_RETURN(1G);
  7704. TEST_SPEED_RETURN(100M);
  7705. #undef TEST_SPEED_RETURN
  7706. return 0;
  7707. }
/**
 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
 *
 * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
 * 32-bit Port Capabilities value.
 */
static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
{
	fw_port_cap32_t linkattr = 0;

	/* Unfortunately the format of the Link Status in the old
	 * 16-bit Port Information message isn't the same as the
	 * 16-bit Port Capabilities bitfield used everywhere else ...
	 * so each pause/speed indication has to be translated bit by bit.
	 */
	if (lstatus & FW_PORT_CMD_RXPAUSE_F)
		linkattr |= FW_PORT_CAP32_FC_RX;
	if (lstatus & FW_PORT_CMD_TXPAUSE_F)
		linkattr |= FW_PORT_CAP32_FC_TX;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
		linkattr |= FW_PORT_CAP32_SPEED_100M;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
		linkattr |= FW_PORT_CAP32_SPEED_1G;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
		linkattr |= FW_PORT_CAP32_SPEED_10G;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
		linkattr |= FW_PORT_CAP32_SPEED_25G;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
		linkattr |= FW_PORT_CAP32_SPEED_40G;
	if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
		linkattr |= FW_PORT_CAP32_SPEED_100G;

	return linkattr;
}
  7740. /**
  7741. * t4_handle_get_port_info - process a FW reply message
  7742. * @pi: the port info
  7743. * @rpl: start of the FW message
  7744. *
  7745. * Processes a GET_PORT_INFO FW reply message.
  7746. */
  7747. void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
  7748. {
  7749. const struct fw_port_cmd *cmd = (const void *)rpl;
  7750. int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
  7751. struct adapter *adapter = pi->adapter;
  7752. struct link_config *lc = &pi->link_cfg;
  7753. int link_ok, linkdnrc;
  7754. enum fw_port_type port_type;
  7755. enum fw_port_module_type mod_type;
  7756. unsigned int speed, fc, fec;
  7757. fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
  7758. /* Extract the various fields from the Port Information message.
  7759. */
  7760. switch (action) {
  7761. case FW_PORT_ACTION_GET_PORT_INFO: {
  7762. u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
  7763. link_ok = (lstatus & FW_PORT_CMD_LSTATUS_F) != 0;
  7764. linkdnrc = FW_PORT_CMD_LINKDNRC_G(lstatus);
  7765. port_type = FW_PORT_CMD_PTYPE_G(lstatus);
  7766. mod_type = FW_PORT_CMD_MODTYPE_G(lstatus);
  7767. pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
  7768. acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
  7769. lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap));
  7770. linkattr = lstatus_to_fwcap(lstatus);
  7771. break;
  7772. }
  7773. case FW_PORT_ACTION_GET_PORT_INFO32: {
  7774. u32 lstatus32;
  7775. lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
  7776. link_ok = (lstatus32 & FW_PORT_CMD_LSTATUS32_F) != 0;
  7777. linkdnrc = FW_PORT_CMD_LINKDNRC32_G(lstatus32);
  7778. port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
  7779. mod_type = FW_PORT_CMD_MODTYPE32_G(lstatus32);
  7780. pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
  7781. acaps = be32_to_cpu(cmd->u.info32.acaps32);
  7782. lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32);
  7783. linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
  7784. break;
  7785. }
  7786. default:
  7787. dev_err(adapter->pdev_dev, "Handle Port Information: Bad Command/Action %#x\n",
  7788. be32_to_cpu(cmd->action_to_len16));
  7789. return;
  7790. }
  7791. fec = fwcap_to_cc_fec(acaps);
  7792. fc = fwcap_to_cc_pause(linkattr);
  7793. speed = fwcap_to_speed(linkattr);
  7794. lc->new_module = false;
  7795. lc->redo_l1cfg = false;
  7796. if (mod_type != pi->mod_type) {
  7797. /* With the newer SFP28 and QSFP28 Transceiver Module Types,
  7798. * various fundamental Port Capabilities which used to be
  7799. * immutable can now change radically. We can now have
  7800. * Speeds, Auto-Negotiation, Forward Error Correction, etc.
  7801. * all change based on what Transceiver Module is inserted.
  7802. * So we need to record the Physical "Port" Capabilities on
  7803. * every Transceiver Module change.
  7804. */
  7805. lc->pcaps = pcaps;
  7806. /* When a new Transceiver Module is inserted, the Firmware
  7807. * will examine its i2c EPROM to determine its type and
  7808. * general operating parameters including things like Forward
  7809. * Error Control, etc. Various IEEE 802.3 standards dictate
  7810. * how to interpret these i2c values to determine default
  7811. * "sutomatic" settings. We record these for future use when
  7812. * the user explicitly requests these standards-based values.
  7813. */
  7814. lc->def_acaps = acaps;
  7815. /* Some versions of the early T6 Firmware "cheated" when
  7816. * handling different Transceiver Modules by changing the
  7817. * underlaying Port Type reported to the Host Drivers. As
  7818. * such we need to capture whatever Port Type the Firmware
  7819. * sends us and record it in case it's different from what we
  7820. * were told earlier. Unfortunately, since Firmware is
  7821. * forever, we'll need to keep this code here forever, but in
  7822. * later T6 Firmware it should just be an assignment of the
  7823. * same value already recorded.
  7824. */
  7825. pi->port_type = port_type;
  7826. pi->mod_type = mod_type;
  7827. lc->new_module = t4_is_inserted_mod_type(mod_type);
  7828. t4_os_portmod_changed(adapter, pi->port_id);
  7829. }
  7830. if (link_ok != lc->link_ok || speed != lc->speed ||
  7831. fc != lc->fc || fec != lc->fec) { /* something changed */
  7832. if (!link_ok && lc->link_ok) {
  7833. lc->link_down_rc = linkdnrc;
  7834. dev_warn(adapter->pdev_dev, "Port %d link down, reason: %s\n",
  7835. pi->tx_chan, t4_link_down_rc_str(linkdnrc));
  7836. }
  7837. lc->link_ok = link_ok;
  7838. lc->speed = speed;
  7839. lc->fc = fc;
  7840. lc->fec = fec;
  7841. lc->lpacaps = lpacaps;
  7842. lc->acaps = acaps & ADVERT_MASK;
  7843. if (!(lc->acaps & FW_PORT_CAP32_ANEG)) {
  7844. lc->autoneg = AUTONEG_DISABLE;
  7845. } else if (lc->acaps & FW_PORT_CAP32_ANEG) {
  7846. lc->autoneg = AUTONEG_ENABLE;
  7847. } else {
  7848. /* When Autoneg is disabled, user needs to set
  7849. * single speed.
  7850. * Similar to cxgb4_ethtool.c: set_link_ksettings
  7851. */
  7852. lc->acaps = 0;
  7853. lc->speed_caps = fwcap_to_fwspeed(acaps);
  7854. lc->autoneg = AUTONEG_DISABLE;
  7855. }
  7856. t4_os_link_changed(adapter, pi->port_id, link_ok);
  7857. }
  7858. if (lc->new_module && lc->redo_l1cfg) {
  7859. struct link_config old_lc;
  7860. int ret;
  7861. /* Save the current L1 Configuration and restore it if an
  7862. * error occurs. We probably should fix the l1_cfg*()
  7863. * routines not to change the link_config when an error
  7864. * occurs ...
  7865. */
  7866. old_lc = *lc;
  7867. ret = t4_link_l1cfg_ns(adapter, adapter->mbox, pi->lport, lc);
  7868. if (ret) {
  7869. *lc = old_lc;
  7870. dev_warn(adapter->pdev_dev,
  7871. "Attempt to update new Transceiver Module settings failed\n");
  7872. }
  7873. }
  7874. lc->new_module = false;
  7875. lc->redo_l1cfg = false;
  7876. }
  7877. /**
  7878. * t4_update_port_info - retrieve and update port information if changed
  7879. * @pi: the port_info
  7880. *
  7881. * We issue a Get Port Information Command to the Firmware and, if
  7882. * successful, we check to see if anything is different from what we
  7883. * last recorded and update things accordingly.
  7884. */
  7885. int t4_update_port_info(struct port_info *pi)
  7886. {
  7887. unsigned int fw_caps = pi->adapter->params.fw_caps_support;
  7888. struct fw_port_cmd port_cmd;
  7889. int ret;
  7890. memset(&port_cmd, 0, sizeof(port_cmd));
  7891. port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
  7892. FW_CMD_REQUEST_F | FW_CMD_READ_F |
  7893. FW_PORT_CMD_PORTID_V(pi->tx_chan));
  7894. port_cmd.action_to_len16 = cpu_to_be32(
  7895. FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
  7896. ? FW_PORT_ACTION_GET_PORT_INFO
  7897. : FW_PORT_ACTION_GET_PORT_INFO32) |
  7898. FW_LEN16(port_cmd));
  7899. ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
  7900. &port_cmd, sizeof(port_cmd), &port_cmd);
  7901. if (ret)
  7902. return ret;
  7903. t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
  7904. return 0;
  7905. }
  7906. /**
  7907. * t4_get_link_params - retrieve basic link parameters for given port
  7908. * @pi: the port
  7909. * @link_okp: value return pointer for link up/down
  7910. * @speedp: value return pointer for speed (Mb/s)
  7911. * @mtup: value return pointer for mtu
  7912. *
  7913. * Retrieves basic link parameters for a port: link up/down, speed (Mb/s),
  7914. * and MTU for a specified port. A negative error is returned on
  7915. * failure; 0 on success.
  7916. */
  7917. int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
  7918. unsigned int *speedp, unsigned int *mtup)
  7919. {
  7920. unsigned int fw_caps = pi->adapter->params.fw_caps_support;
  7921. struct fw_port_cmd port_cmd;
  7922. unsigned int action, link_ok, speed, mtu;
  7923. fw_port_cap32_t linkattr;
  7924. int ret;
  7925. memset(&port_cmd, 0, sizeof(port_cmd));
  7926. port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
  7927. FW_CMD_REQUEST_F | FW_CMD_READ_F |
  7928. FW_PORT_CMD_PORTID_V(pi->tx_chan));
  7929. action = (fw_caps == FW_CAPS16
  7930. ? FW_PORT_ACTION_GET_PORT_INFO
  7931. : FW_PORT_ACTION_GET_PORT_INFO32);
  7932. port_cmd.action_to_len16 = cpu_to_be32(
  7933. FW_PORT_CMD_ACTION_V(action) |
  7934. FW_LEN16(port_cmd));
  7935. ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
  7936. &port_cmd, sizeof(port_cmd), &port_cmd);
  7937. if (ret)
  7938. return ret;
  7939. if (action == FW_PORT_ACTION_GET_PORT_INFO) {
  7940. u32 lstatus = be32_to_cpu(port_cmd.u.info.lstatus_to_modtype);
  7941. link_ok = !!(lstatus & FW_PORT_CMD_LSTATUS_F);
  7942. linkattr = lstatus_to_fwcap(lstatus);
  7943. mtu = be16_to_cpu(port_cmd.u.info.mtu);
  7944. } else {
  7945. u32 lstatus32 =
  7946. be32_to_cpu(port_cmd.u.info32.lstatus32_to_cbllen32);
  7947. link_ok = !!(lstatus32 & FW_PORT_CMD_LSTATUS32_F);
  7948. linkattr = be32_to_cpu(port_cmd.u.info32.linkattr32);
  7949. mtu = FW_PORT_CMD_MTU32_G(
  7950. be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
  7951. }
  7952. speed = fwcap_to_speed(linkattr);
  7953. *link_okp = link_ok;
  7954. *speedp = fwcap_to_speed(linkattr);
  7955. *mtup = mtu;
  7956. return 0;
  7957. }
/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;

	/* This might be a port command ... this simplifies the following
	 * conditionals ...  We can get away with pre-dereferencing
	 * action_to_len16 because it's in the first 16 bytes and all messages
	 * will be at least that long.
	 */
	const struct fw_port_cmd *p = (const void *)rpl;
	unsigned int action =
		FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));

	if (opcode == FW_PORT_CMD &&
	    (action == FW_PORT_ACTION_GET_PORT_INFO ||
	     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
		int i;
		int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
		struct port_info *pi = NULL;

		/* Find the port whose TX channel matches the message's port
		 * ID.  NOTE(review): if no port matches, pi is left pointing
		 * at the last port examined (or stays NULL on a zero-port
		 * adapter) -- presumably the firmware only reports channels
		 * we own; confirm that assumption holds.
		 */
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->tx_chan == chan)
				break;
		}

		t4_handle_get_port_info(pi, rpl);
	} else {
		/* Anything other than a Get Port Information reply is
		 * unexpected here.
		 */
		dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n",
			 opcode);
		return -EINVAL;
	}
	return 0;
}
  7995. static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
  7996. {
  7997. u16 val;
  7998. if (pci_is_pcie(adapter->pdev)) {
  7999. pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
  8000. p->speed = val & PCI_EXP_LNKSTA_CLS;
  8001. p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
  8002. }
  8003. }
/**
 * init_link_config - initialize a link's SW state
 * @lc: pointer to structure holding the link state
 * @pcaps: link Port Capabilities
 * @acaps: link current Advertised Port Capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
			     fw_port_cap32_t acaps)
{
	lc->pcaps = pcaps;
	lc->def_acaps = acaps;
	lc->lpacaps = 0;
	lc->speed_caps = 0;
	lc->speed = 0;
	/* Default flow control: pause in both directions. */
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;

	/* For Forward Error Control, we default to whatever the Firmware
	 * tells us the Link is currently advertising.
	 */
	lc->requested_fec = FEC_AUTO;
	lc->fec = fwcap_to_cc_fec(lc->def_acaps);

	/* If the Port is capable of Auto-Negotiation, initialize it as
	 * "enabled" and copy over all of the Physical Port Capabilities
	 * to the Advertised Port Capabilities.  Otherwise mark it as
	 * Auto-Negotiate disabled and select the highest supported speed
	 * for the link.  Note parallel structure in t4_link_l1cfg_core()
	 * and t4_handle_get_port_info().
	 */
	if (lc->pcaps & FW_PORT_CAP32_ANEG) {
		lc->acaps = lc->pcaps & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->acaps = 0;
		lc->autoneg = AUTONEG_DISABLE;
		lc->speed_caps = fwcap_to_fwspeed(acaps);
	}
}
  8044. #define CIM_PF_NOACCESS 0xeeeeeeee
  8045. int t4_wait_dev_ready(void __iomem *regs)
  8046. {
  8047. u32 whoami;
  8048. whoami = readl(regs + PL_WHOAMI_A);
  8049. if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
  8050. return 0;
  8051. msleep(500);
  8052. whoami = readl(regs + PL_WHOAMI_A);
  8053. return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
  8054. }
/* Identification of a serial flash part for t4_get_flash_params(). */
struct flash_desc {
	u32 vendor_and_model_id;	/* raw result of the flash Read-ID command */
	u32 size_mb;			/* part size -- NOTE(review): the table
					 * below stores byte counts (4 << 20)
					 * despite the "_mb" name; confirm
					 */
};
  8059. static int t4_get_flash_params(struct adapter *adap)
  8060. {
  8061. /* Table for non-Numonix supported flash parts. Numonix parts are left
  8062. * to the preexisting code. All flash parts have 64KB sectors.
  8063. */
  8064. static struct flash_desc supported_flash[] = {
  8065. { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
  8066. };
  8067. unsigned int part, manufacturer;
  8068. unsigned int density, size = 0;
  8069. u32 flashid = 0;
  8070. int ret;
  8071. /* Issue a Read ID Command to the Flash part. We decode supported
  8072. * Flash parts and their sizes from this. There's a newer Query
  8073. * Command which can retrieve detailed geometry information but many
  8074. * Flash parts don't support it.
  8075. */
  8076. ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
  8077. if (!ret)
  8078. ret = sf1_read(adap, 3, 0, 1, &flashid);
  8079. t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */
  8080. if (ret)
  8081. return ret;
  8082. /* Check to see if it's one of our non-standard supported Flash parts.
  8083. */
  8084. for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
  8085. if (supported_flash[part].vendor_and_model_id == flashid) {
  8086. adap->params.sf_size = supported_flash[part].size_mb;
  8087. adap->params.sf_nsec =
  8088. adap->params.sf_size / SF_SEC_SIZE;
  8089. goto found;
  8090. }
  8091. /* Decode Flash part size. The code below looks repetative with
  8092. * common encodings, but that's not guaranteed in the JEDEC
  8093. * specification for the Read JADEC ID command. The only thing that
  8094. * we're guaranteed by the JADEC specification is where the
  8095. * Manufacturer ID is in the returned result. After that each
  8096. * Manufacturer ~could~ encode things completely differently.
  8097. * Note, all Flash parts must have 64KB sectors.
  8098. */
  8099. manufacturer = flashid & 0xff;
  8100. switch (manufacturer) {
  8101. case 0x20: { /* Micron/Numonix */
  8102. /* This Density -> Size decoding table is taken from Micron
  8103. * Data Sheets.
  8104. */
  8105. density = (flashid >> 16) & 0xff;
  8106. switch (density) {
  8107. case 0x14: /* 1MB */
  8108. size = 1 << 20;
  8109. break;
  8110. case 0x15: /* 2MB */
  8111. size = 1 << 21;
  8112. break;
  8113. case 0x16: /* 4MB */
  8114. size = 1 << 22;
  8115. break;
  8116. case 0x17: /* 8MB */
  8117. size = 1 << 23;
  8118. break;
  8119. case 0x18: /* 16MB */
  8120. size = 1 << 24;
  8121. break;
  8122. case 0x19: /* 32MB */
  8123. size = 1 << 25;
  8124. break;
  8125. case 0x20: /* 64MB */
  8126. size = 1 << 26;
  8127. break;
  8128. case 0x21: /* 128MB */
  8129. size = 1 << 27;
  8130. break;
  8131. case 0x22: /* 256MB */
  8132. size = 1 << 28;
  8133. break;
  8134. }
  8135. break;
  8136. }
  8137. case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
  8138. /* This Density -> Size decoding table is taken from ISSI
  8139. * Data Sheets.
  8140. */
  8141. density = (flashid >> 16) & 0xff;
  8142. switch (density) {
  8143. case 0x16: /* 32 MB */
  8144. size = 1 << 25;
  8145. break;
  8146. case 0x17: /* 64MB */
  8147. size = 1 << 26;
  8148. break;
  8149. }
  8150. break;
  8151. }
  8152. case 0xc2: { /* Macronix */
  8153. /* This Density -> Size decoding table is taken from Macronix
  8154. * Data Sheets.
  8155. */
  8156. density = (flashid >> 16) & 0xff;
  8157. switch (density) {
  8158. case 0x17: /* 8MB */
  8159. size = 1 << 23;
  8160. break;
  8161. case 0x18: /* 16MB */
  8162. size = 1 << 24;
  8163. break;
  8164. }
  8165. break;
  8166. }
  8167. case 0xef: { /* Winbond */
  8168. /* This Density -> Size decoding table is taken from Winbond
  8169. * Data Sheets.
  8170. */
  8171. density = (flashid >> 16) & 0xff;
  8172. switch (density) {
  8173. case 0x17: /* 8MB */
  8174. size = 1 << 23;
  8175. break;
  8176. case 0x18: /* 16MB */
  8177. size = 1 << 24;
  8178. break;
  8179. }
  8180. break;
  8181. }
  8182. }
  8183. /* If we didn't recognize the FLASH part, that's no real issue: the
  8184. * Hardware/Software contract says that Hardware will _*ALWAYS*_
  8185. * use a FLASH part which is at least 4MB in size and has 64KB
  8186. * sectors. The unrecognized FLASH part is likely to be much larger
  8187. * than 4MB, but that's all we really need.
  8188. */
  8189. if (size == 0) {
  8190. dev_warn(adap->pdev_dev, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
  8191. flashid);
  8192. size = 1 << 22;
  8193. }
  8194. /* Store decoded Flash size and fall through into vetting code. */
  8195. adap->params.sf_size = size;
  8196. adap->params.sf_nsec = size / SF_SEC_SIZE;
  8197. found:
  8198. if (adap->params.sf_size < FLASH_MIN_SIZE)
  8199. dev_warn(adap->pdev_dev, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
  8200. flashid, adap->params.sf_size, FLASH_MIN_SIZE);
  8201. return 0;
  8202. }
/**
 * t4_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter)
{
	int ret, ver;
	uint16_t device_id;
	u32 pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);
	pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));

	ret = t4_get_flash_params(adapter);
	if (ret < 0) {
		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
		return ret;
	}

	/* Retrieve adapter's device ID
	 */
	pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
	/* Top nibble of the device ID encodes the chip generation. */
	ver = device_id >> 12;
	adapter->params.chip = 0;
	switch (ver) {
	case CHELSIO_T4:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
		adapter->params.arch.sge_fl_db = DBPRIO_F;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
		adapter->params.arch.vfcount = 128;
		/* Congestion map is for 4 channels so that
		 * MPS can have 4 priority per port.
		 */
		adapter->params.arch.cng_ch_bits_log = 2;
		break;
	case CHELSIO_T5:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
		adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
		adapter->params.arch.vfcount = 128;
		adapter->params.arch.cng_ch_bits_log = 2;
		break;
	case CHELSIO_T6:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
		adapter->params.arch.sge_fl_db = 0;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 256;
		adapter->params.arch.nchan = 2;
		adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
		adapter->params.arch.vfcount = 256;
		/* Congestion map will be for 2 channels so that
		 * MPS can have 8 priority per port.
		 */
		adapter->params.arch.cng_ch_bits_log = 3;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			device_id);
		return -EINVAL;
	}

	adapter->params.cim_la_size = CIMLA_SIZE;
	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set PCIe completion timeout to 4 seconds. */
	pcie_capability_clear_and_set_word(adapter->pdev, PCI_EXP_DEVCTL2,
					   PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
	return 0;
}
/**
 * t4_shutdown_adapter - shut down adapter, host & wire
 * @adapter: the adapter
 *
 * Perform an emergency shutdown of the adapter and stop it from
 * continuing any further communication on the ports or DMA to the
 * host.  This is typically used when the adapter and/or firmware
 * have crashed and we want to prevent any further accidental
 * communication with the rest of the world.  This will also force
 * the port Link Status to go down -- if register writes work --
 * which should help our peers figure out that we're down.
 */
int t4_shutdown_adapter(struct adapter *adapter)
{
	int port;

	t4_intr_disable(adapter);
	t4_write_reg(adapter, DBG_GPIO_EN_A, 0);
	for_each_port(adapter, port) {
		/* T4 uses the XGMAC port config register; T5+ use the MAC
		 * port config register at a different location.
		 */
		u32 a_port_cfg = is_t4(adapter->params.chip) ?
				 PORT_REG(port, XGMAC_PORT_CFG_A) :
				 T5_PORT_REG(port, MAC_PORT_CFG_A);

		/* Clearing SIGNAL_DET drops the link on the wire. */
		t4_write_reg(adapter, a_port_cfg,
			     t4_read_reg(adapter, a_port_cfg)
			     & ~SIGNAL_DET_V(1));
	}
	/* Finally stop all SGE DMA to/from the host. */
	t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0);
	return 0;
}
/**
 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
 * @adapter: the adapter
 * @qid: the Queue ID
 * @qtype: the Ingress or Egress type for @qid
 * @user: true if this request is for a user mode queue
 * @pbar2_qoffset: BAR2 Queue Offset
 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 * Returns the BAR2 SGE Queue Registers information associated with the
 * indicated Absolute Queue ID.  These are passed back in return value
 * pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 *
 * This may return an error which indicates that BAR2 SGE Queue
 * registers aren't available.  If an error is not returned, then the
 * following values are returned:
 *
 *   *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *   *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 * require the "Inferred Queue ID" ability may be used.  E.g. the
 * Write Combining Doorbell Buffer.  If the BAR2 Queue ID is not 0,
 * then these "Inferred Queue ID" register may not be used.
 */
int t4_bar2_sge_qregs(struct adapter *adapter,
		      unsigned int qid,
		      enum t4_bar2_qtype qtype,
		      int user,
		      u64 *pbar2_qoffset,
		      unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/* T4 doesn't support BAR2 SGE Queue registers for kernel mode queues */
	if (!user && is_t4(adapter->params.chip))
		return -EINVAL;

	/* Get our SGE Page Size parameters.
	 */
	page_shift = adapter->params.sge.hps + 10;
	page_size = 1 << page_shift;

	/* Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
		     ? adapter->params.sge.eq_qpp
		     : adapter->params.sge.iq_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/* Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
	return 0;
}
/**
 * t4_init_devlog_params - initialize adapter->params.devlog
 * @adap: the adapter
 *
 * Initialize various fields of the adapter's Firmware Device Log
 * Parameters structure.
 */
int t4_init_devlog_params(struct adapter *adap)
{
	struct devlog_params *dparams = &adap->params.devlog;
	u32 pf_dparams;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	pf_dparams =
		t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
	if (pf_dparams) {
		unsigned int nentries, nentries128;

		/* The start address is stored in 16-byte units (hence the
		 * << 4) and the size as (number of 128-entry units - 1).
		 */
		dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
		dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;

		nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

		return 0;
	}

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
	devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
					     FW_CMD_REQUEST_F | FW_CMD_READ_F);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret)
		return ret;

	devlog_meminfo =
		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
	dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
	return 0;
}
  8444. /**
  8445. * t4_init_sge_params - initialize adap->params.sge
  8446. * @adapter: the adapter
  8447. *
  8448. * Initialize various fields of the adapter's SGE Parameters structure.
  8449. */
  8450. int t4_init_sge_params(struct adapter *adapter)
  8451. {
  8452. struct sge_params *sge_params = &adapter->params.sge;
  8453. u32 hps, qpp;
  8454. unsigned int s_hps, s_qpp;
  8455. /* Extract the SGE Page Size for our PF.
  8456. */
  8457. hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
  8458. s_hps = (HOSTPAGESIZEPF0_S +
  8459. (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
  8460. sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
  8461. /* Extract the SGE Egress and Ingess Queues Per Page for our PF.
  8462. */
  8463. s_qpp = (QUEUESPERPAGEPF0_S +
  8464. (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
  8465. qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
  8466. sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
  8467. qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
  8468. sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
  8469. return 0;
  8470. }
/**
 * t4_init_tp_params - initialize adap->params.tp
 * @adap: the adapter
 * @sleep_ok: if true we may sleep while awaiting command completion
 *
 * Initialize various fields of the adapter's TP Parameters structure.
 */
int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
{
	int chan;
	u32 v;

	/* Timer and delayed-ACK resolutions are packed into one register. */
	v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
	adap->params.tp.tre = TIMERRESOLUTION_G(v);
	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < NCHAN; chan++)
		adap->params.tp.tx_modq[chan] = chan;

	/* Cache the adapter's Compressed Filter Mode and global Ingress
	 * Configuration.
	 */
	t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
		       TP_VLAN_PRI_MAP_A, sleep_ok);
	t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
		       TP_INGRESS_CONFIG_A, sleep_ok);

	/* For T6, cache the adapter's compressed error vector
	 * and passing outer header info for encapsulated packets.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
		v = t4_read_reg(adap, TP_OUT_CONFIG_A);
		adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 1 : 0;
	}

	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, FCOE_F);
	adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
	adap->params.tp.tos_shift = t4_filter_field_shift(adap, TOS_F);
	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
							       PROTOCOL_F);
	adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
								ETHERTYPE_F);
	adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
							       MACMATCH_F);
	adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
								MPSHITTYPE_F);
	adap->params.tp.frag_shift = t4_filter_field_shift(adap,
							   FRAGMENTATION_F);

	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
	 */
	if ((adap->params.tp.ingress_config & VNIC_F) == 0)
		adap->params.tp.vnic_shift = -1;

	/* Assemble the 64-bit hash filter mask from the two LE mask
	 * registers (low word first, high word shifted in above it).
	 * NOTE(review): these registers carry T6 names but are read
	 * unconditionally -- confirm the reads are benign on pre-T6 chips.
	 */
	v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
	adap->params.tp.hash_filter_mask = v;
	v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
	adap->params.tp.hash_filter_mask |= ((u64)v << 32);
	return 0;
}
  8532. /**
  8533. * t4_filter_field_shift - calculate filter field shift
  8534. * @adap: the adapter
  8535. * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
  8536. *
  8537. * Return the shift position of a filter field within the Compressed
  8538. * Filter Tuple. The filter field is specified via its selection bit
  8539. * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
  8540. */
  8541. int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
  8542. {
  8543. unsigned int filter_mode = adap->params.tp.vlan_pri_map;
  8544. unsigned int sel;
  8545. int field_shift;
  8546. if ((filter_mode & filter_sel) == 0)
  8547. return -1;
  8548. for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
  8549. switch (filter_mode & sel) {
  8550. case FCOE_F:
  8551. field_shift += FT_FCOE_W;
  8552. break;
  8553. case PORT_F:
  8554. field_shift += FT_PORT_W;
  8555. break;
  8556. case VNIC_ID_F:
  8557. field_shift += FT_VNIC_ID_W;
  8558. break;
  8559. case VLAN_F:
  8560. field_shift += FT_VLAN_W;
  8561. break;
  8562. case TOS_F:
  8563. field_shift += FT_TOS_W;
  8564. break;
  8565. case PROTOCOL_F:
  8566. field_shift += FT_PROTOCOL_W;
  8567. break;
  8568. case ETHERTYPE_F:
  8569. field_shift += FT_ETHERTYPE_W;
  8570. break;
  8571. case MACMATCH_F:
  8572. field_shift += FT_MACMATCH_W;
  8573. break;
  8574. case MPSHITTYPE_F:
  8575. field_shift += FT_MPSHITTYPE_W;
  8576. break;
  8577. case FRAGMENTATION_F:
  8578. field_shift += FT_FRAGMENTATION_W;
  8579. break;
  8580. }
  8581. }
  8582. return field_shift;
  8583. }
  8584. int t4_init_rss_mode(struct adapter *adap, int mbox)
  8585. {
  8586. int i, ret;
  8587. struct fw_rss_vi_config_cmd rvc;
  8588. memset(&rvc, 0, sizeof(rvc));
  8589. for_each_port(adap, i) {
  8590. struct port_info *p = adap2pinfo(adap, i);
  8591. rvc.op_to_viid =
  8592. cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
  8593. FW_CMD_REQUEST_F | FW_CMD_READ_F |
  8594. FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
  8595. rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
  8596. ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
  8597. if (ret)
  8598. return ret;
  8599. p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
  8600. }
  8601. return 0;
  8602. }
/**
 * t4_init_portinfo - allocate a virtual interface and initialize port_info
 * @pi: the port_info
 * @mbox: mailbox to use for the FW command
 * @port: physical port associated with the VI
 * @pf: the PF owning the VI
 * @vf: the VF owning the VI
 * @mac: the MAC address of the VI
 *
 * Allocates a virtual interface for the given physical port. If @mac is
 * not %NULL it contains the MAC address of the VI as assigned by FW.
 * @mac should be large enough to hold an Ethernet address.
 * Returns < 0 on error.
 */
int t4_init_portinfo(struct port_info *pi, int mbox,
		     int port, int pf, int vf, u8 mac[])
{
	struct adapter *adapter = pi->adapter;
	unsigned int fw_caps = adapter->params.fw_caps_support;
	struct fw_port_cmd cmd;
	unsigned int rss_size;
	enum fw_port_type port_type;
	int mdio_addr;
	fw_port_cap32_t pcaps, acaps;
	int ret;

	/* If we haven't yet determined whether we're talking to Firmware
	 * which knows the new 32-bit Port Capabilities, it's time to find
	 * out now. This will also tell new Firmware to send us Port Status
	 * Updates using the new 32-bit Port Capabilities version of the
	 * Port Information message.
	 */
	if (fw_caps == FW_CAPS_UNKNOWN) {
		u32 param, val;

		/* A successful "set" of the CAPS32 parameter means the
		 * firmware speaks the 32-bit capabilities format; a failure
		 * means we must fall back to the 16-bit format.
		 */
		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
		val = 1;
		ret = t4_set_params(adapter, mbox, pf, vf, 1, &param, &val);
		fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
		adapter->params.fw_caps_support = fw_caps;
	}

	/* Ask the firmware for this port's information, using whichever
	 * GET_PORT_INFO action matches the negotiated capability format.
	 */
	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
				       FW_CMD_REQUEST_F | FW_CMD_READ_F |
				       FW_PORT_CMD_PORTID_V(port));
	cmd.action_to_len16 = cpu_to_be32(
		FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
				     ? FW_PORT_ACTION_GET_PORT_INFO
				     : FW_PORT_ACTION_GET_PORT_INFO32) |
		FW_LEN16(cmd));
	ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
	if (ret)
		return ret;

	/* Extract the various fields from the Port Information message.
	 */
	if (fw_caps == FW_CAPS16) {
		u32 lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype);

		port_type = FW_PORT_CMD_PTYPE_G(lstatus);
		/* -1 means no MDIO address is available. */
		mdio_addr = ((lstatus & FW_PORT_CMD_MDIOCAP_F)
			     ? FW_PORT_CMD_MDIOADDR_G(lstatus)
			     : -1);
		/* Convert the 16-bit capabilities into the 32-bit form
		 * used internally.
		 */
		pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.pcap));
		acaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.acap));
	} else {
		u32 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);

		port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
		mdio_addr = ((lstatus32 & FW_PORT_CMD_MDIOCAP32_F)
			     ? FW_PORT_CMD_MDIOADDR32_G(lstatus32)
			     : -1);
		pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
		acaps = be32_to_cpu(cmd.u.info32.acaps32);
	}

	/* Allocate the VI (the returned value is the VI ID) and fill in
	 * the cached port state.
	 */
	ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
	if (ret < 0)
		return ret;

	pi->viid = ret;
	pi->tx_chan = port;
	pi->lport = port;
	pi->rss_size = rss_size;
	pi->port_type = port_type;
	pi->mdio_addr = mdio_addr;
	pi->mod_type = FW_PORT_MOD_TYPE_NA;

	init_link_config(&pi->link_cfg, pcaps, acaps);
	return 0;
}
  8687. int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
  8688. {
  8689. u8 addr[6];
  8690. int ret, i, j = 0;
  8691. for_each_port(adap, i) {
  8692. struct port_info *pi = adap2pinfo(adap, i);
  8693. while ((adap->params.portvec & (1 << j)) == 0)
  8694. j++;
  8695. ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
  8696. if (ret)
  8697. return ret;
  8698. memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
  8699. j++;
  8700. }
  8701. return 0;
  8702. }
/**
 * t4_read_cimq_cfg - read CIM queue configuration
 * @adap: the adapter
 * @base: holds the queue base addresses in bytes
 * @size: holds the queue sizes in bytes
 * @thres: holds the queue full thresholds in bytes
 *
 * Returns the current configuration of the CIM queues, starting with
 * the IBQs, then the OBQs.
 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;
	/* T4 has fewer OBQs than T5 and later chips. */
	int cim_num_obq = is_t4(adap->params.chip) ?
				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	for (i = 0; i < CIM_NUM_IBQ; i++) {
		/* Select IBQ i, then read its config word back. */
		t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
			     QUENUMSELECT_V(i));
		v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);

		/* value is in 256-byte units */
		*base++ = CIMQBASE_G(v) * 256;
		*size++ = CIMQSIZE_G(v) * 256;
		*thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
	}
	for (i = 0; i < cim_num_obq; i++) {
		/* Select OBQ i; OBQs report no full threshold. */
		t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
			     QUENUMSELECT_V(i));
		v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);

		/* value is in 256-byte units */
		*base++ = CIMQBASE_G(v) * 256;
		*size++ = CIMQSIZE_G(v) * 256;
	}
}
/**
 * t4_read_cim_ibq - read the contents of a CIM inbound queue
 * @adap: the adapter
 * @qid: the queue index
 * @data: where to store the queue contents
 * @n: capacity of @data in 32-bit words
 *
 * Reads the contents of the selected CIM queue starting at address 0 up
 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
 * error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err, attempts;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	/* Only IBQs 0-5 exist, and @n must be a multiple of 4 words. */
	if (qid > 5 || (n & 3))
		return -EINVAL;

	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	/* It might take 3-10ms before the IBQ debug read access is allowed.
	 * Wait for 1 Sec with a delay of 1 usec.
	 */
	attempts = 1000000;

	for (i = 0; i < n; i++, addr++) {
		/* Select the word to read, then poll until the debug-read
		 * busy flag clears before collecting the data.
		 */
		t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
			     IBQDBGEN_F);
		err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
				      attempts, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
	}
	/* Disable debug access when done. */
	t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
	return i;
}
/**
 * t4_read_cim_obq - read the contents of a CIM outbound queue
 * @adap: the adapter
 * @qid: the queue index
 * @data: where to store the queue contents
 * @n: capacity of @data in 32-bit words
 *
 * Reads the contents of the selected CIM queue starting at address 0 up
 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
 * error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;
	/* T4 has fewer OBQs than T5 and later chips. */
	int cim_num_obq = is_t4(adap->params.chip) ?
				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	if ((qid > (cim_num_obq - 1)) || (n & 3))
		return -EINVAL;

	/* Look up the selected OBQ's base address and size from its
	 * configuration word.
	 */
	t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
		     QUENUMSELECT_V(qid));
	v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
	addr = CIMQBASE_G(v) * 64;    /* multiple of 256 -> multiple of 4 */
	nwords = CIMQSIZE_G(v) * 64;  /* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		/* Select the word to read, then poll until the debug-read
		 * busy flag clears before collecting the data.
		 */
		t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
			     OBQDBGEN_F);
		err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
	}
	/* Disable debug access when done. */
	t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
	return i;
}
/**
 * t4_cim_read - read a block from CIM internal address space
 * @adap: the adapter
 * @addr: the start address within the CIM address space
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM internal address space.
 * Returns 0 on success, -EBUSY if a host access is already in flight,
 * or the t4_wait_op_done() error on timeout.
 */
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		/* Writing the address starts the access; poll until the
		 * busy flag clears, then collect the data word.
		 */
		t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
		ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
	}
	return ret;
}
/**
 * t4_cim_write - write a block into CIM internal address space
 * @adap: the adapter
 * @addr: the start address within the CIM address space
 * @n: number of words to write
 * @valp: set of values to write
 *
 * Writes a block of 4-byte words into the CIM internal address space.
 * Returns 0 on success, -EBUSY if a host access is already in flight,
 * or the t4_wait_op_done() error on timeout.
 */
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		/* Load the data word first, then the address with the
		 * write flag to start the access, then poll for completion.
		 */
		t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
		t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
		ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
				      0, 5, 2);
	}
	return ret;
}
  8858. static int t4_cim_write1(struct adapter *adap, unsigned int addr,
  8859. unsigned int val)
  8860. {
  8861. return t4_cim_write(adap, addr, 1, &val);
  8862. }
/**
 * t4_cim_read_la - read CIM LA capture buffer
 * @adap: the adapter
 * @la_buf: where to store the LA data
 * @wrptr: the HW write pointer within the capture buffer
 *
 * Reads the contents of the CIM LA buffer with the most recent entry at
 * the end of the returned data and with the entry at @wrptr first.
 * We try to leave the LA in the running state we find it in.
 */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & UPDBGLAEN_F) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
		if (ret)
			return ret;
	}

	ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
	if (ret)
		goto restart;

	/* Start at the hardware write pointer so the oldest entry is
	 * returned first.
	 */
	idx = UPDBGLAWRPTR_G(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		/* Set the read pointer and trigger a read ... */
		ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
				    UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
		if (ret)
			break;
		ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
		if (ret)
			break;
		/* ... the read-enable bit still being set means the data
		 * never became ready.
		 */
		if (val & UPDBGLARDEN_F) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
		if (ret)
			break;

		/* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
		 * identify the 32-bit portion of the full 312-bit data
		 */
		if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
			idx = (idx & 0xff0) + 0x10;
		else
			idx++;
		/* address can't exceed 0xfff */
		idx &= UPDBGLARDPTR_M;
	}
restart:
	if (cfg & UPDBGLAEN_F) {
		/* Restore the LA to the running state we found it in; don't
		 * let a failure here mask an earlier error.
		 */
		int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
				      cfg & ~UPDBGLARDEN_F);

		if (!ret)
			ret = r;
	}
	return ret;
}
/**
 * t4_tp_read_la - read TP LA capture buffer
 * @adap: the adapter
 * @la_buf: where to store the LA data
 * @wrptr: the HW write pointer within the capture buffer
 *
 * Reads the contents of the TP LA buffer with the most recent entry at
 * the end of the returned data and with the entry at @wrptr first.
 * We leave the LA in the running state we find it in.
 */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
	if (cfg & DBGLAENABLE_F)			/* freeze LA */
		t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
			     adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));

	val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
	idx = DBGLAWPTR_G(val);
	/* In capture modes >= 2 with the half-entry flag clear the final
	 * entry is not fully written; it gets wiped below.
	 */
	last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
	if (last_incomplete)
		idx = (idx + 1) & DBGLARPTR_M;
	if (wrptr)
		*wrptr = idx;

	/* Preserve the configured mode/mask bits but clear the read
	 * pointer field so it can be set per-entry in the loop below.
	 */
	val &= 0xffff;
	val &= ~DBGLARPTR_V(DBGLARPTR_M);
	val |= adap->params.tp.la_mask;

	for (i = 0; i < TPLA_SIZE; i++) {
		/* Select entry idx, then read it as a 64-bit value. */
		t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
		la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
		idx = (idx + 1) & DBGLARPTR_M;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & DBGLAENABLE_F)		/* restore running state */
		t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
			     cfg | adap->params.tp.la_mask);
}
/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
 * state for more than the Warning Threshold then we'll issue a warning about
 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
 * appears to be hung every Warning Repeat second till the situation clears.
 * If the situation clears, we'll note that as well.
 */
#define SGE_IDMA_WARN_THRESH 1
#define SGE_IDMA_WARN_REPEAT 300

/**
 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
 * @adapter: the adapter
 * @idma: the adapter IDMA Monitor state
 *
 * Initialize the state of an SGE Ingress DMA Monitor.
 */
void t4_idma_monitor_init(struct adapter *adapter,
			  struct sge_idma_monitor_state *idma)
{
	/* Initialize the state variables for detecting an SGE Ingress DMA
	 * hang. The SGE has internal counters which count up on each clock
	 * tick whenever the SGE finds its Ingress DMA State Engines in the
	 * same state they were on the previous clock tick. The clock used is
	 * the Core Clock so we have a limit on the maximum "time" they can
	 * record; typically a very small number of seconds. For instance,
	 * with a 600MHz Core Clock, we can only count up to a bit more than
	 * 7s. So we'll synthesize a larger counter in order to not run the
	 * risk of having the "timers" overflow and give us the flexibility to
	 * maintain a Hung SGE State Machine of our own which operates across
	 * a longer time frame.
	 */
	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
	idma->idma_stalled[0] = 0;
	idma->idma_stalled[1] = 0;
}
/**
 * t4_idma_monitor - monitor SGE Ingress DMA state
 * @adapter: the adapter
 * @idma: the adapter IDMA Monitor state
 * @hz: number of ticks/second
 * @ticks: number of ticks since the last IDMA Monitor call
 *
 * Samples the SGE's Ingress DMA Same State counters, maintains a
 * synthesized per-channel stall timer in the HZ domain, and emits a
 * rate-limited warning for any channel that appears hung (plus a note
 * when a previously-warned channel resumes).
 */
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks)
{
	int i, idma_same_state_cnt[2];

	/* Read the SGE Debug Ingress DMA Same State Count registers. These
	 * are counters inside the SGE which count up on each clock when the
	 * SGE finds its Ingress DMA State Engines in the same states they
	 * were in the previous clock. The counters will peg out at
	 * 0xffffffff without wrapping around so once they pass the 1s
	 * threshold they'll stay above that till the IDMA state changes.
	 */
	t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
	idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
	idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);

	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue. If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
				dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
					 "resumed after %d seconds\n",
					 i, idma->idma_qid[i],
					 idma->idma_stalled[i] / hz);
			idma->idma_stalled[i] = 0;
			continue;
		}

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain. The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (idma->idma_stalled[i] == 0) {
			idma->idma_stalled[i] = hz;
			idma->idma_warn[i] = 0;
		} else {
			idma->idma_stalled[i] += ticks;
			idma->idma_warn[i] -= ticks;
		}

		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
			continue;

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
		 */
		if (idma->idma_warn[i] > 0)
			continue;
		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
		 * can't be too careful ...
		 */
		t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
		debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
		debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
			 "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
			 i, idma->idma_qid[i], idma->idma_state[i],
			 idma->idma_stalled[i] / hz,
			 debug0, debug11);
		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
	}
}
/**
 * t4_load_cfg - download config file
 * @adap: the adapter
 * @cfg_data: the cfg text file to write
 * @size: text file size
 *
 * Write the supplied config text file to the card's serial flash.
 * A @size of 0 simply erases the FLASH area reserved for the config
 * file. Returns 0 on success, negative errno on failure.
 */
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
	int ret, i, n, cfg_addr;
	unsigned int addr;
	unsigned int flash_cfg_start_sec;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;

	cfg_addr = t4_flash_cfg_addr(adap);
	if (cfg_addr < 0)
		return cfg_addr;

	addr = cfg_addr;
	flash_cfg_start_sec = addr / SF_SEC_SIZE;

	if (size > FLASH_CFG_MAX_SIZE) {
		dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
			FLASH_CFG_MAX_SIZE);
		return -EFBIG;
	}

	/* Erase every sector the (maximum-sized) config file area spans. */
	i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE,	/* # of sectors spanned */
			 sf_sec_size);
	ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
				     flash_cfg_start_sec + i - 1);
	/* If size == 0 then we're simply erasing the FLASH sectors associated
	 * with the on-adapter Firmware Configuration File.
	 */
	if (ret || size == 0)
		goto out;

	/* this will write to the flash up to SF_PAGE_SIZE at a time */
	for (i = 0; i < size; i += SF_PAGE_SIZE) {
		if ((size - i) < SF_PAGE_SIZE)
			n = size - i;	/* final, partial page */
		else
			n = SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, n, cfg_data);
		if (ret)
			goto out;

		addr += SF_PAGE_SIZE;
		cfg_data += SF_PAGE_SIZE;
	}

out:
	if (ret)
		dev_err(adap->pdev_dev, "config file %s failed %d\n",
			(size == 0 ? "clear" : "download"), ret);
	return ret;
}
/**
 * t4_set_vf_mac_acl - Set MAC address for the specified VF
 * @adapter: The adapter
 * @vf: one of the VFs instantiated by the specified PF
 * @naddr: the number of MAC addresses
 * @addr: the MAC address(es) to be set to the specified VF
 *
 * Returns the result of the FW_ACL_MAC_CMD mailbox command: 0 on
 * success, negative errno on failure.
 */
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
		      unsigned int naddr, u8 *addr)
{
	struct fw_acl_mac_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_WRITE_F |
				    FW_ACL_MAC_CMD_PFN_V(adapter->pf) |
				    FW_ACL_MAC_CMD_VFN_V(vf));

	/* Note: Do not enable the ACL */
	cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
	cmd.nmac = naddr;

	/* The command carries one MAC slot per PF; fill in the slot that
	 * matches our PF number.
	 */
	switch (adapter->pf) {
	case 3:
		memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
		break;
	case 2:
		memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
		break;
	case 1:
		memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
		break;
	case 0:
		memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
		break;
	}

	return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
}
  9167. /**
  9168. * t4_read_pace_tbl - read the pace table
  9169. * @adap: the adapter
  9170. * @pace_vals: holds the returned values
  9171. *
  9172. * Returns the values of TP's pace table in microseconds.
  9173. */
  9174. void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
  9175. {
  9176. unsigned int i, v;
  9177. for (i = 0; i < NTX_SCHED; i++) {
  9178. t4_write_reg(adap, TP_PACE_TABLE_A, 0xffff0000 + i);
  9179. v = t4_read_reg(adap, TP_PACE_TABLE_A);
  9180. pace_vals[i] = dack_ticks_to_usec(adap, v);
  9181. }
  9182. }
  9183. /**
  9184. * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
  9185. * @adap: the adapter
  9186. * @sched: the scheduler index
  9187. * @kbps: the byte rate in Kbps
  9188. * @ipg: the interpacket delay in tenths of nanoseconds
  9189. * @sleep_ok: if true we may sleep while awaiting command completion
  9190. *
  9191. * Return the current configuration of a HW Tx scheduler.
  9192. */
  9193. void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
  9194. unsigned int *kbps, unsigned int *ipg, bool sleep_ok)
  9195. {
  9196. unsigned int v, addr, bpt, cpt;
  9197. if (kbps) {
  9198. addr = TP_TX_MOD_Q1_Q0_RATE_LIMIT_A - sched / 2;
  9199. t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
  9200. if (sched & 1)
  9201. v >>= 16;
  9202. bpt = (v >> 8) & 0xff;
  9203. cpt = v & 0xff;
  9204. if (!cpt) {
  9205. *kbps = 0; /* scheduler disabled */
  9206. } else {
  9207. v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
  9208. *kbps = (v * bpt) / 125;
  9209. }
  9210. }
  9211. if (ipg) {
  9212. addr = TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR_A - sched / 2;
  9213. t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
  9214. if (sched & 1)
  9215. v >>= 16;
  9216. v &= 0xffff;
  9217. *ipg = (10000 * v) / core_ticks_per_usec(adap);
  9218. }
  9219. }
  9220. /* t4_sge_ctxt_rd - read an SGE context through FW
  9221. * @adap: the adapter
  9222. * @mbox: mailbox to use for the FW command
  9223. * @cid: the context id
  9224. * @ctype: the context type
  9225. * @data: where to store the context data
  9226. *
  9227. * Issues a FW command through the given mailbox to read an SGE context.
  9228. */
  9229. int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
  9230. enum ctxt_type ctype, u32 *data)
  9231. {
  9232. struct fw_ldst_cmd c;
  9233. int ret;
  9234. if (ctype == CTXT_FLM)
  9235. ret = FW_LDST_ADDRSPC_SGE_FLMC;
  9236. else
  9237. ret = FW_LDST_ADDRSPC_SGE_CONMC;
  9238. memset(&c, 0, sizeof(c));
  9239. c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
  9240. FW_CMD_REQUEST_F | FW_CMD_READ_F |
  9241. FW_LDST_CMD_ADDRSPACE_V(ret));
  9242. c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
  9243. c.u.idctxt.physid = cpu_to_be32(cid);
  9244. ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
  9245. if (ret == 0) {
  9246. data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
  9247. data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
  9248. data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
  9249. data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
  9250. data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
  9251. data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
  9252. }
  9253. return ret;
  9254. }
  9255. /**
  9256. * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
  9257. * @adap: the adapter
  9258. * @cid: the context id
  9259. * @ctype: the context type
  9260. * @data: where to store the context data
  9261. *
  9262. * Reads an SGE context directly, bypassing FW. This is only for
  9263. * debugging when FW is unavailable.
  9264. */
  9265. int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
  9266. enum ctxt_type ctype, u32 *data)
  9267. {
  9268. int i, ret;
  9269. t4_write_reg(adap, SGE_CTXT_CMD_A, CTXTQID_V(cid) | CTXTTYPE_V(ctype));
  9270. ret = t4_wait_op_done(adap, SGE_CTXT_CMD_A, BUSY_F, 0, 3, 1);
  9271. if (!ret)
  9272. for (i = SGE_CTXT_DATA0_A; i <= SGE_CTXT_DATA5_A; i += 4)
  9273. *data++ = t4_read_reg(adap, i);
  9274. return ret;
  9275. }
  9276. int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
  9277. int rateunit, int ratemode, int channel, int class,
  9278. int minrate, int maxrate, int weight, int pktsize)
  9279. {
  9280. struct fw_sched_cmd cmd;
  9281. memset(&cmd, 0, sizeof(cmd));
  9282. cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_SCHED_CMD) |
  9283. FW_CMD_REQUEST_F |
  9284. FW_CMD_WRITE_F);
  9285. cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
  9286. cmd.u.params.sc = FW_SCHED_SC_PARAMS;
  9287. cmd.u.params.type = type;
  9288. cmd.u.params.level = level;
  9289. cmd.u.params.mode = mode;
  9290. cmd.u.params.ch = channel;
  9291. cmd.u.params.cl = class;
  9292. cmd.u.params.unit = rateunit;
  9293. cmd.u.params.rate = ratemode;
  9294. cmd.u.params.min = cpu_to_be32(minrate);
  9295. cmd.u.params.max = cpu_to_be32(maxrate);
  9296. cmd.u.params.weight = cpu_to_be16(weight);
  9297. cmd.u.params.pktsize = cpu_to_be16(pktsize);
  9298. return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
  9299. NULL, 1);
  9300. }
/**
 * t4_i2c_rd - read I2C data from adapter
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @port: Port number if per-port device; <0 if not
 * @devid: per-port device ID or absolute device ID
 * @offset: byte offset into device I2C space
 * @len: byte length of I2C space data
 * @buf: buffer in which to return I2C data
 *
 * Reads the I2C data from the indicated device and location.
 * Returns 0 on success or a negative error from the mailbox write.
 */
int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
	      unsigned int devid, unsigned int offset,
	      unsigned int len, u8 *buf)
{
	struct fw_ldst_cmd ldst_cmd, ldst_rpl;
	/* Maximum bytes the FW_LDST reply can carry per command. */
	unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
	int ret = 0;

	if (len > I2C_PAGE_SIZE)
		return -EINVAL;

	/* Don't allow reads that span multiple pages */
	if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
		return -EINVAL;

	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
	ldst_cmd.op_to_addrspace =
		cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
			    FW_CMD_REQUEST_F |
			    FW_CMD_READ_F |
			    FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_I2C));
	ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
	/* 0xff selects an absolute (non-per-port) device ID */
	ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
	ldst_cmd.u.i2c.did = devid;

	/* Issue as many commands as needed to cover the requested range,
	 * i2c_max bytes at a time.
	 */
	while (len > 0) {
		unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;

		ldst_cmd.u.i2c.boffset = offset;
		ldst_cmd.u.i2c.blen = i2c_len;

		ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
				 &ldst_rpl);
		if (ret)
			break;

		memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
		offset += i2c_len;
		buf += i2c_len;
		len -= i2c_len;
	}

	return ret;
}
  9348. /**
  9349. * t4_set_vlan_acl - Set a VLAN id for the specified VF
  9350. * @adapter: the adapter
  9351. * @mbox: mailbox to use for the FW command
  9352. * @vf: one of the VFs instantiated by the specified PF
  9353. * @vlan: The vlanid to be set
  9354. */
  9355. int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
  9356. u16 vlan)
  9357. {
  9358. struct fw_acl_vlan_cmd vlan_cmd;
  9359. unsigned int enable;
  9360. enable = (vlan ? FW_ACL_VLAN_CMD_EN_F : 0);
  9361. memset(&vlan_cmd, 0, sizeof(vlan_cmd));
  9362. vlan_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_VLAN_CMD) |
  9363. FW_CMD_REQUEST_F |
  9364. FW_CMD_WRITE_F |
  9365. FW_CMD_EXEC_F |
  9366. FW_ACL_VLAN_CMD_PFN_V(adap->pf) |
  9367. FW_ACL_VLAN_CMD_VFN_V(vf));
  9368. vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
  9369. /* Drop all packets that donot match vlan id */
  9370. vlan_cmd.dropnovlan_fm = FW_ACL_VLAN_CMD_FM_F;
  9371. if (enable != 0) {
  9372. vlan_cmd.nvlan = 1;
  9373. vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
  9374. }
  9375. return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
  9376. }