/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_status.h"
#include "i40iw_hmc.h"
#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"
#include "i40iw_vf.h"
#include "i40iw_virtchnl.h"

/**
 * i40iw_insert_wqe_hdr - write wqe header
 * @wqe: cqp wqe for header
 * @header: header for the cqp wqe
 */
void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
{
        wmb();          /* make sure WQE is populated before polarity is set */
        set_64bit_val(wqe, 24, header);
}
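
/**
 * i40iw_check_cqp_progress - check progress of cqp command completions
 * @cqp_timeout: cqp timeout info (completed command count and stall count)
 * @dev: sc device struct
 */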
void i40iw_check_cqp_progress(struct i40iw_cqp_timeout *cqp_timeout, struct i40iw_sc_dev *dev)
{
        if (cqp_timeout->compl_cqp_cmds != dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]) {
                cqp_timeout->compl_cqp_cmds = dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS];
                cqp_timeout->count = 0;
        } else {
                if (dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] != cqp_timeout->compl_cqp_cmds)
                        cqp_timeout->count++;
        }
}

/**
 * i40iw_get_cqp_reg_info - get head and tail for cqp using registers
 * @cqp: struct for cqp hw
 * @val: cqp tail register value
 * @tail: wqtail register value
 * @error: cqp processing err
 */
static inline void i40iw_get_cqp_reg_info(struct i40iw_sc_cqp *cqp,
                                          u32 *val,
                                          u32 *tail,
                                          u32 *error)
{
        if (cqp->dev->is_pf) {
                *val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPTAIL);
                *tail = RS_32(*val, I40E_PFPE_CQPTAIL_WQTAIL);
                *error = RS_32(*val, I40E_PFPE_CQPTAIL_CQP_OP_ERR);
        } else {
                *val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPTAIL1);
                *tail = RS_32(*val, I40E_VFPE_CQPTAIL_WQTAIL);
                *error = RS_32(*val, I40E_VFPE_CQPTAIL_CQP_OP_ERR);
        }
}

/**
 * i40iw_cqp_poll_registers - poll cqp registers
 * @cqp: struct for cqp hw
 * @tail: wqtail register value
 * @count: how many times to try for completion
 */
static enum i40iw_status_code i40iw_cqp_poll_registers(
                                struct i40iw_sc_cqp *cqp,
                                u32 tail,
                                u32 count)
{
        u32 i = 0;
        u32 newtail, error, val;

        while (i < count) {
                i++;
                i40iw_get_cqp_reg_info(cqp, &val, &newtail, &error);
                if (error) {
                        error = (cqp->dev->is_pf) ?
                                 i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES) :
                                 i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
                        return I40IW_ERR_CQP_COMPL_ERROR;
                }
                if (newtail != tail) {
                        /* SUCCESS */
                        I40IW_RING_MOVE_TAIL(cqp->sq_ring);
                        cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;
                        return 0;
                }
                udelay(I40IW_SLEEP_COUNT);
        }
        return I40IW_ERR_TIMEOUT;
}
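
/*
 * Typical caller pattern (as used by the fpm commit/query commands later
 * in this file): capture the current tail before ringing the doorbell,
 * then poll until hardware moves past it. A minimal sketch:
 *
 *      i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
 *      ...build and post the WQE...
 *      ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
 */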

/**
 * i40iw_sc_parse_fpm_commit_buf - parse fpm commit buffer
 * @buf: ptr to fpm commit buffer
 * @info: ptr to i40iw_hmc_obj_info struct
 * @sd: number of SDs for HMC objects
 *
 * parses fpm commit info and copy base value
 * of hmc objects in hmc_info
 */
static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf(
                                u64 *buf,
                                struct i40iw_hmc_obj_info *info,
                                u32 *sd)
{
        u64 temp;
        u64 size;
        u64 base = 0;
        u32 i, j;
        u32 k = 0;

        /* copy base values in obj_info */
        for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; i++, j += 8) {
                if ((i == I40IW_HMC_IW_SRQ) ||
                    (i == I40IW_HMC_IW_FSIMC) ||
                    (i == I40IW_HMC_IW_FSIAV)) {
                        info[i].base = 0;
                        info[i].cnt = 0;
                        continue;
                }
                get_64bit_val(buf, j, &temp);
                info[i].base = RS_64_1(temp, 32) * 512;
                if (info[i].base > base) {
                        base = info[i].base;
                        k = i;
                }
                if (i == I40IW_HMC_IW_APBVT_ENTRY) {
                        info[i].cnt = 1;
                        continue;
                }
                if (i == I40IW_HMC_IW_QP)
                        info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
                else if (i == I40IW_HMC_IW_CQ)
                        info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
                else
                        info[i].cnt = (u32)(temp);
        }
        size = info[k].cnt * info[k].size + info[k].base;
        if (size & 0x1FFFFF)
                *sd = (u32)((size >> 21) + 1); /* add 1 for remainder */
        else
                *sd = (u32)(size >> 21);
        return 0;
}
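
/*
 * The *sd computation above rounds the HMC footprint up to 2MB segment
 * descriptors: one SD maps 2^21 bytes, and the highest-placed object's
 * (base + cnt * size) determines the count. An illustrative sketch with
 * made-up numbers, not hardware values:
 *
 *      size = 500000 objects * 64 bytes + base 0 = 32000000 bytes
 *      32000000 >> 21 = 15 full SDs, remainder 542720 bytes -> *sd = 16
 */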

/**
 * i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size
 * @buf: ptr to fpm query buffer
 * @buf_idx: index into buf
 * @obj_info: ptr to i40iw_hmc_obj_info struct
 * @rsrc_idx: resource index into obj_info
 *
 * Decode a 64 bit value from fpm query buffer into max count and size
 */
static u64 i40iw_sc_decode_fpm_query(u64 *buf,
                                     u32 buf_idx,
                                     struct i40iw_hmc_obj_info *obj_info,
                                     u32 rsrc_idx)
{
        u64 temp;
        u32 size;

        get_64bit_val(buf, buf_idx, &temp);
        obj_info[rsrc_idx].max_cnt = (u32)temp;
        size = (u32)RS_64_1(temp, 32);
        obj_info[rsrc_idx].size = LS_64_1(1, size);
        return temp;
}

/**
 * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer
 * @buf: ptr to fpm query buffer
 * @hmc_info: ptr to i40iw_hmc_info struct
 * @hmc_fpm_misc: ptr to fpm data
 *
 * parses fpm query buffer and copy max_cnt and
 * size value of hmc objects in hmc_info
 */
static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf(
                                u64 *buf,
                                struct i40iw_hmc_info *hmc_info,
                                struct i40iw_hmc_fpm_misc *hmc_fpm_misc)
{
        struct i40iw_hmc_obj_info *obj_info;
        u64 temp;
        u32 size;
        u16 max_pe_sds;

        obj_info = hmc_info->hmc_obj;

        get_64bit_val(buf, 0, &temp);
        hmc_info->first_sd_index = (u16)RS_64(temp, I40IW_QUERY_FPM_FIRST_PE_SD_INDEX);
        max_pe_sds = (u16)RS_64(temp, I40IW_QUERY_FPM_MAX_PE_SDS);

        /* Reduce SD count for VFs by 1 to account for PBLE backing page rounding */
        if (hmc_info->hmc_fn_id >= I40IW_FIRST_VF_FPM_ID)
                max_pe_sds--;
        hmc_fpm_misc->max_sds = max_pe_sds;
        hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index;

        get_64bit_val(buf, 8, &temp);
        obj_info[I40IW_HMC_IW_QP].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS);
        size = (u32)RS_64_1(temp, 32);
        obj_info[I40IW_HMC_IW_QP].size = LS_64_1(1, size);

        get_64bit_val(buf, 16, &temp);
        obj_info[I40IW_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS);
        size = (u32)RS_64_1(temp, 32);
        obj_info[I40IW_HMC_IW_CQ].size = LS_64_1(1, size);

        i40iw_sc_decode_fpm_query(buf, 32, obj_info, I40IW_HMC_IW_HTE);
        i40iw_sc_decode_fpm_query(buf, 40, obj_info, I40IW_HMC_IW_ARP);

        obj_info[I40IW_HMC_IW_APBVT_ENTRY].size = 8192;
        obj_info[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1;

        i40iw_sc_decode_fpm_query(buf, 48, obj_info, I40IW_HMC_IW_MR);
        i40iw_sc_decode_fpm_query(buf, 56, obj_info, I40IW_HMC_IW_XF);

        get_64bit_val(buf, 64, &temp);
        obj_info[I40IW_HMC_IW_XFFL].max_cnt = (u32)temp;
        obj_info[I40IW_HMC_IW_XFFL].size = 4;
        hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE);
        if (!hmc_fpm_misc->xf_block_size)
                return I40IW_ERR_INVALID_SIZE;

        i40iw_sc_decode_fpm_query(buf, 72, obj_info, I40IW_HMC_IW_Q1);

        get_64bit_val(buf, 80, &temp);
        obj_info[I40IW_HMC_IW_Q1FL].max_cnt = (u32)temp;
        obj_info[I40IW_HMC_IW_Q1FL].size = 4;
        hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE);
        if (!hmc_fpm_misc->q1_block_size)
                return I40IW_ERR_INVALID_SIZE;

        i40iw_sc_decode_fpm_query(buf, 88, obj_info, I40IW_HMC_IW_TIMER);

        get_64bit_val(buf, 112, &temp);
        obj_info[I40IW_HMC_IW_PBLE].max_cnt = (u32)temp;
        obj_info[I40IW_HMC_IW_PBLE].size = 8;

        get_64bit_val(buf, 120, &temp);
        hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS);
        hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER);
        hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET);
        return 0;
}

/**
 * i40iw_fill_qos_list - Change all unknown qs handles to available ones
 * @qs_list: list of qs_handles to be fixed with valid qs_handles
 */
static void i40iw_fill_qos_list(u16 *qs_list)
{
        u16 qshandle = qs_list[0];
        int i;

        for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
                if (qs_list[i] == QS_HANDLE_UNKNOWN)
                        qs_list[i] = qshandle;
                else
                        qshandle = qs_list[i];
        }
}
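
/*
 * Each unknown entry inherits the most recent valid qs_handle seen so
 * far (leading unknowns keep qs_list[0]'s value). An illustrative
 * before/after, using U for QS_HANDLE_UNKNOWN and made-up handles:
 *
 *      before: { 5, U, U, 9, U, 12, U, U }
 *      after:  { 5, 5, 5, 9, 9, 12, 12, 12 }
 */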

/**
 * i40iw_qp_from_entry - Given entry, get to the qp structure
 * @entry: Points to list of qp structure
 */
static struct i40iw_sc_qp *i40iw_qp_from_entry(struct list_head *entry)
{
        if (!entry)
                return NULL;

        return (struct i40iw_sc_qp *)((char *)entry - offsetof(struct i40iw_sc_qp, list));
}
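
/*
 * The pointer arithmetic above is the open-coded form of the kernel's
 * container_of()/list_entry() helpers; a NULL-checked equivalent sketch:
 *
 *      if (entry)
 *              qp = list_entry(entry, struct i40iw_sc_qp, list);
 */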

/**
 * i40iw_get_qp - get the next qp from the list given current qp
 * @head: Listhead of qp's
 * @qp: current qp
 */
static struct i40iw_sc_qp *i40iw_get_qp(struct list_head *head, struct i40iw_sc_qp *qp)
{
        struct list_head *entry = NULL;
        struct list_head *lastentry;

        if (list_empty(head))
                return NULL;

        if (!qp) {
                entry = head->next;
        } else {
                lastentry = &qp->list;
                entry = (lastentry != head) ? lastentry->next : NULL;
        }
        return i40iw_qp_from_entry(entry);
}

/**
 * i40iw_change_l2params - given the new l2 parameters, change all qp
 * @vsi: pointer to the vsi structure
 * @l2params: New parameters from l2
 */
void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params)
{
        struct i40iw_sc_dev *dev = vsi->dev;
        struct i40iw_sc_qp *qp = NULL;
        bool qs_handle_change = false;
        unsigned long flags;
        u16 qs_handle;
        int i;

        if (vsi->mtu != l2params->mtu) {
                vsi->mtu = l2params->mtu;
                i40iw_reinitialize_ieq(dev);
        }

        i40iw_fill_qos_list(l2params->qs_handle_list);
        for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
                qs_handle = l2params->qs_handle_list[i];
                if (vsi->qos[i].qs_handle != qs_handle)
                        qs_handle_change = true;
                spin_lock_irqsave(&vsi->qos[i].lock, flags);
                qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
                while (qp) {
                        if (qs_handle_change) {
                                qp->qs_handle = qs_handle;
                                /* issue cqp suspend command */
                                i40iw_qp_suspend_resume(dev, qp, true);
                        }
                        qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
                }
                spin_unlock_irqrestore(&vsi->qos[i].lock, flags);
                vsi->qos[i].qs_handle = qs_handle;
        }
}

/**
 * i40iw_qp_rem_qos - remove qp from qos lists during destroy qp
 * @qp: qp to be removed from qos
 */
void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp)
{
        struct i40iw_sc_vsi *vsi = qp->vsi;
        unsigned long flags;

        if (!qp->on_qoslist)
                return;
        spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
        list_del(&qp->list);
        spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
}

/**
 * i40iw_qp_add_qos - called during setctx for qp to be added to qos
 * @qp: qp to be added to qos
 */
void i40iw_qp_add_qos(struct i40iw_sc_qp *qp)
{
        struct i40iw_sc_vsi *vsi = qp->vsi;
        unsigned long flags;

        if (qp->on_qoslist)
                return;
        spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags);
        qp->qs_handle = vsi->qos[qp->user_pri].qs_handle;
        list_add(&qp->list, &vsi->qos[qp->user_pri].qplist);
        qp->on_qoslist = true;
        spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags);
}

/**
 * i40iw_sc_pd_init - initialize sc pd struct
 * @dev: sc device struct
 * @pd: sc pd ptr
 * @pd_id: pd_id for allocated pd
 * @abi_ver: ABI version from user context, -1 if not valid
 */
static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev,
                             struct i40iw_sc_pd *pd,
                             u16 pd_id,
                             int abi_ver)
{
        pd->size = sizeof(*pd);
        pd->pd_id = pd_id;
        pd->abi_ver = abi_ver;
        pd->dev = dev;
}

/**
 * i40iw_get_encoded_wqe_size - given wq size, returns hardware encoded size
 * @wqsize: size of the wq (sq, rq, srq) to be encoded
 * @cqpsq: true for the cqp sq, whose encoded size is 1 more than other wqs
 */
u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq)
{
        u8 encoded_size = 0;

        /* cqp sq's hw coded value starts from 1 for size of 4
         * while it starts from 0 for qp wqs.
         */
        if (cqpsq)
                encoded_size = 1;
        wqsize >>= 2;
        while (wqsize >>= 1)
                encoded_size++;
        return encoded_size;
}
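
/*
 * A few worked values for the encoding above (derived from the shifts,
 * not from a hardware table):
 *
 *      wqsize = 4,    cqpsq = true  -> 4 >> 2 = 1, no further shifts -> 1
 *      wqsize = 8,    cqpsq = true  -> 8 >> 2 = 2, one shift         -> 2
 *      wqsize = 2048, cqpsq = true  -> 2048 >> 2 = 512, nine shifts  -> 10
 *      wqsize = 4,    cqpsq = false -> 0 (qp wq encoding starts at 0)
 */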

/**
 * i40iw_sc_cqp_init - Initialize buffers for a control Queue Pair
 * @cqp: IWARP control queue pair pointer
 * @info: IWARP control queue pair init info pointer
 *
 * Initializes the object and context buffers for a control Queue Pair.
 */
static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp,
                                                struct i40iw_cqp_init_info *info)
{
        u8 hw_sq_size;

        if ((info->sq_size > I40IW_CQP_SW_SQSIZE_2048) ||
            (info->sq_size < I40IW_CQP_SW_SQSIZE_4) ||
            ((info->sq_size & (info->sq_size - 1))))
                return I40IW_ERR_INVALID_SIZE;

        hw_sq_size = i40iw_get_encoded_wqe_size(info->sq_size, true);
        cqp->size = sizeof(*cqp);
        cqp->sq_size = info->sq_size;
        cqp->hw_sq_size = hw_sq_size;
        cqp->sq_base = info->sq;
        cqp->host_ctx = info->host_ctx;
        cqp->sq_pa = info->sq_pa;
        cqp->host_ctx_pa = info->host_ctx_pa;
        cqp->dev = info->dev;
        cqp->struct_ver = info->struct_ver;
        cqp->scratch_array = info->scratch_array;
        cqp->polarity = 0;
        cqp->en_datacenter_tcp = info->en_datacenter_tcp;
        cqp->enabled_vf_count = info->enabled_vf_count;
        cqp->hmc_profile = info->hmc_profile;
        info->dev->cqp = cqp;

        I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size);
        cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] = 0;
        cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS] = 0;
        INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head);        /* for the cqp commands backlog. */

        i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPTAIL, 0);
        i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, 0);

        i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
                    "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n",
                    __func__, cqp->sq_size, cqp->hw_sq_size,
                    cqp->sq_base, cqp->sq_pa, cqp, cqp->polarity);
        return 0;
}
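
/*
 * The sq_size check above accepts only powers of two between
 * I40IW_CQP_SW_SQSIZE_4 and I40IW_CQP_SW_SQSIZE_2048, since
 * x & (x - 1) is nonzero for any non-power-of-two. A minimal
 * caller-side sketch; sq_mem, ctx_mem and scratch are hypothetical
 * locals whose DMA allocations are assumed done elsewhere:
 *
 *      struct i40iw_cqp_init_info info = {};
 *
 *      info.sq_size = I40IW_CQP_SW_SQSIZE_2048;
 *      info.sq = sq_mem.va;            (CQP SQ base)
 *      info.sq_pa = sq_mem.pa;
 *      info.host_ctx = ctx_mem.va;
 *      info.host_ctx_pa = ctx_mem.pa;
 *      info.dev = dev;
 *      info.scratch_array = scratch;   (one u64 per SQ entry)
 *      if (i40iw_sc_cqp_init(cqp, &info))
 *              goto error;
 */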

/**
 * i40iw_sc_cqp_create - create cqp during bringup
 * @cqp: struct for cqp hw
 * @maj_err: If error, major err number
 * @min_err: If error, minor err number
 */
static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
                                                  u16 *maj_err,
                                                  u16 *min_err)
{
        u64 temp;
        u32 cnt = 0, p1, p2, val = 0, err_code;
        enum i40iw_status_code ret_code;

        *maj_err = 0;
        *min_err = 0;

        ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
                                          &cqp->sdbuf,
                                          I40IW_UPDATE_SD_BUF_SIZE * cqp->sq_size,
                                          I40IW_SD_BUF_ALIGNMENT);
        if (ret_code)
                goto exit;

        temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) |
               LS_64(cqp->struct_ver, I40IW_CQPHC_SVER);

        set_64bit_val(cqp->host_ctx, 0, temp);
        set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa);
        temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) |
               LS_64(cqp->hmc_profile, I40IW_CQPHC_HMC_PROFILE);
        set_64bit_val(cqp->host_ctx, 16, temp);
        set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp);
        set_64bit_val(cqp->host_ctx, 32, 0);
        set_64bit_val(cqp->host_ctx, 40, 0);
        set_64bit_val(cqp->host_ctx, 48, 0);
        set_64bit_val(cqp->host_ctx, 56, 0);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQP_HOST_CTX",
                        cqp->host_ctx, I40IW_CQP_CTX_SIZE * 8);

        p1 = RS_32_1(cqp->host_ctx_pa, 32);
        p2 = (u32)cqp->host_ctx_pa;

        if (cqp->dev->is_pf) {
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, p1);
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, p2);
        } else {
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, p1);
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, p2);
        }
        do {
                if (cnt++ > I40IW_DONE_COUNT) {
                        i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
                        ret_code = I40IW_ERR_TIMEOUT;
                        /*
                         * read PFPE_CQPERRORCODES register to get the minor
                         * and major error code
                         */
                        if (cqp->dev->is_pf)
                                err_code = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES);
                        else
                                err_code = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1);
                        *min_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE);
                        *maj_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE);
                        goto exit;
                }
                udelay(I40IW_SLEEP_COUNT);
                if (cqp->dev->is_pf)
                        val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CCQPSTATUS);
                else
                        val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CCQPSTATUS1);
        } while (!val);
exit:
        if (!ret_code)
                cqp->process_cqp_sds = i40iw_update_sds_noccq;
        return ret_code;
}

/**
 * i40iw_sc_cqp_post_sq - post of cqp's sq
 * @cqp: struct for cqp hw
 */
void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
{
        if (cqp->dev->is_pf)
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));
        else
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CQPDB1, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring));

        i40iw_debug(cqp->dev,
                    I40IW_DEBUG_WQE,
                    "%s: HEAD_TAIL[%04d,%04d,%04d]\n",
                    __func__,
                    cqp->sq_ring.head,
                    cqp->sq_ring.tail,
                    cqp->sq_ring.size);
}

/**
 * i40iw_sc_cqp_get_next_send_wqe_idx - get next WQE on CQP SQ and pass back the index
 * @cqp: pointer to CQP structure
 * @scratch: private data for CQP WQE
 * @wqe_idx: WQE index for next WQE on CQP SQ
 */
static u64 *i40iw_sc_cqp_get_next_send_wqe_idx(struct i40iw_sc_cqp *cqp,
                                               u64 scratch, u32 *wqe_idx)
{
        u64 *wqe = NULL;
        enum i40iw_status_code ret_code;

        if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
                i40iw_debug(cqp->dev,
                            I40IW_DEBUG_WQE,
                            "%s: ring is full head %x tail %x size %x\n",
                            __func__,
                            cqp->sq_ring.head,
                            cqp->sq_ring.tail,
                            cqp->sq_ring.size);
                return NULL;
        }
        I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
        cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
        if (ret_code)
                return NULL;
        if (!*wqe_idx)
                cqp->polarity = !cqp->polarity;

        wqe = cqp->sq_base[*wqe_idx].elem;
        cqp->scratch_array[*wqe_idx] = scratch;
        I40IW_CQP_INIT_WQE(wqe);

        return wqe;
}

/**
 * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
 * @cqp: struct for cqp hw
 * @scratch: private data for CQP WQE
 */
u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
{
        u32 wqe_idx;

        return i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
}
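
/*
 * Every CQP command below follows the same pattern: grab the next SQ
 * WQE, fill its payload with set_64bit_val(), build the header with the
 * opcode and current polarity, and write the header last through
 * i40iw_insert_wqe_hdr() so the valid bit flips only after the payload
 * is visible. A minimal sketch (opcode and offsets are illustrative,
 * not a real command):
 *
 *      wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
 *      if (!wqe)
 *              return I40IW_ERR_RING_FULL;
 *      set_64bit_val(wqe, 16, payload);
 *      header = LS_64(opcode, I40IW_CQPSQ_OPCODE) |
 *               LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
 *      i40iw_insert_wqe_hdr(wqe, header);
 *      if (post_sq)
 *              i40iw_sc_cqp_post_sq(cqp);
 */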

/**
 * i40iw_sc_cqp_destroy - destroy cqp during close
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_cqp_destroy(struct i40iw_sc_cqp *cqp)
{
        u32 cnt = 0, val = 1;
        enum i40iw_status_code ret_code = 0;
        u32 cqpstat_addr;

        if (cqp->dev->is_pf) {
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, 0);
                i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, 0);
                cqpstat_addr = I40E_PFPE_CCQPSTATUS;
        } else {
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, 0);
                i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, 0);
                cqpstat_addr = I40E_VFPE_CCQPSTATUS1;
        }
        do {
                if (cnt++ > I40IW_DONE_COUNT) {
                        ret_code = I40IW_ERR_TIMEOUT;
                        break;
                }
                udelay(I40IW_SLEEP_COUNT);
                val = i40iw_rd32(cqp->dev->hw, cqpstat_addr);
        } while (val);

        i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf);
        return ret_code;
}

/**
 * i40iw_sc_ccq_arm - enable intr for control cq
 * @ccq: ccq sc struct
 */
static void i40iw_sc_ccq_arm(struct i40iw_sc_cq *ccq)
{
        u64 temp_val;
        u16 sw_cq_sel;
        u8 arm_next_se;
        u8 arm_seq_num;

        /* write to cq doorbell shadow area */
        /* arm next se should always be zero */
        get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);

        sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT);
        arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE);

        arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM);
        arm_seq_num++;

        temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) |
                   LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) |
                   LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) |
                   LS_64(1, I40IW_CQ_DBSA_ARM_NEXT);

        set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);

        wmb();          /* make sure shadow area is updated before arming */

        if (ccq->dev->is_pf)
                i40iw_wr32(ccq->dev->hw, I40E_PFPE_CQARM, ccq->cq_uk.cq_id);
        else
                i40iw_wr32(ccq->dev->hw, I40E_VFPE_CQARM1, ccq->cq_uk.cq_id);
}

/**
 * i40iw_sc_ccq_get_cqe_info - get ccq's cq entry
 * @ccq: ccq sc struct
 * @info: completion q entry to return
 */
static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info(
                                struct i40iw_sc_cq *ccq,
                                struct i40iw_ccq_cqe_info *info)
{
        u64 qp_ctx, temp, temp1;
        u64 *cqe;
        struct i40iw_sc_cqp *cqp;
        u32 wqe_idx;
        u8 polarity;
        enum i40iw_status_code ret_code = 0;

        if (ccq->cq_uk.avoid_mem_cflct)
                cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(&ccq->cq_uk);
        else
                cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&ccq->cq_uk);

        get_64bit_val(cqe, 24, &temp);
        polarity = (u8)RS_64(temp, I40IW_CQ_VALID);
        if (polarity != ccq->cq_uk.polarity)
                return I40IW_ERR_QUEUE_EMPTY;

        get_64bit_val(cqe, 8, &qp_ctx);
        cqp = (struct i40iw_sc_cqp *)(unsigned long)qp_ctx;
        info->error = (bool)RS_64(temp, I40IW_CQ_ERROR);
        info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
        if (info->error) {
                info->maj_err_code = (u16)RS_64(temp, I40IW_CQ_MAJERR);
                info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR);
        }
        wqe_idx = (u32)RS_64(temp, I40IW_CQ_WQEIDX);
        info->scratch = cqp->scratch_array[wqe_idx];

        get_64bit_val(cqe, 16, &temp1);
        info->op_ret_val = (u32)RS_64(temp1, I40IW_CCQ_OPRETVAL);
        get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1);
        info->op_code = (u8)RS_64(temp1, I40IW_CQPSQ_OPCODE);
        info->cqp = cqp;

        /* move the head for cq */
        I40IW_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code);
        if (I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring) == 0)
                ccq->cq_uk.polarity ^= 1;

        /* update cq tail in cq shadow memory also */
        I40IW_RING_MOVE_TAIL(ccq->cq_uk.cq_ring);
        set_64bit_val(ccq->cq_uk.shadow_area,
                      0,
                      I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring));
        wmb();          /* write shadow area before tail */
        I40IW_RING_MOVE_TAIL(cqp->sq_ring);
        ccq->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++;

        return ret_code;
}

/**
 * i40iw_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ
 * @cqp: struct for cqp hw
 * @op_code: cqp opcode for completion
 * @compl_info: completion q entry to return
 */
static enum i40iw_status_code i40iw_sc_poll_for_cqp_op_done(
                                struct i40iw_sc_cqp *cqp,
                                u8 op_code,
                                struct i40iw_ccq_cqe_info *compl_info)
{
        struct i40iw_ccq_cqe_info info;
        struct i40iw_sc_cq *ccq;
        enum i40iw_status_code ret_code = 0;
        u32 cnt = 0;

        memset(&info, 0, sizeof(info));
        ccq = cqp->dev->ccq;
        while (1) {
                if (cnt++ > I40IW_DONE_COUNT)
                        return I40IW_ERR_TIMEOUT;

                if (i40iw_sc_ccq_get_cqe_info(ccq, &info)) {
                        udelay(I40IW_SLEEP_COUNT);
                        continue;
                }

                if (info.error) {
                        ret_code = I40IW_ERR_CQP_COMPL_ERROR;
                        break;
                }
                /* check if returned opcode matches the requested one */
                if (op_code != info.op_code) {
                        i40iw_debug(cqp->dev, I40IW_DEBUG_WQE,
                                    "%s: opcode mismatch for my op code 0x%x, returned opcode %x\n",
                                    __func__, op_code, info.op_code);
                }
                /* success, exit out of the loop */
                if (op_code == info.op_code)
                        break;
        }

        if (compl_info)
                memcpy(compl_info, &info, sizeof(*compl_info));

        return ret_code;
}

/**
 * i40iw_sc_manage_push_page - Handle push page
 * @cqp: struct for cqp hw
 * @info: push page info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_manage_push_page(
                                struct i40iw_sc_cqp *cqp,
                                struct i40iw_cqp_manage_push_page_info *info,
                                u64 scratch,
                                bool post_sq)
{
        u64 *wqe;
        u64 header;

        if (info->push_idx >= I40IW_MAX_PUSH_PAGE_COUNT)
                return I40IW_ERR_INVALID_PUSH_PAGE_INDEX;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 16, info->qs_handle);

        header = LS_64(info->push_idx, I40IW_CQPSQ_MPP_PPIDX) |
                 LS_64(I40IW_CQP_OP_MANAGE_PUSH_PAGES, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
                 LS_64(info->free_page, I40IW_CQPSQ_MPP_FREE_PAGE);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_manage_hmc_pm_func_table - manage of function table
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @vf_index: vf index for cqp
 * @free_pm_fcn: true to free the pm function entry, false to add it
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table(
                                struct i40iw_sc_cqp *cqp,
                                u64 scratch,
                                u8 vf_index,
                                bool free_pm_fcn,
                                bool post_sq)
{
        u64 *wqe;
        u64 header;

        if (vf_index >= I40IW_MAX_VF_PER_PF)
                return I40IW_ERR_INVALID_VF_ID;
        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        header = LS_64(vf_index, I40IW_CQPSQ_MHMC_VFIDX) |
                 LS_64(I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, I40IW_CQPSQ_OPCODE) |
                 LS_64(free_pm_fcn, I40IW_CQPSQ_MHMC_FREEPMFN) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);
        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);
        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}

/**
 * i40iw_sc_set_hmc_resource_profile - cqp wqe for hmc profile
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_profile_type: type of profile to set
 * @vf_num: vf number for profile
 * @post_sq: flag for cqp db to ring
 * @poll_registers: flag to poll register for cqp completion
 */
static enum i40iw_status_code i40iw_sc_set_hmc_resource_profile(
                                struct i40iw_sc_cqp *cqp,
                                u64 scratch,
                                u8 hmc_profile_type,
                                u8 vf_num, bool post_sq,
                                bool poll_registers)
{
        u64 *wqe;
        u64 header;
        u32 val, tail, error;
        enum i40iw_status_code ret_code = 0;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 16,
                      (LS_64(hmc_profile_type, I40IW_CQPSQ_SHMCRP_HMC_PROFILE) |
                       LS_64(vf_num, I40IW_CQPSQ_SHMCRP_VFNUM)));

        header = LS_64(I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SET_HMC_RESOURCE_PROFILE WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
        if (error)
                return I40IW_ERR_CQP_COMPL_ERROR;

        if (post_sq) {
                i40iw_sc_cqp_post_sq(cqp);
                if (poll_registers)
                        ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000000);
                else
                        ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
                                                                 I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
                                                                 NULL);
        }

        return ret_code;
}

/**
 * i40iw_sc_manage_hmc_pm_func_table_done - wait for cqp wqe completion for function table
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table_done(struct i40iw_sc_cqp *cqp)
{
        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, NULL);
}

/**
 * i40iw_sc_commit_fpm_values_done - wait for cqp wqe completion for fpm commit
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cqp *cqp)
{
        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_COMMIT_FPM_VALUES, NULL);
}

/**
 * i40iw_sc_commit_fpm_values - cqp wqe for commit fpm values
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @commit_fpm_mem: Memory for fpm values
 * @post_sq: flag for cqp db to ring
 * @wait_type: poll ccq or cqp registers for cqp completion
 */
static enum i40iw_status_code i40iw_sc_commit_fpm_values(
                                struct i40iw_sc_cqp *cqp,
                                u64 scratch,
                                u8 hmc_fn_id,
                                struct i40iw_dma_mem *commit_fpm_mem,
                                bool post_sq,
                                u8 wait_type)
{
        u64 *wqe;
        u64 header;
        u32 tail, val, error;
        enum i40iw_status_code ret_code = 0;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 16, hmc_fn_id);
        set_64bit_val(wqe, 32, commit_fpm_mem->pa);

        header = LS_64(I40IW_CQP_OP_COMMIT_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "COMMIT_FPM_VALUES WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
        if (error)
                return I40IW_ERR_CQP_COMPL_ERROR;

        if (post_sq) {
                i40iw_sc_cqp_post_sq(cqp);
                if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
                        ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
                else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
                        ret_code = i40iw_sc_commit_fpm_values_done(cqp);
        }

        return ret_code;
}

/**
 * i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm
 * @cqp: struct for cqp hw
 */
static enum i40iw_status_code i40iw_sc_query_fpm_values_done(struct i40iw_sc_cqp *cqp)
{
        return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_QUERY_FPM_VALUES, NULL);
}

/**
 * i40iw_sc_query_fpm_values - cqp wqe query fpm values
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @query_fpm_mem: memory for return fpm values
 * @post_sq: flag for cqp db to ring
 * @wait_type: poll ccq or cqp registers for cqp completion
 */
static enum i40iw_status_code i40iw_sc_query_fpm_values(
                                struct i40iw_sc_cqp *cqp,
                                u64 scratch,
                                u8 hmc_fn_id,
                                struct i40iw_dma_mem *query_fpm_mem,
                                bool post_sq,
                                u8 wait_type)
{
        u64 *wqe;
        u64 header;
        u32 tail, val, error;
        enum i40iw_status_code ret_code = 0;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        set_64bit_val(wqe, 16, hmc_fn_id);
        set_64bit_val(wqe, 32, query_fpm_mem->pa);

        header = LS_64(I40IW_CQP_OP_QUERY_FPM_VALUES, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_FPM WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        /* read the tail from CQP_TAIL register */
        i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
        if (error)
                return I40IW_ERR_CQP_COMPL_ERROR;

        if (post_sq) {
                i40iw_sc_cqp_post_sq(cqp);
                if (wait_type == I40IW_CQP_WAIT_POLL_REGS)
                        ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);
                else if (wait_type == I40IW_CQP_WAIT_POLL_CQ)
                        ret_code = i40iw_sc_query_fpm_values_done(cqp);
        }

        return ret_code;
}

/**
 * i40iw_sc_add_arp_cache_entry - cqp wqe add arp cache entry
 * @cqp: struct for cqp hw
 * @info: arp entry information
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_add_arp_cache_entry(
                                struct i40iw_sc_cqp *cqp,
                                struct i40iw_add_arp_cache_entry_info *info,
                                u64 scratch,
                                bool post_sq)
{
        u64 *wqe;
        u64 temp, header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;
        set_64bit_val(wqe, 8, info->reach_max);

        temp = info->mac_addr[5] |
               LS_64_1(info->mac_addr[4], 8) |
               LS_64_1(info->mac_addr[3], 16) |
               LS_64_1(info->mac_addr[2], 24) |
               LS_64_1(info->mac_addr[1], 32) |
               LS_64_1(info->mac_addr[0], 40);

        set_64bit_val(wqe, 16, temp);

        header = info->arp_index |
                 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
                 LS_64((info->permanent ? 1 : 0), I40IW_CQPSQ_MAT_PERMANENT) |
                 LS_64(1, I40IW_CQPSQ_MAT_ENTRYVALID) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_ENTRY WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}
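
/*
 * The temp value above packs the 6-byte MAC into the low 48 bits of a
 * u64 with mac_addr[0] as the most significant byte. For example (an
 * arbitrary address, not taken from the driver):
 *
 *      mac_addr = 00:25:90:ab:cd:ef  ->  temp = 0x002590abcdef
 */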

/**
 * i40iw_sc_del_arp_cache_entry - delete arp cache entry
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @arp_index: arp index to delete arp entry
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_del_arp_cache_entry(
                                struct i40iw_sc_cqp *cqp,
                                u64 scratch,
                                u16 arp_index,
                                bool post_sq)
{
        u64 *wqe;
        u64 header;

        wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
        if (!wqe)
                return I40IW_ERR_RING_FULL;

        header = arp_index |
                 LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
                 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

        i40iw_insert_wqe_hdr(wqe, header);

        i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE",
                        wqe, I40IW_CQP_WQE_SIZE * 8);

        if (post_sq)
                i40iw_sc_cqp_post_sq(cqp);
        return 0;
}
  1012. /**
1013. * i40iw_sc_query_arp_cache_entry - cqp wqe to query an arp cache entry by arp index
  1014. * @cqp: struct for cqp hw
  1015. * @scratch: u64 saved to be used during cqp completion
1016. * @arp_index: arp index of the entry to query
  1017. * @post_sq: flag for cqp db to ring
  1018. */
  1019. static enum i40iw_status_code i40iw_sc_query_arp_cache_entry(
  1020. struct i40iw_sc_cqp *cqp,
  1021. u64 scratch,
  1022. u16 arp_index,
  1023. bool post_sq)
  1024. {
  1025. u64 *wqe;
  1026. u64 header;
  1027. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  1028. if (!wqe)
  1029. return I40IW_ERR_RING_FULL;
  1030. header = arp_index |
  1031. LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) |
  1032. LS_64(1, I40IW_CQPSQ_MAT_QUERY) |
  1033. LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
  1034. i40iw_insert_wqe_hdr(wqe, header);
  1035. i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_ARP_CACHE_ENTRY WQE",
  1036. wqe, I40IW_CQP_WQE_SIZE * 8);
  1037. if (post_sq)
  1038. i40iw_sc_cqp_post_sq(cqp);
  1039. return 0;
  1040. }
  1041. /**
  1042. * i40iw_sc_manage_apbvt_entry - for adding and deleting apbvt entries
  1043. * @cqp: struct for cqp hw
  1044. * @info: info for apbvt entry to add or delete
  1045. * @scratch: u64 saved to be used during cqp completion
  1046. * @post_sq: flag for cqp db to ring
  1047. */
  1048. static enum i40iw_status_code i40iw_sc_manage_apbvt_entry(
  1049. struct i40iw_sc_cqp *cqp,
  1050. struct i40iw_apbvt_info *info,
  1051. u64 scratch,
  1052. bool post_sq)
  1053. {
  1054. u64 *wqe;
  1055. u64 header;
  1056. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  1057. if (!wqe)
  1058. return I40IW_ERR_RING_FULL;
  1059. set_64bit_val(wqe, 16, info->port);
  1060. header = LS_64(I40IW_CQP_OP_MANAGE_APBVT, I40IW_CQPSQ_OPCODE) |
  1061. LS_64(info->add, I40IW_CQPSQ_MAPT_ADDPORT) |
  1062. LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
  1063. i40iw_insert_wqe_hdr(wqe, header);
  1064. i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_APBVT WQE",
  1065. wqe, I40IW_CQP_WQE_SIZE * 8);
  1066. if (post_sq)
  1067. i40iw_sc_cqp_post_sq(cqp);
  1068. return 0;
  1069. }
  1070. /**
  1071. * i40iw_sc_manage_qhash_table_entry - manage quad hash entries
  1072. * @cqp: struct for cqp hw
  1073. * @info: info for quad hash to manage
  1074. * @scratch: u64 saved to be used during cqp completion
  1075. * @post_sq: flag for cqp db to ring
  1076. *
1077. * This is called before connection establishment is started. For passive connections, when a
1078. * listener is created, it is called with an entry type of I40IW_QHASH_TYPE_TCP_SYN and the local
1079. * ip address and tcp port. When a SYN is received (passive connections) or
1080. * sent (active connections), this routine is called with an entry type of
1081. * I40IW_QHASH_TYPE_TCP_ESTABLISHED and the quad (source/destination ip and ports) passed in info.
1082. *
1083. * Once the iwarp connection is established and its state moves to RTS, the quad hash entry in
1084. * the hardware points to the iwarp qp number and requires no further calls from the driver (an illustrative usage sketch follows the function body below).
  1085. */
  1086. static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry(
  1087. struct i40iw_sc_cqp *cqp,
  1088. struct i40iw_qhash_table_info *info,
  1089. u64 scratch,
  1090. bool post_sq)
  1091. {
  1092. u64 *wqe;
  1093. u64 qw1 = 0;
  1094. u64 qw2 = 0;
  1095. u64 temp;
  1096. struct i40iw_sc_vsi *vsi = info->vsi;
  1097. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  1098. if (!wqe)
  1099. return I40IW_ERR_RING_FULL;
  1100. temp = info->mac_addr[5] |
  1101. LS_64_1(info->mac_addr[4], 8) |
  1102. LS_64_1(info->mac_addr[3], 16) |
  1103. LS_64_1(info->mac_addr[2], 24) |
  1104. LS_64_1(info->mac_addr[1], 32) |
  1105. LS_64_1(info->mac_addr[0], 40);
  1106. set_64bit_val(wqe, 0, temp);
  1107. qw1 = LS_64(info->qp_num, I40IW_CQPSQ_QHASH_QPN) |
  1108. LS_64(info->dest_port, I40IW_CQPSQ_QHASH_DEST_PORT);
  1109. if (info->ipv4_valid) {
  1110. set_64bit_val(wqe,
  1111. 48,
  1112. LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
  1113. } else {
  1114. set_64bit_val(wqe,
  1115. 56,
  1116. LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
  1117. LS_64(info->dest_ip[1], I40IW_CQPSQ_QHASH_ADDR1));
  1118. set_64bit_val(wqe,
  1119. 48,
  1120. LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
  1121. LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
  1122. }
  1123. qw2 = LS_64(vsi->qos[info->user_pri].qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE);
  1124. if (info->vlan_valid)
  1125. qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID);
  1126. set_64bit_val(wqe, 16, qw2);
  1127. if (info->entry_type == I40IW_QHASH_TYPE_TCP_ESTABLISHED) {
  1128. qw1 |= LS_64(info->src_port, I40IW_CQPSQ_QHASH_SRC_PORT);
  1129. if (!info->ipv4_valid) {
  1130. set_64bit_val(wqe,
  1131. 40,
  1132. LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR0) |
  1133. LS_64(info->src_ip[1], I40IW_CQPSQ_QHASH_ADDR1));
  1134. set_64bit_val(wqe,
  1135. 32,
  1136. LS_64(info->src_ip[2], I40IW_CQPSQ_QHASH_ADDR2) |
  1137. LS_64(info->src_ip[3], I40IW_CQPSQ_QHASH_ADDR3));
  1138. } else {
  1139. set_64bit_val(wqe,
  1140. 32,
  1141. LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR3));
  1142. }
  1143. }
  1144. set_64bit_val(wqe, 8, qw1);
  1145. temp = LS_64(cqp->polarity, I40IW_CQPSQ_QHASH_WQEVALID) |
  1146. LS_64(I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY, I40IW_CQPSQ_QHASH_OPCODE) |
  1147. LS_64(info->manage, I40IW_CQPSQ_QHASH_MANAGE) |
  1148. LS_64(info->ipv4_valid, I40IW_CQPSQ_QHASH_IPV4VALID) |
  1149. LS_64(info->vlan_valid, I40IW_CQPSQ_QHASH_VLANVALID) |
  1150. LS_64(info->entry_type, I40IW_CQPSQ_QHASH_ENTRYTYPE);
  1151. i40iw_insert_wqe_hdr(wqe, temp);
  1152. i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_QHASH WQE",
  1153. wqe, I40IW_CQP_WQE_SIZE * 8);
  1154. if (post_sq)
  1155. i40iw_sc_cqp_post_sq(cqp);
  1156. return 0;
  1157. }
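/*
 * Illustrative usage sketch (not from the original source): adding a SYN
 * qhash entry when a listener is created, per the flow described in the
 * comment above. The manage-type value and the convention of carrying the
 * listener's local address/port in the dest fields are assumptions here.
 */
#if 0
static enum i40iw_status_code example_add_listener_qhash(
	struct i40iw_sc_cqp *cqp,
	struct i40iw_qhash_table_info *info)
{
	info->entry_type = I40IW_QHASH_TYPE_TCP_SYN;
	info->manage = I40IW_QHASH_MANAGE_TYPE_ADD;	/* assumed enum value */
	/* for SYN entries the listener's local ip/port occupy the dest fields */
	return i40iw_sc_manage_qhash_table_entry(cqp, info, 0, true);
}
#endif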
  1158. /**
  1159. * i40iw_sc_alloc_local_mac_ipaddr_entry - cqp wqe for loc mac entry
  1160. * @cqp: struct for cqp hw
  1161. * @scratch: u64 saved to be used during cqp completion
  1162. * @post_sq: flag for cqp db to ring
  1163. */
  1164. static enum i40iw_status_code i40iw_sc_alloc_local_mac_ipaddr_entry(
  1165. struct i40iw_sc_cqp *cqp,
  1166. u64 scratch,
  1167. bool post_sq)
  1168. {
  1169. u64 *wqe;
  1170. u64 header;
  1171. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  1172. if (!wqe)
  1173. return I40IW_ERR_RING_FULL;
  1174. header = LS_64(I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY, I40IW_CQPSQ_OPCODE) |
  1175. LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
  1176. i40iw_insert_wqe_hdr(wqe, header);
  1177. i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ALLOCATE_LOCAL_MAC_IPADDR WQE",
  1178. wqe, I40IW_CQP_WQE_SIZE * 8);
  1179. if (post_sq)
  1180. i40iw_sc_cqp_post_sq(cqp);
  1181. return 0;
  1182. }
  1183. /**
1184. * i40iw_sc_add_local_mac_ipaddr_entry - add mac entry
1185. * @cqp: struct for cqp hw
1186. * @info: mac addr info
  1187. * @scratch: u64 saved to be used during cqp completion
  1188. * @post_sq: flag for cqp db to ring
  1189. */
  1190. static enum i40iw_status_code i40iw_sc_add_local_mac_ipaddr_entry(
  1191. struct i40iw_sc_cqp *cqp,
  1192. struct i40iw_local_mac_ipaddr_entry_info *info,
  1193. u64 scratch,
  1194. bool post_sq)
  1195. {
  1196. u64 *wqe;
  1197. u64 temp, header;
  1198. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  1199. if (!wqe)
  1200. return I40IW_ERR_RING_FULL;
  1201. temp = info->mac_addr[5] |
  1202. LS_64_1(info->mac_addr[4], 8) |
  1203. LS_64_1(info->mac_addr[3], 16) |
  1204. LS_64_1(info->mac_addr[2], 24) |
  1205. LS_64_1(info->mac_addr[1], 32) |
  1206. LS_64_1(info->mac_addr[0], 40);
  1207. set_64bit_val(wqe, 32, temp);
  1208. header = LS_64(info->entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
  1209. LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
  1210. LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
  1211. i40iw_insert_wqe_hdr(wqe, header);
  1212. i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ADD_LOCAL_MAC_IPADDR WQE",
  1213. wqe, I40IW_CQP_WQE_SIZE * 8);
  1214. if (post_sq)
  1215. i40iw_sc_cqp_post_sq(cqp);
  1216. return 0;
  1217. }
  1218. /**
1219. * i40iw_sc_del_local_mac_ipaddr_entry - cqp wqe to delete local mac entry
  1220. * @cqp: struct for cqp hw
  1221. * @scratch: u64 saved to be used during cqp completion
  1222. * @entry_idx: index of mac entry
1223. * @ignore_ref_count: flag to force delete of the mac address entry
  1224. * @post_sq: flag for cqp db to ring
  1225. */
  1226. static enum i40iw_status_code i40iw_sc_del_local_mac_ipaddr_entry(
  1227. struct i40iw_sc_cqp *cqp,
  1228. u64 scratch,
  1229. u8 entry_idx,
  1230. u8 ignore_ref_count,
  1231. bool post_sq)
  1232. {
  1233. u64 *wqe;
  1234. u64 header;
  1235. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  1236. if (!wqe)
  1237. return I40IW_ERR_RING_FULL;
  1238. header = LS_64(entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) |
  1239. LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) |
  1240. LS_64(1, I40IW_CQPSQ_MLIPA_FREEENTRY) |
  1241. LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
  1242. LS_64(ignore_ref_count, I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT);
  1243. i40iw_insert_wqe_hdr(wqe, header);
  1244. i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE",
  1245. wqe, I40IW_CQP_WQE_SIZE * 8);
  1246. if (post_sq)
  1247. i40iw_sc_cqp_post_sq(cqp);
  1248. return 0;
  1249. }
  1250. /**
  1251. * i40iw_sc_cqp_nop - send a nop wqe
  1252. * @cqp: struct for cqp hw
  1253. * @scratch: u64 saved to be used during cqp completion
  1254. * @post_sq: flag for cqp db to ring
  1255. */
  1256. static enum i40iw_status_code i40iw_sc_cqp_nop(struct i40iw_sc_cqp *cqp,
  1257. u64 scratch,
  1258. bool post_sq)
  1259. {
  1260. u64 *wqe;
  1261. u64 header;
  1262. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  1263. if (!wqe)
  1264. return I40IW_ERR_RING_FULL;
  1265. header = LS_64(I40IW_CQP_OP_NOP, I40IW_CQPSQ_OPCODE) |
  1266. LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
  1267. i40iw_insert_wqe_hdr(wqe, header);
  1268. i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "NOP WQE",
  1269. wqe, I40IW_CQP_WQE_SIZE * 8);
  1270. if (post_sq)
  1271. i40iw_sc_cqp_post_sq(cqp);
  1272. return 0;
  1273. }
  1274. /**
  1275. * i40iw_sc_ceq_init - initialize ceq
  1276. * @ceq: ceq sc structure
  1277. * @info: ceq initialization info
  1278. */
  1279. static enum i40iw_status_code i40iw_sc_ceq_init(struct i40iw_sc_ceq *ceq,
  1280. struct i40iw_ceq_init_info *info)
  1281. {
  1282. u32 pble_obj_cnt;
  1283. if ((info->elem_cnt < I40IW_MIN_CEQ_ENTRIES) ||
  1284. (info->elem_cnt > I40IW_MAX_CEQ_ENTRIES))
  1285. return I40IW_ERR_INVALID_SIZE;
  1286. if (info->ceq_id >= I40IW_MAX_CEQID)
  1287. return I40IW_ERR_INVALID_CEQ_ID;
  1288. pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
  1289. if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
  1290. return I40IW_ERR_INVALID_PBLE_INDEX;
  1291. ceq->size = sizeof(*ceq);
  1292. ceq->ceqe_base = (struct i40iw_ceqe *)info->ceqe_base;
  1293. ceq->ceq_id = info->ceq_id;
  1294. ceq->dev = info->dev;
  1295. ceq->elem_cnt = info->elem_cnt;
  1296. ceq->ceq_elem_pa = info->ceqe_pa;
  1297. ceq->virtual_map = info->virtual_map;
  1298. ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
  1299. ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
  1300. ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
  1301. ceq->tph_en = info->tph_en;
  1302. ceq->tph_val = info->tph_val;
  1303. ceq->polarity = 1;
  1304. I40IW_RING_INIT(ceq->ceq_ring, ceq->elem_cnt);
  1305. ceq->dev->ceq[info->ceq_id] = ceq;
  1306. return 0;
  1307. }
  1308. /**
  1309. * i40iw_sc_ceq_create - create ceq wqe
  1310. * @ceq: ceq sc structure
  1311. * @scratch: u64 saved to be used during cqp completion
  1312. * @post_sq: flag for cqp db to ring
  1313. */
  1314. static enum i40iw_status_code i40iw_sc_ceq_create(struct i40iw_sc_ceq *ceq,
  1315. u64 scratch,
  1316. bool post_sq)
  1317. {
  1318. struct i40iw_sc_cqp *cqp;
  1319. u64 *wqe;
  1320. u64 header;
  1321. cqp = ceq->dev->cqp;
  1322. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  1323. if (!wqe)
  1324. return I40IW_ERR_RING_FULL;
  1325. set_64bit_val(wqe, 16, ceq->elem_cnt);
  1326. set_64bit_val(wqe, 32, (ceq->virtual_map ? 0 : ceq->ceq_elem_pa));
  1327. set_64bit_val(wqe, 48, (ceq->virtual_map ? ceq->first_pm_pbl_idx : 0));
  1328. set_64bit_val(wqe, 56, LS_64(ceq->tph_val, I40IW_CQPSQ_TPHVAL));
  1329. header = ceq->ceq_id |
  1330. LS_64(I40IW_CQP_OP_CREATE_CEQ, I40IW_CQPSQ_OPCODE) |
  1331. LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
  1332. LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
  1333. LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
  1334. LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
  1335. i40iw_insert_wqe_hdr(wqe, header);
  1336. i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_CREATE WQE",
  1337. wqe, I40IW_CQP_WQE_SIZE * 8);
  1338. if (post_sq)
  1339. i40iw_sc_cqp_post_sq(cqp);
  1340. return 0;
  1341. }
  1342. /**
  1343. * i40iw_sc_cceq_create_done - poll for control ceq wqe to complete
  1344. * @ceq: ceq sc structure
  1345. */
  1346. static enum i40iw_status_code i40iw_sc_cceq_create_done(struct i40iw_sc_ceq *ceq)
  1347. {
  1348. struct i40iw_sc_cqp *cqp;
  1349. cqp = ceq->dev->cqp;
  1350. return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CEQ, NULL);
  1351. }
  1352. /**
  1353. * i40iw_sc_cceq_destroy_done - poll for destroy cceq to complete
  1354. * @ceq: ceq sc structure
  1355. */
  1356. static enum i40iw_status_code i40iw_sc_cceq_destroy_done(struct i40iw_sc_ceq *ceq)
  1357. {
  1358. struct i40iw_sc_cqp *cqp;
  1359. cqp = ceq->dev->cqp;
  1360. cqp->process_cqp_sds = i40iw_update_sds_noccq;
  1361. return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_CEQ, NULL);
  1362. }
  1363. /**
  1364. * i40iw_sc_cceq_create - create cceq
  1365. * @ceq: ceq sc structure
  1366. * @scratch: u64 saved to be used during cqp completion
  1367. */
  1368. static enum i40iw_status_code i40iw_sc_cceq_create(struct i40iw_sc_ceq *ceq, u64 scratch)
  1369. {
  1370. enum i40iw_status_code ret_code;
  1371. ret_code = i40iw_sc_ceq_create(ceq, scratch, true);
  1372. if (!ret_code)
  1373. ret_code = i40iw_sc_cceq_create_done(ceq);
  1374. return ret_code;
  1375. }
  1376. /**
  1377. * i40iw_sc_ceq_destroy - destroy ceq
  1378. * @ceq: ceq sc structure
  1379. * @scratch: u64 saved to be used during cqp completion
  1380. * @post_sq: flag for cqp db to ring
  1381. */
  1382. static enum i40iw_status_code i40iw_sc_ceq_destroy(struct i40iw_sc_ceq *ceq,
  1383. u64 scratch,
  1384. bool post_sq)
  1385. {
  1386. struct i40iw_sc_cqp *cqp;
  1387. u64 *wqe;
  1388. u64 header;
  1389. cqp = ceq->dev->cqp;
  1390. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  1391. if (!wqe)
  1392. return I40IW_ERR_RING_FULL;
  1393. set_64bit_val(wqe, 16, ceq->elem_cnt);
  1394. set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx);
  1395. header = ceq->ceq_id |
  1396. LS_64(I40IW_CQP_OP_DESTROY_CEQ, I40IW_CQPSQ_OPCODE) |
  1397. LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) |
  1398. LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) |
  1399. LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) |
  1400. LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
  1401. i40iw_insert_wqe_hdr(wqe, header);
  1402. i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_DESTROY WQE",
  1403. wqe, I40IW_CQP_WQE_SIZE * 8);
  1404. if (post_sq)
  1405. i40iw_sc_cqp_post_sq(cqp);
  1406. return 0;
  1407. }
  1408. /**
  1409. * i40iw_sc_process_ceq - process ceq
  1410. * @dev: sc device struct
  1411. * @ceq: ceq sc structure
  1412. */
  1413. static void *i40iw_sc_process_ceq(struct i40iw_sc_dev *dev, struct i40iw_sc_ceq *ceq)
  1414. {
  1415. u64 temp;
  1416. u64 *ceqe;
  1417. struct i40iw_sc_cq *cq = NULL;
  1418. u8 polarity;
  1419. ceqe = (u64 *)I40IW_GET_CURRENT_CEQ_ELEMENT(ceq);
  1420. get_64bit_val(ceqe, 0, &temp);
  1421. polarity = (u8)RS_64(temp, I40IW_CEQE_VALID);
  1422. if (polarity != ceq->polarity)
  1423. return cq;
  1424. cq = (struct i40iw_sc_cq *)(unsigned long)LS_64_1(temp, 1);
  1425. I40IW_RING_MOVE_TAIL(ceq->ceq_ring);
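/* tail wrapped back to index 0, so the expected valid-bit polarity flips for the next lap */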
  1426. if (I40IW_RING_GETCURRENT_TAIL(ceq->ceq_ring) == 0)
  1427. ceq->polarity ^= 1;
  1428. if (dev->is_pf)
  1429. i40iw_wr32(dev->hw, I40E_PFPE_CQACK, cq->cq_uk.cq_id);
  1430. else
  1431. i40iw_wr32(dev->hw, I40E_VFPE_CQACK1, cq->cq_uk.cq_id);
  1432. return cq;
  1433. }
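/*
 * Illustrative sketch (not part of the original driver): a ceq consumer
 * typically drains the queue by looping until no valid element remains.
 * The per-cq completion handler called here is a placeholder assumption.
 */
#if 0
static void example_drain_ceq(struct i40iw_sc_dev *dev, struct i40iw_sc_ceq *ceq)
{
	struct i40iw_sc_cq *cq;

	while ((cq = i40iw_sc_process_ceq(dev, ceq)))
		example_handle_cq(cq);	/* hypothetical completion handler */
}
#endif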
  1434. /**
  1435. * i40iw_sc_aeq_init - initialize aeq
  1436. * @aeq: aeq structure ptr
  1437. * @info: aeq initialization info
  1438. */
  1439. static enum i40iw_status_code i40iw_sc_aeq_init(struct i40iw_sc_aeq *aeq,
  1440. struct i40iw_aeq_init_info *info)
  1441. {
  1442. u32 pble_obj_cnt;
  1443. if ((info->elem_cnt < I40IW_MIN_AEQ_ENTRIES) ||
  1444. (info->elem_cnt > I40IW_MAX_AEQ_ENTRIES))
  1445. return I40IW_ERR_INVALID_SIZE;
  1446. pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
  1447. if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
  1448. return I40IW_ERR_INVALID_PBLE_INDEX;
  1449. aeq->size = sizeof(*aeq);
  1450. aeq->polarity = 1;
  1451. aeq->aeqe_base = (struct i40iw_sc_aeqe *)info->aeqe_base;
  1452. aeq->dev = info->dev;
  1453. aeq->elem_cnt = info->elem_cnt;
  1454. aeq->aeq_elem_pa = info->aeq_elem_pa;
  1455. I40IW_RING_INIT(aeq->aeq_ring, aeq->elem_cnt);
  1456. info->dev->aeq = aeq;
  1457. aeq->virtual_map = info->virtual_map;
  1458. aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL);
  1459. aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0);
  1460. aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0);
  1462. return 0;
  1463. }
  1464. /**
  1465. * i40iw_sc_aeq_create - create aeq
  1466. * @aeq: aeq structure ptr
  1467. * @scratch: u64 saved to be used during cqp completion
  1468. * @post_sq: flag for cqp db to ring
  1469. */
  1470. static enum i40iw_status_code i40iw_sc_aeq_create(struct i40iw_sc_aeq *aeq,
  1471. u64 scratch,
  1472. bool post_sq)
  1473. {
  1474. u64 *wqe;
  1475. struct i40iw_sc_cqp *cqp;
  1476. u64 header;
  1477. cqp = aeq->dev->cqp;
  1478. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  1479. if (!wqe)
  1480. return I40IW_ERR_RING_FULL;
  1481. set_64bit_val(wqe, 16, aeq->elem_cnt);
  1482. set_64bit_val(wqe, 32,
  1483. (aeq->virtual_map ? 0 : aeq->aeq_elem_pa));
  1484. set_64bit_val(wqe, 48,
  1485. (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0));
  1486. header = LS_64(I40IW_CQP_OP_CREATE_AEQ, I40IW_CQPSQ_OPCODE) |
  1487. LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
  1488. LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
  1489. LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
  1490. i40iw_insert_wqe_hdr(wqe, header);
  1491. i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_CREATE WQE",
  1492. wqe, I40IW_CQP_WQE_SIZE * 8);
  1493. if (post_sq)
  1494. i40iw_sc_cqp_post_sq(cqp);
  1495. return 0;
  1496. }
  1497. /**
  1498. * i40iw_sc_aeq_destroy - destroy aeq during close
  1499. * @aeq: aeq structure ptr
  1500. * @scratch: u64 saved to be used during cqp completion
  1501. * @post_sq: flag for cqp db to ring
  1502. */
  1503. static enum i40iw_status_code i40iw_sc_aeq_destroy(struct i40iw_sc_aeq *aeq,
  1504. u64 scratch,
  1505. bool post_sq)
  1506. {
  1507. u64 *wqe;
  1508. struct i40iw_sc_cqp *cqp;
  1509. u64 header;
  1510. cqp = aeq->dev->cqp;
  1511. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  1512. if (!wqe)
  1513. return I40IW_ERR_RING_FULL;
  1514. set_64bit_val(wqe, 16, aeq->elem_cnt);
  1515. set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx);
  1516. header = LS_64(I40IW_CQP_OP_DESTROY_AEQ, I40IW_CQPSQ_OPCODE) |
  1517. LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) |
  1518. LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) |
  1519. LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
  1520. i40iw_insert_wqe_hdr(wqe, header);
  1521. i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_DESTROY WQE",
  1522. wqe, I40IW_CQP_WQE_SIZE * 8);
  1523. if (post_sq)
  1524. i40iw_sc_cqp_post_sq(cqp);
  1525. return 0;
  1526. }
  1527. /**
  1528. * i40iw_sc_get_next_aeqe - get next aeq entry
  1529. * @aeq: aeq structure ptr
  1530. * @info: aeqe info to be returned
  1531. */
  1532. static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq,
  1533. struct i40iw_aeqe_info *info)
  1534. {
  1535. u64 temp, compl_ctx;
  1536. u64 *aeqe;
  1537. u16 wqe_idx;
  1538. u8 ae_src;
  1539. u8 polarity;
  1540. aeqe = (u64 *)I40IW_GET_CURRENT_AEQ_ELEMENT(aeq);
  1541. get_64bit_val(aeqe, 0, &compl_ctx);
  1542. get_64bit_val(aeqe, 8, &temp);
  1543. polarity = (u8)RS_64(temp, I40IW_AEQE_VALID);
  1544. if (aeq->polarity != polarity)
  1545. return I40IW_ERR_QUEUE_EMPTY;
  1546. i40iw_debug_buf(aeq->dev, I40IW_DEBUG_WQE, "AEQ_ENTRY", aeqe, 16);
  1547. ae_src = (u8)RS_64(temp, I40IW_AEQE_AESRC);
  1548. wqe_idx = (u16)RS_64(temp, I40IW_AEQE_WQDESCIDX);
  1549. info->qp_cq_id = (u32)RS_64(temp, I40IW_AEQE_QPCQID);
  1550. info->ae_id = (u16)RS_64(temp, I40IW_AEQE_AECODE);
  1551. info->tcp_state = (u8)RS_64(temp, I40IW_AEQE_TCPSTATE);
  1552. info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE);
  1553. info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA);
  1554. info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW);
  1555. switch (info->ae_id) {
  1556. case I40IW_AE_PRIV_OPERATION_DENIED:
  1557. case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG:
  1558. case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT:
  1559. case I40IW_AE_BAD_CLOSE:
  1560. case I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE:
  1561. case I40IW_AE_RDMA_READ_WHILE_ORD_ZERO:
  1562. case I40IW_AE_STAG_ZERO_INVALID:
  1563. case I40IW_AE_IB_RREQ_AND_Q1_FULL:
  1564. case I40IW_AE_WQE_UNEXPECTED_OPCODE:
  1565. case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
  1566. case I40IW_AE_DDP_UBE_INVALID_MO:
  1567. case I40IW_AE_DDP_UBE_INVALID_QN:
  1568. case I40IW_AE_DDP_NO_L_BIT:
  1569. case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
  1570. case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
  1571. case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST:
  1572. case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
  1573. case I40IW_AE_INVALID_ARP_ENTRY:
  1574. case I40IW_AE_INVALID_TCP_OPTION_RCVD:
  1575. case I40IW_AE_STALE_ARP_ENTRY:
  1576. case I40IW_AE_LLP_CLOSE_COMPLETE:
  1577. case I40IW_AE_LLP_CONNECTION_RESET:
  1578. case I40IW_AE_LLP_FIN_RECEIVED:
  1579. case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
  1580. case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
  1581. case I40IW_AE_LLP_SYN_RECEIVED:
  1582. case I40IW_AE_LLP_TERMINATE_RECEIVED:
  1583. case I40IW_AE_LLP_TOO_MANY_RETRIES:
  1584. case I40IW_AE_LLP_DOUBT_REACHABILITY:
  1585. case I40IW_AE_RESET_SENT:
  1586. case I40IW_AE_TERMINATE_SENT:
  1587. case I40IW_AE_RESET_NOT_SENT:
  1588. case I40IW_AE_LCE_QP_CATASTROPHIC:
  1589. case I40IW_AE_QP_SUSPEND_COMPLETE:
  1590. info->qp = true;
  1591. info->compl_ctx = compl_ctx;
  1592. ae_src = I40IW_AE_SOURCE_RSVD;
  1593. break;
  1594. case I40IW_AE_LCE_CQ_CATASTROPHIC:
  1595. info->cq = true;
  1596. info->compl_ctx = LS_64_1(compl_ctx, 1);
  1597. ae_src = I40IW_AE_SOURCE_RSVD;
  1598. break;
  1599. }
  1600. switch (ae_src) {
  1601. case I40IW_AE_SOURCE_RQ:
  1602. case I40IW_AE_SOURCE_RQ_0011:
  1603. info->qp = true;
  1604. info->wqe_idx = wqe_idx;
  1605. info->compl_ctx = compl_ctx;
  1606. break;
  1607. case I40IW_AE_SOURCE_CQ:
  1608. case I40IW_AE_SOURCE_CQ_0110:
  1609. case I40IW_AE_SOURCE_CQ_1010:
  1610. case I40IW_AE_SOURCE_CQ_1110:
  1611. info->cq = true;
  1612. info->compl_ctx = LS_64_1(compl_ctx, 1);
  1613. break;
  1614. case I40IW_AE_SOURCE_SQ:
  1615. case I40IW_AE_SOURCE_SQ_0111:
  1616. info->qp = true;
  1617. info->sq = true;
  1618. info->wqe_idx = wqe_idx;
  1619. info->compl_ctx = compl_ctx;
  1620. break;
  1621. case I40IW_AE_SOURCE_IN_RR_WR:
  1622. case I40IW_AE_SOURCE_IN_RR_WR_1011:
  1623. info->qp = true;
  1624. info->compl_ctx = compl_ctx;
  1625. info->in_rdrsp_wr = true;
  1626. break;
  1627. case I40IW_AE_SOURCE_OUT_RR:
  1628. case I40IW_AE_SOURCE_OUT_RR_1111:
  1629. info->qp = true;
  1630. info->compl_ctx = compl_ctx;
  1631. info->out_rdrsp = true;
  1632. break;
  1633. case I40IW_AE_SOURCE_RSVD:
  1634. /* fallthrough */
  1635. default:
  1636. break;
  1637. }
  1638. I40IW_RING_MOVE_TAIL(aeq->aeq_ring);
  1639. if (I40IW_RING_GETCURRENT_TAIL(aeq->aeq_ring) == 0)
  1640. aeq->polarity ^= 1;
  1641. return 0;
  1642. }
  1643. /**
  1644. * i40iw_sc_repost_aeq_entries - repost completed aeq entries
  1645. * @dev: sc device struct
1646. * @count: number of completed aeq entries to repost
  1647. */
  1648. static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev,
  1649. u32 count)
  1650. {
  1651. if (dev->is_pf)
  1652. i40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count);
  1653. else
  1654. i40iw_wr32(dev->hw, I40E_VFPE_AEQALLOC1, count);
  1655. return 0;
  1656. }
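/*
 * Illustrative sketch (not part of the original driver): draining the aeq
 * and reposting the consumed entries in one batch, which is how the two
 * routines above are meant to pair. The ae dispatch is a placeholder.
 */
#if 0
static void example_drain_aeq(struct i40iw_sc_dev *dev, struct i40iw_sc_aeq *aeq)
{
	struct i40iw_aeqe_info info;
	u32 cnt = 0;

	while (!i40iw_sc_get_next_aeqe(aeq, &info)) {
		cnt++;
		/* dispatch on info.ae_id here (placeholder) */
	}
	if (cnt)
		i40iw_sc_repost_aeq_entries(dev, cnt);
}
#endif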
  1657. /**
1658. * i40iw_sc_aeq_create_done - poll for aeq create to complete
  1659. * @aeq: aeq structure ptr
  1660. */
  1661. static enum i40iw_status_code i40iw_sc_aeq_create_done(struct i40iw_sc_aeq *aeq)
  1662. {
  1663. struct i40iw_sc_cqp *cqp;
  1664. cqp = aeq->dev->cqp;
  1665. return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_AEQ, NULL);
  1666. }
  1667. /**
1668. * i40iw_sc_aeq_destroy_done - poll for aeq destroy to complete during close
  1669. * @aeq: aeq structure ptr
  1670. */
  1671. static enum i40iw_status_code i40iw_sc_aeq_destroy_done(struct i40iw_sc_aeq *aeq)
  1672. {
  1673. struct i40iw_sc_cqp *cqp;
  1674. cqp = aeq->dev->cqp;
  1675. return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_AEQ, NULL);
  1676. }
  1677. /**
  1678. * i40iw_sc_ccq_init - initialize control cq
1679. * @cq: sc's cq struct
  1680. * @info: info for control cq initialization
  1681. */
  1682. static enum i40iw_status_code i40iw_sc_ccq_init(struct i40iw_sc_cq *cq,
  1683. struct i40iw_ccq_init_info *info)
  1684. {
  1685. u32 pble_obj_cnt;
  1686. if (info->num_elem < I40IW_MIN_CQ_SIZE || info->num_elem > I40IW_MAX_CQ_SIZE)
  1687. return I40IW_ERR_INVALID_SIZE;
  1688. if (info->ceq_id > I40IW_MAX_CEQID)
  1689. return I40IW_ERR_INVALID_CEQ_ID;
  1690. pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
  1691. if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
  1692. return I40IW_ERR_INVALID_PBLE_INDEX;
  1693. cq->cq_pa = info->cq_pa;
  1694. cq->cq_uk.cq_base = info->cq_base;
  1695. cq->shadow_area_pa = info->shadow_area_pa;
  1696. cq->cq_uk.shadow_area = info->shadow_area;
  1697. cq->shadow_read_threshold = info->shadow_read_threshold;
  1698. cq->dev = info->dev;
  1699. cq->ceq_id = info->ceq_id;
  1700. cq->cq_uk.cq_size = info->num_elem;
  1701. cq->cq_type = I40IW_CQ_TYPE_CQP;
  1702. cq->ceqe_mask = info->ceqe_mask;
  1703. I40IW_RING_INIT(cq->cq_uk.cq_ring, info->num_elem);
  1704. cq->cq_uk.cq_id = 0; /* control cq is id 0 always */
  1705. cq->ceq_id_valid = info->ceq_id_valid;
  1706. cq->tph_en = info->tph_en;
  1707. cq->tph_val = info->tph_val;
  1708. cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct;
  1709. cq->pbl_list = info->pbl_list;
  1710. cq->virtual_map = info->virtual_map;
  1711. cq->pbl_chunk_size = info->pbl_chunk_size;
  1712. cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
  1713. cq->cq_uk.polarity = true;
  1714. /* following are only for iw cqs so initialize them to zero */
  1715. cq->cq_uk.cqe_alloc_reg = NULL;
  1716. info->dev->ccq = cq;
  1717. return 0;
  1718. }
  1719. /**
  1720. * i40iw_sc_ccq_create_done - poll cqp for ccq create
  1721. * @ccq: ccq sc struct
  1722. */
  1723. static enum i40iw_status_code i40iw_sc_ccq_create_done(struct i40iw_sc_cq *ccq)
  1724. {
  1725. struct i40iw_sc_cqp *cqp;
  1726. cqp = ccq->dev->cqp;
  1727. return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CQ, NULL);
  1728. }
  1729. /**
  1730. * i40iw_sc_ccq_create - create control cq
  1731. * @ccq: ccq sc struct
  1732. * @scratch: u64 saved to be used during cqp completion
1733. * @check_overflow: overflow flag for ccq
  1734. * @post_sq: flag for cqp db to ring
  1735. */
  1736. static enum i40iw_status_code i40iw_sc_ccq_create(struct i40iw_sc_cq *ccq,
  1737. u64 scratch,
  1738. bool check_overflow,
  1739. bool post_sq)
  1740. {
  1741. u64 *wqe;
  1742. struct i40iw_sc_cqp *cqp;
  1743. u64 header;
  1744. enum i40iw_status_code ret_code;
  1745. cqp = ccq->dev->cqp;
  1746. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  1747. if (!wqe)
  1748. return I40IW_ERR_RING_FULL;
  1749. set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
  1750. set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
  1751. set_64bit_val(wqe, 16,
  1752. LS_64(ccq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
  1753. set_64bit_val(wqe, 32, (ccq->virtual_map ? 0 : ccq->cq_pa));
  1754. set_64bit_val(wqe, 40, ccq->shadow_area_pa);
  1755. set_64bit_val(wqe, 48,
  1756. (ccq->virtual_map ? ccq->first_pm_pbl_idx : 0));
  1757. set_64bit_val(wqe, 56,
  1758. LS_64(ccq->tph_val, I40IW_CQPSQ_TPHVAL));
  1759. header = ccq->cq_uk.cq_id |
  1760. LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
  1761. LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
  1762. LS_64(ccq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
  1763. LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
  1764. LS_64(ccq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
  1765. LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
  1766. LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
  1767. LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
  1768. LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
  1769. LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
  1770. i40iw_insert_wqe_hdr(wqe, header);
  1771. i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_CREATE WQE",
  1772. wqe, I40IW_CQP_WQE_SIZE * 8);
  1773. if (post_sq) {
  1774. i40iw_sc_cqp_post_sq(cqp);
  1775. ret_code = i40iw_sc_ccq_create_done(ccq);
  1776. if (ret_code)
  1777. return ret_code;
  1778. }
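/* the ccq is now operational, so sd commands are routed through it from here on */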
  1779. cqp->process_cqp_sds = i40iw_cqp_sds_cmd;
  1780. return 0;
  1781. }
  1782. /**
  1783. * i40iw_sc_ccq_destroy - destroy ccq during close
  1784. * @ccq: ccq sc struct
  1785. * @scratch: u64 saved to be used during cqp completion
  1786. * @post_sq: flag for cqp db to ring
  1787. */
  1788. static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq,
  1789. u64 scratch,
  1790. bool post_sq)
  1791. {
  1792. struct i40iw_sc_cqp *cqp;
  1793. u64 *wqe;
  1794. u64 header;
  1795. enum i40iw_status_code ret_code = 0;
  1796. u32 tail, val, error;
  1797. cqp = ccq->dev->cqp;
  1798. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  1799. if (!wqe)
  1800. return I40IW_ERR_RING_FULL;
  1801. set_64bit_val(wqe, 0, ccq->cq_uk.cq_size);
  1802. set_64bit_val(wqe, 8, RS_64_1(ccq, 1));
  1803. set_64bit_val(wqe, 40, ccq->shadow_area_pa);
  1804. header = ccq->cq_uk.cq_id |
  1805. LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
  1806. LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
  1807. LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
  1808. LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
  1809. LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) |
  1810. LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
  1811. LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
  1812. i40iw_insert_wqe_hdr(wqe, header);
  1813. i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_DESTROY WQE",
  1814. wqe, I40IW_CQP_WQE_SIZE * 8);
  1815. i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
  1816. if (error)
  1817. return I40IW_ERR_CQP_COMPL_ERROR;
  1818. if (post_sq) {
  1819. i40iw_sc_cqp_post_sq(cqp);
  1820. ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
  1821. }
  1822. cqp->process_cqp_sds = i40iw_update_sds_noccq;
  1823. return ret_code;
  1824. }
  1825. /**
  1826. * i40iw_sc_cq_init - initialize completion q
  1827. * @cq: cq struct
  1828. * @info: cq initialization info
  1829. */
  1830. static enum i40iw_status_code i40iw_sc_cq_init(struct i40iw_sc_cq *cq,
  1831. struct i40iw_cq_init_info *info)
  1832. {
  1833. u32 __iomem *cqe_alloc_reg = NULL;
  1834. enum i40iw_status_code ret_code;
  1835. u32 pble_obj_cnt;
  1836. u32 arm_offset;
  1837. pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
  1838. if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt))
  1839. return I40IW_ERR_INVALID_PBLE_INDEX;
  1840. cq->cq_pa = info->cq_base_pa;
  1841. cq->dev = info->dev;
  1842. cq->ceq_id = info->ceq_id;
  1843. arm_offset = (info->dev->is_pf) ? I40E_PFPE_CQARM : I40E_VFPE_CQARM1;
  1844. if (i40iw_get_hw_addr(cq->dev))
  1845. cqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(cq->dev) +
  1846. arm_offset);
  1847. info->cq_uk_init_info.cqe_alloc_reg = cqe_alloc_reg;
  1848. ret_code = i40iw_cq_uk_init(&cq->cq_uk, &info->cq_uk_init_info);
  1849. if (ret_code)
  1850. return ret_code;
  1851. cq->virtual_map = info->virtual_map;
  1852. cq->pbl_chunk_size = info->pbl_chunk_size;
  1853. cq->ceqe_mask = info->ceqe_mask;
  1854. cq->cq_type = (info->type) ? info->type : I40IW_CQ_TYPE_IWARP;
  1855. cq->shadow_area_pa = info->shadow_area_pa;
  1856. cq->shadow_read_threshold = info->shadow_read_threshold;
  1857. cq->ceq_id_valid = info->ceq_id_valid;
  1858. cq->tph_en = info->tph_en;
  1859. cq->tph_val = info->tph_val;
  1860. cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
  1861. return 0;
  1862. }
  1863. /**
  1864. * i40iw_sc_cq_create - create completion q
  1865. * @cq: cq struct
  1866. * @scratch: u64 saved to be used during cqp completion
  1867. * @check_overflow: flag for overflow check
  1868. * @post_sq: flag for cqp db to ring
  1869. */
  1870. static enum i40iw_status_code i40iw_sc_cq_create(struct i40iw_sc_cq *cq,
  1871. u64 scratch,
  1872. bool check_overflow,
  1873. bool post_sq)
  1874. {
  1875. u64 *wqe;
  1876. struct i40iw_sc_cqp *cqp;
  1877. u64 header;
  1878. if (cq->cq_uk.cq_id > I40IW_MAX_CQID)
  1879. return I40IW_ERR_INVALID_CQ_ID;
  1880. if (cq->ceq_id > I40IW_MAX_CEQID)
  1881. return I40IW_ERR_INVALID_CEQ_ID;
  1882. cqp = cq->dev->cqp;
  1883. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  1884. if (!wqe)
  1885. return I40IW_ERR_RING_FULL;
  1886. set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
  1887. set_64bit_val(wqe, 8, RS_64_1(cq, 1));
  1888. set_64bit_val(wqe,
  1889. 16,
  1890. LS_64(cq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
  1891. set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
  1892. set_64bit_val(wqe, 40, cq->shadow_area_pa);
  1893. set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
  1894. set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));
  1895. header = cq->cq_uk.cq_id |
  1896. LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
  1897. LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
  1898. LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
  1899. LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
  1900. LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
  1901. LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
  1902. LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
  1903. LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
  1904. LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
  1905. LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
  1906. i40iw_insert_wqe_hdr(wqe, header);
  1907. i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_CREATE WQE",
  1908. wqe, I40IW_CQP_WQE_SIZE * 8);
  1909. if (post_sq)
  1910. i40iw_sc_cqp_post_sq(cqp);
  1911. return 0;
  1912. }
  1913. /**
  1914. * i40iw_sc_cq_destroy - destroy completion q
  1915. * @cq: cq struct
  1916. * @scratch: u64 saved to be used during cqp completion
  1917. * @post_sq: flag for cqp db to ring
  1918. */
  1919. static enum i40iw_status_code i40iw_sc_cq_destroy(struct i40iw_sc_cq *cq,
  1920. u64 scratch,
  1921. bool post_sq)
  1922. {
  1923. struct i40iw_sc_cqp *cqp;
  1924. u64 *wqe;
  1925. u64 header;
  1926. cqp = cq->dev->cqp;
  1927. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  1928. if (!wqe)
  1929. return I40IW_ERR_RING_FULL;
  1930. set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
  1931. set_64bit_val(wqe, 8, RS_64_1(cq, 1));
  1932. set_64bit_val(wqe, 40, cq->shadow_area_pa);
  1933. set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
  1934. header = cq->cq_uk.cq_id |
  1935. LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) |
  1936. LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) |
  1937. LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
  1938. LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
  1939. LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
  1940. LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
  1941. LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
  1942. LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
  1943. LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
  1944. i40iw_insert_wqe_hdr(wqe, header);
  1945. i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_DESTROY WQE",
  1946. wqe, I40IW_CQP_WQE_SIZE * 8);
  1947. if (post_sq)
  1948. i40iw_sc_cqp_post_sq(cqp);
  1949. return 0;
  1950. }
  1951. /**
1952. * i40iw_sc_cq_modify - modify a completion queue
1953. * @cq: cq struct
1954. * @info: modification info struct
1955. * @scratch: u64 saved to be used during cqp completion
  1956. * @post_sq: flag to post to sq
  1957. */
  1958. static enum i40iw_status_code i40iw_sc_cq_modify(struct i40iw_sc_cq *cq,
  1959. struct i40iw_modify_cq_info *info,
  1960. u64 scratch,
  1961. bool post_sq)
  1962. {
  1963. struct i40iw_sc_cqp *cqp;
  1964. u64 *wqe;
  1965. u64 header;
  1966. u32 cq_size, ceq_id, first_pm_pbl_idx;
  1967. u8 pbl_chunk_size;
  1968. bool virtual_map, ceq_id_valid, check_overflow;
  1969. u32 pble_obj_cnt;
  1970. if (info->ceq_valid && (info->ceq_id > I40IW_MAX_CEQID))
  1971. return I40IW_ERR_INVALID_CEQ_ID;
  1972. pble_obj_cnt = cq->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
  1973. if (info->cq_resize && info->virtual_map &&
  1974. (info->first_pm_pbl_idx >= pble_obj_cnt))
  1975. return I40IW_ERR_INVALID_PBLE_INDEX;
  1976. cqp = cq->dev->cqp;
  1977. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  1978. if (!wqe)
  1979. return I40IW_ERR_RING_FULL;
  1980. cq->pbl_list = info->pbl_list;
  1981. cq->cq_pa = info->cq_pa;
  1982. cq->first_pm_pbl_idx = info->first_pm_pbl_idx;
  1983. cq_size = info->cq_resize ? info->cq_size : cq->cq_uk.cq_size;
  1984. if (info->ceq_change) {
  1985. ceq_id_valid = true;
  1986. ceq_id = info->ceq_id;
  1987. } else {
  1988. ceq_id_valid = cq->ceq_id_valid;
  1989. ceq_id = ceq_id_valid ? cq->ceq_id : 0;
  1990. }
  1991. virtual_map = info->cq_resize ? info->virtual_map : cq->virtual_map;
  1992. first_pm_pbl_idx = (info->cq_resize ?
  1993. (info->virtual_map ? info->first_pm_pbl_idx : 0) :
  1994. (cq->virtual_map ? cq->first_pm_pbl_idx : 0));
  1995. pbl_chunk_size = (info->cq_resize ?
  1996. (info->virtual_map ? info->pbl_chunk_size : 0) :
  1997. (cq->virtual_map ? cq->pbl_chunk_size : 0));
  1998. check_overflow = info->check_overflow_change ? info->check_overflow :
  1999. cq->check_overflow;
  2000. cq->cq_uk.cq_size = cq_size;
  2001. cq->ceq_id_valid = ceq_id_valid;
  2002. cq->ceq_id = ceq_id;
  2003. cq->virtual_map = virtual_map;
  2004. cq->first_pm_pbl_idx = first_pm_pbl_idx;
  2005. cq->pbl_chunk_size = pbl_chunk_size;
  2006. cq->check_overflow = check_overflow;
  2007. set_64bit_val(wqe, 0, cq_size);
  2008. set_64bit_val(wqe, 8, RS_64_1(cq, 1));
  2009. set_64bit_val(wqe, 16,
  2010. LS_64(info->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD));
  2011. set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa));
  2012. set_64bit_val(wqe, 40, cq->shadow_area_pa);
  2013. set_64bit_val(wqe, 48, (cq->virtual_map ? first_pm_pbl_idx : 0));
  2014. set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL));
  2015. header = cq->cq_uk.cq_id |
  2016. LS_64(ceq_id, I40IW_CQPSQ_CQ_CEQID) |
  2017. LS_64(I40IW_CQP_OP_MODIFY_CQ, I40IW_CQPSQ_OPCODE) |
  2018. LS_64(info->cq_resize, I40IW_CQPSQ_CQ_CQRESIZE) |
  2019. LS_64(pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) |
  2020. LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) |
  2021. LS_64(virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) |
  2022. LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) |
  2023. LS_64(ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) |
  2024. LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) |
  2025. LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) |
  2026. LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
  2027. i40iw_insert_wqe_hdr(wqe, header);
  2028. i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_MODIFY WQE",
  2029. wqe, I40IW_CQP_WQE_SIZE * 8);
  2030. if (post_sq)
  2031. i40iw_sc_cqp_post_sq(cqp);
  2032. return 0;
  2033. }
  2034. /**
  2035. * i40iw_sc_qp_init - initialize qp
  2036. * @qp: sc qp
  2037. * @info: initialization qp info
  2038. */
  2039. static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp,
  2040. struct i40iw_qp_init_info *info)
  2041. {
  2042. u32 __iomem *wqe_alloc_reg = NULL;
  2043. enum i40iw_status_code ret_code;
  2044. u32 pble_obj_cnt;
  2045. u8 wqe_size;
  2046. u32 offset;
  2047. qp->dev = info->pd->dev;
  2048. qp->vsi = info->vsi;
  2049. qp->sq_pa = info->sq_pa;
  2050. qp->rq_pa = info->rq_pa;
  2051. qp->hw_host_ctx_pa = info->host_ctx_pa;
  2052. qp->q2_pa = info->q2_pa;
  2053. qp->shadow_area_pa = info->shadow_area_pa;
  2054. qp->q2_buf = info->q2;
  2055. qp->pd = info->pd;
  2056. qp->hw_host_ctx = info->host_ctx;
  2057. offset = (qp->pd->dev->is_pf) ? I40E_PFPE_WQEALLOC : I40E_VFPE_WQEALLOC1;
  2058. if (i40iw_get_hw_addr(qp->pd->dev))
  2059. wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) +
  2060. offset);
  2061. info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg;
  2062. info->qp_uk_init_info.abi_ver = qp->pd->abi_ver;
  2063. ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info);
  2064. if (ret_code)
  2065. return ret_code;
  2066. qp->virtual_map = info->virtual_map;
  2067. pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;
  2068. if ((info->virtual_map && (info->sq_pa >= pble_obj_cnt)) ||
  2069. (info->virtual_map && (info->rq_pa >= pble_obj_cnt)))
  2070. return I40IW_ERR_INVALID_PBLE_INDEX;
  2071. qp->llp_stream_handle = (void *)(-1);
  2072. qp->qp_type = (info->type) ? info->type : I40IW_QP_TYPE_IWARP;
  2073. qp->hw_sq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.sq_ring.size,
  2074. false);
  2075. i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n",
  2076. __func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size);
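/* rq wqe size depends on the user abi: abi 4 sizes it from the max fragment count, abi 5 and later use the fixed maximum */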
  2077. switch (qp->pd->abi_ver) {
  2078. case 4:
  2079. ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt,
  2080. &wqe_size);
  2081. if (ret_code)
  2082. return ret_code;
  2083. break;
  2084. case 5: /* fallthrough until next ABI version */
  2085. default:
  2086. if (qp->qp_uk.max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT)
  2087. return I40IW_ERR_INVALID_FRAG_COUNT;
  2088. wqe_size = I40IW_MAX_WQE_SIZE_RQ;
  2089. break;
  2090. }
  2091. qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size *
  2092. (wqe_size / I40IW_QP_WQE_MIN_SIZE), false);
  2093. i40iw_debug(qp->dev, I40IW_DEBUG_WQE,
  2094. "%s: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n",
  2095. __func__, qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size);
  2096. qp->sq_tph_val = info->sq_tph_val;
  2097. qp->rq_tph_val = info->rq_tph_val;
  2098. qp->sq_tph_en = info->sq_tph_en;
  2099. qp->rq_tph_en = info->rq_tph_en;
  2100. qp->rcv_tph_en = info->rcv_tph_en;
  2101. qp->xmit_tph_en = info->xmit_tph_en;
  2102. qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle;
  2103. return 0;
  2104. }
  2105. /**
  2106. * i40iw_sc_qp_create - create qp
  2107. * @qp: sc qp
  2108. * @info: qp create info
  2109. * @scratch: u64 saved to be used during cqp completion
  2110. * @post_sq: flag for cqp db to ring
  2111. */
  2112. static enum i40iw_status_code i40iw_sc_qp_create(
  2113. struct i40iw_sc_qp *qp,
  2114. struct i40iw_create_qp_info *info,
  2115. u64 scratch,
  2116. bool post_sq)
  2117. {
  2118. struct i40iw_sc_cqp *cqp;
  2119. u64 *wqe;
  2120. u64 header;
  2121. if ((qp->qp_uk.qp_id < I40IW_MIN_IW_QP_ID) ||
  2122. (qp->qp_uk.qp_id > I40IW_MAX_IW_QP_ID))
  2123. return I40IW_ERR_INVALID_QP_ID;
  2124. cqp = qp->pd->dev->cqp;
  2125. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  2126. if (!wqe)
  2127. return I40IW_ERR_RING_FULL;
  2128. set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
  2129. set_64bit_val(wqe, 40, qp->shadow_area_pa);
  2130. header = qp->qp_uk.qp_id |
  2131. LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) |
  2132. LS_64((info->ord_valid ? 1 : 0), I40IW_CQPSQ_QP_ORDVALID) |
  2133. LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
  2134. LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
  2135. LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
  2136. LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
  2137. LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
  2138. LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
  2139. LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
  2140. i40iw_insert_wqe_hdr(wqe, header);
  2141. i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_CREATE WQE",
  2142. wqe, I40IW_CQP_WQE_SIZE * 8);
  2143. if (post_sq)
  2144. i40iw_sc_cqp_post_sq(cqp);
  2145. return 0;
  2146. }
  2147. /**
  2148. * i40iw_sc_qp_modify - modify qp cqp wqe
  2149. * @qp: sc qp
  2150. * @info: modify qp info
  2151. * @scratch: u64 saved to be used during cqp completion
  2152. * @post_sq: flag for cqp db to ring
  2153. */
  2154. static enum i40iw_status_code i40iw_sc_qp_modify(
  2155. struct i40iw_sc_qp *qp,
  2156. struct i40iw_modify_qp_info *info,
  2157. u64 scratch,
  2158. bool post_sq)
  2159. {
  2160. u64 *wqe;
  2161. struct i40iw_sc_cqp *cqp;
  2162. u64 header;
  2163. u8 term_actions = 0;
  2164. u8 term_len = 0;
  2165. cqp = qp->pd->dev->cqp;
  2166. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  2167. if (!wqe)
  2168. return I40IW_ERR_RING_FULL;
  2169. if (info->next_iwarp_state == I40IW_QP_STATE_TERMINATE) {
  2170. if (info->dont_send_fin)
  2171. term_actions += I40IWQP_TERM_SEND_TERM_ONLY;
  2172. if (info->dont_send_term)
  2173. term_actions += I40IWQP_TERM_SEND_FIN_ONLY;
  2174. if ((term_actions == I40IWQP_TERM_SEND_TERM_AND_FIN) ||
  2175. (term_actions == I40IWQP_TERM_SEND_TERM_ONLY))
  2176. term_len = info->termlen;
  2177. }
  2178. set_64bit_val(wqe,
  2179. 8,
  2180. LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));
  2181. set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
  2182. set_64bit_val(wqe, 40, qp->shadow_area_pa);
  2183. header = qp->qp_uk.qp_id |
  2184. LS_64(I40IW_CQP_OP_MODIFY_QP, I40IW_CQPSQ_OPCODE) |
  2185. LS_64(info->ord_valid, I40IW_CQPSQ_QP_ORDVALID) |
  2186. LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) |
  2187. LS_64(info->cached_var_valid, I40IW_CQPSQ_QP_CACHEDVARVALID) |
  2188. LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) |
  2189. LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
  2190. LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
  2191. LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
  2192. LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
  2193. LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |
  2194. LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) |
  2195. LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) |
  2196. LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) |
  2197. LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
  2198. i40iw_insert_wqe_hdr(wqe, header);
  2199. i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_MODIFY WQE",
  2200. wqe, I40IW_CQP_WQE_SIZE * 8);
  2201. if (post_sq)
  2202. i40iw_sc_cqp_post_sq(cqp);
  2203. return 0;
  2204. }
  2205. /**
  2206. * i40iw_sc_qp_destroy - cqp destroy qp
  2207. * @qp: sc qp
  2208. * @scratch: u64 saved to be used during cqp completion
2209. * @remove_hash_idx: flag to remove the quad hash table entry
  2210. * @ignore_mw_bnd: memory window bind flag
  2211. * @post_sq: flag for cqp db to ring
  2212. */
  2213. static enum i40iw_status_code i40iw_sc_qp_destroy(
  2214. struct i40iw_sc_qp *qp,
  2215. u64 scratch,
  2216. bool remove_hash_idx,
  2217. bool ignore_mw_bnd,
  2218. bool post_sq)
  2219. {
  2220. u64 *wqe;
  2221. struct i40iw_sc_cqp *cqp;
  2222. u64 header;
  2223. i40iw_qp_rem_qos(qp);
  2224. cqp = qp->pd->dev->cqp;
  2225. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  2226. if (!wqe)
  2227. return I40IW_ERR_RING_FULL;
  2228. set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
  2229. set_64bit_val(wqe, 40, qp->shadow_area_pa);
  2230. header = qp->qp_uk.qp_id |
  2231. LS_64(I40IW_CQP_OP_DESTROY_QP, I40IW_CQPSQ_OPCODE) |
  2232. LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
  2233. LS_64(ignore_mw_bnd, I40IW_CQPSQ_QP_IGNOREMWBOUND) |
  2234. LS_64(remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
  2235. LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
  2236. i40iw_insert_wqe_hdr(wqe, header);
  2237. i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_DESTROY WQE",
  2238. wqe, I40IW_CQP_WQE_SIZE * 8);
  2239. if (post_sq)
  2240. i40iw_sc_cqp_post_sq(cqp);
  2241. return 0;
  2242. }
  2243. /**
  2244. * i40iw_sc_qp_flush_wqes - flush qp's wqe
  2245. * @qp: sc qp
2246. * @info: flush information
  2247. * @scratch: u64 saved to be used during cqp completion
  2248. * @post_sq: flag for cqp db to ring
  2249. */
  2250. static enum i40iw_status_code i40iw_sc_qp_flush_wqes(
  2251. struct i40iw_sc_qp *qp,
  2252. struct i40iw_qp_flush_info *info,
  2253. u64 scratch,
  2254. bool post_sq)
  2255. {
  2256. u64 temp = 0;
  2257. u64 *wqe;
  2258. struct i40iw_sc_cqp *cqp;
  2259. u64 header;
  2260. bool flush_sq = false, flush_rq = false;
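/* flush each queue at most once per qp: only request what has not already been flushed */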
  2261. if (info->rq && !qp->flush_rq)
  2262. flush_rq = true;
  2263. if (info->sq && !qp->flush_sq)
  2264. flush_sq = true;
  2265. qp->flush_sq |= flush_sq;
  2266. qp->flush_rq |= flush_rq;
  2267. if (!flush_sq && !flush_rq)
  2268. return 0;
  2269. cqp = qp->pd->dev->cqp;
  2270. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  2271. if (!wqe)
  2272. return I40IW_ERR_RING_FULL;
  2273. if (info->userflushcode) {
  2274. if (flush_rq) {
  2275. temp |= LS_64(info->rq_minor_code, I40IW_CQPSQ_FWQE_RQMNERR) |
  2276. LS_64(info->rq_major_code, I40IW_CQPSQ_FWQE_RQMJERR);
  2277. }
  2278. if (flush_sq) {
  2279. temp |= LS_64(info->sq_minor_code, I40IW_CQPSQ_FWQE_SQMNERR) |
  2280. LS_64(info->sq_major_code, I40IW_CQPSQ_FWQE_SQMJERR);
  2281. }
  2282. }
  2283. set_64bit_val(wqe, 16, temp);
  2284. temp = (info->generate_ae) ?
  2285. info->ae_code | LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE) : 0;
  2286. set_64bit_val(wqe, 8, temp);
  2287. header = qp->qp_uk.qp_id |
  2288. LS_64(I40IW_CQP_OP_FLUSH_WQES, I40IW_CQPSQ_OPCODE) |
  2289. LS_64(info->generate_ae, I40IW_CQPSQ_FWQE_GENERATE_AE) |
  2290. LS_64(info->userflushcode, I40IW_CQPSQ_FWQE_USERFLCODE) |
  2291. LS_64(flush_sq, I40IW_CQPSQ_FWQE_FLUSHSQ) |
  2292. LS_64(flush_rq, I40IW_CQPSQ_FWQE_FLUSHRQ) |
  2293. LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
  2294. i40iw_insert_wqe_hdr(wqe, header);
  2295. i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_FLUSH WQE",
  2296. wqe, I40IW_CQP_WQE_SIZE * 8);
  2297. if (post_sq)
  2298. i40iw_sc_cqp_post_sq(cqp);
  2299. return 0;
  2300. }
  2301. /**
2302. * i40iw_sc_gen_ae - generate an AE (currently implemented via the flush WQE CQP OP)
  2303. * @qp: sc qp
  2304. * @info: gen ae information
  2305. * @scratch: u64 saved to be used during cqp completion
  2306. * @post_sq: flag for cqp db to ring
  2307. */
  2308. static enum i40iw_status_code i40iw_sc_gen_ae(
  2309. struct i40iw_sc_qp *qp,
  2310. struct i40iw_gen_ae_info *info,
  2311. u64 scratch,
  2312. bool post_sq)
  2313. {
  2314. u64 temp;
  2315. u64 *wqe;
  2316. struct i40iw_sc_cqp *cqp;
  2317. u64 header;
  2318. cqp = qp->pd->dev->cqp;
  2319. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  2320. if (!wqe)
  2321. return I40IW_ERR_RING_FULL;
  2322. temp = info->ae_code |
  2323. LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE);
  2324. set_64bit_val(wqe, 8, temp);
  2325. header = qp->qp_uk.qp_id |
  2326. LS_64(I40IW_CQP_OP_GEN_AE, I40IW_CQPSQ_OPCODE) |
  2327. LS_64(1, I40IW_CQPSQ_FWQE_GENERATE_AE) |
  2328. LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
  2329. i40iw_insert_wqe_hdr(wqe, header);
  2330. i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "GEN_AE WQE",
  2331. wqe, I40IW_CQP_WQE_SIZE * 8);
  2332. if (post_sq)
  2333. i40iw_sc_cqp_post_sq(cqp);
  2334. return 0;
  2335. }
  2336. /**
  2337. * i40iw_sc_qp_upload_context - upload qp's context
  2338. * @dev: sc device struct
  2339. * @info: upload context info ptr for return
  2340. * @scratch: u64 saved to be used during cqp completion
  2341. * @post_sq: flag for cqp db to ring
  2342. */
  2343. static enum i40iw_status_code i40iw_sc_qp_upload_context(
  2344. struct i40iw_sc_dev *dev,
  2345. struct i40iw_upload_context_info *info,
  2346. u64 scratch,
  2347. bool post_sq)
  2348. {
  2349. u64 *wqe;
  2350. struct i40iw_sc_cqp *cqp;
  2351. u64 header;
  2352. cqp = dev->cqp;
  2353. wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
  2354. if (!wqe)
  2355. return I40IW_ERR_RING_FULL;
  2356. set_64bit_val(wqe, 16, info->buf_pa);
  2357. header = LS_64(info->qp_id, I40IW_CQPSQ_UCTX_QPID) |
  2358. LS_64(I40IW_CQP_OP_UPLOAD_CONTEXT, I40IW_CQPSQ_OPCODE) |
  2359. LS_64(info->qp_type, I40IW_CQPSQ_UCTX_QPTYPE) |
  2360. LS_64(info->raw_format, I40IW_CQPSQ_UCTX_RAWFORMAT) |
  2361. LS_64(info->freeze_qp, I40IW_CQPSQ_UCTX_FREEZEQP) |
  2362. LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
  2363. i40iw_insert_wqe_hdr(wqe, header);
  2364. i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QP_UPLOAD_CTX WQE",
  2365. wqe, I40IW_CQP_WQE_SIZE * 8);
  2366. if (post_sq)
  2367. i40iw_sc_cqp_post_sq(cqp);
  2368. return 0;
  2369. }
/**
 * i40iw_sc_qp_setctx - set qp's context
 * @qp: sc qp
 * @qp_ctx: context ptr
 * @info: ctx info
 */
static enum i40iw_status_code i40iw_sc_qp_setctx(
				struct i40iw_sc_qp *qp,
				u64 *qp_ctx,
				struct i40iw_qp_host_ctx_info *info)
{
	struct i40iwarp_offload_info *iw;
	struct i40iw_tcp_offload_info *tcp;
	struct i40iw_sc_vsi *vsi;
	struct i40iw_sc_dev *dev;
	u64 qw0, qw3, qw7 = 0;

	iw = info->iwarp_info;
	tcp = info->tcp_info;
	vsi = qp->vsi;
	dev = qp->dev;
	if (info->add_to_qoslist) {
		qp->user_pri = info->user_pri;
		i40iw_qp_add_qos(qp);
		i40iw_debug(qp->dev, I40IW_DEBUG_DCB, "%s qp[%d] UP[%d] qset[%d]\n",
			    __func__, qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle);
	}
	qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) |
	      LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) |
	      LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) |
	      LS_64(qp->xmit_tph_en, I40IWQPC_XMITTPHEN) |
	      LS_64(qp->rq_tph_en, I40IWQPC_RQTPHEN) |
	      LS_64(qp->sq_tph_en, I40IWQPC_SQTPHEN) |
	      LS_64(info->push_idx, I40IWQPC_PPIDX) |
	      LS_64(info->push_mode_en, I40IWQPC_PMENA);

	set_64bit_val(qp_ctx, 8, qp->sq_pa);
	set_64bit_val(qp_ctx, 16, qp->rq_pa);

	qw3 = LS_64(qp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
	      LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) |
	      LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE);

	set_64bit_val(qp_ctx,
		      128,
		      LS_64(info->err_rq_idx, I40IWQPC_ERR_RQ_IDX));

	set_64bit_val(qp_ctx,
		      136,
		      LS_64(info->send_cq_num, I40IWQPC_TXCQNUM) |
		      LS_64(info->rcv_cq_num, I40IWQPC_RXCQNUM));

	set_64bit_val(qp_ctx,
		      168,
		      LS_64(info->qp_compl_ctx, I40IWQPC_QPCOMPCTX));
	set_64bit_val(qp_ctx,
		      176,
		      LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) |
		      LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) |
		      LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) |
		      LS_64(vsi->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE));

	if (info->iwarp_info_valid) {
		qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) |
		       LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER);

		qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX);
		set_64bit_val(qp_ctx,
			      144,
			      LS_64(qp->q2_pa, I40IWQPC_Q2ADDR) |
			      LS_64(vsi->fcn_id, I40IWQPC_STAT_INDEX));
		set_64bit_val(qp_ctx,
			      152,
			      LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT));

		set_64bit_val(qp_ctx,
			      160,
			      LS_64(iw->ord_size, I40IWQPC_ORDSIZE) |
			      LS_64(iw->ird_size, I40IWQPC_IRDSIZE) |
			      LS_64(iw->wr_rdresp_en, I40IWQPC_WRRDRSPOK) |
			      LS_64(iw->rd_enable, I40IWQPC_RDOK) |
			      LS_64(iw->snd_mark_en, I40IWQPC_SNDMARKERS) |
			      LS_64(iw->bind_en, I40IWQPC_BINDEN) |
			      LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) |
			      LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) |
			      LS_64((((vsi->stats_fcn_id_alloc) &&
				      (dev->is_pf) && (vsi->fcn_id >= I40IW_FIRST_NON_PF_STAT)) ? 1 : 0),
				    I40IWQPC_USESTATSINSTANCE) |
			      LS_64(1, I40IWQPC_IWARPMODE) |
			      LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) |
			      LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) |
			      LS_64(iw->rcv_no_mpa_crc, I40IWQPC_RCVNOMPACRC) |
			      LS_64(iw->rcv_mark_offset, I40IWQPC_RCVMARKOFFSET) |
			      LS_64(iw->snd_mark_offset, I40IWQPC_SNDMARKOFFSET));
	}
	if (info->tcp_info_valid) {
		qw0 |= LS_64(tcp->ipv4, I40IWQPC_IPV4) |
		       LS_64(tcp->no_nagle, I40IWQPC_NONAGLE) |
		       LS_64(tcp->insert_vlan_tag, I40IWQPC_INSERTVLANTAG) |
		       LS_64(tcp->time_stamp, I40IWQPC_TIMESTAMP) |
		       LS_64(tcp->cwnd_inc_limit, I40IWQPC_LIMIT) |
		       LS_64(tcp->drop_ooo_seg, I40IWQPC_DROPOOOSEG) |
		       LS_64(tcp->dup_ack_thresh, I40IWQPC_DUPACK_THRESH);

		qw3 |= LS_64(tcp->ttl, I40IWQPC_TTL) |
		       LS_64(tcp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) |
		       LS_64(tcp->avoid_stretch_ack, I40IWQPC_AVOIDSTRETCHACK) |
		       LS_64(tcp->tos, I40IWQPC_TOS) |
		       LS_64(tcp->src_port, I40IWQPC_SRCPORTNUM) |
		       LS_64(tcp->dst_port, I40IWQPC_DESTPORTNUM);

		qp->src_mac_addr_idx = tcp->src_mac_addr_idx;
		set_64bit_val(qp_ctx,
			      32,
			      LS_64(tcp->dest_ip_addr2, I40IWQPC_DESTIPADDR2) |
			      LS_64(tcp->dest_ip_addr3, I40IWQPC_DESTIPADDR3));

		set_64bit_val(qp_ctx,
			      40,
			      LS_64(tcp->dest_ip_addr0, I40IWQPC_DESTIPADDR0) |
			      LS_64(tcp->dest_ip_addr1, I40IWQPC_DESTIPADDR1));

		set_64bit_val(qp_ctx,
			      48,
			      LS_64(tcp->snd_mss, I40IWQPC_SNDMSS) |
			      LS_64(tcp->vlan_tag, I40IWQPC_VLANTAG) |
			      LS_64(tcp->arp_idx, I40IWQPC_ARPIDX));

		qw7 |= LS_64(tcp->flow_label, I40IWQPC_FLOWLABEL) |
		       LS_64(tcp->wscale, I40IWQPC_WSCALE) |
		       LS_64(tcp->ignore_tcp_opt, I40IWQPC_IGNORE_TCP_OPT) |
		       LS_64(tcp->ignore_tcp_uns_opt, I40IWQPC_IGNORE_TCP_UNS_OPT) |
		       LS_64(tcp->tcp_state, I40IWQPC_TCPSTATE) |
		       LS_64(tcp->rcv_wscale, I40IWQPC_RCVSCALE) |
		       LS_64(tcp->snd_wscale, I40IWQPC_SNDSCALE);

		set_64bit_val(qp_ctx,
			      72,
			      LS_64(tcp->time_stamp_recent, I40IWQPC_TIMESTAMP_RECENT) |
			      LS_64(tcp->time_stamp_age, I40IWQPC_TIMESTAMP_AGE));
		set_64bit_val(qp_ctx,
			      80,
			      LS_64(tcp->snd_nxt, I40IWQPC_SNDNXT) |
			      LS_64(tcp->snd_wnd, I40IWQPC_SNDWND));

		set_64bit_val(qp_ctx,
			      88,
			      LS_64(tcp->rcv_nxt, I40IWQPC_RCVNXT) |
			      LS_64(tcp->rcv_wnd, I40IWQPC_RCVWND));
		set_64bit_val(qp_ctx,
			      96,
			      LS_64(tcp->snd_max, I40IWQPC_SNDMAX) |
			      LS_64(tcp->snd_una, I40IWQPC_SNDUNA));
		set_64bit_val(qp_ctx,
			      104,
			      LS_64(tcp->srtt, I40IWQPC_SRTT) |
			      LS_64(tcp->rtt_var, I40IWQPC_RTTVAR));
		set_64bit_val(qp_ctx,
			      112,
			      LS_64(tcp->ss_thresh, I40IWQPC_SSTHRESH) |
			      LS_64(tcp->cwnd, I40IWQPC_CWND));
		set_64bit_val(qp_ctx,
			      120,
			      LS_64(tcp->snd_wl1, I40IWQPC_SNDWL1) |
			      LS_64(tcp->snd_wl2, I40IWQPC_SNDWL2));
		set_64bit_val(qp_ctx,
			      128,
			      LS_64(tcp->max_snd_window, I40IWQPC_MAXSNDWND) |
			      LS_64(tcp->rexmit_thresh, I40IWQPC_REXMIT_THRESH));
		set_64bit_val(qp_ctx,
			      184,
			      LS_64(tcp->local_ipaddr3, I40IWQPC_LOCAL_IPADDR3) |
			      LS_64(tcp->local_ipaddr2, I40IWQPC_LOCAL_IPADDR2));
		set_64bit_val(qp_ctx,
			      192,
			      LS_64(tcp->local_ipaddr1, I40IWQPC_LOCAL_IPADDR1) |
			      LS_64(tcp->local_ipaddr0, I40IWQPC_LOCAL_IPADDR0));
	}

	set_64bit_val(qp_ctx, 0, qw0);
	set_64bit_val(qp_ctx, 24, qw3);
	set_64bit_val(qp_ctx, 56, qw7);
	i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "QP_HOST CTX WQE",
			qp_ctx, I40IW_QP_CTX_SIZE);

	return 0;
}

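/*
 * Editor's note: i40iw_sc_qp_setctx() stages qwords 0, 3 and 7 in local
 * variables because several independent blocks (base, iwarp, tcp) each
 * contribute bits to the same 64-bit context words; accumulating first
 * and storing once avoids rewriting DMA-visible memory. A hedged sketch
 * of the pattern (SOME_FIELD/OTHER_FIELD are placeholders, not real
 * i40iw defines):
 *
 *	u64 qw0 = 0;
 *	qw0 |= LS_64(base_bits, SOME_FIELD);	// unconditional fields
 *	if (have_iwarp)
 *		qw0 |= LS_64(iw_bits, OTHER_FIELD); // conditional fields
 *	set_64bit_val(qp_ctx, 0, qw0);		// single final store
 */
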
/**
 * i40iw_sc_alloc_stag - mr stag alloc
 * @dev: sc device struct
 * @info: stag info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_alloc_stag(
				struct i40iw_sc_dev *dev,
				struct i40iw_allocate_stag_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	enum i40iw_page_size page_size;

	page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	set_64bit_val(wqe,
		      8,
		      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID) |
		      LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN));
	set_64bit_val(wqe,
		      16,
		      LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
	set_64bit_val(wqe,
		      40,
		      LS_64(info->hmc_fcn_index, I40IW_CQPSQ_STAG_HMCFNIDX));

	header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_STAG_MR) |
		 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
		 LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
		 LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
		 LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
		 LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
		 LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "ALLOC_STAG WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_mr_reg_non_shared - non-shared mr registration
 * @dev: sc device struct
 * @info: mr info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_mr_reg_non_shared(
				struct i40iw_sc_dev *dev,
				struct i40iw_reg_ns_stag_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	u64 temp;
	struct i40iw_sc_cqp *cqp;
	u64 header;
	u32 pble_obj_cnt;
	bool remote_access;
	u8 addr_type;
	enum i40iw_page_size page_size;

	page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
	if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
				   I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
		remote_access = true;
	else
		remote_access = false;

	pble_obj_cnt = dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt;

	if (info->chunk_size && (info->first_pm_pbl_index >= pble_obj_cnt))
		return I40IW_ERR_INVALID_PBLE_INDEX;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
	set_64bit_val(wqe, 0, temp);

	set_64bit_val(wqe,
		      8,
		      LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN) |
		      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
	set_64bit_val(wqe,
		      16,
		      LS_64(info->stag_key, I40IW_CQPSQ_STAG_KEY) |
		      LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));
	if (!info->chunk_size) {
		set_64bit_val(wqe, 32, info->reg_addr_pa);
		set_64bit_val(wqe, 48, 0);
	} else {
		set_64bit_val(wqe, 32, 0);
		set_64bit_val(wqe, 48, info->first_pm_pbl_index);
	}
	set_64bit_val(wqe, 40, info->hmc_fcn_index);
	set_64bit_val(wqe, 56, 0);

	addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
	header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_STAG_MR) |
		 LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
		 LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
		 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
		 LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
		 LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
		 LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
		 LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_NS WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

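/*
 * Editor's note: the chunk_size test above selects between the two MR
 * layouts: chunk_size == 0 means the region is physically contiguous and
 * qword 4 (offset 32) carries the base physical address directly, while
 * a non-zero chunk_size means the pages are described by a PBL and
 * qword 6 (offset 48) carries the index of the first PBL entry instead.
 * Illustrative decision only:
 *
 *	if (!info->chunk_size)
 *		// one contiguous buffer: program reg_addr_pa
 *	else
 *		// scattered pages: program first_pm_pbl_index
 */
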
/**
 * i40iw_sc_mr_reg_shared - register shared memory region
 * @dev: sc device struct
 * @info: info for shared memory registration
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_mr_reg_shared(
				struct i40iw_sc_dev *dev,
				struct i40iw_register_shared_stag *info,
				u64 scratch,
				bool post_sq)
{
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;
	u64 temp, va64, fbo, header;
	u32 va32;
	bool remote_access;
	u8 addr_type;

	if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
				   I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
		remote_access = true;
	else
		remote_access = false;
	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	va64 = (uintptr_t)(info->va);
	va32 = (u32)(va64 & 0x00000000FFFFFFFF);
	fbo = (u64)(va32 & (4096 - 1));

	set_64bit_val(wqe,
		      0,
		      (info->addr_type == I40IW_ADDR_TYPE_VA_BASED ? (uintptr_t)info->va : fbo));

	set_64bit_val(wqe,
		      8,
		      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
	temp = LS_64(info->new_stag_key, I40IW_CQPSQ_STAG_KEY) |
	       LS_64(info->new_stag_idx, I40IW_CQPSQ_STAG_IDX) |
	       LS_64(info->parent_stag_idx, I40IW_CQPSQ_STAG_PARENTSTAGIDX);
	set_64bit_val(wqe, 16, temp);

	addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 1 : 0;
	header = LS_64(I40IW_CQP_OP_REG_SMR, I40IW_CQPSQ_OPCODE) |
		 LS_64(1, I40IW_CQPSQ_STAG_MR) |
		 LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
		 LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
		 LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_SHARED WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

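/*
 * Editor's note: for a zero-based (non-VA) shared registration the first
 * byte offset (FBO) is derived from the low 32 bits of the parent VA,
 * i.e. the offset of the mapping within its 4K page. Worked example
 * (address value illustrative only):
 *
 *	va64 = 0x00007f3a12345678
 *	va32 = 0x12345678		// low 32 bits
 *	fbo  = va32 & (4096 - 1) = 0x678
 *
 * so only the page offset, never the full virtual address, is programmed
 * into qword 0 of the WQE in that mode.
 */
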
/**
 * i40iw_sc_dealloc_stag - deallocate stag
 * @dev: sc device struct
 * @info: dealloc stag info
 * @scratch: u64 saved to be used during cqp completion
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_dealloc_stag(
				struct i40iw_sc_dev *dev,
				struct i40iw_dealloc_stag_info *info,
				u64 scratch,
				bool post_sq)
{
	u64 header;
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe,
		      8,
		      LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
	set_64bit_val(wqe,
		      16,
		      LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX));

	header = LS_64(I40IW_CQP_OP_DEALLOC_STAG, I40IW_CQPSQ_OPCODE) |
		 LS_64(info->mr, I40IW_CQPSQ_STAG_MR) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "DEALLOC_STAG WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_query_stag - query hardware for stag
 * @dev: sc device struct
 * @scratch: u64 saved to be used during cqp completion
 * @stag_index: stag index for query
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_query_stag(struct i40iw_sc_dev *dev,
						  u64 scratch,
						  u32 stag_index,
						  bool post_sq)
{
	u64 header;
	u64 *wqe;
	struct i40iw_sc_cqp *cqp;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe,
		      16,
		      LS_64(stag_index, I40IW_CQPSQ_QUERYSTAG_IDX));

	header = LS_64(I40IW_CQP_OP_QUERY_STAG, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QUERY_STAG WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_mw_alloc - mw allocate
 * @dev: sc device struct
 * @scratch: u64 saved to be used during cqp completion
 * @mw_stag_index: stag index
 * @pd_id: pd id for this mw
 * @post_sq: flag for cqp db to ring
 */
static enum i40iw_status_code i40iw_sc_mw_alloc(
				struct i40iw_sc_dev *dev,
				u64 scratch,
				u32 mw_stag_index,
				u16 pd_id,
				bool post_sq)
{
	u64 header;
	struct i40iw_sc_cqp *cqp;
	u64 *wqe;

	cqp = dev->cqp;
	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe, 8, LS_64(pd_id, I40IW_CQPSQ_STAG_PDID));
	set_64bit_val(wqe,
		      16,
		      LS_64(mw_stag_index, I40IW_CQPSQ_STAG_IDX));

	header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MW_ALLOC WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	if (post_sq)
		i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp
 * @qp: sc qp struct
 * @info: fast mr info
 * @post_sq: flag for cqp db to ring
 */
enum i40iw_status_code i40iw_sc_mr_fast_register(
				struct i40iw_sc_qp *qp,
				struct i40iw_fast_reg_stag_info *info,
				bool post_sq)
{
	u64 temp, header;
	u64 *wqe;
	u32 wqe_idx;
	enum i40iw_page_size page_size;

	page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K;
	wqe = i40iw_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, I40IW_QP_WQE_MIN_SIZE,
					 0, info->wr_id);
	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;

	i40iw_debug(qp->dev, I40IW_DEBUG_MR, "%s: wr_id[%llxh] wqe_idx[%04d] location[%p]\n",
		    __func__, info->wr_id, wqe_idx,
		    &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid);
	temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo;
	set_64bit_val(wqe, 0, temp);

	temp = RS_64(info->first_pm_pbl_index >> 16, I40IWQPSQ_FIRSTPMPBLIDXHI);
	set_64bit_val(wqe,
		      8,
		      LS_64(temp, I40IWQPSQ_FIRSTPMPBLIDXHI) |
		      LS_64(info->reg_addr_pa >> I40IWQPSQ_PBLADDR_SHIFT, I40IWQPSQ_PBLADDR));

	set_64bit_val(wqe,
		      16,
		      info->total_len |
		      LS_64(info->first_pm_pbl_index, I40IWQPSQ_FIRSTPMPBLIDXLO));

	header = LS_64(info->stag_key, I40IWQPSQ_STAGKEY) |
		 LS_64(info->stag_idx, I40IWQPSQ_STAGINDEX) |
		 LS_64(I40IWQP_OP_FAST_REGISTER, I40IWQPSQ_OPCODE) |
		 LS_64(info->chunk_size, I40IWQPSQ_LPBLSIZE) |
		 LS_64(page_size, I40IWQPSQ_HPAGESIZE) |
		 LS_64(info->access_rights, I40IWQPSQ_STAGRIGHTS) |
		 LS_64(info->addr_type, I40IWQPSQ_VABASEDTO) |
		 LS_64(info->read_fence, I40IWQPSQ_READFENCE) |
		 LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) |
		 LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) |
		 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "FAST_REG WQE",
			wqe, I40IW_QP_WQE_MIN_SIZE);

	if (post_sq)
		i40iw_qp_post_wr(&qp->qp_uk);
	return 0;
}

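/*
 * Editor's note: first_pm_pbl_index appears to be wider than the low
 * field of the fast-register WQE, so it is split: the value shifted
 * right by 16 supplies FIRSTPMPBLIDXHI in qword 1, while the unshifted
 * value supplies FIRSTPMPBLIDXLO in qword 2. Illustrative numbers only:
 *
 *	first_pm_pbl_index = 0x54321
 *	0x54321 >> 16 = 0x5	-> packed via I40IWQPSQ_FIRSTPMPBLIDXHI
 *	0x54321 (low bits)	-> packed via I40IWQPSQ_FIRSTPMPBLIDXLO
 *
 * reg_addr_pa is likewise pre-shifted by I40IWQPSQ_PBLADDR_SHIFT because
 * the PBL address field only stores the aligned upper bits.
 */
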
/**
 * i40iw_sc_send_lsmm - send last streaming mode message
 * @qp: sc qp struct
 * @lsmm_buf: buffer with lsmm message
 * @size: size of lsmm buffer
 * @stag: stag of lsmm buffer
 */
static void i40iw_sc_send_lsmm(struct i40iw_sc_qp *qp,
			       void *lsmm_buf,
			       u32 size,
			       i40iw_stag stag)
{
	u64 *wqe;
	u64 header;
	struct i40iw_qp_uk *qp_uk;

	qp_uk = &qp->qp_uk;
	wqe = qp_uk->sq_base->elem;

	set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
	set_64bit_val(wqe, 8, (size | LS_64(stag, I40IWQPSQ_FRAG_STAG)));
	set_64bit_val(wqe, 16, 0);

	header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
		 LS_64(1, I40IWQPSQ_STREAMMODE) |
		 LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
		 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(qp->dev, I40IW_DEBUG_QP, "SEND_LSMM WQE",
			wqe, I40IW_QP_WQE_MIN_SIZE);
}

/**
 * i40iw_sc_send_lsmm_nostag - send lsmm message for privileged qp
 * @qp: sc qp struct
 * @lsmm_buf: buffer with lsmm message
 * @size: size of lsmm buffer
 */
static void i40iw_sc_send_lsmm_nostag(struct i40iw_sc_qp *qp,
				      void *lsmm_buf,
				      u32 size)
{
	u64 *wqe;
	u64 header;
	struct i40iw_qp_uk *qp_uk;

	qp_uk = &qp->qp_uk;
	wqe = qp_uk->sq_base->elem;

	set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
	set_64bit_val(wqe, 8, size);
	set_64bit_val(wqe, 16, 0);

	header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
		 LS_64(1, I40IWQPSQ_STREAMMODE) |
		 LS_64(1, I40IWQPSQ_WAITFORRCVPDU) |
		 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE",
			wqe, I40IW_QP_WQE_MIN_SIZE);
}

/**
 * i40iw_sc_send_rtt - send last read0 or write0
 * @qp: sc qp struct
 * @read: Do read0 or write0
 */
static void i40iw_sc_send_rtt(struct i40iw_sc_qp *qp, bool read)
{
	u64 *wqe;
	u64 header;
	struct i40iw_qp_uk *qp_uk;

	qp_uk = &qp->qp_uk;
	wqe = qp_uk->sq_base->elem;

	set_64bit_val(wqe, 0, 0);
	set_64bit_val(wqe, 8, 0);
	set_64bit_val(wqe, 16, 0);
	if (read) {
		header = LS_64(0x1234, I40IWQPSQ_REMSTAG) |
			 LS_64(I40IWQP_OP_RDMA_READ, I40IWQPSQ_OPCODE) |
			 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
		set_64bit_val(wqe, 8, ((u64)0xabcd << 32));
	} else {
		header = LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) |
			 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);
	}

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "RTR WQE",
			wqe, I40IW_QP_WQE_MIN_SIZE);
}

/**
 * i40iw_sc_post_wqe0 - send wqe with opcode
 * @qp: sc qp struct
 * @opcode: opcode to use for wqe0
 */
static enum i40iw_status_code i40iw_sc_post_wqe0(struct i40iw_sc_qp *qp, u8 opcode)
{
	u64 *wqe;
	u64 header;
	struct i40iw_qp_uk *qp_uk;

	qp_uk = &qp->qp_uk;
	wqe = qp_uk->sq_base->elem;

	if (!wqe)
		return I40IW_ERR_QP_TOOMANY_WRS_POSTED;
	switch (opcode) {
	case I40IWQP_OP_NOP:
		set_64bit_val(wqe, 0, 0);
		set_64bit_val(wqe, 8, 0);
		set_64bit_val(wqe, 16, 0);
		header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) |
			 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID);

		i40iw_insert_wqe_hdr(wqe, header);
		break;
	case I40IWQP_OP_RDMA_SEND:
		set_64bit_val(wqe, 0, 0);
		set_64bit_val(wqe, 8, 0);
		set_64bit_val(wqe, 16, 0);
		header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) |
			 LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID) |
			 LS_64(1, I40IWQPSQ_STREAMMODE) |
			 LS_64(1, I40IWQPSQ_WAITFORRCVPDU);

		i40iw_insert_wqe_hdr(wqe, header);
		break;
	default:
		i40iw_debug(qp->dev, I40IW_DEBUG_QP, "%s: Invalid WQE zero opcode\n",
			    __func__);
		break;
	}
	return 0;
}

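/*
 * Editor's note: like the LSMM/RTT helpers above, i40iw_sc_post_wqe0()
 * writes the first SQ WQE (sq_base->elem) in place rather than taking a
 * slot from the ring, since WQE zero appears to be reserved for the
 * connection-establishment exchange. Only NOP and streaming-mode
 * RDMA_SEND are meaningful there, which is why any other opcode is
 * treated as a driver bug and merely logged instead of returned as an
 * error.
 */
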
/**
 * i40iw_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info
 * @dev: ptr to i40iw_dev struct
 * @hmc_fn_id: hmc function id
 */
enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
{
	struct i40iw_hmc_info *hmc_info;
	struct i40iw_dma_mem query_fpm_mem;
	struct i40iw_virt_mem virt_mem;
	struct i40iw_vfdev *vf_dev = NULL;
	u32 mem_size;
	enum i40iw_status_code ret_code = 0;
	bool poll_registers = true;
	u16 iw_vf_idx;
	u8 wait_type;

	if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
	    (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
		return I40IW_ERR_INVALID_HMCFN_ID;

	i40iw_debug(dev, I40IW_DEBUG_HMC, "hmc_fn_id %u, dev->hmc_fn_id %u\n", hmc_fn_id,
		    dev->hmc_fn_id);
	if (hmc_fn_id == dev->hmc_fn_id) {
		hmc_info = dev->hmc_info;
		query_fpm_mem.pa = dev->fpm_query_buf_pa;
		query_fpm_mem.va = dev->fpm_query_buf;
	} else {
		vf_dev = i40iw_vfdev_from_fpm(dev, hmc_fn_id);
		if (!vf_dev)
			return I40IW_ERR_INVALID_VF_ID;

		hmc_info = &vf_dev->hmc_info;
		iw_vf_idx = vf_dev->iw_vf_idx;
		i40iw_debug(dev, I40IW_DEBUG_HMC, "vf_dev %p, hmc_info %p, hmc_obj %p\n", vf_dev,
			    hmc_info, hmc_info->hmc_obj);
		if (!vf_dev->fpm_query_buf) {
			if (!dev->vf_fpm_query_buf[iw_vf_idx].va) {
				ret_code = i40iw_alloc_query_fpm_buf(dev,
								     &dev->vf_fpm_query_buf[iw_vf_idx]);
				if (ret_code)
					return ret_code;
			}
			vf_dev->fpm_query_buf = dev->vf_fpm_query_buf[iw_vf_idx].va;
			vf_dev->fpm_query_buf_pa = dev->vf_fpm_query_buf[iw_vf_idx].pa;
		}
		query_fpm_mem.pa = vf_dev->fpm_query_buf_pa;
		query_fpm_mem.va = vf_dev->fpm_query_buf;
		/**
		 * It is HARDWARE specific:
		 * this call is done by PF for VF and
		 * i40iw_sc_query_fpm_values needs ccq poll
		 * because PF ccq is already created.
		 */
		poll_registers = false;
	}

	hmc_info->hmc_fn_id = hmc_fn_id;

	if (hmc_fn_id != dev->hmc_fn_id) {
		ret_code =
			i40iw_cqp_query_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);
	} else {
		wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
			    (u8)I40IW_CQP_WAIT_POLL_CQ;

		ret_code = i40iw_sc_query_fpm_values(
					dev->cqp,
					0,
					hmc_info->hmc_fn_id,
					&query_fpm_mem,
					true,
					wait_type);
	}
	if (ret_code)
		return ret_code;

	/* parse the fpm_query_buf and fill hmc obj info */
	ret_code =
		i40iw_sc_parse_fpm_query_buf((u64 *)query_fpm_mem.va,
					     hmc_info,
					     &dev->hmc_fpm_misc);
	if (ret_code)
		return ret_code;
	i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "QUERY FPM BUFFER",
			query_fpm_mem.va, I40IW_QUERY_FPM_BUF_SIZE);

	if (hmc_fn_id != dev->hmc_fn_id) {
		i40iw_cqp_commit_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id);

		/* parse the fpm_commit_buf and fill hmc obj info */
		i40iw_sc_parse_fpm_commit_buf((u64 *)query_fpm_mem.va, hmc_info->hmc_obj, &hmc_info->sd_table.sd_cnt);
		mem_size = sizeof(struct i40iw_hmc_sd_entry) *
			   (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index);
		ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
		if (ret_code)
			return ret_code;
		hmc_info->sd_table.sd_entry = virt_mem.va;
	}

	return ret_code;
}

/**
 * i40iw_sc_configure_iw_fpm() - commits hmc obj cnt values using cqp command and
 * populates fpm base address in hmc_info
 * @dev: ptr to i40iw_dev struct
 * @hmc_fn_id: hmc function id
 */
static enum i40iw_status_code i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev *dev,
							u8 hmc_fn_id)
{
	struct i40iw_hmc_info *hmc_info;
	struct i40iw_hmc_obj_info *obj_info;
	u64 *buf;
	struct i40iw_dma_mem commit_fpm_mem;
	u32 i, j;
	enum i40iw_status_code ret_code = 0;
	bool poll_registers = true;
	u8 wait_type;

	if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID ||
	    (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID))
		return I40IW_ERR_INVALID_HMCFN_ID;

	if (hmc_fn_id == dev->hmc_fn_id) {
		hmc_info = dev->hmc_info;
	} else {
		hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, hmc_fn_id);
		poll_registers = false;
	}
	if (!hmc_info)
		return I40IW_ERR_BAD_PTR;

	obj_info = hmc_info->hmc_obj;
	buf = dev->fpm_commit_buf;

	/* copy cnt values in commit buf */
	for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE;
	     i++, j += 8)
		set_64bit_val(buf, j, (u64)obj_info[i].cnt);

	set_64bit_val(buf, 40, 0);   /* APBVT rsvd */

	commit_fpm_mem.pa = dev->fpm_commit_buf_pa;
	commit_fpm_mem.va = dev->fpm_commit_buf;
	wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS :
		    (u8)I40IW_CQP_WAIT_POLL_CQ;
	ret_code = i40iw_sc_commit_fpm_values(
					dev->cqp,
					0,
					hmc_info->hmc_fn_id,
					&commit_fpm_mem,
					true,
					wait_type);

	/* parse the fpm_commit_buf and fill hmc obj info */
	if (!ret_code)
		ret_code = i40iw_sc_parse_fpm_commit_buf(dev->fpm_commit_buf,
							 hmc_info->hmc_obj,
							 &hmc_info->sd_table.sd_cnt);

	i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "COMMIT FPM BUFFER",
			commit_fpm_mem.va, I40IW_COMMIT_FPM_BUF_SIZE);

	return ret_code;
}

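/*
 * Editor's note: the commit buffer layout mirrors the HMC object enum -
 * one qword of count per object type starting at offset 0, which is why
 * the copy loop above walks i across the enum while j advances in steps
 * of 8 bytes. A hedged sketch of the resulting layout:
 *
 *	offset 0:  obj_info[I40IW_HMC_IW_QP].cnt
 *	offset 8:  obj_info[I40IW_HMC_IW_CQ].cnt
 *	...
 *	offset 40: APBVT slot, explicitly (re)zeroed as reserved
 */
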
/**
 * cqp_sds_wqe_fill - fill cqp wqe for sd
 * @cqp: struct for cqp hw
 * @info: sd info for wqe
 * @scratch: u64 saved to be used during cqp completion
 */
static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
					       struct i40iw_update_sds_info *info,
					       u64 scratch)
{
	u64 data;
	u64 header;
	u64 *wqe;
	int mem_entries, wqe_entries;
	struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;
	u64 offset;
	u32 wqe_idx;

	wqe = i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
	if (!wqe)
		return I40IW_ERR_RING_FULL;

	I40IW_CQP_INIT_WQE(wqe);
	wqe_entries = (info->cnt > 3) ? 3 : info->cnt;
	mem_entries = info->cnt - wqe_entries;

	header = LS_64(I40IW_CQP_OP_UPDATE_PE_SDS, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) |
		 LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);

	if (mem_entries) {
		offset = wqe_idx * I40IW_UPDATE_SD_BUF_SIZE;
		memcpy((char *)sdbuf->va + offset, &info->entry[3],
		       mem_entries << 4);
		data = (u64)sdbuf->pa + offset;
	} else {
		data = 0;
	}
	data |= LS_64(info->hmc_fn_id, I40IW_CQPSQ_UPESD_HMCFNID);

	set_64bit_val(wqe, 16, data);

	switch (wqe_entries) {
	case 3:
		set_64bit_val(wqe, 48,
			      (LS_64(info->entry[2].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
			       LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));

		set_64bit_val(wqe, 56, info->entry[2].data);
		/* fallthrough */
	case 2:
		set_64bit_val(wqe, 32,
			      (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) |
			       LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID)));

		set_64bit_val(wqe, 40, info->entry[1].data);
		/* fallthrough */
	case 1:
		set_64bit_val(wqe, 0,
			      LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD));

		set_64bit_val(wqe, 8, info->entry[0].data);
		break;
	default:
		break;
	}

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "UPDATE_PE_SDS WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);
	return 0;
}

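/*
 * Editor's note: an update-SDs WQE carries at most three SD entries
 * inline (qword pairs 0/8, 32/40 and 48/56); any remainder is spilled
 * into the per-WQE slice of cqp->sdbuf and referenced by physical
 * address in qword 2. E.g. with info->cnt == 5 (numbers illustrative):
 *
 *	wqe_entries = 3;	// entries [0..2] packed inline
 *	mem_entries = 2;	// entries [3..4], 16 bytes each,
 *				// memcpy'd to sdbuf->va + wqe_idx *
 *				// I40IW_UPDATE_SD_BUF_SIZE
 *
 * The switch falls through deliberately so counts 1..3 share the same
 * packing code.
 */
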
/**
 * i40iw_update_pe_sds - cqp wqe for sd
 * @dev: ptr to i40iw_dev struct
 * @info: sd info for sd's
 * @scratch: u64 saved to be used during cqp completion
 */
static enum i40iw_status_code i40iw_update_pe_sds(struct i40iw_sc_dev *dev,
						  struct i40iw_update_sds_info *info,
						  u64 scratch)
{
	struct i40iw_sc_cqp *cqp = dev->cqp;
	enum i40iw_status_code ret_code;

	ret_code = cqp_sds_wqe_fill(cqp, info, scratch);
	if (!ret_code)
		i40iw_sc_cqp_post_sq(cqp);

	return ret_code;
}

/**
 * i40iw_update_sds_noccq - update sd before ccq created
 * @dev: sc device struct
 * @info: sd info for sd's
 */
enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev,
					      struct i40iw_update_sds_info *info)
{
	u32 error, val, tail;
	struct i40iw_sc_cqp *cqp = dev->cqp;
	enum i40iw_status_code ret_code;

	ret_code = cqp_sds_wqe_fill(cqp, info, 0);
	if (ret_code)
		return ret_code;
	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
	if (error)
		return I40IW_ERR_CQP_COMPL_ERROR;

	i40iw_sc_cqp_post_sq(cqp);
	ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT);

	return ret_code;
}

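/*
 * Editor's note: i40iw_update_sds_noccq() runs before the CCQ exists, so
 * completion cannot arrive as a CQE; instead the CQP tail register is
 * sampled before posting and then polled (i40iw_cqp_poll_registers)
 * until hardware consumes the WQE. The same poll-registers vs. poll-CQ
 * split shows up as the wait_type selection in the FPM query/commit
 * paths above.
 */
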
/**
 * i40iw_sc_suspend_qp - suspend qp for param change
 * @cqp: struct for cqp hw
 * @qp: sc qp struct
 * @scratch: u64 saved to be used during cqp completion
 */
enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp,
					   struct i40iw_sc_qp *qp,
					   u64 scratch)
{
	u64 header;
	u64 *wqe;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_SUSPENDQP_QPID) |
		 LS_64(I40IW_CQP_OP_SUSPEND_QP, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SUSPEND_QP WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_resume_qp - resume qp after suspend
 * @cqp: struct for cqp hw
 * @qp: sc qp struct
 * @scratch: u64 saved to be used during cqp completion
 */
enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp,
					  struct i40iw_sc_qp *qp,
					  u64 scratch)
{
	u64 header;
	u64 *wqe;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe,
		      16,
		      LS_64(qp->qs_handle, I40IW_CQPSQ_RESUMEQP_QSHANDLE));

	header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_RESUMEQP_QPID) |
		 LS_64(I40IW_CQP_OP_RESUME_QP, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "RESUME_QP WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	i40iw_sc_cqp_post_sq(cqp);
	return 0;
}

/**
 * i40iw_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages
 * @cqp: struct for cqp hw
 * @scratch: u64 saved to be used during cqp completion
 * @hmc_fn_id: hmc function id
 * @post_sq: flag for cqp db to ring
 * @poll_registers: flag to poll register for cqp completion
 */
enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(
					struct i40iw_sc_cqp *cqp,
					u64 scratch,
					u8 hmc_fn_id,
					bool post_sq,
					bool poll_registers)
{
	u64 header;
	u64 *wqe;
	u32 tail, val, error;
	enum i40iw_status_code ret_code = 0;

	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
	if (!wqe)
		return I40IW_ERR_RING_FULL;
	set_64bit_val(wqe,
		      16,
		      LS_64(hmc_fn_id, I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID));

	header = LS_64(I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, I40IW_CQPSQ_OPCODE) |
		 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);

	i40iw_insert_wqe_hdr(wqe, header);

	i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE",
			wqe, I40IW_CQP_WQE_SIZE * 8);

	i40iw_get_cqp_reg_info(cqp, &val, &tail, &error);
	if (error) {
		ret_code = I40IW_ERR_CQP_COMPL_ERROR;
		return ret_code;
	}
	if (post_sq) {
		i40iw_sc_cqp_post_sq(cqp);
		if (poll_registers)
			/* check for cqp sq tail update */
			ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000);
		else
			ret_code = i40iw_sc_poll_for_cqp_op_done(cqp,
								 I40IW_CQP_OP_SHMC_PAGES_ALLOCATED,
								 NULL);
	}

	return ret_code;
}

/**
 * i40iw_ring_full - check if cqp ring is full
 * @cqp: struct for cqp hw
 */
static bool i40iw_ring_full(struct i40iw_sc_cqp *cqp)
{
	return I40IW_RING_FULL_ERR(cqp->sq_ring);
}

/**
 * i40iw_est_sd - returns approximate number of SDs for HMC
 * @dev: sc device struct
 * @hmc_info: hmc structure, size and count for HMC objects
 */
static u64 i40iw_est_sd(struct i40iw_sc_dev *dev, struct i40iw_hmc_info *hmc_info)
{
	int i;
	u64 size = 0;
	u64 sd;

	for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_PBLE; i++)
		size += hmc_info->hmc_obj[i].cnt * hmc_info->hmc_obj[i].size;

	if (dev->is_pf)
		size += hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;

	if (size & 0x1FFFFF)
		sd = (size >> 21) + 1; /* add 1 for remainder */
	else
		sd = size >> 21;

	if (!dev->is_pf) {
		/* 2MB alignment for VF PBLE HMC */
		size = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size;
		if (size & 0x1FFFFF)
			sd += (size >> 21) + 1; /* add 1 for remainder */
		else
			sd += size >> 21;
	}

	return sd;
}

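/*
 * Editor's note: each SD (segment descriptor) maps 2MB (1 << 21) of HMC
 * backing memory, so the object footprint is converted to an SD count by
 * shifting right 21 and rounding up. Worked example (sizes illustrative):
 *
 *	size = 5 * 1024 * 1024;			// 5MB of objects
 *	sd = (size >> 21) + 1;			// 2 full SDs + remainder = 3
 *
 * On a VF the PBLE backing is rounded up to its own 2MB boundary, which
 * can cost one extra SD compared with folding it into the main sum.
 */
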
/**
 * i40iw_config_fpm_values - configure HMC objects
 * @dev: sc device struct
 * @qp_count: desired qp count
 */
enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count)
{
	struct i40iw_virt_mem virt_mem;
	u32 i, mem_size;
	u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted;
	u64 sd_needed;
	u32 loop_count = 0;
	struct i40iw_hmc_info *hmc_info;
	struct i40iw_hmc_fpm_misc *hmc_fpm_misc;
	enum i40iw_status_code ret_code = 0;

	hmc_info = dev->hmc_info;
	hmc_fpm_misc = &dev->hmc_fpm_misc;

	ret_code = i40iw_sc_init_iw_hmc(dev, dev->hmc_fn_id);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "i40iw_sc_init_iw_hmc returned error_code = %d\n",
			    ret_code);
		return ret_code;
	}

	for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
		hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
	sd_needed = i40iw_est_sd(dev, hmc_info);
	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "%s: FW initial max sd_count[%08lld] first_sd_index[%04d]\n",
		    __func__, sd_needed, hmc_info->first_sd_index);
	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "%s: sd count %d where max sd is %d\n",
		    __func__, hmc_info->sd_table.sd_cnt,
		    hmc_fpm_misc->max_sds);

	qpwanted = min(qp_count, hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt);
	qpwantedoriginal = qpwanted;
	mrwanted = hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt;
	pblewanted = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt;

	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d\n",
		    qp_count, hmc_fpm_misc->max_sds,
		    hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_CQ].max_cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt);

	do {
		++loop_count;
		hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt = qpwanted;
		hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt =
			min(2 * qpwanted, hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt);
		hmc_info->hmc_obj[I40IW_HMC_IW_SRQ].cnt = 0x00; /* Reserved */
		hmc_info->hmc_obj[I40IW_HMC_IW_HTE].cnt =
			qpwanted * hmc_fpm_misc->ht_multiplier;
		hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt =
			hmc_info->hmc_obj[I40IW_HMC_IW_ARP].max_cnt;
		hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1;
		hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted;
		hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt =
			roundup_pow_of_two(I40IW_MAX_WQ_ENTRIES * qpwanted);
		hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt =
			roundup_pow_of_two(2 * I40IW_MAX_IRD_SIZE * qpwanted);
		hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt =
			hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size;
		hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt =
			hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size;
		hmc_info->hmc_obj[I40IW_HMC_IW_TIMER].cnt =
			((qpwanted) / 512 + 1) * hmc_fpm_misc->timer_bucket;
		hmc_info->hmc_obj[I40IW_HMC_IW_FSIMC].cnt = 0x00;
		hmc_info->hmc_obj[I40IW_HMC_IW_FSIAV].cnt = 0x00;
		hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt = pblewanted;

		/* How much memory is needed for all the objects. */
		sd_needed = i40iw_est_sd(dev, hmc_info);
		if ((loop_count > 1000) ||
		    ((!(loop_count % 10)) &&
		     (qpwanted > qpwantedoriginal * 2 / 3))) {
			if (qpwanted > FPM_MULTIPLIER)
				qpwanted = roundup_pow_of_two(qpwanted -
							      FPM_MULTIPLIER);
			qpwanted >>= 1;
		}
		if (mrwanted > FPM_MULTIPLIER * 10)
			mrwanted -= FPM_MULTIPLIER * 10;
		if (pblewanted > FPM_MULTIPLIER * 1000)
			pblewanted -= FPM_MULTIPLIER * 1000;
	} while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000);

	i40iw_debug(dev, I40IW_DEBUG_HMC,
		    "loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n",
		    loop_count, sd_needed,
		    hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt,
		    hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt);

	ret_code = i40iw_sc_configure_iw_fpm(dev, dev->hmc_fn_id);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "configure_iw_fpm returned error_code[x%08X]\n",
			    i40iw_rd32(dev->hw, dev->is_pf ? I40E_PFPE_CQPERRCODES : I40E_VFPE_CQPERRCODES1));
		return ret_code;
	}

	mem_size = sizeof(struct i40iw_hmc_sd_entry) *
		   (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1);
	ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size);
	if (ret_code) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: failed to allocate memory for sd_entry buffer\n",
			    __func__);
		return ret_code;
	}
	hmc_info->sd_table.sd_entry = virt_mem.va;

	return ret_code;
}

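/*
 * Editor's note: the do/while above is a back-off search - start from
 * the firmware maxima, estimate the SDs needed, and while the budget
 * (hmc_fpm_misc->max_sds) is exceeded, shave resources on successive
 * iterations: QPs are reduced (rounded toward a power of two, then
 * halved) once the loop runs long or every tenth pass while still above
 * two thirds of the original request, and MR/PBLE counts step down by
 * fixed FPM_MULTIPLIER amounts. The loop gives up after 2000 iterations,
 * and the surviving counts are committed via i40iw_sc_configure_iw_fpm().
 */
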
/**
 * i40iw_exec_cqp_cmd - execute cqp cmd when wqe are available
 * @dev: rdma device
 * @pcmdinfo: cqp command info
 */
static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev,
						 struct cqp_commands_info *pcmdinfo)
{
	enum i40iw_status_code status;
	struct i40iw_dma_mem values_mem;

	dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++;
	switch (pcmdinfo->cqp_cmd) {
	case OP_DELETE_LOCAL_MAC_IPADDR_ENTRY:
		status = i40iw_sc_del_local_mac_ipaddr_entry(
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.cqp,
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.scratch,
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.entry_idx,
				pcmdinfo->in.u.del_local_mac_ipaddr_entry.ignore_ref_count,
				pcmdinfo->post_sq);
		break;
	case OP_CEQ_DESTROY:
		status = i40iw_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq,
					      pcmdinfo->in.u.ceq_destroy.scratch,
					      pcmdinfo->post_sq);
		break;
	case OP_AEQ_DESTROY:
		status = i40iw_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq,
					      pcmdinfo->in.u.aeq_destroy.scratch,
					      pcmdinfo->post_sq);
		break;
	case OP_DELETE_ARP_CACHE_ENTRY:
		status = i40iw_sc_del_arp_cache_entry(
				pcmdinfo->in.u.del_arp_cache_entry.cqp,
				pcmdinfo->in.u.del_arp_cache_entry.scratch,
				pcmdinfo->in.u.del_arp_cache_entry.arp_index,
				pcmdinfo->post_sq);
		break;
	case OP_MANAGE_APBVT_ENTRY:
		status = i40iw_sc_manage_apbvt_entry(
				pcmdinfo->in.u.manage_apbvt_entry.cqp,
				&pcmdinfo->in.u.manage_apbvt_entry.info,
				pcmdinfo->in.u.manage_apbvt_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_CEQ_CREATE:
		status = i40iw_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq,
					     pcmdinfo->in.u.ceq_create.scratch,
					     pcmdinfo->post_sq);
		break;
	case OP_AEQ_CREATE:
		status = i40iw_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq,
					     pcmdinfo->in.u.aeq_create.scratch,
					     pcmdinfo->post_sq);
		break;
	case OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY:
		status = i40iw_sc_alloc_local_mac_ipaddr_entry(
				pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.cqp,
				pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_ADD_LOCAL_MAC_IPADDR_ENTRY:
		status = i40iw_sc_add_local_mac_ipaddr_entry(
				pcmdinfo->in.u.add_local_mac_ipaddr_entry.cqp,
				&pcmdinfo->in.u.add_local_mac_ipaddr_entry.info,
				pcmdinfo->in.u.add_local_mac_ipaddr_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_MANAGE_QHASH_TABLE_ENTRY:
		status = i40iw_sc_manage_qhash_table_entry(
				pcmdinfo->in.u.manage_qhash_table_entry.cqp,
				&pcmdinfo->in.u.manage_qhash_table_entry.info,
				pcmdinfo->in.u.manage_qhash_table_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_QP_MODIFY:
		status = i40iw_sc_qp_modify(
				pcmdinfo->in.u.qp_modify.qp,
				&pcmdinfo->in.u.qp_modify.info,
				pcmdinfo->in.u.qp_modify.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_QP_UPLOAD_CONTEXT:
		status = i40iw_sc_qp_upload_context(
				pcmdinfo->in.u.qp_upload_context.dev,
				&pcmdinfo->in.u.qp_upload_context.info,
				pcmdinfo->in.u.qp_upload_context.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_CQ_CREATE:
		status = i40iw_sc_cq_create(
				pcmdinfo->in.u.cq_create.cq,
				pcmdinfo->in.u.cq_create.scratch,
				pcmdinfo->in.u.cq_create.check_overflow,
				pcmdinfo->post_sq);
		break;
	case OP_CQ_DESTROY:
		status = i40iw_sc_cq_destroy(
				pcmdinfo->in.u.cq_destroy.cq,
				pcmdinfo->in.u.cq_destroy.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_QP_CREATE:
		status = i40iw_sc_qp_create(
				pcmdinfo->in.u.qp_create.qp,
				&pcmdinfo->in.u.qp_create.info,
				pcmdinfo->in.u.qp_create.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_QP_DESTROY:
		status = i40iw_sc_qp_destroy(
				pcmdinfo->in.u.qp_destroy.qp,
				pcmdinfo->in.u.qp_destroy.scratch,
				pcmdinfo->in.u.qp_destroy.remove_hash_idx,
				pcmdinfo->in.u.qp_destroy.ignore_mw_bnd,
				pcmdinfo->post_sq);
		break;
	case OP_ALLOC_STAG:
		status = i40iw_sc_alloc_stag(
				pcmdinfo->in.u.alloc_stag.dev,
				&pcmdinfo->in.u.alloc_stag.info,
				pcmdinfo->in.u.alloc_stag.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_MR_REG_NON_SHARED:
		status = i40iw_sc_mr_reg_non_shared(
				pcmdinfo->in.u.mr_reg_non_shared.dev,
				&pcmdinfo->in.u.mr_reg_non_shared.info,
				pcmdinfo->in.u.mr_reg_non_shared.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_DEALLOC_STAG:
		status = i40iw_sc_dealloc_stag(
				pcmdinfo->in.u.dealloc_stag.dev,
				&pcmdinfo->in.u.dealloc_stag.info,
				pcmdinfo->in.u.dealloc_stag.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_MW_ALLOC:
		status = i40iw_sc_mw_alloc(
				pcmdinfo->in.u.mw_alloc.dev,
				pcmdinfo->in.u.mw_alloc.scratch,
				pcmdinfo->in.u.mw_alloc.mw_stag_index,
				pcmdinfo->in.u.mw_alloc.pd_id,
				pcmdinfo->post_sq);
		break;
	case OP_QP_FLUSH_WQES:
		status = i40iw_sc_qp_flush_wqes(
				pcmdinfo->in.u.qp_flush_wqes.qp,
				&pcmdinfo->in.u.qp_flush_wqes.info,
				pcmdinfo->in.u.qp_flush_wqes.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_GEN_AE:
		status = i40iw_sc_gen_ae(
				pcmdinfo->in.u.gen_ae.qp,
				&pcmdinfo->in.u.gen_ae.info,
				pcmdinfo->in.u.gen_ae.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_ADD_ARP_CACHE_ENTRY:
		status = i40iw_sc_add_arp_cache_entry(
				pcmdinfo->in.u.add_arp_cache_entry.cqp,
				&pcmdinfo->in.u.add_arp_cache_entry.info,
				pcmdinfo->in.u.add_arp_cache_entry.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_MANAGE_PUSH_PAGE:
		status = i40iw_sc_manage_push_page(
				pcmdinfo->in.u.manage_push_page.cqp,
				&pcmdinfo->in.u.manage_push_page.info,
				pcmdinfo->in.u.manage_push_page.scratch,
				pcmdinfo->post_sq);
		break;
	case OP_UPDATE_PE_SDS:
		/* case I40IW_CQP_OP_UPDATE_PE_SDS */
		status = i40iw_update_pe_sds(
				pcmdinfo->in.u.update_pe_sds.dev,
				&pcmdinfo->in.u.update_pe_sds.info,
				pcmdinfo->in.u.update_pe_sds.scratch);
		break;
	case OP_MANAGE_HMC_PM_FUNC_TABLE:
		status = i40iw_sc_manage_hmc_pm_func_table(
				pcmdinfo->in.u.manage_hmc_pm.dev->cqp,
				pcmdinfo->in.u.manage_hmc_pm.scratch,
				(u8)pcmdinfo->in.u.manage_hmc_pm.info.vf_id,
				pcmdinfo->in.u.manage_hmc_pm.info.free_fcn,
				true);
		break;
	case OP_SUSPEND:
		status = i40iw_sc_suspend_qp(
				pcmdinfo->in.u.suspend_resume.cqp,
				pcmdinfo->in.u.suspend_resume.qp,
				pcmdinfo->in.u.suspend_resume.scratch);
		break;
	case OP_RESUME:
		status = i40iw_sc_resume_qp(
				pcmdinfo->in.u.suspend_resume.cqp,
				pcmdinfo->in.u.suspend_resume.qp,
				pcmdinfo->in.u.suspend_resume.scratch);
		break;
	case OP_MANAGE_VF_PBLE_BP:
		status = i40iw_manage_vf_pble_bp(
				pcmdinfo->in.u.manage_vf_pble_bp.cqp,
				&pcmdinfo->in.u.manage_vf_pble_bp.info,
				pcmdinfo->in.u.manage_vf_pble_bp.scratch, true);
		break;
	case OP_QUERY_FPM_VALUES:
		values_mem.pa = pcmdinfo->in.u.query_fpm_values.fpm_values_pa;
		values_mem.va = pcmdinfo->in.u.query_fpm_values.fpm_values_va;
		status = i40iw_sc_query_fpm_values(
				pcmdinfo->in.u.query_fpm_values.cqp,
				pcmdinfo->in.u.query_fpm_values.scratch,
				pcmdinfo->in.u.query_fpm_values.hmc_fn_id,
				&values_mem, true, I40IW_CQP_WAIT_EVENT);
		break;
	case OP_COMMIT_FPM_VALUES:
		values_mem.pa = pcmdinfo->in.u.commit_fpm_values.fpm_values_pa;
		values_mem.va = pcmdinfo->in.u.commit_fpm_values.fpm_values_va;
		status = i40iw_sc_commit_fpm_values(
				pcmdinfo->in.u.commit_fpm_values.cqp,
				pcmdinfo->in.u.commit_fpm_values.scratch,
				pcmdinfo->in.u.commit_fpm_values.hmc_fn_id,
				&values_mem,
				true,
				I40IW_CQP_WAIT_EVENT);
		break;
	default:
		status = I40IW_NOT_SUPPORTED;
		break;
	}

	return status;
}

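/*
 * Editor's note: i40iw_exec_cqp_cmd() is a pure dispatcher - every arm
 * unpacks pcmdinfo->in.u.<cmd> into the matching i40iw_sc_* call, with
 * per-command statistics bumped up front. A hedged, illustrative sketch
 * of how a caller might queue one command (field names taken from the
 * switch above; the scratch value is an arbitrary caller cookie):
 *
 *	struct cqp_commands_info cmdinfo = { 0 };
 *
 *	cmdinfo.cqp_cmd = OP_SUSPEND;
 *	cmdinfo.post_sq = 1;
 *	cmdinfo.in.u.suspend_resume.cqp = dev->cqp;
 *	cmdinfo.in.u.suspend_resume.qp = qp;
 *	cmdinfo.in.u.suspend_resume.scratch = (uintptr_t)&cmdinfo;
 *	i40iw_process_cqp_cmd(dev, &cmdinfo); // queued below if ring full
 */
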
/**
 * i40iw_process_cqp_cmd - process all cqp commands
 * @dev: sc device struct
 * @pcmdinfo: cqp command info
 */
enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev,
					     struct cqp_commands_info *pcmdinfo)
{
	enum i40iw_status_code status = 0;
	unsigned long flags;

	spin_lock_irqsave(&dev->cqp_lock, flags);
	if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp))
		status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
	else
		list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head);
	spin_unlock_irqrestore(&dev->cqp_lock, flags);
	return status;
}

/**
 * i40iw_process_bh - called from tasklet for cqp list
 * @dev: sc device struct
 */
enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev)
{
	enum i40iw_status_code status = 0;
	struct cqp_commands_info *pcmdinfo;
	unsigned long flags;

	spin_lock_irqsave(&dev->cqp_lock, flags);
	while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) {
		pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head);

		status = i40iw_exec_cqp_cmd(dev, pcmdinfo);
		if (status)
			break;
	}
	spin_unlock_irqrestore(&dev->cqp_lock, flags);
	return status;
}

/**
 * i40iw_iwarp_opcode - determine if incoming is rdma layer
 * @info: aeq info for the packet
 * @pkt: packet for error
 */
static u32 i40iw_iwarp_opcode(struct i40iw_aeqe_info *info, u8 *pkt)
{
	__be16 *mpa;
	u32 opcode = 0xffffffff;

	if (info->q2_data_written) {
		mpa = (__be16 *)pkt;
		opcode = ntohs(mpa[1]) & 0xf;
	}
	return opcode;
}

/**
 * i40iw_locate_mpa - return pointer to mpa in the pkt
 * @pkt: packet with data
 */
static u8 *i40iw_locate_mpa(u8 *pkt)
{
	/* skip over ethernet header */
	pkt += I40IW_MAC_HLEN;

	/* Skip over IP and TCP headers */
	pkt += 4 * (pkt[0] & 0x0f);
	pkt += 4 * ((pkt[12] >> 4) & 0x0f);
	return pkt;
}

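/*
 * Editor's note: i40iw_locate_mpa() relies on the self-describing length
 * fields of IPv4 and TCP: pkt[0] & 0x0f is the IP header length (IHL) in
 * 32-bit words, and pkt[12] >> 4 is the TCP data offset, also in 32-bit
 * words. Worked example for option-less headers:
 *
 *	IHL = 5  -> 4 * 5 = 20 byte IP header
 *	off = 5  -> 4 * 5 = 20 byte TCP header
 *
 * so after the I40IW_MAC_HLEN Ethernet header the MPA frame begins 40
 * bytes further in. The arithmetic assumes an IPv4 header at pkt[0].
 */
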
/**
 * i40iw_setup_termhdr - termhdr for terminate pkt
 * @qp: sc qp ptr for pkt
 * @hdr: term hdr
 * @opcode: flush opcode for termhdr
 * @layer_etype: error layer + error type
 * @err: error code in the header
 */
static void i40iw_setup_termhdr(struct i40iw_sc_qp *qp,
				struct i40iw_terminate_hdr *hdr,
				enum i40iw_flush_opcode opcode,
				u8 layer_etype,
				u8 err)
{
	qp->flush_code = opcode;
	hdr->layer_etype = layer_etype;
	hdr->error_code = err;
}

  3798. /**
  3799. * i40iw_bld_terminate_hdr - build terminate message header
  3800. * @qp: qp associated with received terminate AE
  3801. * @info: the struct contiaing AE information
  3802. */
  3803. static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp,
  3804. struct i40iw_aeqe_info *info)
  3805. {
  3806. u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
  3807. u16 ddp_seg_len;
  3808. int copy_len = 0;
  3809. u8 is_tagged = 0;
  3810. u32 opcode;
  3811. struct i40iw_terminate_hdr *termhdr;
  3812. termhdr = (struct i40iw_terminate_hdr *)qp->q2_buf;
  3813. memset(termhdr, 0, Q2_BAD_FRAME_OFFSET);
  3814. if (info->q2_data_written) {
  3815. /* Use data from offending packet to fill in ddp & rdma hdrs */
  3816. pkt = i40iw_locate_mpa(pkt);
  3817. ddp_seg_len = ntohs(*(__be16 *)pkt);
  3818. if (ddp_seg_len) {
  3819. copy_len = 2;
  3820. termhdr->hdrct = DDP_LEN_FLAG;
  3821. if (pkt[2] & 0x80) {
  3822. is_tagged = 1;
  3823. if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
  3824. copy_len += TERM_DDP_LEN_TAGGED;
  3825. termhdr->hdrct |= DDP_HDR_FLAG;
  3826. }
  3827. } else {
  3828. if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
  3829. copy_len += TERM_DDP_LEN_UNTAGGED;
  3830. termhdr->hdrct |= DDP_HDR_FLAG;
  3831. }
  3832. if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
  3833. if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
  3834. copy_len += TERM_RDMA_LEN;
  3835. termhdr->hdrct |= RDMA_HDR_FLAG;
  3836. }
  3837. }
  3838. }
  3839. }
  3840. }
	opcode = i40iw_iwarp_opcode(info, pkt);

	switch (info->ae_id) {
	case I40IW_AE_AMP_UNALLOCATED_STAG:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		if (opcode == I40IW_OP_TYPE_RDMA_WRITE)
			i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_STAG);
		else
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
		break;
	case I40IW_AE_AMP_BOUNDS_VIOLATION:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		if (info->q2_data_written)
			i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_BOUNDS);
		else
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_BOUNDS);
		break;
	case I40IW_AE_AMP_BAD_PD:
		switch (opcode) {
		case I40IW_OP_TYPE_RDMA_WRITE:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR,
					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_UNASSOC_STAG);
			break;
		case I40IW_OP_TYPE_SEND_INV:
		case I40IW_OP_TYPE_SEND_SOL_INV:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_CANT_INV_STAG);
			break;
		default:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_UNASSOC_STAG);
		}
		break;
	case I40IW_AE_AMP_INVALID_STAG:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG);
		break;
	case I40IW_AE_AMP_BAD_QP:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
		break;
	case I40IW_AE_AMP_BAD_STAG_KEY:
	case I40IW_AE_AMP_BAD_STAG_INDEX:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		switch (opcode) {
		case I40IW_OP_TYPE_SEND_INV:
		case I40IW_OP_TYPE_SEND_SOL_INV:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_CANT_INV_STAG);
			break;
		default:
			i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
					    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_STAG);
		}
		break;
	case I40IW_AE_AMP_RIGHTS_VIOLATION:
	case I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
	case I40IW_AE_PRIV_OPERATION_DENIED:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_ACCESS);
		break;
	case I40IW_AE_AMP_TO_WRAP:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP);
		break;
	case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_MPA << 4) | DDP_LLP, MPA_CRC);
		break;
	case I40IW_AE_LLP_SEGMENT_TOO_LARGE:
	case I40IW_AE_LLP_SEGMENT_TOO_SMALL:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
				    (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
		break;
	case I40IW_AE_LCE_QP_CATASTROPHIC:
	case I40IW_AE_DDP_NO_L_BIT:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
				    (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL);
		break;
	case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE);
		break;
	case I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
		qp->eventtype = TERM_EVENT_QP_ACCESS_ERR;
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_TOO_LONG);
		break;
	case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION:
		if (is_tagged)
			i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
					    (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_DDP_VER);
		else
			i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
					    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_DDP_VER);
		break;
	case I40IW_AE_DDP_UBE_INVALID_MO:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MO);
		break;
	case I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_NO_BUF);
		break;
	case I40IW_AE_DDP_UBE_INVALID_QN:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN);
		break;
	case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_RDMAP_VER);
		break;
	case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNEXPECTED_OP);
		break;
	default:
		i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR,
				    (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNSPECIFIED);
		break;
	}
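	/* Append the echoed DDP/RDMAP header bytes right after the TERM header */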
	if (copy_len)
		memcpy(termhdr + 1, pkt, copy_len);

	return sizeof(struct i40iw_terminate_hdr) + copy_len;
}

/**
 * i40iw_terminate_send_fin() - Send fin for terminate message
 * @qp: qp associated with received terminate AE
 */
void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp)
{
	/* Send the fin only */
	i40iw_term_modify_qp(qp,
			     I40IW_QP_STATE_TERMINATE,
			     I40IWQP_TERM_SEND_FIN_ONLY,
			     0);
}

/**
 * i40iw_terminate_connection() - Handle a bad AE and send a terminate to the remote QP
 * @qp: qp associated with received terminate AE
 * @info: the struct containing AE information
 */
void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
{
	u8 termlen = 0;

	if (qp->term_flags & I40IW_TERM_SENT)
		return;	/* Sanity check */

	/* Eventtype can change from bld_terminate_hdr */
	qp->eventtype = TERM_EVENT_QP_FATAL;
	termlen = i40iw_bld_terminate_hdr(qp, info);
	i40iw_terminate_start_timer(qp);
	qp->term_flags |= I40IW_TERM_SENT;
	i40iw_term_modify_qp(qp, I40IW_QP_STATE_TERMINATE,
			     I40IWQP_TERM_SEND_TERM_ONLY, termlen);
}

/**
 * i40iw_terminate_received - handle terminate received AE
 * @qp: qp associated with received terminate AE
 * @info: the struct containing AE information
 */
void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info)
{
	u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET;
	__be32 *mpa;
	u8 ddp_ctl;
	u8 rdma_ctl;
	u16 aeq_id = 0;
	struct i40iw_terminate_hdr *termhdr;

	mpa = (__be32 *)i40iw_locate_mpa(pkt);
	if (info->q2_data_written) {
		/* did not validate the frame - do it now */
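		/*
		 * A valid TERM rides an untagged DDP segment (T = 0, L = 1)
		 * with DDP version 1, queue number 2, MSN 1, MO 0 and RDMAP
		 * version 1; map the first violation found to a matching AE.
		 */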
		ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff;
		rdma_ctl = ntohl(mpa[0]) & 0xff;
		if ((ddp_ctl & 0xc0) != 0x40)
			aeq_id = I40IW_AE_LCE_QP_CATASTROPHIC;
		else if ((ddp_ctl & 0x03) != 1)
			aeq_id = I40IW_AE_DDP_UBE_INVALID_DDP_VERSION;
		else if (ntohl(mpa[2]) != 2)
			aeq_id = I40IW_AE_DDP_UBE_INVALID_QN;
		else if (ntohl(mpa[3]) != 1)
			aeq_id = I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN;
		else if (ntohl(mpa[4]) != 0)
			aeq_id = I40IW_AE_DDP_UBE_INVALID_MO;
		else if ((rdma_ctl & 0xc0) != 0x40)
			aeq_id = I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION;

		info->ae_id = aeq_id;
		if (info->ae_id) {
			/* Bad terminate recvd - send back a terminate */
			i40iw_terminate_connection(qp, info);
			return;
		}
	}

	qp->term_flags |= I40IW_TERM_RCVD;
	qp->eventtype = TERM_EVENT_QP_FATAL;
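	/*
	 * A peer-reported RDMAP protocol/operation error completes the
	 * exchange immediately; anything else waits out the FIN handshake
	 * under the terminate timer.
	 */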
	termhdr = (struct i40iw_terminate_hdr *)&mpa[5];
	if (termhdr->layer_etype == RDMAP_REMOTE_PROT ||
	    termhdr->layer_etype == RDMAP_REMOTE_OP) {
		i40iw_terminate_done(qp, 0);
	} else {
		i40iw_terminate_start_timer(qp);
		i40iw_terminate_send_fin(qp);
	}
}

/**
 * i40iw_sc_vsi_init - Initialize virtual device
 * @vsi: pointer to the vsi structure
 * @info: parameters to initialize vsi
 */
void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info)
{
	int i;

	vsi->dev = info->dev;
	vsi->back_vsi = info->back_vsi;
	vsi->mtu = info->params->mtu;
	vsi->exception_lan_queue = info->exception_lan_queue;
	i40iw_fill_qos_list(info->params->qs_handle_list);

	for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
		vsi->qos[i].qs_handle = info->params->qs_handle_list[i];
		i40iw_debug(vsi->dev, I40IW_DEBUG_DCB, "qset[%d]: %d\n", i,
			    vsi->qos[i].qs_handle);
		spin_lock_init(&vsi->qos[i].lock);
		INIT_LIST_HEAD(&vsi->qos[i].qplist);
	}
}

/**
 * i40iw_hw_stats_init - Initialize HW stats table
 * @stats: pestat struct
 * @fcn_idx: PCI fn id
 * @is_pf: Is it a PF?
 *
 * Populate the HW stats table with the register offset address for
 * each stat and take an initial reading to seed roll-over tracking.
 */
void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 fcn_idx, bool is_pf)
{
	u32 stats_reg_offset;
	u32 stats_index;
	struct i40iw_dev_hw_stats_offsets *stats_table =
		&stats->hw_stats_offsets;
	struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;

	if (is_pf) {
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
			I40E_GLPES_PFIP4RXDISCARD(fcn_idx);
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
			I40E_GLPES_PFIP4RXTRUNC(fcn_idx);
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
			I40E_GLPES_PFIP4TXNOROUTE(fcn_idx);
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
			I40E_GLPES_PFIP6RXDISCARD(fcn_idx);
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
			I40E_GLPES_PFIP6RXTRUNC(fcn_idx);
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
			I40E_GLPES_PFIP6TXNOROUTE(fcn_idx);
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
			I40E_GLPES_PFTCPRTXSEG(fcn_idx);
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
			I40E_GLPES_PFTCPRXOPTERR(fcn_idx);
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
			I40E_GLPES_PFTCPRXPROTOERR(fcn_idx);

		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
			I40E_GLPES_PFIP4RXOCTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
			I40E_GLPES_PFIP4RXPKTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
			I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
			I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
			I40E_GLPES_PFIP4TXOCTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
			I40E_GLPES_PFIP4TXPKTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
			I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
			I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
			I40E_GLPES_PFIP6RXOCTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
			I40E_GLPES_PFIP6RXPKTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
			I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
			I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
			I40E_GLPES_PFIP6TXOCTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
			I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
			I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
			I40E_GLPES_PFTCPRXSEGSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
			I40E_GLPES_PFTCPTXSEGLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
			I40E_GLPES_PFRDMARXRDSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
			I40E_GLPES_PFRDMARXSNDSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
			I40E_GLPES_PFRDMARXWRSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
			I40E_GLPES_PFRDMATXRDSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
			I40E_GLPES_PFRDMATXSNDSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
			I40E_GLPES_PFRDMATXWRSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
			I40E_GLPES_PFRDMAVBNDLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
			I40E_GLPES_PFRDMAVINVLO(fcn_idx);
	} else {
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
			I40E_GLPES_VFIP4RXDISCARD(fcn_idx);
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
			I40E_GLPES_VFIP4RXTRUNC(fcn_idx);
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
			I40E_GLPES_VFIP4TXNOROUTE(fcn_idx);
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
			I40E_GLPES_VFIP6RXDISCARD(fcn_idx);
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
			I40E_GLPES_VFIP6RXTRUNC(fcn_idx);
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
			I40E_GLPES_VFIP6TXNOROUTE(fcn_idx);
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
			I40E_GLPES_VFTCPRTXSEG(fcn_idx);
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
			I40E_GLPES_VFTCPRXOPTERR(fcn_idx);
		stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
			I40E_GLPES_VFTCPRXPROTOERR(fcn_idx);

		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
			I40E_GLPES_VFIP4RXOCTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
			I40E_GLPES_VFIP4RXPKTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
			I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
			I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
			I40E_GLPES_VFIP4TXOCTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
			I40E_GLPES_VFIP4TXPKTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
			I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
			I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
			I40E_GLPES_VFIP6RXOCTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
			I40E_GLPES_VFIP6RXPKTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
			I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
			I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
			I40E_GLPES_VFIP6TXOCTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
			I40E_GLPES_VFIP6TXPKTSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
			I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
			I40E_GLPES_VFTCPRXSEGSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] =
			I40E_GLPES_VFTCPTXSEGLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] =
			I40E_GLPES_VFRDMARXRDSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] =
			I40E_GLPES_VFRDMARXSNDSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] =
			I40E_GLPES_VFRDMARXWRSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] =
			I40E_GLPES_VFRDMATXRDSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] =
			I40E_GLPES_VFRDMATXSNDSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] =
			I40E_GLPES_VFRDMATXWRSLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] =
			I40E_GLPES_VFRDMAVBNDLO(fcn_idx);
		stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] =
			I40E_GLPES_VFRDMAVINVLO(fcn_idx);
	}
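	/* Seed the last-read values so the first delta computation starts
	 * from the current hardware counts.
	 */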
	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
	     stats_index++) {
		stats_reg_offset = stats_table->stats_offset_64[stats_index];
		last_rd_stats->stats_value_64[stats_index] =
			readq(stats->hw->hw_addr + stats_reg_offset);
	}

	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
	     stats_index++) {
		stats_reg_offset = stats_table->stats_offset_32[stats_index];
		last_rd_stats->stats_value_32[stats_index] =
			i40iw_rd32(stats->hw, stats_reg_offset);
	}
}

/**
 * i40iw_hw_stats_read_32 - Read a 32-bit HW stats counter, accounting for roll-overs
 * @stats: pestat struct
 * @index: index in HW stats table which contains offset reg-addr
 * @value: hw stats value
 */
void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats,
			    enum i40iw_hw_stats_index_32b index,
			    u64 *value)
{
	struct i40iw_dev_hw_stats_offsets *stats_table =
		&stats->hw_stats_offsets;
	struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
	struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
	u64 new_stats_value = 0;
	u32 stats_reg_offset = stats_table->stats_offset_32[index];
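
	/*
	 * 32-bit hardware counters wrap quickly; fold the delta since the
	 * last read into a 64-bit software counter so callers always see
	 * a monotonically increasing value.
	 */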
	new_stats_value = i40iw_rd32(stats->hw, stats_reg_offset);
	/* roll-over case */
	if (new_stats_value < last_rd_stats->stats_value_32[index])
		hw_stats->stats_value_32[index] += new_stats_value;
	else
		hw_stats->stats_value_32[index] +=
			new_stats_value - last_rd_stats->stats_value_32[index];
	last_rd_stats->stats_value_32[index] = new_stats_value;
	*value = hw_stats->stats_value_32[index];
}

/**
 * i40iw_hw_stats_read_64 - Read a HW stats counter wider than 32 bits, accounting for roll-overs
 * @stats: pestat struct
 * @index: index in HW stats table which contains offset reg-addr
 * @value: hw stats value
 */
void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats,
			    enum i40iw_hw_stats_index_64b index,
			    u64 *value)
{
	struct i40iw_dev_hw_stats_offsets *stats_table =
		&stats->hw_stats_offsets;
	struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
	struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats;
	u64 new_stats_value = 0;
	u32 stats_reg_offset = stats_table->stats_offset_64[index];

	new_stats_value = readq(stats->hw->hw_addr + stats_reg_offset);
	/* roll-over case */
	if (new_stats_value < last_rd_stats->stats_value_64[index])
		hw_stats->stats_value_64[index] += new_stats_value;
	else
		hw_stats->stats_value_64[index] +=
			new_stats_value - last_rd_stats->stats_value_64[index];
	last_rd_stats->stats_value_64[index] = new_stats_value;
	*value = hw_stats->stats_value_64[index];
}

/**
 * i40iw_hw_stats_read_all - read all HW stat counters
 * @stats: pestat struct
 * @stats_values: hw stats structure
 *
 * Read all the HW stat counters, populating the hw_stats structure of
 * the passed-in vsi's pestat as well as the caller's copy in
 * @stats_values.
 */
void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats,
			     struct i40iw_dev_hw_stats *stats_values)
{
	u32 stats_index;
	unsigned long flags;
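
	/* Hold the stats lock across both passes so the 32- and 64-bit
	 * values form a consistent snapshot.
	 */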
	spin_lock_irqsave(&stats->lock, flags);
	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
	     stats_index++)
		i40iw_hw_stats_read_32(stats, stats_index,
				       &stats_values->stats_value_32[stats_index]);
	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
	     stats_index++)
		i40iw_hw_stats_read_64(stats, stats_index,
				       &stats_values->stats_value_64[stats_index]);
	spin_unlock_irqrestore(&stats->lock, flags);
}
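
/*
 * Usage sketch (illustrative only, not taken from this driver): a
 * periodic stats worker could snapshot every counter in one call;
 * "vsi" here stands for any caller-owned struct i40iw_sc_vsi:
 *
 *	struct i40iw_dev_hw_stats snap;
 *
 *	i40iw_hw_stats_read_all(vsi->pestat, &snap);
 */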

/**
 * i40iw_hw_stats_refresh_all - Update all HW stats structs
 * @stats: pestat struct
 *
 * Read all the HW stats counters to refresh the values in the hw_stats
 * structure of the passed-in pestat.
 */
void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats)
{
	u64 stats_value;
	u32 stats_index;
	unsigned long flags;

	spin_lock_irqsave(&stats->lock, flags);
	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32;
	     stats_index++)
		i40iw_hw_stats_read_32(stats, stats_index, &stats_value);
	for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64;
	     stats_index++)
		i40iw_hw_stats_read_64(stats, stats_index, &stats_value);
	spin_unlock_irqrestore(&stats->lock, flags);
}

/**
 * i40iw_get_fcn_id - Return the function id
 * @dev: pointer to the device
 */
static u8 i40iw_get_fcn_id(struct i40iw_sc_dev *dev)
{
	u8 fcn_id = I40IW_INVALID_FCN_ID;
	u8 i;
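
	/* Stats slots below I40IW_FIRST_NON_PF_STAT are reserved for PFs;
	 * claim the first free non-PF entry.
	 */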
	for (i = I40IW_FIRST_NON_PF_STAT; i < I40IW_MAX_STATS_COUNT; i++)
		if (!dev->fcn_id_array[i]) {
			fcn_id = i;
			dev->fcn_id_array[i] = true;
			break;
		}

	return fcn_id;
}

/**
 * i40iw_vsi_stats_init - Initialize the vsi statistics
 * @vsi: pointer to the vsi structure
 * @info: The info structure used for initialization
 */
enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info)
{
	u8 fcn_id = info->fcn_id;

	if (info->alloc_fcn_id)
		fcn_id = i40iw_get_fcn_id(vsi->dev);

	if (fcn_id == I40IW_INVALID_FCN_ID)
		return I40IW_ERR_NOT_READY;

	vsi->pestat = info->pestat;
	vsi->pestat->hw = vsi->dev->hw;
	vsi->pestat->vsi = vsi;

	if (info->stats_initialize) {
		i40iw_hw_stats_init(vsi->pestat, fcn_id, true);
		spin_lock_init(&vsi->pestat->lock);
		i40iw_hw_stats_start_timer(vsi);
	}
	vsi->stats_fcn_id_alloc = info->alloc_fcn_id;
	vsi->fcn_id = fcn_id;
	return I40IW_SUCCESS;
}

/**
 * i40iw_vsi_stats_free - Free the vsi stats
 * @vsi: pointer to the vsi structure
 */
void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi)
{
	u8 fcn_id = vsi->fcn_id;

	if (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT)
		vsi->dev->fcn_id_array[fcn_id] = false;
	i40iw_hw_stats_stop_timer(vsi);
}

static struct i40iw_cqp_ops iw_cqp_ops = {
	.cqp_init = i40iw_sc_cqp_init,
	.cqp_create = i40iw_sc_cqp_create,
	.cqp_post_sq = i40iw_sc_cqp_post_sq,
	.cqp_get_next_send_wqe = i40iw_sc_cqp_get_next_send_wqe,
	.cqp_destroy = i40iw_sc_cqp_destroy,
	.poll_for_cqp_op_done = i40iw_sc_poll_for_cqp_op_done
};

static struct i40iw_ccq_ops iw_ccq_ops = {
	.ccq_init = i40iw_sc_ccq_init,
	.ccq_create = i40iw_sc_ccq_create,
	.ccq_destroy = i40iw_sc_ccq_destroy,
	.ccq_create_done = i40iw_sc_ccq_create_done,
	.ccq_get_cqe_info = i40iw_sc_ccq_get_cqe_info,
	.ccq_arm = i40iw_sc_ccq_arm
};

static struct i40iw_ceq_ops iw_ceq_ops = {
	.ceq_init = i40iw_sc_ceq_init,
	.ceq_create = i40iw_sc_ceq_create,
	.cceq_create_done = i40iw_sc_cceq_create_done,
	.cceq_destroy_done = i40iw_sc_cceq_destroy_done,
	.cceq_create = i40iw_sc_cceq_create,
	.ceq_destroy = i40iw_sc_ceq_destroy,
	.process_ceq = i40iw_sc_process_ceq
};

static struct i40iw_aeq_ops iw_aeq_ops = {
	.aeq_init = i40iw_sc_aeq_init,
	.aeq_create = i40iw_sc_aeq_create,
	.aeq_destroy = i40iw_sc_aeq_destroy,
	.get_next_aeqe = i40iw_sc_get_next_aeqe,
	.repost_aeq_entries = i40iw_sc_repost_aeq_entries,
	.aeq_create_done = i40iw_sc_aeq_create_done,
	.aeq_destroy_done = i40iw_sc_aeq_destroy_done
};

/* iwarp pd ops */
static struct i40iw_pd_ops iw_pd_ops = {
	.pd_init = i40iw_sc_pd_init,
};

static struct i40iw_priv_qp_ops iw_priv_qp_ops = {
	.qp_init = i40iw_sc_qp_init,
	.qp_create = i40iw_sc_qp_create,
	.qp_modify = i40iw_sc_qp_modify,
	.qp_destroy = i40iw_sc_qp_destroy,
	.qp_flush_wqes = i40iw_sc_qp_flush_wqes,
	.qp_upload_context = i40iw_sc_qp_upload_context,
	.qp_setctx = i40iw_sc_qp_setctx,
	.qp_send_lsmm = i40iw_sc_send_lsmm,
	.qp_send_lsmm_nostag = i40iw_sc_send_lsmm_nostag,
	.qp_send_rtt = i40iw_sc_send_rtt,
	.qp_post_wqe0 = i40iw_sc_post_wqe0,
	.iw_mr_fast_register = i40iw_sc_mr_fast_register
};

static struct i40iw_priv_cq_ops iw_priv_cq_ops = {
	.cq_init = i40iw_sc_cq_init,
	.cq_create = i40iw_sc_cq_create,
	.cq_destroy = i40iw_sc_cq_destroy,
	.cq_modify = i40iw_sc_cq_modify,
};

static struct i40iw_mr_ops iw_mr_ops = {
	.alloc_stag = i40iw_sc_alloc_stag,
	.mr_reg_non_shared = i40iw_sc_mr_reg_non_shared,
	.mr_reg_shared = i40iw_sc_mr_reg_shared,
	.dealloc_stag = i40iw_sc_dealloc_stag,
	.query_stag = i40iw_sc_query_stag,
	.mw_alloc = i40iw_sc_mw_alloc
};

static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = {
	.manage_push_page = i40iw_sc_manage_push_page,
	.manage_hmc_pm_func_table = i40iw_sc_manage_hmc_pm_func_table,
	.set_hmc_resource_profile = i40iw_sc_set_hmc_resource_profile,
	.commit_fpm_values = i40iw_sc_commit_fpm_values,
	.query_fpm_values = i40iw_sc_query_fpm_values,
	.static_hmc_pages_allocated = i40iw_sc_static_hmc_pages_allocated,
	.add_arp_cache_entry = i40iw_sc_add_arp_cache_entry,
	.del_arp_cache_entry = i40iw_sc_del_arp_cache_entry,
	.query_arp_cache_entry = i40iw_sc_query_arp_cache_entry,
	.manage_apbvt_entry = i40iw_sc_manage_apbvt_entry,
	.manage_qhash_table_entry = i40iw_sc_manage_qhash_table_entry,
	.alloc_local_mac_ipaddr_table_entry = i40iw_sc_alloc_local_mac_ipaddr_entry,
	.add_local_mac_ipaddr_entry = i40iw_sc_add_local_mac_ipaddr_entry,
	.del_local_mac_ipaddr_entry = i40iw_sc_del_local_mac_ipaddr_entry,
	.cqp_nop = i40iw_sc_cqp_nop,
	.commit_fpm_values_done = i40iw_sc_commit_fpm_values_done,
	.query_fpm_values_done = i40iw_sc_query_fpm_values_done,
	.manage_hmc_pm_func_table_done = i40iw_sc_manage_hmc_pm_func_table_done,
	.update_suspend_qp = i40iw_sc_suspend_qp,
	.update_resume_qp = i40iw_sc_resume_qp
};

static struct i40iw_hmc_ops iw_hmc_ops = {
	.init_iw_hmc = i40iw_sc_init_iw_hmc,
	.parse_fpm_query_buf = i40iw_sc_parse_fpm_query_buf,
	.configure_iw_fpm = i40iw_sc_configure_iw_fpm,
	.parse_fpm_commit_buf = i40iw_sc_parse_fpm_commit_buf,
	.create_hmc_object = i40iw_sc_create_hmc_obj,
	.del_hmc_object = i40iw_sc_del_hmc_obj
};

/**
 * i40iw_device_init - Initialize IWARP device
 * @dev: IWARP device pointer
 * @info: IWARP init info
 */
enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev,
					 struct i40iw_device_init_info *info)
{
	u32 val;
	u32 vchnl_ver = 0;
	u16 hmc_fcn = 0;
	enum i40iw_status_code ret_code = 0;
	u8 db_size;

	spin_lock_init(&dev->cqp_lock);

	i40iw_device_init_uk(&dev->dev_uk);

	dev->debug_mask = info->debug_mask;

	dev->hmc_fn_id = info->hmc_fn_id;
	dev->is_pf = info->is_pf;

	dev->fpm_query_buf_pa = info->fpm_query_buf_pa;
	dev->fpm_query_buf = info->fpm_query_buf;

	dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa;
	dev->fpm_commit_buf = info->fpm_commit_buf;

	dev->hw = info->hw;
	dev->hw->hw_addr = info->bar0;

	if (dev->is_pf) {
		val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID);
		dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID);

		val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL);
		db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE);
		if ((db_size != I40IW_PE_DB_SIZE_4M) &&
		    (db_size != I40IW_PE_DB_SIZE_8M)) {
			i40iw_debug(dev, I40IW_DEBUG_DEV,
				    "%s: PE doorbell is not enabled in CSR val 0x%x\n",
				    __func__, val);
			ret_code = I40IW_ERR_PE_DOORBELL_NOT_ENABLED;
			return ret_code;
		}
		dev->db_addr = dev->hw->hw_addr + I40IW_DB_ADDR_OFFSET;
		dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_pf;
	} else {
		dev->db_addr = dev->hw->hw_addr + I40IW_VF_DB_ADDR_OFFSET;
	}

	dev->cqp_ops = &iw_cqp_ops;
	dev->ccq_ops = &iw_ccq_ops;
	dev->ceq_ops = &iw_ceq_ops;
	dev->aeq_ops = &iw_aeq_ops;
	dev->cqp_misc_ops = &iw_cqp_misc_ops;
	dev->iw_pd_ops = &iw_pd_ops;
	dev->iw_priv_qp_ops = &iw_priv_qp_ops;
	dev->iw_priv_cq_ops = &iw_priv_cq_ops;
	dev->mr_ops = &iw_mr_ops;
	dev->hmc_ops = &iw_hmc_ops;
	dev->vchnl_if.vchnl_send = info->vchnl_send;
	if (dev->vchnl_if.vchnl_send)
		dev->vchnl_up = true;
	else
		dev->vchnl_up = false;
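
	/* VFs discover the channel version and their HMC function id over
	 * the virtual channel before CQP setup can proceed.
	 */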
	if (!dev->is_pf) {
		dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_vf;
		ret_code = i40iw_vchnl_vf_get_ver(dev, &vchnl_ver);
		if (!ret_code) {
			i40iw_debug(dev, I40IW_DEBUG_DEV,
				    "%s: Get Channel version rc = 0x%0x, version is %u\n",
				    __func__, ret_code, vchnl_ver);
			ret_code = i40iw_vchnl_vf_get_hmc_fcn(dev, &hmc_fcn);
			if (!ret_code) {
				i40iw_debug(dev, I40IW_DEBUG_DEV,
					    "%s Get HMC function rc = 0x%0x, hmc fcn is %u\n",
					    __func__, ret_code, hmc_fcn);
				dev->hmc_fn_id = (u8)hmc_fcn;
			}
		}
	}
	dev->iw_vf_cqp_ops = &iw_vf_cqp_ops;

	return ret_code;
}