hns_roce_hw_v1.c

/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v1.h"
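
/*
 * The two helpers below fill a scatter/gather data segment and the
 * remote-address segment of a WQE in the little-endian layout the
 * hardware expects.
 */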
static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);
	dseg->addr = cpu_to_le64(sg->addr);
	dseg->len = cpu_to_le32(sg->length);
}

static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
			  u32 rkey)
{
	rseg->raddr = cpu_to_le64(remote_addr);
	rseg->rkey = cpu_to_le32(rkey);
	rseg->len = 0;
}
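
/*
 * Post a chain of send work requests to the send queue.  UD (GSI) and RC
 * WQEs are built in different hardware formats; once every WQE has been
 * written, the SQ doorbell is rung with the new head pointer to hand the
 * work to hardware.
 */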
static int hns_roce_v1_post_send(struct ib_qp *ibqp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL;
	struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
	struct hns_roce_wqe_data_seg *dseg = NULL;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_sq_db sq_db;
	int ps_opcode = 0, i = 0;
	unsigned long flags = 0;
	void *wqe = NULL;
	u32 doorbell[2];
	int nreq = 0;
	u32 ind = 0;
	int ret = 0;
	u8 *smac;
	int loopback;

	if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_RC)) {
		dev_err(dev, "unsupported QP type\n");
		*bad_wr = wr;
		return -EOPNOTSUPP;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);
	ind = qp->sq_next_wqe;
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
			wr->wr_id;

		/* UD (GSI) and RC WQEs are built separately */
		if (ibqp->qp_type == IB_QPT_GSI) {
			ud_sq_wqe = wqe;
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_0_M,
				       UD_SEND_WQE_U32_4_DMAC_0_S,
				       ah->av.mac[0]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_1_M,
				       UD_SEND_WQE_U32_4_DMAC_1_S,
				       ah->av.mac[1]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_2_M,
				       UD_SEND_WQE_U32_4_DMAC_2_S,
				       ah->av.mac[2]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_3_M,
				       UD_SEND_WQE_U32_4_DMAC_3_S,
				       ah->av.mac[3]);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_4_M,
				       UD_SEND_WQE_U32_8_DMAC_4_S,
				       ah->av.mac[4]);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_5_M,
				       UD_SEND_WQE_U32_8_DMAC_5_S,
				       ah->av.mac[5]);

			smac = (u8 *)hr_dev->dev_addr[qp->port];
			loopback = ether_addr_equal_unaligned(ah->av.mac,
							      smac) ? 1 : 0;
			roce_set_bit(ud_sq_wqe->u32_8,
				     UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
				     loopback);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
				       HNS_ROCE_WQE_OPCODE_SEND);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S,
				       2);
			roce_set_bit(ud_sq_wqe->u32_8,
				     UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S,
				     1);

			ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ?
				cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				(wr->send_flags & IB_SEND_SOLICITED ?
				cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				((wr->opcode == IB_WR_SEND_WITH_IMM) ?
				cpu_to_le32(HNS_ROCE_WQE_IMM) : 0);

			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_DEST_QP_M,
				       UD_SEND_WQE_U32_16_DEST_QP_S,
				       ud_wr(wr)->remote_qpn);
			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S,
				       ah->av.stat_rate);

			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_M,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_S,
				       le32_to_cpu(ah->av.sl_tclass_flowlabel) &
				       HNS_ROCE_FLOW_LABEL_MASK);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_PRIORITY_M,
				       UD_SEND_WQE_U32_36_PRIORITY_S,
				       le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
				       HNS_ROCE_SL_SHIFT);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_SGID_INDEX_M,
				       UD_SEND_WQE_U32_36_SGID_INDEX_S,
				       hns_get_gid_index(hr_dev, qp->phy_port,
							 ah->av.gid_index));

			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_M,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_S,
				       ah->av.hop_limit);
			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S,
				       le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
				       HNS_ROCE_TCLASS_SHIFT);

			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);

			ud_sq_wqe->va0_l =
				cpu_to_le32((u32)wr->sg_list[0].addr);
			ud_sq_wqe->va0_h =
				cpu_to_le32((wr->sg_list[0].addr) >> 32);
			ud_sq_wqe->l_key0 =
				cpu_to_le32(wr->sg_list[0].lkey);

			ud_sq_wqe->va1_l =
				cpu_to_le32((u32)wr->sg_list[1].addr);
			ud_sq_wqe->va1_h =
				cpu_to_le32((wr->sg_list[1].addr) >> 32);
			ud_sq_wqe->l_key1 =
				cpu_to_le32(wr->sg_list[1].lkey);
			ind++;
		} else if (ibqp->qp_type == IB_QPT_RC) {
			u32 tmp_len = 0;

			ctrl = wqe;
			memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
			for (i = 0; i < wr->num_sge; i++)
				tmp_len += wr->sg_list[i].length;

			ctrl->msg_length =
				cpu_to_le32(le32_to_cpu(ctrl->msg_length) +
					    tmp_len);

			ctrl->sgl_pa_h = 0;
			ctrl->flag = 0;

			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ctrl->imm_data = wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				ctrl->inv_key =
					cpu_to_le32(wr->ex.invalidate_rkey);
				break;
			default:
				ctrl->imm_data = 0;
				break;
			}

			/*
			 * Ctrl field: set the signal, solicited, immediate
			 * and fence flags.  The SO (strong order) bit stays
			 * clear until an application scenario requires it.
			 */
			ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
				      cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				      (wr->send_flags & IB_SEND_SOLICITED ?
				      cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				      ((wr->opcode == IB_WR_SEND_WITH_IMM ||
				      wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ?
				      cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) |
				      (wr->send_flags & IB_SEND_FENCE ?
				      (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);

			wqe += sizeof(struct hns_roce_wqe_ctrl_seg);

			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_SEND:
			case IB_WR_SEND_WITH_INV:
			case IB_WR_SEND_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
				break;
			case IB_WR_LOCAL_INV:
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_LSO:
			default:
				ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
				break;
			}
			ctrl->flag |= cpu_to_le32(ps_opcode);
			wqe += sizeof(struct hns_roce_wqe_raddr_seg);

			dseg = wqe;
			if ((wr->send_flags & IB_SEND_INLINE) && wr->num_sge) {
				if (le32_to_cpu(ctrl->msg_length) >
				    hr_dev->caps.max_sq_inline) {
					ret = -EINVAL;
					*bad_wr = wr;
					dev_err(dev, "inline len(1-%d)=%d, illegal\n",
						hr_dev->caps.max_sq_inline,
						le32_to_cpu(ctrl->msg_length));
					goto out;
				}
				for (i = 0; i < wr->num_sge; i++) {
					memcpy(wqe, ((void *) (uintptr_t)
					       wr->sg_list[i].addr),
					       wr->sg_list[i].length);
					wqe += wr->sg_list[i].length;
				}
				ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE);
			} else {
				/* Scatter/gather path: record the SGE count */
				for (i = 0; i < wr->num_sge; i++)
					set_data_seg(dseg + i, wr->sg_list + i);

				ctrl->flag |= cpu_to_le32(wr->num_sge <<
					      HNS_ROCE_WQE_SGE_NUM_BIT);
			}
			ind++;
		}
	}

out:
	/* Ring the SQ doorbell if any WQE was posted */
	if (likely(nreq)) {
		qp->sq.head += nreq;
		/* Memory barrier */
		wmb();

		sq_db.u32_4 = 0;
		sq_db.u32_8 = 0;
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
			       SQ_DOORBELL_U32_4_SQ_HEAD_S,
			       (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
			       SQ_DOORBELL_U32_4_SL_S, qp->sl);
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
			       SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
		roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
			       SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
		roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);

		doorbell[0] = le32_to_cpu(sq_db.u32_4);
		doorbell[1] = le32_to_cpu(sq_db.u32_8);

		hns_roce_write64_k((__le32 *)doorbell, qp->sq.db_reg_l);
		qp->sq_next_wqe = ind;
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}
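
/*
 * Post a chain of receive work requests to the receive queue.  For the
 * GSI QP the new RQ head is written directly into the QP1C config
 * register; for all other QPs an RQ doorbell is rung.
 */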
static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
				 const struct ib_recv_wr *wr,
				 const struct ib_recv_wr **bad_wr)
{
	int ret = 0;
	int nreq = 0;
	int ind = 0;
	int i = 0;
	u32 reg_val;
	unsigned long flags = 0;
	struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
	struct hns_roce_wqe_data_seg *scat = NULL;
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_rq_db rq_db;
	uint32_t doorbell[2] = {0};

	spin_lock_irqsave(&hr_qp->rq.lock, flags);
	ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
					 hr_qp->ibqp.recv_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			dev_err(dev, "rq:num_sge=%d > hr_qp->rq.max_gs=%d\n",
				wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = get_recv_wqe(hr_qp, ind);

		roce_set_field(ctrl->rwqe_byte_12,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S,
			       wr->num_sge);

		scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);

		for (i = 0; i < wr->num_sge; i++)
			set_data_seg(scat + i, wr->sg_list + i);

		hr_qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;
		/* Memory barrier */
		wmb();

		if (ibqp->qp_type == IB_QPT_GSI) {
			__le32 tmp;

			/* SW update GSI rq header */
			reg_val = roce_read(to_hr_dev(ibqp->device),
					    ROCEE_QP1C_CFG3_0_REG +
					    QP1C_CFGN_OFFSET * hr_qp->phy_port);
			tmp = cpu_to_le32(reg_val);
			roce_set_field(tmp,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
				       hr_qp->rq.head);
			reg_val = le32_to_cpu(tmp);
			roce_write(to_hr_dev(ibqp->device),
				   ROCEE_QP1C_CFG3_0_REG +
				   QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
		} else {
			rq_db.u32_4 = 0;
			rq_db.u32_8 = 0;

			roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
				       RQ_DOORBELL_U32_4_RQ_HEAD_S,
				       hr_qp->rq.head);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
				       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
				       RQ_DOORBELL_U32_8_CMD_S, 1);
			roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
				     1);

			doorbell[0] = le32_to_cpu(rq_db.u32_4);
			doorbell[1] = le32_to_cpu(rq_db.u32_8);

			hns_roce_write64_k((__le32 *)doorbell,
					   hr_qp->rq.db_reg_l);
		}
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}
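
/*
 * The helpers below program the doorbell operating modes and the
 * almost-empty/almost-full watermarks of the send doorbell (SDB) and the
 * "others" doorbell (ODB) in the global config and watermark registers.
 */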
static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
				       int sdb_mode, int odb_mode)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
	roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}

static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
				     u32 odb_mode)
{
	__le32 tmp;
	u32 val;

	/* Configure SDB/ODB extend mode */
	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
	roce_set_bit(tmp, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}

static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
			     u32 sdb_alful)
{
	__le32 tmp;
	u32 val;

	/* Configure SDB */
	val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
	roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
}

static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
			     u32 odb_alful)
{
	__le32 tmp;
	u32 val;

	/* Configure ODB */
	val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
	roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
}
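
/*
 * In extended doorbell mode the SDB/ODB contents are staged in DMA
 * buffers in host memory; the two helpers below program a buffer's
 * watermarks, its 4K-aligned base address (split across a low and a high
 * register) and its depth.
 */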
static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
				 u32 ext_sdb_alful)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t sdb_dma_addr;
	__le32 tmp;
	u32 val;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	/* Configure extend SDB threshold */
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);

	/* Configure extend SDB base addr */
	sdb_dma_addr = db->ext_db->sdb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12));

	/* Configure extend SDB depth */
	val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
		       db->ext_db->esdb_dep);
	/*
	 * 44 = 32 + 12.  The base address is handed to hardware shifted
	 * right by 12 because 4K pages are used, and by another 32 to
	 * extract the high 32 bits written to this register.
	 */
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);

	dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
	dev_dbg(dev, "ext SDB threshold: empty: 0x%x, full: 0x%x\n",
		ext_sdb_alept, ext_sdb_alful);
}
static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
				 u32 ext_odb_alful)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t odb_dma_addr;
	__le32 tmp;
	u32 val;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	/* Configure extend ODB threshold */
	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);

	/* Configure extend ODB base addr */
	odb_dma_addr = db->ext_db->odb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12));

	/* Configure extend ODB depth */
	val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
		       ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
		       db->ext_db->eodb_dep);
	/* As for the SDB: 44 = 32 + 12, the high 32 bits of the 4K-aligned
	 * base address.
	 */
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
		       odb_dma_addr >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);

	dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
	dev_dbg(dev, "ext ODB threshold: empty: 0x%x, full: 0x%x\n",
		ext_odb_alept, ext_odb_alful);
}
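
/*
 * Allocate the extended doorbell buffers for whichever of SDB/ODB is in
 * extended mode, program the matching watermarks, and finally latch the
 * chosen modes into the global config register.
 */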
static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
				u32 odb_ext_mod)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t sdb_dma_addr;
	dma_addr_t odb_dma_addr;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
	if (!db->ext_db)
		return -ENOMEM;

	if (sdb_ext_mod) {
		db->ext_db->sdb_buf_list = kmalloc(
				sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list) {
			ret = -ENOMEM;
			goto ext_sdb_buf_fail_out;
		}

		db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev,
						HNS_ROCE_V1_EXT_SDB_SIZE,
						&sdb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_sq_db_buf_fail;
		}
		db->ext_db->sdb_buf_list->map = sdb_dma_addr;

		db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH);
		hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT,
				     HNS_ROCE_V1_EXT_SDB_ALFUL);
	} else {
		hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT,
				 HNS_ROCE_V1_SDB_ALFUL);
	}

	if (odb_ext_mod) {
		db->ext_db->odb_buf_list = kmalloc(
				sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL);
		if (!db->ext_db->odb_buf_list) {
			ret = -ENOMEM;
			goto ext_odb_buf_fail_out;
		}

		db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev,
						HNS_ROCE_V1_EXT_ODB_SIZE,
						&odb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->odb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_otr_db_buf_fail;
		}
		db->ext_db->odb_buf_list->map = odb_dma_addr;

		db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH);
		hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT,
				     HNS_ROCE_V1_EXT_ODB_ALFUL);
	} else {
		hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT,
				 HNS_ROCE_V1_ODB_ALFUL);
	}

	hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod);

	return 0;

alloc_otr_db_buf_fail:
	kfree(db->ext_db->odb_buf_list);

ext_odb_buf_fail_out:
	if (sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
	}

alloc_sq_db_buf_fail:
	if (sdb_ext_mod)
		kfree(db->ext_db->sdb_buf_list);

ext_sdb_buf_fail_out:
	kfree(db->ext_db);
	return ret;
}
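
/*
 * The driver keeps reserved loopback RC QPs that are used to flush
 * outstanding work when a memory region is freed (see
 * hns_roce_v1_dereg_mr() below).
 */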
static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
						    struct ib_pd *pd)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_qp_init_attr init_attr;
	struct ib_qp *qp;

	memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
	init_attr.qp_type		= IB_QPT_RC;
	init_attr.sq_sig_type		= IB_SIGNAL_ALL_WR;
	init_attr.cap.max_recv_wr	= HNS_ROCE_MIN_WQE_NUM;
	init_attr.cap.max_send_wr	= HNS_ROCE_MIN_WQE_NUM;

	qp = hns_roce_create_qp(pd, &init_attr, NULL);
	if (IS_ERR(qp)) {
		dev_err(dev, "Create loop qp for mr free failed!\n");
		return NULL;
	}

	return to_hr_qp(qp);
}
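
/*
 * Reserve the loopback resources: a CQ, a PD and one RC QP per available
 * port.  Each QP is moved RESET->INIT->RTR->RTS and pointed at its own
 * QPN through a link-local GID derived (EUI-64 style) from the port MAC
 * address, so its sends loop straight back.
 */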
static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_cq_init_attr cq_init_attr;
	struct hns_roce_free_mr *free_mr;
	struct ib_qp_attr attr = { 0 };
	struct hns_roce_v1_priv *priv;
	struct hns_roce_qp *hr_qp;
	struct ib_cq *cq;
	struct ib_pd *pd;
	union ib_gid dgid;
	u64 subnet_prefix;
	int attr_mask = 0;
	int i, j;
	int ret;
	u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
	u8 phy_port;
	u8 port = 0;
	u8 sl;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	/* Reserved cq for loop qp */
	cq_init_attr.cqe		= HNS_ROCE_MIN_WQE_NUM * 2;
	cq_init_attr.comp_vector	= 0;

	cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL, NULL);
	if (IS_ERR(cq)) {
		dev_err(dev, "Create cq for reserved loop qp failed!\n");
		return -ENOMEM;
	}
	free_mr->mr_free_cq = to_hr_cq(cq);
	free_mr->mr_free_cq->ib_cq.device		= &hr_dev->ib_dev;
	free_mr->mr_free_cq->ib_cq.uobject		= NULL;
	free_mr->mr_free_cq->ib_cq.comp_handler		= NULL;
	free_mr->mr_free_cq->ib_cq.event_handler	= NULL;
	free_mr->mr_free_cq->ib_cq.cq_context		= NULL;
	atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);

	pd = hns_roce_alloc_pd(&hr_dev->ib_dev, NULL, NULL);
	if (IS_ERR(pd)) {
		dev_err(dev, "Create pd for reserved loop qp failed!\n");
		ret = -ENOMEM;
		goto alloc_pd_failed;
	}
	free_mr->mr_free_pd = to_hr_pd(pd);
	free_mr->mr_free_pd->ibpd.device	= &hr_dev->ib_dev;
	free_mr->mr_free_pd->ibpd.uobject	= NULL;
	free_mr->mr_free_pd->ibpd.__internal_mr	= NULL;
	atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);

	attr.qp_access_flags	= IB_ACCESS_REMOTE_WRITE;
	attr.pkey_index		= 0;
	attr.min_rnr_timer	= 0;
	/* Disable read ability */
	attr.max_dest_rd_atomic = 0;
	attr.max_rd_atomic	= 0;
	/* Use arbitrary values as rq_psn and sq_psn */
	attr.rq_psn		= 0x0808;
	attr.sq_psn		= 0x0808;
	attr.retry_cnt		= 7;
	attr.rnr_retry		= 7;
	attr.timeout		= 0x12;
	attr.path_mtu		= IB_MTU_256;
	attr.ah_attr.type	= RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
	rdma_ah_set_static_rate(&attr.ah_attr, 3);

	subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
				(i % HNS_ROCE_MAX_PORTS);
		sl = i / HNS_ROCE_MAX_PORTS;

		for (j = 0; j < caps->num_ports; j++) {
			if (hr_dev->iboe.phy_port[j] == phy_port) {
				queue_en[i] = 1;
				port = j;
				break;
			}
		}

		if (!queue_en[i])
			continue;

		free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
		if (!free_mr->mr_free_qp[i]) {
			dev_err(dev, "Create loop qp failed!\n");
			ret = -ENOMEM;
			goto create_lp_qp_failed;
		}
		hr_qp = free_mr->mr_free_qp[i];

		hr_qp->port		= port;
		hr_qp->phy_port		= phy_port;
		hr_qp->ibqp.qp_type	= IB_QPT_RC;
		hr_qp->ibqp.device	= &hr_dev->ib_dev;
		hr_qp->ibqp.uobject	= NULL;
		atomic_set(&hr_qp->ibqp.usecnt, 0);
		hr_qp->ibqp.pd		= pd;
		hr_qp->ibqp.recv_cq	= cq;
		hr_qp->ibqp.send_cq	= cq;

		rdma_ah_set_port_num(&attr.ah_attr, port + 1);
		rdma_ah_set_sl(&attr.ah_attr, sl);
		attr.port_num		= port + 1;

		attr.dest_qp_num	= hr_qp->qpn;
		memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
		       hr_dev->dev_addr[port],
		       MAC_ADDR_OCTET_NUM);

		memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
		memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
		memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
		dgid.raw[11] = 0xff;
		dgid.raw[12] = 0xfe;
		dgid.raw[8] ^= 2;
		rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RESET, IB_QPS_INIT);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN,
					    IB_QPS_INIT, IB_QPS_RTR);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RTR, IB_QPS_RTS);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}
	}

	return 0;

create_lp_qp_failed:
	for (i -= 1; i >= 0; i--) {
		hr_qp = free_mr->mr_free_qp[i];
		if (hns_roce_v1_destroy_qp(&hr_qp->ibqp))
			dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
	}

	if (hns_roce_dealloc_pd(pd))
		dev_err(dev, "Destroy pd for create_lp_qp failed!\n");

alloc_pd_failed:
	if (hns_roce_ib_destroy_cq(cq))
		dev_err(dev, "Destroy cq for create_lp_qp failed!\n");

	return ret;
}
static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_qp *hr_qp;
	int ret;
	int i;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;

		ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
		if (ret)
			dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
				i, ret);
	}

	ret = hns_roce_ib_destroy_cq(&free_mr->mr_free_cq->ib_cq);
	if (ret)
		dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret);

	ret = hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd);
	if (ret)
		dev_err(dev, "Destroy pd for mr_free failed(%d)!\n", ret);
}
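
/*
 * Top-level doorbell init: pick the default extend/event modes, set up
 * the extended doorbell buffers and program the event mode bits.
 */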
static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	u32 sdb_ext_mod;
	u32 odb_ext_mod;
	u32 sdb_evt_mod;
	u32 odb_evt_mod;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	memset(db, 0, sizeof(*db));

	/* Default DB mode */
	sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE;
	odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE;
	sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE;
	odb_evt_mod = HNS_ROCE_ODB_POLL_MODE;

	db->sdb_ext_mod = sdb_ext_mod;
	db->odb_ext_mod = odb_ext_mod;

	/* Init extend DB */
	ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod);
	if (ret) {
		dev_err(dev, "Failed to init extended DB configuration.\n");
		return ret;
	}

	hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod);

	return 0;
}
static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
{
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_dev *hr_dev;

	lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
				  work);
	hr_dev = to_hr_dev(lp_qp_work->ib_dev);

	hns_roce_v1_release_lp_qp(hr_dev);

	if (hns_roce_v1_rsv_lp_qp(hr_dev))
		dev_err(&hr_dev->pdev->dev, "create reserved qp failed\n");

	if (lp_qp_work->comp_flag)
		complete(lp_qp_work->comp);

	kfree(lp_qp_work);
}
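
/*
 * Queue the recreate work defined above and wait for it to finish,
 * polling the completion until the timeout elapses.
 */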
static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct completion comp;
	unsigned long end =
		msecs_to_jiffies(HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS) +
		jiffies;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
			     GFP_KERNEL);
	if (!lp_qp_work)
		return -ENOMEM;

	INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);

	lp_qp_work->ib_dev = &(hr_dev->ib_dev);
	lp_qp_work->comp = &comp;
	lp_qp_work->comp_flag = 1;

	init_completion(lp_qp_work->comp);

	queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));

	while (time_before_eq(jiffies, end)) {
		if (try_wait_for_completion(&comp))
			return 0;
		msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
	}

	lp_qp_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		return 0;

	dev_warn(dev, "recreate lp qp timed out after 20s, returning failure!\n");
	return -ETIMEDOUT;
}
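
/*
 * Post a zero-length RDMA write on a loopback QP.  The MR-free path polls
 * for its completion, using it as a fence showing that earlier work on
 * that QP has drained.
 */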
static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_send_wr send_wr;
	const struct ib_send_wr *bad_wr;
	int ret;

	memset(&send_wr, 0, sizeof(send_wr));
	send_wr.next		= NULL;
	send_wr.num_sge		= 0;
	send_wr.send_flags	= 0;
	send_wr.sg_list		= NULL;
	send_wr.wr_id		= (unsigned long long)&send_wr;
	send_wr.opcode		= IB_WR_RDMA_WRITE;

	ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
	if (ret) {
		dev_err(dev, "Post write wqe for mr free failed(%d)!\n", ret);
		return ret;
	}

	return 0;
}
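
/*
 * Workqueue handler for freeing an MR: send one loopback WQE per reserved
 * QP, then poll the dedicated CQ until all completions have arrived or
 * the timeout expires, and finally signal the waiter.
 */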
static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
{
	struct hns_roce_mr_free_work *mr_work;
	struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_cq *mr_free_cq;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_dev *hr_dev;
	struct hns_roce_mr *hr_mr;
	struct hns_roce_qp *hr_qp;
	struct device *dev;
	unsigned long end =
		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
	int i;
	int ret;
	int ne = 0;

	mr_work = container_of(work, struct hns_roce_mr_free_work, work);
	hr_mr = (struct hns_roce_mr *)mr_work->mr;
	hr_dev = to_hr_dev(mr_work->ib_dev);
	dev = &hr_dev->pdev->dev;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;
	mr_free_cq = free_mr->mr_free_cq;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;
		ne++;

		ret = hns_roce_v1_send_lp_wqe(hr_qp);
		if (ret) {
			dev_err(dev,
				"Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
				hr_qp->qpn, ret);
			goto free_work;
		}
	}

	if (!ne) {
		dev_err(dev, "Reserved loop qp is absent!\n");
		goto free_work;
	}

	do {
		ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
		if (ret < 0 && hr_qp) {
			dev_err(dev,
				"(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
				hr_qp->qpn, ret, hr_mr->key, ne);
			goto free_work;
		}
		ne -= ret;
		usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
			     (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
	} while (ne && time_before_eq(jiffies, end));

	if (ne != 0)
		dev_err(dev,
			"Poll cqe for mr 0x%x free timeout! Remain %d cqe\n",
			hr_mr->key, ne);

free_work:
	if (mr_work->comp_flag)
		complete(mr_work->comp);
	kfree(mr_work);
}
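
/*
 * Destroy an MR: move the MPT entry back to software ownership
 * (HW2SW_MPT), run the MR-free work above and wait for it, then release
 * the PBL buffer, the MTPT index and the umem.
 */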
static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_mr_free_work *mr_work;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct completion comp;
	unsigned long end =
		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
	unsigned long start = jiffies;
	int npages;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	if (mr->enabled) {
		if (hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
				       & (hr_dev->caps.num_mtpts - 1)))
			dev_warn(dev, "HW2SW_MPT failed!\n");
	}

	mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
	if (!mr_work) {
		ret = -ENOMEM;
		goto free_mr;
	}

	INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);

	mr_work->ib_dev = &(hr_dev->ib_dev);
	mr_work->comp = &comp;
	mr_work->comp_flag = 1;
	mr_work->mr = (void *)mr;
	init_completion(mr_work->comp);

	queue_work(free_mr->free_mr_wq, &(mr_work->work));

	while (time_before_eq(jiffies, end)) {
		if (try_wait_for_completion(&comp))
			goto free_mr;
		msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
	}

	mr_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		goto free_mr;

	dev_warn(dev, "Free mr work 0x%x timed out after 50s!\n", mr->key);
	ret = -ETIMEDOUT;

free_mr:
	dev_dbg(dev, "Free mr 0x%x used 0x%x us.\n",
		mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));

	if (mr->size != ~0ULL) {
		npages = ib_umem_page_count(mr->umem);
		dma_free_coherent(dev, npages * 8, mr->pbl_buf,
				  mr->pbl_dma_addr);
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key), 0);

	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);

	return ret;
}
static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	if (db->sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
		kfree(db->ext_db->sdb_buf_list);
	}

	if (db->odb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE,
				  db->ext_db->odb_buf_list->buf,
				  db->ext_db->odb_buf_list->map);
		kfree(db->ext_db->odb_buf_list);
	}

	kfree(db->ext_db);
}

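/*
 * Set up the extended RAQ: one DMA-coherent buffer whose 4 KB-aligned
 * base address, entry-count shift and watermark are programmed into
 * the ROCEE_EXT_RAQ* registers, after which extended-RAQ mode, timeout
 * checking and RAQ drop are switched on.
 */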
static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	u32 val;
	__le32 tmp;
	int raq_shift = 0;
	dma_addr_t addr;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_raq_table *raq;
	struct device *dev = &hr_dev->pdev->dev;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	raq = &priv->raq_table;

	raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
	if (!raq->e_raq_buf)
		return -ENOMEM;

	raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE,
						 &addr, GFP_KERNEL);
	if (!raq->e_raq_buf->buf) {
		ret = -ENOMEM;
		goto err_dma_alloc_raq;
	}
	raq->e_raq_buf->map = addr;

	/* Configure raq extended address. 48 bit, 4K aligned */
	roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);

	/* Configure raq_shift */
	raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
	val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);
	/*
	 * 44 = 32 + 12. The address is shifted right by 12 because the
	 * hardware works on 4K pages, and by a further 32 to extract the
	 * high 32 bits that are written to this register.
	 */
	roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
		       raq->e_raq_buf->map >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
	dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);

	/* Configure raq threshold */
	val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
		       ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
		       HNS_ROCE_V1_EXT_RAQ_WF);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
	dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);

	/* Enable the extended raq */
	val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
		       POL_TIME_INTERVAL_VAL);
	roce_set_bit(tmp, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
	roce_set_field(tmp,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
		       2);
	roce_set_bit(tmp,
		     ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
	dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);

	/* Enable raq drop */
	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);

	return 0;

err_dma_alloc_raq:
	kfree(raq->e_raq_buf);
	return ret;
}

static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_raq_table *raq;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	raq = &priv->raq_table;

	dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
			  raq->e_raq_buf->map);
	kfree(raq->e_raq_buf);
}

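/*
 * Open or close all logical RoCE ports by rewriting the port-state
 * field of ROCEE_GLB_CFG_REG; used with HNS_ROCE_PORT_UP/DOWN from
 * hns_roce_v1_init()/hns_roce_v1_exit().
 */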
static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
{
	__le32 tmp;
	u32 val;

	if (enable_flag) {
		val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
		/* Open all ports */
		tmp = cpu_to_le32(val);
		roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
			       ROCEE_GLB_CFG_ROCEE_PORT_ST_S,
			       ALL_PORT_VAL_OPEN);
		val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	} else {
		val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
		/* Close all ports */
		tmp = cpu_to_le32(val);
		roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
			       ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0);
		val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	}
}

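/*
 * Allocate the three reserved base address table (BT) buffers (QPC,
 * MTPT, CQC). hns_roce_v1_clear_hem() later pushes these buffers' bus
 * addresses to hardware through the BT_CMD registers when a HEM object
 * is unmapped.
 */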
static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	int ret;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;

	priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.qpc_buf.buf)
		return -ENOMEM;

	priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.mtpt_buf.buf) {
		ret = -ENOMEM;
		goto err_failed_alloc_mtpt_buf;
	}

	priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.cqc_buf.buf) {
		ret = -ENOMEM;
		goto err_failed_alloc_cqc_buf;
	}

	return 0;

err_failed_alloc_cqc_buf:
	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
			  priv->bt_table.mtpt_buf.buf,
			  priv->bt_table.mtpt_buf.map);

err_failed_alloc_mtpt_buf:
	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
			  priv->bt_table.qpc_buf.buf,
			  priv->bt_table.qpc_buf.map);

	return ret;
}

static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
			  priv->bt_table.cqc_buf.buf,
			  priv->bt_table.cqc_buf.map);

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
			  priv->bt_table.mtpt_buf.buf,
			  priv->bt_table.mtpt_buf.map);

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
			  priv->bt_table.qpc_buf.buf,
			  priv->bt_table.qpc_buf.map);
}

static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	/*
	 * This buffer will be used for the CQ's tptr (tail pointer), also
	 * called ci (consumer index). Every CQ consumes 2 bytes of this
	 * area to save its cqe ci in hip06. Hardware reads this area to
	 * get the new ci when the queue is almost full.
	 */
	tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
					   &tptr_buf->map, GFP_KERNEL);
	if (!tptr_buf->buf)
		return -ENOMEM;

	hr_dev->tptr_dma_addr = tptr_buf->map;
	hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;

	return 0;
}

static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
			  tptr_buf->buf, tptr_buf->map);
}

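/*
 * Create the single-threaded workqueue used by hns_roce_v1_dereg_mr()
 * and reserve the loopback QPs that the MR-free work depends on;
 * hns_roce_free_mr_free() undoes both.
 */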
static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
	if (!free_mr->free_mr_wq) {
		dev_err(dev, "Create free mr workqueue failed!\n");
		return -ENOMEM;
	}

	ret = hns_roce_v1_rsv_lp_qp(hr_dev);
	if (ret) {
		dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
		flush_workqueue(free_mr->free_mr_wq);
		destroy_workqueue(free_mr->free_mr_wq);
	}

	return ret;
}

static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	flush_workqueue(free_mr->free_mr_wq);
	destroy_workqueue(free_mr->free_mr_wq);

	hns_roce_v1_release_lp_qp(hr_dev);
}

/**
 * hns_roce_v1_reset - reset RoCE
 * @hr_dev: RoCE device struct pointer
 * @dereset: true -- drop reset, false -- reset
 *
 * Return: 0 on success, negative errno on failure
 */
static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
{
	struct device_node *dsaf_node;
	struct device *dev = &hr_dev->pdev->dev;
	struct device_node *np = dev->of_node;
	struct fwnode_handle *fwnode;
	int ret;

	/* check if this is DT/ACPI case */
	if (dev_of_node(dev)) {
		dsaf_node = of_parse_phandle(np, "dsaf-handle", 0);
		if (!dsaf_node) {
			dev_err(dev, "could not find dsaf-handle\n");
			return -EINVAL;
		}
		fwnode = &dsaf_node->fwnode;
	} else if (is_acpi_device_node(dev->fwnode)) {
		struct fwnode_reference_args args;

		ret = acpi_node_get_property_reference(dev->fwnode,
						       "dsaf-handle", 0, &args);
		if (ret) {
			dev_err(dev, "could not find dsaf-handle\n");
			return ret;
		}
		fwnode = args.fwnode;
	} else {
		dev_err(dev, "cannot read data from DT or ACPI\n");
		return -ENXIO;
	}

	ret = hns_dsaf_roce_reset(fwnode, false);
	if (ret)
		return ret;

	if (dereset) {
		msleep(SLEEP_TIME_INTERVAL);
		ret = hns_dsaf_roce_reset(fwnode, true);
	}

	return ret;
}

static int hns_roce_des_qp_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_des_qp *des_qp;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	des_qp = &priv->des_qp;

	des_qp->requeue_flag = 1;
	des_qp->qp_wq = create_singlethread_workqueue("hns_roce_destroy_qp");
	if (!des_qp->qp_wq) {
		dev_err(dev, "Create destroy qp workqueue failed!\n");
		return -ENOMEM;
	}

	return 0;
}

static void hns_roce_des_qp_free(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v1_priv *priv;
	struct hns_roce_des_qp *des_qp;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	des_qp = &priv->des_qp;

	des_qp->requeue_flag = 0;
	flush_workqueue(des_qp->qp_wq);
	destroy_workqueue(des_qp->qp_wq);
}

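/*
 * Report the fixed hip06 capabilities: everything except the GID and
 * P_Key table lengths is a compile-time constant, with vendor ID, part
 * ID, system image GUID and the local CA ACK delay read back from
 * registers.
 */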
static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
{
	int i = 0;
	struct hns_roce_caps *caps = &hr_dev->caps;

	hr_dev->vendor_id = roce_read(hr_dev, ROCEE_VENDOR_ID_REG);
	hr_dev->vendor_part_id = roce_read(hr_dev, ROCEE_VENDOR_PART_ID_REG);
	hr_dev->sys_image_guid = roce_read(hr_dev, ROCEE_SYS_IMAGE_GUID_L_REG) |
				((u64)roce_read(hr_dev,
					    ROCEE_SYS_IMAGE_GUID_H_REG) << 32);
	hr_dev->hw_rev = HNS_ROCE_HW_VER1;

	caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
	caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
	caps->min_wqes = HNS_ROCE_MIN_WQE_NUM;
	caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM;
	caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
	caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM;
	caps->max_sq_sg = HNS_ROCE_V1_SG_NUM;
	caps->max_rq_sg = HNS_ROCE_V1_SG_NUM;
	caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE;
	caps->num_uars = HNS_ROCE_V1_UAR_NUM;
	caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM;
	caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM;
	caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM;
	caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM;
	caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM;
	caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS;
	caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM;
	caps->max_qp_init_rdma = HNS_ROCE_V1_MAX_QP_INIT_RDMA;
	caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA;
	caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ;
	caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ;
	caps->qpc_entry_sz = HNS_ROCE_V1_QPC_ENTRY_SIZE;
	caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE;
	caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE;
	caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE;
	caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
	caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE;
	caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
	caps->reserved_lkey = 0;
	caps->reserved_pds = 0;
	caps->reserved_mrws = 1;
	caps->reserved_uars = 0;
	caps->reserved_cqs = 0;
	caps->chunk_sz = HNS_ROCE_V1_TABLE_CHUNK_SIZE;

	for (i = 0; i < caps->num_ports; i++)
		caps->pkey_table_len[i] = 1;

	for (i = 0; i < caps->num_ports; i++) {
		/* Six ports share 16 GIDs in the v1 engine */
		if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports))
			caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
						 caps->num_ports;
		else
			caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
						 caps->num_ports + 1;
	}

	caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
	caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
	caps->local_ca_ack_delay = roce_read(hr_dev, ROCEE_ACK_DELAY_REG);
	caps->max_mtu = IB_MTU_2048;

	return 0;
}

static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	u32 val;
	__le32 tmp;
	struct device *dev = &hr_dev->pdev->dev;

	/* DMAE user config */
	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
		       1 << PAGES_SHIFT_16);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);

	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
		       1 << PAGES_SHIFT_16);

	ret = hns_roce_db_init(hr_dev);
	if (ret) {
		dev_err(dev, "doorbell init failed!\n");
		return ret;
	}

	ret = hns_roce_raq_init(hr_dev);
	if (ret) {
		dev_err(dev, "raq init failed!\n");
		goto error_failed_raq_init;
	}

	ret = hns_roce_bt_init(hr_dev);
	if (ret) {
		dev_err(dev, "bt init failed!\n");
		goto error_failed_bt_init;
	}

	ret = hns_roce_tptr_init(hr_dev);
	if (ret) {
		dev_err(dev, "tptr init failed!\n");
		goto error_failed_tptr_init;
	}

	ret = hns_roce_des_qp_init(hr_dev);
	if (ret) {
		dev_err(dev, "des qp init failed!\n");
		goto error_failed_des_qp_init;
	}

	ret = hns_roce_free_mr_init(hr_dev);
	if (ret) {
		dev_err(dev, "free mr init failed!\n");
		goto error_failed_free_mr_init;
	}

	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);

	return 0;

error_failed_free_mr_init:
	hns_roce_des_qp_free(hr_dev);

error_failed_des_qp_init:
	hns_roce_tptr_free(hr_dev);

error_failed_tptr_init:
	hns_roce_bt_free(hr_dev);

error_failed_bt_init:
	hns_roce_raq_free(hr_dev);

error_failed_raq_init:
	hns_roce_db_free(hr_dev);
	return ret;
}

static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
	hns_roce_free_mr_free(hr_dev);
	hns_roce_des_qp_free(hr_dev);
	hns_roce_tptr_free(hr_dev);
	hns_roce_bt_free(hr_dev);
	hns_roce_raq_free(hr_dev);
	hns_roce_db_free(hr_dev);
}

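/*
 * Mailbox command interface. The host command register file at
 * ROCEE_MB1_REG is laid out as five fields: in_param (words 0-1),
 * out_param (words 2-3), in_modifier (word 4) and the
 * op/modifier/event/token/go word (word 5, i.e. ROCEE_MB6_REG). A
 * command is owned by hardware while the go bit (HCR_GO_BIT) is set,
 * so posting fills words 0-4 first and writes word 5 last, behind a
 * write barrier.
 */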
static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev)
{
	u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG);

	return (!!(status & (1 << HCR_GO_BIT)));
}

static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
				 u64 out_param, u32 in_modifier, u8 op_modifier,
				 u16 op, u16 token, int event)
{
	u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG);
	unsigned long end;
	u32 val = 0;
	__le32 tmp;

	end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
	while (hns_roce_v1_cmd_pending(hr_dev)) {
		if (time_after(jiffies, end)) {
			dev_err(hr_dev->dev, "jiffies=%d end=%d\n",
				(int)jiffies, (int)end);
			return -EAGAIN;
		}
		cond_resched();
	}

	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
		       op);
	roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
		       ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
	roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
	roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
	roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_TOKEN_M,
		       ROCEE_MB6_ROCEE_MB_TOKEN_S, token);

	val = le32_to_cpu(tmp);
	writeq(in_param, hcr + 0);
	writeq(out_param, hcr + 2);
	writel(in_modifier, hcr + 4);
	/* Memory barrier */
	wmb();

	writel(val, hcr + 5);

	mmiowb();

	return 0;
}

static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
				unsigned long timeout)
{
	u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
	unsigned long end = 0;
	u32 status = 0;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end))
		cond_resched();

	if (hns_roce_v1_cmd_pending(hr_dev)) {
		dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
		return -ETIMEDOUT;
	}

	status = le32_to_cpu((__force __le32)
			      __raw_readl(hcr + HCR_STATUS_OFFSET));
	if ((status & STATUS_MASK) != 0x1) {
		dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status);
		return -EBUSY;
	}

	return 0;
}

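/*
 * A 128-bit GID is written 32 bits at a time into the four GID
 * register banks (L, ML, MH, H); gid_idx selects the slot inside each
 * bank. The v1 engine keeps a single table of HNS_ROCE_V1_GID_NUM GIDs
 * shared by all ports, so the per-port index is first translated by
 * hns_get_gid_index().
 */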
static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
			       int gid_index, const union ib_gid *gid,
			       const struct ib_gid_attr *attr)
{
	u32 *p = NULL;
	u8 gid_idx = 0;

	gid_idx = hns_get_gid_index(hr_dev, port, gid_index);

	p = (u32 *)&gid->raw[0];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[4];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[8];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[0xc];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	return 0;
}

static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
			       u8 *addr)
{
	u32 reg_smac_l;
	u16 reg_smac_h;
	__le32 tmp;
	u16 *p_h;
	u32 *p;
	u32 val;

	/*
	 * When the MAC changes, loopback may fail because the SMAC no
	 * longer equals the DMAC, so the reserved loopback QPs must be
	 * released and recreated.
	 */
	if (hr_dev->hw->dereg_mr) {
		int ret;

		ret = hns_roce_v1_recreate_lp_qp(hr_dev);
		if (ret && ret != -ETIMEDOUT)
			return ret;
	}

	p = (u32 *)(&addr[0]);
	reg_smac_l = *p;
	roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
		       PHY_PORT_OFFSET * phy_port);

	val = roce_read(hr_dev,
			ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
	tmp = cpu_to_le32(val);
	p_h = (u16 *)(&addr[4]);
	reg_smac_h = *p_h;
	roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
		       ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
		   val);

	return 0;
}

static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
				enum ib_mtu mtu)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev,
			ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
		       ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
		   val);
}

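/*
 * Build a v1 MPT entry in the mailbox buffer: key, state and access
 * bits first; then, for non-DMA MRs, the first page addresses of the
 * umem are packed directly into the PA0-PA6 fields and the rest are
 * reached through the PBL whose base address is filled in at the end.
 */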
static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
				  unsigned long mtpt_idx)
{
	struct hns_roce_v1_mpt_entry *mpt_entry;
	struct scatterlist *sg;
	u64 *pages;
	int entry;
	int i;

	/* MPT filled into mailbox buf */
	mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf;
	memset(mpt_entry, 0, sizeof(*mpt_entry));

	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M,
		       MPT_BYTE_4_KEY_STATE_S, KEY_VALID);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M,
		       MPT_BYTE_4_KEY_S, mr->key);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M,
		       MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S,
		     (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0);
	roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M,
		       MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S,
		     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S,
		     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S,
		     (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S,
		     0);
	roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0);

	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
		       MPT_BYTE_12_PBL_ADDR_H_S, 0);
	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
		       MPT_BYTE_12_MW_BIND_COUNTER_S, 0);

	mpt_entry->virt_addr_l = cpu_to_le32((u32)mr->iova);
	mpt_entry->virt_addr_h = cpu_to_le32((u32)(mr->iova >> 32));
	mpt_entry->length = cpu_to_le32((u32)mr->size);

	roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
		       MPT_BYTE_28_PD_S, mr->pd);
	roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M,
		       MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx);
	roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
		       MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);

	/* DMA memory register */
	if (mr->type == MR_TYPE_DMA)
		return 0;

	pages = (u64 *) __get_free_page(GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	i = 0;
	for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
		pages[i] = ((u64)sg_dma_address(sg)) >> 12;

		/* The first pages are recorded directly in the MPT entry */
		if (i >= HNS_ROCE_MAX_INNER_MTPT_NUM)
			break;
		i++;
	}

	/* Register user mr */
	for (i = 0; i < HNS_ROCE_MAX_INNER_MTPT_NUM; i++) {
		switch (i) {
		case 0:
			mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_36,
				       MPT_BYTE_36_PA0_H_M,
				       MPT_BYTE_36_PA0_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_32));
			break;
		case 1:
			roce_set_field(mpt_entry->mpt_byte_36,
				       MPT_BYTE_36_PA1_L_M,
				       MPT_BYTE_36_PA1_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_40,
				       MPT_BYTE_40_PA1_H_M,
				       MPT_BYTE_40_PA1_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_24));
			break;
		case 2:
			roce_set_field(mpt_entry->mpt_byte_40,
				       MPT_BYTE_40_PA2_L_M,
				       MPT_BYTE_40_PA2_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_44,
				       MPT_BYTE_44_PA2_H_M,
				       MPT_BYTE_44_PA2_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_16));
			break;
		case 3:
			roce_set_field(mpt_entry->mpt_byte_44,
				       MPT_BYTE_44_PA3_L_M,
				       MPT_BYTE_44_PA3_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_48,
				       MPT_BYTE_48_PA3_H_M,
				       MPT_BYTE_48_PA3_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_8));
			break;
		case 4:
			mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_56,
				       MPT_BYTE_56_PA4_H_M,
				       MPT_BYTE_56_PA4_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_32));
			break;
		case 5:
			roce_set_field(mpt_entry->mpt_byte_56,
				       MPT_BYTE_56_PA5_L_M,
				       MPT_BYTE_56_PA5_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_60,
				       MPT_BYTE_60_PA5_H_M,
				       MPT_BYTE_60_PA5_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_24));
			break;
		case 6:
			roce_set_field(mpt_entry->mpt_byte_60,
				       MPT_BYTE_60_PA6_L_M,
				       MPT_BYTE_60_PA6_L_S, (u32)(pages[i]));
			roce_set_field(mpt_entry->mpt_byte_64,
				       MPT_BYTE_64_PA6_H_M,
				       MPT_BYTE_64_PA6_H_S,
				       (u32)(pages[i] >> PAGES_SHIFT_16));
			break;
		default:
			break;
		}
	}

	free_page((unsigned long) pages);

	mpt_entry->pbl_addr_l = cpu_to_le32((u32)(mr->pbl_dma_addr));
	roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
		       MPT_BYTE_12_PBL_ADDR_H_S,
		       ((u32)(mr->pbl_dma_addr >> 32)));

	return 0;
}

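/*
 * CQE ring helpers: get_cqe() indexes the CQ buffer, get_sw_cqe()
 * checks whether the CQE at a given index is owned by software by
 * comparing its owner bit with the wrap bit of that index, and
 * next_cqe_sw() applies the check to the current consumer index.
 */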
static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
{
	return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
				   n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
}

static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
{
	struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);

	/* The CQE belongs to software when its owner bit differs from
	 * the wrap bit of the consumer index.
	 */
	return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
		!!(n & (hr_cq->ib_cq.cqe + 1))) ? hr_cqe : NULL;
}

static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
{
	return get_sw_cqe(hr_cq, hr_cq->cons_index);
}

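/*
 * Ring the CQ doorbell with an updated consumer index: doorbell[0]
 * carries the masked ci, doorbell[1] the doorbell command fields plus
 * the CQ number, and both words go out in a single 64-bit store via
 * hns_roce_write64_k().
 */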
static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
{
	__le32 doorbell[2];

	doorbell[0] = cpu_to_le32(cons_index & ((hr_cq->cq_depth << 1) - 1));
	doorbell[1] = 0;
	roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);

	hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
}

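/*
 * Purge all CQEs of a destroyed QP: locate the producer edge, then
 * walk backwards compacting surviving CQEs over the purged ones
 * (preserving each destination slot's owner bit), and finally advance
 * the consumer index past the freed entries.
 */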
static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				   struct hns_roce_srq *srq)
{
	struct hns_roce_cqe *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index);
	     ++prod_index) {
		if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
			break;
	}

	/*
	 * Now backwards through the CQ, removing CQ entries
	 * that match our QP by overwriting them with next entries.
	 */
	while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
		cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
		if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
				    CQE_BYTE_16_LOCAL_QPN_S) &
		     HNS_ROCE_CQE_QPN_MASK) == qpn) {
			/* The v1 engine does not support SRQ */
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(hr_cq, (prod_index + nfreed) &
				       hr_cq->ib_cq.cqe);
			owner_bit = roce_get_bit(dest->cqe_byte_4,
						 CQE_BYTE_4_OWNER_S);
			memcpy(dest, cqe, sizeof(*cqe));
			roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S,
				     owner_bit);
		}
	}

	if (nfreed) {
		hr_cq->cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
	}
}

static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
				 struct hns_roce_srq *srq)
{
	spin_lock_irq(&hr_cq->lock);
	__hns_roce_v1_cq_clean(hr_cq, qpn, srq);
	spin_unlock_irq(&hr_cq->lock);
}

static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
				  struct hns_roce_cq *hr_cq, void *mb_buf,
				  u64 *mtts, dma_addr_t dma_handle, int nent,
				  u32 vector)
{
	struct hns_roce_cq_context *cq_context = NULL;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;
	dma_addr_t tptr_dma_addr;
	int offset;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	cq_context = mb_buf;
	memset(cq_context, 0, sizeof(*cq_context));

	/* Get the tptr for this CQ. */
	offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
	tptr_dma_addr = tptr_buf->map + offset;
	hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);

	/* Register cq_context members */
	roce_set_field(cq_context->cqc_byte_4,
		       CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M,
		       CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
	roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
		       CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);

	cq_context->cq_bt_l = cpu_to_le32((u32)dma_handle);

	roce_set_field(cq_context->cqc_byte_12,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S,
		       ((u64)dma_handle >> 32));
	roce_set_field(cq_context->cqc_byte_12,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
		       CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
		       ilog2((unsigned int)nent));
	roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
		       CQ_CONTEXT_CQC_BYTE_12_CEQN_S, vector);

	cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));

	roce_set_field(cq_context->cqc_byte_20,
		       CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
		       CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S,
		       (mtts[0]) >> 32);
	/* Dedicated hardware, directly set 0 */
	roce_set_field(cq_context->cqc_byte_20,
		       CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
		       CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0);
	/*
	 * 44 = 32 + 12. The address is shifted right by 12 because the
	 * hardware works on 4K pages, and by a further 32 to extract the
	 * high 32 bits that are written to this field.
	 */
	roce_set_field(cq_context->cqc_byte_20,
		       CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
		       CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
		       tptr_dma_addr >> 44);

	cq_context->cqe_tptr_addr_l = cpu_to_le32((u32)(tptr_dma_addr >> 12));

	roce_set_field(cq_context->cqc_byte_32,
		       CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
		       CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0);
	roce_set_bit(cq_context->cqc_byte_32,
		     CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
		     0);
	/* The initial value of cq's ci is 0 */
	roce_set_field(cq_context->cqc_byte_32,
		       CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
		       CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
}

static int hns_roce_v1_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return -EOPNOTSUPP;
}

static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
				     enum ib_cq_notify_flags flags)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	u32 notification_flag;
	__le32 doorbell[2] = {0};

	notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
			    IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
	/*
	 * flags = 0: Notification Flag = 1, next
	 * flags = 1: Notification Flag = 0, solicited
	 */
	doorbell[0] =
		cpu_to_le32(hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
	roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1);
	roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
		       ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
		       hr_cq->cqn | notification_flag);

	hns_roce_write64_k(doorbell, hr_cq->cq_db_l);

	return 0;
}

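/*
 * Consume one CQE: work out the owning QP (UD CQEs always report a
 * local_qpn of at most 1, so the real QPN is rebuilt from the port
 * number), translate the hardware status and opcode fields into the
 * ib_wc, and advance the SQ or RQ tail that the CQE completes.
 */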
static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
				struct hns_roce_qp **cur_qp, struct ib_wc *wc)
{
	int qpn;
	int is_send;
	u16 wqe_ctr;
	u32 status;
	u32 opcode;
	struct hns_roce_cqe *cqe;
	struct hns_roce_qp *hr_qp;
	struct hns_roce_wq *wq;
	struct hns_roce_wqe_ctrl_seg *sq_wqe;
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
	struct device *dev = &hr_dev->pdev->dev;

	/* Find the CQE according to the consumer index */
	cqe = next_cqe_sw(hr_cq);
	if (!cqe)
		return -EAGAIN;

	++hr_cq->cons_index;
	/* Memory barrier */
	rmb();
	/* 0->SQ, 1->RQ */
	is_send = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));

	/* The local_qpn in a UD cqe is always 1, so compute the real qpn */
	if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
			   CQE_BYTE_16_LOCAL_QPN_S) <= 1) {
		qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
				     CQE_BYTE_20_PORT_NUM_S) +
		      roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
				     CQE_BYTE_16_LOCAL_QPN_S) *
		      HNS_ROCE_MAX_PORTS;
	} else {
		qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
				     CQE_BYTE_16_LOCAL_QPN_S);
	}

	if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) {
		hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
		if (unlikely(!hr_qp)) {
			dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n",
				hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK));
			return -EINVAL;
		}

		*cur_qp = hr_qp;
	}

	wc->qp = &(*cur_qp)->ibqp;
	wc->vendor_err = 0;

	status = roce_get_field(cqe->cqe_byte_4,
				CQE_BYTE_4_STATUS_OF_THE_OPERATION_M,
				CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) &
		 HNS_ROCE_CQE_STATUS_MASK;
	switch (status) {
	case HNS_ROCE_CQE_SUCCESS:
		wc->status = IB_WC_SUCCESS;
		break;
	case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	/* The CQE reports an error status; return immediately */
	if (wc->status != IB_WC_SUCCESS)
		return 0;

	if (is_send) {
		/* The CQE corresponds to an SQ WQE */
		sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
						CQE_BYTE_4_WQE_INDEX_M,
						CQE_BYTE_4_WQE_INDEX_S) &
						((*cur_qp)->sq.wqe_cnt - 1));
		switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) {
		case HNS_ROCE_WQE_OPCODE_SEND:
			wc->opcode = IB_WC_SEND;
			break;
		case HNS_ROCE_WQE_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = le32_to_cpu(cqe->byte_cnt);
			break;
		case HNS_ROCE_WQE_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case HNS_ROCE_WQE_OPCODE_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case HNS_ROCE_WQE_OPCODE_UD_SEND:
			wc->opcode = IB_WC_SEND;
			break;
		default:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		}
		wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ?
				IB_WC_WITH_IMM : 0);

		wq = &(*cur_qp)->sq;
		if ((*cur_qp)->sq_signal_bits) {
			/*
			 * If sq_signal_bits is set, first move the tail
			 * pointer up to the WQE this CQE corresponds to.
			 */
			wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
						      CQE_BYTE_4_WQE_INDEX_M,
						      CQE_BYTE_4_WQE_INDEX_S);
			wq->tail += (wqe_ctr - (u16)wq->tail) &
				    (wq->wqe_cnt - 1);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else {
		/* The CQE corresponds to an RQ WQE */
		wc->byte_len = le32_to_cpu(cqe->byte_cnt);
		opcode = roce_get_field(cqe->cqe_byte_4,
					CQE_BYTE_4_OPERATION_TYPE_M,
					CQE_BYTE_4_OPERATION_TYPE_S) &
			 HNS_ROCE_CQE_OPCODE_MASK;
		switch (opcode) {
		case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data =
				cpu_to_be32(le32_to_cpu(cqe->immediate_data));
			break;
		case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
			if (roce_get_bit(cqe->cqe_byte_4,
					 CQE_BYTE_4_IMM_INDICATOR_S)) {
				wc->opcode = IB_WC_RECV;
				wc->wc_flags = IB_WC_WITH_IMM;
				wc->ex.imm_data = cpu_to_be32(
					le32_to_cpu(cqe->immediate_data));
			} else {
				wc->opcode = IB_WC_RECV;
				wc->wc_flags = 0;
			}
			break;
		default:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		}

		/* Update tail pointer, record wr_id */
		wq = &(*cur_qp)->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
		wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
					    CQE_BYTE_20_SL_S);
		wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
						CQE_BYTE_20_REMOTE_QPN_M,
						CQE_BYTE_20_REMOTE_QPN_S);
		wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
					      CQE_BYTE_20_GRH_PRESENT_S) ?
				 IB_WC_GRH : 0);
		wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28,
						     CQE_BYTE_28_P_KEY_IDX_M,
						     CQE_BYTE_28_P_KEY_IDX_S);
	}

	return 0;
}

int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	struct hns_roce_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	int ret = 0;

	spin_lock_irqsave(&hr_cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled);
		if (ret)
			break;
	}

	if (npolled) {
		*hr_cq->tptr_addr = hr_cq->cons_index &
			((hr_cq->cq_depth << 1) - 1);

		/* Memory barrier */
		wmb();
		hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
	}

	spin_unlock_irqrestore(&hr_cq->lock, flags);

	if (ret == 0 || ret == -EAGAIN)
		return npolled;
	else
		return ret;
}

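/*
 * Ask hardware to unmap a HEM object's base address table entry:
 * select the table type and the matching reserved BT buffer, busy-wait
 * for the previous BT command to drain (HW_SYNC bit), then issue the
 * 64-bit BT command through ROCEE_BT_CMD_L/H under bt_cmd_lock.
 */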
static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
				 struct hns_roce_hem_table *table, int obj,
				 int step_idx)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	unsigned long end = 0, flags = 0;
	__le32 bt_cmd_val[2] = {0};
	void __iomem *bt_cmd;
	u64 bt_ba = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;

	switch (table->type) {
	case HEM_TYPE_QPC:
		roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
		bt_ba = priv->bt_table.qpc_buf.map >> 12;
		break;
	case HEM_TYPE_MTPT:
		roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
			       HEM_TYPE_MTPT);
		bt_ba = priv->bt_table.mtpt_buf.map >> 12;
		break;
	case HEM_TYPE_CQC:
		roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
		bt_ba = priv->bt_table.cqc_buf.map >> 12;
		break;
	case HEM_TYPE_SRQC:
		dev_dbg(dev, "HEM_TYPE_SRQC is not supported.\n");
		return -EINVAL;
	default:
		return 0;
	}
	roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
		       ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
	roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
	roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);

	spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);

	bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;

	end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
	while (1) {
		if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
			if (!(time_before(jiffies, end))) {
				dev_err(dev, "Write bt_cmd err, hw_sync is not zero.\n");
				spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
						       flags);
				return -EBUSY;
			}
		} else {
			break;
		}
		msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
	}

	bt_cmd_val[0] = (__le32)bt_ba;
	roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
		       ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
	hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);

	spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);

	return 0;
}

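/*
 * Drive the QPC state machine via mailbox commands: op[][] maps every
 * legal (cur_state, new_state) pair onto a firmware opcode. The 2RST
 * and 2ERR transitions need no context; all others copy the prepared
 * QP context into a command mailbox first.
 */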
static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
				 struct hns_roce_mtt *mtt,
				 enum hns_roce_qp_state cur_state,
				 enum hns_roce_qp_state new_state,
				 struct hns_roce_qp_context *context,
				 struct hns_roce_qp *hr_qp)
{
	static const u16
	op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = {
		[HNS_ROCE_QP_STATE_RST] = {
		[HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
		[HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
		[HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
		},
		[HNS_ROCE_QP_STATE_INIT] = {
		[HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
		[HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
		/* Note: In the v1 engine, HW doesn't support INIT2INIT,
		 * so the RST2INIT command is used instead.
		 */
		[HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
		[HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP,
		},
		[HNS_ROCE_QP_STATE_RTR] = {
		[HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
		[HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
		[HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP,
		},
		[HNS_ROCE_QP_STATE_RTS] = {
		[HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
		[HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
		[HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP,
		[HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP,
		},
		[HNS_ROCE_QP_STATE_SQD] = {
		[HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
		[HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
		[HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP,
		[HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP,
		},
		[HNS_ROCE_QP_STATE_ERR] = {
		[HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
		[HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
		}
	};

	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = &hr_dev->pdev->dev;
	int ret = 0;

	if (cur_state >= HNS_ROCE_QP_NUM_STATE ||
	    new_state >= HNS_ROCE_QP_NUM_STATE ||
	    !op[cur_state][new_state]) {
		dev_err(dev, "[modify_qp]not support state %d to %d\n",
			cur_state, new_state);
		return -EINVAL;
	}

	if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
		return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
					 HNS_ROCE_CMD_2RST_QP,
					 HNS_ROCE_CMD_TIMEOUT_MSECS);

	if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
		return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
					 HNS_ROCE_CMD_2ERR_QP,
					 HNS_ROCE_CMD_TIMEOUT_MSECS);

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, context, sizeof(*context));

	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
				op[cur_state][new_state],
				HNS_ROCE_CMD_TIMEOUT_MSECS);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}

static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			     int attr_mask, enum ib_qp_state cur_state,
			     enum ib_qp_state new_state)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_sqp_context *context;
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t dma_handle = 0;
	u32 __iomem *addr;
	int rq_pa_start;
	__le32 tmp;
	u32 reg_val;
	u64 *mtts;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	/* Search QP buf's MTTs */
	mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
				   hr_qp->mtt.first_seg, &dma_handle);
	if (!mtts) {
		dev_err(dev, "qp buf pa find failed\n");
		goto out;
	}

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		roce_set_field(context->qp1c_bytes_4,
			       QP1C_BYTES_4_SQ_WQE_SHIFT_M,
			       QP1C_BYTES_4_SQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
		roce_set_field(context->qp1c_bytes_4,
			       QP1C_BYTES_4_RQ_WQE_SHIFT_M,
			       QP1C_BYTES_4_RQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
		roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
			       QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);

		context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
		roce_set_field(context->qp1c_bytes_12,
			       QP1C_BYTES_12_SQ_RQ_BT_H_M,
			       QP1C_BYTES_12_SQ_RQ_BT_H_S,
			       ((u32)(dma_handle >> 32)));

		roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
			       QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
		roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
			       QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
		roce_set_bit(context->qp1c_bytes_16,
			     QP1C_BYTES_16_SIGNALING_TYPE_S,
			     le32_to_cpu(hr_qp->sq_signal_bits));
		roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
			     1);
		roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
			     1);
		roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S,
			     0);

		roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M,
			       QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head);
		roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
			       QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);

		rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
		context->cur_rq_wqe_ba_l =
				cpu_to_le32((u32)(mtts[rq_pa_start]));

		roce_set_field(context->qp1c_bytes_28,
			       QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
			       QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
			       (mtts[rq_pa_start]) >> 32);
		roce_set_field(context->qp1c_bytes_28,
			       QP1C_BYTES_28_RQ_CUR_IDX_M,
			       QP1C_BYTES_28_RQ_CUR_IDX_S, 0);

		roce_set_field(context->qp1c_bytes_32,
			       QP1C_BYTES_32_RX_CQ_NUM_M,
			       QP1C_BYTES_32_RX_CQ_NUM_S,
			       to_hr_cq(ibqp->recv_cq)->cqn);
		roce_set_field(context->qp1c_bytes_32,
			       QP1C_BYTES_32_TX_CQ_NUM_M,
			       QP1C_BYTES_32_TX_CQ_NUM_S,
			       to_hr_cq(ibqp->send_cq)->cqn);

		context->cur_sq_wqe_ba_l = cpu_to_le32((u32)mtts[0]);

		roce_set_field(context->qp1c_bytes_40,
			       QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
			       QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
			       (mtts[0]) >> 32);
		roce_set_field(context->qp1c_bytes_40,
			       QP1C_BYTES_40_SQ_CUR_IDX_M,
			       QP1C_BYTES_40_SQ_CUR_IDX_S, 0);

		/* Copy context to QP1C register */
		addr = (u32 __iomem *)(hr_dev->reg_base +
				       ROCEE_QP1C_CFG0_0_REG +
				       hr_qp->phy_port * sizeof(*context));

		writel(le32_to_cpu(context->qp1c_bytes_4), addr);
		writel(le32_to_cpu(context->sq_rq_bt_l), addr + 1);
		writel(le32_to_cpu(context->qp1c_bytes_12), addr + 2);
		writel(le32_to_cpu(context->qp1c_bytes_16), addr + 3);
		writel(le32_to_cpu(context->qp1c_bytes_20), addr + 4);
		writel(le32_to_cpu(context->cur_rq_wqe_ba_l), addr + 5);
		writel(le32_to_cpu(context->qp1c_bytes_28), addr + 6);
		writel(le32_to_cpu(context->qp1c_bytes_32), addr + 7);
		writel(le32_to_cpu(context->cur_sq_wqe_ba_l), addr + 8);
		writel(le32_to_cpu(context->qp1c_bytes_40), addr + 9);
	}

	/* Modify QP1C status */
	reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
			    hr_qp->phy_port * sizeof(*context));
	tmp = cpu_to_le32(reg_val);
	roce_set_field(tmp, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
		       ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
	reg_val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
		   hr_qp->phy_port * sizeof(*context), reg_val);

	hr_qp->state = new_state;
	if (new_state == IB_QPS_RESET) {
		hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
				     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
		if (ibqp->send_cq != ibqp->recv_cq)
			hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
					     hr_qp->qpn, NULL);

		hr_qp->rq.head = 0;
		hr_qp->rq.tail = 0;
		hr_qp->sq.head = 0;
		hr_qp->sq.tail = 0;
		hr_qp->sq_next_wqe = 0;
	}

	kfree(context);
	return 0;

out:
	kfree(context);
	return -EINVAL;
}

static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
			    int attr_mask, enum ib_qp_state cur_state,
			    enum ib_qp_state new_state)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_qp_context *context;
	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
	dma_addr_t dma_handle_2 = 0;
	dma_addr_t dma_handle = 0;
	__le32 doorbell[2] = {0};
	int rq_pa_start = 0;
	u64 *mtts_2 = NULL;
	int ret = -EINVAL;
	u64 *mtts = NULL;
	int port;
	u8 port_num;
	u8 *dmac;
	u8 *smac;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	/* Search qp buf's mtts */
	mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
				   hr_qp->mtt.first_seg, &dma_handle);
	if (mtts == NULL) {
		dev_err(dev, "qp buf pa find failed\n");
		goto out;
	}

	/* Search IRRL's mtts */
	mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
				     hr_qp->qpn, &dma_handle_2);
	if (mtts_2 == NULL) {
		dev_err(dev, "qp irrl_table find failed\n");
		goto out;
	}

	/*
	 * Reset to init
	 *	Mandatory param:
	 *	IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
	 *	Optional param: NA
	 */
	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
			       QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
			       to_hr_qp_type(hr_qp->ibqp.qp_type));
		roce_set_bit(context->qpc_bytes_4,
			     QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
		roce_set_bit(context->qpc_bytes_4,
			     QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
			     !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
		roce_set_bit(context->qpc_bytes_4,
			     QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
			     !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE));
		roce_set_bit(context->qpc_bytes_4,
			     QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S,
			     !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC));
		roce_set_bit(context->qpc_bytes_4,
			     QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
			       QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
			       QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_PD_M,
			       QP_CONTEXT_QPC_BYTES_4_PD_S,
			       to_hr_pd(ibqp->pd)->pdn);
		hr_qp->access_flags = attr->qp_access_flags;
		roce_set_field(context->qpc_bytes_8,
			       QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
			       QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
			       to_hr_cq(ibqp->send_cq)->cqn);
		roce_set_field(context->qpc_bytes_8,
			       QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
			       QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
			       to_hr_cq(ibqp->recv_cq)->cqn);

		if (ibqp->srq)
			roce_set_field(context->qpc_bytes_12,
				       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
				       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
				       to_hr_srq(ibqp->srq)->srqn);

		roce_set_field(context->qpc_bytes_12,
			       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
			       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
			       attr->pkey_index);
		hr_qp->pkey_index = attr->pkey_index;
		roce_set_field(context->qpc_bytes_16,
			       QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
			       QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
			       QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
			       to_hr_qp_type(hr_qp->ibqp.qp_type));
		roce_set_bit(context->qpc_bytes_4,
			     QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
		if (attr_mask & IB_QP_ACCESS_FLAGS) {
			roce_set_bit(context->qpc_bytes_4,
				     QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
				     !!(attr->qp_access_flags &
				     IB_ACCESS_REMOTE_READ));
			roce_set_bit(context->qpc_bytes_4,
				     QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
				     !!(attr->qp_access_flags &
				     IB_ACCESS_REMOTE_WRITE));
		} else {
			roce_set_bit(context->qpc_bytes_4,
				     QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
				     !!(hr_qp->access_flags &
				     IB_ACCESS_REMOTE_READ));
			roce_set_bit(context->qpc_bytes_4,
				     QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
				     !!(hr_qp->access_flags &
				     IB_ACCESS_REMOTE_WRITE));
		}

		roce_set_bit(context->qpc_bytes_4,
			     QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
			       QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
			       QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
			       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
		roce_set_field(context->qpc_bytes_4,
			       QP_CONTEXT_QPC_BYTES_4_PD_M,
			       QP_CONTEXT_QPC_BYTES_4_PD_S,
			       to_hr_pd(ibqp->pd)->pdn);
		roce_set_field(context->qpc_bytes_8,
			       QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
			       QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
			       to_hr_cq(ibqp->send_cq)->cqn);
		roce_set_field(context->qpc_bytes_8,
			       QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
			       QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
			       to_hr_cq(ibqp->recv_cq)->cqn);

		if (ibqp->srq)
			roce_set_field(context->qpc_bytes_12,
				       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
				       QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
				       to_hr_srq(ibqp->srq)->srqn);
		if (attr_mask & IB_QP_PKEY_INDEX)
			roce_set_field(context->qpc_bytes_12,
				       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
				       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
				       attr->pkey_index);
		else
			roce_set_field(context->qpc_bytes_12,
				       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
				       QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
				       hr_qp->pkey_index);

		roce_set_field(context->qpc_bytes_16,
			       QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
			       QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		if ((attr_mask & IB_QP_ALT_PATH) ||
		    (attr_mask & IB_QP_ACCESS_FLAGS) ||
		    (attr_mask & IB_QP_PKEY_INDEX) ||
		    (attr_mask & IB_QP_QKEY)) {
			dev_err(dev, "INIT2RTR attr_mask error\n");
			goto out;
		}

		dmac = (u8 *)attr->ah_attr.roce.dmac;

		context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
		roce_set_field(context->qpc_bytes_24,
			       QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
			       QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
			       ((u32)(dma_handle >> 32)));
		roce_set_bit(context->qpc_bytes_24,
			     QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
			     1);
		roce_set_field(context->qpc_bytes_24,
			       QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
			       QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S,
			       attr->min_rnr_timer);
		context->irrl_ba_l = cpu_to_le32((u32)(dma_handle_2));
		roce_set_field(context->qpc_bytes_32,
			       QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M,
			       QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S,
			       ((u32)(dma_handle_2 >> 32)) &
			       QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M);
		roce_set_field(context->qpc_bytes_32,
			       QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M,
			       QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0);
		roce_set_bit(context->qpc_bytes_32,
			     QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S,
			     1);
		roce_set_bit(context->qpc_bytes_32,
			     QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
			     le32_to_cpu(hr_qp->sq_signal_bits));

		port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
			hr_qp->port;
		smac = (u8 *)hr_dev->dev_addr[port];
		/* When dmac equals smac or loop_idc is 1, enable loopback */
		if (ether_addr_equal_unaligned(dmac, smac) ||
		    hr_dev->loop_idc == 0x1)
			roce_set_bit(context->qpc_bytes_32,
				     QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);

		roce_set_bit(context->qpc_bytes_32,
			     QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
			     rdma_ah_get_ah_flags(&attr->ah_attr));
		roce_set_field(context->qpc_bytes_32,
			       QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
			       QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
			       ilog2((unsigned int)attr->max_dest_rd_atomic));

		if (attr_mask & IB_QP_DEST_QPN)
			roce_set_field(context->qpc_bytes_36,
				       QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
				       QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
				       attr->dest_qp_num);

		/* Configure GID index */
		port_num = rdma_ah_get_port_num(&attr->ah_attr);
		roce_set_field(context->qpc_bytes_36,
			       QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
			       QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S,
			       hns_get_gid_index(hr_dev,
						 port_num - 1,
						 grh->sgid_index));

		memcpy(&(context->dmac_l), dmac, 4);

		roce_set_field(context->qpc_bytes_44,
			       QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
			       QP_CONTEXT_QPC_BYTES_44_DMAC_H_S,
			       *((u16 *)(&dmac[4])));
		roce_set_field(context->qpc_bytes_44,
			       QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M,
			       QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S,
			       rdma_ah_get_static_rate(&attr->ah_attr));
		roce_set_field(context->qpc_bytes_44,
			       QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
			       QP_CONTEXT_QPC_BYTES_44_HOPLMT_S,
			       grh->hop_limit);
		roce_set_field(context->qpc_bytes_48,
			       QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
			       QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S,
			       grh->flow_label);
		roce_set_field(context->qpc_bytes_48,
			       QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
			       QP_CONTEXT_QPC_BYTES_48_TCLASS_S,
			       grh->traffic_class);
		roce_set_field(context->qpc_bytes_48,
			       QP_CONTEXT_QPC_BYTES_48_MTU_M,
			       QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu);

		memcpy(context->dgid, grh->dgid.raw,
		       sizeof(grh->dgid.raw));

		dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l,
			roce_get_field(context->qpc_bytes_44,
				       QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
				       QP_CONTEXT_QPC_BYTES_44_DMAC_H_S));

		roce_set_field(context->qpc_bytes_68,
			       QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
			       QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
			       hr_qp->rq.head);
		roce_set_field(context->qpc_bytes_68,
			       QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
			       QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);

		rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
		context->cur_rq_wqe_ba_l =
				cpu_to_le32((u32)(mtts[rq_pa_start]));

		roce_set_field(context->qpc_bytes_76,
			       QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
			       QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
			       mtts[rq_pa_start] >> 32);
		roce_set_field(context->qpc_bytes_76,
			       QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
			       QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);

		context->rx_rnr_time = 0;

		roce_set_field(context->qpc_bytes_84,
			       QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M,
			       QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S,
			       attr->rq_psn - 1);
		roce_set_field(context->qpc_bytes_84,
			       QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M,
			       QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0);

		roce_set_field(context->qpc_bytes_88,
			       QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
			       QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S,
			       attr->rq_psn);
		roce_set_bit(context->qpc_bytes_88,
			     QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0);
		roce_set_bit(context->qpc_bytes_88,
			     QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0);
		roce_set_field(context->qpc_bytes_88,
			       QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M,
			       QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S,
			       0);
		roce_set_field(context->qpc_bytes_88,
			       QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M,
			       QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S,
			       0);

		context->dma_length = 0;
		context->r_key = 0;
		context->va_l = 0;
		context->va_h = 0;

		roce_set_field(context->qpc_bytes_108,
			       QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M,
			       QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0);
		roce_set_bit(context->qpc_bytes_108,
			     QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0);
		roce_set_bit(context->qpc_bytes_108,
			     QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0);

		roce_set_field(context->qpc_bytes_112,
			       QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M,
			       QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0);
		roce_set_field(context->qpc_bytes_112,
			       QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M,
			       QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0);

		/* For chip resp ack */
		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
			       QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
			       hr_qp->phy_port);
		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_SL_M,
			       QP_CONTEXT_QPC_BYTES_156_SL_S,
			       rdma_ah_get_sl(&attr->ah_attr));
		hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
	} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
		/* If any optional param is set, return an error */
		if ((attr_mask & IB_QP_ALT_PATH) ||
		    (attr_mask & IB_QP_ACCESS_FLAGS) ||
		    (attr_mask & IB_QP_QKEY) ||
		    (attr_mask & IB_QP_PATH_MIG_STATE) ||
		    (attr_mask & IB_QP_CUR_STATE) ||
		    (attr_mask & IB_QP_MIN_RNR_TIMER)) {
			dev_err(dev, "RTR2RTS attr_mask error\n");
			goto out;
		}

		context->rx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));

		roce_set_field(context->qpc_bytes_120,
			       QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
			       QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
			       (mtts[0]) >> 32);

		roce_set_field(context->qpc_bytes_124,
			       QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
			       QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0);
		roce_set_field(context->qpc_bytes_124,
			       QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M,
			       QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0);

		roce_set_field(context->qpc_bytes_128,
			       QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M,
			       QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S,
			       attr->sq_psn);
		roce_set_bit(context->qpc_bytes_128,
			     QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0);
		roce_set_field(context->qpc_bytes_128,
			       QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M,
			       QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S,
			       0);
		roce_set_bit(context->qpc_bytes_128,
			     QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0);

		roce_set_field(context->qpc_bytes_132,
			       QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M,
			       QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0);
		roce_set_field(context->qpc_bytes_132,
			       QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M,
			       QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0);

		roce_set_field(context->qpc_bytes_136,
			       QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M,
			       QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S,
			       attr->sq_psn);
		roce_set_field(context->qpc_bytes_136,
			       QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M,
			       QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S,
			       attr->sq_psn);

		roce_set_field(context->qpc_bytes_140,
			       QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M,
			       QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S,
			       (attr->sq_psn >> SQ_PSN_SHIFT));
		roce_set_field(context->qpc_bytes_140,
			       QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M,
			       QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0);
		roce_set_bit(context->qpc_bytes_140,
			     QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);

		roce_set_field(context->qpc_bytes_148,
			       QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
			       QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
		roce_set_field(context->qpc_bytes_148,
			       QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
			       QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
			       attr->retry_cnt);
		roce_set_field(context->qpc_bytes_148,
			       QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
			       QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
			       attr->rnr_retry);
		roce_set_field(context->qpc_bytes_148,
			       QP_CONTEXT_QPC_BYTES_148_LSN_M,
			       QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);

		context->rnr_retry = 0;

		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
			       QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
			       attr->retry_cnt);
		if (attr->timeout < 0x12) {
			dev_info(dev, "ack timeout value(0x%x) must be larger than 0x12.\n",
				 attr->timeout);
			roce_set_field(context->qpc_bytes_156,
				       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
				       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
				       0x12);
		} else {
			roce_set_field(context->qpc_bytes_156,
				       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
				       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
				       attr->timeout);
		}
		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
			       QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
			       attr->rnr_retry);
		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
			       QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
			       hr_qp->phy_port);
		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_SL_M,
			       QP_CONTEXT_QPC_BYTES_156_SL_S,
			       rdma_ah_get_sl(&attr->ah_attr));
		hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
			       QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S,
			       ilog2((unsigned int)attr->max_rd_atomic));
		roce_set_field(context->qpc_bytes_156,
			       QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M,
			       QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0);
		context->pkt_use_len = 0;

		roce_set_field(context->qpc_bytes_164,
			       QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
			       QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn);
		roce_set_field(context->qpc_bytes_164,
			       QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M,
			       QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0);

		roce_set_field(context->qpc_bytes_168,
			       QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M,
			       QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S,
			       attr->sq_psn);
		roce_set_field(context->qpc_bytes_168,
			       QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M,
			       QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0);
		roce_set_field(context->qpc_bytes_168,
			       QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M,
			       QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0);
		roce_set_bit(context->qpc_bytes_168,
			     QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0);
		roce_set_bit(context->qpc_bytes_168,
			     QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0);
		roce_set_bit(context->qpc_bytes_168,
			     QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0);
		context->sge_use_len = 0;

		roce_set_field(context->qpc_bytes_176,
			       QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M,
			       QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0);
		roce_set_field(context->qpc_bytes_176,
			       QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M,
			       QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S,
			       0);
		roce_set_field(context->qpc_bytes_180,
			       QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M,
			       QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0);
		roce_set_field(context->qpc_bytes_180,
			       QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
			       QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);

		context->tx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));

		roce_set_field(context->qpc_bytes_188,
			       QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
			       QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
			       (mtts[0]) >> 32);
		roce_set_bit(context->qpc_bytes_188,
			     QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
		roce_set_field(context->qpc_bytes_188,
			       QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
			       QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
			       0);
	} else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
		   (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
		   (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
		   (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
		   (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
		   (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
		   (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
		   (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
		dev_err(dev, "Unsupported QP state migration\n");
		goto out;
	}

	/* Every state migration must update the QP state field */
	roce_set_field(context->qpc_bytes_144,
		       QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
		       QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);

	/* SW pass context to HW */
	ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt,
				    to_hns_roce_state(cur_state),
				    to_hns_roce_state(new_state), context,
				    hr_qp);
	if (ret) {
		dev_err(dev, "hns_roce_qp_modify failed\n");
		goto out;
	}

	/*
	 * The driver uses rst2init in place of init2init, so the hardware
	 * needs to refresh the RQ head from the doorbell again.
	 */
	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
		/* Memory barrier */
		wmb();

		roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
			       RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
		roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
			       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
		roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
			       RQ_DOORBELL_U32_8_CMD_S, 1);
		roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);

		if (ibqp->uobject) {
			hr_qp->rq.db_reg_l = hr_dev->reg_base +
				     hr_dev->odb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		}

		hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
	}

	hr_qp->state = new_state;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		hr_qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT) {
		hr_qp->port = attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
	}

	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
				     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
		if (ibqp->send_cq != ibqp->recv_cq)
			hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
					     hr_qp->qpn, NULL);

		hr_qp->rq.head = 0;
		hr_qp->rq.tail = 0;
		hr_qp->sq.head = 0;
		hr_qp->sq.tail = 0;
		hr_qp->sq_next_wqe = 0;
	}
out:
	kfree(context);
	return ret;
}
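
/*
 * GSI/SMI QPs are backed by the hardware's special QP1C context, so they
 * are dispatched to the SQP variant; all other QP types go through the
 * full QP context path above.
 */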
static int hns_roce_v1_modify_qp(struct ib_qp *ibqp,
				 const struct ib_qp_attr *attr, int attr_mask,
				 enum ib_qp_state cur_state,
				 enum ib_qp_state new_state)
{
	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
		return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state,
					 new_state);
	else
		return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state,
					new_state);
}

static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state)
{
	switch (state) {
	case HNS_ROCE_QP_STATE_RST:
		return IB_QPS_RESET;
	case HNS_ROCE_QP_STATE_INIT:
		return IB_QPS_INIT;
	case HNS_ROCE_QP_STATE_RTR:
		return IB_QPS_RTR;
	case HNS_ROCE_QP_STATE_RTS:
		return IB_QPS_RTS;
	case HNS_ROCE_QP_STATE_SQD:
		return IB_QPS_SQD;
	case HNS_ROCE_QP_STATE_ERR:
		return IB_QPS_ERR;
	default:
		/* Return -1 so the caller's unknown-state check can fire */
		return -1;
	}
}
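
/*
 * Fetch the QP context from hardware with a QUERY_QP mailbox command; on
 * success the context is copied out of the mailbox buffer.
 */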
static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
				 struct hns_roce_qp *hr_qp,
				 struct hns_roce_qp_context *hr_context)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
				HNS_ROCE_CMD_QUERY_QP,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (!ret)
		memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
	else
		dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n");

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}
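
/*
 * Query a special QP (QP0/QP1): the QP1C context is read directly from
 * register space rather than through a mailbox command.  The access flags
 * reported below (0x6) correspond to REMOTE_WRITE | REMOTE_READ.
 */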
static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
			     int qp_attr_mask,
			     struct ib_qp_init_attr *qp_init_attr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_sqp_context context;
	u32 addr;

	mutex_lock(&hr_qp->mutex);

	if (hr_qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	addr = ROCEE_QP1C_CFG0_0_REG +
	       hr_qp->port * sizeof(struct hns_roce_sqp_context);
	context.qp1c_bytes_4 = cpu_to_le32(roce_read(hr_dev, addr));
	context.sq_rq_bt_l = cpu_to_le32(roce_read(hr_dev, addr + 1));
	context.qp1c_bytes_12 = cpu_to_le32(roce_read(hr_dev, addr + 2));
	context.qp1c_bytes_16 = cpu_to_le32(roce_read(hr_dev, addr + 3));
	context.qp1c_bytes_20 = cpu_to_le32(roce_read(hr_dev, addr + 4));
	context.cur_rq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 5));
	context.qp1c_bytes_28 = cpu_to_le32(roce_read(hr_dev, addr + 6));
	context.qp1c_bytes_32 = cpu_to_le32(roce_read(hr_dev, addr + 7));
	context.cur_sq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 8));
	context.qp1c_bytes_40 = cpu_to_le32(roce_read(hr_dev, addr + 9));

	hr_qp->state = roce_get_field(context.qp1c_bytes_4,
				      QP1C_BYTES_4_QP_STATE_M,
				      QP1C_BYTES_4_QP_STATE_S);
	qp_attr->qp_state = hr_qp->state;
	qp_attr->path_mtu = IB_MTU_256;
	qp_attr->path_mig_state = IB_MIG_ARMED;
	qp_attr->qkey = QKEY_VAL;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	qp_attr->rq_psn = 0;
	qp_attr->sq_psn = 0;
	qp_attr->dest_qp_num = 1;
	qp_attr->qp_access_flags = 6;

	qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
					     QP1C_BYTES_20_PKEY_IDX_M,
					     QP1C_BYTES_20_PKEY_IDX_S);
	qp_attr->port_num = hr_qp->port + 1;
	qp_attr->sq_draining = 0;
	qp_attr->max_rd_atomic = 0;
	qp_attr->max_dest_rd_atomic = 0;
	qp_attr->min_rnr_timer = 0;
	qp_attr->timeout = 0;
	qp_attr->retry_cnt = 0;
	qp_attr->rnr_retry = 0;
	qp_attr->alt_timeout = 0;

done:
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
	qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
	qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
	qp_attr->cap.max_inline_data = 0;
	qp_init_attr->cap = qp_attr->cap;
	qp_init_attr->create_flags = 0;

	mutex_unlock(&hr_qp->mutex);

	return 0;
}
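
/*
 * Query a regular QP: read the context back via mailbox and translate the
 * hardware fields into ib_qp_attr.  For user QPs the SQ capabilities are
 * reported as zero because the SQ is managed by userspace.
 */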
static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
			    int qp_attr_mask,
			    struct ib_qp_init_attr *qp_init_attr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_qp_context *context;
	int tmp_qp_state = 0;
	int ret = 0;
	int state;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	memset(qp_attr, 0, sizeof(*qp_attr));
	memset(qp_init_attr, 0, sizeof(*qp_init_attr));

	mutex_lock(&hr_qp->mutex);

	if (hr_qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context);
	if (ret) {
		dev_err(dev, "query qpc error\n");
		ret = -EINVAL;
		goto out;
	}

	state = roce_get_field(context->qpc_bytes_144,
			       QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
			       QP_CONTEXT_QPC_BYTES_144_QP_STATE_S);
	tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state);
	if (tmp_qp_state == -1) {
		dev_err(dev, "to_ib_qp_state error\n");
		ret = -EINVAL;
		goto out;
	}
	hr_qp->state = (u8)tmp_qp_state;
	qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
	qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48,
						QP_CONTEXT_QPC_BYTES_48_MTU_M,
						QP_CONTEXT_QPC_BYTES_48_MTU_S);
	qp_attr->path_mig_state = IB_MIG_ARMED;
	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	if (hr_qp->ibqp.qp_type == IB_QPT_UD)
		qp_attr->qkey = QKEY_VAL;

	qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88,
					 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
					 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S);
	qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164,
					      QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
					      QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S);
	qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36,
					QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
					QP_CONTEXT_QPC_BYTES_36_DEST_QP_S);
	qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4,
			QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) |
				   ((roce_get_bit(context->qpc_bytes_4,
			QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) |
				   ((roce_get_bit(context->qpc_bytes_4,
			QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);

	if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
	    hr_qp->ibqp.qp_type == IB_QPT_UC) {
		struct ib_global_route *grh =
			rdma_ah_retrieve_grh(&qp_attr->ah_attr);

		rdma_ah_set_sl(&qp_attr->ah_attr,
			       roce_get_field(context->qpc_bytes_156,
					      QP_CONTEXT_QPC_BYTES_156_SL_M,
					      QP_CONTEXT_QPC_BYTES_156_SL_S));
		rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
		grh->flow_label =
			roce_get_field(context->qpc_bytes_48,
				       QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
				       QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S);
		grh->sgid_index =
			roce_get_field(context->qpc_bytes_36,
				       QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
				       QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S);
		grh->hop_limit =
			roce_get_field(context->qpc_bytes_44,
				       QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
				       QP_CONTEXT_QPC_BYTES_44_HOPLMT_S);
		grh->traffic_class =
			roce_get_field(context->qpc_bytes_48,
				       QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
				       QP_CONTEXT_QPC_BYTES_48_TCLASS_S);

		memcpy(grh->dgid.raw, context->dgid,
		       sizeof(grh->dgid.raw));
	}

	qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
			      QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
			      QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
	qp_attr->port_num = hr_qp->port + 1;
	qp_attr->sq_draining = 0;
	qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156,
			 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
			 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
	qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32,
			 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
			 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
	qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24,
			QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
			QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S));
	qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156,
			    QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
			    QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S));
	qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
			     QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
			     QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
	qp_attr->rnr_retry = (u8)context->rnr_retry;

done:
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	qp_init_attr->cap = qp_attr->cap;

out:
	mutex_unlock(&hr_qp->mutex);
	kfree(context);

	return ret;
}

static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
				int qp_attr_mask,
				struct ib_qp_init_attr *qp_init_attr)
{
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);

	return hr_qp->doorbell_qpn <= 1 ?
		hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) :
		hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
}
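
/*
 * Sample the send doorbell pointer and retry counter and compare them with
 * the values recorded earlier; once the hardware has advanced by more than
 * SDB_ST_CMP_VAL, the outstanding doorbells can be considered processed and
 * *success_flags is set.
 */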
static void hns_roce_check_sdb_status(struct hns_roce_dev *hr_dev,
				      u32 *old_send, u32 *old_retry,
				      u32 *tsp_st, u32 *success_flags)
{
	__le32 *old_send_tmp, *old_retry_tmp;
	u32 sdb_retry_cnt;
	u32 sdb_send_ptr;
	u32 cur_cnt, old_cnt;
	__le32 tmp, tmp1;
	u32 send_ptr;

	sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
	sdb_retry_cnt = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);
	tmp = cpu_to_le32(sdb_send_ptr);
	tmp1 = cpu_to_le32(sdb_retry_cnt);
	cur_cnt = roce_get_field(tmp, ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
				 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
		  roce_get_field(tmp1, ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
				 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);

	old_send_tmp = (__le32 *)old_send;
	old_retry_tmp = (__le32 *)old_retry;
	if (!roce_get_bit(*tsp_st, ROCEE_CNT_CLR_CE_CNT_CLR_CE_S)) {
		old_cnt = roce_get_field(*old_send_tmp,
					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
			  roce_get_field(*old_retry_tmp,
					 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
					 ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
		if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
			*success_flags = 1;
	} else {
		old_cnt = roce_get_field(*old_send_tmp,
					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
					 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
		if (cur_cnt - old_cnt > SDB_ST_CMP_VAL) {
			*success_flags = 1;
		} else {
			send_ptr = roce_get_field(*old_send_tmp,
					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
				   roce_get_field(tmp1,
					ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M,
					ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S);
			roce_set_field(*old_send_tmp,
				       ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
				       ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S,
				       send_ptr);
		}
	}
}
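
/*
 * Two-stage wait used before a QP can be safely destroyed:
 * stage 1 waits for the hardware send doorbell pointer to catch up with the
 * doorbells issued for this QP; stage 2 waits for the doorbell invalidate
 * counter to advance past the recorded value.  A timeout here is not fatal:
 * the function returns 0 with *wait_stage unchanged so that the caller can
 * retry, e.g. from the destroy-QP workqueue.
 */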
static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
				      struct hns_roce_qp *hr_qp,
				      u32 sdb_issue_ptr,
				      u32 *sdb_inv_cnt,
				      u32 *wait_stage)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 sdb_send_ptr, old_send;
	__le32 sdb_issue_ptr_tmp;
	__le32 sdb_send_ptr_tmp;
	u32 success_flags = 0;
	unsigned long end;
	u32 old_retry;
	u32 inv_cnt;
	u32 tsp_st;
	__le32 tmp;

	if (*wait_stage > HNS_ROCE_V1_DB_STAGE2 ||
	    *wait_stage < HNS_ROCE_V1_DB_STAGE1) {
		dev_err(dev, "QP(0x%lx) db status wait stage(%d) error!\n",
			hr_qp->qpn, *wait_stage);
		return -EINVAL;
	}

	/* Calculate the total timeout for the entire verification process */
	end = msecs_to_jiffies(HNS_ROCE_V1_CHECK_DB_TIMEOUT_MSECS) + jiffies;

	if (*wait_stage == HNS_ROCE_V1_DB_STAGE1) {
		/* Poll the doorbell processing status until the hardware has
		 * processed all doorbells issued for this QP.
		 */
		sdb_send_ptr = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
		while (roce_hw_index_cmp_lt(sdb_send_ptr, sdb_issue_ptr,
					    ROCEE_SDB_PTR_CMP_BITS)) {
			if (!time_before(jiffies, end)) {
				dev_dbg(dev, "QP(0x%lx) db process stage1 timeout. issue 0x%x send 0x%x.\n",
					hr_qp->qpn, sdb_issue_ptr,
					sdb_send_ptr);
				return 0;
			}

			msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
			sdb_send_ptr = roce_read(hr_dev,
						 ROCEE_SDB_SEND_PTR_REG);
		}

		sdb_send_ptr_tmp = cpu_to_le32(sdb_send_ptr);
		sdb_issue_ptr_tmp = cpu_to_le32(sdb_issue_ptr);
		if (roce_get_field(sdb_issue_ptr_tmp,
				   ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M,
				   ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S) ==
		    roce_get_field(sdb_send_ptr_tmp,
				   ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
				   ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)) {
			old_send = roce_read(hr_dev, ROCEE_SDB_SEND_PTR_REG);
			old_retry = roce_read(hr_dev, ROCEE_SDB_RETRY_CNT_REG);

			do {
				tsp_st = roce_read(hr_dev, ROCEE_TSP_BP_ST_REG);
				tmp = cpu_to_le32(tsp_st);
				if (roce_get_bit(tmp,
					ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S) == 1) {
					*wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
					return 0;
				}

				if (!time_before(jiffies, end)) {
					dev_dbg(dev, "QP(0x%lx) db process stage1 timeout when send ptr equals issue ptr.\n"
						     "issue 0x%x send 0x%x.\n",
						hr_qp->qpn,
						le32_to_cpu(sdb_issue_ptr_tmp),
						le32_to_cpu(sdb_send_ptr_tmp));
					return 0;
				}

				msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);

				hns_roce_check_sdb_status(hr_dev, &old_send,
							  &old_retry, &tsp_st,
							  &success_flags);
			} while (!success_flags);
		}

		*wait_stage = HNS_ROCE_V1_DB_STAGE2;

		/* Get list pointer */
		*sdb_inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
		dev_dbg(dev, "QP(0x%lx) db process stage2. inv cnt = 0x%x.\n",
			hr_qp->qpn, *sdb_inv_cnt);
	}

	if (*wait_stage == HNS_ROCE_V1_DB_STAGE2) {
		/* Poll the doorbell invalidate counter until the hardware
		 * has flushed the stale doorbells.
		 */
		inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
		while (roce_hw_index_cmp_lt(inv_cnt,
					    *sdb_inv_cnt + SDB_INV_CNT_OFFSET,
					    ROCEE_SDB_CNT_CMP_BITS)) {
			if (!time_before(jiffies, end)) {
				dev_dbg(dev, "QP(0x%lx) db process stage2 timeout. inv cnt 0x%x.\n",
					hr_qp->qpn, inv_cnt);
				return 0;
			}

			msleep(HNS_ROCE_V1_CHECK_DB_SLEEP_MSECS);
			inv_cnt = roce_read(hr_dev, ROCEE_SDB_INV_CNT_REG);
		}

		*wait_stage = HNS_ROCE_V1_DB_WAIT_OK;
	}

	return 0;
}
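
/*
 * Move a QP that is not yet in RESET through ERR and back to RESET before
 * it is destroyed.  If the hardware does not finish processing the QP's
 * doorbells within the timeout, *is_timeout is set and the rest of the
 * teardown is deferred to the destroy-QP workqueue.
 */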
static int check_qp_reset_state(struct hns_roce_dev *hr_dev,
				struct hns_roce_qp *hr_qp,
				struct hns_roce_qp_work *qp_work_entry,
				int *is_timeout)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 sdb_issue_ptr;
	int ret;

	if (hr_qp->state != IB_QPS_RESET) {
		/*
		 * Move the QP to ERR so that the hardware finishes
		 * processing all of its doorbells.
		 */
		ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
					    IB_QPS_ERR);
		if (ret) {
			dev_err(dev, "Modify QP(0x%lx) to ERR failed!\n",
				hr_qp->qpn);
			return ret;
		}

		/* Record issued doorbell */
		sdb_issue_ptr = roce_read(hr_dev, ROCEE_SDB_ISSUE_PTR_REG);
		qp_work_entry->sdb_issue_ptr = sdb_issue_ptr;
		qp_work_entry->db_wait_stage = HNS_ROCE_V1_DB_STAGE1;

		/* Poll the doorbell status until the hardware is done */
		ret = check_qp_db_process_status(hr_dev, hr_qp, sdb_issue_ptr,
						 &qp_work_entry->sdb_inv_cnt,
						 &qp_work_entry->db_wait_stage);
		if (ret) {
			dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
				hr_qp->qpn);
			return ret;
		}

		if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK) {
			qp_work_entry->sche_cnt = 0;
			*is_timeout = 1;
			return 0;
		}

		/* Modify qp to reset before destroying qp */
		ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
					    IB_QPS_RESET);
		if (ret) {
			dev_err(dev, "Modify QP(0x%lx) to RST failed!\n",
				hr_qp->qpn);
			return ret;
		}
	}

	return 0;
}
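
/*
 * Deferred destroy path: re-check the doorbell status from the workqueue,
 * requeueing the work until the hardware is done, then move the QP to RESET
 * and release its resources.
 */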
static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
{
	struct hns_roce_qp_work *qp_work_entry;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_dev *hr_dev;
	struct hns_roce_qp *hr_qp;
	struct device *dev;
	unsigned long qpn;
	int ret;

	qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
	hr_dev = to_hr_dev(qp_work_entry->ib_dev);
	dev = &hr_dev->pdev->dev;
	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	hr_qp = qp_work_entry->qp;
	qpn = hr_qp->qpn;

	dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", qpn);

	qp_work_entry->sche_cnt++;

	/* Poll the doorbell status until the hardware is done */
	ret = check_qp_db_process_status(hr_dev, hr_qp,
					 qp_work_entry->sdb_issue_ptr,
					 &qp_work_entry->sdb_inv_cnt,
					 &qp_work_entry->db_wait_stage);
	if (ret) {
		dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
			qpn);
		return;
	}

	if (qp_work_entry->db_wait_stage != HNS_ROCE_V1_DB_WAIT_OK &&
	    priv->des_qp.requeue_flag) {
		queue_work(priv->des_qp.qp_wq, work);
		return;
	}

	/* Modify qp to reset before destroying qp */
	ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
				    IB_QPS_RESET);
	if (ret) {
		dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", qpn);
		return;
	}

	hns_roce_qp_remove(hr_dev, hr_qp);
	hns_roce_qp_free(hr_dev, hr_qp);

	if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
		/* RC QP, release QPN */
		hns_roce_release_range_qp(hr_dev, qpn, 1);
		kfree(hr_qp);
	} else {
		kfree(hr_to_hr_sqp(hr_qp));
	}

	kfree(qp_work_entry);

	dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn);
}
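
/*
 * Destroy a QP.  If the hardware is still processing the QP's doorbells,
 * freeing the QP structure is handed off to the destroy-QP workqueue and
 * this function returns success right after cleaning the CQs.
 */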
int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_qp_work qp_work_entry;
	struct hns_roce_qp_work *qp_work;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_cq *send_cq, *recv_cq;
	int is_user = !!ibqp->pd->uobject;
	int is_timeout = 0;
	int ret;

	ret = check_qp_reset_state(hr_dev, hr_qp, &qp_work_entry, &is_timeout);
	if (ret) {
		dev_err(dev, "QP reset state check failed(%d)!\n", ret);
		return ret;
	}

	send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
	recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);

	hns_roce_lock_cqs(send_cq, recv_cq);
	if (!is_user) {
		__hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
				       to_hr_srq(hr_qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
	}
	hns_roce_unlock_cqs(send_cq, recv_cq);

	if (!is_timeout) {
		hns_roce_qp_remove(hr_dev, hr_qp);
		hns_roce_qp_free(hr_dev, hr_qp);

		/* RC QP, release QPN */
		if (hr_qp->ibqp.qp_type == IB_QPT_RC)
			hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
	}

	hns_roce_mtt_cleanup(hr_dev, &hr_qp->mtt);

	if (is_user) {
		ib_umem_release(hr_qp->umem);
	} else {
		kfree(hr_qp->sq.wrid);
		kfree(hr_qp->rq.wrid);
		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
	}

	if (!is_timeout) {
		if (hr_qp->ibqp.qp_type == IB_QPT_RC)
			kfree(hr_qp);
		else
			kfree(hr_to_hr_sqp(hr_qp));
	} else {
		qp_work = kzalloc(sizeof(*qp_work), GFP_KERNEL);
		if (!qp_work)
			return -ENOMEM;

		INIT_WORK(&qp_work->work, hns_roce_v1_destroy_qp_work_fn);
		qp_work->ib_dev = &hr_dev->ib_dev;
		qp_work->qp = hr_qp;
		qp_work->db_wait_stage = qp_work_entry.db_wait_stage;
		qp_work->sdb_issue_ptr = qp_work_entry.sdb_issue_ptr;
		qp_work->sdb_inv_cnt = qp_work_entry.sdb_inv_cnt;
		qp_work->sche_cnt = qp_work_entry.sche_cnt;

		priv = (struct hns_roce_v1_priv *)hr_dev->priv;
		queue_work(priv->des_qp.qp_wq, &qp_work->work);
		dev_dbg(dev, "Begin destroy QP(0x%lx) work.\n", hr_qp->qpn);
	}

	return 0;
}
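
/*
 * Destroy a CQ.  Before the CQ buffer may be freed, wait until the hardware
 * CQE write counter shows that the outstanding CQEs have been written back,
 * giving up with -ETIMEDOUT after HNS_ROCE_MAX_FREE_CQ_WAIT_CNT rounds.
 */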
static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
	struct device *dev = &hr_dev->pdev->dev;
	u32 cqe_cnt_ori;
	u32 cqe_cnt_cur;
	u32 cq_buf_size;
	int wait_time = 0;
	int ret = 0;

	hns_roce_free_cq(hr_dev, hr_cq);

	/*
	 * Before freeing cq buffer, we need to ensure that the outstanding
	 * CQEs have been written by checking the CQE counter.
	 */
	cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
	while (1) {
		if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
		    HNS_ROCE_CQE_WCMD_EMPTY_BIT)
			break;

		cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
		if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
			break;

		msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
		if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
			dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
				 hr_cq->cqn);
			ret = -ETIMEDOUT;
			break;
		}
		wait_time++;
	}

	hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);

	if (ibcq->uobject) {
		ib_umem_release(hr_cq->umem);
	} else {
		/* Free the buff of stored cq */
		cq_buf_size = (ibcq->cqe + 1) * hr_dev->caps.cq_entry_sz;
		hns_roce_buf_free(hr_dev, cq_buf_size, &hr_cq->hr_buf.hr_buf);
	}

	kfree(hr_cq);

	return ret;
}
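
/*
 * Ring the EQ consumer index doorbell; req_not sets the notification
 * request bit above the index field.
 */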
static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
{
	roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
		       (req_not << eq->log_entries), eq->doorbell);
}

static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
					    struct hns_roce_aeqe *aeqe, int qpn)
{
	struct device *dev = &hr_dev->pdev->dev;

	dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
	case HNS_ROCE_LWQCE_QPC_ERROR:
		dev_warn(dev, "QP %d, QPC error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_MTU_ERROR:
		dev_warn(dev, "QP %d, MTU error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
		dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
		dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
		dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_SL_ERROR:
		dev_warn(dev, "QP %d, SL error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_PORT_ERROR:
		dev_warn(dev, "QP %d, port error.\n", qpn);
		break;
	default:
		break;
	}
}

static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
						   struct hns_roce_aeqe *aeqe,
						   int qpn)
{
	struct device *dev = &hr_dev->pdev->dev;

	dev_warn(dev, "Local Access Violation Work Queue Error.\n");
	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
	case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
		dev_warn(dev, "QP %d, R_key violation.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_LENGTH_ERROR:
		dev_warn(dev, "QP %d, length error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_VA_ERROR:
		dev_warn(dev, "QP %d, VA error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_PD_ERROR:
		dev_err(dev, "QP %d, PD error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
		dev_warn(dev, "QP %d, rw acc error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
		dev_warn(dev, "QP %d, key state error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
		dev_warn(dev, "QP %d, MR operation error.\n", qpn);
		break;
	default:
		break;
	}
}
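
/*
 * For events on QP0/QP1 the hardware reports the per-port special QPN
 * (0 or 1); fold in the physical port to recover the driver's QPN before
 * dispatching the event.
 */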
static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev,
				      struct hns_roce_aeqe *aeqe,
				      int event_type)
{
	struct device *dev = &hr_dev->pdev->dev;
	int phy_port;
	int qpn;

	qpn = roce_get_field(aeqe->event.qp_event.qp,
			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
	phy_port = roce_get_field(aeqe->event.qp_event.qp,
				  HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
				  HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
	if (qpn <= 1)
		qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;

	switch (event_type) {
	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
			      "QP %d, phy_port %d.\n", qpn, phy_port);
		break;
	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn);
		break;
	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn);
		break;
	default:
		break;
	}

	hns_roce_qp_event(hr_dev, qpn, event_type);
}

static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
				      struct hns_roce_aeqe *aeqe,
				      int event_type)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 cqn;

	cqn = roce_get_field(aeqe->event.cq_event.cq,
			     HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
			     HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S);

	switch (event_type) {
	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		dev_warn(dev, "CQ 0x%x access err.\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		dev_warn(dev, "CQ 0x%x overflow.\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
		dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
		break;
	default:
		break;
	}

	hns_roce_cq_event(hr_dev, cqn, event_type);
}

static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev,
					   struct hns_roce_aeqe *aeqe)
{
	struct device *dev = &hr_dev->pdev->dev;

	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
	case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
		dev_warn(dev, "SDB overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
		dev_warn(dev, "SDB almost overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
		dev_warn(dev, "SDB almost empty.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
		dev_warn(dev, "ODB overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
		dev_warn(dev, "ODB almost overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
		dev_warn(dev, "ODB almost empty.\n");
		break;
	default:
		break;
	}
}
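
/*
 * EQ entries carry an owner bit that the hardware toggles on each pass
 * through the queue.  The consumer index runs over twice the queue depth,
 * so the (cons_index & entries) bit tracks the software wrap parity; an
 * entry is ready when its owner bit differs from that parity.
 */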
static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->entries - 1)) *
			    HNS_ROCE_AEQ_ENTRY_SIZE;

	return (struct hns_roce_aeqe *)((u8 *)
		(eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
		off % HNS_ROCE_BA_SIZE);
}

static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq)
{
	struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index);

	return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
		!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}

static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
			       struct hns_roce_eq *eq)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_aeqe *aeqe;
	int aeqes_found = 0;
	int event_type;

	while ((aeqe = next_aeqe_sw_v1(eq))) {
		/* Make sure we read the AEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
			roce_get_field(aeqe->asyn,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
		event_type = roce_get_field(aeqe->asyn,
					    HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
					    HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			dev_warn(dev, "PATH MIG not supported\n");
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			dev_warn(dev, "COMMUNICATION established\n");
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			dev_warn(dev, "SQ DRAINED not supported\n");
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			dev_warn(dev, "PATH MIG failed\n");
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			dev_warn(dev, "SRQ not supported!\n");
			break;
		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
			hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
			dev_warn(dev, "port change.\n");
			break;
		case HNS_ROCE_EVENT_TYPE_MB:
			hns_roce_cmd_event(hr_dev,
					   le16_to_cpu(aeqe->event.cmd.token),
					   aeqe->event.cmd.status,
					   le64_to_cpu(aeqe->event.cmd.out_param));
			break;
		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
			hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
			break;
		case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
			dev_warn(dev, "CEQ 0x%lx overflow.\n",
				 roce_get_field(aeqe->event.ce_event.ceqe,
					HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
					HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
			break;
		default:
			dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
				 event_type, eq->eqn, eq->cons_index);
			break;
		}

		eq->cons_index++;
		aeqes_found = 1;

		if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
			dev_warn(dev, "cons_index overflow, set back to 0.\n");
			eq->cons_index = 0;
		}
	}

	set_eq_cons_index_v1(eq, 0);

	return aeqes_found;
}

static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->entries - 1)) *
			    HNS_ROCE_CEQ_ENTRY_SIZE;

	return (struct hns_roce_ceqe *)((u8 *)
		(eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
		off % HNS_ROCE_BA_SIZE);
}

static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index);

	return (!!(roce_get_bit(ceqe->comp,
		HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
		(!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}

static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
			       struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe;
	int ceqes_found = 0;
	u32 cqn;

	while ((ceqe = next_ceqe_sw_v1(eq))) {
		/* Make sure we read CEQ entry after we have checked the
		 * ownership bit
		 */
		dma_rmb();

		cqn = roce_get_field(ceqe->comp,
				     HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
				     HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
		hns_roce_cq_completion(hr_dev, cqn);

		++eq->cons_index;
		ceqes_found = 1;

		if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth - 1) {
			dev_warn(&eq->hr_dev->pdev->dev,
				 "cons_index overflow, set back to 0.\n");
			eq->cons_index = 0;
		}
	}

	set_eq_cons_index_v1(eq, 0);

	return ceqes_found;
}

static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr)
{
	struct hns_roce_eq *eq = eq_ptr;
	struct hns_roce_dev *hr_dev = eq->hr_dev;
	int int_work = 0;

	if (eq->type_flag == HNS_ROCE_CEQ)
		/* CEQ irq routine, CEQ is pulse irq, not clear */
		int_work = hns_roce_v1_ceq_int(hr_dev, eq);
	else
		/* AEQ irq routine, AEQ is pulse irq, not clear */
		int_work = hns_roce_v1_aeq_int(hr_dev, eq);

	return IRQ_RETVAL(int_work);
}
static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
{
	struct hns_roce_dev *hr_dev = dev_id;
	struct device *dev = &hr_dev->pdev->dev;
	int int_work = 0;
	u32 caepaemask_val;
	u32 cealmovf_val;
	u32 caepaest_val;
	u32 aeshift_val;
	u32 ceshift_val;
	u32 cemask_val;
	__le32 tmp;
	int i;

	/*
	 * Abnormal interrupt:
	 * AEQ overflow, ECC multi-bit err, CEQ overflow must clear
	 * interrupt, mask irq, clear irq, cancel mask operation
	 */
	aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
	tmp = cpu_to_le32(aeshift_val);

	/* AEQE overflow */
	if (roce_get_bit(tmp,
		ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
		dev_warn(dev, "AEQ overflow!\n");

		/* The overflow is handled below, so flag the IRQ as handled */
		int_work++;

		/* Set mask */
		caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
		tmp = cpu_to_le32(caepaemask_val);
		roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
			     HNS_ROCE_INT_MASK_ENABLE);
		caepaemask_val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);

		/* Clear int state(INT_WC : write 1 clear) */
		caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
		tmp = cpu_to_le32(caepaest_val);
		roce_set_bit(tmp, ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
		caepaest_val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);

		/* Clear mask */
		caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
		tmp = cpu_to_le32(caepaemask_val);
		roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
			     HNS_ROCE_INT_MASK_DISABLE);
		caepaemask_val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
	}

	/* CEQ almost overflow */
	for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
		ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
					i * CEQ_REG_OFFSET);
		tmp = cpu_to_le32(ceshift_val);

		if (roce_get_bit(tmp,
			ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
			dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
			int_work++;

			/* Set mask */
			cemask_val = roce_read(hr_dev,
					       ROCEE_CAEP_CE_IRQ_MASK_0_REG +
					       i * CEQ_REG_OFFSET);
			tmp = cpu_to_le32(cemask_val);
			roce_set_bit(tmp,
				ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
				HNS_ROCE_INT_MASK_ENABLE);
			cemask_val = le32_to_cpu(tmp);
			roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				   i * CEQ_REG_OFFSET, cemask_val);

			/* Clear int state(INT_WC : write 1 clear) */
			cealmovf_val = roce_read(hr_dev,
						 ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
						 i * CEQ_REG_OFFSET);
			tmp = cpu_to_le32(cealmovf_val);
			roce_set_bit(tmp,
				     ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
				     1);
			cealmovf_val = le32_to_cpu(tmp);
			roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
				   i * CEQ_REG_OFFSET, cealmovf_val);

			/* Clear mask */
			cemask_val = roce_read(hr_dev,
					       ROCEE_CAEP_CE_IRQ_MASK_0_REG +
					       i * CEQ_REG_OFFSET);
			tmp = cpu_to_le32(cemask_val);
			roce_set_bit(tmp,
				ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
				HNS_ROCE_INT_MASK_DISABLE);
			cemask_val = le32_to_cpu(tmp);
			roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				   i * CEQ_REG_OFFSET, cemask_val);
		}
	}

	/* ECC multi-bit error alarm */
	dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));

	dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));

	return IRQ_RETVAL(int_work);
}

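/*
 * Program the AEQ and per-CEQ interrupt mask registers with a fixed masken
 * value of 0.
 */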
static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
{
	u32 aemask_val;
	int masken = 0;
	__le32 tmp;
	int i;

	/* AEQ INT */
	aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
	tmp = cpu_to_le32(aemask_val);
	roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
		     masken);
	roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
	aemask_val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);

	/* CEQ INT */
	for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
		/* IRQ mask */
		roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
			   i * CEQ_REG_OFFSET, masken);
	}
}

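/*
 * Free the DMA-coherent chunks backing an EQ ring and the buf_list array
 * itself; a NULL buf_list means the EQ was never (fully) created.
 */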
static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev,
				struct hns_roce_eq *eq)
{
	int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
		      HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
	int i;

	if (!eq->buf_list)
		return;

	for (i = 0; i < npages; ++i)
		dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
				  eq->buf_list[i].buf, eq->buf_list[i].map);

	kfree(eq->buf_list);
}

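/*
 * Flip the STATE field of an EQ context between VALID and INVALID via a
 * read-modify-write of the first EQC register word.
 */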
static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
				  int enable_flag)
{
	void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
	__le32 tmp;
	u32 val;

	val = readl(eqc);
	tmp = cpu_to_le32(val);

	if (enable_flag)
		roce_set_field(tmp,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
			       HNS_ROCE_EQ_STAT_VALID);
	else
		roce_set_field(tmp,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
			       HNS_ROCE_EQ_STAT_INVALID);

	val = le32_to_cpu(tmp);
	writel(val, eqc);
}

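/*
 * Allocate the DMA ring for one EQ and program its context registers:
 * state plus log ring size, the 4K-aligned base address (split across two
 * register words), and the current entry and consumer indexes, both
 * starting at 0. Note the size check below limits a v1 EQ to a single
 * HNS_ROCE_BA_SIZE chunk.
 */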
static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
				 struct hns_roce_eq *eq)
{
	void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t tmp_dma_addr;
	u32 eqconsindx_val = 0;
	u32 eqcuridx_val = 0;
	u32 eqshift_val = 0;
	__le32 tmp2 = 0;
	__le32 tmp1 = 0;
	__le32 tmp = 0;
	int num_bas;
	int ret;
	int i;

	num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
		   HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;

	if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
		dev_err(dev, "eq buf %d is larger than ba size (%d), %d bas needed\n",
			(eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
			num_bas);
		return -EINVAL;
	}

	eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
	if (!eq->buf_list)
		return -ENOMEM;

	for (i = 0; i < num_bas; ++i) {
		eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
							 &tmp_dma_addr,
							 GFP_KERNEL);
		if (!eq->buf_list[i].buf) {
			ret = -ENOMEM;
			goto err_out_free_pages;
		}

		eq->buf_list[i].map = tmp_dma_addr;
		memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
	}
	eq->cons_index = 0;
	roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
		       HNS_ROCE_EQ_STAT_INVALID);
	roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
		       eq->log_entries);
	eqshift_val = le32_to_cpu(tmp);
	writel(eqshift_val, eqc);

	/* Configure eq extended address 12~44bit */
	writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);

	/*
	 * Configure eq extended address 45~49 bit.
	 * 44 = 32 + 12: the base address is shifted by 12 because the
	 * hardware uses 4K pages, and by a further 32 to pick out the high
	 * 32 bits of the value handed to the hardware.
	 */
	roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
		       eq->buf_list[0].map >> 44);
	roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
	eqcuridx_val = le32_to_cpu(tmp1);
	writel(eqcuridx_val, eqc + 8);

	/* Configure eq consumer index */
	roce_set_field(tmp2, ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
		       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
	eqconsindx_val = le32_to_cpu(tmp2);
	writel(eqconsindx_val, eqc + 0xc);

	return 0;

err_out_free_pages:
	for (i -= 1; i >= 0; i--)
		dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
				  eq->buf_list[i].map);

	kfree(eq->buf_list);
	return ret;
}

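/*
 * Build the EQ table: vectors [0, num_comp_vectors) are CEQs with per-EQ
 * context and doorbell registers, the remaining ones are AEQs. Interrupts
 * are masked while the rings are created, then the per-EQ and abnormal IRQ
 * handlers are requested, and the EQs are switched to the VALID state last.
 */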
static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_eq *eq;
	int irq_num;
	int eq_num;
	int ret;
	int i, j;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	irq_num = eq_num + hr_dev->caps.num_other_vectors;

	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
				     GFP_KERNEL);
	if (!eq_table->eqc_base) {
		ret = -ENOMEM;
		goto err_eqc_base_alloc_fail;
	}

	for (i = 0; i < eq_num; i++) {
		eq = &eq_table->eq[i];
		eq->hr_dev = hr_dev;
		eq->eqn = i;
		eq->irq = hr_dev->irq[i];
		eq->log_page_size = PAGE_SHIFT;

		if (i < hr_dev->caps.num_comp_vectors) {
			/* CEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_CEQC_SHIFT_0_REG +
						CEQ_REG_OFFSET * i;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->doorbell = hr_dev->reg_base +
				       ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
				       CEQ_REG_OFFSET * i;
			eq->entries = hr_dev->caps.ceqe_depth;
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
		} else {
			/* AEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->doorbell = hr_dev->reg_base +
				       ROCEE_CAEP_AEQE_CONS_IDX_REG;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
		}
	}

	/* Disable irq */
	hns_roce_v1_int_mask_enable(hr_dev);

	/* Configure ce int interval */
	roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_INTERVAL);

	/* Configure ce int burst num */
	roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_BURST_NUM);

	for (i = 0; i < eq_num; i++) {
		ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]);
		if (ret) {
			dev_err(dev, "eq create failed\n");
			goto err_create_eq_fail;
		}
	}

	for (j = 0; j < irq_num; j++) {
		if (j < eq_num)
			ret = request_irq(hr_dev->irq[j],
					  hns_roce_v1_msix_interrupt_eq, 0,
					  hr_dev->irq_names[j],
					  &eq_table->eq[j]);
		else
			ret = request_irq(hr_dev->irq[j],
					  hns_roce_v1_msix_interrupt_abn, 0,
					  hr_dev->irq_names[j], hr_dev);
		if (ret) {
			dev_err(dev, "request irq error!\n");
			goto err_request_irq_fail;
		}
	}

	for (i = 0; i < eq_num; i++)
		hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE);

	return 0;

err_request_irq_fail:
	for (j -= 1; j >= 0; j--)
		free_irq(hr_dev->irq[j], &eq_table->eq[j]);

err_create_eq_fail:
	for (i -= 1; i >= 0; i--)
		hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);

	kfree(eq_table->eqc_base);

err_eqc_base_alloc_fail:
	kfree(eq_table->eq);

	return ret;
}

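/*
 * Undo hns_roce_v1_init_eq_table() in reverse order: invalidate each EQ,
 * release its IRQ and ring memory, then free the abnormal-event IRQs and
 * the table arrays.
 */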
static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int irq_num;
	int eq_num;
	int i;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	irq_num = eq_num + hr_dev->caps.num_other_vectors;

	for (i = 0; i < eq_num; i++) {
		/* Disable EQ */
		hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE);

		free_irq(hr_dev->irq[i], &eq_table->eq[i]);

		hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
	}
	for (i = eq_num; i < irq_num; i++)
		free_irq(hr_dev->irq[i], hr_dev);

	kfree(eq_table->eqc_base);
	kfree(eq_table->eq);
}

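/* hip06-specific implementation of the hns_roce_hw ops */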
static const struct hns_roce_hw hns_roce_hw_v1 = {
	.reset = hns_roce_v1_reset,
	.hw_profile = hns_roce_v1_profile,
	.hw_init = hns_roce_v1_init,
	.hw_exit = hns_roce_v1_exit,
	.post_mbox = hns_roce_v1_post_mbox,
	.chk_mbox = hns_roce_v1_chk_mbox,
	.set_gid = hns_roce_v1_set_gid,
	.set_mac = hns_roce_v1_set_mac,
	.set_mtu = hns_roce_v1_set_mtu,
	.write_mtpt = hns_roce_v1_write_mtpt,
	.write_cqc = hns_roce_v1_write_cqc,
	.modify_cq = hns_roce_v1_modify_cq,
	.clear_hem = hns_roce_v1_clear_hem,
	.modify_qp = hns_roce_v1_modify_qp,
	.query_qp = hns_roce_v1_query_qp,
	.destroy_qp = hns_roce_v1_destroy_qp,
	.post_send = hns_roce_v1_post_send,
	.post_recv = hns_roce_v1_post_recv,
	.req_notify_cq = hns_roce_v1_req_notify_cq,
	.poll_cq = hns_roce_v1_poll_cq,
	.dereg_mr = hns_roce_v1_dereg_mr,
	.destroy_cq = hns_roce_v1_destroy_cq,
	.init_eq = hns_roce_v1_init_eq_table,
	.cleanup_eq = hns_roce_v1_cleanup_eq_table,
};

static const struct of_device_id hns_roce_of_match[] = {
	{ .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
	{},
};
MODULE_DEVICE_TABLE(of, hns_roce_of_match);

static const struct acpi_device_id hns_roce_acpi_match[] = {
	{ "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
	{},
};
MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);

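/* bus_find_device() match callback: compare a device's fwnode handle */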
static int hns_roce_node_match(struct device *dev, void *fwnode)
{
	return dev->fwnode == fwnode;
}

static struct
platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
{
	struct device *dev;

	/* get the 'device' corresponding to the matching 'fwnode' */
	dev = bus_find_device(&platform_bus_type, NULL,
			      fwnode, hns_roce_node_match);

	/* get the platform device */
	return dev ? to_platform_device(dev) : NULL;
}

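/*
 * Pull the platform configuration from DT or ACPI: the hw ops from the
 * match data, the register base, the node GUID, the netdev behind each
 * "eth-handle" reference, and the interrupt names and numbers.
 */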
static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct platform_device *pdev = NULL;
	struct net_device *netdev = NULL;
	struct device_node *net_node;
	struct resource *res;
	int port_cnt = 0;
	u8 phy_port;
	int ret;
	int i;

	/* check if we are compatible with the underlying SoC */
	if (dev_of_node(dev)) {
		const struct of_device_id *of_id;

		of_id = of_match_node(hns_roce_of_match, dev->of_node);
		if (!of_id) {
			dev_err(dev, "device is not compatible!\n");
			return -ENXIO;
		}
		hr_dev->hw = (const struct hns_roce_hw *)of_id->data;
		if (!hr_dev->hw) {
			dev_err(dev, "couldn't get H/W specific DT data!\n");
			return -ENXIO;
		}
	} else if (is_acpi_device_node(dev->fwnode)) {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
		if (!acpi_id) {
			dev_err(dev, "device is not compatible!\n");
			return -ENXIO;
		}
		hr_dev->hw = (const struct hns_roce_hw *) acpi_id->driver_data;
		if (!hr_dev->hw) {
			dev_err(dev, "couldn't get H/W specific ACPI data!\n");
			return -ENXIO;
		}
	} else {
		dev_err(dev, "can't read compatibility data from DT or ACPI\n");
		return -ENXIO;
	}

	/* get the mapped register base address */
	res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
	hr_dev->reg_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hr_dev->reg_base))
		return PTR_ERR(hr_dev->reg_base);

	/* read the node_guid of IB device from the DT or ACPI */
	ret = device_property_read_u8_array(dev, "node-guid",
					    (u8 *)&hr_dev->ib_dev.node_guid,
					    GUID_LEN);
	if (ret) {
		dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
		return ret;
	}

	/* get the RoCE associated ethernet ports or netdevices */
	for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
		if (dev_of_node(dev)) {
			net_node = of_parse_phandle(dev->of_node, "eth-handle",
						    i);
			if (!net_node)
				continue;
			pdev = of_find_device_by_node(net_node);
		} else if (is_acpi_device_node(dev->fwnode)) {
			struct fwnode_reference_args args;

			ret = acpi_node_get_property_reference(dev->fwnode,
							       "eth-handle",
							       i, &args);
			if (ret)
				continue;
			pdev = hns_roce_find_pdev(args.fwnode);
		} else {
			dev_err(dev, "cannot read data from DT or ACPI\n");
			return -ENXIO;
		}

		if (pdev) {
			netdev = platform_get_drvdata(pdev);
			phy_port = (u8)i;
			if (netdev) {
				hr_dev->iboe.netdevs[port_cnt] = netdev;
				hr_dev->iboe.phy_port[port_cnt] = phy_port;
			} else {
				dev_err(dev, "no netdev found with pdev %s\n",
					pdev->name);
				return -ENODEV;
			}
			port_cnt++;
		}
	}

	if (port_cnt == 0) {
		dev_err(dev, "unable to get eth-handle for available ports!\n");
		return -EINVAL;
	}

	hr_dev->caps.num_ports = port_cnt;

	/* cmd issue mode: 0 is poll, 1 is event */
	hr_dev->cmd_mod = 1;
	hr_dev->loop_idc = 0;
	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
	hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;

	/* read the interrupt names from the DT or ACPI */
	ret = device_property_read_string_array(dev, "interrupt-names",
						hr_dev->irq_names,
						HNS_ROCE_V1_MAX_IRQ_NUM);
	if (ret < 0) {
		dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
		return ret;
	}

	/* fetch the interrupt numbers */
	for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
		hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
		if (hr_dev->irq[i] <= 0) {
			dev_err(dev, "platform get of irq[=%d] failed!\n", i);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * hns_roce_probe - RoCE driver entrance
 * @pdev: pointer to platform device
 *
 * Return: 0 on success, or a negative errno on failure.
 */
static int hns_roce_probe(struct platform_device *pdev)
{
	int ret;
	struct hns_roce_dev *hr_dev;
	struct device *dev = &pdev->dev;

	hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
	if (!hr_dev)
		return -ENOMEM;

	hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL);
	if (!hr_dev->priv) {
		ret = -ENOMEM;
		goto error_failed_kzalloc;
	}

	hr_dev->pdev = pdev;
	hr_dev->dev = dev;
	platform_set_drvdata(pdev, hr_dev);

	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
		dev_err(dev, "no usable DMA addressing mode\n");
		ret = -EIO;
		goto error_failed_get_cfg;
	}

	ret = hns_roce_get_cfg(hr_dev);
	if (ret) {
		dev_err(dev, "Get Configuration failed!\n");
		goto error_failed_get_cfg;
	}

	ret = hns_roce_init(hr_dev);
	if (ret) {
		dev_err(dev, "RoCE engine init failed!\n");
		goto error_failed_get_cfg;
	}

	return 0;

error_failed_get_cfg:
	kfree(hr_dev->priv);

error_failed_kzalloc:
	ib_dealloc_device(&hr_dev->ib_dev);

	return ret;
}

/**
 * hns_roce_remove - remove RoCE device
 * @pdev: pointer to platform device
 */
static int hns_roce_remove(struct platform_device *pdev)
{
	struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);

	hns_roce_exit(hr_dev);
	kfree(hr_dev->priv);
	ib_dealloc_device(&hr_dev->ib_dev);

	return 0;
}

static struct platform_driver hns_roce_driver = {
	.probe = hns_roce_probe,
	.remove = hns_roce_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = hns_roce_of_match,
		.acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
	},
};

module_platform_driver(hns_roce_driver);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");