i40iw_cm.c

/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include <linux/atomic.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/notifier.h>
#include <linux/net.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/highmem.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/neighbour.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip_fib.h>
#include <net/secure_seq.h>
#include <net/tcp.h>
#include <asm/checksum.h>
#include "i40iw.h"

static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *);
static void i40iw_cm_post_event(struct i40iw_cm_event *event);
static void i40iw_disconnect_worker(struct work_struct *work);

/**
 * i40iw_free_sqbuf - put back puda buffer if refcount is 0
 * @vsi: pointer to vsi structure
 * @bufp: puda buffer to free
 */
void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp)
{
	struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)bufp;
	struct i40iw_puda_rsrc *ilq = vsi->ilq;

	if (!atomic_dec_return(&buf->refcount))
		i40iw_puda_ret_bufpool(ilq, buf);
}

/**
 * i40iw_derive_hw_ird_setting - Calculate IRD
 * @cm_ird: IRD of connection's node
 *
 * The ird from the connection is rounded to a supported HW
 * setting (2, 8, 32, 64) and then encoded for the ird_size field
 * of qp_ctx.
 */
static u8 i40iw_derive_hw_ird_setting(u16 cm_ird)
{
	u8 encoded_ird_size;

	/* ird_size field is encoded in qp_ctx */
	switch (cm_ird ? roundup_pow_of_two(cm_ird) : 0) {
	case I40IW_HW_IRD_SETTING_64:
		encoded_ird_size = 3;
		break;
	case I40IW_HW_IRD_SETTING_32:
	case I40IW_HW_IRD_SETTING_16:
		encoded_ird_size = 2;
		break;
	case I40IW_HW_IRD_SETTING_8:
	case I40IW_HW_IRD_SETTING_4:
		encoded_ird_size = 1;
		break;
	case I40IW_HW_IRD_SETTING_2:
	default:
		encoded_ird_size = 0;
		break;
	}
	return encoded_ird_size;
}

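/*
 * Illustration (editor's note, assuming the I40IW_HW_IRD_SETTING_*
 * macros equal their numeric suffixes):
 *	cm_ird = 0  -> encoded 0 (HW minimum of 2)
 *	cm_ird = 6  -> rounds to 8  -> encoded 1
 *	cm_ird = 20 -> rounds to 32 -> encoded 2
 *	cm_ird = 64 -> encoded 3
 */
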
/**
 * i40iw_record_ird_ord - Record IRD/ORD passed in
 * @cm_node: connection's node
 * @conn_ird: connection IRD
 * @conn_ord: connection ORD
 */
static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u32 conn_ird,
				 u32 conn_ord)
{
	if (conn_ird > I40IW_MAX_IRD_SIZE)
		conn_ird = I40IW_MAX_IRD_SIZE;

	if (conn_ord > I40IW_MAX_ORD_SIZE)
		conn_ord = I40IW_MAX_ORD_SIZE;
	else if (!conn_ord && cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO)
		conn_ord = 1;

	cm_node->ird_size = conn_ird;
	cm_node->ord_size = conn_ord;
}

/**
 * i40iw_copy_ip_ntohl - change network to host ip
 * @dst: host order ip
 * @src: network order (big endian) ip
 */
void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src)
{
	*dst++ = ntohl(*src++);
	*dst++ = ntohl(*src++);
	*dst++ = ntohl(*src++);
	*dst = ntohl(*src);
}

/**
 * i40iw_copy_ip_htonl - change host addr to network ip
 * @dst: network order (big endian) ip
 * @src: host order ip
 */
static inline void i40iw_copy_ip_htonl(__be32 *dst, u32 *src)
{
	*dst++ = htonl(*src++);
	*dst++ = htonl(*src++);
	*dst++ = htonl(*src++);
	*dst = htonl(*src);
}

/**
 * i40iw_fill_sockaddr4 - get addr info for passive connection
 * @cm_node: connection's node
 * @event: upper layer's cm event
 */
static inline void i40iw_fill_sockaddr4(struct i40iw_cm_node *cm_node,
					struct iw_cm_event *event)
{
	struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr;

	laddr->sin_family = AF_INET;
	raddr->sin_family = AF_INET;

	laddr->sin_port = htons(cm_node->loc_port);
	raddr->sin_port = htons(cm_node->rem_port);

	laddr->sin_addr.s_addr = htonl(cm_node->loc_addr[0]);
	raddr->sin_addr.s_addr = htonl(cm_node->rem_addr[0]);
}

/**
 * i40iw_fill_sockaddr6 - get ipv6 addr info for passive side
 * @cm_node: connection's node
 * @event: upper layer's cm event
 */
static inline void i40iw_fill_sockaddr6(struct i40iw_cm_node *cm_node,
					struct iw_cm_event *event)
{
	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr;
	struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr;

	laddr6->sin6_family = AF_INET6;
	raddr6->sin6_family = AF_INET6;

	laddr6->sin6_port = htons(cm_node->loc_port);
	raddr6->sin6_port = htons(cm_node->rem_port);

	i40iw_copy_ip_htonl(laddr6->sin6_addr.in6_u.u6_addr32,
			    cm_node->loc_addr);
	i40iw_copy_ip_htonl(raddr6->sin6_addr.in6_u.u6_addr32,
			    cm_node->rem_addr);
}

/**
 * i40iw_get_addr_info - copy ip/tcp info from a cm_node
 * @cm_node: contains ip/tcp info
 * @cm_info: to get a copy of the cm_node ip/tcp info
 */
static void i40iw_get_addr_info(struct i40iw_cm_node *cm_node,
				struct i40iw_cm_info *cm_info)
{
	cm_info->ipv4 = cm_node->ipv4;
	cm_info->vlan_id = cm_node->vlan_id;
	memcpy(cm_info->loc_addr, cm_node->loc_addr, sizeof(cm_info->loc_addr));
	memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr));
	cm_info->loc_port = cm_node->loc_port;
	cm_info->rem_port = cm_node->rem_port;
	cm_info->user_pri = cm_node->user_pri;
}

/**
 * i40iw_get_cmevent_info - for cm event upcall
 * @cm_node: connection's node
 * @cm_id: upper layer's cm struct for the event
 * @event: upper layer's cm event
 */
static inline void i40iw_get_cmevent_info(struct i40iw_cm_node *cm_node,
					  struct iw_cm_id *cm_id,
					  struct iw_cm_event *event)
{
	memcpy(&event->local_addr, &cm_id->m_local_addr,
	       sizeof(event->local_addr));
	memcpy(&event->remote_addr, &cm_id->m_remote_addr,
	       sizeof(event->remote_addr));
	if (cm_node) {
		event->private_data = (void *)cm_node->pdata_buf;
		event->private_data_len = (u8)cm_node->pdata.size;
		event->ird = cm_node->ird_size;
		event->ord = cm_node->ord_size;
	}
}

/**
 * i40iw_send_cm_event - upcall cm's event handler
 * @cm_node: connection's node
 * @cm_id: upper layer's cm info struct
 * @type: Event type to indicate
 * @status: status for the event type
 */
static int i40iw_send_cm_event(struct i40iw_cm_node *cm_node,
			       struct iw_cm_id *cm_id,
			       enum iw_cm_event_type type,
			       int status)
{
	struct iw_cm_event event;

	memset(&event, 0, sizeof(event));
	event.event = type;
	event.status = status;
	switch (type) {
	case IW_CM_EVENT_CONNECT_REQUEST:
		if (cm_node->ipv4)
			i40iw_fill_sockaddr4(cm_node, &event);
		else
			i40iw_fill_sockaddr6(cm_node, &event);
		event.provider_data = (void *)cm_node;
		event.private_data = (void *)cm_node->pdata_buf;
		event.private_data_len = (u8)cm_node->pdata.size;
		event.ird = cm_node->ird_size;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		i40iw_get_cmevent_info(cm_node, cm_id, &event);
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event.ird = cm_node->ird_size;
		event.ord = cm_node->ord_size;
		break;
	case IW_CM_EVENT_DISCONNECT:
		break;
	case IW_CM_EVENT_CLOSE:
		break;
	default:
		i40iw_pr_err("unsupported event type = %d\n", type);
		return -1;
	}
	return cm_id->event_handler(cm_id, &event);
}

/**
 * i40iw_create_event - create cm event
 * @cm_node: connection's node
 * @type: Event type to generate
 */
static struct i40iw_cm_event *i40iw_create_event(struct i40iw_cm_node *cm_node,
						 enum i40iw_cm_event_type type)
{
	struct i40iw_cm_event *event;

	if (!cm_node->cm_id)
		return NULL;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return NULL;

	event->type = type;
	event->cm_node = cm_node;
	memcpy(event->cm_info.rem_addr, cm_node->rem_addr, sizeof(event->cm_info.rem_addr));
	memcpy(event->cm_info.loc_addr, cm_node->loc_addr, sizeof(event->cm_info.loc_addr));
	event->cm_info.rem_port = cm_node->rem_port;
	event->cm_info.loc_port = cm_node->loc_port;
	event->cm_info.cm_id = cm_node->cm_id;

	i40iw_debug(cm_node->dev,
		    I40IW_DEBUG_CM,
		    "node=%p event=%p type=%u dst=%pI4 src=%pI4\n",
		    cm_node,
		    event,
		    type,
		    event->cm_info.loc_addr,
		    event->cm_info.rem_addr);

	i40iw_cm_post_event(event);
	return event;
}

/**
 * i40iw_free_retrans_entry - free send entry
 * @cm_node: connection's node
 */
static void i40iw_free_retrans_entry(struct i40iw_cm_node *cm_node)
{
	struct i40iw_device *iwdev = cm_node->iwdev;
	struct i40iw_timer_entry *send_entry;

	send_entry = cm_node->send_entry;
	if (send_entry) {
		cm_node->send_entry = NULL;
		i40iw_free_sqbuf(&iwdev->vsi, (void *)send_entry->sqbuf);
		kfree(send_entry);
		atomic_dec(&cm_node->ref_count);
	}
}

/**
 * i40iw_cleanup_retrans_entry - free send entry with lock
 * @cm_node: connection's node
 */
static void i40iw_cleanup_retrans_entry(struct i40iw_cm_node *cm_node)
{
	unsigned long flags;

	spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
	i40iw_free_retrans_entry(cm_node);
	spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
}

/**
 * i40iw_form_cm_frame - get a free packet and build frame
 * @cm_node: connection's node info to use in frame
 * @options: pointer to options info
 * @hdr: pointer to mpa header
 * @pdata: pointer to private data
 * @flags: indicates FIN or ACK
 */
static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node,
						  struct i40iw_kmem_info *options,
						  struct i40iw_kmem_info *hdr,
						  struct i40iw_kmem_info *pdata,
						  u8 flags)
{
	struct i40iw_puda_buf *sqbuf;
	struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
	u8 *buf;
	struct tcphdr *tcph;
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct ethhdr *ethh;
	u16 packetsize;
	u16 eth_hlen = ETH_HLEN;
	u32 opts_len = 0;
	u32 pd_len = 0;
	u32 hdr_len = 0;
	u16 vtag;

	sqbuf = i40iw_puda_get_bufpool(vsi->ilq);
	if (!sqbuf)
		return NULL;
	buf = sqbuf->mem.va;

	if (options)
		opts_len = (u32)options->size;

	if (hdr)
		hdr_len = hdr->size;

	if (pdata)
		pd_len = pdata->size;

	if (cm_node->vlan_id < VLAN_TAG_PRESENT)
		eth_hlen += 4;

	if (cm_node->ipv4)
		packetsize = sizeof(*iph) + sizeof(*tcph);
	else
		packetsize = sizeof(*ip6h) + sizeof(*tcph);
	packetsize += opts_len + hdr_len + pd_len;

	memset(buf, 0x00, eth_hlen + packetsize);

	sqbuf->totallen = packetsize + eth_hlen;
	sqbuf->maclen = eth_hlen;
	sqbuf->tcphlen = sizeof(*tcph) + opts_len;
	sqbuf->scratch = (void *)cm_node;

	ethh = (struct ethhdr *)buf;
	buf += eth_hlen;

	if (cm_node->ipv4) {
		sqbuf->ipv4 = true;

		iph = (struct iphdr *)buf;
		buf += sizeof(*iph);
		tcph = (struct tcphdr *)buf;
		buf += sizeof(*tcph);

		ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
		ether_addr_copy(ethh->h_source, cm_node->loc_mac);
		if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
			((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
			vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
			((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
			((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IP);
		} else {
			ethh->h_proto = htons(ETH_P_IP);
		}

		iph->version = IPVERSION;
		iph->ihl = 5;	/* 5 * 4Byte words, IP header length */
		iph->tos = cm_node->tos;
		iph->tot_len = htons(packetsize);
		iph->id = htons(++cm_node->tcp_cntxt.loc_id);

		iph->frag_off = htons(0x4000);
		iph->ttl = 0x40;
		iph->protocol = IPPROTO_TCP;
		iph->saddr = htonl(cm_node->loc_addr[0]);
		iph->daddr = htonl(cm_node->rem_addr[0]);
	} else {
		sqbuf->ipv4 = false;
		ip6h = (struct ipv6hdr *)buf;
		buf += sizeof(*ip6h);
		tcph = (struct tcphdr *)buf;
		buf += sizeof(*tcph);

		ether_addr_copy(ethh->h_dest, cm_node->rem_mac);
		ether_addr_copy(ethh->h_source, cm_node->loc_mac);
		if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
			((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q);
			vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id;
			((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag);
			((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IPV6);
		} else {
			ethh->h_proto = htons(ETH_P_IPV6);
		}
		ip6h->version = 6;
		ip6h->priority = cm_node->tos >> 4;
		ip6h->flow_lbl[0] = cm_node->tos << 4;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;
		ip6h->payload_len = htons(packetsize - sizeof(*ip6h));
		ip6h->nexthdr = 6;
		ip6h->hop_limit = 128;
		i40iw_copy_ip_htonl(ip6h->saddr.in6_u.u6_addr32,
				    cm_node->loc_addr);
		i40iw_copy_ip_htonl(ip6h->daddr.in6_u.u6_addr32,
				    cm_node->rem_addr);
	}

	tcph->source = htons(cm_node->loc_port);
	tcph->dest = htons(cm_node->rem_port);

	tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num);

	if (flags & SET_ACK) {
		cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
		tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num);
		tcph->ack = 1;
	} else {
		tcph->ack_seq = 0;
	}

	if (flags & SET_SYN) {
		cm_node->tcp_cntxt.loc_seq_num++;
		tcph->syn = 1;
	} else {
		cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
	}

	if (flags & SET_FIN) {
		cm_node->tcp_cntxt.loc_seq_num++;
		tcph->fin = 1;
	}

	if (flags & SET_RST)
		tcph->rst = 1;

	tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
	sqbuf->tcphlen = tcph->doff << 2;
	tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd);
	tcph->urg_ptr = 0;

	if (opts_len) {
		memcpy(buf, options->addr, opts_len);
		buf += opts_len;
	}

	if (hdr_len) {
		memcpy(buf, hdr->addr, hdr_len);
		buf += hdr_len;
	}

	if (pdata && pdata->addr)
		memcpy(buf, pdata->addr, pdata->size);

	atomic_set(&sqbuf->refcount, 1);

	return sqbuf;
}

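/*
 * Editor's note: the frame assembled above lays out as
 *	[ eth hdr | optional 802.1Q tag | ip/ipv6 hdr | tcp hdr |
 *	  tcp options | mpa hdr | private data ]
 * with sqbuf->maclen, ->tcphlen and ->totallen describing the pieces
 * for the puda send path.
 */
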
/**
 * i40iw_send_reset - Send RST packet
 * @cm_node: connection's node
 */
int i40iw_send_reset(struct i40iw_cm_node *cm_node)
{
	struct i40iw_puda_buf *sqbuf;
	int flags = SET_RST | SET_ACK;

	sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, flags);
	if (!sqbuf) {
		i40iw_pr_err("no sqbuf\n");
		return -1;
	}

	return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 0, 1);
}

/**
 * i40iw_active_open_err - send event for active side cm error
 * @cm_node: connection's node
 * @reset: Flag to send reset or not
 */
static void i40iw_active_open_err(struct i40iw_cm_node *cm_node, bool reset)
{
	i40iw_cleanup_retrans_entry(cm_node);
	cm_node->cm_core->stats_connect_errs++;
	if (reset) {
		i40iw_debug(cm_node->dev,
			    I40IW_DEBUG_CM,
			    "%s cm_node=%p state=%d\n",
			    __func__,
			    cm_node,
			    cm_node->state);
		atomic_inc(&cm_node->ref_count);
		i40iw_send_reset(cm_node);
	}

	cm_node->state = I40IW_CM_STATE_CLOSED;
	i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
}

/**
 * i40iw_passive_open_err - handle passive side cm error
 * @cm_node: connection's node
 * @reset: send reset or just free cm_node
 */
static void i40iw_passive_open_err(struct i40iw_cm_node *cm_node, bool reset)
{
	i40iw_cleanup_retrans_entry(cm_node);
	cm_node->cm_core->stats_passive_errs++;
	cm_node->state = I40IW_CM_STATE_CLOSED;
	i40iw_debug(cm_node->dev,
		    I40IW_DEBUG_CM,
		    "%s cm_node=%p state=%d\n",
		    __func__,
		    cm_node,
		    cm_node->state);
	if (reset)
		i40iw_send_reset(cm_node);
	else
		i40iw_rem_ref_cm_node(cm_node);
}

/**
 * i40iw_event_connect_error - to create connect error event
 * @event: cm information for connect event
 */
static void i40iw_event_connect_error(struct i40iw_cm_event *event)
{
	struct i40iw_qp *iwqp;
	struct iw_cm_id *cm_id;

	cm_id = event->cm_node->cm_id;
	if (!cm_id)
		return;

	iwqp = cm_id->provider_data;
	if (!iwqp || !iwqp->iwdev)
		return;

	iwqp->cm_id = NULL;
	cm_id->provider_data = NULL;
	i40iw_send_cm_event(event->cm_node, cm_id,
			    IW_CM_EVENT_CONNECT_REPLY,
			    -ECONNRESET);
	cm_id->rem_ref(cm_id);
	i40iw_rem_ref_cm_node(event->cm_node);
}

/**
 * i40iw_process_options - parse TCP options from a received segment
 * @cm_node: connection's node
 * @optionsloc: pointer to start of options
 * @optionsize: size of all options
 * @syn_packet: flag if syn packet
 */
static int i40iw_process_options(struct i40iw_cm_node *cm_node,
				 u8 *optionsloc,
				 u32 optionsize,
				 u32 syn_packet)
{
	u32 tmp;
	u32 offset = 0;
	union all_known_options *all_options;
	char got_mss_option = 0;

	while (offset < optionsize) {
		all_options = (union all_known_options *)(optionsloc + offset);
		switch (all_options->as_base.optionnum) {
		case OPTION_NUMBER_END:
			offset = optionsize;
			break;
		case OPTION_NUMBER_NONE:
			offset += 1;
			continue;
		case OPTION_NUMBER_MSS:
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "%s: MSS Length: %d Offset: %d Size: %d\n",
				    __func__,
				    all_options->as_mss.length,
				    offset,
				    optionsize);
			got_mss_option = 1;
			if (all_options->as_mss.length != 4)
				return -1;
			tmp = ntohs(all_options->as_mss.mss);
			if (tmp > 0 && tmp < cm_node->tcp_cntxt.mss)
				cm_node->tcp_cntxt.mss = tmp;
			break;
		case OPTION_NUMBER_WINDOW_SCALE:
			cm_node->tcp_cntxt.snd_wscale =
			    all_options->as_windowscale.shiftcount;
			break;
		default:
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "TCP Option not understood: %x\n",
				    all_options->as_base.optionnum);
			break;
		}
		offset += all_options->as_base.length;
	}
	if (!got_mss_option && syn_packet)
		cm_node->tcp_cntxt.mss = I40IW_CM_DEFAULT_MSS;
	return 0;
}

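/*
 * Editor's example (standard TCP option encoding): a SYN carrying the
 * bytes
 *	02 04 05 b4	MSS option, length 4, MSS 1460
 *	03 03 02	window scale option, length 3, shift count 2
 *	00		end of option list
 * walks the loop above through OPTION_NUMBER_MSS,
 * OPTION_NUMBER_WINDOW_SCALE and OPTION_NUMBER_END in turn.
 */
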
/**
 * i40iw_handle_tcp_options - process TCP options and update send window
 * @cm_node: connection's node
 * @tcph: pointer to tcp header
 * @optionsize: size of options rcvd
 * @passive: active or passive flag
 */
static int i40iw_handle_tcp_options(struct i40iw_cm_node *cm_node,
				    struct tcphdr *tcph,
				    int optionsize,
				    int passive)
{
	u8 *optionsloc = (u8 *)&tcph[1];

	if (optionsize) {
		if (i40iw_process_options(cm_node,
					  optionsloc,
					  optionsize,
					  (u32)tcph->syn)) {
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "%s: Node %p, Sending RESET\n",
				    __func__,
				    cm_node);
			if (passive)
				i40iw_passive_open_err(cm_node, true);
			else
				i40iw_active_open_err(cm_node, true);
			return -1;
		}
	}

	cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) <<
	    cm_node->tcp_cntxt.snd_wscale;

	if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd)
		cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd;
	return 0;
}

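/*
 * Editor's note: snd_wnd is the peer's advertised window scaled up by
 * its shift count, e.g. a window field of 0x2000 with snd_wscale 2
 * yields an effective send window of 0x8000.
 */
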
/**
 * i40iw_build_mpa_v1 - build a MPA V1 frame
 * @cm_node: connection's node
 * @start_addr: buffer start address
 * @mpa_key: to do read0 or write0
 */
static void i40iw_build_mpa_v1(struct i40iw_cm_node *cm_node,
			       void *start_addr,
			       u8 mpa_key)
{
	struct ietf_mpa_v1 *mpa_frame = (struct ietf_mpa_v1 *)start_addr;

	switch (mpa_key) {
	case MPA_KEY_REQUEST:
		memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE);
		break;
	case MPA_KEY_REPLY:
		memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE);
		break;
	default:
		break;
	}
	mpa_frame->flags = IETF_MPA_FLAGS_CRC;
	mpa_frame->rev = cm_node->mpa_frame_rev;
	mpa_frame->priv_data_len = htons(cm_node->pdata.size);
}

/**
 * i40iw_build_mpa_v2 - build a MPA V2 frame
 * @cm_node: connection's node
 * @start_addr: buffer start address
 * @mpa_key: to do read0 or write0
 */
static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node,
			       void *start_addr,
			       u8 mpa_key)
{
	struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr;
	struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
	u16 ctrl_ird, ctrl_ord;

	/* initialize the upper 5 bytes of the frame */
	i40iw_build_mpa_v1(cm_node, start_addr, mpa_key);
	mpa_frame->flags |= IETF_MPA_V2_FLAG;
	mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);

	/* initialize RTR msg */
	if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) {
		ctrl_ird = IETF_NO_IRD_ORD;
		ctrl_ord = IETF_NO_IRD_ORD;
	} else {
		ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
			IETF_NO_IRD_ORD : cm_node->ird_size;
		ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
			IETF_NO_IRD_ORD : cm_node->ord_size;
	}

	ctrl_ird |= IETF_PEER_TO_PEER;

	switch (mpa_key) {
	case MPA_KEY_REQUEST:
		ctrl_ord |= IETF_RDMA0_WRITE;
		ctrl_ord |= IETF_RDMA0_READ;
		break;
	case MPA_KEY_REPLY:
		switch (cm_node->send_rdma0_op) {
		case SEND_RDMA_WRITE_ZERO:
			ctrl_ord |= IETF_RDMA0_WRITE;
			break;
		case SEND_RDMA_READ_ZERO:
			ctrl_ord |= IETF_RDMA0_READ;
			break;
		}
		break;
	default:
		break;
	}
	rtr_msg->ctrl_ird = htons(ctrl_ird);
	rtr_msg->ctrl_ord = htons(ctrl_ord);
}

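/*
 * Editor's sketch of the RTR control words built above, assuming the
 * IETF_* flag definitions from the driver headers:
 *	ctrl_ird: peer-to-peer flag ORed over a 14-bit IRD value,
 *		  where IETF_NO_IRD_ORD means "no IRD/ORD negotiation"
 *	ctrl_ord: RDMA0 write/read capability flags ORed over the ORD
 */
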
/**
 * i40iw_cm_build_mpa_frame - build mpa frame for mpa version 1 or version 2
 * @cm_node: connection's node
 * @mpa: mpa data buffer
 * @mpa_key: to do read0 or write0
 */
static int i40iw_cm_build_mpa_frame(struct i40iw_cm_node *cm_node,
				    struct i40iw_kmem_info *mpa,
				    u8 mpa_key)
{
	int hdr_len = 0;

	switch (cm_node->mpa_frame_rev) {
	case IETF_MPA_V1:
		hdr_len = sizeof(struct ietf_mpa_v1);
		i40iw_build_mpa_v1(cm_node, mpa->addr, mpa_key);
		break;
	case IETF_MPA_V2:
		hdr_len = sizeof(struct ietf_mpa_v2);
		i40iw_build_mpa_v2(cm_node, mpa->addr, mpa_key);
		break;
	default:
		break;
	}

	return hdr_len;
}

/**
 * i40iw_send_mpa_request - active node send mpa request to passive node
 * @cm_node: connection's node
 */
static int i40iw_send_mpa_request(struct i40iw_cm_node *cm_node)
{
	struct i40iw_puda_buf *sqbuf;

	if (!cm_node) {
		i40iw_pr_err("cm_node == NULL\n");
		return -1;
	}

	cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
	cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
							 &cm_node->mpa_hdr,
							 MPA_KEY_REQUEST);
	if (!cm_node->mpa_hdr.size) {
		i40iw_pr_err("mpa size = %d\n", cm_node->mpa_hdr.size);
		return -1;
	}

	sqbuf = i40iw_form_cm_frame(cm_node,
				    NULL,
				    &cm_node->mpa_hdr,
				    &cm_node->pdata,
				    SET_ACK);
	if (!sqbuf) {
		i40iw_pr_err("sq_buf == NULL\n");
		return -1;
	}
	return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
}

/**
 * i40iw_send_mpa_reject - send an mpa reject frame
 * @cm_node: connection's node
 * @pdata: reject data for connection
 * @plen: length of reject data
 */
static int i40iw_send_mpa_reject(struct i40iw_cm_node *cm_node,
				 const void *pdata,
				 u8 plen)
{
	struct i40iw_puda_buf *sqbuf;
	struct i40iw_kmem_info priv_info;

	cm_node->mpa_hdr.addr = &cm_node->mpa_frame;
	cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node,
							 &cm_node->mpa_hdr,
							 MPA_KEY_REPLY);

	cm_node->mpa_frame.flags |= IETF_MPA_FLAGS_REJECT;
	priv_info.addr = (void *)pdata;
	priv_info.size = plen;

	sqbuf = i40iw_form_cm_frame(cm_node,
				    NULL,
				    &cm_node->mpa_hdr,
				    &priv_info,
				    SET_ACK | SET_FIN);
	if (!sqbuf) {
		i40iw_pr_err("no sqbuf\n");
		return -ENOMEM;
	}
	cm_node->state = I40IW_CM_STATE_FIN_WAIT1;
	return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
}

/**
 * i40iw_parse_mpa - process an IETF MPA frame
 * @cm_node: connection's node
 * @buffer: Data pointer
 * @type: to return accept or reject
 * @len: Len of mpa buffer
 */
static int i40iw_parse_mpa(struct i40iw_cm_node *cm_node, u8 *buffer, u32 *type, u32 len)
{
	struct ietf_mpa_v1 *mpa_frame;
	struct ietf_mpa_v2 *mpa_v2_frame;
	struct ietf_rtr_msg *rtr_msg;
	int mpa_hdr_len;
	int priv_data_len;

	*type = I40IW_MPA_REQUEST_ACCEPT;

	if (len < sizeof(struct ietf_mpa_v1)) {
		i40iw_pr_err("ietf buffer small (%x)\n", len);
		return -1;
	}

	mpa_frame = (struct ietf_mpa_v1 *)buffer;
	mpa_hdr_len = sizeof(struct ietf_mpa_v1);
	priv_data_len = ntohs(mpa_frame->priv_data_len);

	if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
		i40iw_pr_err("large priv_data %d\n", priv_data_len);
		return -1;
	}

	if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) {
		i40iw_pr_err("unsupported mpa rev = %d\n", mpa_frame->rev);
		return -1;
	}

	if (mpa_frame->rev > cm_node->mpa_frame_rev) {
		i40iw_pr_err("rev %d\n", mpa_frame->rev);
		return -1;
	}

	cm_node->mpa_frame_rev = mpa_frame->rev;

	if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {
		if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) {
			i40iw_pr_err("Unexpected MPA Key received\n");
			return -1;
		}
	} else {
		if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE)) {
			i40iw_pr_err("Unexpected MPA Key received\n");
			return -1;
		}
	}

	if (priv_data_len + mpa_hdr_len > len) {
		i40iw_pr_err("ietf buffer len(%x + %x != %x)\n",
			     priv_data_len, mpa_hdr_len, len);
		return -1;
	}

	if (len > MAX_CM_BUFFER) {
		i40iw_pr_err("ietf buffer large len = %d\n", len);
		return -1;
	}

	switch (mpa_frame->rev) {
	case IETF_MPA_V2: {
		u16 ird_size;
		u16 ord_size;
		u16 ctrl_ord;
		u16 ctrl_ird;

		mpa_v2_frame = (struct ietf_mpa_v2 *)buffer;
		mpa_hdr_len += IETF_RTR_MSG_SIZE;
		rtr_msg = &mpa_v2_frame->rtr_msg;

		/* parse rtr message */
		ctrl_ord = ntohs(rtr_msg->ctrl_ord);
		ctrl_ird = ntohs(rtr_msg->ctrl_ird);
		ird_size = ctrl_ird & IETF_NO_IRD_ORD;
		ord_size = ctrl_ord & IETF_NO_IRD_ORD;

		if (!(ctrl_ird & IETF_PEER_TO_PEER))
			return -1;

		if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD) {
			cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD;
			goto negotiate_done;
		}

		if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) {
			/* responder */
			if (!ord_size && (ctrl_ord & IETF_RDMA0_READ))
				cm_node->ird_size = 1;
			if (cm_node->ord_size > ird_size)
				cm_node->ord_size = ird_size;
		} else {
			/* initiator */
			if (!ird_size && (ctrl_ord & IETF_RDMA0_READ))
				return -1;
			if (cm_node->ord_size > ird_size)
				cm_node->ord_size = ird_size;

			if (cm_node->ird_size < ord_size)
				/* no resources available */
				return -1;
		}

negotiate_done:
		if (ctrl_ord & IETF_RDMA0_READ)
			cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
		else if (ctrl_ord & IETF_RDMA0_WRITE)
			cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
		else	/* not a supported RDMA0 operation */
			return -1;
		i40iw_debug(cm_node->dev, I40IW_DEBUG_CM,
			    "MPAV2: Negotiated ORD: %d, IRD: %d\n",
			    cm_node->ord_size, cm_node->ird_size);
		break;
	}
	case IETF_MPA_V1:
	default:
		break;
	}

	memcpy(cm_node->pdata_buf, buffer + mpa_hdr_len, priv_data_len);
	cm_node->pdata.size = priv_data_len;

	if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT)
		*type = I40IW_MPA_REQUEST_REJECT;

	if (mpa_frame->flags & IETF_MPA_FLAGS_MARKERS)
		cm_node->snd_mark_en = true;

	return 0;
}

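/*
 * Editor's note, per the ietf_mpa_v1/v2 structure layout assumed above:
 * an MPA v1 header is the 16-byte key, a flags byte, a revision byte
 * and a 16-bit private data length; MPA v2 appends the 4-byte RTR
 * message (ctrl_ird/ctrl_ord). Private data follows the header in both
 * revisions, which is why mpa_hdr_len is the copy offset.
 */
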
/**
 * i40iw_schedule_cm_timer - schedule a send or close timer entry
 * @cm_node: connection's node
 * @sqbuf: buffer to send
 * @type: if it is send or close
 * @send_retrans: if rexmits to be done
 * @close_when_complete: is cm_node to be removed
 *
 * note - cm_node needs to be protected before calling this, i.e. the
 * caller should hold a reference across the call:
 *	atomic_inc(&cm_node->ref_count);
 *	i40iw_schedule_cm_timer(...);
 *	i40iw_rem_ref_cm_node(cm_node);
 */
int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
			    struct i40iw_puda_buf *sqbuf,
			    enum i40iw_timer_type type,
			    int send_retrans,
			    int close_when_complete)
{
	struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;
	struct i40iw_cm_core *cm_core = cm_node->cm_core;
	struct i40iw_timer_entry *new_send;
	int ret = 0;
	u32 was_timer_set;
	unsigned long flags;

	new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
	if (!new_send) {
		if (type != I40IW_TIMER_TYPE_CLOSE)
			i40iw_free_sqbuf(vsi, (void *)sqbuf);
		return -ENOMEM;
	}
	new_send->retrycount = I40IW_DEFAULT_RETRYS;
	new_send->retranscount = I40IW_DEFAULT_RETRANS;
	new_send->sqbuf = sqbuf;
	new_send->timetosend = jiffies;
	new_send->type = type;
	new_send->send_retrans = send_retrans;
	new_send->close_when_complete = close_when_complete;

	if (type == I40IW_TIMER_TYPE_CLOSE) {
		new_send->timetosend += (HZ / 10);
		if (cm_node->close_entry) {
			kfree(new_send);
			i40iw_pr_err("already close entry\n");
			return -EINVAL;
		}
		cm_node->close_entry = new_send;
	}

	if (type == I40IW_TIMER_TYPE_SEND) {
		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
		cm_node->send_entry = new_send;
		atomic_inc(&cm_node->ref_count);
		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
		new_send->timetosend = jiffies + I40IW_RETRY_TIMEOUT;

		atomic_inc(&sqbuf->refcount);
		i40iw_puda_send_buf(vsi->ilq, sqbuf);
		if (!send_retrans) {
			i40iw_cleanup_retrans_entry(cm_node);
			if (close_when_complete)
				i40iw_rem_ref_cm_node(cm_node);
			return ret;
		}
	}

	spin_lock_irqsave(&cm_core->ht_lock, flags);
	was_timer_set = timer_pending(&cm_core->tcp_timer);

	if (!was_timer_set) {
		cm_core->tcp_timer.expires = new_send->timetosend;
		add_timer(&cm_core->tcp_timer);
	}
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);

	return ret;
}

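/*
 * Usage in this file: i40iw_send_reset() schedules with send_retrans=0
 * and close_when_complete=1 (send once, then drop the node reference),
 * while i40iw_send_syn(), i40iw_send_mpa_request() and
 * i40iw_send_mpa_reject() pass send_retrans=1, close_when_complete=0 so
 * the timer tick can retransmit the frame.
 */
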
/**
 * i40iw_retrans_expired - Could not rexmit the packet
 * @cm_node: connection's node
 */
static void i40iw_retrans_expired(struct i40iw_cm_node *cm_node)
{
	struct iw_cm_id *cm_id = cm_node->cm_id;
	enum i40iw_cm_node_state state = cm_node->state;

	cm_node->state = I40IW_CM_STATE_CLOSED;
	switch (state) {
	case I40IW_CM_STATE_SYN_RCVD:
	case I40IW_CM_STATE_CLOSING:
		i40iw_rem_ref_cm_node(cm_node);
		break;
	case I40IW_CM_STATE_FIN_WAIT1:
	case I40IW_CM_STATE_LAST_ACK:
		if (cm_node->cm_id)
			cm_id->rem_ref(cm_id);
		i40iw_send_reset(cm_node);
		break;
	default:
		atomic_inc(&cm_node->ref_count);
		i40iw_send_reset(cm_node);
		i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
		break;
	}
}

/**
 * i40iw_handle_close_entry - for handling retry/timeouts
 * @cm_node: connection's node
 * @rem_node: flag for remove cm_node
 */
static void i40iw_handle_close_entry(struct i40iw_cm_node *cm_node, u32 rem_node)
{
	struct i40iw_timer_entry *close_entry = cm_node->close_entry;
	struct iw_cm_id *cm_id = cm_node->cm_id;
	struct i40iw_qp *iwqp;
	unsigned long flags;

	if (!close_entry)
		return;
	iwqp = (struct i40iw_qp *)close_entry->sqbuf;
	if (iwqp) {
		spin_lock_irqsave(&iwqp->lock, flags);
		if (iwqp->cm_id) {
			iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
			iwqp->hw_iwarp_state = I40IW_QP_STATE_ERROR;
			iwqp->last_aeq = I40IW_AE_RESET_SENT;
			iwqp->ibqp_state = IB_QPS_ERR;
			spin_unlock_irqrestore(&iwqp->lock, flags);
			i40iw_cm_disconn(iwqp);
		} else {
			spin_unlock_irqrestore(&iwqp->lock, flags);
		}
	} else if (rem_node) {
		/* TIME_WAIT state */
		i40iw_rem_ref_cm_node(cm_node);
	}
	if (cm_id)
		cm_id->rem_ref(cm_id);
	kfree(close_entry);
	cm_node->close_entry = NULL;
}

/**
 * i40iw_build_timer_list - Add cm_nodes to timer list
 * @timer_list: ptr to timer list
 * @hte: ptr to accelerated or non-accelerated list
 */
static void i40iw_build_timer_list(struct list_head *timer_list,
				   struct list_head *hte)
{
	struct i40iw_cm_node *cm_node;
	struct list_head *list_core_temp, *list_node;

	list_for_each_safe(list_node, list_core_temp, hte) {
		cm_node = container_of(list_node, struct i40iw_cm_node, list);
		if (cm_node->close_entry || cm_node->send_entry) {
			atomic_inc(&cm_node->ref_count);
			list_add(&cm_node->timer_entry, timer_list);
		}
	}
}

/**
 * i40iw_cm_timer_tick - system's timer expired callback
 * @t: pointer to the timer embedded in cm_core
 */
static void i40iw_cm_timer_tick(struct timer_list *t)
{
	unsigned long nexttimeout = jiffies + I40IW_LONG_TIME;
	struct i40iw_cm_node *cm_node;
	struct i40iw_timer_entry *send_entry, *close_entry;
	struct list_head *list_core_temp;
	struct i40iw_sc_vsi *vsi;
	struct list_head *list_node;
	struct i40iw_cm_core *cm_core = from_timer(cm_core, t, tcp_timer);
	u32 settimer = 0;
	unsigned long timetosend;
	unsigned long flags;
	struct list_head timer_list;

	INIT_LIST_HEAD(&timer_list);

	spin_lock_irqsave(&cm_core->ht_lock, flags);
	i40iw_build_timer_list(&timer_list, &cm_core->non_accelerated_list);
	i40iw_build_timer_list(&timer_list, &cm_core->accelerated_list);
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);

	list_for_each_safe(list_node, list_core_temp, &timer_list) {
		cm_node = container_of(list_node,
				       struct i40iw_cm_node,
				       timer_entry);
		close_entry = cm_node->close_entry;

		if (close_entry) {
			if (time_after(close_entry->timetosend, jiffies)) {
				if (nexttimeout > close_entry->timetosend ||
				    !settimer) {
					nexttimeout = close_entry->timetosend;
					settimer = 1;
				}
			} else {
				i40iw_handle_close_entry(cm_node, 1);
			}
		}

		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);

		send_entry = cm_node->send_entry;
		if (!send_entry)
			goto done;
		if (time_after(send_entry->timetosend, jiffies)) {
			if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
				if ((nexttimeout > send_entry->timetosend) ||
				    !settimer) {
					nexttimeout = send_entry->timetosend;
					settimer = 1;
				}
			} else {
				i40iw_free_retrans_entry(cm_node);
			}
			goto done;
		}

		if ((cm_node->state == I40IW_CM_STATE_OFFLOADED) ||
		    (cm_node->state == I40IW_CM_STATE_CLOSED)) {
			i40iw_free_retrans_entry(cm_node);
			goto done;
		}

		if (!send_entry->retranscount || !send_entry->retrycount) {
			i40iw_free_retrans_entry(cm_node);

			spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
			i40iw_retrans_expired(cm_node);
			cm_node->state = I40IW_CM_STATE_CLOSED;
			spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
			goto done;
		}
		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);

		vsi = &cm_node->iwdev->vsi;

		if (!cm_node->ack_rcvd) {
			atomic_inc(&send_entry->sqbuf->refcount);
			i40iw_puda_send_buf(vsi->ilq, send_entry->sqbuf);
			cm_node->cm_core->stats_pkt_retrans++;
		}
		spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
		if (send_entry->send_retrans) {
			send_entry->retranscount--;
			timetosend = (I40IW_RETRY_TIMEOUT <<
				      (I40IW_DEFAULT_RETRANS -
				       send_entry->retranscount));

			send_entry->timetosend = jiffies +
			    min(timetosend, I40IW_MAX_TIMEOUT);
			if (nexttimeout > send_entry->timetosend || !settimer) {
				nexttimeout = send_entry->timetosend;
				settimer = 1;
			}
		} else {
			int close_when_complete;

			close_when_complete = send_entry->close_when_complete;
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "cm_node=%p state=%d\n",
				    cm_node,
				    cm_node->state);
			i40iw_free_retrans_entry(cm_node);
			if (close_when_complete)
				i40iw_rem_ref_cm_node(cm_node);
		}
done:
		spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
		i40iw_rem_ref_cm_node(cm_node);
	}

	if (settimer) {
		spin_lock_irqsave(&cm_core->ht_lock, flags);
		if (!timer_pending(&cm_core->tcp_timer)) {
			cm_core->tcp_timer.expires = nexttimeout;
			add_timer(&cm_core->tcp_timer);
		}
		spin_unlock_irqrestore(&cm_core->ht_lock, flags);
	}
}


/**
 * i40iw_send_syn - send SYN packet
 * @cm_node: connection's node
 * @sendack: flag to set ACK bit or not
 */
int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack)
{
	struct i40iw_puda_buf *sqbuf;
	int flags = SET_SYN;
	char optionsbuffer[sizeof(struct option_mss) +
			   sizeof(struct option_windowscale) +
			   sizeof(struct option_base) + TCP_OPTIONS_PADDING];
	struct i40iw_kmem_info opts;
	int optionssize = 0;
	/* Sending MSS option */
	union all_known_options *options;

	opts.addr = optionsbuffer;
	if (!cm_node) {
		i40iw_pr_err("no cm_node\n");
		return -EINVAL;
	}
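	/*
	 * Build the TCP option block in place: an MSS option, a window
	 * scale option, then a single end-of-options byte. optionssize
	 * tracks the running offset so each option lands directly after
	 * the previous one in optionsbuffer.
	 */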
	options = (union all_known_options *)&optionsbuffer[optionssize];
	options->as_mss.optionnum = OPTION_NUMBER_MSS;
	options->as_mss.length = sizeof(struct option_mss);
	options->as_mss.mss = htons(cm_node->tcp_cntxt.mss);
	optionssize += sizeof(struct option_mss);

	options = (union all_known_options *)&optionsbuffer[optionssize];
	options->as_windowscale.optionnum = OPTION_NUMBER_WINDOW_SCALE;
	options->as_windowscale.length = sizeof(struct option_windowscale);
	options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale;
	optionssize += sizeof(struct option_windowscale);

	options = (union all_known_options *)&optionsbuffer[optionssize];
	options->as_end = OPTION_NUMBER_END;
	optionssize += 1;

	if (sendack)
		flags |= SET_ACK;

	opts.size = optionssize;

	sqbuf = i40iw_form_cm_frame(cm_node, &opts, NULL, NULL, flags);
	if (!sqbuf) {
		i40iw_pr_err("no sqbuf\n");
		return -1;
	}
	return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
}

/**
 * i40iw_send_ack - Send ACK packet
 * @cm_node: connection's node
 */
static void i40iw_send_ack(struct i40iw_cm_node *cm_node)
{
	struct i40iw_puda_buf *sqbuf;
	struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi;

	sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK);
	if (sqbuf)
		i40iw_puda_send_buf(vsi->ilq, sqbuf);
	else
		i40iw_pr_err("no sqbuf\n");
}

/**
 * i40iw_send_fin - Send FIN packet
 * @cm_node: connection's node
 */
static int i40iw_send_fin(struct i40iw_cm_node *cm_node)
{
	struct i40iw_puda_buf *sqbuf;

	sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK | SET_FIN);
	if (!sqbuf) {
		i40iw_pr_err("no sqbuf\n");
		return -1;
	}
	return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0);
}

/**
 * i40iw_find_node - find a cm node that matches the reference cm node
 * @cm_core: cm's core
 * @rem_port: remote tcp port num
 * @rem_addr: remote ip addr
 * @loc_port: local tcp port num
 * @loc_addr: local ip addr
 * @add_refcnt: flag to increment refcount of cm_node
 * @accelerated_list: flag for accelerated vs non-accelerated list to search
 */
struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core,
				      u16 rem_port,
				      u32 *rem_addr,
				      u16 loc_port,
				      u32 *loc_addr,
				      bool add_refcnt,
				      bool accelerated_list)
{
	struct list_head *hte;
	struct i40iw_cm_node *cm_node;
	unsigned long flags;

	hte = accelerated_list ?
	      &cm_core->accelerated_list : &cm_core->non_accelerated_list;
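	/*
	 * A cm_node is linked on exactly one of the two lists and is
	 * expected to move to the accelerated list once its connection
	 * is offloaded, so the caller picks which list to search.
	 */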
	/* walk list and find cm_node associated with this session ID */
	spin_lock_irqsave(&cm_core->ht_lock, flags);
	list_for_each_entry(cm_node, hte, list) {
		if (!memcmp(cm_node->loc_addr, loc_addr, sizeof(cm_node->loc_addr)) &&
		    (cm_node->loc_port == loc_port) &&
		    !memcmp(cm_node->rem_addr, rem_addr, sizeof(cm_node->rem_addr)) &&
		    (cm_node->rem_port == rem_port)) {
			if (add_refcnt)
				atomic_inc(&cm_node->ref_count);
			spin_unlock_irqrestore(&cm_core->ht_lock, flags);
			return cm_node;
		}
	}
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);

	/* no owner node */
	return NULL;
}

/**
 * i40iw_find_listener - find a cm node listening on this addr-port pair
 * @cm_core: cm's core
 * @dst_addr: listener ip addr
 * @dst_port: listener tcp port num
 * @vlan_id: vlan id of the listener
 * @listener_state: state to match with listen node's
 */
static struct i40iw_cm_listener *i40iw_find_listener(
					struct i40iw_cm_core *cm_core,
					u32 *dst_addr,
					u16 dst_port,
					u16 vlan_id,
					enum i40iw_cm_listener_state
					listener_state)
{
	struct i40iw_cm_listener *listen_node;
	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
	u32 listen_addr[4];
	u16 listen_port;
	unsigned long flags;
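	/*
	 * ip_zero lets a wildcard listener (one bound to the all-zero
	 * address) match any destination address; such a listener only
	 * has to agree on the port and listener state below.
	 */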
	/* walk list and find cm_node associated with this session ID */
	spin_lock_irqsave(&cm_core->listen_list_lock, flags);
	list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
		memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr));
		listen_port = listen_node->loc_port;
		/* compare node pair, return node handle if a match */
		if ((!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) ||
		     !memcmp(listen_addr, ip_zero, sizeof(listen_addr))) &&
		    (listen_port == dst_port) &&
		    (listener_state & listen_node->listener_state)) {
			atomic_inc(&listen_node->ref_count);
			spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
			return listen_node;
		}
	}
	spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
	return NULL;
}

/**
 * i40iw_add_hte_node - add a cm node to the hash table
 * @cm_core: cm's core
 * @cm_node: connection's node
 */
static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core,
			       struct i40iw_cm_node *cm_node)
{
	unsigned long flags;

	if (!cm_node || !cm_core) {
		i40iw_pr_err("cm_node or cm_core == NULL\n");
		return;
	}

	spin_lock_irqsave(&cm_core->ht_lock, flags);
	list_add_tail(&cm_node->list, &cm_core->non_accelerated_list);
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);
}

/**
 * i40iw_find_port - find port that matches reference port
 * @hte: ptr to accelerated or non-accelerated list
 * @port: port number to match
 */
static bool i40iw_find_port(struct list_head *hte, u16 port)
{
	struct i40iw_cm_node *cm_node;

	list_for_each_entry(cm_node, hte, list) {
		if (cm_node->loc_port == port)
			return true;
	}
	return false;
}

/**
 * i40iw_port_in_use - determine if port is in use
 * @cm_core: cm's core
 * @port: port number
 */
bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port)
{
	struct i40iw_cm_listener *listen_node;
	unsigned long flags;
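	/*
	 * A port counts as in use if any connection node (accelerated or
	 * not) or any listener is bound to it; the two kinds of lists are
	 * protected by different locks, so they are checked in turn.
	 */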
	spin_lock_irqsave(&cm_core->ht_lock, flags);
	if (i40iw_find_port(&cm_core->accelerated_list, port) ||
	    i40iw_find_port(&cm_core->non_accelerated_list, port)) {
		spin_unlock_irqrestore(&cm_core->ht_lock, flags);
		return true;
	}
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);

	spin_lock_irqsave(&cm_core->listen_list_lock, flags);
	list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
		if (listen_node->loc_port == port) {
			spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);

	return false;
}

/**
 * i40iw_del_multiple_qhash - Remove qhash and child listens
 * @iwdev: iWarp device
 * @cm_info: CM info for parent listen node
 * @cm_parent_listen_node: The parent listen node
 */
static enum i40iw_status_code i40iw_del_multiple_qhash(
				struct i40iw_device *iwdev,
				struct i40iw_cm_info *cm_info,
				struct i40iw_cm_listener *cm_parent_listen_node)
{
	struct i40iw_cm_listener *child_listen_node;
	enum i40iw_status_code ret = I40IW_ERR_CONFIG;
	struct list_head *pos, *tpos;
	unsigned long flags;
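	/*
	 * Each child listen node is a per-address clone created by
	 * i40iw_add_mqh_4/6 for a wildcard listen. Tear down the qhash
	 * entry for every child that still has one, then free the child.
	 */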
	spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
	list_for_each_safe(pos, tpos, &cm_parent_listen_node->child_listen_list) {
		child_listen_node = list_entry(pos, struct i40iw_cm_listener, child_listen_list);
		if (child_listen_node->ipv4)
			i40iw_debug(&iwdev->sc_dev,
				    I40IW_DEBUG_CM,
				    "removing child listen for IP=%pI4, port=%d, vlan=%d\n",
				    child_listen_node->loc_addr,
				    child_listen_node->loc_port,
				    child_listen_node->vlan_id);
		else
			i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
				    "removing child listen for IP=%pI6, port=%d, vlan=%d\n",
				    child_listen_node->loc_addr,
				    child_listen_node->loc_port,
				    child_listen_node->vlan_id);
		list_del(pos);
		memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
		       sizeof(cm_info->loc_addr));
		cm_info->vlan_id = child_listen_node->vlan_id;
		if (child_listen_node->qhash_set) {
			ret = i40iw_manage_qhash(iwdev, cm_info,
						 I40IW_QHASH_TYPE_TCP_SYN,
						 I40IW_QHASH_MANAGE_TYPE_DELETE,
						 NULL, false);
			child_listen_node->qhash_set = false;
		} else {
			ret = I40IW_SUCCESS;
		}
		i40iw_debug(&iwdev->sc_dev,
			    I40IW_DEBUG_CM,
			    "freed pointer = %p\n",
			    child_listen_node);
		kfree(child_listen_node);
		cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++;
	}
	spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);

	return ret;
}

/**
 * i40iw_netdev_vlan_ipv6 - Gets the netdev and vlan
 * @addr: local IPv6 address
 * @vlan_id: vlan id for the given IPv6 address
 *
 * Returns the net_device of the IPv6 address and also sets the
 * vlan id for that address.
 */
static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id)
{
	struct net_device *ip_dev = NULL;
	struct in6_addr laddr6;

	if (!IS_ENABLED(CONFIG_IPV6))
		return NULL;
	i40iw_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr);
	if (vlan_id)
		*vlan_id = I40IW_NO_VLAN;
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ip_dev) {
		if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) {
			if (vlan_id)
				*vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
			break;
		}
	}
	rcu_read_unlock();
	return ip_dev;
}

/**
 * i40iw_get_vlan_ipv4 - Returns the vlan_id for IPv4 address
 * @addr: local IPv4 address
 */
static u16 i40iw_get_vlan_ipv4(u32 *addr)
{
	struct net_device *netdev;
	u16 vlan_id = I40IW_NO_VLAN;

	netdev = ip_dev_find(&init_net, htonl(addr[0]));
	if (netdev) {
		vlan_id = rdma_vlan_dev_vlan_id(netdev);
		dev_put(netdev);
	}
	return vlan_id;
}

/**
 * i40iw_add_mqh_6 - Adds multiple qhashes for IPv6
 * @iwdev: iWarp device
 * @cm_info: CM info for parent listen node
 * @cm_parent_listen_node: The parent listen node
 *
 * Adds a qhash and a child listen node for every IPv6 address
 * on the adapter and adds the associated qhash filter
 */
static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
					      struct i40iw_cm_info *cm_info,
					      struct i40iw_cm_listener *cm_parent_listen_node)
{
	struct net_device *ip_dev;
	struct inet6_dev *idev;
	struct inet6_ifaddr *ifp, *tmp;
	enum i40iw_status_code ret = 0;
	struct i40iw_cm_listener *child_listen_node;
	unsigned long flags;
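	/*
	 * Walk every netdev under RTNL and pick out the iWARP port itself
	 * plus any VLAN device stacked on it; for each IPv6 address on a
	 * matching, up interface, clone the parent listener and install a
	 * SYN qhash entry so incoming SYNs to that address reach the CM.
	 */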
	rtnl_lock();
	for_each_netdev(&init_net, ip_dev) {
		if ((((rdma_vlan_dev_vlan_id(ip_dev) < I40IW_NO_VLAN) &&
		      (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
		     (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
			idev = __in6_dev_get(ip_dev);
			if (!idev) {
				i40iw_pr_err("idev == NULL\n");
				break;
			}
			list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
				i40iw_debug(&iwdev->sc_dev,
					    I40IW_DEBUG_CM,
					    "IP=%pI6, vlan_id=%d, MAC=%pM\n",
					    &ifp->addr,
					    rdma_vlan_dev_vlan_id(ip_dev),
					    ip_dev->dev_addr);
				child_listen_node =
					kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
				i40iw_debug(&iwdev->sc_dev,
					    I40IW_DEBUG_CM,
					    "Allocating child listener %p\n",
					    child_listen_node);
				if (!child_listen_node) {
					ret = I40IW_ERR_NO_MEMORY;
					goto exit;
				}
				cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
				cm_parent_listen_node->vlan_id = cm_info->vlan_id;

				memcpy(child_listen_node, cm_parent_listen_node,
				       sizeof(*child_listen_node));

				i40iw_copy_ip_ntohl(child_listen_node->loc_addr,
						    ifp->addr.in6_u.u6_addr32);
				memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
				       sizeof(cm_info->loc_addr));

				ret = i40iw_manage_qhash(iwdev, cm_info,
							 I40IW_QHASH_TYPE_TCP_SYN,
							 I40IW_QHASH_MANAGE_TYPE_ADD,
							 NULL, true);
				if (!ret) {
					child_listen_node->qhash_set = true;
					spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
					list_add(&child_listen_node->child_listen_list,
						 &cm_parent_listen_node->child_listen_list);
					spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
					cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
				} else {
					kfree(child_listen_node);
				}
			}
		}
	}
exit:
	rtnl_unlock();
	return ret;
}

/**
 * i40iw_add_mqh_4 - Adds multiple qhashes for IPv4
 * @iwdev: iWarp device
 * @cm_info: CM info for parent listen node
 * @cm_parent_listen_node: The parent listen node
 *
 * Adds a qhash and a child listen node for every IPv4 address
 * on the adapter and adds the associated qhash filter
 */
static enum i40iw_status_code i40iw_add_mqh_4(
				struct i40iw_device *iwdev,
				struct i40iw_cm_info *cm_info,
				struct i40iw_cm_listener *cm_parent_listen_node)
{
	struct net_device *dev;
	struct in_device *idev;
	struct i40iw_cm_listener *child_listen_node;
	enum i40iw_status_code ret = 0;
	unsigned long flags;
	rtnl_lock();
	for_each_netdev(&init_net, dev) {
		if ((((rdma_vlan_dev_vlan_id(dev) < I40IW_NO_VLAN) &&
		      (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
		     (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
			idev = in_dev_get(dev);
			for_ifa(idev) {
				i40iw_debug(&iwdev->sc_dev,
					    I40IW_DEBUG_CM,
					    "Allocating child CM Listener for IP=%pI4, vlan_id=%d, MAC=%pM\n",
					    &ifa->ifa_address,
					    rdma_vlan_dev_vlan_id(dev),
					    dev->dev_addr);
				child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);
				i40iw_debug(&iwdev->sc_dev,
					    I40IW_DEBUG_CM,
					    "Allocating child listener %p\n",
					    child_listen_node);
				if (!child_listen_node) {
					in_dev_put(idev);
					ret = I40IW_ERR_NO_MEMORY;
					goto exit;
				}
				cm_info->vlan_id = rdma_vlan_dev_vlan_id(dev);
				cm_parent_listen_node->vlan_id = cm_info->vlan_id;

				memcpy(child_listen_node,
				       cm_parent_listen_node,
				       sizeof(*child_listen_node));

				child_listen_node->loc_addr[0] = ntohl(ifa->ifa_address);
				memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
				       sizeof(cm_info->loc_addr));

				ret = i40iw_manage_qhash(iwdev,
							 cm_info,
							 I40IW_QHASH_TYPE_TCP_SYN,
							 I40IW_QHASH_MANAGE_TYPE_ADD,
							 NULL,
							 true);
				if (!ret) {
					child_listen_node->qhash_set = true;
					spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
					list_add(&child_listen_node->child_listen_list,
						 &cm_parent_listen_node->child_listen_list);
					spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
					cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
				} else {
					kfree(child_listen_node);
				}
			}
			endfor_ifa(idev);
			in_dev_put(idev);
		}
	}
exit:
	rtnl_unlock();
	return ret;
}

/**
 * i40iw_dec_refcnt_listen - delete listener and associated cm nodes
 * @cm_core: cm's core
 * @listener: passive connection's listener
 * @free_hanging_nodes: to free associated cm_nodes
 * @apbvt_del: flag to delete the apbvt
 */
static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
				   struct i40iw_cm_listener *listener,
				   int free_hanging_nodes, bool apbvt_del)
{
	int ret = -EINVAL;
	int err = 0;
	struct list_head *list_pos;
	struct list_head *list_temp;
	struct i40iw_cm_node *cm_node;
	struct list_head reset_list;
	struct i40iw_cm_info nfo;
	struct i40iw_cm_node *loopback;
	enum i40iw_cm_node_state old_state;
	unsigned long flags;
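	/*
	 * Teardown happens in three steps: collect this listener's
	 * non-accelerated children under ht_lock (taking a reference on
	 * each), then reset or error-out those children outside the lock,
	 * and finally drop the caller's reference on the listener itself,
	 * freeing it once the count hits zero.
	 */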
	/* free non-accelerated child nodes for this listener */
	INIT_LIST_HEAD(&reset_list);
	if (free_hanging_nodes) {
		spin_lock_irqsave(&cm_core->ht_lock, flags);
		list_for_each_safe(list_pos,
				   list_temp, &cm_core->non_accelerated_list) {
			cm_node = container_of(list_pos, struct i40iw_cm_node, list);
			if ((cm_node->listener == listener) &&
			    !cm_node->accelerated) {
				atomic_inc(&cm_node->ref_count);
				list_add(&cm_node->reset_entry, &reset_list);
			}
		}
		spin_unlock_irqrestore(&cm_core->ht_lock, flags);
	}

	list_for_each_safe(list_pos, list_temp, &reset_list) {
		cm_node = container_of(list_pos, struct i40iw_cm_node, reset_entry);
		loopback = cm_node->loopbackpartner;
		if (cm_node->state >= I40IW_CM_STATE_FIN_WAIT1) {
			i40iw_rem_ref_cm_node(cm_node);
		} else {
			if (!loopback) {
				i40iw_cleanup_retrans_entry(cm_node);
				err = i40iw_send_reset(cm_node);
				if (err) {
					cm_node->state = I40IW_CM_STATE_CLOSED;
					i40iw_pr_err("send reset\n");
				} else {
					old_state = cm_node->state;
					cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
					if (old_state != I40IW_CM_STATE_MPAREQ_RCVD)
						i40iw_rem_ref_cm_node(cm_node);
				}
			} else {
				struct i40iw_cm_event event;

				event.cm_node = loopback;
				memcpy(event.cm_info.rem_addr,
				       loopback->rem_addr, sizeof(event.cm_info.rem_addr));
				memcpy(event.cm_info.loc_addr,
				       loopback->loc_addr, sizeof(event.cm_info.loc_addr));
				event.cm_info.rem_port = loopback->rem_port;
				event.cm_info.loc_port = loopback->loc_port;
				event.cm_info.cm_id = loopback->cm_id;
				event.cm_info.ipv4 = loopback->ipv4;
				atomic_inc(&loopback->ref_count);
				loopback->state = I40IW_CM_STATE_CLOSED;
				i40iw_event_connect_error(&event);
				cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
				i40iw_rem_ref_cm_node(cm_node);
			}
		}
	}
	if (!atomic_dec_return(&listener->ref_count)) {
		spin_lock_irqsave(&cm_core->listen_list_lock, flags);
		list_del(&listener->list);
		spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);

		if (listener->iwdev) {
			if (apbvt_del)
				i40iw_manage_apbvt(listener->iwdev,
						   listener->loc_port,
						   I40IW_MANAGE_APBVT_DEL);

			memcpy(nfo.loc_addr, listener->loc_addr, sizeof(nfo.loc_addr));
			nfo.loc_port = listener->loc_port;
			nfo.ipv4 = listener->ipv4;
			nfo.vlan_id = listener->vlan_id;
			nfo.user_pri = listener->user_pri;

			if (!list_empty(&listener->child_listen_list)) {
				i40iw_del_multiple_qhash(listener->iwdev, &nfo, listener);
			} else {
				if (listener->qhash_set)
					i40iw_manage_qhash(listener->iwdev,
							   &nfo,
							   I40IW_QHASH_TYPE_TCP_SYN,
							   I40IW_QHASH_MANAGE_TYPE_DELETE,
							   NULL,
							   false);
			}
		}

		cm_core->stats_listen_destroyed++;
		kfree(listener);
		cm_core->stats_listen_nodes_destroyed++;
		listener = NULL;
		ret = 0;
	}

	if (listener) {
		if (atomic_read(&listener->pend_accepts_cnt) > 0)
			i40iw_debug(cm_core->dev,
				    I40IW_DEBUG_CM,
				    "%s: listener (%p) pending accepts=%u\n",
				    __func__,
				    listener,
				    atomic_read(&listener->pend_accepts_cnt));
	}

	return ret;
}

/**
 * i40iw_cm_del_listen - delete a listener
 * @cm_core: cm's core
 * @listener: passive connection's listener
 * @apbvt_del: flag to delete apbvt
 */
static int i40iw_cm_del_listen(struct i40iw_cm_core *cm_core,
			       struct i40iw_cm_listener *listener,
			       bool apbvt_del)
{
	listener->listener_state = I40IW_CM_LISTENER_PASSIVE_STATE;
	listener->cm_id = NULL;	/* going to be destroyed pretty soon */
	return i40iw_dec_refcnt_listen(cm_core, listener, 1, apbvt_del);
}

/**
 * i40iw_addr_resolve_neigh - resolve neighbor address
 * @iwdev: iwarp device structure
 * @src_ip: local ip address
 * @dst_ip: remote ip address
 * @arpindex: index of the existing arp entry, or negative if there is none
 */
static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
				    u32 src_ip,
				    u32 dst_ip,
				    int arpindex)
{
	struct rtable *rt;
	struct neighbour *neigh;
	int rc = arpindex;
	__be32 dst_ipaddr = htonl(dst_ip);
	__be32 src_ipaddr = htonl(src_ip);
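	/*
	 * Route to the destination, then consult the kernel neighbour
	 * table. A NUD_VALID entry refreshes the driver's private ARP
	 * cache (replacing a stale MAC if one was passed in); otherwise
	 * neigh_event_send() kicks off neighbour discovery and the
	 * existing arpindex is returned unchanged.
	 */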
	rt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0);
	if (IS_ERR(rt)) {
		i40iw_pr_err("ip_route_output\n");
		return rc;
	}

	neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);

	rcu_read_lock();
	if (neigh) {
		if (neigh->nud_state & NUD_VALID) {
			if (arpindex >= 0) {
				if (ether_addr_equal(iwdev->arp_table[arpindex].mac_addr,
						     neigh->ha))
					/* Mac address same as arp table */
					goto resolve_neigh_exit;
				i40iw_manage_arp_cache(iwdev,
						       iwdev->arp_table[arpindex].mac_addr,
						       &dst_ip,
						       true,
						       I40IW_ARP_DELETE);
			}

			i40iw_manage_arp_cache(iwdev, neigh->ha, &dst_ip, true, I40IW_ARP_ADD);
			rc = i40iw_arp_table(iwdev, &dst_ip, true, NULL, I40IW_ARP_RESOLVE);
		} else {
			neigh_event_send(neigh, NULL);
		}
	}
resolve_neigh_exit:

	rcu_read_unlock();
	if (neigh)
		neigh_release(neigh);

	ip_rt_put(rt);
	return rc;
}

/**
 * i40iw_get_dst_ipv6 - get destination cache entry for an IPv6 pair
 * @src_addr: local IPv6 address
 * @dst_addr: remote IPv6 address
 */
static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr,
					    struct sockaddr_in6 *dst_addr)
{
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = dst_addr->sin6_addr;
	fl6.saddr = src_addr->sin6_addr;
	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
		fl6.flowi6_oif = dst_addr->sin6_scope_id;

	dst = ip6_route_output(&init_net, NULL, &fl6);
	return dst;
}

/**
 * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address
 * @iwdev: iwarp device structure
 * @src: local ipv6 address
 * @dest: remote ipv6 address
 * @arpindex: index of the existing arp entry, or negative if there is none
 */
static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
					 u32 *src,
					 u32 *dest,
					 int arpindex)
{
	struct neighbour *neigh;
	int rc = arpindex;
	struct dst_entry *dst;
	struct sockaddr_in6 dst_addr;
	struct sockaddr_in6 src_addr;

	memset(&dst_addr, 0, sizeof(dst_addr));
	dst_addr.sin6_family = AF_INET6;
	i40iw_copy_ip_htonl(dst_addr.sin6_addr.in6_u.u6_addr32, dest);
	memset(&src_addr, 0, sizeof(src_addr));
	src_addr.sin6_family = AF_INET6;
	i40iw_copy_ip_htonl(src_addr.sin6_addr.in6_u.u6_addr32, src);
	dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr);
	if (!dst || dst->error) {
		if (dst) {
			i40iw_pr_err("ip6_route_output returned dst->error = %d\n",
				     dst->error);
			dst_release(dst);
		}
		return rc;
	}

	neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32);

	rcu_read_lock();
	if (neigh) {
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "dst_neigh_lookup MAC=%pM\n", neigh->ha);
		if (neigh->nud_state & NUD_VALID) {
			if (arpindex >= 0) {
				if (ether_addr_equal
				    (iwdev->arp_table[arpindex].mac_addr,
				     neigh->ha)) {
					/* Mac address same as in arp table */
					goto resolve_neigh_exit6;
				}
				i40iw_manage_arp_cache(iwdev,
						       iwdev->arp_table[arpindex].mac_addr,
						       dest,
						       false,
						       I40IW_ARP_DELETE);
			}
			i40iw_manage_arp_cache(iwdev,
					       neigh->ha,
					       dest,
					       false,
					       I40IW_ARP_ADD);
			rc = i40iw_arp_table(iwdev,
					     dest,
					     false,
					     NULL,
					     I40IW_ARP_RESOLVE);
		} else {
			neigh_event_send(neigh, NULL);
		}
	}

resolve_neigh_exit6:
	rcu_read_unlock();
	if (neigh)
		neigh_release(neigh);
	dst_release(dst);
	return rc;
}

/**
 * i40iw_ipv4_is_loopback - check if loopback
 * @loc_addr: local addr to compare
 * @rem_addr: remote address
 */
static bool i40iw_ipv4_is_loopback(u32 loc_addr, u32 rem_addr)
{
	return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr);
}

/**
 * i40iw_ipv6_is_loopback - check if loopback
 * @loc_addr: local addr to compare
 * @rem_addr: remote address
 */
static bool i40iw_ipv6_is_loopback(u32 *loc_addr, u32 *rem_addr)
{
	struct in6_addr raddr6;

	i40iw_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr);
	return !memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6);
}

/**
 * i40iw_make_cm_node - create a new instance of a cm node
 * @cm_core: cm's core
 * @iwdev: iwarp device structure
 * @cm_info: quad info for connection
 * @listener: passive connection's listener
 */
static struct i40iw_cm_node *i40iw_make_cm_node(
				   struct i40iw_cm_core *cm_core,
				   struct i40iw_device *iwdev,
				   struct i40iw_cm_info *cm_info,
				   struct i40iw_cm_listener *listener)
{
	struct i40iw_cm_node *cm_node;
	int oldarpindex;
	int arpindex;
	struct net_device *netdev = iwdev->netdev;
	/* create an hte and cm_node for this instance */
	cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
	if (!cm_node)
		return NULL;

	/* set our node specific transport info */
	cm_node->ipv4 = cm_info->ipv4;
	cm_node->vlan_id = cm_info->vlan_id;
	if ((cm_node->vlan_id == I40IW_NO_VLAN) && iwdev->dcb)
		cm_node->vlan_id = 0;
	cm_node->tos = cm_info->tos;
	cm_node->user_pri = cm_info->user_pri;
	if (listener) {
		if (listener->tos != cm_info->tos)
			i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB,
				    "application TOS[%d] and remote client TOS[%d] mismatch\n",
				    listener->tos, cm_info->tos);
		cm_node->tos = max(listener->tos, cm_info->tos);
		cm_node->user_pri = rt_tos2priority(cm_node->tos);
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "listener: TOS:[%d] UP:[%d]\n",
			    cm_node->tos, cm_node->user_pri);
	}
	memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr));
	memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr));
	cm_node->loc_port = cm_info->loc_port;
	cm_node->rem_port = cm_info->rem_port;

	cm_node->mpa_frame_rev = iwdev->mpa_version;
	cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
	cm_node->ird_size = I40IW_MAX_IRD_SIZE;
	cm_node->ord_size = I40IW_MAX_ORD_SIZE;

	cm_node->listener = listener;
	cm_node->cm_id = cm_info->cm_id;
	ether_addr_copy(cm_node->loc_mac, netdev->dev_addr);
	spin_lock_init(&cm_node->retrans_list_lock);
	cm_node->ack_rcvd = false;

	atomic_set(&cm_node->ref_count, 1);
	/* associate our parent CM core */
	cm_node->cm_core = cm_core;
	cm_node->tcp_cntxt.loc_id = I40IW_CM_DEF_LOCAL_ID;
	cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
	cm_node->tcp_cntxt.rcv_wnd =
			I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE;
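	/*
	 * Seed the initial send sequence number from the kernel's secure
	 * ISN helpers for this four-tuple, and derive the MSS from the
	 * VSI MTU (I40IW_MTU_TO_MSS_IPV4/6 presumably covering the fixed
	 * IP plus TCP header overhead).
	 */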
	if (cm_node->ipv4) {
		cm_node->tcp_cntxt.loc_seq_num = secure_tcp_seq(htonl(cm_node->loc_addr[0]),
								htonl(cm_node->rem_addr[0]),
								htons(cm_node->loc_port),
								htons(cm_node->rem_port));
		cm_node->tcp_cntxt.mss = iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV4;
	} else if (IS_ENABLED(CONFIG_IPV6)) {
		__be32 loc[4] = {
			htonl(cm_node->loc_addr[0]), htonl(cm_node->loc_addr[1]),
			htonl(cm_node->loc_addr[2]), htonl(cm_node->loc_addr[3])
		};
		__be32 rem[4] = {
			htonl(cm_node->rem_addr[0]), htonl(cm_node->rem_addr[1]),
			htonl(cm_node->rem_addr[2]), htonl(cm_node->rem_addr[3])
		};
		cm_node->tcp_cntxt.loc_seq_num = secure_tcpv6_seq(loc, rem,
								  htons(cm_node->loc_port),
								  htons(cm_node->rem_port));
		cm_node->tcp_cntxt.mss = iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV6;
	}

	cm_node->iwdev = iwdev;
	cm_node->dev = &iwdev->sc_dev;

	if ((cm_node->ipv4 &&
	     i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
	    (!cm_node->ipv4 && i40iw_ipv6_is_loopback(cm_node->loc_addr,
						      cm_node->rem_addr))) {
		arpindex = i40iw_arp_table(iwdev,
					   cm_node->rem_addr,
					   false,
					   NULL,
					   I40IW_ARP_RESOLVE);
	} else {
		oldarpindex = i40iw_arp_table(iwdev,
					      cm_node->rem_addr,
					      false,
					      NULL,
					      I40IW_ARP_RESOLVE);
		if (cm_node->ipv4)
			arpindex = i40iw_addr_resolve_neigh(iwdev,
							    cm_info->loc_addr[0],
							    cm_info->rem_addr[0],
							    oldarpindex);
		else if (IS_ENABLED(CONFIG_IPV6))
			arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev,
								 cm_info->loc_addr,
								 cm_info->rem_addr,
								 oldarpindex);
		else
			arpindex = -EINVAL;
	}
	if (arpindex < 0) {
		i40iw_pr_err("cm_node arpindex\n");
		kfree(cm_node);
		return NULL;
	}
	ether_addr_copy(cm_node->rem_mac, iwdev->arp_table[arpindex].mac_addr);
	i40iw_add_hte_node(cm_core, cm_node);
	cm_core->stats_nodes_created++;
	return cm_node;
}

/**
 * i40iw_rem_ref_cm_node - drop a reference to a cm node, destroying it on the last put
 * @cm_node: connection's node
 */
static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
{
	struct i40iw_cm_core *cm_core = cm_node->cm_core;
	struct i40iw_qp *iwqp;
	struct i40iw_cm_info nfo;
	unsigned long flags;
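	/*
	 * The refcount is dropped under ht_lock so the node cannot be
	 * found on (or removed from) the connection lists concurrently;
	 * only the final put proceeds to unlink the node and tear down
	 * its apbvt/qhash state before freeing it.
	 */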
	spin_lock_irqsave(&cm_node->cm_core->ht_lock, flags);
	if (atomic_dec_return(&cm_node->ref_count)) {
		spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);
		return;
	}
	list_del(&cm_node->list);
	spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags);

	/* if the node is destroyed before connection was accelerated */
	if (!cm_node->accelerated && cm_node->accept_pend) {
		pr_err("node destroyed before established\n");
		atomic_dec(&cm_node->listener->pend_accepts_cnt);
	}
	if (cm_node->close_entry)
		i40iw_handle_close_entry(cm_node, 0);
	if (cm_node->listener) {
		i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
	} else {
		if (cm_node->apbvt_set) {
			i40iw_manage_apbvt(cm_node->iwdev,
					   cm_node->loc_port,
					   I40IW_MANAGE_APBVT_DEL);
			cm_node->apbvt_set = 0;
		}
		i40iw_get_addr_info(cm_node, &nfo);
		if (cm_node->qhash_set) {
			i40iw_manage_qhash(cm_node->iwdev,
					   &nfo,
					   I40IW_QHASH_TYPE_TCP_ESTABLISHED,
					   I40IW_QHASH_MANAGE_TYPE_DELETE,
					   NULL,
					   false);
			cm_node->qhash_set = 0;
		}
	}

	iwqp = cm_node->iwqp;
	if (iwqp) {
		iwqp->cm_node = NULL;
		i40iw_rem_ref(&iwqp->ibqp);
		cm_node->iwqp = NULL;
	} else if (cm_node->qhash_set) {
		i40iw_get_addr_info(cm_node, &nfo);
		i40iw_manage_qhash(cm_node->iwdev,
				   &nfo,
				   I40IW_QHASH_TYPE_TCP_ESTABLISHED,
				   I40IW_QHASH_MANAGE_TYPE_DELETE,
				   NULL,
				   false);
		cm_node->qhash_set = 0;
	}

	cm_node->cm_core->stats_nodes_destroyed++;
	kfree(cm_node);
}

/**
 * i40iw_handle_fin_pkt - FIN packet received
 * @cm_node: connection's node
 */
static void i40iw_handle_fin_pkt(struct i40iw_cm_node *cm_node)
{
	u32 ret;
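	/*
	 * This mirrors the TCP close state machine for the software side
	 * of the connection: each state advances rcv_nxt past the FIN and
	 * answers with the segment (FIN, ACK, or RST) that classic TCP
	 * would emit from the same state.
	 */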
	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_RCVD:
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_MPAREJ_RCVD:
		cm_node->tcp_cntxt.rcv_nxt++;
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_LAST_ACK;
		i40iw_send_fin(cm_node);
		break;
	case I40IW_CM_STATE_MPAREQ_SENT:
		i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED);
		cm_node->tcp_cntxt.rcv_nxt++;
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSED;
		atomic_inc(&cm_node->ref_count);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_FIN_WAIT1:
		cm_node->tcp_cntxt.rcv_nxt++;
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSING;
		i40iw_send_ack(cm_node);
		/*
		 * Wait for ACK as this is simultaneous close.
		 * After we receive ACK, do not send anything.
		 * Just rm the node.
		 */
		break;
	case I40IW_CM_STATE_FIN_WAIT2:
		cm_node->tcp_cntxt.rcv_nxt++;
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_TIME_WAIT;
		i40iw_send_ack(cm_node);
		ret = i40iw_schedule_cm_timer(cm_node, NULL, I40IW_TIMER_TYPE_CLOSE, 1, 0);
		if (ret)
			i40iw_pr_err("node %p state = %d\n", cm_node, cm_node->state);
		break;
	case I40IW_CM_STATE_TIME_WAIT:
		cm_node->tcp_cntxt.rcv_nxt++;
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSED;
		i40iw_rem_ref_cm_node(cm_node);
		break;
	case I40IW_CM_STATE_OFFLOADED:
	default:
		i40iw_pr_err("bad state node %p state = %d\n", cm_node, cm_node->state);
		break;
	}
}

/**
 * i40iw_handle_rst_pkt - process received RST packet
 * @cm_node: connection's node
 * @rbuf: receive buffer
 */
static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node,
				 struct i40iw_puda_buf *rbuf)
{
	i40iw_cleanup_retrans_entry(cm_node);
	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_MPAREQ_SENT:
		switch (cm_node->mpa_frame_rev) {
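		/*
		 * A RST in answer to an MPA v2 active open presumably means
		 * the peer does not accept MPA v2: fall back to MPA v1 and
		 * restart from SYN_SENT before declaring the open failed.
		 */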
		case IETF_MPA_V2:
			cm_node->mpa_frame_rev = IETF_MPA_V1;
			/* send a syn and goto syn sent state */
			cm_node->state = I40IW_CM_STATE_SYN_SENT;
			if (i40iw_send_syn(cm_node, 0))
				i40iw_active_open_err(cm_node, false);
			break;
		case IETF_MPA_V1:
		default:
			i40iw_active_open_err(cm_node, false);
			break;
		}
		break;
	case I40IW_CM_STATE_MPAREQ_RCVD:
		atomic_add_return(1, &cm_node->passive_state);
		break;
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_SYN_RCVD:
	case I40IW_CM_STATE_LISTENING:
		i40iw_pr_err("Bad state state = %d\n", cm_node->state);
		i40iw_passive_open_err(cm_node, false);
		break;
	case I40IW_CM_STATE_OFFLOADED:
		i40iw_active_open_err(cm_node, false);
		break;
	case I40IW_CM_STATE_CLOSED:
		break;
	case I40IW_CM_STATE_FIN_WAIT2:
	case I40IW_CM_STATE_FIN_WAIT1:
	case I40IW_CM_STATE_LAST_ACK:
		cm_node->cm_id->rem_ref(cm_node->cm_id);
		/* fall through */
	case I40IW_CM_STATE_TIME_WAIT:
		cm_node->state = I40IW_CM_STATE_CLOSED;
		i40iw_rem_ref_cm_node(cm_node);
		break;
	default:
		break;
	}
}

/**
 * i40iw_handle_rcv_mpa - Process a recv'd mpa buffer
 * @cm_node: connection's node
 * @rbuf: receive buffer
 */
static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node,
				 struct i40iw_puda_buf *rbuf)
{
	int ret;
	int datasize = rbuf->datalen;
	u8 *dataloc = rbuf->data;

	enum i40iw_cm_event_type type = I40IW_CM_EVENT_UNKNOWN;
	u32 res_type;

	ret = i40iw_parse_mpa(cm_node, dataloc, &res_type, datasize);
	if (ret) {
		if (cm_node->state == I40IW_CM_STATE_MPAREQ_SENT)
			i40iw_active_open_err(cm_node, true);
		else
			i40iw_passive_open_err(cm_node, true);
		return;
	}

	switch (cm_node->state) {
	case I40IW_CM_STATE_ESTABLISHED:
		if (res_type == I40IW_MPA_REQUEST_REJECT)
			i40iw_pr_err("state for reject\n");
		cm_node->state = I40IW_CM_STATE_MPAREQ_RCVD;
		type = I40IW_CM_EVENT_MPA_REQ;
		i40iw_send_ack(cm_node);	/* ACK received MPA request */
		atomic_set(&cm_node->passive_state,
			   I40IW_PASSIVE_STATE_INDICATED);
		break;
	case I40IW_CM_STATE_MPAREQ_SENT:
		i40iw_cleanup_retrans_entry(cm_node);
		if (res_type == I40IW_MPA_REQUEST_REJECT) {
			type = I40IW_CM_EVENT_MPA_REJECT;
			cm_node->state = I40IW_CM_STATE_MPAREJ_RCVD;
		} else {
			type = I40IW_CM_EVENT_CONNECTED;
			cm_node->state = I40IW_CM_STATE_OFFLOADED;
		}
		i40iw_send_ack(cm_node);
		break;
	default:
		pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state);
		break;
	}
	i40iw_create_event(cm_node, type);
}

/**
 * i40iw_indicate_pkt_err - Send up err event to cm
 * @cm_node: connection's node
 */
static void i40iw_indicate_pkt_err(struct i40iw_cm_node *cm_node)
{
	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_MPAREQ_SENT:
		i40iw_active_open_err(cm_node, true);
		break;
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_SYN_RCVD:
		i40iw_passive_open_err(cm_node, true);
		break;
	case I40IW_CM_STATE_OFFLOADED:
	default:
		break;
	}
}

/**
 * i40iw_check_syn - Check for error on received syn ack
 * @cm_node: connection's node
 * @tcph: pointer to tcp header
 */
static int i40iw_check_syn(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)
{
	int err = 0;

	if (ntohl(tcph->ack_seq) != cm_node->tcp_cntxt.loc_seq_num) {
		err = 1;
		i40iw_active_open_err(cm_node, true);
	}
	return err;
}

/**
 * i40iw_check_seq - check seq numbers if OK
 * @cm_node: connection's node
 * @tcph: pointer to tcp header
 */
static int i40iw_check_seq(struct i40iw_cm_node *cm_node, struct tcphdr *tcph)
{
	int err = 0;
	u32 seq;
	u32 ack_seq;
	u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num;
	u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt;
	u32 rcv_wnd;
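	/*
	 * A segment is acceptable only if it acks exactly our current
	 * send sequence number and its own sequence number falls inside
	 * the receive window [rcv_nxt, rcv_nxt + rcv_wnd); anything else
	 * is reported upward as a packet error.
	 */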
	seq = ntohl(tcph->seq);
	ack_seq = ntohl(tcph->ack_seq);
	rcv_wnd = cm_node->tcp_cntxt.rcv_wnd;
	if (ack_seq != loc_seq_num)
		err = -1;
	else if (!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd)))
		err = -1;
	if (err) {
		i40iw_pr_err("seq number\n");
		i40iw_indicate_pkt_err(cm_node);
	}
	return err;
}

/**
 * i40iw_handle_syn_pkt - process a received SYN (passive side)
 * @cm_node: connection's node
 * @rbuf: receive buffer
 */
static void i40iw_handle_syn_pkt(struct i40iw_cm_node *cm_node,
				 struct i40iw_puda_buf *rbuf)
{
	struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
	int ret;
	u32 inc_sequence;
	int optionsize;
	struct i40iw_cm_info nfo;

	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
	inc_sequence = ntohl(tcph->seq);
	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_MPAREQ_SENT:
		/* Rcvd syn on active open connection */
		i40iw_active_open_err(cm_node, 1);
		break;
	case I40IW_CM_STATE_LISTENING:
		/* Passive OPEN */
		if (atomic_read(&cm_node->listener->pend_accepts_cnt) >
		    cm_node->listener->backlog) {
			cm_node->cm_core->stats_backlog_drops++;
			i40iw_passive_open_err(cm_node, false);
			break;
		}
		ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
		if (ret) {
			i40iw_passive_open_err(cm_node, false);
			/* drop pkt */
			break;
		}
		cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
		cm_node->accept_pend = 1;
		atomic_inc(&cm_node->listener->pend_accepts_cnt);

		cm_node->state = I40IW_CM_STATE_SYN_RCVD;
		i40iw_get_addr_info(cm_node, &nfo);
		ret = i40iw_manage_qhash(cm_node->iwdev,
					 &nfo,
					 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
					 I40IW_QHASH_MANAGE_TYPE_ADD,
					 (void *)cm_node,
					 false);
		cm_node->qhash_set = true;
		break;
	case I40IW_CM_STATE_CLOSED:
		i40iw_cleanup_retrans_entry(cm_node);
		atomic_inc(&cm_node->ref_count);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_OFFLOADED:
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_FIN_WAIT1:
	case I40IW_CM_STATE_FIN_WAIT2:
	case I40IW_CM_STATE_MPAREQ_RCVD:
	case I40IW_CM_STATE_LAST_ACK:
	case I40IW_CM_STATE_CLOSING:
	case I40IW_CM_STATE_UNKNOWN:
	default:
		break;
	}
}

/**
 * i40iw_handle_synack_pkt - Process SYN+ACK packet (active side)
 * @cm_node: connection's node
 * @rbuf: receive buffer
 */
static void i40iw_handle_synack_pkt(struct i40iw_cm_node *cm_node,
				    struct i40iw_puda_buf *rbuf)
{
	struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
	int ret;
	u32 inc_sequence;
	int optionsize;

	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);
	inc_sequence = ntohl(tcph->seq);
	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_SENT:
		i40iw_cleanup_retrans_entry(cm_node);
		/* active open */
		if (i40iw_check_syn(cm_node, tcph)) {
			i40iw_pr_err("check syn fail\n");
			return;
		}
		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
		/* setup options */
		ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 0);
		if (ret) {
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "cm_node=%p tcp_options failed\n",
				    cm_node);
			break;
		}
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1;
		i40iw_send_ack(cm_node);	/* ACK for the syn_ack */
		ret = i40iw_send_mpa_request(cm_node);
		if (ret) {
			i40iw_debug(cm_node->dev,
				    I40IW_DEBUG_CM,
				    "cm_node=%p i40iw_send_mpa_request failed\n",
				    cm_node);
			break;
		}
		cm_node->state = I40IW_CM_STATE_MPAREQ_SENT;
		break;
	case I40IW_CM_STATE_MPAREQ_RCVD:
		i40iw_passive_open_err(cm_node, true);
		break;
	case I40IW_CM_STATE_LISTENING:
		cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSED;
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_CLOSED:
		cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq);
		i40iw_cleanup_retrans_entry(cm_node);
		atomic_inc(&cm_node->ref_count);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_FIN_WAIT1:
	case I40IW_CM_STATE_FIN_WAIT2:
	case I40IW_CM_STATE_LAST_ACK:
	case I40IW_CM_STATE_OFFLOADED:
	case I40IW_CM_STATE_CLOSING:
	case I40IW_CM_STATE_UNKNOWN:
	case I40IW_CM_STATE_MPAREQ_SENT:
	default:
		break;
	}
}

/**
 * i40iw_handle_ack_pkt - process packet with ACK
 * @cm_node: connection's node
 * @rbuf: receive buffer
 */
static int i40iw_handle_ack_pkt(struct i40iw_cm_node *cm_node,
				struct i40iw_puda_buf *rbuf)
{
	struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
	u32 inc_sequence;
	int ret = 0;
	int optionsize;
	u32 datasize = rbuf->datalen;

	optionsize = (tcph->doff << 2) - sizeof(struct tcphdr);

	if (i40iw_check_seq(cm_node, tcph))
		return -EINVAL;

	inc_sequence = ntohl(tcph->seq);
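	/*
	 * Any payload carried on an ACK in these pre-offload states is an
	 * MPA frame, so the cases below advance rcv_nxt by the data length
	 * and hand the buffer to i40iw_handle_rcv_mpa().
	 */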
	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_RCVD:
		i40iw_cleanup_retrans_entry(cm_node);
		ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1);
		if (ret)
			break;
		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
		cm_node->state = I40IW_CM_STATE_ESTABLISHED;
		if (datasize) {
			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
			i40iw_handle_rcv_mpa(cm_node, rbuf);
		}
		break;
	case I40IW_CM_STATE_ESTABLISHED:
		i40iw_cleanup_retrans_entry(cm_node);
		if (datasize) {
			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
			i40iw_handle_rcv_mpa(cm_node, rbuf);
		}
		break;
	case I40IW_CM_STATE_MPAREQ_SENT:
		cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq);
		if (datasize) {
			cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
			cm_node->ack_rcvd = false;
			i40iw_handle_rcv_mpa(cm_node, rbuf);
		} else {
			cm_node->ack_rcvd = true;
		}
		break;
	case I40IW_CM_STATE_LISTENING:
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSED;
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_CLOSED:
		i40iw_cleanup_retrans_entry(cm_node);
		atomic_inc(&cm_node->ref_count);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_LAST_ACK:
	case I40IW_CM_STATE_CLOSING:
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_CLOSED;
		if (!cm_node->accept_pend)
			cm_node->cm_id->rem_ref(cm_node->cm_id);
		i40iw_rem_ref_cm_node(cm_node);
		break;
	case I40IW_CM_STATE_FIN_WAIT1:
		i40iw_cleanup_retrans_entry(cm_node);
		cm_node->state = I40IW_CM_STATE_FIN_WAIT2;
		break;
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_FIN_WAIT2:
	case I40IW_CM_STATE_OFFLOADED:
	case I40IW_CM_STATE_MPAREQ_RCVD:
	case I40IW_CM_STATE_UNKNOWN:
	default:
		i40iw_cleanup_retrans_entry(cm_node);
		break;
	}
	return ret;
}

/**
 * i40iw_process_packet - process cm packet
 * @cm_node: connection's node
 * @rbuf: receive buffer
 */
static void i40iw_process_packet(struct i40iw_cm_node *cm_node,
				 struct i40iw_puda_buf *rbuf)
{
	enum i40iw_tcpip_pkt_type pkt_type = I40IW_PKT_TYPE_UNKNOWN;
	struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph;
	u32 fin_set = 0;
	int ret;

	if (tcph->rst) {
		pkt_type = I40IW_PKT_TYPE_RST;
	} else if (tcph->syn) {
		pkt_type = I40IW_PKT_TYPE_SYN;
		if (tcph->ack)
			pkt_type = I40IW_PKT_TYPE_SYNACK;
	} else if (tcph->ack) {
		pkt_type = I40IW_PKT_TYPE_ACK;
	}
	if (tcph->fin)
		fin_set = 1;
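	/*
	 * FIN is tracked separately rather than as its own packet type so
	 * that a FIN piggybacked on a valid ACK is processed only after
	 * the ACK itself has been accepted.
	 */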
	switch (pkt_type) {
	case I40IW_PKT_TYPE_SYN:
		i40iw_handle_syn_pkt(cm_node, rbuf);
		break;
	case I40IW_PKT_TYPE_SYNACK:
		i40iw_handle_synack_pkt(cm_node, rbuf);
		break;
	case I40IW_PKT_TYPE_ACK:
		ret = i40iw_handle_ack_pkt(cm_node, rbuf);
		if (fin_set && !ret)
			i40iw_handle_fin_pkt(cm_node);
		break;
	case I40IW_PKT_TYPE_RST:
		i40iw_handle_rst_pkt(cm_node, rbuf);
		break;
	default:
		if (fin_set &&
		    (!i40iw_check_seq(cm_node, (struct tcphdr *)rbuf->tcph)))
			i40iw_handle_fin_pkt(cm_node);
		break;
	}
}

/**
 * i40iw_make_listen_node - create a listen node with params
 * @cm_core: cm's core
 * @iwdev: iwarp device structure
 * @cm_info: quad info for connection
 */
static struct i40iw_cm_listener *i40iw_make_listen_node(
					struct i40iw_cm_core *cm_core,
					struct i40iw_device *iwdev,
					struct i40iw_cm_info *cm_info)
{
	struct i40iw_cm_listener *listener;
	unsigned long flags;

	/* cannot have multiple matching listeners */
	listener = i40iw_find_listener(cm_core, cm_info->loc_addr,
				       cm_info->loc_port,
				       cm_info->vlan_id,
				       I40IW_CM_LISTENER_EITHER_STATE);
	if (listener &&
	    (listener->listener_state == I40IW_CM_LISTENER_ACTIVE_STATE)) {
		atomic_dec(&listener->ref_count);
		i40iw_debug(cm_core->dev,
			    I40IW_DEBUG_CM,
			    "Not creating listener since it already exists\n");
		return NULL;
	}
	if (!listener) {
		/* create a CM listen node (1/2 node to compare incoming traffic to) */
		listener = kzalloc(sizeof(*listener), GFP_KERNEL);
		if (!listener)
			return NULL;
		cm_core->stats_listen_nodes_created++;
		memcpy(listener->loc_addr, cm_info->loc_addr, sizeof(listener->loc_addr));
		listener->loc_port = cm_info->loc_port;

		INIT_LIST_HEAD(&listener->child_listen_list);

		atomic_set(&listener->ref_count, 1);
	} else {
		listener->reused_node = 1;
	}

	listener->cm_id = cm_info->cm_id;
	listener->ipv4 = cm_info->ipv4;
	listener->vlan_id = cm_info->vlan_id;
	atomic_set(&listener->pend_accepts_cnt, 0);
	listener->cm_core = cm_core;
	listener->iwdev = iwdev;
	listener->backlog = cm_info->backlog;
	listener->listener_state = I40IW_CM_LISTENER_ACTIVE_STATE;

	if (!listener->reused_node) {
		spin_lock_irqsave(&cm_core->listen_list_lock, flags);
		list_add(&listener->list, &cm_core->listen_nodes);
		spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
	}

	return listener;
}

/**
 * i40iw_create_cm_node - make a connection node with params
 * @cm_core: cm's core
 * @iwdev: iwarp device structure
 * @conn_param: upper layer connection parameters
 * @cm_info: quad info for connection
 */
static struct i40iw_cm_node *i40iw_create_cm_node(
				struct i40iw_cm_core *cm_core,
				struct i40iw_device *iwdev,
				struct iw_cm_conn_param *conn_param,
				struct i40iw_cm_info *cm_info)
{
	struct i40iw_cm_node *cm_node;
	struct i40iw_cm_listener *loopback_remotelistener;
	struct i40iw_cm_node *loopback_remotenode;
	struct i40iw_cm_info loopback_cm_info;
	u16 private_data_len = conn_param->private_data_len;
	const void *private_data = conn_param->private_data;

	/* create a CM connection node */
	cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);
	if (!cm_node)
		return ERR_PTR(-ENOMEM);
	/* set our node side to client (active) side */
	cm_node->tcp_cntxt.client = 1;
	cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;

	i40iw_record_ird_ord(cm_node, conn_param->ird, conn_param->ord);
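	/*
	 * If the local and remote addresses match, this connect never
	 * touches the wire: pair the active node with a node made from
	 * the local listener and cross-wire the two TCP contexts
	 * (sequence numbers, windows, scales) so both sides look
	 * established without any SYN exchange.
	 */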
  2665. if (!memcmp(cm_info->loc_addr, cm_info->rem_addr, sizeof(cm_info->loc_addr))) {
  2666. loopback_remotelistener = i40iw_find_listener(
  2667. cm_core,
  2668. cm_info->rem_addr,
  2669. cm_node->rem_port,
  2670. cm_node->vlan_id,
  2671. I40IW_CM_LISTENER_ACTIVE_STATE);
  2672. if (!loopback_remotelistener) {
  2673. i40iw_rem_ref_cm_node(cm_node);
  2674. return ERR_PTR(-ECONNREFUSED);
  2675. } else {
  2676. loopback_cm_info = *cm_info;
  2677. loopback_cm_info.loc_port = cm_info->rem_port;
  2678. loopback_cm_info.rem_port = cm_info->loc_port;
  2679. loopback_cm_info.cm_id = loopback_remotelistener->cm_id;
  2680. loopback_cm_info.ipv4 = cm_info->ipv4;
  2681. loopback_remotenode = i40iw_make_cm_node(cm_core,
  2682. iwdev,
  2683. &loopback_cm_info,
  2684. loopback_remotelistener);
  2685. if (!loopback_remotenode) {
  2686. i40iw_rem_ref_cm_node(cm_node);
  2687. return ERR_PTR(-ENOMEM);
  2688. }
  2689. cm_core->stats_loopbacks++;
  2690. loopback_remotenode->loopbackpartner = cm_node;
  2691. loopback_remotenode->tcp_cntxt.rcv_wscale =
  2692. I40IW_CM_DEFAULT_RCV_WND_SCALE;
  2693. cm_node->loopbackpartner = loopback_remotenode;
  2694. memcpy(loopback_remotenode->pdata_buf, private_data,
  2695. private_data_len);
  2696. loopback_remotenode->pdata.size = private_data_len;
  2697. if (loopback_remotenode->ord_size > cm_node->ird_size)
  2698. loopback_remotenode->ord_size =
  2699. cm_node->ird_size;
  2700. cm_node->state = I40IW_CM_STATE_OFFLOADED;
  2701. cm_node->tcp_cntxt.rcv_nxt =
  2702. loopback_remotenode->tcp_cntxt.loc_seq_num;
  2703. loopback_remotenode->tcp_cntxt.rcv_nxt =
  2704. cm_node->tcp_cntxt.loc_seq_num;
  2705. cm_node->tcp_cntxt.max_snd_wnd =
  2706. loopback_remotenode->tcp_cntxt.rcv_wnd;
  2707. loopback_remotenode->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
  2708. cm_node->tcp_cntxt.snd_wnd = loopback_remotenode->tcp_cntxt.rcv_wnd;
  2709. loopback_remotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
  2710. cm_node->tcp_cntxt.snd_wscale = loopback_remotenode->tcp_cntxt.rcv_wscale;
  2711. loopback_remotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
  2712. }
  2713. return cm_node;
  2714. }
  2715. cm_node->pdata.size = private_data_len;
  2716. cm_node->pdata.addr = cm_node->pdata_buf;
  2717. memcpy(cm_node->pdata_buf, private_data, private_data_len);
  2718. cm_node->state = I40IW_CM_STATE_SYN_SENT;
  2719. return cm_node;
  2720. }
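/*
 * Loopback cross-wiring above, summarized (illustrative sketch, not driver
 * code; a and b stand for the two partner nodes): each node's receive state
 * mirrors its partner's send state, e.g.
 *
 *	a->tcp_cntxt.rcv_nxt = b->tcp_cntxt.loc_seq_num;
 *	b->tcp_cntxt.rcv_nxt = a->tcp_cntxt.loc_seq_num;
 *	a->tcp_cntxt.snd_wnd = b->tcp_cntxt.rcv_wnd;
 *	b->tcp_cntxt.snd_wnd = a->tcp_cntxt.rcv_wnd;
 *
 * so no SYN ever reaches the wire: the active node exits in
 * I40IW_CM_STATE_OFFLOADED, while the non-loopback path leaves in
 * I40IW_CM_STATE_SYN_SENT for the normal handshake.
 */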
/**
 * i40iw_cm_reject - reject and teardown a connection
 * @cm_node: connection's node
 * @pdata: ptr to private data for reject
 * @plen: size of private data
 */
static int i40iw_cm_reject(struct i40iw_cm_node *cm_node, const void *pdata, u8 plen)
{
	int ret = 0;
	int err;
	int passive_state;
	struct iw_cm_id *cm_id = cm_node->cm_id;
	struct i40iw_cm_node *loopback = cm_node->loopbackpartner;

	if (cm_node->tcp_cntxt.client)
		return ret;
	i40iw_cleanup_retrans_entry(cm_node);

	if (!loopback) {
		passive_state = atomic_add_return(1, &cm_node->passive_state);
		if (passive_state == I40IW_SEND_RESET_EVENT) {
			cm_node->state = I40IW_CM_STATE_CLOSED;
			i40iw_rem_ref_cm_node(cm_node);
		} else {
			if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
				i40iw_rem_ref_cm_node(cm_node);
			} else {
				ret = i40iw_send_mpa_reject(cm_node, pdata, plen);
				if (ret) {
					cm_node->state = I40IW_CM_STATE_CLOSED;
					err = i40iw_send_reset(cm_node);
					if (err)
						i40iw_pr_err("send reset failed\n");
				} else {
					cm_id->add_ref(cm_id);
				}
			}
		}
	} else {
		cm_node->cm_id = NULL;
		if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
			i40iw_rem_ref_cm_node(cm_node);
			i40iw_rem_ref_cm_node(loopback);
		} else {
			ret = i40iw_send_cm_event(loopback,
						  loopback->cm_id,
						  IW_CM_EVENT_CONNECT_REPLY,
						  -ECONNREFUSED);
			i40iw_rem_ref_cm_node(cm_node);
			loopback->state = I40IW_CM_STATE_CLOSING;

			cm_id = loopback->cm_id;
			i40iw_rem_ref_cm_node(loopback);
			cm_id->rem_ref(cm_id);
		}
	}

	return ret;
}
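/*
 * Note on passive_state above: atomic_add_return() appears to arbitrate
 * between this reject path and an inbound-reset path that bumps the same
 * counter (i40iw_accept() uses the identical pattern). Whichever side
 * increments second observes I40IW_SEND_RESET_EVENT and only drops its
 * node reference, so the MPA reject and the reset teardown are not both
 * issued for the same node.
 */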
/**
 * i40iw_cm_close - close of cm connection
 * @cm_node: connection's node
 */
static int i40iw_cm_close(struct i40iw_cm_node *cm_node)
{
	int ret = 0;

	if (!cm_node)
		return -EINVAL;

	switch (cm_node->state) {
	case I40IW_CM_STATE_SYN_RCVD:
	case I40IW_CM_STATE_SYN_SENT:
	case I40IW_CM_STATE_ONE_SIDE_ESTABLISHED:
	case I40IW_CM_STATE_ESTABLISHED:
	case I40IW_CM_STATE_ACCEPTING:
	case I40IW_CM_STATE_MPAREQ_SENT:
	case I40IW_CM_STATE_MPAREQ_RCVD:
		i40iw_cleanup_retrans_entry(cm_node);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_CLOSE_WAIT:
		cm_node->state = I40IW_CM_STATE_LAST_ACK;
		i40iw_send_fin(cm_node);
		break;
	case I40IW_CM_STATE_FIN_WAIT1:
	case I40IW_CM_STATE_FIN_WAIT2:
	case I40IW_CM_STATE_LAST_ACK:
	case I40IW_CM_STATE_TIME_WAIT:
	case I40IW_CM_STATE_CLOSING:
		ret = -1;
		break;
	case I40IW_CM_STATE_LISTENING:
		i40iw_cleanup_retrans_entry(cm_node);
		i40iw_send_reset(cm_node);
		break;
	case I40IW_CM_STATE_MPAREJ_RCVD:
	case I40IW_CM_STATE_UNKNOWN:
	case I40IW_CM_STATE_INITED:
	case I40IW_CM_STATE_CLOSED:
	case I40IW_CM_STATE_LISTENER_DESTROYED:
		i40iw_rem_ref_cm_node(cm_node);
		break;
	case I40IW_CM_STATE_OFFLOADED:
		if (cm_node->send_entry)
			i40iw_pr_err("send_entry\n");
		i40iw_rem_ref_cm_node(cm_node);
		break;
	}
	return ret;
}
/**
 * i40iw_receive_ilq - recv an ETHERNET packet, and process it
 * through CM
 * @vsi: pointer to the vsi structure
 * @rbuf: receive buffer
 */
void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf)
{
	struct i40iw_cm_node *cm_node;
	struct i40iw_cm_listener *listener;
	struct iphdr *iph;
	struct ipv6hdr *ip6h;
	struct tcphdr *tcph;
	struct i40iw_cm_info cm_info;
	struct i40iw_sc_dev *dev = vsi->dev;
	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
	struct i40iw_cm_core *cm_core = &iwdev->cm_core;
	struct vlan_ethhdr *ethh;
	u16 vtag;

	/* if vlan, then maclen = 18 else 14 */
	iph = (struct iphdr *)rbuf->iph;
	memset(&cm_info, 0, sizeof(cm_info));

	i40iw_debug_buf(dev,
			I40IW_DEBUG_ILQ,
			"RECEIVE ILQ BUFFER",
			rbuf->mem.va,
			rbuf->totallen);
	ethh = (struct vlan_ethhdr *)rbuf->mem.va;

	if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) {
		vtag = ntohs(ethh->h_vlan_TCI);
		cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		cm_info.vlan_id = vtag & VLAN_VID_MASK;
		i40iw_debug(cm_core->dev,
			    I40IW_DEBUG_CM,
			    "%s vlan_id=%d\n",
			    __func__,
			    cm_info.vlan_id);
	} else {
		cm_info.vlan_id = I40IW_NO_VLAN;
	}
	tcph = (struct tcphdr *)rbuf->tcph;

	if (rbuf->ipv4) {
		cm_info.loc_addr[0] = ntohl(iph->daddr);
		cm_info.rem_addr[0] = ntohl(iph->saddr);
		cm_info.ipv4 = true;
		cm_info.tos = iph->tos;
	} else {
		ip6h = (struct ipv6hdr *)rbuf->iph;
		i40iw_copy_ip_ntohl(cm_info.loc_addr,
				    ip6h->daddr.in6_u.u6_addr32);
		i40iw_copy_ip_ntohl(cm_info.rem_addr,
				    ip6h->saddr.in6_u.u6_addr32);
		cm_info.ipv4 = false;
		cm_info.tos = (ip6h->priority << 4) | (ip6h->flow_lbl[0] >> 4);
	}
	cm_info.loc_port = ntohs(tcph->dest);
	cm_info.rem_port = ntohs(tcph->source);
	cm_node = i40iw_find_node(cm_core,
				  cm_info.rem_port,
				  cm_info.rem_addr,
				  cm_info.loc_port,
				  cm_info.loc_addr,
				  true,
				  false);

	if (!cm_node) {
		/* The only type of packet accepted here is a SYN for
		 * the PASSIVE open (SYN without ACK)
		 */
		if (!tcph->syn || tcph->ack)
			return;
		listener =
		    i40iw_find_listener(cm_core,
					cm_info.loc_addr,
					cm_info.loc_port,
					cm_info.vlan_id,
					I40IW_CM_LISTENER_ACTIVE_STATE);
		if (!listener) {
			cm_info.cm_id = NULL;
			i40iw_debug(cm_core->dev,
				    I40IW_DEBUG_CM,
				    "%s no listener found\n",
				    __func__);
			return;
		}
		cm_info.cm_id = listener->cm_id;
		cm_node = i40iw_make_cm_node(cm_core, iwdev, &cm_info, listener);
		if (!cm_node) {
			i40iw_debug(cm_core->dev,
				    I40IW_DEBUG_CM,
				    "%s allocate node failed\n",
				    __func__);
			atomic_dec(&listener->ref_count);
			return;
		}
		if (!tcph->rst && !tcph->fin) {
			cm_node->state = I40IW_CM_STATE_LISTENING;
		} else {
			i40iw_rem_ref_cm_node(cm_node);
			return;
		}
		atomic_inc(&cm_node->ref_count);
	} else if (cm_node->state == I40IW_CM_STATE_OFFLOADED) {
		i40iw_rem_ref_cm_node(cm_node);
		return;
	}
	i40iw_process_packet(cm_node, rbuf);
	i40iw_rem_ref_cm_node(cm_node);
}
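/*
 * The 802.1Q TCI split used above, spelled out (illustrative only): the
 * 16-bit TCI packs PCP(3 bits) | DEI(1 bit) | VID(12 bits), so with the
 * standard kernel masks:
 *
 *	u16 vtag = ntohs(ethh->h_vlan_TCI);
 *	u8 pri = (vtag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;  top 3 bits
 *	u16 vid = vtag & VLAN_VID_MASK;                       low 12 bits
 */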
/**
 * i40iw_setup_cm_core - allocate a top level instance of a cm
 * core
 * @iwdev: iwarp device structure
 */
void i40iw_setup_cm_core(struct i40iw_device *iwdev)
{
	struct i40iw_cm_core *cm_core = &iwdev->cm_core;

	cm_core->iwdev = iwdev;
	cm_core->dev = &iwdev->sc_dev;

	INIT_LIST_HEAD(&cm_core->accelerated_list);
	INIT_LIST_HEAD(&cm_core->non_accelerated_list);
	INIT_LIST_HEAD(&cm_core->listen_nodes);

	timer_setup(&cm_core->tcp_timer, i40iw_cm_timer_tick, 0);

	spin_lock_init(&cm_core->ht_lock);
	spin_lock_init(&cm_core->listen_list_lock);
	spin_lock_init(&cm_core->apbvt_lock);

	cm_core->event_wq = alloc_ordered_workqueue("iwewq",
						    WQ_MEM_RECLAIM);
	cm_core->disconn_wq = alloc_ordered_workqueue("iwdwq",
						      WQ_MEM_RECLAIM);
}
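/*
 * Note: alloc_ordered_workqueue() can fail and return NULL, and this setup
 * path does not check for that. A defensive caller would look roughly like
 * the sketch below (illustrative only; fail_cm_core is a hypothetical
 * unwind label, not driver code):
 *
 *	i40iw_setup_cm_core(iwdev);
 *	if (!iwdev->cm_core.event_wq || !iwdev->cm_core.disconn_wq)
 *		goto fail_cm_core;
 */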
/**
 * i40iw_cleanup_cm_core - deallocate a top level instance of a
 * cm core
 * @cm_core: cm's core
 */
void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core)
{
	unsigned long flags;

	if (!cm_core)
		return;

	spin_lock_irqsave(&cm_core->ht_lock, flags);
	if (timer_pending(&cm_core->tcp_timer))
		del_timer_sync(&cm_core->tcp_timer);
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);

	destroy_workqueue(cm_core->event_wq);
	destroy_workqueue(cm_core->disconn_wq);
}
/**
 * i40iw_init_tcp_ctx - setup qp context
 * @cm_node: connection's node
 * @tcp_info: offload info for tcp
 * @iwqp: associate qp for the connection
 */
static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
			       struct i40iw_tcp_offload_info *tcp_info,
			       struct i40iw_qp *iwqp)
{
	tcp_info->ipv4 = cm_node->ipv4;
	tcp_info->drop_ooo_seg = true;
	tcp_info->wscale = true;
	tcp_info->ignore_tcp_opt = true;
	tcp_info->ignore_tcp_uns_opt = true;
	tcp_info->no_nagle = false;

	tcp_info->ttl = I40IW_DEFAULT_TTL;
	tcp_info->rtt_var = cpu_to_le32(I40IW_DEFAULT_RTT_VAR);
	tcp_info->ss_thresh = cpu_to_le32(I40IW_DEFAULT_SS_THRESH);
	tcp_info->rexmit_thresh = I40IW_DEFAULT_REXMIT_THRESH;

	tcp_info->tcp_state = I40IW_TCP_STATE_ESTABLISHED;
	tcp_info->snd_wscale = cm_node->tcp_cntxt.snd_wscale;
	tcp_info->rcv_wscale = cm_node->tcp_cntxt.rcv_wscale;

	tcp_info->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
	tcp_info->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd);
	tcp_info->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
	tcp_info->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);

	tcp_info->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
	tcp_info->cwnd = cpu_to_le32(2 * cm_node->tcp_cntxt.mss);
	tcp_info->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt);
	tcp_info->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
	tcp_info->max_snd_window = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd);
	tcp_info->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd <<
					cm_node->tcp_cntxt.rcv_wscale);

	tcp_info->flow_label = 0;
	tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss));
	if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
		tcp_info->insert_vlan_tag = true;
		tcp_info->vlan_tag = cpu_to_le16(((u16)cm_node->user_pri << I40IW_VLAN_PRIO_SHIFT) |
						  cm_node->vlan_id);
	}
	if (cm_node->ipv4) {
		tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
		tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);

		tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[0]);
		tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[0]);
		tcp_info->arp_idx =
			cpu_to_le16((u16)i40iw_arp_table(
							 iwqp->iwdev,
							 &tcp_info->dest_ip_addr3,
							 true,
							 NULL,
							 I40IW_ARP_RESOLVE));
	} else {
		tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
		tcp_info->dst_port = cpu_to_le16(cm_node->rem_port);
		tcp_info->dest_ip_addr0 = cpu_to_le32(cm_node->rem_addr[0]);
		tcp_info->dest_ip_addr1 = cpu_to_le32(cm_node->rem_addr[1]);
		tcp_info->dest_ip_addr2 = cpu_to_le32(cm_node->rem_addr[2]);
		tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[3]);
		tcp_info->local_ipaddr0 = cpu_to_le32(cm_node->loc_addr[0]);
		tcp_info->local_ipaddr1 = cpu_to_le32(cm_node->loc_addr[1]);
		tcp_info->local_ipaddr2 = cpu_to_le32(cm_node->loc_addr[2]);
		tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[3]);
		tcp_info->arp_idx =
			cpu_to_le16((u16)i40iw_arp_table(
							 iwqp->iwdev,
							 &tcp_info->dest_ip_addr0,
							 false,
							 NULL,
							 I40IW_ARP_RESOLVE));
	}
}
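/*
 * Byte order in i40iw_init_tcp_ctx(): the multi-byte tcp_info fields are
 * converted with cpu_to_le16/cpu_to_le32 because the hardware consumes the
 * offload context in little-endian layout, e.g. (from the code above):
 *
 *	tcp_info->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num);
 *
 * Single-byte fields such as ttl, snd_wscale and rcv_wscale need no
 * conversion.
 */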
/**
 * i40iw_cm_init_tsa_conn - setup qp for RTS
 * @iwqp: associate qp for the connection
 * @cm_node: connection's node
 */
static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp,
				   struct i40iw_cm_node *cm_node)
{
	struct i40iw_tcp_offload_info tcp_info;
	struct i40iwarp_offload_info *iwarp_info;
	struct i40iw_qp_host_ctx_info *ctx_info;
	struct i40iw_device *iwdev = iwqp->iwdev;
	struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;

	memset(&tcp_info, 0x00, sizeof(struct i40iw_tcp_offload_info));
	iwarp_info = &iwqp->iwarp_info;
	ctx_info = &iwqp->ctx_info;

	ctx_info->tcp_info = &tcp_info;
	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;

	iwarp_info->ord_size = cm_node->ord_size;
	iwarp_info->ird_size = i40iw_derive_hw_ird_setting(cm_node->ird_size);

	if (iwarp_info->ord_size == 1)
		iwarp_info->ord_size = 2;

	iwarp_info->rd_enable = true;
	iwarp_info->rdmap_ver = 1;
	iwarp_info->ddp_ver = 1;

	iwarp_info->pd_id = iwqp->iwpd->sc_pd.pd_id;

	ctx_info->tcp_info_valid = true;
	ctx_info->iwarp_info_valid = true;
	ctx_info->add_to_qoslist = true;
	ctx_info->user_pri = cm_node->user_pri;

	i40iw_init_tcp_ctx(cm_node, &tcp_info, iwqp);
	if (cm_node->snd_mark_en) {
		iwarp_info->snd_mark_en = true;
		iwarp_info->snd_mark_offset = (tcp_info.snd_nxt &
				SNDMARKER_SEQNMASK) + cm_node->lsmm_size;
	}

	cm_node->state = I40IW_CM_STATE_OFFLOADED;
	tcp_info.tcp_state = I40IW_TCP_STATE_ESTABLISHED;
	tcp_info.src_mac_addr_idx = iwdev->mac_ip_table_idx;
	tcp_info.tos = cm_node->tos;

	dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, (u64 *)(iwqp->host_ctx.va), ctx_info);

	/* once tcp_info is set, no need to do it again */
	ctx_info->tcp_info_valid = false;
	ctx_info->iwarp_info_valid = false;
	ctx_info->add_to_qoslist = false;
}
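/*
 * Note: tcp_info in i40iw_cm_init_tsa_conn() is a stack variable, and
 * ctx_info->tcp_info points at it only for the duration of the qp_setctx()
 * call above; tcp_info_valid is cleared immediately afterwards, so the
 * then-dangling pointer is never consumed again.
 */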
/**
 * i40iw_cm_disconn - when a connection is being closed
 * @iwqp: associate qp for the connection
 */
void i40iw_cm_disconn(struct i40iw_qp *iwqp)
{
	struct disconn_work *work;
	struct i40iw_device *iwdev = iwqp->iwdev;
	struct i40iw_cm_core *cm_core = &iwdev->cm_core;
	unsigned long flags;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return;	/* Timer will clean up */

	spin_lock_irqsave(&iwdev->qptable_lock, flags);
	if (!iwdev->qp_table[iwqp->ibqp.qp_num]) {
		spin_unlock_irqrestore(&iwdev->qptable_lock, flags);
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
			    "%s qp_id %d is already freed\n",
			    __func__, iwqp->ibqp.qp_num);
		kfree(work);
		return;
	}
	i40iw_add_ref(&iwqp->ibqp);
	spin_unlock_irqrestore(&iwdev->qptable_lock, flags);

	work->iwqp = iwqp;
	INIT_WORK(&work->work, i40iw_disconnect_worker);
	queue_work(cm_core->disconn_wq, &work->work);
}
/**
 * i40iw_qp_disconnect - free qp and close cm
 * @iwqp: associate qp for the connection
 */
static void i40iw_qp_disconnect(struct i40iw_qp *iwqp)
{
	struct i40iw_device *iwdev;
	struct i40iw_ib_device *iwibdev;

	iwdev = to_iwdev(iwqp->ibqp.device);
	if (!iwdev) {
		i40iw_pr_err("iwdev == NULL\n");
		return;
	}

	iwibdev = iwdev->iwibdev;

	if (iwqp->active_conn) {
		/* indicate this connection is NOT active */
		iwqp->active_conn = 0;
	} else {
		/* Need to free the Last Streaming Mode Message */
		if (iwqp->ietf_mem.va) {
			if (iwqp->lsmm_mr)
				iwibdev->ibdev.dereg_mr(iwqp->lsmm_mr);
			i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem);
		}
	}

	/* close the CM node down if it is still active */
	if (iwqp->cm_node) {
		i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "%s Call close API\n", __func__);
		i40iw_cm_close(iwqp->cm_node);
	}
}
/**
 * i40iw_cm_disconn_true - called by worker thread to disconnect qp
 * @iwqp: associate qp for the connection
 */
static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp)
{
	struct iw_cm_id *cm_id;
	struct i40iw_device *iwdev;
	struct i40iw_sc_qp *qp = &iwqp->sc_qp;
	u16 last_ae;
	u8 original_hw_tcp_state;
	u8 original_ibqp_state;
	int disconn_status = 0;
	int issue_disconn = 0;
	int issue_close = 0;
	int issue_flush = 0;
	struct ib_event ibevent;
	unsigned long flags;
	int ret;

	if (!iwqp) {
		i40iw_pr_err("iwqp == NULL\n");
		return;
	}

	spin_lock_irqsave(&iwqp->lock, flags);
	cm_id = iwqp->cm_id;
	/* make sure we haven't already closed this connection */
	if (!cm_id) {
		spin_unlock_irqrestore(&iwqp->lock, flags);
		return;
	}

	iwdev = to_iwdev(iwqp->ibqp.device);

	original_hw_tcp_state = iwqp->hw_tcp_state;
	original_ibqp_state = iwqp->ibqp_state;
	last_ae = iwqp->last_aeq;

	if (qp->term_flags) {
		issue_disconn = 1;
		issue_close = 1;
		iwqp->cm_id = NULL;
		/* When term timer expires after cm_timer, don't want
		 * terminate-handler to issue cm_disconn which can re-free
		 * a QP even after its refcnt=0.
		 */
		i40iw_terminate_del_timer(qp);
		if (!iwqp->flush_issued) {
			iwqp->flush_issued = 1;
			issue_flush = 1;
		}
	} else if ((original_hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) ||
		   ((original_ibqp_state == IB_QPS_RTS) &&
		    (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) {
		issue_disconn = 1;
		if (last_ae == I40IW_AE_LLP_CONNECTION_RESET)
			disconn_status = -ECONNRESET;
	}

	if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) ||
	     (original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) ||
	     (last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) ||
	     (last_ae == I40IW_AE_LLP_CONNECTION_RESET) ||
	      iwdev->reset)) {
		issue_close = 1;
		iwqp->cm_id = NULL;
		if (!iwqp->flush_issued) {
			iwqp->flush_issued = 1;
			issue_flush = 1;
		}
	}

	spin_unlock_irqrestore(&iwqp->lock, flags);
	if (issue_flush && !iwqp->destroyed) {
		/* Flush the queues */
		i40iw_flush_wqes(iwdev, iwqp);

		if (qp->term_flags && iwqp->ibqp.event_handler) {
			ibevent.device = iwqp->ibqp.device;
			ibevent.event = (qp->eventtype == TERM_EVENT_QP_FATAL) ?
					IB_EVENT_QP_FATAL : IB_EVENT_QP_ACCESS_ERR;
			ibevent.element.qp = &iwqp->ibqp;
			iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
		}
	}

	if (cm_id && cm_id->event_handler) {
		if (issue_disconn) {
			ret = i40iw_send_cm_event(NULL,
						  cm_id,
						  IW_CM_EVENT_DISCONNECT,
						  disconn_status);

			if (ret)
				i40iw_debug(&iwdev->sc_dev,
					    I40IW_DEBUG_CM,
					    "disconnect event failed %s: - cm_id = %p\n",
					    __func__, cm_id);
		}
		if (issue_close) {
			i40iw_qp_disconnect(iwqp);
			cm_id->provider_data = iwqp;
			ret = i40iw_send_cm_event(NULL, cm_id, IW_CM_EVENT_CLOSE, 0);
			if (ret)
				i40iw_debug(&iwdev->sc_dev,
					    I40IW_DEBUG_CM,
					    "close event failed %s: - cm_id = %p\n",
					    __func__, cm_id);
			cm_id->rem_ref(cm_id);
		}
	}
}
/**
 * i40iw_disconnect_worker - worker for connection close
 * @work: pointer to disconn structure
 */
static void i40iw_disconnect_worker(struct work_struct *work)
{
	struct disconn_work *dwork = container_of(work, struct disconn_work, work);
	struct i40iw_qp *iwqp = dwork->iwqp;

	kfree(dwork);
	i40iw_cm_disconn_true(iwqp);
	i40iw_rem_ref(&iwqp->ibqp);
}
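/*
 * Reference pairing for the deferred disconnect: i40iw_cm_disconn() takes
 * i40iw_add_ref(&iwqp->ibqp) before queuing the work, and this worker drops
 * it with i40iw_rem_ref() after i40iw_cm_disconn_true() has run, keeping
 * the QP alive across the workqueue handoff.
 */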
/**
 * i40iw_accept - registered call for connection to be accepted
 * @cm_id: cm information for passive connection
 * @conn_param: accept parameters
 */
int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct ib_qp *ibqp;
	struct i40iw_qp *iwqp;
	struct i40iw_device *iwdev;
	struct i40iw_sc_dev *dev;
	struct i40iw_cm_core *cm_core;
	struct i40iw_cm_node *cm_node;
	struct ib_qp_attr attr;
	int passive_state;
	struct ib_mr *ibmr;
	struct i40iw_pd *iwpd;
	u16 buf_len = 0;
	struct i40iw_kmem_info accept;
	enum i40iw_status_code status;
	u64 tagged_offset;
	unsigned long flags;

	memset(&attr, 0, sizeof(attr));
	ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
	if (!ibqp)
		return -EINVAL;

	iwqp = to_iwqp(ibqp);
	iwdev = iwqp->iwdev;
	dev = &iwdev->sc_dev;
	cm_core = &iwdev->cm_core;
	cm_node = (struct i40iw_cm_node *)cm_id->provider_data;

	if (((struct sockaddr_in *)&cm_id->local_addr)->sin_family == AF_INET) {
		cm_node->ipv4 = true;
		cm_node->vlan_id = i40iw_get_vlan_ipv4(cm_node->loc_addr);
	} else {
		cm_node->ipv4 = false;
		i40iw_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id);
	}
	i40iw_debug(cm_node->dev,
		    I40IW_DEBUG_CM,
		    "Accept vlan_id=%d\n",
		    cm_node->vlan_id);

	if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) {
		if (cm_node->loopbackpartner)
			i40iw_rem_ref_cm_node(cm_node->loopbackpartner);
		i40iw_rem_ref_cm_node(cm_node);
		return -EINVAL;
	}

	passive_state = atomic_add_return(1, &cm_node->passive_state);
	if (passive_state == I40IW_SEND_RESET_EVENT) {
		i40iw_rem_ref_cm_node(cm_node);
		return -ECONNRESET;
	}

	cm_node->cm_core->stats_accepts++;
	iwqp->cm_node = (void *)cm_node;
	cm_node->iwqp = iwqp;

	buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE;

	status = i40iw_allocate_dma_mem(dev->hw, &iwqp->ietf_mem, buf_len, 1);

	if (status)
		return -ENOMEM;
	cm_node->pdata.size = conn_param->private_data_len;
	accept.addr = iwqp->ietf_mem.va;
	accept.size = i40iw_cm_build_mpa_frame(cm_node, &accept, MPA_KEY_REPLY);
	memcpy(accept.addr + accept.size, conn_param->private_data,
	       conn_param->private_data_len);

	/* setup our first outgoing iWarp send WQE (the IETF frame response) */
	if ((cm_node->ipv4 &&
	     !i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) ||
	    (!cm_node->ipv4 &&
	     !i40iw_ipv6_is_loopback(cm_node->loc_addr, cm_node->rem_addr))) {
		iwpd = iwqp->iwpd;
		tagged_offset = (uintptr_t)iwqp->ietf_mem.va;
		ibmr = i40iw_reg_phys_mr(&iwpd->ibpd,
					 iwqp->ietf_mem.pa,
					 buf_len,
					 IB_ACCESS_LOCAL_WRITE,
					 &tagged_offset);
		if (IS_ERR(ibmr)) {
			i40iw_free_dma_mem(dev->hw, &iwqp->ietf_mem);
			return -ENOMEM;
		}

		ibmr->pd = &iwpd->ibpd;
		ibmr->device = iwpd->ibpd.device;
		iwqp->lsmm_mr = ibmr;
		if (iwqp->page)
			iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
		dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp,
						  iwqp->ietf_mem.va,
						  (accept.size + conn_param->private_data_len),
						  ibmr->lkey);
	} else {
		if (iwqp->page)
			iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
		dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp, NULL, 0, 0);
	}

	if (iwqp->page)
		kunmap(iwqp->page);

	iwqp->cm_id = cm_id;
	cm_node->cm_id = cm_id;

	cm_id->provider_data = (void *)iwqp;
	iwqp->active_conn = 0;

	cm_node->lsmm_size = accept.size + conn_param->private_data_len;
	i40iw_cm_init_tsa_conn(iwqp, cm_node);
	cm_id->add_ref(cm_id);
	i40iw_add_ref(&iwqp->ibqp);

	attr.qp_state = IB_QPS_RTS;
	cm_node->qhash_set = false;
	i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);

	cm_node->accelerated = true;
	spin_lock_irqsave(&cm_core->ht_lock, flags);
	list_move_tail(&cm_node->list, &cm_core->accelerated_list);
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);

	status =
		i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
	if (status)
		i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - ESTABLISHED\n");

	if (cm_node->loopbackpartner) {
		cm_node->loopbackpartner->pdata.size = conn_param->private_data_len;

		/* copy entire MPA frame to our cm_node's frame */
		memcpy(cm_node->loopbackpartner->pdata_buf,
		       conn_param->private_data,
		       conn_param->private_data_len);
		i40iw_create_event(cm_node->loopbackpartner, I40IW_CM_EVENT_CONNECTED);
	}

	if (cm_node->accept_pend) {
		atomic_dec(&cm_node->listener->pend_accepts_cnt);
		cm_node->accept_pend = 0;
	}
	return 0;
}
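/*
 * Passive-side accept flow above, summarized: (1) the MPA reply plus the
 * caller's private data is built into the DMA-able ietf_mem buffer; (2) for
 * non-loopback peers that buffer is registered and posted as the Last
 * Streaming Mode Message via qp_send_lsmm(), so it is the first data sent
 * once the QP is offloaded; (3) the TCP/iWARP context is programmed
 * (i40iw_cm_init_tsa_conn), the QP moves to RTS, and
 * IW_CM_EVENT_ESTABLISHED is reported upward.
 */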
/**
 * i40iw_reject - registered call for connection to be rejected
 * @cm_id: cm information for passive connection
 * @pdata: private data to be sent
 * @pdata_len: private data length
 */
int i40iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
{
	struct i40iw_device *iwdev;
	struct i40iw_cm_node *cm_node;
	struct i40iw_cm_node *loopback;

	cm_node = (struct i40iw_cm_node *)cm_id->provider_data;
	loopback = cm_node->loopbackpartner;
	cm_node->cm_id = cm_id;
	cm_node->pdata.size = pdata_len;

	iwdev = to_iwdev(cm_id->device);
	if (!iwdev)
		return -EINVAL;
	cm_node->cm_core->stats_rejects++;

	if (pdata_len + sizeof(struct ietf_mpa_v2) > MAX_CM_BUFFER)
		return -EINVAL;

	if (loopback) {
		memcpy(&loopback->pdata_buf, pdata, pdata_len);
		loopback->pdata.size = pdata_len;
	}

	return i40iw_cm_reject(cm_node, pdata, pdata_len);
}
/**
 * i40iw_connect - registered call for connection to be established
 * @cm_id: cm information for active connection
 * @conn_param: Information about the connection
 */
int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
	struct ib_qp *ibqp;
	struct i40iw_qp *iwqp;
	struct i40iw_device *iwdev;
	struct i40iw_cm_node *cm_node;
	struct i40iw_cm_info cm_info;
	struct sockaddr_in *laddr;
	struct sockaddr_in *raddr;
	struct sockaddr_in6 *laddr6;
	struct sockaddr_in6 *raddr6;
	int ret = 0;

	ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
	if (!ibqp)
		return -EINVAL;
	iwqp = to_iwqp(ibqp);
	if (!iwqp)
		return -EINVAL;
	iwdev = to_iwdev(iwqp->ibqp.device);
	if (!iwdev)
		return -EINVAL;

	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
	raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
	raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;

	if (!(laddr->sin_port) || !(raddr->sin_port))
		return -EINVAL;

	iwqp->active_conn = 1;
	iwqp->cm_id = NULL;
	cm_id->provider_data = iwqp;

	/* set up the connection params for the node */
	if (cm_id->remote_addr.ss_family == AF_INET) {
		cm_info.ipv4 = true;
		memset(cm_info.loc_addr, 0, sizeof(cm_info.loc_addr));
		memset(cm_info.rem_addr, 0, sizeof(cm_info.rem_addr));
		cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
		cm_info.rem_addr[0] = ntohl(raddr->sin_addr.s_addr);
		cm_info.loc_port = ntohs(laddr->sin_port);
		cm_info.rem_port = ntohs(raddr->sin_port);
		cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);
	} else {
		cm_info.ipv4 = false;
		i40iw_copy_ip_ntohl(cm_info.loc_addr,
				    laddr6->sin6_addr.in6_u.u6_addr32);
		i40iw_copy_ip_ntohl(cm_info.rem_addr,
				    raddr6->sin6_addr.in6_u.u6_addr32);
		cm_info.loc_port = ntohs(laddr6->sin6_port);
		cm_info.rem_port = ntohs(raddr6->sin6_port);
		i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id);
	}
	cm_info.cm_id = cm_id;
	cm_info.tos = cm_id->tos;
	cm_info.user_pri = rt_tos2priority(cm_id->tos);
	i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n",
		    __func__, cm_id->tos, cm_info.user_pri);
	cm_id->add_ref(cm_id);
	cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev,
				       conn_param, &cm_info);

	if (IS_ERR(cm_node)) {
		ret = PTR_ERR(cm_node);
		cm_id->rem_ref(cm_id);
		return ret;
	}

	if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
	    (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
				     raddr6->sin6_addr.in6_u.u6_addr32,
				     sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
		if (i40iw_manage_qhash(iwdev, &cm_info, I40IW_QHASH_TYPE_TCP_ESTABLISHED,
				       I40IW_QHASH_MANAGE_TYPE_ADD, NULL, true)) {
			ret = -EINVAL;
			goto err;
		}
		cm_node->qhash_set = true;
	}

	if (i40iw_manage_apbvt(iwdev, cm_info.loc_port,
			       I40IW_MANAGE_APBVT_ADD)) {
		ret = -EINVAL;
		goto err;
	}

	cm_node->apbvt_set = true;
	iwqp->cm_node = cm_node;
	cm_node->iwqp = iwqp;
	iwqp->cm_id = cm_id;
	i40iw_add_ref(&iwqp->ibqp);

	if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
		cm_node->state = I40IW_CM_STATE_SYN_SENT;
		ret = i40iw_send_syn(cm_node, 0);
		if (ret)
			goto err;
	}

	if (cm_node->loopbackpartner) {
		cm_node->loopbackpartner->state = I40IW_CM_STATE_MPAREQ_RCVD;
		i40iw_create_event(cm_node->loopbackpartner,
				   I40IW_CM_EVENT_MPA_REQ);
	}

	i40iw_debug(cm_node->dev,
		    I40IW_DEBUG_CM,
		    "Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n",
		    cm_node->rem_port,
		    cm_node,
		    cm_node->cm_id);

	return 0;

err:
	if (cm_info.ipv4)
		i40iw_debug(&iwdev->sc_dev,
			    I40IW_DEBUG_CM,
			    "Api - connect() FAILED: dest addr=%pI4",
			    cm_info.rem_addr);
	else
		i40iw_debug(&iwdev->sc_dev,
			    I40IW_DEBUG_CM,
			    "Api - connect() FAILED: dest addr=%pI6",
			    cm_info.rem_addr);

	i40iw_rem_ref_cm_node(cm_node);
	cm_id->rem_ref(cm_id);
	iwdev->cm_core.stats_connect_errs++;
	return ret;
}
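/*
 * Active-side ordering above: the quad-hash entry (for non-loopback
 * addresses) and the APBVT port entry are both installed before
 * i40iw_send_syn(), so the hardware can steer the returning SYN-ACK; on
 * failure the err label relies on i40iw_rem_ref_cm_node() dropping the
 * node, which is expected to unwind whatever qhash_set/apbvt_set recorded.
 */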
/**
 * i40iw_create_listen - registered call creating listener
 * @cm_id: cm information for passive connection
 * @backlog: maximum pending accepts count
 */
int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct i40iw_device *iwdev;
	struct i40iw_cm_listener *cm_listen_node;
	struct i40iw_cm_info cm_info;
	enum i40iw_status_code ret;
	struct sockaddr_in *laddr;
	struct sockaddr_in6 *laddr6;
	bool wildcard = false;

	iwdev = to_iwdev(cm_id->device);
	if (!iwdev)
		return -EINVAL;

	laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
	laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
	memset(&cm_info, 0, sizeof(cm_info));
	if (laddr->sin_family == AF_INET) {
		cm_info.ipv4 = true;
		cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr);
		cm_info.loc_port = ntohs(laddr->sin_port);

		if (laddr->sin_addr.s_addr != INADDR_ANY)
			cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr);
		else
			wildcard = true;
	} else {
		cm_info.ipv4 = false;
		i40iw_copy_ip_ntohl(cm_info.loc_addr,
				    laddr6->sin6_addr.in6_u.u6_addr32);
		cm_info.loc_port = ntohs(laddr6->sin6_port);
		if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY)
			i40iw_netdev_vlan_ipv6(cm_info.loc_addr,
					       &cm_info.vlan_id);
		else
			wildcard = true;
	}
	cm_info.backlog = backlog;
	cm_info.cm_id = cm_id;

	cm_listen_node = i40iw_make_listen_node(&iwdev->cm_core, iwdev, &cm_info);
	if (!cm_listen_node) {
		i40iw_pr_err("cm_listen_node == NULL\n");
		return -ENOMEM;
	}

	cm_id->provider_data = cm_listen_node;

	cm_listen_node->tos = cm_id->tos;
	cm_listen_node->user_pri = rt_tos2priority(cm_id->tos);
	cm_info.user_pri = cm_listen_node->user_pri;

	if (!cm_listen_node->reused_node) {
		if (wildcard) {
			if (cm_info.ipv4)
				ret = i40iw_add_mqh_4(iwdev,
						      &cm_info,
						      cm_listen_node);
			else
				ret = i40iw_add_mqh_6(iwdev,
						      &cm_info,
						      cm_listen_node);
			if (ret)
				goto error;

			ret = i40iw_manage_apbvt(iwdev,
						 cm_info.loc_port,
						 I40IW_MANAGE_APBVT_ADD);
			if (ret)
				goto error;
		} else {
			ret = i40iw_manage_qhash(iwdev,
						 &cm_info,
						 I40IW_QHASH_TYPE_TCP_SYN,
						 I40IW_QHASH_MANAGE_TYPE_ADD,
						 NULL,
						 true);
			if (ret)
				goto error;
			cm_listen_node->qhash_set = true;
			ret = i40iw_manage_apbvt(iwdev,
						 cm_info.loc_port,
						 I40IW_MANAGE_APBVT_ADD);
			if (ret)
				goto error;
		}
	}
	cm_id->add_ref(cm_id);
	cm_listen_node->cm_core->stats_listen_created++;
	return 0;
error:
	i40iw_cm_del_listen(&iwdev->cm_core, (void *)cm_listen_node, false);
	return -EINVAL;
}
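/*
 * Note: a wildcard listen (INADDR_ANY or the IPv6 any-address) cannot be
 * represented by one quad-hash entry, so the path above fans it out with
 * i40iw_add_mqh_4/6(), creating a child listen node (and qhash entry) per
 * local interface address; a specific-address listen needs only a single
 * qhash entry plus the APBVT port entry.
 */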
/**
 * i40iw_destroy_listen - registered call to destroy listener
 * @cm_id: cm information for passive connection
 */
int i40iw_destroy_listen(struct iw_cm_id *cm_id)
{
	struct i40iw_device *iwdev;

	iwdev = to_iwdev(cm_id->device);
	if (cm_id->provider_data)
		i40iw_cm_del_listen(&iwdev->cm_core, cm_id->provider_data, true);
	else
		i40iw_pr_err("cm_id->provider_data was NULL\n");

	cm_id->rem_ref(cm_id);

	return 0;
}
/**
 * i40iw_cm_event_connected - handle connected active node
 * @event: the info for cm_node of connection
 */
static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
{
	struct i40iw_qp *iwqp;
	struct i40iw_device *iwdev;
	struct i40iw_cm_core *cm_core;
	struct i40iw_cm_node *cm_node;
	struct i40iw_sc_dev *dev;
	struct ib_qp_attr attr;
	struct iw_cm_id *cm_id;
	unsigned long flags;
	int status;
	bool read0;

	cm_node = event->cm_node;
	cm_id = cm_node->cm_id;
	iwqp = (struct i40iw_qp *)cm_id->provider_data;
	iwdev = to_iwdev(iwqp->ibqp.device);
	dev = &iwdev->sc_dev;
	cm_core = &iwdev->cm_core;

	if (iwqp->destroyed) {
		status = -ETIMEDOUT;
		goto error;
	}
	i40iw_cm_init_tsa_conn(iwqp, cm_node);
	read0 = (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO);
	if (iwqp->page)
		iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page);
	dev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0);
	if (iwqp->page)
		kunmap(iwqp->page);

	memset(&attr, 0, sizeof(attr));
	attr.qp_state = IB_QPS_RTS;
	cm_node->qhash_set = false;
	i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);

	cm_node->accelerated = true;
	spin_lock_irqsave(&cm_core->ht_lock, flags);
	list_move_tail(&cm_node->list, &cm_core->accelerated_list);
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);
	status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
				     0);
	if (status)
		i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - CONNECT_REPLY\n");

	return;

error:
	iwqp->cm_id = NULL;
	cm_id->provider_data = NULL;
	i40iw_send_cm_event(event->cm_node,
			    cm_id,
			    IW_CM_EVENT_CONNECT_REPLY,
			    status);
	cm_id->rem_ref(cm_id);
	i40iw_rem_ref_cm_node(event->cm_node);
}
/**
 * i40iw_cm_event_reset - handle reset
 * @event: the info for cm_node of connection
 */
static void i40iw_cm_event_reset(struct i40iw_cm_event *event)
{
	struct i40iw_cm_node *cm_node = event->cm_node;
	struct iw_cm_id *cm_id = cm_node->cm_id;
	struct i40iw_qp *iwqp;

	if (!cm_id)
		return;

	iwqp = cm_id->provider_data;
	if (!iwqp)
		return;

	i40iw_debug(cm_node->dev,
		    I40IW_DEBUG_CM,
		    "reset event %p - cm_id = %p\n",
		    event->cm_node, cm_id);
	iwqp->cm_id = NULL;

	i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT, -ECONNRESET);
	i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CLOSE, 0);
}
/**
 * i40iw_cm_event_handler - worker thread callback to send event to cm upper layer
 * @work: pointer to cm event info
 */
static void i40iw_cm_event_handler(struct work_struct *work)
{
	struct i40iw_cm_event *event = container_of(work,
						    struct i40iw_cm_event,
						    event_work);
	struct i40iw_cm_node *cm_node;

	if (!event || !event->cm_node || !event->cm_node->cm_core)
		return;

	cm_node = event->cm_node;

	switch (event->type) {
	case I40IW_CM_EVENT_MPA_REQ:
		i40iw_send_cm_event(cm_node,
				    cm_node->cm_id,
				    IW_CM_EVENT_CONNECT_REQUEST,
				    0);
		break;
	case I40IW_CM_EVENT_RESET:
		i40iw_cm_event_reset(event);
		break;
	case I40IW_CM_EVENT_CONNECTED:
		if (!event->cm_node->cm_id ||
		    (event->cm_node->state != I40IW_CM_STATE_OFFLOADED))
			break;
		i40iw_cm_event_connected(event);
		break;
	case I40IW_CM_EVENT_MPA_REJECT:
		if (!event->cm_node->cm_id ||
		    (cm_node->state == I40IW_CM_STATE_OFFLOADED))
			break;
		i40iw_send_cm_event(cm_node,
				    cm_node->cm_id,
				    IW_CM_EVENT_CONNECT_REPLY,
				    -ECONNREFUSED);
		break;
	case I40IW_CM_EVENT_ABORTED:
		if (!event->cm_node->cm_id ||
		    (event->cm_node->state == I40IW_CM_STATE_OFFLOADED))
			break;
		i40iw_event_connect_error(event);
		break;
	default:
		i40iw_pr_err("event type = %d\n", event->type);
		break;
	}

	event->cm_info.cm_id->rem_ref(event->cm_info.cm_id);
	i40iw_rem_ref_cm_node(event->cm_node);
	kfree(event);
}
/**
 * i40iw_cm_post_event - queue event request for worker thread
 * @event: cm node's info for up event call
 */
static void i40iw_cm_post_event(struct i40iw_cm_event *event)
{
	atomic_inc(&event->cm_node->ref_count);
	event->cm_info.cm_id->add_ref(event->cm_info.cm_id);
	INIT_WORK(&event->event_work, i40iw_cm_event_handler);

	queue_work(event->cm_node->cm_core->event_wq, &event->event_work);
}
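/*
 * Reference pairing for posted events: i40iw_cm_post_event() takes a
 * reference on both the cm_node and the cm_id before queuing, and the
 * matching releases sit at the bottom of i40iw_cm_event_handler(), which
 * also frees the event, so every posted event is balanced exactly once
 * regardless of its type.
 */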
/**
 * i40iw_qhash_ctrl - enable/disable qhash for list
 * @iwdev: device pointer
 * @parent_listen_node: parent listen node
 * @nfo: cm info node
 * @ipaddr: Pointer to IPv4 or IPv6 address
 * @ipv4: flag indicating IPv4 when true
 * @ifup: flag indicating interface up when true
 *
 * Enables or disables the qhash for the node in the child
 * listen list that matches ipaddr. If no matching IP was found
 * it will allocate and add a new child listen node to the
 * parent listen node. The listen_list_lock is assumed to be
 * held when called.
 */
static void i40iw_qhash_ctrl(struct i40iw_device *iwdev,
			     struct i40iw_cm_listener *parent_listen_node,
			     struct i40iw_cm_info *nfo,
			     u32 *ipaddr, bool ipv4, bool ifup)
{
	struct list_head *child_listen_list = &parent_listen_node->child_listen_list;
	struct i40iw_cm_listener *child_listen_node;
	struct list_head *pos, *tpos;
	enum i40iw_status_code ret;
	bool node_allocated = false;
	enum i40iw_quad_hash_manage_type op =
		ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;

	list_for_each_safe(pos, tpos, child_listen_list) {
		child_listen_node =
			list_entry(pos,
				   struct i40iw_cm_listener,
				   child_listen_list);
		if (!memcmp(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16))
			goto set_qhash;
	}

	/* if not found then add a child listener if interface is going up */
	if (!ifup)
		return;
	child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC);
	if (!child_listen_node)
		return;
	node_allocated = true;
	memcpy(child_listen_node, parent_listen_node, sizeof(*child_listen_node));

	memcpy(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16);

set_qhash:
	memcpy(nfo->loc_addr,
	       child_listen_node->loc_addr,
	       sizeof(nfo->loc_addr));
	nfo->vlan_id = child_listen_node->vlan_id;
	ret = i40iw_manage_qhash(iwdev, nfo,
				 I40IW_QHASH_TYPE_TCP_SYN,
				 op,
				 NULL, false);
	if (!ret) {
		child_listen_node->qhash_set = ifup;
		if (node_allocated)
			list_add(&child_listen_node->child_listen_list,
				 &parent_listen_node->child_listen_list);
	} else if (node_allocated) {
		kfree(child_listen_node);
	}
}
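/*
 * Note: i40iw_qhash_ctrl() is documented as running with the
 * listen_list_lock held, which is why the child listen node is allocated
 * with GFP_ATOMIC and why a failed qhash update simply frees the new node
 * instead of sleeping or retrying.
 */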
/**
 * i40iw_cm_teardown_connections - teardown QPs
 * @iwdev: device pointer
 * @ipaddr: Pointer to IPv4 or IPv6 address
 * @nfo: cm info with the vlan id and address family to match
 * @disconnect_all: flag indicating disconnect all QPs
 *
 * teardown QPs where source or destination addr matches ip addr
 */
void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr,
				   struct i40iw_cm_info *nfo,
				   bool disconnect_all)
{
	struct i40iw_cm_core *cm_core = &iwdev->cm_core;
	struct list_head *list_core_temp;
	struct list_head *list_node;
	struct i40iw_cm_node *cm_node;
	unsigned long flags;
	struct list_head teardown_list;
	struct ib_qp_attr attr;

	INIT_LIST_HEAD(&teardown_list);
	spin_lock_irqsave(&cm_core->ht_lock, flags);
	list_for_each_safe(list_node, list_core_temp,
			   &cm_core->accelerated_list) {
		cm_node = container_of(list_node, struct i40iw_cm_node, list);
		if (disconnect_all ||
		    (nfo->vlan_id == cm_node->vlan_id &&
		     (!memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16) ||
		      !memcmp(cm_node->rem_addr, ipaddr, nfo->ipv4 ? 4 : 16)))) {
			atomic_inc(&cm_node->ref_count);
			list_add(&cm_node->teardown_entry, &teardown_list);
		}
	}
	list_for_each_safe(list_node, list_core_temp,
			   &cm_core->non_accelerated_list) {
		cm_node = container_of(list_node, struct i40iw_cm_node, list);
		if (disconnect_all ||
		    (nfo->vlan_id == cm_node->vlan_id &&
		     (!memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16) ||
		      !memcmp(cm_node->rem_addr, ipaddr, nfo->ipv4 ? 4 : 16)))) {
			atomic_inc(&cm_node->ref_count);
			list_add(&cm_node->teardown_entry, &teardown_list);
		}
	}
	spin_unlock_irqrestore(&cm_core->ht_lock, flags);

	list_for_each_safe(list_node, list_core_temp, &teardown_list) {
		cm_node = container_of(list_node, struct i40iw_cm_node,
				       teardown_entry);
		attr.qp_state = IB_QPS_ERR;
		i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
		if (iwdev->reset)
			i40iw_cm_disconn(cm_node->iwqp);
		i40iw_rem_ref_cm_node(cm_node);
	}
}
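/*
 * Note the two-phase teardown above: matching nodes are first collected on
 * a private teardown_list under ht_lock, with a reference taken on each;
 * only after the lock is dropped are the QPs moved to IB_QPS_ERR and the
 * references released, so i40iw_modify_qp() and i40iw_cm_disconn() run
 * outside the spinlock.
 */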
/**
 * i40iw_if_notify - process an ifdown/ifup on an interface
 * @iwdev: device pointer
 * @netdev: network interface device structure
 * @ipaddr: Pointer to IPv4 or IPv6 address
 * @ipv4: flag indicating IPv4 when true
 * @ifup: flag indicating interface up when true
 */
void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev,
		     u32 *ipaddr, bool ipv4, bool ifup)
{
	struct i40iw_cm_core *cm_core = &iwdev->cm_core;
	unsigned long flags;
	struct i40iw_cm_listener *listen_node;
	static const u32 ip_zero[4] = { 0, 0, 0, 0 };
	struct i40iw_cm_info nfo;
	u16 vlan_id = rdma_vlan_dev_vlan_id(netdev);
	enum i40iw_status_code ret;
	enum i40iw_quad_hash_manage_type op =
		ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE;

	nfo.vlan_id = vlan_id;
	nfo.ipv4 = ipv4;

	/* Disable or enable qhash for listeners */
	spin_lock_irqsave(&cm_core->listen_list_lock, flags);
	list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
		if (vlan_id == listen_node->vlan_id &&
		    (!memcmp(listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16) ||
		     !memcmp(listen_node->loc_addr, ip_zero, ipv4 ? 4 : 16))) {
			memcpy(nfo.loc_addr, listen_node->loc_addr,
			       sizeof(nfo.loc_addr));
			nfo.loc_port = listen_node->loc_port;
			nfo.user_pri = listen_node->user_pri;
			if (!list_empty(&listen_node->child_listen_list)) {
				i40iw_qhash_ctrl(iwdev,
						 listen_node,
						 &nfo,
						 ipaddr, ipv4, ifup);
			} else if (memcmp(listen_node->loc_addr, ip_zero,
					  ipv4 ? 4 : 16)) {
				ret = i40iw_manage_qhash(iwdev,
							 &nfo,
							 I40IW_QHASH_TYPE_TCP_SYN,
							 op,
							 NULL,
							 false);
				if (!ret)
					listen_node->qhash_set = ifup;
			}
		}
	}
	spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);

	/* teardown connected qp's on ifdown */
	if (!ifup)
		i40iw_cm_teardown_connections(iwdev, ipaddr, &nfo, false);
}