// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 *  AF_SMC protocol family socket handler keeping the AF_INET sock address type
 *  applies to SOCK_STREAM sockets only
 *  offers an alternative communication option for TCP-protocol sockets
 *  applicable with RoCE-cards only
 *
 *  Initial restrictions:
 *    - support for alternate links postponed
 *
 *  Copyright IBM Corp. 2016, 2018
 *
 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 *              based on prototype from Frank Blaschka
 */

#define KMSG_COMPONENT "smc"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/sched/signal.h>
#include <linux/if_vlan.h>
#include <linux/rcupdate_wait.h>
#include <linux/ctype.h>
#include <linux/splice.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/smc.h>
#include <asm/ioctls.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "smc_netns.h"

#include "smc.h"
#include "smc_clc.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_ism.h"
#include "smc_pnet.h"
#include "smc_netlink.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"
#include "smc_stats.h"
#include "smc_tracepoint.h"
#include "smc_sysctl.h"
#include "smc_loopback.h"
#include "smc_inet.h"
static DEFINE_MUTEX(smc_server_lgr_pending);	/* serialize link group
						 * creation on server
						 */
static DEFINE_MUTEX(smc_client_lgr_pending);	/* serialize link group
						 * creation on client
						 */

static struct workqueue_struct	*smc_tcp_ls_wq;	/* wq for tcp listen work */
struct workqueue_struct	*smc_hs_wq;	/* wq for handshake work */
struct workqueue_struct	*smc_close_wq;	/* wq for close work */

static void smc_tcp_listen_work(struct work_struct *);
static void smc_connect_work(struct work_struct *);
int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	void *hdr;

	if (cb_ctx->pos[0])
		goto out;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_DUMP_HS_LIMITATION);
	if (!hdr)
		return -ENOMEM;

	if (nla_put_u8(skb, SMC_NLA_HS_LIMITATION_ENABLED,
		       sock_net(skb->sk)->smc.limit_smc_hs))
		goto err;

	genlmsg_end(skb, hdr);
	cb_ctx->pos[0] = 1;
out:
	return skb->len;
err:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}

int smc_nl_enable_hs_limitation(struct sk_buff *skb, struct genl_info *info)
{
	sock_net(skb->sk)->smc.limit_smc_hs = true;
	return 0;
}

int smc_nl_disable_hs_limitation(struct sk_buff *skb, struct genl_info *info)
{
	sock_net(skb->sk)->smc.limit_smc_hs = false;
	return 0;
}
static void smc_set_keepalive(struct sock *sk, int val)
{
	struct smc_sock *smc = smc_sk(sk);

	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
}
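
/* syn_recv_sock callback used on the internal clcsock of a listening SMC
 * socket: drop the SYN when the TCP accept backlog plus the number of queued
 * SMC handshakes exceeds the listen backlog, or when the SMC accept queue is
 * full; otherwise pass through to the original af_ops->syn_recv_sock() and
 * make sure the child socket does not inherit the SMC user data or ops
 */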
static struct sock *smc_tcp_syn_recv_sock(const struct sock *sk,
					  struct sk_buff *skb,
					  struct request_sock *req,
					  struct dst_entry *dst,
					  struct request_sock *req_unhash,
					  bool *own_req)
{
	struct smc_sock *smc;
	struct sock *child;

	smc = smc_clcsock_user_data(sk);

	if (READ_ONCE(sk->sk_ack_backlog) + atomic_read(&smc->queued_smc_hs) >
	    sk->sk_max_ack_backlog)
		goto drop;

	if (sk_acceptq_is_full(&smc->sk)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
		goto drop;
	}

	/* passthrough to original syn recv sock fct */
	child = smc->ori_af_ops->syn_recv_sock(sk, skb, req, dst, req_unhash,
					       own_req);
	/* child must not inherit smc or its ops */
	if (child) {
		rcu_assign_sk_user_data(child, NULL);

		/* v4-mapped sockets don't inherit parent ops. Don't restore. */
		if (inet_csk(child)->icsk_af_ops == inet_csk(sk)->icsk_af_ops)
			inet_csk(child)->icsk_af_ops = smc->ori_af_ops;
	}
	return child;

drop:
	dst_release(dst);
	tcp_listendrop(sk);
	return NULL;
}
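
/* return true when new SMC handshakes should be rejected: either the listen
 * socket's user data is already gone or the handshake workqueue is congested
 */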
static bool smc_hs_congested(const struct sock *sk)
{
	const struct smc_sock *smc;

	smc = smc_clcsock_user_data(sk);

	if (!smc)
		return true;

	if (workqueue_congested(WORK_CPU_UNBOUND, smc_hs_wq))
		return true;

	return false;
}
struct smc_hashinfo smc_v4_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};

struct smc_hashinfo smc_v6_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
};

int smc_hash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
	struct hlist_head *head;

	head = &h->ht;

	write_lock_bh(&h->lock);
	sk_add_node(sk, head);
	write_unlock_bh(&h->lock);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

	return 0;
}

void smc_unhash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;

	write_lock_bh(&h->lock);
	if (sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(&h->lock);
}

/* Called just before the user releases the sock lock, to catch up on the
 * work that could not be done from BH context while the user held the lock.
 */
void smc_release_cb(struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	if (smc->conn.tx_in_release_sock) {
		smc_tx_pending(&smc->conn);
		smc->conn.tx_in_release_sock = false;
	}
}

struct proto smc_proto = {
	.name		= "SMC",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.release_cb	= smc_release_cb,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v4_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto);

struct proto smc_proto6 = {
	.name		= "SMC6",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.release_cb	= smc_release_cb,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v6_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto6);
static void smc_fback_restore_callbacks(struct smc_sock *smc)
{
	struct sock *clcsk = smc->clcsock->sk;

	write_lock_bh(&clcsk->sk_callback_lock);
	clcsk->sk_user_data = NULL;

	smc_clcsock_restore_cb(&clcsk->sk_state_change, &smc->clcsk_state_change);
	smc_clcsock_restore_cb(&clcsk->sk_data_ready, &smc->clcsk_data_ready);
	smc_clcsock_restore_cb(&clcsk->sk_write_space, &smc->clcsk_write_space);
	smc_clcsock_restore_cb(&clcsk->sk_error_report, &smc->clcsk_error_report);

	write_unlock_bh(&clcsk->sk_callback_lock);
}

static void smc_restore_fallback_changes(struct smc_sock *smc)
{
	if (smc->clcsock->file) { /* non-accepted sockets have no file yet */
		smc->clcsock->file->private_data = smc->sk.sk_socket;
		smc->clcsock->file = NULL;
		smc_fback_restore_callbacks(smc);
	}
}
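
/* core of socket release: actively close the SMC connection, or, in fallback
 * mode, move the sock to SMC_CLOSED (waking a pending clcsock accept for
 * listen sockets) and restore the clcsock; then unhash the sock and, once it
 * is closed, release the clcsock and free the SMC connection
 */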
static int __smc_release(struct smc_sock *smc)
{
	struct sock *sk = &smc->sk;
	int rc = 0;

	if (!smc->use_fallback) {
		rc = smc_close_active(smc);
		smc_sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	} else {
		if (sk->sk_state != SMC_CLOSED) {
			if (sk->sk_state != SMC_LISTEN &&
			    sk->sk_state != SMC_INIT)
				sock_put(sk); /* passive closing */
			if (sk->sk_state == SMC_LISTEN) {
				/* wake up clcsock accept */
				rc = kernel_sock_shutdown(smc->clcsock,
							  SHUT_RDWR);
			}
			sk->sk_state = SMC_CLOSED;
			sk->sk_state_change(sk);
		}
		smc_restore_fallback_changes(smc);
	}

	sk->sk_prot->unhash(sk);

	if (sk->sk_state == SMC_CLOSED) {
		if (smc->clcsock) {
			release_sock(sk);
			smc_clcsock_release(smc);
			lock_sock(sk);
		}
		if (!smc->use_fallback)
			smc_conn_free(&smc->conn);
	}

	return rc;
}
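
/* release an AF_SMC socket: abort a dangling non-blocking connect, cancel
 * pending connect work, run the common release steps and detach the sock
 * from the socket before dropping the references taken here
 */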
int smc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int old_state, rc = 0;

	if (!sk)
		goto out;

	sock_hold(sk); /* sock_put below */
	smc = smc_sk(sk);

	old_state = sk->sk_state;

	/* cleanup for a dangling non-blocking connect */
	if (smc->connect_nonblock && old_state == SMC_INIT)
		tcp_abort(smc->clcsock->sk, ECONNABORTED);

	if (cancel_work_sync(&smc->connect_work))
		sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */

	if (sk->sk_state == SMC_LISTEN)
		/* smc_close_non_accepted() is called and acquires
		 * sock lock for child sockets again
		 */
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	else
		lock_sock(sk);

	if (old_state == SMC_INIT && sk->sk_state == SMC_ACTIVE &&
	    !smc->use_fallback)
		smc_close_active_abort(smc);

	rc = __smc_release(smc);

	/* detach socket */
	sock_orphan(sk);
	sock->sk = NULL;
	release_sock(sk);

	sock_put(sk); /* sock_hold above */
	sock_put(sk); /* final sock_put */
out:
	return rc;
}

static void smc_destruct(struct sock *sk)
{
	if (sk->sk_state != SMC_CLOSED)
		return;
	if (!sock_flag(sk, SOCK_DEAD))
		return;
}
void smc_sk_init(struct net *net, struct sock *sk, int protocol)
{
	struct smc_sock *smc = smc_sk(sk);

	sk->sk_state = SMC_INIT;
	sk->sk_destruct = smc_destruct;
	sk->sk_protocol = protocol;
	WRITE_ONCE(sk->sk_sndbuf, 2 * READ_ONCE(net->smc.sysctl_wmem));
	WRITE_ONCE(sk->sk_rcvbuf, 2 * READ_ONCE(net->smc.sysctl_rmem));
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	INIT_WORK(&smc->connect_work, smc_connect_work);
	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
	INIT_LIST_HEAD(&smc->accept_q);
	spin_lock_init(&smc->accept_q_lock);
	spin_lock_init(&smc->conn.send_lock);
	sk->sk_prot->hash(sk);
	mutex_init(&smc->clcsock_release_lock);
	smc_init_saved_callbacks(smc);
	smc->limit_smc_hs = net->smc.limit_smc_hs;
	smc->use_fallback = false; /* assume rdma capability first */
	smc->fallback_rsn = 0;
	smc_close_init(smc);
}

static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
				   int protocol)
{
	struct proto *prot;
	struct sock *sk;

	prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
	sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
	smc_sk_init(net, sk, protocol);

	return sk;
}
int smc_bind(struct socket *sock, struct sockaddr *uaddr,
	     int addr_len)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);

	/* replicate tests from inet_bind(), to be safe wrt. future changes */
	rc = -EINVAL;
	if (addr_len < sizeof(struct sockaddr_in))
		goto out;

	rc = -EAFNOSUPPORT;
	if (addr->sin_family != AF_INET &&
	    addr->sin_family != AF_INET6 &&
	    addr->sin_family != AF_UNSPEC)
		goto out;
	/* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
	if (addr->sin_family == AF_UNSPEC &&
	    addr->sin_addr.s_addr != htonl(INADDR_ANY))
		goto out;

	lock_sock(sk);

	/* Check if socket is already active */
	rc = -EINVAL;
	if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
		goto out_rel;

	smc->clcsock->sk->sk_reuse = sk->sk_reuse;
	smc->clcsock->sk->sk_reuseport = sk->sk_reuseport;
	rc = kernel_bind(smc->clcsock, uaddr, addr_len);

out_rel:
	release_sock(sk);
out:
	return rc;
}
/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 * clc socket (since smc is not called for these options from net/core)
 */

#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_BROADCAST) | \
			     (1UL << SOCK_TIMESTAMP) | \
			     (1UL << SOCK_DBG) | \
			     (1UL << SOCK_RCVTSTAMP) | \
			     (1UL << SOCK_RCVTSTAMPNS) | \
			     (1UL << SOCK_LOCALROUTE) | \
			     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
			     (1UL << SOCK_RXQ_OVFL) | \
			     (1UL << SOCK_WIFI_STATUS) | \
			     (1UL << SOCK_NOFCS) | \
			     (1UL << SOCK_FILTER_LOCKED) | \
			     (1UL << SOCK_TSTAMP_NEW))

/* if set, use value set by setsockopt() - else use IPv4 or SMC sysctl value */
static void smc_adjust_sock_bufsizes(struct sock *nsk, struct sock *osk,
				     unsigned long mask)
{
	nsk->sk_userlocks = osk->sk_userlocks;
	if (osk->sk_userlocks & SOCK_SNDBUF_LOCK)
		nsk->sk_sndbuf = osk->sk_sndbuf;
	if (osk->sk_userlocks & SOCK_RCVBUF_LOCK)
		nsk->sk_rcvbuf = osk->sk_rcvbuf;
}

static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
				   unsigned long mask)
{
	/* options we do not get control of via setsockopt() */
	nsk->sk_type = osk->sk_type;
	nsk->sk_sndtimeo = osk->sk_sndtimeo;
	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
	nsk->sk_mark = READ_ONCE(osk->sk_mark);
	nsk->sk_priority = READ_ONCE(osk->sk_priority);
	nsk->sk_rcvlowat = osk->sk_rcvlowat;
	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
	nsk->sk_err = osk->sk_err;

	nsk->sk_flags &= ~mask;
	nsk->sk_flags |= osk->sk_flags & mask;

	smc_adjust_sock_bufsizes(nsk, osk, mask);
}

static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
{
	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
}

#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_DBG))

/* copy only settings and flags relevant for smc from clc to smc socket */
static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
{
	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}
/* register the new vzalloced sndbuf on all links */
static int smcr_lgr_reg_sndbufs(struct smc_link *link,
				struct smc_buf_desc *snd_desc)
{
	struct smc_link_group *lgr = link->lgr;
	int i, rc = 0;

	if (!snd_desc->is_vm)
		return -EINVAL;

	/* protect against parallel smcr_link_reg_buf() */
	down_write(&lgr->llc_conf_mutex);
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]))
			continue;
		rc = smcr_link_reg_buf(&lgr->lnk[i], snd_desc);
		if (rc)
			break;
	}
	up_write(&lgr->llc_conf_mutex);
	return rc;
}
/* register the new rmb on all links */
static int smcr_lgr_reg_rmbs(struct smc_link *link,
			     struct smc_buf_desc *rmb_desc)
{
	struct smc_link_group *lgr = link->lgr;
	bool do_slow = false;
	int i, rc = 0;

	rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
	if (rc)
		return rc;

	down_read(&lgr->llc_conf_mutex);
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]))
			continue;
		if (!rmb_desc->is_reg_mr[link->link_idx]) {
			up_read(&lgr->llc_conf_mutex);
			goto slow_path;
		}
	}
	/* mr is already registered */
	goto fast_path;
slow_path:
	do_slow = true;
	/* protect against parallel smc_llc_cli_rkey_exchange() and
	 * parallel smcr_link_reg_buf()
	 */
	down_write(&lgr->llc_conf_mutex);
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]))
			continue;
		rc = smcr_link_reg_buf(&lgr->lnk[i], rmb_desc);
		if (rc)
			goto out;
	}
fast_path:
	/* exchange confirm_rkey msg with peer */
	rc = smc_llc_do_confirm_rkey(link, rmb_desc);
	if (rc) {
		rc = -EFAULT;
		goto out;
	}
	rmb_desc->is_conf_rkey = true;
out:
	do_slow ? up_write(&lgr->llc_conf_mutex) : up_read(&lgr->llc_conf_mutex);
	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
	return rc;
}
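
/* client side of the CONFIRM LINK handshake on first contact: wait for the
 * server's CONFIRM LINK request, bring the QP to RTS, register the RMB (and a
 * vzalloced sndbuf), answer with a CONFIRM LINK response and optionally
 * process the server's ADD LINK request for a second link
 */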
static int smcr_clnt_conf_first_link(struct smc_sock *smc)
{
	struct smc_link *link = smc->conn.lnk;
	struct smc_llc_qentry *qentry;
	int rc;
	/* Receive the CONFIRM LINK request from the server over the RoCE
	 * fabric. Waiting twice as long as the server's default timeout
	 * temporarily avoids decline messages from both sides crossing or
	 * colliding.
	 */
	qentry = smc_llc_wait(link->lgr, NULL, 2 * SMC_LLC_WAIT_TIME,
			      SMC_LLC_CONFIRM_LINK);
	if (!qentry) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}
	smc_llc_save_peer_uid(qentry);
	rc = smc_llc_eval_conf_link(qentry, SMC_LLC_REQ);
	smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
	if (rc)
		return SMC_CLC_DECL_RMBE_EC;

	rc = smc_ib_modify_qp_rts(link);
	if (rc)
		return SMC_CLC_DECL_ERR_RDYLNK;

	smc_wr_remember_qp_attr(link);

	/* reg the sndbuf if it was vzalloced */
	if (smc->conn.sndbuf_desc->is_vm) {
		if (smcr_link_reg_buf(link, smc->conn.sndbuf_desc))
			return SMC_CLC_DECL_ERR_REGBUF;
	}

	/* reg the rmb */
	if (smcr_link_reg_buf(link, smc->conn.rmb_desc))
		return SMC_CLC_DECL_ERR_REGBUF;

	/* confirm_rkey is implicit on 1st contact */
	smc->conn.rmb_desc->is_conf_rkey = true;

	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	smc_llc_link_active(link);
	smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);

	if (link->lgr->max_links > 1) {
		/* optional 2nd link, receive ADD LINK request from server */
		qentry = smc_llc_wait(link->lgr, NULL, SMC_LLC_WAIT_TIME,
				      SMC_LLC_ADD_LINK);
		if (!qentry) {
			struct smc_clc_msg_decline dclc;

			rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
					      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
			if (rc == -EAGAIN)
				rc = 0; /* no DECLINE received, go with one link */
			return rc;
		}
		smc_llc_flow_qentry_clr(&link->lgr->llc_flow_lcl);
		smc_llc_cli_add_link(link, qentry);
	}
	return 0;
}
static bool smc_isascii(char *hostname)
{
	int i;

	for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++)
		if (!isascii(hostname[i]))
			return false;
	return true;
}
static void smc_conn_save_peer_info_fce(struct smc_sock *smc,
					struct smc_clc_msg_accept_confirm *clc)
{
	struct smc_clc_first_contact_ext *fce;
	int clc_v2_len;

	if (clc->hdr.version == SMC_V1 ||
	    !(clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK))
		return;

	if (smc->conn.lgr->is_smcd) {
		memcpy(smc->conn.lgr->negotiated_eid, clc->d1.eid,
		       SMC_MAX_EID_LEN);
		clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm, d1);
	} else {
		memcpy(smc->conn.lgr->negotiated_eid, clc->r1.eid,
		       SMC_MAX_EID_LEN);
		clc_v2_len = offsetofend(struct smc_clc_msg_accept_confirm, r1);
	}
	fce = (struct smc_clc_first_contact_ext *)(((u8 *)clc) + clc_v2_len);
	smc->conn.lgr->peer_os = fce->os_type;
	smc->conn.lgr->peer_smc_release = fce->release;
	if (smc_isascii(fce->hostname))
		memcpy(smc->conn.lgr->peer_hostname, fce->hostname,
		       SMC_MAX_HOSTNAME_LEN);
}
static void smcr_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->r0.rmbe_size);

	smc->conn.peer_rmbe_idx = clc->r0.rmbe_idx;
	smc->conn.local_tx_ctrl.token = ntohl(clc->r0.rmbe_alert_token);
	smc->conn.peer_rmbe_size = bufsize;
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
}

static void smcd_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->d0.dmbe_size);

	smc->conn.peer_rmbe_idx = clc->d0.dmbe_idx;
	smc->conn.peer_token = ntohll(clc->d0.token);
	/* msg header takes up space in the buffer */
	smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
}

static void smc_conn_save_peer_info(struct smc_sock *smc,
				    struct smc_clc_msg_accept_confirm *clc)
{
	if (smc->conn.lgr->is_smcd)
		smcd_conn_save_peer_info(smc, clc);
	else
		smcr_conn_save_peer_info(smc, clc);
	smc_conn_save_peer_info_fce(smc, clc);
}

static void smc_link_save_peer_info(struct smc_link *link,
				    struct smc_clc_msg_accept_confirm *clc,
				    struct smc_init_info *ini)
{
	link->peer_qpn = ntoh24(clc->r0.qpn);
	memcpy(link->peer_gid, ini->peer_gid, SMC_GID_SIZE);
	memcpy(link->peer_mac, ini->peer_mac, sizeof(link->peer_mac));
	link->peer_psn = ntoh24(clc->r0.psn);
	link->peer_mtu = clc->r0.qp_mtu;
}
static void smc_stat_inc_fback_rsn_cnt(struct smc_sock *smc,
				       struct smc_stats_fback *fback_arr)
{
	int cnt;

	for (cnt = 0; cnt < SMC_MAX_FBACK_RSN_CNT; cnt++) {
		if (fback_arr[cnt].fback_code == smc->fallback_rsn) {
			fback_arr[cnt].count++;
			break;
		}
		if (!fback_arr[cnt].fback_code) {
			fback_arr[cnt].fback_code = smc->fallback_rsn;
			fback_arr[cnt].count++;
			break;
		}
	}
}

static void smc_stat_fallback(struct smc_sock *smc)
{
	struct net *net = sock_net(&smc->sk);

	mutex_lock(&net->smc.mutex_fback_rsn);
	if (smc->listen_smc) {
		smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->srv);
		net->smc.fback_rsn->srv_fback_cnt++;
	} else {
		smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->clnt);
		net->smc.fback_rsn->clnt_fback_cnt++;
	}
	mutex_unlock(&net->smc.mutex_fback_rsn);
}
/* must be called under rcu read lock */
static void smc_fback_wakeup_waitqueue(struct smc_sock *smc, void *key)
{
	struct socket_wq *wq;
	__poll_t flags;

	wq = rcu_dereference(smc->sk.sk_wq);
	if (!skwq_has_sleeper(wq))
		return;

	/* wake up smc sk->sk_wq */
	if (!key) {
		/* sk_state_change */
		wake_up_interruptible_all(&wq->wait);
	} else {
		flags = key_to_poll(key);
		if (flags & (EPOLLIN | EPOLLOUT))
			/* sk_data_ready or sk_write_space */
			wake_up_interruptible_sync_poll(&wq->wait, flags);
		else if (flags & EPOLLERR)
			/* sk_error_report */
			wake_up_interruptible_poll(&wq->wait, flags);
	}
}

static int smc_fback_mark_woken(wait_queue_entry_t *wait,
				unsigned int mode, int sync, void *key)
{
	struct smc_mark_woken *mark =
		container_of(wait, struct smc_mark_woken, wait_entry);

	mark->woken = true;
	mark->key = key;
	return 0;
}
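
/* add a temporary wait entry to the clcsock's wait queue so that calling the
 * original callback reveals whether it would have woken a waiter; if so,
 * forward the wakeup (with its poll key) to the SMC socket's own wait queue
 */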
static void smc_fback_forward_wakeup(struct smc_sock *smc, struct sock *clcsk,
				     void (*clcsock_callback)(struct sock *sk))
{
	struct smc_mark_woken mark = { .woken = false };
	struct socket_wq *wq;

	init_waitqueue_func_entry(&mark.wait_entry,
				  smc_fback_mark_woken);
	rcu_read_lock();
	wq = rcu_dereference(clcsk->sk_wq);
	if (!wq)
		goto out;
	add_wait_queue(sk_sleep(clcsk), &mark.wait_entry);
	clcsock_callback(clcsk);
	remove_wait_queue(sk_sleep(clcsk), &mark.wait_entry);

	if (mark.woken)
		smc_fback_wakeup_waitqueue(smc, mark.key);
out:
	rcu_read_unlock();
}
static void smc_fback_state_change(struct sock *clcsk)
{
	struct smc_sock *smc;

	read_lock_bh(&clcsk->sk_callback_lock);
	smc = smc_clcsock_user_data(clcsk);
	if (smc)
		smc_fback_forward_wakeup(smc, clcsk,
					 smc->clcsk_state_change);
	read_unlock_bh(&clcsk->sk_callback_lock);
}

static void smc_fback_data_ready(struct sock *clcsk)
{
	struct smc_sock *smc;

	read_lock_bh(&clcsk->sk_callback_lock);
	smc = smc_clcsock_user_data(clcsk);
	if (smc)
		smc_fback_forward_wakeup(smc, clcsk,
					 smc->clcsk_data_ready);
	read_unlock_bh(&clcsk->sk_callback_lock);
}

static void smc_fback_write_space(struct sock *clcsk)
{
	struct smc_sock *smc;

	read_lock_bh(&clcsk->sk_callback_lock);
	smc = smc_clcsock_user_data(clcsk);
	if (smc)
		smc_fback_forward_wakeup(smc, clcsk,
					 smc->clcsk_write_space);
	read_unlock_bh(&clcsk->sk_callback_lock);
}

static void smc_fback_error_report(struct sock *clcsk)
{
	struct smc_sock *smc;

	read_lock_bh(&clcsk->sk_callback_lock);
	smc = smc_clcsock_user_data(clcsk);
	if (smc)
		smc_fback_forward_wakeup(smc, clcsk,
					 smc->clcsk_error_report);
	read_unlock_bh(&clcsk->sk_callback_lock);
}
static void smc_fback_replace_callbacks(struct smc_sock *smc)
{
	struct sock *clcsk = smc->clcsock->sk;

	write_lock_bh(&clcsk->sk_callback_lock);
	clcsk->sk_user_data = (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);

	smc_clcsock_replace_cb(&clcsk->sk_state_change, smc_fback_state_change,
			       &smc->clcsk_state_change);
	smc_clcsock_replace_cb(&clcsk->sk_data_ready, smc_fback_data_ready,
			       &smc->clcsk_data_ready);
	smc_clcsock_replace_cb(&clcsk->sk_write_space, smc_fback_write_space,
			       &smc->clcsk_write_space);
	smc_clcsock_replace_cb(&clcsk->sk_error_report, smc_fback_error_report,
			       &smc->clcsk_error_report);

	write_unlock_bh(&clcsk->sk_callback_lock);
}
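
/* switch the connection to TCP fallback: hand the struct file and fasync list
 * over to the clcsock and redirect the clcsock's wakeup callbacks so waiters
 * on the SMC socket's wait queue still get woken up
 */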
static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
{
	int rc = 0;

	mutex_lock(&smc->clcsock_release_lock);
	if (!smc->clcsock) {
		rc = -EBADF;
		goto out;
	}

	smc->use_fallback = true;
	smc->fallback_rsn = reason_code;
	smc_stat_fallback(smc);
	trace_smc_switch_to_fallback(smc, reason_code);
	if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
		smc->clcsock->file = smc->sk.sk_socket->file;
		smc->clcsock->file->private_data = smc->clcsock;
		smc->clcsock->wq.fasync_list =
			smc->sk.sk_socket->wq.fasync_list;
		smc->sk.sk_socket->wq.fasync_list = NULL;

		/* There might be some wait entries remaining
		 * in smc sk->sk_wq and they should be woken up
		 * as clcsock's wait queue is woken up.
		 */
		smc_fback_replace_callbacks(smc);
	}
out:
	mutex_unlock(&smc->clcsock_release_lock);
	return rc;
}
/* fall back during connect */
static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
{
	struct net *net = sock_net(&smc->sk);
	int rc = 0;

	rc = smc_switch_to_fallback(smc, reason_code);
	if (rc) { /* fallback fails */
		this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
		if (smc->sk.sk_state == SMC_INIT)
			sock_put(&smc->sk); /* passive closing */
		return rc;
	}
	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;
	return 0;
}

/* decline and fall back during connect */
static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
					u8 version)
{
	struct net *net = sock_net(&smc->sk);
	int rc;

	if (reason_code < 0) { /* error, fallback is not possible */
		this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
		if (smc->sk.sk_state == SMC_INIT)
			sock_put(&smc->sk); /* passive closing */
		return reason_code;
	}
	if (reason_code != SMC_CLC_DECL_PEERDECL) {
		rc = smc_clc_send_decline(smc, reason_code, version);
		if (rc < 0) {
			this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
			if (smc->sk.sk_state == SMC_INIT)
				sock_put(&smc->sk); /* passive closing */
			return rc;
		}
	}
	return smc_connect_fallback(smc, reason_code);
}
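
/* abort a connection during the handshake: free the connection and, if this
 * side was first contact and the link group is still valid, clean up the
 * freshly created link group right away
 */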
static void smc_conn_abort(struct smc_sock *smc, int local_first)
{
	struct smc_connection *conn = &smc->conn;
	struct smc_link_group *lgr = conn->lgr;
	bool lgr_valid = false;

	if (smc_conn_lgr_valid(conn))
		lgr_valid = true;

	smc_conn_free(conn);
	if (local_first && lgr_valid)
		smc_lgr_cleanup_early(lgr);
}
/* check if there is an RDMA device available for this connection. */
/* called for connect and listen */
static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
	if (!ini->check_smcrv2 && !ini->ib_dev)
		return SMC_CLC_DECL_NOSMCRDEV;
	if (ini->check_smcrv2 && !ini->smcrv2.ib_dev_v2)
		return SMC_CLC_DECL_NOSMCRDEV;
	return 0;
}

/* check if there is an ISM device available for this connection. */
/* called for connect and listen */
static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* Find ISM device with same PNETID as connecting interface */
	smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
	if (!ini->ism_dev[0])
		return SMC_CLC_DECL_NOSMCDDEV;
	else
		ini->ism_chid[0] = smc_ism_get_chid(ini->ism_dev[0]);
	return 0;
}
/* is chid unique for the ism devices that are already determined? */
static bool smc_find_ism_v2_is_unique_chid(u16 chid, struct smc_init_info *ini,
					   int cnt)
{
	int i = (!ini->ism_dev[0]) ? 1 : 0;

	for (; i < cnt; i++)
		if (ini->ism_chid[i] == chid)
			return false;
	return true;
}
/* determine possible V2 ISM devices (either without PNETID or with PNETID plus
 * PNETID matching net_device)
 */
static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
				       struct smc_init_info *ini)
{
	int rc = SMC_CLC_DECL_NOSMCDDEV;
	struct smcd_dev *smcd;
	int i = 1, entry = 1;
	bool is_emulated;
	u16 chid;

	if (smcd_indicated(ini->smc_type_v1))
		rc = 0;		/* already initialized for V1 */

	mutex_lock(&smcd_dev_list.mutex);
	list_for_each_entry(smcd, &smcd_dev_list.list, list) {
		if (smcd->going_away || smcd == ini->ism_dev[0])
			continue;
		chid = smc_ism_get_chid(smcd);
		if (!smc_find_ism_v2_is_unique_chid(chid, ini, i))
			continue;
		is_emulated = __smc_ism_is_emulated(chid);
		if (!smc_pnet_is_pnetid_set(smcd->pnetid) ||
		    smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) {
			if (is_emulated && entry == SMCD_CLC_MAX_V2_GID_ENTRIES)
				/* It's the last GID-CHID entry left in the CLC
				 * Proposal SMC-Dv2 extension, but an Emulated-
				 * ISM device will take two entries. So give
				 * it up and try the next potential ISM device.
				 */
				continue;
			ini->ism_dev[i] = smcd;
			ini->ism_chid[i] = chid;
			ini->is_smcd = true;
			rc = 0;
			i++;
			entry = is_emulated ? entry + 2 : entry + 1;
			if (entry > SMCD_CLC_MAX_V2_GID_ENTRIES)
				break;
		}
	}
	mutex_unlock(&smcd_dev_list.mutex);
	ini->ism_offered_cnt = i - 1;
	if (!ini->ism_dev[0] && !ini->ism_dev[1])
		ini->smcd_version = 0;

	return rc;
}
/* Check for VLAN ID and register it on ISM device just for CLC handshake */
static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
				      struct smc_init_info *ini)
{
	if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev[0], ini->vlan_id))
		return SMC_CLC_DECL_ISMVLANERR;
	return 0;
}
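
/* collect the ISM and RDMA devices usable for SMC V1 and V2 proposals and
 * clear the protocol versions for which no suitable device is found; fail
 * only if neither ISM nor RDMA can be offered at all
 */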
static int smc_find_proposal_devices(struct smc_sock *smc,
				     struct smc_init_info *ini)
{
	int rc = 0;

	/* check if there is an ism device available */
	if (!(ini->smcd_version & SMC_V1) ||
	    smc_find_ism_device(smc, ini) ||
	    smc_connect_ism_vlan_setup(smc, ini))
		ini->smcd_version &= ~SMC_V1;
	/* else ISM V1 is supported for this connection */

	/* check if there is an rdma device available */
	if (!(ini->smcr_version & SMC_V1) ||
	    smc_find_rdma_device(smc, ini))
		ini->smcr_version &= ~SMC_V1;
	/* else RDMA is supported for this connection */
	ini->smc_type_v1 = smc_indicated_type(ini->smcd_version & SMC_V1,
					      ini->smcr_version & SMC_V1);

	/* check if there is an ism v2 device available */
	if (!(ini->smcd_version & SMC_V2) ||
	    !smc_ism_is_v2_capable() ||
	    smc_find_ism_v2_device_clnt(smc, ini))
		ini->smcd_version &= ~SMC_V2;

	/* check if there is an rdma v2 device available */
	ini->check_smcrv2 = true;
	ini->smcrv2.saddr = smc->clcsock->sk->sk_rcv_saddr;
	if (!(ini->smcr_version & SMC_V2) ||
	    smc->clcsock->sk->sk_family != AF_INET ||
	    !smc_clc_ueid_count() ||
	    smc_find_rdma_device(smc, ini))
		ini->smcr_version &= ~SMC_V2;
	ini->check_smcrv2 = false;
	ini->smc_type_v2 = smc_indicated_type(ini->smcd_version & SMC_V2,
					      ini->smcr_version & SMC_V2);

	/* if neither ISM nor RDMA are supported, fallback */
	if (ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N)
		rc = SMC_CLC_DECL_NOSMCDEV;

	return rc;
}
/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
 * used, the VLAN ID will be registered again during the connection setup.
 */
static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc,
					struct smc_init_info *ini)
{
	if (!smcd_indicated(ini->smc_type_v1))
		return 0;
	if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev[0], ini->vlan_id))
		return SMC_CLC_DECL_CNFERR;
	return 0;
}

#define SMC_CLC_MAX_ACCEPT_LEN \
	(sizeof(struct smc_clc_msg_accept_confirm) + \
	 sizeof(struct smc_clc_first_contact_ext_v2x) + \
	 sizeof(struct smc_clc_msg_trail))

/* CLC handshake during connect */
static int smc_connect_clc(struct smc_sock *smc,
			   struct smc_clc_msg_accept_confirm *aclc,
			   struct smc_init_info *ini)
{
	int rc = 0;

	/* do inband token exchange */
	rc = smc_clc_send_proposal(smc, ini);
	if (rc)
		return rc;
	/* receive SMC Accept CLC message */
	return smc_clc_wait_msg(smc, aclc, SMC_CLC_MAX_ACCEPT_LEN,
				SMC_CLC_ACCEPT, CLC_WAIT_TIME);
}
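
/* build the SMC-Rv2 GID list: start with the already known GID and append the
 * GID of an alternate RoCE device found via the PNET table, if one exists
 */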
void smc_fill_gid_list(struct smc_link_group *lgr,
		       struct smc_gidlist *gidlist,
		       struct smc_ib_device *known_dev, u8 *known_gid)
{
	struct smc_init_info *alt_ini = NULL;

	memset(gidlist, 0, sizeof(*gidlist));
	memcpy(gidlist->list[gidlist->len++], known_gid, SMC_GID_SIZE);

	alt_ini = kzalloc(sizeof(*alt_ini), GFP_KERNEL);
	if (!alt_ini)
		goto out;

	alt_ini->vlan_id = lgr->vlan_id;
	alt_ini->check_smcrv2 = true;
	alt_ini->smcrv2.saddr = lgr->saddr;
	smc_pnet_find_alt_roce(lgr, alt_ini, known_dev);

	if (!alt_ini->smcrv2.ib_dev_v2)
		goto out;

	memcpy(gidlist->list[gidlist->len++], alt_ini->smcrv2.ib_gid_v2,
	       SMC_GID_SIZE);

out:
	kfree(alt_ini);
}
static int smc_connect_rdma_v2_prepare(struct smc_sock *smc,
				       struct smc_clc_msg_accept_confirm *aclc,
				       struct smc_init_info *ini)
{
	struct smc_clc_first_contact_ext *fce =
		smc_get_clc_first_contact_ext(aclc, false);
	struct net *net = sock_net(&smc->sk);
	int rc;

	if (!ini->first_contact_peer || aclc->hdr.version == SMC_V1)
		return 0;

	if (fce->v2_direct) {
		memcpy(ini->smcrv2.nexthop_mac, &aclc->r0.lcl.mac, ETH_ALEN);
		ini->smcrv2.uses_gateway = false;
	} else {
		if (smc_ib_find_route(net, smc->clcsock->sk->sk_rcv_saddr,
				      smc_ib_gid_to_ipv4(aclc->r0.lcl.gid),
				      ini->smcrv2.nexthop_mac,
				      &ini->smcrv2.uses_gateway))
			return SMC_CLC_DECL_NOROUTE;
		if (!ini->smcrv2.uses_gateway) {
			/* mismatch: peer claims indirect, but it's direct */
			return SMC_CLC_DECL_NOINDIRECT;
		}
	}

	ini->release_nr = fce->release;
	rc = smc_clc_clnt_v2x_features_validate(fce, ini);
	if (rc)
		return rc;

	return 0;
}
/* setup for RDMA connection of client */
static int smc_connect_rdma(struct smc_sock *smc,
			    struct smc_clc_msg_accept_confirm *aclc,
			    struct smc_init_info *ini)
{
	int i, reason_code = 0;
	struct smc_link *link;
	u8 *eid = NULL;

	ini->is_smcd = false;
	ini->ib_clcqpn = ntoh24(aclc->r0.qpn);
	ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;
	memcpy(ini->peer_systemid, aclc->r0.lcl.id_for_peer, SMC_SYSTEMID_LEN);
	memcpy(ini->peer_gid, aclc->r0.lcl.gid, SMC_GID_SIZE);
	memcpy(ini->peer_mac, aclc->r0.lcl.mac, ETH_ALEN);
	ini->max_conns = SMC_CONN_PER_LGR_MAX;
	ini->max_links = SMC_LINKS_ADD_LNK_MAX;

	reason_code = smc_connect_rdma_v2_prepare(smc, aclc, ini);
	if (reason_code)
		return reason_code;

	mutex_lock(&smc_client_lgr_pending);
	reason_code = smc_conn_create(smc, ini);
	if (reason_code) {
		mutex_unlock(&smc_client_lgr_pending);
		return reason_code;
	}

	smc_conn_save_peer_info(smc, aclc);

	if (ini->first_contact_local) {
		link = smc->conn.lnk;
	} else {
		/* set link that was assigned by server */
		link = NULL;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			struct smc_link *l = &smc->conn.lgr->lnk[i];

			if (l->peer_qpn == ntoh24(aclc->r0.qpn) &&
			    !memcmp(l->peer_gid, &aclc->r0.lcl.gid,
				    SMC_GID_SIZE) &&
			    (aclc->hdr.version > SMC_V1 ||
			     !memcmp(l->peer_mac, &aclc->r0.lcl.mac,
				     sizeof(l->peer_mac)))) {
				link = l;
				break;
			}
		}
		if (!link) {
			reason_code = SMC_CLC_DECL_NOSRVLINK;
			goto connect_abort;
		}
		smc_switch_link_and_count(&smc->conn, link);
	}

	/* create send buffer and rmb */
	if (smc_buf_create(smc, false)) {
		reason_code = SMC_CLC_DECL_MEM;
		goto connect_abort;
	}

	if (ini->first_contact_local)
		smc_link_save_peer_info(link, aclc, ini);

	if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) {
		reason_code = SMC_CLC_DECL_ERR_RTOK;
		goto connect_abort;
	}

	smc_rx_init(smc);

	if (ini->first_contact_local) {
		if (smc_ib_ready_link(link)) {
			reason_code = SMC_CLC_DECL_ERR_RDYLNK;
			goto connect_abort;
		}
	} else {
		/* reg sendbufs if they were vzalloced */
		if (smc->conn.sndbuf_desc->is_vm) {
			if (smcr_lgr_reg_sndbufs(link, smc->conn.sndbuf_desc)) {
				reason_code = SMC_CLC_DECL_ERR_REGBUF;
				goto connect_abort;
			}
		}
		if (smcr_lgr_reg_rmbs(link, smc->conn.rmb_desc)) {
			reason_code = SMC_CLC_DECL_ERR_REGBUF;
			goto connect_abort;
		}
	}

	if (aclc->hdr.version > SMC_V1) {
		eid = aclc->r1.eid;
		if (ini->first_contact_local)
			smc_fill_gid_list(link->lgr, &ini->smcrv2.gidlist,
					  link->smcibdev, link->gid);
	}

	reason_code = smc_clc_send_confirm(smc, ini->first_contact_local,
					   aclc->hdr.version, eid, ini);
	if (reason_code)
		goto connect_abort;

	smc_tx_init(smc);

	if (ini->first_contact_local) {
		/* QP confirmation over RoCE fabric */
		smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
		reason_code = smcr_clnt_conf_first_link(smc);
		smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
		if (reason_code)
			goto connect_abort;
	}
	mutex_unlock(&smc_client_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
connect_abort:
	smc_conn_abort(smc, ini->first_contact_local);
	mutex_unlock(&smc_client_lgr_pending);
	smc->connect_nonblock = 0;

	return reason_code;
}
/* The server has chosen one of the proposed ISM devices for the communication.
 * Determine from the CHID of the received CLC ACCEPT the ISM device chosen.
 */
static int
smc_v2_determine_accepted_chid(struct smc_clc_msg_accept_confirm *aclc,
			       struct smc_init_info *ini)
{
	int i;

	for (i = 0; i < ini->ism_offered_cnt + 1; i++) {
		if (ini->ism_chid[i] == ntohs(aclc->d1.chid)) {
			ini->ism_selected = i;
			return 0;
		}
	}

	return -EPROTO;
}
  1176. /* setup for ISM connection of client */
  1177. static int smc_connect_ism(struct smc_sock *smc,
  1178. struct smc_clc_msg_accept_confirm *aclc,
  1179. struct smc_init_info *ini)
  1180. {
  1181. u8 *eid = NULL;
  1182. int rc = 0;
  1183. ini->is_smcd = true;
  1184. ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;
  1185. if (aclc->hdr.version == SMC_V2) {
  1186. if (ini->first_contact_peer) {
  1187. struct smc_clc_first_contact_ext *fce =
  1188. smc_get_clc_first_contact_ext(aclc, true);
  1189. ini->release_nr = fce->release;
  1190. rc = smc_clc_clnt_v2x_features_validate(fce, ini);
  1191. if (rc)
  1192. return rc;
  1193. }
  1194. rc = smc_v2_determine_accepted_chid(aclc, ini);
  1195. if (rc)
  1196. return rc;
  1197. if (__smc_ism_is_emulated(ini->ism_chid[ini->ism_selected]))
  1198. ini->ism_peer_gid[ini->ism_selected].gid_ext =
  1199. ntohll(aclc->d1.gid_ext);
  1200. /* for non-Emulated-ISM devices, peer gid_ext remains 0. */
  1201. }
  1202. ini->ism_peer_gid[ini->ism_selected].gid = ntohll(aclc->d0.gid);
  1203. /* there is only one lgr role for SMC-D; use server lock */
  1204. mutex_lock(&smc_server_lgr_pending);
  1205. rc = smc_conn_create(smc, ini);
  1206. if (rc) {
  1207. mutex_unlock(&smc_server_lgr_pending);
  1208. return rc;
  1209. }
  1210. /* Create send and receive buffers */
  1211. rc = smc_buf_create(smc, true);
  1212. if (rc) {
  1213. rc = (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB : SMC_CLC_DECL_MEM;
  1214. goto connect_abort;
  1215. }
  1216. smc_conn_save_peer_info(smc, aclc);
  1217. if (smc_ism_support_dmb_nocopy(smc->conn.lgr->smcd)) {
  1218. rc = smcd_buf_attach(smc);
  1219. if (rc) {
  1220. rc = SMC_CLC_DECL_MEM; /* try to fallback */
  1221. goto connect_abort;
  1222. }
  1223. }
  1224. smc_rx_init(smc);
  1225. smc_tx_init(smc);
  1226. if (aclc->hdr.version > SMC_V1)
  1227. eid = aclc->d1.eid;
  1228. rc = smc_clc_send_confirm(smc, ini->first_contact_local,
  1229. aclc->hdr.version, eid, ini);
  1230. if (rc)
  1231. goto connect_abort;
  1232. mutex_unlock(&smc_server_lgr_pending);
  1233. smc_copy_sock_settings_to_clc(smc);
  1234. smc->connect_nonblock = 0;
  1235. if (smc->sk.sk_state == SMC_INIT)
  1236. smc->sk.sk_state = SMC_ACTIVE;
  1237. return 0;
  1238. connect_abort:
  1239. smc_conn_abort(smc, ini->first_contact_local);
  1240. mutex_unlock(&smc_server_lgr_pending);
  1241. smc->connect_nonblock = 0;
  1242. return rc;
  1243. }
/* check if the received accept type and version match a proposed one */
static int smc_connect_check_aclc(struct smc_init_info *ini,
				  struct smc_clc_msg_accept_confirm *aclc)
{
	if (aclc->hdr.version >= SMC_V2) {
		if ((aclc->hdr.typev1 == SMC_TYPE_R &&
		     !smcr_indicated(ini->smc_type_v2)) ||
		    (aclc->hdr.typev1 == SMC_TYPE_D &&
		     !smcd_indicated(ini->smc_type_v2)))
			return SMC_CLC_DECL_MODEUNSUPP;
	} else {
		if ((aclc->hdr.typev1 == SMC_TYPE_R &&
		     !smcr_indicated(ini->smc_type_v1)) ||
		    (aclc->hdr.typev1 == SMC_TYPE_D &&
		     !smcd_indicated(ini->smc_type_v1)))
			return SMC_CLC_DECL_MODEUNSUPP;
	}

	return 0;
}
  1263. /* perform steps before actually connecting */
  1264. static int __smc_connect(struct smc_sock *smc)
  1265. {
  1266. u8 version = smc_ism_is_v2_capable() ? SMC_V2 : SMC_V1;
  1267. struct smc_clc_msg_accept_confirm *aclc;
  1268. struct smc_init_info *ini = NULL;
  1269. u8 *buf = NULL;
  1270. int rc = 0;
  1271. if (smc->use_fallback)
  1272. return smc_connect_fallback(smc, smc->fallback_rsn);
  1273. /* if peer has not signalled SMC-capability, fall back */
  1274. if (!tcp_sk(smc->clcsock->sk)->syn_smc)
  1275. return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);
  1276. /* IPSec connections opt out of SMC optimizations */
  1277. if (using_ipsec(smc))
  1278. return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC,
  1279. version);
  1280. ini = kzalloc(sizeof(*ini), GFP_KERNEL);
  1281. if (!ini)
  1282. return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM,
  1283. version);
  1284. ini->smcd_version = SMC_V1 | SMC_V2;
  1285. ini->smcr_version = SMC_V1 | SMC_V2;
  1286. ini->smc_type_v1 = SMC_TYPE_B;
  1287. ini->smc_type_v2 = SMC_TYPE_B;
  1288. /* get vlan id from IP device */
  1289. if (smc_vlan_by_tcpsk(smc->clcsock, ini)) {
  1290. ini->smcd_version &= ~SMC_V1;
  1291. ini->smcr_version = 0;
  1292. ini->smc_type_v1 = SMC_TYPE_N;
  1293. }
  1294. rc = smc_find_proposal_devices(smc, ini);
  1295. if (rc)
  1296. goto fallback;
  1297. buf = kzalloc(SMC_CLC_MAX_ACCEPT_LEN, GFP_KERNEL);
  1298. if (!buf) {
  1299. rc = SMC_CLC_DECL_MEM;
  1300. goto fallback;
  1301. }
  1302. aclc = (struct smc_clc_msg_accept_confirm *)buf;
  1303. /* perform CLC handshake */
  1304. rc = smc_connect_clc(smc, aclc, ini);
  1305. if (rc) {
  1306. /* -EAGAIN on timeout, see tcp_recvmsg() */
  1307. if (rc == -EAGAIN) {
  1308. rc = -ETIMEDOUT;
  1309. smc->sk.sk_err = ETIMEDOUT;
  1310. }
  1311. goto vlan_cleanup;
  1312. }
  1313. /* check if smc modes and versions of CLC proposal and accept match */
  1314. rc = smc_connect_check_aclc(ini, aclc);
  1315. version = aclc->hdr.version == SMC_V1 ? SMC_V1 : SMC_V2;
  1316. if (rc)
  1317. goto vlan_cleanup;
  1318. /* depending on previous steps, connect using rdma or ism */
  1319. if (aclc->hdr.typev1 == SMC_TYPE_R) {
  1320. ini->smcr_version = version;
  1321. rc = smc_connect_rdma(smc, aclc, ini);
  1322. } else if (aclc->hdr.typev1 == SMC_TYPE_D) {
  1323. ini->smcd_version = version;
  1324. rc = smc_connect_ism(smc, aclc, ini);
  1325. }
  1326. if (rc)
  1327. goto vlan_cleanup;
  1328. SMC_STAT_CLNT_SUCC_INC(sock_net(smc->clcsock->sk), aclc);
  1329. smc_connect_ism_vlan_cleanup(smc, ini);
  1330. kfree(buf);
  1331. kfree(ini);
  1332. return 0;
  1333. vlan_cleanup:
  1334. smc_connect_ism_vlan_cleanup(smc, ini);
  1335. kfree(buf);
  1336. fallback:
  1337. kfree(ini);
  1338. return smc_connect_decline_fallback(smc, rc, version);
  1339. }
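
/* connect worker: wait for the TCP handshake on the internal CLC socket to
 * complete, then run the SMC handshake for a nonblocking connect()
 */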
  1340. static void smc_connect_work(struct work_struct *work)
  1341. {
  1342. struct smc_sock *smc = container_of(work, struct smc_sock,
  1343. connect_work);
  1344. long timeo = smc->sk.sk_sndtimeo;
  1345. int rc = 0;
  1346. if (!timeo)
  1347. timeo = MAX_SCHEDULE_TIMEOUT;
  1348. lock_sock(smc->clcsock->sk);
  1349. if (smc->clcsock->sk->sk_err) {
  1350. smc->sk.sk_err = smc->clcsock->sk->sk_err;
  1351. } else if ((1 << smc->clcsock->sk->sk_state) &
  1352. (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
  1353. rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
  1354. if ((rc == -EPIPE) &&
  1355. ((1 << smc->clcsock->sk->sk_state) &
  1356. (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)))
  1357. rc = 0;
  1358. }
  1359. release_sock(smc->clcsock->sk);
  1360. lock_sock(&smc->sk);
  1361. if (rc != 0 || smc->sk.sk_err) {
  1362. smc->sk.sk_state = SMC_CLOSED;
  1363. if (rc == -EPIPE || rc == -EAGAIN)
  1364. smc->sk.sk_err = EPIPE;
  1365. else if (rc == -ECONNREFUSED)
  1366. smc->sk.sk_err = ECONNREFUSED;
  1367. else if (signal_pending(current))
  1368. smc->sk.sk_err = -sock_intr_errno(timeo);
  1369. sock_put(&smc->sk); /* passive closing */
  1370. goto out;
  1371. }
  1372. rc = __smc_connect(smc);
  1373. if (rc < 0)
  1374. smc->sk.sk_err = -rc;
  1375. out:
  1376. if (!sock_flag(&smc->sk, SOCK_DEAD)) {
  1377. if (smc->sk.sk_err) {
  1378. smc->sk.sk_state_change(&smc->sk);
  1379. } else { /* allow polling before and after fallback decision */
  1380. smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
  1381. smc->sk.sk_write_space(&smc->sk);
  1382. }
  1383. }
  1384. release_sock(&smc->sk);
  1385. }
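
/* connect() backend of the SMC socket: validate the address, connect the
 * internal CLC (TCP) socket and perform the SMC handshake, deferring it to
 * the connect worker for nonblocking sockets
 */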
  1386. int smc_connect(struct socket *sock, struct sockaddr *addr,
  1387. int alen, int flags)
  1388. {
  1389. struct sock *sk = sock->sk;
  1390. struct smc_sock *smc;
  1391. int rc = -EINVAL;
  1392. smc = smc_sk(sk);
  1393. /* separate smc parameter checking to be safe */
  1394. if (alen < sizeof(addr->sa_family))
  1395. goto out_err;
  1396. if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
  1397. goto out_err;
  1398. lock_sock(sk);
  1399. switch (sock->state) {
  1400. default:
  1401. rc = -EINVAL;
  1402. goto out;
  1403. case SS_CONNECTED:
  1404. rc = sk->sk_state == SMC_ACTIVE ? -EISCONN : -EINVAL;
  1405. goto out;
  1406. case SS_CONNECTING:
  1407. if (sk->sk_state == SMC_ACTIVE)
  1408. goto connected;
  1409. break;
  1410. case SS_UNCONNECTED:
  1411. sock->state = SS_CONNECTING;
  1412. break;
  1413. }
  1414. switch (sk->sk_state) {
  1415. default:
  1416. goto out;
  1417. case SMC_CLOSED:
  1418. rc = sock_error(sk) ? : -ECONNABORTED;
  1419. sock->state = SS_UNCONNECTED;
  1420. goto out;
  1421. case SMC_ACTIVE:
  1422. rc = -EISCONN;
  1423. goto out;
  1424. case SMC_INIT:
  1425. break;
  1426. }
  1427. smc_copy_sock_settings_to_clc(smc);
  1428. tcp_sk(smc->clcsock->sk)->syn_smc = 1;
  1429. if (smc->connect_nonblock) {
  1430. rc = -EALREADY;
  1431. goto out;
  1432. }
  1433. rc = kernel_connect(smc->clcsock, addr, alen, flags);
  1434. if (rc && rc != -EINPROGRESS)
  1435. goto out;
  1436. if (smc->use_fallback) {
  1437. sock->state = rc ? SS_CONNECTING : SS_CONNECTED;
  1438. goto out;
  1439. }
  1440. sock_hold(&smc->sk); /* sock put in passive closing */
  1441. if (flags & O_NONBLOCK) {
  1442. if (queue_work(smc_hs_wq, &smc->connect_work))
  1443. smc->connect_nonblock = 1;
  1444. rc = -EINPROGRESS;
  1445. goto out;
  1446. } else {
  1447. rc = __smc_connect(smc);
  1448. if (rc < 0)
  1449. goto out;
  1450. }
  1451. connected:
  1452. rc = 0;
  1453. sock->state = SS_CONNECTED;
  1454. out:
  1455. release_sock(sk);
  1456. out_err:
  1457. return rc;
  1458. }
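
/* accept a connection on the internal CLC (TCP) listen socket and allocate
 * a new SMC socket for it
 */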
  1459. static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
  1460. {
  1461. struct socket *new_clcsock = NULL;
  1462. struct sock *lsk = &lsmc->sk;
  1463. struct sock *new_sk;
  1464. int rc = -EINVAL;
  1465. release_sock(lsk);
  1466. new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
  1467. if (!new_sk) {
  1468. rc = -ENOMEM;
  1469. lsk->sk_err = ENOMEM;
  1470. *new_smc = NULL;
  1471. lock_sock(lsk);
  1472. goto out;
  1473. }
  1474. *new_smc = smc_sk(new_sk);
  1475. mutex_lock(&lsmc->clcsock_release_lock);
  1476. if (lsmc->clcsock)
  1477. rc = kernel_accept(lsmc->clcsock, &new_clcsock, SOCK_NONBLOCK);
  1478. mutex_unlock(&lsmc->clcsock_release_lock);
  1479. lock_sock(lsk);
  1480. if (rc < 0 && rc != -EAGAIN)
  1481. lsk->sk_err = -rc;
  1482. if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
  1483. new_sk->sk_prot->unhash(new_sk);
  1484. if (new_clcsock)
  1485. sock_release(new_clcsock);
  1486. new_sk->sk_state = SMC_CLOSED;
  1487. smc_sock_set_flag(new_sk, SOCK_DEAD);
  1488. sock_put(new_sk); /* final */
  1489. *new_smc = NULL;
  1490. goto out;
  1491. }
  1492. /* new clcsock has inherited the smc listen-specific sk_data_ready
  1493. * function; switch it back to the original sk_data_ready function
  1494. */
  1495. new_clcsock->sk->sk_data_ready = lsmc->clcsk_data_ready;
  1496. /* if new clcsock has also inherited the fallback-specific callback
  1497. * functions, switch them back to the original ones.
  1498. */
  1499. if (lsmc->use_fallback) {
  1500. if (lsmc->clcsk_state_change)
  1501. new_clcsock->sk->sk_state_change = lsmc->clcsk_state_change;
  1502. if (lsmc->clcsk_write_space)
  1503. new_clcsock->sk->sk_write_space = lsmc->clcsk_write_space;
  1504. if (lsmc->clcsk_error_report)
  1505. new_clcsock->sk->sk_error_report = lsmc->clcsk_error_report;
  1506. }
  1507. (*new_smc)->clcsock = new_clcsock;
  1508. out:
  1509. return rc;
  1510. }
/* add a just-created sock to the accept queue of the listen sock as a
 * candidate for a subsequent socket accept call from user space
 */
static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
{
	struct smc_sock *par = smc_sk(parent);

	sock_hold(sk); /* sock_put in smc_accept_unlink() */
	spin_lock(&par->accept_q_lock);
	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_added(parent);
}

/* remove a socket from the accept queue of its parental listening socket */
static void smc_accept_unlink(struct sock *sk)
{
	struct smc_sock *par = smc_sk(sk)->listen_smc;

	spin_lock(&par->accept_q_lock);
	list_del_init(&smc_sk(sk)->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
	sock_put(sk); /* sock_hold in smc_accept_enqueue */
}
  1533. /* remove a sock from the accept queue to bind it to a new socket created
  1534. * for a socket accept call from user space
  1535. */
  1536. struct sock *smc_accept_dequeue(struct sock *parent,
  1537. struct socket *new_sock)
  1538. {
  1539. struct smc_sock *isk, *n;
  1540. struct sock *new_sk;
  1541. list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
  1542. new_sk = (struct sock *)isk;
  1543. smc_accept_unlink(new_sk);
  1544. if (new_sk->sk_state == SMC_CLOSED) {
  1545. new_sk->sk_prot->unhash(new_sk);
  1546. if (isk->clcsock) {
  1547. sock_release(isk->clcsock);
  1548. isk->clcsock = NULL;
  1549. }
  1550. sock_put(new_sk); /* final */
  1551. continue;
  1552. }
  1553. if (new_sock) {
  1554. sock_graft(new_sk, new_sock);
  1555. new_sock->state = SS_CONNECTED;
  1556. if (isk->use_fallback) {
  1557. smc_sk(new_sk)->clcsock->file = new_sock->file;
  1558. isk->clcsock->file->private_data = isk->clcsock;
  1559. }
  1560. }
  1561. return new_sk;
  1562. }
  1563. return NULL;
  1564. }
/* clean up for a created but never accepted sock */
void smc_close_non_accepted(struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	sock_hold(sk); /* sock_put below */
	lock_sock(sk);
	if (!sk->sk_lingertime)
		/* wait for peer closing */
		WRITE_ONCE(sk->sk_lingertime, SMC_MAX_STREAM_WAIT_TIMEOUT);
	__smc_release(smc);
	release_sock(sk);
	sock_put(sk); /* sock_hold above */
	sock_put(sk); /* final sock_put */
}
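
/* server: register buffers with the first link and exchange CONFIRM LINK
 * messages with the client to activate the link
 */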
static int smcr_serv_conf_first_link(struct smc_sock *smc)
{
	struct smc_link *link = smc->conn.lnk;
	struct smc_llc_qentry *qentry;
	int rc;

	/* reg the sndbuf if it was vzalloced */
	if (smc->conn.sndbuf_desc->is_vm) {
		if (smcr_link_reg_buf(link, smc->conn.sndbuf_desc))
			return SMC_CLC_DECL_ERR_REGBUF;
	}

	/* reg the rmb */
	if (smcr_link_reg_buf(link, smc->conn.rmb_desc))
		return SMC_CLC_DECL_ERR_REGBUF;

	/* send CONFIRM LINK request to client over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	/* receive CONFIRM LINK response from client over the RoCE fabric */
	qentry = smc_llc_wait(link->lgr, link, SMC_LLC_WAIT_TIME,
			      SMC_LLC_CONFIRM_LINK);
	if (!qentry) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}
	smc_llc_save_peer_uid(qentry);
	rc = smc_llc_eval_conf_link(qentry, SMC_LLC_RESP);
	smc_llc_flow_qentry_del(&link->lgr->llc_flow_lcl);
	if (rc)
		return SMC_CLC_DECL_RMBE_EC;

	/* confirm_rkey is implicit on 1st contact */
	smc->conn.rmb_desc->is_conf_rkey = true;

	smc_llc_link_active(link);
	smcr_lgr_set_type(link->lgr, SMC_LGR_SINGLE);

	if (link->lgr->max_links > 1) {
		down_write(&link->lgr->llc_conf_mutex);
		/* initial contact - try to establish second link */
		smc_llc_srv_add_link(link, NULL);
		up_write(&link->lgr->llc_conf_mutex);
	}
	return 0;
}
/* listen worker: finish */
static void smc_listen_out(struct smc_sock *new_smc)
{
	struct smc_sock *lsmc = new_smc->listen_smc;
	struct sock *newsmcsk = &new_smc->sk;

	if (tcp_sk(new_smc->clcsock->sk)->syn_smc)
		atomic_dec(&lsmc->queued_smc_hs);

	release_sock(newsmcsk); /* lock in smc_listen_work() */
	if (lsmc->sk.sk_state == SMC_LISTEN) {
		lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
		smc_accept_enqueue(&lsmc->sk, newsmcsk);
		release_sock(&lsmc->sk);
	} else { /* no longer listening */
		smc_close_non_accepted(newsmcsk);
	}

	/* Wake up accept */
	lsmc->sk.sk_data_ready(&lsmc->sk);
	sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
}

/* listen worker: finish in state connected */
static void smc_listen_out_connected(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;

	if (newsmcsk->sk_state == SMC_INIT)
		newsmcsk->sk_state = SMC_ACTIVE;

	smc_listen_out(new_smc);
}

/* listen worker: finish in error state */
static void smc_listen_out_err(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;
	struct net *net = sock_net(newsmcsk);

	this_cpu_inc(net->smc.smc_stats->srv_hshake_err_cnt);
	if (newsmcsk->sk_state == SMC_INIT)
		sock_put(&new_smc->sk); /* passive closing */
	newsmcsk->sk_state = SMC_CLOSED;

	smc_listen_out(new_smc);
}
/* listen worker: decline and fall back if possible */
static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
			       int local_first, u8 version)
{
	/* RDMA setup failed, switch back to TCP */
	smc_conn_abort(new_smc, local_first);
	if (reason_code < 0 ||
	    smc_switch_to_fallback(new_smc, reason_code)) {
		/* error, no fallback possible */
		smc_listen_out_err(new_smc);
		return;
	}
	if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
		if (smc_clc_send_decline(new_smc, reason_code, version) < 0) {
			smc_listen_out_err(new_smc);
			return;
		}
	}
	smc_listen_out_connected(new_smc);
}
  1680. /* listen worker: version checking */
  1681. static int smc_listen_v2_check(struct smc_sock *new_smc,
  1682. struct smc_clc_msg_proposal *pclc,
  1683. struct smc_init_info *ini)
  1684. {
  1685. struct smc_clc_smcd_v2_extension *pclc_smcd_v2_ext;
  1686. struct smc_clc_v2_extension *pclc_v2_ext;
  1687. int rc = SMC_CLC_DECL_PEERNOSMC;
  1688. ini->smc_type_v1 = pclc->hdr.typev1;
  1689. ini->smc_type_v2 = pclc->hdr.typev2;
  1690. ini->smcd_version = smcd_indicated(ini->smc_type_v1) ? SMC_V1 : 0;
  1691. ini->smcr_version = smcr_indicated(ini->smc_type_v1) ? SMC_V1 : 0;
  1692. if (pclc->hdr.version > SMC_V1) {
  1693. if (smcd_indicated(ini->smc_type_v2))
  1694. ini->smcd_version |= SMC_V2;
  1695. if (smcr_indicated(ini->smc_type_v2))
  1696. ini->smcr_version |= SMC_V2;
  1697. }
  1698. if (!(ini->smcd_version & SMC_V2) && !(ini->smcr_version & SMC_V2)) {
  1699. rc = SMC_CLC_DECL_PEERNOSMC;
  1700. goto out;
  1701. }
  1702. pclc_v2_ext = smc_get_clc_v2_ext(pclc);
  1703. if (!pclc_v2_ext) {
  1704. ini->smcd_version &= ~SMC_V2;
  1705. ini->smcr_version &= ~SMC_V2;
  1706. rc = SMC_CLC_DECL_NOV2EXT;
  1707. goto out;
  1708. }
  1709. pclc_smcd_v2_ext = smc_get_clc_smcd_v2_ext(pclc_v2_ext);
  1710. if (ini->smcd_version & SMC_V2) {
  1711. if (!smc_ism_is_v2_capable()) {
  1712. ini->smcd_version &= ~SMC_V2;
  1713. rc = SMC_CLC_DECL_NOISM2SUPP;
  1714. } else if (!pclc_smcd_v2_ext) {
  1715. ini->smcd_version &= ~SMC_V2;
  1716. rc = SMC_CLC_DECL_NOV2DEXT;
  1717. } else if (!pclc_v2_ext->hdr.eid_cnt &&
  1718. !pclc_v2_ext->hdr.flag.seid) {
  1719. ini->smcd_version &= ~SMC_V2;
  1720. rc = SMC_CLC_DECL_NOUEID;
  1721. }
  1722. }
  1723. if (ini->smcr_version & SMC_V2) {
  1724. if (!pclc_v2_ext->hdr.eid_cnt) {
  1725. ini->smcr_version &= ~SMC_V2;
  1726. rc = SMC_CLC_DECL_NOUEID;
  1727. }
  1728. }
  1729. ini->release_nr = pclc_v2_ext->hdr.flag.release;
  1730. if (pclc_v2_ext->hdr.flag.release > SMC_RELEASE)
  1731. ini->release_nr = SMC_RELEASE;
  1732. out:
  1733. if (!ini->smcd_version && !ini->smcr_version)
  1734. return rc;
  1735. return 0;
  1736. }
/* listen worker: check prefixes */
static int smc_listen_prfx_check(struct smc_sock *new_smc,
				 struct smc_clc_msg_proposal *pclc)
{
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct socket *newclcsock = new_smc->clcsock;

	if (pclc->hdr.typev1 == SMC_TYPE_N)
		return 0;
	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
	if (!pclc_prfx)
		return -EPROTO;
	if (smc_clc_prfx_match(newclcsock, pclc_prfx))
		return SMC_CLC_DECL_DIFFPREFIX;

	return 0;
}
/* listen worker: initialize connection and buffers */
static int smc_listen_rdma_init(struct smc_sock *new_smc,
				struct smc_init_info *ini)
{
	int rc;

	/* allocate connection / link group */
	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* create send buffer and rmb */
	if (smc_buf_create(new_smc, false)) {
		smc_conn_abort(new_smc, ini->first_contact_local);
		return SMC_CLC_DECL_MEM;
	}

	return 0;
}

/* listen worker: initialize connection and buffers for SMC-D */
static int smc_listen_ism_init(struct smc_sock *new_smc,
			       struct smc_init_info *ini)
{
	int rc;

	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* Create send and receive buffers */
	rc = smc_buf_create(new_smc, true);
	if (rc) {
		smc_conn_abort(new_smc, ini->first_contact_local);
		return (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB :
					 SMC_CLC_DECL_MEM;
	}

	return 0;
}
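
/* check whether this ISM device was already chosen as one of the matches */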
static bool smc_is_already_selected(struct smcd_dev *smcd,
				    struct smc_init_info *ini,
				    int matches)
{
	int i;

	for (i = 0; i < matches; i++)
		if (smcd == ini->ism_dev[i])
			return true;

	return false;
}
  1795. /* check for ISM devices matching proposed ISM devices */
  1796. static void smc_check_ism_v2_match(struct smc_init_info *ini,
  1797. u16 proposed_chid,
  1798. struct smcd_gid *proposed_gid,
  1799. unsigned int *matches)
  1800. {
  1801. struct smcd_dev *smcd;
  1802. list_for_each_entry(smcd, &smcd_dev_list.list, list) {
  1803. if (smcd->going_away)
  1804. continue;
  1805. if (smc_is_already_selected(smcd, ini, *matches))
  1806. continue;
  1807. if (smc_ism_get_chid(smcd) == proposed_chid &&
  1808. !smc_ism_cantalk(proposed_gid, ISM_RESERVED_VLANID, smcd)) {
  1809. ini->ism_peer_gid[*matches].gid = proposed_gid->gid;
  1810. if (__smc_ism_is_emulated(proposed_chid))
  1811. ini->ism_peer_gid[*matches].gid_ext =
  1812. proposed_gid->gid_ext;
  1813. /* non-Emulated-ISM's peer gid_ext remains 0. */
  1814. ini->ism_dev[*matches] = smcd;
  1815. (*matches)++;
  1816. break;
  1817. }
  1818. }
  1819. }
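
/* keep only the first reason code encountered during the device search */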
static void smc_find_ism_store_rc(u32 rc, struct smc_init_info *ini)
{
	if (!ini->rc)
		ini->rc = rc;
}
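
/* listen worker: find an ISM device matching one of the SMC-Dv2 devices
 * proposed by the client
 */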
  1825. static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
  1826. struct smc_clc_msg_proposal *pclc,
  1827. struct smc_init_info *ini)
  1828. {
  1829. struct smc_clc_smcd_v2_extension *smcd_v2_ext;
  1830. struct smc_clc_v2_extension *smc_v2_ext;
  1831. struct smc_clc_msg_smcd *pclc_smcd;
  1832. unsigned int matches = 0;
  1833. struct smcd_gid smcd_gid;
  1834. u8 smcd_version;
  1835. u8 *eid = NULL;
  1836. int i, rc;
  1837. u16 chid;
  1838. if (!(ini->smcd_version & SMC_V2) || !smcd_indicated(ini->smc_type_v2))
  1839. goto not_found;
  1840. pclc_smcd = smc_get_clc_msg_smcd(pclc);
  1841. smc_v2_ext = smc_get_clc_v2_ext(pclc);
  1842. smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);
  1843. if (!pclc_smcd || !smc_v2_ext || !smcd_v2_ext)
  1844. goto not_found;
  1845. mutex_lock(&smcd_dev_list.mutex);
  1846. if (pclc_smcd->ism.chid) {
  1847. /* check for ISM device matching proposed native ISM device */
  1848. smcd_gid.gid = ntohll(pclc_smcd->ism.gid);
  1849. smcd_gid.gid_ext = 0;
  1850. smc_check_ism_v2_match(ini, ntohs(pclc_smcd->ism.chid),
  1851. &smcd_gid, &matches);
  1852. }
	for (i = 0; i < smc_v2_ext->hdr.ism_gid_cnt; i++) {
		/* check for ISM devices matching proposed non-native ISM
		 * devices
		 */
		smcd_gid.gid = ntohll(smcd_v2_ext->gidchid[i].gid);
		smcd_gid.gid_ext = 0;
		chid = ntohs(smcd_v2_ext->gidchid[i].chid);
		if (__smc_ism_is_emulated(chid)) {
			/* each Emulated-ISM device takes two GID-CHID
			 * entries, and the CHID of the second entry repeats
			 * that of the first entry.
			 *
			 * So check that the next GID-CHID entry exists
			 * and that both entries carry the same CHID.
			 */
			if ((i + 1) == smc_v2_ext->hdr.ism_gid_cnt ||
			    chid != ntohs(smcd_v2_ext->gidchid[i + 1].chid))
				continue;
			smcd_gid.gid_ext =
				ntohll(smcd_v2_ext->gidchid[++i].gid);
		}
		smc_check_ism_v2_match(ini, chid, &smcd_gid, &matches);
	}
  1876. mutex_unlock(&smcd_dev_list.mutex);
  1877. if (!ini->ism_dev[0]) {
  1878. smc_find_ism_store_rc(SMC_CLC_DECL_NOSMCD2DEV, ini);
  1879. goto not_found;
  1880. }
  1881. smc_ism_get_system_eid(&eid);
  1882. if (!smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext,
  1883. smcd_v2_ext->system_eid, eid))
  1884. goto not_found;
  1885. /* separate - outside the smcd_dev_list.lock */
  1886. smcd_version = ini->smcd_version;
  1887. for (i = 0; i < matches; i++) {
  1888. ini->smcd_version = SMC_V2;
  1889. ini->is_smcd = true;
  1890. ini->ism_selected = i;
  1891. rc = smc_listen_ism_init(new_smc, ini);
  1892. if (rc) {
  1893. smc_find_ism_store_rc(rc, ini);
  1894. /* try next active ISM device */
  1895. continue;
  1896. }
  1897. return; /* matching and usable V2 ISM device found */
  1898. }
  1899. /* no V2 ISM device could be initialized */
  1900. ini->smcd_version = smcd_version; /* restore original value */
  1901. ini->negotiated_eid[0] = 0;
  1902. not_found:
  1903. ini->smcd_version &= ~SMC_V2;
  1904. ini->ism_dev[0] = NULL;
  1905. ini->is_smcd = false;
  1906. }
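
/* listen worker: find an ISM device matching the SMC-Dv1 device proposed
 * by the client
 */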
  1907. static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc,
  1908. struct smc_clc_msg_proposal *pclc,
  1909. struct smc_init_info *ini)
  1910. {
  1911. struct smc_clc_msg_smcd *pclc_smcd = smc_get_clc_msg_smcd(pclc);
  1912. int rc = 0;
  1913. /* check if ISM V1 is available */
  1914. if (!(ini->smcd_version & SMC_V1) ||
  1915. !smcd_indicated(ini->smc_type_v1) ||
  1916. !pclc_smcd)
  1917. goto not_found;
  1918. ini->is_smcd = true; /* prepare ISM check */
  1919. ini->ism_peer_gid[0].gid = ntohll(pclc_smcd->ism.gid);
  1920. ini->ism_peer_gid[0].gid_ext = 0;
  1921. rc = smc_find_ism_device(new_smc, ini);
  1922. if (rc)
  1923. goto not_found;
  1924. ini->ism_selected = 0;
  1925. rc = smc_listen_ism_init(new_smc, ini);
  1926. if (!rc)
  1927. return; /* V1 ISM device found */
  1928. not_found:
  1929. smc_find_ism_store_rc(rc, ini);
  1930. ini->smcd_version &= ~SMC_V1;
  1931. ini->ism_dev[0] = NULL;
  1932. ini->is_smcd = false;
  1933. }
/* listen worker: register buffers */
static int smc_listen_rdma_reg(struct smc_sock *new_smc, bool local_first)
{
	struct smc_connection *conn = &new_smc->conn;

	if (!local_first) {
		/* reg sendbufs if they were vzalloced */
		if (conn->sndbuf_desc->is_vm) {
			if (smcr_lgr_reg_sndbufs(conn->lnk,
						 conn->sndbuf_desc))
				return SMC_CLC_DECL_ERR_REGBUF;
		}
		if (smcr_lgr_reg_rmbs(conn->lnk, conn->rmb_desc))
			return SMC_CLC_DECL_ERR_REGBUF;
	}
	return 0;
}
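
/* listen worker: find an RDMA device for an SMC-Rv2 proposal */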
  1950. static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
  1951. struct smc_clc_msg_proposal *pclc,
  1952. struct smc_init_info *ini)
  1953. {
  1954. struct smc_clc_v2_extension *smc_v2_ext;
  1955. u8 smcr_version;
  1956. int rc;
  1957. if (!(ini->smcr_version & SMC_V2) || !smcr_indicated(ini->smc_type_v2))
  1958. goto not_found;
  1959. smc_v2_ext = smc_get_clc_v2_ext(pclc);
  1960. if (!smc_v2_ext ||
  1961. !smc_clc_match_eid(ini->negotiated_eid, smc_v2_ext, NULL, NULL))
  1962. goto not_found;
  1963. /* prepare RDMA check */
  1964. memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN);
  1965. memcpy(ini->peer_gid, smc_v2_ext->roce, SMC_GID_SIZE);
  1966. memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN);
  1967. ini->check_smcrv2 = true;
  1968. ini->smcrv2.clc_sk = new_smc->clcsock->sk;
  1969. ini->smcrv2.saddr = new_smc->clcsock->sk->sk_rcv_saddr;
  1970. ini->smcrv2.daddr = smc_ib_gid_to_ipv4(smc_v2_ext->roce);
  1971. rc = smc_find_rdma_device(new_smc, ini);
  1972. if (rc) {
  1973. smc_find_ism_store_rc(rc, ini);
  1974. goto not_found;
  1975. }
  1976. if (!ini->smcrv2.uses_gateway)
  1977. memcpy(ini->smcrv2.nexthop_mac, pclc->lcl.mac, ETH_ALEN);
  1978. smcr_version = ini->smcr_version;
  1979. ini->smcr_version = SMC_V2;
  1980. rc = smc_listen_rdma_init(new_smc, ini);
  1981. if (!rc) {
  1982. rc = smc_listen_rdma_reg(new_smc, ini->first_contact_local);
  1983. if (rc)
  1984. smc_conn_abort(new_smc, ini->first_contact_local);
  1985. }
  1986. if (!rc)
  1987. return;
  1988. ini->smcr_version = smcr_version;
  1989. smc_find_ism_store_rc(rc, ini);
  1990. not_found:
  1991. ini->smcr_version &= ~SMC_V2;
  1992. ini->smcrv2.ib_dev_v2 = NULL;
  1993. ini->check_smcrv2 = false;
  1994. }
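
/* listen worker: find an RDMA device for an SMC-Rv1 proposal */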
  1995. static int smc_find_rdma_v1_device_serv(struct smc_sock *new_smc,
  1996. struct smc_clc_msg_proposal *pclc,
  1997. struct smc_init_info *ini)
  1998. {
  1999. int rc;
  2000. if (!(ini->smcr_version & SMC_V1) || !smcr_indicated(ini->smc_type_v1))
  2001. return SMC_CLC_DECL_NOSMCDEV;
  2002. /* prepare RDMA check */
  2003. memcpy(ini->peer_systemid, pclc->lcl.id_for_peer, SMC_SYSTEMID_LEN);
  2004. memcpy(ini->peer_gid, pclc->lcl.gid, SMC_GID_SIZE);
  2005. memcpy(ini->peer_mac, pclc->lcl.mac, ETH_ALEN);
  2006. rc = smc_find_rdma_device(new_smc, ini);
  2007. if (rc) {
  2008. /* no RDMA device found */
  2009. return SMC_CLC_DECL_NOSMCDEV;
  2010. }
  2011. rc = smc_listen_rdma_init(new_smc, ini);
  2012. if (rc)
  2013. return rc;
  2014. return smc_listen_rdma_reg(new_smc, ini->first_contact_local);
  2015. }
  2016. /* determine the local device matching to proposal */
  2017. static int smc_listen_find_device(struct smc_sock *new_smc,
  2018. struct smc_clc_msg_proposal *pclc,
  2019. struct smc_init_info *ini)
  2020. {
  2021. int prfx_rc;
  2022. /* check for ISM device matching V2 proposed device */
  2023. smc_find_ism_v2_device_serv(new_smc, pclc, ini);
  2024. if (ini->ism_dev[0])
  2025. return 0;
  2026. /* check for matching IP prefix and subnet length (V1) */
  2027. prfx_rc = smc_listen_prfx_check(new_smc, pclc);
  2028. if (prfx_rc)
  2029. smc_find_ism_store_rc(prfx_rc, ini);
  2030. /* get vlan id from IP device */
  2031. if (smc_vlan_by_tcpsk(new_smc->clcsock, ini))
  2032. return ini->rc ?: SMC_CLC_DECL_GETVLANERR;
  2033. /* check for ISM device matching V1 proposed device */
  2034. if (!prfx_rc)
  2035. smc_find_ism_v1_device_serv(new_smc, pclc, ini);
  2036. if (ini->ism_dev[0])
  2037. return 0;
  2038. if (!smcr_indicated(pclc->hdr.typev1) &&
  2039. !smcr_indicated(pclc->hdr.typev2))
  2040. /* skip RDMA and decline */
  2041. return ini->rc ?: SMC_CLC_DECL_NOSMCDDEV;
  2042. /* check if RDMA V2 is available */
  2043. smc_find_rdma_v2_device_serv(new_smc, pclc, ini);
  2044. if (ini->smcrv2.ib_dev_v2)
  2045. return 0;
  2046. /* check if RDMA V1 is available */
  2047. if (!prfx_rc) {
  2048. int rc;
  2049. rc = smc_find_rdma_v1_device_serv(new_smc, pclc, ini);
  2050. smc_find_ism_store_rc(rc, ini);
  2051. return (!rc) ? 0 : ini->rc;
  2052. }
  2053. return prfx_rc;
  2054. }
/* listen worker: finish RDMA setup */
static int smc_listen_rdma_finish(struct smc_sock *new_smc,
				  struct smc_clc_msg_accept_confirm *cclc,
				  bool local_first,
				  struct smc_init_info *ini)
{
	struct smc_link *link = new_smc->conn.lnk;
	int reason_code = 0;

	if (local_first)
		smc_link_save_peer_info(link, cclc, ini);

	if (smc_rmb_rtoken_handling(&new_smc->conn, link, cclc))
		return SMC_CLC_DECL_ERR_RTOK;

	if (local_first) {
		if (smc_ib_ready_link(link))
			return SMC_CLC_DECL_ERR_RDYLNK;
		/* QP confirmation over RoCE fabric */
		smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
		reason_code = smcr_serv_conf_first_link(new_smc);
		smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
	}
	return reason_code;
}
  2077. /* setup for connection of server */
  2078. static void smc_listen_work(struct work_struct *work)
  2079. {
  2080. struct smc_sock *new_smc = container_of(work, struct smc_sock,
  2081. smc_listen_work);
  2082. struct socket *newclcsock = new_smc->clcsock;
  2083. struct smc_clc_msg_accept_confirm *cclc;
  2084. struct smc_clc_msg_proposal_area *buf;
  2085. struct smc_clc_msg_proposal *pclc;
  2086. struct smc_init_info *ini = NULL;
  2087. u8 proposal_version = SMC_V1;
  2088. u8 accept_version;
  2089. int rc = 0;
  2090. lock_sock(&new_smc->sk); /* release in smc_listen_out() */
  2091. if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
  2092. return smc_listen_out_err(new_smc);
  2093. if (new_smc->use_fallback) {
  2094. smc_listen_out_connected(new_smc);
  2095. return;
  2096. }
  2097. /* check if peer is smc capable */
  2098. if (!tcp_sk(newclcsock->sk)->syn_smc) {
  2099. rc = smc_switch_to_fallback(new_smc, SMC_CLC_DECL_PEERNOSMC);
  2100. if (rc)
  2101. smc_listen_out_err(new_smc);
  2102. else
  2103. smc_listen_out_connected(new_smc);
  2104. return;
  2105. }
  2106. /* do inband token exchange -
  2107. * wait for and receive SMC Proposal CLC message
  2108. */
  2109. buf = kzalloc(sizeof(*buf), GFP_KERNEL);
  2110. if (!buf) {
  2111. rc = SMC_CLC_DECL_MEM;
  2112. goto out_decl;
  2113. }
  2114. pclc = (struct smc_clc_msg_proposal *)buf;
  2115. rc = smc_clc_wait_msg(new_smc, pclc, sizeof(*buf),
  2116. SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
  2117. if (rc)
  2118. goto out_decl;
  2119. if (pclc->hdr.version > SMC_V1)
  2120. proposal_version = SMC_V2;
  2121. /* IPSec connections opt out of SMC optimizations */
  2122. if (using_ipsec(new_smc)) {
  2123. rc = SMC_CLC_DECL_IPSEC;
  2124. goto out_decl;
  2125. }
  2126. ini = kzalloc(sizeof(*ini), GFP_KERNEL);
  2127. if (!ini) {
  2128. rc = SMC_CLC_DECL_MEM;
  2129. goto out_decl;
  2130. }
  2131. /* initial version checking */
  2132. rc = smc_listen_v2_check(new_smc, pclc, ini);
  2133. if (rc)
  2134. goto out_decl;
  2135. rc = smc_clc_srv_v2x_features_validate(new_smc, pclc, ini);
  2136. if (rc)
  2137. goto out_decl;
  2138. mutex_lock(&smc_server_lgr_pending);
  2139. smc_rx_init(new_smc);
  2140. smc_tx_init(new_smc);
  2141. /* determine ISM or RoCE device used for connection */
  2142. rc = smc_listen_find_device(new_smc, pclc, ini);
  2143. if (rc)
  2144. goto out_unlock;
  2145. /* send SMC Accept CLC message */
  2146. accept_version = ini->is_smcd ? ini->smcd_version : ini->smcr_version;
  2147. rc = smc_clc_send_accept(new_smc, ini->first_contact_local,
  2148. accept_version, ini->negotiated_eid, ini);
  2149. if (rc)
  2150. goto out_unlock;
  2151. /* SMC-D does not need this lock any more */
  2152. if (ini->is_smcd)
  2153. mutex_unlock(&smc_server_lgr_pending);
  2154. /* receive SMC Confirm CLC message */
  2155. memset(buf, 0, sizeof(*buf));
  2156. cclc = (struct smc_clc_msg_accept_confirm *)buf;
  2157. rc = smc_clc_wait_msg(new_smc, cclc, sizeof(*buf),
  2158. SMC_CLC_CONFIRM, CLC_WAIT_TIME);
  2159. if (rc) {
  2160. if (!ini->is_smcd)
  2161. goto out_unlock;
  2162. goto out_decl;
  2163. }
  2164. rc = smc_clc_v2x_features_confirm_check(cclc, ini);
  2165. if (rc) {
  2166. if (!ini->is_smcd)
  2167. goto out_unlock;
  2168. goto out_decl;
  2169. }
  2170. /* fce smc release version is needed in smc_listen_rdma_finish,
  2171. * so save fce info here.
  2172. */
  2173. smc_conn_save_peer_info_fce(new_smc, cclc);
  2174. /* finish worker */
  2175. if (!ini->is_smcd) {
  2176. rc = smc_listen_rdma_finish(new_smc, cclc,
  2177. ini->first_contact_local, ini);
  2178. if (rc)
  2179. goto out_unlock;
  2180. mutex_unlock(&smc_server_lgr_pending);
  2181. }
  2182. smc_conn_save_peer_info(new_smc, cclc);
  2183. if (ini->is_smcd &&
  2184. smc_ism_support_dmb_nocopy(new_smc->conn.lgr->smcd)) {
  2185. rc = smcd_buf_attach(new_smc);
  2186. if (rc)
  2187. goto out_decl;
  2188. }
  2189. smc_listen_out_connected(new_smc);
  2190. SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini);
  2191. goto out_free;
  2192. out_unlock:
  2193. mutex_unlock(&smc_server_lgr_pending);
  2194. out_decl:
  2195. smc_listen_decline(new_smc, rc, ini ? ini->first_contact_local : 0,
  2196. proposal_version);
  2197. out_free:
  2198. kfree(ini);
  2199. kfree(buf);
  2200. }
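
/* listen worker for the internal CLC (TCP) socket: accept incoming TCP
 * connections and schedule the SMC listen work for each of them
 */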
  2201. static void smc_tcp_listen_work(struct work_struct *work)
  2202. {
  2203. struct smc_sock *lsmc = container_of(work, struct smc_sock,
  2204. tcp_listen_work);
  2205. struct sock *lsk = &lsmc->sk;
  2206. struct smc_sock *new_smc;
  2207. int rc = 0;
  2208. lock_sock(lsk);
  2209. while (lsk->sk_state == SMC_LISTEN) {
  2210. rc = smc_clcsock_accept(lsmc, &new_smc);
  2211. if (rc) /* clcsock accept queue empty or error */
  2212. goto out;
  2213. if (!new_smc)
  2214. continue;
  2215. if (tcp_sk(new_smc->clcsock->sk)->syn_smc)
  2216. atomic_inc(&lsmc->queued_smc_hs);
  2217. new_smc->listen_smc = lsmc;
  2218. new_smc->use_fallback = lsmc->use_fallback;
  2219. new_smc->fallback_rsn = lsmc->fallback_rsn;
  2220. sock_hold(lsk); /* sock_put in smc_listen_work */
  2221. INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
  2222. smc_copy_sock_settings_to_smc(new_smc);
  2223. sock_hold(&new_smc->sk); /* sock_put in passive closing */
  2224. if (!queue_work(smc_hs_wq, &new_smc->smc_listen_work))
  2225. sock_put(&new_smc->sk);
  2226. }
  2227. out:
  2228. release_sock(lsk);
  2229. sock_put(&lsmc->sk); /* sock_hold in smc_clcsock_data_ready() */
  2230. }
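
/* data-ready callback of the CLC listen socket: schedule the TCP listen
 * worker when a new connection request arrives
 */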
static void smc_clcsock_data_ready(struct sock *listen_clcsock)
{
	struct smc_sock *lsmc;

	read_lock_bh(&listen_clcsock->sk_callback_lock);
	lsmc = smc_clcsock_user_data(listen_clcsock);
	if (!lsmc)
		goto out;
	lsmc->clcsk_data_ready(listen_clcsock);
	if (lsmc->sk.sk_state == SMC_LISTEN) {
		sock_hold(&lsmc->sk); /* sock_put in smc_tcp_listen_work() */
		if (!queue_work(smc_tcp_ls_wq, &lsmc->tcp_listen_work))
			sock_put(&lsmc->sk);
	}
out:
	read_unlock_bh(&listen_clcsock->sk_callback_lock);
}
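
/* listen() backend of the SMC socket: prepare the internal CLC (TCP) socket
 * and put it into listen state
 */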
  2247. int smc_listen(struct socket *sock, int backlog)
  2248. {
  2249. struct sock *sk = sock->sk;
  2250. struct smc_sock *smc;
  2251. int rc;
  2252. smc = smc_sk(sk);
  2253. lock_sock(sk);
  2254. rc = -EINVAL;
  2255. if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
  2256. smc->connect_nonblock || sock->state != SS_UNCONNECTED)
  2257. goto out;
  2258. rc = 0;
  2259. if (sk->sk_state == SMC_LISTEN) {
  2260. sk->sk_max_ack_backlog = backlog;
  2261. goto out;
  2262. }
	/* some socket options are handled in core, so we cannot apply
	 * them to the clc socket -- copy smc socket options to clc socket
	 */
  2266. smc_copy_sock_settings_to_clc(smc);
  2267. if (!smc->use_fallback)
  2268. tcp_sk(smc->clcsock->sk)->syn_smc = 1;
  2269. /* save original sk_data_ready function and establish
  2270. * smc-specific sk_data_ready function
  2271. */
  2272. write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
  2273. smc->clcsock->sk->sk_user_data =
  2274. (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
  2275. smc_clcsock_replace_cb(&smc->clcsock->sk->sk_data_ready,
  2276. smc_clcsock_data_ready, &smc->clcsk_data_ready);
  2277. write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
  2278. /* save original ops */
  2279. smc->ori_af_ops = inet_csk(smc->clcsock->sk)->icsk_af_ops;
  2280. smc->af_ops = *smc->ori_af_ops;
  2281. smc->af_ops.syn_recv_sock = smc_tcp_syn_recv_sock;
  2282. inet_csk(smc->clcsock->sk)->icsk_af_ops = &smc->af_ops;
  2283. if (smc->limit_smc_hs)
  2284. tcp_sk(smc->clcsock->sk)->smc_hs_congested = smc_hs_congested;
  2285. rc = kernel_listen(smc->clcsock, backlog);
  2286. if (rc) {
  2287. write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
  2288. smc_clcsock_restore_cb(&smc->clcsock->sk->sk_data_ready,
  2289. &smc->clcsk_data_ready);
  2290. smc->clcsock->sk->sk_user_data = NULL;
  2291. write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
  2292. goto out;
  2293. }
  2294. sk->sk_max_ack_backlog = backlog;
  2295. sk->sk_ack_backlog = 0;
  2296. sk->sk_state = SMC_LISTEN;
  2297. out:
  2298. release_sock(sk);
  2299. return rc;
  2300. }
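
/* accept() backend of the SMC socket: wait for a connection on the accept
 * queue and hand it over to user space
 */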
  2301. int smc_accept(struct socket *sock, struct socket *new_sock,
  2302. struct proto_accept_arg *arg)
  2303. {
  2304. struct sock *sk = sock->sk, *nsk;
  2305. DECLARE_WAITQUEUE(wait, current);
  2306. struct smc_sock *lsmc;
  2307. long timeo;
  2308. int rc = 0;
  2309. lsmc = smc_sk(sk);
  2310. sock_hold(sk); /* sock_put below */
  2311. lock_sock(sk);
  2312. if (lsmc->sk.sk_state != SMC_LISTEN) {
  2313. rc = -EINVAL;
  2314. release_sock(sk);
  2315. goto out;
  2316. }
  2317. /* Wait for an incoming connection */
  2318. timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
  2319. add_wait_queue_exclusive(sk_sleep(sk), &wait);
  2320. while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
  2321. set_current_state(TASK_INTERRUPTIBLE);
  2322. if (!timeo) {
  2323. rc = -EAGAIN;
  2324. break;
  2325. }
  2326. release_sock(sk);
  2327. timeo = schedule_timeout(timeo);
  2328. /* wakeup by sk_data_ready in smc_listen_work() */
  2329. sched_annotate_sleep();
  2330. lock_sock(sk);
  2331. if (signal_pending(current)) {
  2332. rc = sock_intr_errno(timeo);
  2333. break;
  2334. }
  2335. }
  2336. set_current_state(TASK_RUNNING);
  2337. remove_wait_queue(sk_sleep(sk), &wait);
  2338. if (!rc)
  2339. rc = sock_error(nsk);
  2340. release_sock(sk);
  2341. if (rc)
  2342. goto out;
  2343. if (lsmc->sockopt_defer_accept && !(arg->flags & O_NONBLOCK)) {
  2344. /* wait till data arrives on the socket */
  2345. timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
  2346. MSEC_PER_SEC);
  2347. if (smc_sk(nsk)->use_fallback) {
  2348. struct sock *clcsk = smc_sk(nsk)->clcsock->sk;
  2349. lock_sock(clcsk);
  2350. if (skb_queue_empty(&clcsk->sk_receive_queue))
  2351. sk_wait_data(clcsk, &timeo, NULL);
  2352. release_sock(clcsk);
  2353. } else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
  2354. lock_sock(nsk);
  2355. smc_rx_wait(smc_sk(nsk), &timeo, 0, smc_rx_data_available);
  2356. release_sock(nsk);
  2357. }
  2358. }
  2359. out:
  2360. sock_put(sk); /* sock_hold above */
  2361. return rc;
  2362. }
  2363. int smc_getname(struct socket *sock, struct sockaddr *addr,
  2364. int peer)
  2365. {
  2366. struct smc_sock *smc;
  2367. if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
  2368. (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
  2369. return -ENOTCONN;
  2370. smc = smc_sk(sock->sk);
  2371. return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
  2372. }
  2373. int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
  2374. {
  2375. struct sock *sk = sock->sk;
  2376. struct smc_sock *smc;
  2377. int rc;
  2378. smc = smc_sk(sk);
  2379. lock_sock(sk);
  2380. /* SMC does not support connect with fastopen */
  2381. if (msg->msg_flags & MSG_FASTOPEN) {
  2382. /* not connected yet, fallback */
  2383. if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
  2384. rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
  2385. if (rc)
  2386. goto out;
  2387. } else {
  2388. rc = -EINVAL;
  2389. goto out;
  2390. }
  2391. } else if ((sk->sk_state != SMC_ACTIVE) &&
  2392. (sk->sk_state != SMC_APPCLOSEWAIT1) &&
  2393. (sk->sk_state != SMC_INIT)) {
  2394. rc = -EPIPE;
  2395. goto out;
  2396. }
  2397. if (smc->use_fallback) {
  2398. rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
  2399. } else {
  2400. rc = smc_tx_sendmsg(smc, msg, len);
  2401. SMC_STAT_TX_PAYLOAD(smc, len, rc);
  2402. }
  2403. out:
  2404. release_sock(sk);
  2405. return rc;
  2406. }
  2407. int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
  2408. int flags)
  2409. {
  2410. struct sock *sk = sock->sk;
  2411. struct smc_sock *smc;
  2412. int rc = -ENOTCONN;
  2413. smc = smc_sk(sk);
  2414. lock_sock(sk);
  2415. if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
  2416. /* socket was connected before, no more data to read */
  2417. rc = 0;
  2418. goto out;
  2419. }
  2420. if ((sk->sk_state == SMC_INIT) ||
  2421. (sk->sk_state == SMC_LISTEN) ||
  2422. (sk->sk_state == SMC_CLOSED))
  2423. goto out;
  2424. if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
  2425. rc = 0;
  2426. goto out;
  2427. }
  2428. if (smc->use_fallback) {
  2429. rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
  2430. } else {
  2431. msg->msg_namelen = 0;
  2432. rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
  2433. SMC_STAT_RX_PAYLOAD(smc, rc, rc);
  2434. }
  2435. out:
  2436. release_sock(sk);
  2437. return rc;
  2438. }
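
/* poll helper for listening sockets: signal EPOLLIN when the accept queue
 * is not empty
 */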
static __poll_t smc_accept_poll(struct sock *parent)
{
	struct smc_sock *isk = smc_sk(parent);
	__poll_t mask = 0;

	spin_lock(&isk->accept_q_lock);
	if (!list_empty(&isk->accept_q))
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&isk->accept_q_lock);

	return mask;
}
  2449. __poll_t smc_poll(struct file *file, struct socket *sock,
  2450. poll_table *wait)
  2451. {
  2452. struct sock *sk = sock->sk;
  2453. struct smc_sock *smc;
  2454. __poll_t mask = 0;
  2455. if (!sk)
  2456. return EPOLLNVAL;
  2457. smc = smc_sk(sock->sk);
  2458. if (smc->use_fallback) {
  2459. /* delegate to CLC child sock */
  2460. mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
  2461. sk->sk_err = smc->clcsock->sk->sk_err;
  2462. } else {
  2463. if (sk->sk_state != SMC_CLOSED)
  2464. sock_poll_wait(file, sock, wait);
  2465. if (sk->sk_err)
  2466. mask |= EPOLLERR;
  2467. if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
  2468. (sk->sk_state == SMC_CLOSED))
  2469. mask |= EPOLLHUP;
  2470. if (sk->sk_state == SMC_LISTEN) {
  2471. /* woken up by sk_data_ready in smc_listen_work() */
  2472. mask |= smc_accept_poll(sk);
  2473. } else if (smc->use_fallback) { /* as result of connect_work()*/
  2474. mask |= smc->clcsock->ops->poll(file, smc->clcsock,
  2475. wait);
  2476. sk->sk_err = smc->clcsock->sk->sk_err;
  2477. } else {
  2478. if ((sk->sk_state != SMC_INIT &&
  2479. atomic_read(&smc->conn.sndbuf_space)) ||
  2480. sk->sk_shutdown & SEND_SHUTDOWN) {
  2481. mask |= EPOLLOUT | EPOLLWRNORM;
  2482. } else {
  2483. sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
  2484. set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
  2485. if (sk->sk_state != SMC_INIT) {
  2486. /* Race breaker the same way as tcp_poll(). */
  2487. smp_mb__after_atomic();
  2488. if (atomic_read(&smc->conn.sndbuf_space))
  2489. mask |= EPOLLOUT | EPOLLWRNORM;
  2490. }
  2491. }
  2492. if (atomic_read(&smc->conn.bytes_to_rcv))
  2493. mask |= EPOLLIN | EPOLLRDNORM;
  2494. if (sk->sk_shutdown & RCV_SHUTDOWN)
  2495. mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
  2496. if (sk->sk_state == SMC_APPCLOSEWAIT1)
  2497. mask |= EPOLLIN;
  2498. if (smc->conn.urg_state == SMC_URG_VALID)
  2499. mask |= EPOLLPRI;
  2500. }
  2501. }
  2502. return mask;
  2503. }
  2504. int smc_shutdown(struct socket *sock, int how)
  2505. {
  2506. struct sock *sk = sock->sk;
  2507. bool do_shutdown = true;
  2508. struct smc_sock *smc;
  2509. int rc = -EINVAL;
  2510. int old_state;
  2511. int rc1 = 0;
  2512. smc = smc_sk(sk);
  2513. if ((how < SHUT_RD) || (how > SHUT_RDWR))
  2514. return rc;
  2515. lock_sock(sk);
  2516. if (sock->state == SS_CONNECTING) {
  2517. if (sk->sk_state == SMC_ACTIVE)
  2518. sock->state = SS_CONNECTED;
  2519. else if (sk->sk_state == SMC_PEERCLOSEWAIT1 ||
  2520. sk->sk_state == SMC_PEERCLOSEWAIT2 ||
  2521. sk->sk_state == SMC_APPCLOSEWAIT1 ||
  2522. sk->sk_state == SMC_APPCLOSEWAIT2 ||
  2523. sk->sk_state == SMC_APPFINCLOSEWAIT)
  2524. sock->state = SS_DISCONNECTING;
  2525. }
  2526. rc = -ENOTCONN;
  2527. if ((sk->sk_state != SMC_ACTIVE) &&
  2528. (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
  2529. (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
  2530. (sk->sk_state != SMC_APPCLOSEWAIT1) &&
  2531. (sk->sk_state != SMC_APPCLOSEWAIT2) &&
  2532. (sk->sk_state != SMC_APPFINCLOSEWAIT))
  2533. goto out;
  2534. if (smc->use_fallback) {
  2535. rc = kernel_sock_shutdown(smc->clcsock, how);
  2536. sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
  2537. if (sk->sk_shutdown == SHUTDOWN_MASK) {
  2538. sk->sk_state = SMC_CLOSED;
  2539. sk->sk_socket->state = SS_UNCONNECTED;
  2540. sock_put(sk);
  2541. }
  2542. goto out;
  2543. }
  2544. switch (how) {
  2545. case SHUT_RDWR: /* shutdown in both directions */
  2546. old_state = sk->sk_state;
  2547. rc = smc_close_active(smc);
  2548. if (old_state == SMC_ACTIVE &&
  2549. sk->sk_state == SMC_PEERCLOSEWAIT1)
  2550. do_shutdown = false;
  2551. break;
  2552. case SHUT_WR:
  2553. rc = smc_close_shutdown_write(smc);
  2554. break;
  2555. case SHUT_RD:
  2556. rc = 0;
  2557. /* nothing more to do because peer is not involved */
  2558. break;
  2559. }
  2560. if (do_shutdown && smc->clcsock)
  2561. rc1 = kernel_sock_shutdown(smc->clcsock, how);
  2562. /* map sock_shutdown_cmd constants to sk_shutdown value range */
  2563. sk->sk_shutdown |= how + 1;
  2564. if (sk->sk_state == SMC_CLOSED)
  2565. sock->state = SS_UNCONNECTED;
  2566. else
  2567. sock->state = SS_DISCONNECTING;
  2568. out:
  2569. release_sock(sk);
  2570. return rc ? rc : rc1;
  2571. }
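
/* handle getsockopt() for options at SOL_SMC level */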
static int __smc_getsockopt(struct socket *sock, int level, int optname,
			    char __user *optval, int __user *optlen)
{
	struct smc_sock *smc;
	int val, len;

	smc = smc_sk(sock->sk);

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(int, len, sizeof(int));
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case SMC_LIMIT_HS:
		val = smc->limit_smc_hs;
		break;
	default:
		return -EOPNOTSUPP;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;

	return 0;
}
  2596. static int __smc_setsockopt(struct socket *sock, int level, int optname,
  2597. sockptr_t optval, unsigned int optlen)
  2598. {
  2599. struct sock *sk = sock->sk;
  2600. struct smc_sock *smc;
  2601. int val, rc;
  2602. smc = smc_sk(sk);
  2603. lock_sock(sk);
  2604. switch (optname) {
  2605. case SMC_LIMIT_HS:
  2606. if (optlen < sizeof(int)) {
  2607. rc = -EINVAL;
  2608. break;
  2609. }
  2610. if (copy_from_sockptr(&val, optval, sizeof(int))) {
  2611. rc = -EFAULT;
  2612. break;
  2613. }
  2614. smc->limit_smc_hs = !!val;
  2615. rc = 0;
  2616. break;
  2617. default:
  2618. rc = -EOPNOTSUPP;
  2619. break;
  2620. }
  2621. release_sock(sk);
  2622. return rc;
  2623. }
  2624. int smc_setsockopt(struct socket *sock, int level, int optname,
  2625. sockptr_t optval, unsigned int optlen)
  2626. {
  2627. struct sock *sk = sock->sk;
  2628. struct smc_sock *smc;
  2629. int val, rc;
  2630. if (level == SOL_TCP && optname == TCP_ULP)
  2631. return -EOPNOTSUPP;
  2632. else if (level == SOL_SMC)
  2633. return __smc_setsockopt(sock, level, optname, optval, optlen);
  2634. smc = smc_sk(sk);
  2635. /* generic setsockopts reaching us here always apply to the
  2636. * CLC socket
  2637. */
  2638. mutex_lock(&smc->clcsock_release_lock);
  2639. if (!smc->clcsock) {
  2640. mutex_unlock(&smc->clcsock_release_lock);
  2641. return -EBADF;
  2642. }
  2643. if (unlikely(!smc->clcsock->ops->setsockopt))
  2644. rc = -EOPNOTSUPP;
  2645. else
  2646. rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
  2647. optval, optlen);
  2648. if (smc->clcsock->sk->sk_err) {
  2649. sk->sk_err = smc->clcsock->sk->sk_err;
  2650. sk_error_report(sk);
  2651. }
  2652. mutex_unlock(&smc->clcsock_release_lock);
  2653. if (optlen < sizeof(int))
  2654. return -EINVAL;
  2655. if (copy_from_sockptr(&val, optval, sizeof(int)))
  2656. return -EFAULT;
  2657. lock_sock(sk);
  2658. if (rc || smc->use_fallback)
  2659. goto out;
  2660. switch (optname) {
  2661. case TCP_FASTOPEN:
  2662. case TCP_FASTOPEN_CONNECT:
  2663. case TCP_FASTOPEN_KEY:
  2664. case TCP_FASTOPEN_NO_COOKIE:
  2665. /* option not supported by SMC */
  2666. if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
  2667. rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
  2668. } else {
  2669. rc = -EINVAL;
  2670. }
  2671. break;
  2672. case TCP_NODELAY:
  2673. if (sk->sk_state != SMC_INIT &&
  2674. sk->sk_state != SMC_LISTEN &&
  2675. sk->sk_state != SMC_CLOSED) {
  2676. if (val) {
  2677. SMC_STAT_INC(smc, ndly_cnt);
  2678. smc_tx_pending(&smc->conn);
  2679. cancel_delayed_work(&smc->conn.tx_work);
  2680. }
  2681. }
  2682. break;
  2683. case TCP_CORK:
  2684. if (sk->sk_state != SMC_INIT &&
  2685. sk->sk_state != SMC_LISTEN &&
  2686. sk->sk_state != SMC_CLOSED) {
  2687. if (!val) {
  2688. SMC_STAT_INC(smc, cork_cnt);
  2689. smc_tx_pending(&smc->conn);
  2690. cancel_delayed_work(&smc->conn.tx_work);
  2691. }
  2692. }
  2693. break;
  2694. case TCP_DEFER_ACCEPT:
  2695. smc->sockopt_defer_accept = val;
  2696. break;
  2697. default:
  2698. break;
  2699. }
  2700. out:
  2701. release_sock(sk);
  2702. return rc;
  2703. }
  2704. int smc_getsockopt(struct socket *sock, int level, int optname,
  2705. char __user *optval, int __user *optlen)
  2706. {
  2707. struct smc_sock *smc;
  2708. int rc;
  2709. if (level == SOL_SMC)
  2710. return __smc_getsockopt(sock, level, optname, optval, optlen);
  2711. smc = smc_sk(sock->sk);
  2712. mutex_lock(&smc->clcsock_release_lock);
  2713. if (!smc->clcsock) {
  2714. mutex_unlock(&smc->clcsock_release_lock);
  2715. return -EBADF;
  2716. }
  2717. /* socket options apply to the CLC socket */
  2718. if (unlikely(!smc->clcsock->ops->getsockopt)) {
  2719. mutex_unlock(&smc->clcsock_release_lock);
  2720. return -EOPNOTSUPP;
  2721. }
  2722. rc = smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
  2723. optval, optlen);
  2724. mutex_unlock(&smc->clcsock_release_lock);
  2725. return rc;
  2726. }
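
/* ioctl() backend of the SMC socket: report queue sizes and urgent-data
 * state, or delegate to the CLC socket when in fallback mode
 */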
  2727. int smc_ioctl(struct socket *sock, unsigned int cmd,
  2728. unsigned long arg)
  2729. {
  2730. union smc_host_cursor cons, urg;
  2731. struct smc_connection *conn;
  2732. struct smc_sock *smc;
  2733. int answ;
  2734. smc = smc_sk(sock->sk);
  2735. conn = &smc->conn;
  2736. lock_sock(&smc->sk);
  2737. if (smc->use_fallback) {
  2738. if (!smc->clcsock) {
  2739. release_sock(&smc->sk);
  2740. return -EBADF;
  2741. }
  2742. answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
  2743. release_sock(&smc->sk);
  2744. return answ;
  2745. }
  2746. switch (cmd) {
  2747. case SIOCINQ: /* same as FIONREAD */
  2748. if (smc->sk.sk_state == SMC_LISTEN) {
  2749. release_sock(&smc->sk);
  2750. return -EINVAL;
  2751. }
  2752. if (smc->sk.sk_state == SMC_INIT ||
  2753. smc->sk.sk_state == SMC_CLOSED)
  2754. answ = 0;
  2755. else
  2756. answ = atomic_read(&smc->conn.bytes_to_rcv);
  2757. break;
	case SIOCOUTQ:
		/* output queue size (not sent + not acked) */
  2760. if (smc->sk.sk_state == SMC_LISTEN) {
  2761. release_sock(&smc->sk);
  2762. return -EINVAL;
  2763. }
  2764. if (smc->sk.sk_state == SMC_INIT ||
  2765. smc->sk.sk_state == SMC_CLOSED)
  2766. answ = 0;
  2767. else
  2768. answ = smc->conn.sndbuf_desc->len -
  2769. atomic_read(&smc->conn.sndbuf_space);
  2770. break;
	case SIOCOUTQNSD:
		/* output queue size (not sent only) */
  2773. if (smc->sk.sk_state == SMC_LISTEN) {
  2774. release_sock(&smc->sk);
  2775. return -EINVAL;
  2776. }
  2777. if (smc->sk.sk_state == SMC_INIT ||
  2778. smc->sk.sk_state == SMC_CLOSED)
  2779. answ = 0;
  2780. else
  2781. answ = smc_tx_prepared_sends(&smc->conn);
  2782. break;
  2783. case SIOCATMARK:
  2784. if (smc->sk.sk_state == SMC_LISTEN) {
  2785. release_sock(&smc->sk);
  2786. return -EINVAL;
  2787. }
  2788. if (smc->sk.sk_state == SMC_INIT ||
  2789. smc->sk.sk_state == SMC_CLOSED) {
  2790. answ = 0;
  2791. } else {
  2792. smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
  2793. smc_curs_copy(&urg, &conn->urg_curs, conn);
  2794. answ = smc_curs_diff(conn->rmb_desc->len,
  2795. &cons, &urg) == 1;
  2796. }
  2797. break;
  2798. default:
  2799. release_sock(&smc->sk);
  2800. return -ENOIOCTLCMD;
  2801. }
  2802. release_sock(&smc->sk);
  2803. return put_user(answ, (int __user *)arg);
  2804. }
/* Map the affected portions of the rmbe into an spd, note the number of bytes
 * to splice in conn->splice_pending, and press 'go'. Consumer cursor updates
 * are delayed until the respective page has been fully processed.
 * Note that subsequent recv() calls have to wait till all splice() processing
 * has completed.
 */
ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if (sk->sk_state == SMC_INIT ||
	    sk->sk_state == SMC_LISTEN ||
	    sk->sk_state == SMC_CLOSED)
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
						    pipe, len, flags);
	} else {
		if (*ppos) {
			rc = -ESPIPE;
			goto out;
		}
		if (flags & SPLICE_F_NONBLOCK)
			flags = MSG_DONTWAIT;
		else
			flags = 0;
		SMC_STAT_INC(smc, splice_cnt);
		rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
	}

out:
	release_sock(sk);
	return rc;
}

/* must look like tcp */
static const struct proto_ops smc_sock_ops = {
	.family		= PF_SMC,
	.owner		= THIS_MODULE,
	.release	= smc_release,
	.bind		= smc_bind,
	.connect	= smc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= smc_accept,
	.getname	= smc_getname,
	.poll		= smc_poll,
	.ioctl		= smc_ioctl,
	.listen		= smc_listen,
	.shutdown	= smc_shutdown,
	.setsockopt	= smc_setsockopt,
	.getsockopt	= smc_getsockopt,
	.sendmsg	= smc_sendmsg,
	.recvmsg	= smc_recvmsg,
	.mmap		= sock_no_mmap,
	.splice_read	= smc_splice_read,
};

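/* create the internal kernel TCP socket used for the CLC handshake and for
 * fallback, and take a tracked netns reference on its sock
 */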
int smc_create_clcsk(struct net *net, struct sock *sk, int family)
{
	struct smc_sock *smc = smc_sk(sk);
	int rc;

	rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
			      &smc->clcsock);
	if (rc)
		return rc;

	/* smc_clcsock_release() does not wait for smc->clcsock->sk's
	 * destruction; its sk_state might not be TCP_CLOSE after
	 * smc->sk is close()d, and TCP timers can still fire later,
	 * which need the net ref.
	 */
	sk = smc->clcsock->sk;
	__netns_tracker_free(net, &sk->ns_tracker, false);
	sk->sk_net_refcnt = 1;
	get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
	sock_inuse_add(net, 1);
	return 0;
}

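/* allocate the SMC socket; either adopt an existing TCP socket as clcsock
 * (ULP path) or create a fresh one via smc_create_clcsk()
 */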
static int __smc_create(struct net *net, struct socket *sock, int protocol,
			int kern, struct socket *clcsock)
{
	int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
	struct smc_sock *smc;
	struct sock *sk;
	int rc;

	rc = -ESOCKTNOSUPPORT;
	if (sock->type != SOCK_STREAM)
		goto out;

	rc = -EPROTONOSUPPORT;
	if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6)
		goto out;

	rc = -ENOBUFS;
	sock->ops = &smc_sock_ops;
	sock->state = SS_UNCONNECTED;
	sk = smc_sock_alloc(net, sock, protocol);
	if (!sk)
		goto out;

	/* create internal TCP socket for CLC handshake and fallback */
	smc = smc_sk(sk);

	rc = 0;
	if (clcsock)
		smc->clcsock = clcsock;
	else
		rc = smc_create_clcsk(net, sk, family);

	if (rc) {
		sk_common_release(sk);
		sock->sk = NULL;
	}
out:
	return rc;
}

static int smc_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	return __smc_create(net, sock, protocol, kern, NULL);
}

static const struct net_proto_family smc_sock_family_ops = {
	.family	= PF_SMC,
	.owner	= THIS_MODULE,
	.create	= smc_create,
};

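/* "smc" TCP ULP hook: convert an unconnected in-kernel TCP socket into an
 * SMC socket and transplant the owning struct file onto the new socket
 */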
static int smc_ulp_init(struct sock *sk)
{
	struct socket *tcp = sk->sk_socket;
	struct net *net = sock_net(sk);
	struct socket *smcsock;
	int protocol, ret;

	/* only TCP can be replaced */
	if (tcp->type != SOCK_STREAM || sk->sk_protocol != IPPROTO_TCP ||
	    (sk->sk_family != AF_INET && sk->sk_family != AF_INET6))
		return -ESOCKTNOSUPPORT;
	/* don't handle wq now */
	if (tcp->state != SS_UNCONNECTED || !tcp->file || tcp->wq.fasync_list)
		return -ENOTCONN;

	if (sk->sk_family == AF_INET)
		protocol = SMCPROTO_SMC;
	else
		protocol = SMCPROTO_SMC6;

	smcsock = sock_alloc();
	if (!smcsock)
		return -ENFILE;

	smcsock->type = SOCK_STREAM;
	__module_get(THIS_MODULE); /* tried in __tcp_ulp_find_autoload */
	ret = __smc_create(net, smcsock, protocol, 1, tcp);
	if (ret) {
		sock_release(smcsock); /* does the module_put(); ops won't be NULL here */
		return ret;
	}

	/* replace the tcp socket with the smc socket */
	smcsock->file = tcp->file;
	smcsock->file->private_data = smcsock;
	smcsock->file->f_inode = SOCK_INODE(smcsock); /* replace inode when sock_close */
	smcsock->file->f_path.dentry->d_inode = SOCK_INODE(smcsock); /* dput() in __fput */
	tcp->file = NULL;

	return ret;
}

static void smc_ulp_clone(const struct request_sock *req, struct sock *newsk,
			  const gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(newsk);

	/* don't pass the ulp ops on to children of a listening socket */
	icsk->icsk_ulp_ops = NULL;
}

static struct tcp_ulp_ops smc_ulp_ops __read_mostly = {
	.name		= "smc",
	.owner		= THIS_MODULE,
	.init		= smc_ulp_init,
	.clone		= smc_ulp_clone,
};

unsigned int smc_net_id;

static __net_init int smc_net_init(struct net *net)
{
	int rc;

	rc = smc_sysctl_net_init(net);
	if (rc)
		return rc;
	return smc_pnet_net_init(net);
}

static void __net_exit smc_net_exit(struct net *net)
{
	smc_sysctl_net_exit(net);
	smc_pnet_net_exit(net);
}

static __net_init int smc_net_stat_init(struct net *net)
{
	return smc_stats_init(net);
}

static void __net_exit smc_net_stat_exit(struct net *net)
{
	smc_stats_exit(net);
}

static struct pernet_operations smc_net_ops = {
	.init = smc_net_init,
	.exit = smc_net_exit,
	.id   = &smc_net_id,
	.size = sizeof(struct smc_net),
};

static struct pernet_operations smc_net_stat_ops = {
	.init = smc_net_stat_init,
	.exit = smc_net_stat_exit,
};

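/* module init: register pernet subsystems, protocols, the socket family,
 * the IB client, loopback, the TCP ULP and the inet hooks; on error, unwind
 * through the goto chain below
 */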
static int __init smc_init(void)
{
	int rc;

	rc = register_pernet_subsys(&smc_net_ops);
	if (rc)
		return rc;

	rc = register_pernet_subsys(&smc_net_stat_ops);
	if (rc)
		goto out_pernet_subsys;

	rc = smc_ism_init();
	if (rc)
		goto out_pernet_subsys_stat;
	smc_clc_init();

	rc = smc_nl_init();
	if (rc)
		goto out_ism;

	rc = smc_pnet_init();
	if (rc)
		goto out_nl;

	rc = -ENOMEM;

	smc_tcp_ls_wq = alloc_workqueue("smc_tcp_ls_wq", 0, 0);
	if (!smc_tcp_ls_wq)
		goto out_pnet;

	smc_hs_wq = alloc_workqueue("smc_hs_wq", 0, 0);
	if (!smc_hs_wq)
		goto out_alloc_tcp_ls_wq;

	smc_close_wq = alloc_workqueue("smc_close_wq", 0, 0);
	if (!smc_close_wq)
		goto out_alloc_hs_wq;

	rc = smc_core_init();
	if (rc) {
		pr_err("%s: smc_core_init fails with %d\n", __func__, rc);
		goto out_alloc_wqs;
	}

	rc = smc_llc_init();
	if (rc) {
		pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = smc_cdc_init();
	if (rc) {
		pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = proto_register(&smc_proto, 1);
	if (rc) {
		pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
		goto out_core;
	}

	rc = proto_register(&smc_proto6, 1);
	if (rc) {
		pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc);
		goto out_proto;
	}

	rc = sock_register(&smc_sock_family_ops);
	if (rc) {
		pr_err("%s: sock_register fails with %d\n", __func__, rc);
		goto out_proto6;
	}
	INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
	INIT_HLIST_HEAD(&smc_v6_hashinfo.ht);

	rc = smc_ib_register_client();
	if (rc) {
		pr_err("%s: ib_register fails with %d\n", __func__, rc);
		goto out_sock;
	}

	rc = smc_loopback_init();
	if (rc) {
		pr_err("%s: smc_loopback_init fails with %d\n", __func__, rc);
		goto out_ib;
	}

	rc = tcp_register_ulp(&smc_ulp_ops);
	if (rc) {
		pr_err("%s: tcp_ulp_register fails with %d\n", __func__, rc);
		goto out_lo;
	}

	rc = smc_inet_init();
	if (rc) {
		pr_err("%s: smc_inet_init fails with %d\n", __func__, rc);
		goto out_ulp;
	}

	static_branch_enable(&tcp_have_smc);
	return 0;

out_ulp:
	tcp_unregister_ulp(&smc_ulp_ops);
out_lo:
	smc_loopback_exit();
out_ib:
	smc_ib_unregister_client();
out_sock:
	sock_unregister(PF_SMC);
out_proto6:
	proto_unregister(&smc_proto6);
out_proto:
	proto_unregister(&smc_proto);
out_core:
	smc_core_exit();
out_alloc_wqs:
	destroy_workqueue(smc_close_wq);
out_alloc_hs_wq:
	destroy_workqueue(smc_hs_wq);
out_alloc_tcp_ls_wq:
	destroy_workqueue(smc_tcp_ls_wq);
out_pnet:
	smc_pnet_exit();
out_nl:
	smc_nl_exit();
out_ism:
	smc_clc_exit();
	smc_ism_exit();
out_pernet_subsys_stat:
	unregister_pernet_subsys(&smc_net_stat_ops);
out_pernet_subsys:
	unregister_pernet_subsys(&smc_net_ops);

	return rc;
}

static void __exit smc_exit(void)
{
	static_branch_disable(&tcp_have_smc);
	smc_inet_exit();
	tcp_unregister_ulp(&smc_ulp_ops);
	sock_unregister(PF_SMC);
	smc_core_exit();
	smc_loopback_exit();
	smc_ib_unregister_client();
	smc_ism_exit();
	destroy_workqueue(smc_close_wq);
	destroy_workqueue(smc_tcp_ls_wq);
	destroy_workqueue(smc_hs_wq);
	proto_unregister(&smc_proto6);
	proto_unregister(&smc_proto);
	smc_pnet_exit();
	smc_nl_exit();
	smc_clc_exit();
	unregister_pernet_subsys(&smc_net_stat_ops);
	unregister_pernet_subsys(&smc_net_ops);
	rcu_barrier();
}

module_init(smc_init);
module_exit(smc_exit);

MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("smc socket address family");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_SMC);
MODULE_ALIAS_TCP_ULP("smc");
/* 256 for IPPROTO_SMC and 1 for SOCK_STREAM */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 256, 1);
#if IS_ENABLED(CONFIG_IPV6)
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 256, 1);
#endif /* CONFIG_IPV6 */
MODULE_ALIAS_GENL_FAMILY(SMC_GENL_FAMILY_NAME);