cxgbit_cm.c

/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <libcxgb_cm.h>
#include "cxgbit.h"
#include "clip_tbl.h"

static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp)
{
	wr_waitp->ret = 0;
	reinit_completion(&wr_waitp->completion);
}

static void
cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret)
{
	if (ret == CPL_ERR_NONE)
		wr_waitp->ret = 0;
	else
		wr_waitp->ret = -EIO;

	if (wr_waitp->ret)
		pr_err("%s: err:%u", func, ret);

	complete(&wr_waitp->completion);
}

static int
cxgbit_wait_for_reply(struct cxgbit_device *cdev,
		      struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout,
		      const char *func)
{
	int ret;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		wr_waitp->ret = -EIO;
		goto out;
	}

	ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ);
	if (!ret) {
		pr_info("%s - Device %s not responding tid %u\n",
			func, pci_name(cdev->lldi.pdev), tid);
		wr_waitp->ret = -ETIMEDOUT;
	}
out:
	if (wr_waitp->ret)
		pr_info("%s: FW reply %d tid %u\n",
			pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
	return wr_waitp->ret;
}
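
/*
 * Firmware work requests in this file follow a post-and-wait pattern:
 * the caller arms the wait with cxgbit_init_wr_wait(), posts the WR,
 * then blocks in cxgbit_wait_for_reply(); the matching CPL reply
 * handler completes the wait via cxgbit_wake_up().  An illustrative
 * sketch (post_wr() is a stand-in for e.g. cxgb4_create_server(); see
 * cxgbit_create_server4() below for a real caller):
 *
 *	cxgbit_init_wr_wait(&cnp->com.wr_wait);
 *	ret = post_wr(...);
 *	if (!ret)
 *		ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
 *					    0, 10, __func__);
 */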
static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
{
	return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
}

static struct np_info *
cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
		   unsigned int stid)
{
	struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (p) {
		int bucket = cxgbit_np_hashfn(cnp);

		p->cnp = cnp;
		p->stid = stid;
		spin_lock(&cdev->np_lock);
		p->next = cdev->np_hash_tab[bucket];
		cdev->np_hash_tab[bucket] = p;
		spin_unlock(&cdev->np_lock);
	}

	return p;
}

static int
cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
	struct np_info *p;

	spin_lock(&cdev->np_lock);
	for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
		if (p->cnp == cnp) {
			stid = p->stid;
			break;
		}
	}
	spin_unlock(&cdev->np_lock);

	return stid;
}

static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid = -1, bucket = cxgbit_np_hashfn(cnp);
	struct np_info *p, **prev = &cdev->np_hash_tab[bucket];

	spin_lock(&cdev->np_lock);
	for (p = *prev; p; prev = &p->next, p = p->next) {
		if (p->cnp == cnp) {
			stid = p->stid;
			*prev = p->next;
			kfree(p);
			break;
		}
	}
	spin_unlock(&cdev->np_lock);

	return stid;
}
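
/*
 * The np hash table maps a listening endpoint (cxgbit_np) to the server
 * tid (stid) the hardware assigned it on a given adapter.  The key is
 * derived from the cnp pointer itself: bits 10 and up are masked with
 * NP_INFO_HASH_SIZE - 1, which assumes NP_INFO_HASH_SIZE is a power of
 * two.  A wildcard listener can thus hold a different stid on every
 * cdev in cdev_list_head.
 */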
void _cxgbit_free_cnp(struct kref *kref)
{
	struct cxgbit_np *cnp;

	cnp = container_of(kref, struct cxgbit_np, kref);
	kfree(cnp);
}

static int
cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
		      struct cxgbit_np *cnp)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
				    &cnp->com.local_addr;
	int addr_type;
	int ret;

	pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n",
		 __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);

	addr_type = ipv6_addr_type((const struct in6_addr *)
				   &sin6->sin6_addr);
	if (addr_type != IPV6_ADDR_ANY) {
		ret = cxgb4_clip_get(cdev->lldi.ports[0],
				     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
		if (ret) {
			pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n",
			       sin6->sin6_addr.s6_addr, ret);
			return -ENOMEM;
		}
	}

	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);

	ret = cxgb4_create_server6(cdev->lldi.ports[0],
				   stid, &sin6->sin6_addr,
				   sin6->sin6_port,
				   cdev->lldi.rxq_ids[0]);
	if (!ret)
		ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
					    0, 10, __func__);
	else if (ret > 0)
		ret = net_xmit_errno(ret);
	else
		cxgbit_put_cnp(cnp);

	if (ret) {
		if (ret != -ETIMEDOUT)
			cxgb4_clip_release(cdev->lldi.ports[0],
					   (const u32 *)&sin6->sin6_addr.s6_addr, 1);

		pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n",
		       ret, stid, sin6->sin6_addr.s6_addr,
		       ntohs(sin6->sin6_port));
	}

	return ret;
}

static int
cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
		      struct cxgbit_np *cnp)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)
				  &cnp->com.local_addr;
	int ret;

	pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n",
		 __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);

	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);

	ret = cxgb4_create_server(cdev->lldi.ports[0],
				  stid, sin->sin_addr.s_addr,
				  sin->sin_port, 0,
				  cdev->lldi.rxq_ids[0]);
	if (!ret)
		ret = cxgbit_wait_for_reply(cdev,
					    &cnp->com.wr_wait,
					    0, 10, __func__);
	else if (ret > 0)
		ret = net_xmit_errno(ret);
	else
		cxgbit_put_cnp(cnp);

	if (ret)
		pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n",
		       ret, stid, &sin->sin_addr, ntohs(sin->sin_port));
	return ret;
}
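
/*
 * cxgb4_create_server()/cxgb4_create_server6() return 0 once the request
 * is queued to firmware, a positive NET_XMIT_* code if the stack dropped
 * it, or a negative errno on immediate failure.  Only the queued case
 * blocks for the CPL_PASS_OPEN_RPL, which cxgbit_pass_open_rpl() (below)
 * turns into a wake-up and a drop of the extra cnp reference.  For IPv6,
 * a CLIP (compressed local IP) table entry must be held for the local
 * address while the server exists; it is intentionally not released on
 * -ETIMEDOUT, presumably because a late firmware reply may still arrive.
 */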
struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id)
{
	struct cxgbit_device *cdev;
	u8 i;

	list_for_each_entry(cdev, &cdev_list_head, list) {
		struct cxgb4_lld_info *lldi = &cdev->lldi;

		for (i = 0; i < lldi->nports; i++) {
			if (lldi->ports[i] == ndev) {
				if (port_id)
					*port_id = i;
				return cdev;
			}
		}
	}

	return NULL;
}

static struct net_device *cxgbit_get_real_dev(struct net_device *ndev)
{
	if (ndev->priv_flags & IFF_BONDING) {
		pr_err("Bond devices are not supported. Interface:%s\n",
		       ndev->name);
		return NULL;
	}

	if (is_vlan_dev(ndev))
		return vlan_dev_real_dev(ndev);

	return ndev;
}

static struct net_device *cxgbit_ipv4_netdev(__be32 saddr)
{
	struct net_device *ndev;

	ndev = __ip_dev_find(&init_net, saddr, false);
	if (!ndev)
		return NULL;

	return cxgbit_get_real_dev(ndev);
}

static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6)
{
	struct net_device *ndev = NULL;
	bool found = false;

	if (IS_ENABLED(CONFIG_IPV6)) {
		for_each_netdev_rcu(&init_net, ndev)
			if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) {
				found = true;
				break;
			}
	}
	if (!found)
		return NULL;
	return cxgbit_get_real_dev(ndev);
}

static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp)
{
	struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
	int ss_family = sockaddr->ss_family;
	struct net_device *ndev = NULL;
	struct cxgbit_device *cdev = NULL;

	rcu_read_lock();
	if (ss_family == AF_INET) {
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)sockaddr;
		ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr);
	} else if (ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)sockaddr;
		ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr);
	}
	if (!ndev)
		goto out;

	cdev = cxgbit_find_device(ndev, NULL);
out:
	rcu_read_unlock();
	return cdev;
}

static bool cxgbit_inaddr_any(struct cxgbit_np *cnp)
{
	struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
	int ss_family = sockaddr->ss_family;
	int addr_type;

	if (ss_family == AF_INET) {
		struct sockaddr_in *sin;

		sin = (struct sockaddr_in *)sockaddr;
		if (sin->sin_addr.s_addr == htonl(INADDR_ANY))
			return true;
	} else if (ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)sockaddr;
		addr_type = ipv6_addr_type((const struct in6_addr *)
					   &sin6->sin6_addr);
		if (addr_type == IPV6_ADDR_ANY)
			return true;
	}
	return false;
}
static int
__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid, ret;
	int ss_family = cnp->com.local_addr.ss_family;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags))
		return -EINVAL;

	stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
	if (stid < 0)
		return -EINVAL;

	if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
		cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
		return -EINVAL;
	}

	if (ss_family == AF_INET)
		ret = cxgbit_create_server4(cdev, stid, cnp);
	else
		ret = cxgbit_create_server6(cdev, stid, cnp);

	if (ret) {
		if (ret != -ETIMEDOUT)
			cxgb4_free_stid(cdev->lldi.tids, stid,
					ss_family);
		cxgbit_np_hash_del(cdev, cnp);
		return ret;
	}
	return ret;
}

static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret = -1;

	mutex_lock(&cdev_list_lock);
	cdev = cxgbit_find_np_cdev(cnp);
	if (!cdev)
		goto out;

	if (cxgbit_np_hash_find(cdev, cnp) >= 0)
		goto out;

	if (__cxgbit_setup_cdev_np(cdev, cnp))
		goto out;

	cnp->com.cdev = cdev;
	ret = 0;
out:
	mutex_unlock(&cdev_list_lock);
	return ret;
}

static int cxgbit_setup_all_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret;
	u32 count = 0;

	mutex_lock(&cdev_list_lock);
	list_for_each_entry(cdev, &cdev_list_head, list) {
		if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
			mutex_unlock(&cdev_list_lock);
			return -1;
		}
	}

	list_for_each_entry(cdev, &cdev_list_head, list) {
		ret = __cxgbit_setup_cdev_np(cdev, cnp);
		if (ret == -ETIMEDOUT)
			break;
		if (ret != 0)
			continue;
		count++;
	}
	mutex_unlock(&cdev_list_lock);
	return count ? 0 : -1;
}
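
/*
 * A wildcard (INADDR_ANY / in6addr_any) portal is instantiated on every
 * registered adapter; the listener counts as up if at least one hardware
 * server was created (count ? 0 : -1).  An -ETIMEDOUT from any cdev
 * aborts the walk, matching __cxgbit_setup_cdev_np() above, which keeps
 * the stid allocated in that case since the firmware may still reply.
 */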
int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
{
	struct cxgbit_np *cnp;
	int ret;

	if ((ksockaddr->ss_family != AF_INET) &&
	    (ksockaddr->ss_family != AF_INET6))
		return -EINVAL;

	cnp = kzalloc(sizeof(*cnp), GFP_KERNEL);
	if (!cnp)
		return -ENOMEM;

	init_waitqueue_head(&cnp->accept_wait);
	init_completion(&cnp->com.wr_wait.completion);
	init_completion(&cnp->accept_comp);
	INIT_LIST_HEAD(&cnp->np_accept_list);
	spin_lock_init(&cnp->np_accept_lock);

	kref_init(&cnp->kref);
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct sockaddr_storage));
	memcpy(&cnp->com.local_addr, &np->np_sockaddr,
	       sizeof(cnp->com.local_addr));

	cnp->np = np;
	cnp->com.cdev = NULL;

	if (cxgbit_inaddr_any(cnp))
		ret = cxgbit_setup_all_np(cnp);
	else
		ret = cxgbit_setup_cdev_np(cnp);

	if (ret) {
		cxgbit_put_cnp(cnp);
		return -EINVAL;
	}

	np->np_context = cnp;
	cnp->com.state = CSK_STATE_LISTEN;
	return 0;
}

static void
cxgbit_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		     struct cxgbit_sock *csk)
{
	conn->login_family = np->np_sockaddr.ss_family;
	conn->login_sockaddr = csk->com.remote_addr;
	conn->local_sockaddr = csk->com.local_addr;
}

int cxgbit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct cxgbit_np *cnp = np->np_context;
	struct cxgbit_sock *csk;
	int ret = 0;

accept_wait:
	ret = wait_for_completion_interruptible(&cnp->accept_comp);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		/**
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 **/
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	spin_lock_bh(&cnp->np_accept_lock);
	if (list_empty(&cnp->np_accept_list)) {
		spin_unlock_bh(&cnp->np_accept_lock);
		goto accept_wait;
	}

	csk = list_first_entry(&cnp->np_accept_list,
			       struct cxgbit_sock,
			       accept_node);

	list_del_init(&csk->accept_node);
	spin_unlock_bh(&cnp->np_accept_lock);
	conn->context = csk;
	csk->conn = conn;

	cxgbit_set_conn_info(np, conn, csk);
	return 0;
}
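
/*
 * The accept path is a producer/consumer pair: cxgbit_pass_establish()
 * (below) appends a fully-established csk to cnp->np_accept_list and
 * signals accept_comp, while the iSCSI np thread blocks here and
 * retries via accept_wait if it wakes to an empty list (e.g. when a
 * queued connection was torn down by cxgbit_free_np() in between).
 */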
static int
__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
	int stid, ret;
	bool ipv6 = false;

	stid = cxgbit_np_hash_del(cdev, cnp);
	if (stid < 0)
		return -EINVAL;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags))
		return -EINVAL;

	if (cnp->np->np_sockaddr.ss_family == AF_INET6)
		ipv6 = true;

	cxgbit_get_cnp(cnp);
	cxgbit_init_wr_wait(&cnp->com.wr_wait);
	ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
				  cdev->lldi.rxq_ids[0], ipv6);

	if (ret > 0)
		ret = net_xmit_errno(ret);

	if (ret) {
		cxgbit_put_cnp(cnp);
		return ret;
	}

	ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
				    0, 10, __func__);
	if (ret == -ETIMEDOUT)
		return ret;

	if (ipv6 && cnp->com.cdev) {
		struct sockaddr_in6 *sin6;

		sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr;
		cxgb4_clip_release(cdev->lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr,
				   1);
	}

	cxgb4_free_stid(cdev->lldi.tids, stid,
			cnp->com.local_addr.ss_family);
	return 0;
}

static void cxgbit_free_all_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	int ret;

	mutex_lock(&cdev_list_lock);
	list_for_each_entry(cdev, &cdev_list_head, list) {
		ret = __cxgbit_free_cdev_np(cdev, cnp);
		if (ret == -ETIMEDOUT)
			break;
	}
	mutex_unlock(&cdev_list_lock);
}

static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
{
	struct cxgbit_device *cdev;
	bool found = false;

	mutex_lock(&cdev_list_lock);
	list_for_each_entry(cdev, &cdev_list_head, list) {
		if (cdev == cnp->com.cdev) {
			found = true;
			break;
		}
	}

	if (!found)
		goto out;

	__cxgbit_free_cdev_np(cdev, cnp);
out:
	mutex_unlock(&cdev_list_lock);
}

static void __cxgbit_free_conn(struct cxgbit_sock *csk);

void cxgbit_free_np(struct iscsi_np *np)
{
	struct cxgbit_np *cnp = np->np_context;
	struct cxgbit_sock *csk, *tmp;

	cnp->com.state = CSK_STATE_DEAD;

	if (cnp->com.cdev)
		cxgbit_free_cdev_np(cnp);
	else
		cxgbit_free_all_np(cnp);

	spin_lock_bh(&cnp->np_accept_lock);
	list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
		list_del_init(&csk->accept_node);
		__cxgbit_free_conn(csk);
	}
	spin_unlock_bh(&cnp->np_accept_lock);

	np->np_context = NULL;
	cxgbit_put_cnp(cnp);
}
static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len = roundup(sizeof(struct cpl_close_con_req), 16);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
			      NULL, NULL);

	cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
	__skb_queue_tail(&csk->txq, skb);
	cxgbit_push_tx_frames(csk);
}

static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
{
	struct cxgbit_sock *csk = handle;

	pr_debug("%s cxgbit_device %p\n", __func__, handle);
	kfree_skb(skb);
	cxgbit_put_csk(csk);
}

static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbit_device *cdev = handle;
	struct cpl_abort_req *req = cplhdr(skb);

	pr_debug("%s cdev %p\n", __func__, cdev);
	req->cmd = CPL_ABORT_NO_RST;
	cxgbit_ofld_send(cdev, skb);
}

static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len = roundup(sizeof(struct cpl_abort_req), 16);

	pr_debug("%s: csk %p tid %u; state %d\n",
		 __func__, csk, csk->tid, csk->com.state);

	__skb_queue_purge(&csk->txq);

	if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
		cxgbit_send_tx_flowc_wr(csk);

	skb = __skb_dequeue(&csk->skbq);
	cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
			  csk->com.cdev, cxgbit_abort_arp_failure);

	return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

static void
__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	__kfree_skb(skb);

	if (csk->com.state != CSK_STATE_ESTABLISHED)
		goto no_abort;

	set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
	csk->com.state = CSK_STATE_ABORTING;

	cxgbit_send_abort_req(csk);

	return;

no_abort:
	cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
	cxgbit_put_csk(csk);
}

void cxgbit_abort_conn(struct cxgbit_sock *csk)
{
	struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);

	cxgbit_get_csk(csk);
	cxgbit_init_wr_wait(&csk->com.wr_wait);

	spin_lock_bh(&csk->lock);
	if (csk->lock_owner) {
		cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
		__skb_queue_tail(&csk->backlogq, skb);
	} else {
		__cxgbit_abort_conn(csk, skb);
	}
	spin_unlock_bh(&csk->lock);

	cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
			      csk->tid, 600, __func__);
}
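
/*
 * csk->lock_owner is set while a thread is actively processing this
 * connection.  Work that cannot run inline in that case is packaged as
 * an skb whose cxgbit_skcb_rx_backlog_fn() callback does the job, then
 * queued on csk->backlogq for the lock owner to replay when it drops
 * the lock.  cxgbit_abort_conn() above and cxgbit_rx_data_ack() below
 * both rely on this deferral scheme.
 */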
static void __cxgbit_free_conn(struct cxgbit_sock *csk)
{
	struct iscsi_conn *conn = csk->conn;
	bool release = false;

	pr_debug("%s: state %d\n",
		 __func__, csk->com.state);

	spin_lock_bh(&csk->lock);
	switch (csk->com.state) {
	case CSK_STATE_ESTABLISHED:
		if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
			csk->com.state = CSK_STATE_CLOSING;
			cxgbit_send_halfclose(csk);
		} else {
			csk->com.state = CSK_STATE_ABORTING;
			cxgbit_send_abort_req(csk);
		}
		break;
	case CSK_STATE_CLOSING:
		csk->com.state = CSK_STATE_MORIBUND;
		cxgbit_send_halfclose(csk);
		break;
	case CSK_STATE_DEAD:
		release = true;
		break;
	default:
		pr_err("%s: csk %p; state %d\n",
		       __func__, csk, csk->com.state);
	}
	spin_unlock_bh(&csk->lock);

	if (release)
		cxgbit_put_csk(csk);
}

void cxgbit_free_conn(struct iscsi_conn *conn)
{
	__cxgbit_free_conn(conn->context);
}

static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
{
	csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
		    ((csk->com.remote_addr.ss_family == AF_INET) ?
		     sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		    sizeof(struct tcphdr);
	csk->mss = csk->emss;
	if (TCPOPT_TSTAMP_G(opt))
		csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (csk->emss < 128)
		csk->emss = 128;
	if (csk->emss & 7)
		pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n",
			TCPOPT_MSS_G(opt), csk->mss, csk->emss);
	pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
		 csk->mss, csk->emss);
}

static void cxgbit_free_skb(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;

	__skb_queue_purge(&csk->txq);
	__skb_queue_purge(&csk->rxq);
	__skb_queue_purge(&csk->backlogq);
	__skb_queue_purge(&csk->ppodq);
	__skb_queue_purge(&csk->skbq);

	while ((skb = cxgbit_sock_dequeue_wr(csk)))
		kfree_skb(skb);

	__kfree_skb(csk->lro_hskb);
}

void _cxgbit_free_csk(struct kref *kref)
{
	struct cxgbit_sock *csk;
	struct cxgbit_device *cdev;

	csk = container_of(kref, struct cxgbit_sock, kref);

	pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);

	if (csk->com.local_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
					    &csk->com.local_addr;
		cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
				   (const u32 *)
				   &sin6->sin6_addr.s6_addr, 1);
	}

	cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid,
			 csk->com.local_addr.ss_family);
	dst_release(csk->dst);
	cxgb4_l2t_release(csk->l2t);

	cdev = csk->com.cdev;
	spin_lock_bh(&cdev->cskq.lock);
	list_del(&csk->list);
	spin_unlock_bh(&cdev->cskq.lock);

	cxgbit_free_skb(csk);
	cxgbit_put_cnp(csk->cnp);
	cxgbit_put_cdev(cdev);

	kfree(csk);
}

static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
{
	unsigned int linkspeed;
	u8 scale;

	linkspeed = pi->link_cfg.speed;
	scale = linkspeed / SPEED_10000;

#define CXGBIT_10G_RCV_WIN (256 * 1024)
	csk->rcv_win = CXGBIT_10G_RCV_WIN;
	if (scale)
		csk->rcv_win *= scale;

#define CXGBIT_10G_SND_WIN (256 * 1024)
	csk->snd_win = CXGBIT_10G_SND_WIN;
	if (scale)
		csk->snd_win *= scale;

	pr_debug("%s snd_win %d rcv_win %d\n",
		 __func__, csk->snd_win, csk->rcv_win);
}
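
/*
 * TCP window sizing starts from a 256 KB baseline per 10 Gb/s of link
 * speed and scales linearly: for example a 40 Gb/s port gives
 * scale = 40000 / 10000 = 4, hence 1 MB send and receive windows,
 * while links below 10 Gb/s (scale == 0) keep the 256 KB baseline.
 */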
#ifdef CONFIG_CHELSIO_T4_DCB
static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev)
{
	return ndev->dcbnl_ops->getstate(ndev);
}

static int cxgbit_select_priority(int pri_mask)
{
	if (!pri_mask)
		return 0;

	return (ffs(pri_mask) - 1);
}

static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
{
	int ret;
	u8 caps;

	struct dcb_app iscsi_dcb_app = {
		.protocol = local_port
	};

	ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
	if (ret)
		return 0;

	if (caps & DCB_CAP_DCBX_VER_IEEE) {
		iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
		ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
	} else if (caps & DCB_CAP_DCBX_VER_CEE) {
		iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
		ret = dcb_getapp(ndev, &iscsi_dcb_app);
	}

	pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret));

	return cxgbit_select_priority(ret);
}
#endif
static int
cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
		    u16 local_port, struct dst_entry *dst,
		    struct cxgbit_device *cdev)
{
	struct neighbour *n;
	int ret, step;
	struct net_device *ndev;
	u16 rxq_idx, port_id;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 priority = 0;
#endif

	n = dst_neigh_lookup(dst, peer_ip);
	if (!n)
		return -ENODEV;

	rcu_read_lock();
	if (!(n->nud_state & NUD_VALID))
		neigh_event_send(n, NULL);

	ret = -ENOMEM;
	if (n->dev->flags & IFF_LOOPBACK) {
		if (iptype == 4)
			ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6))
			ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip);
		else
			ndev = NULL;

		if (!ndev) {
			ret = -ENODEV;
			goto out;
		}

		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
					 n, ndev, 0);
		if (!csk->l2t)
			goto out;
		csk->mtu = ndev->mtu;
		csk->tx_chan = cxgb4_port_chan(ndev);
		csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type,
						 cxgb4_port_viid(ndev));
		step = cdev->lldi.ntxq /
		       cdev->lldi.nchan;
		csk->txq_idx = cxgb4_port_idx(ndev) * step;
		step = cdev->lldi.nrxq /
		       cdev->lldi.nchan;
		csk->ctrlq_idx = cxgb4_port_idx(ndev);
		csk->rss_qid = cdev->lldi.rxq_ids[
				cxgb4_port_idx(ndev) * step];
		csk->port_id = cxgb4_port_idx(ndev);
		cxgbit_set_tcp_window(csk,
				      (struct port_info *)netdev_priv(ndev));
	} else {
		ndev = cxgbit_get_real_dev(n->dev);
		if (!ndev) {
			ret = -ENODEV;
			goto out;
		}

#ifdef CONFIG_CHELSIO_T4_DCB
		if (cxgbit_get_iscsi_dcb_state(ndev))
			priority = cxgbit_get_iscsi_dcb_priority(ndev,
								 local_port);

		csk->dcb_priority = priority;

		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
#else
		csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
#endif
		if (!csk->l2t)
			goto out;
		port_id = cxgb4_port_idx(ndev);
		csk->mtu = dst_mtu(dst);
		csk->tx_chan = cxgb4_port_chan(ndev);
		csk->smac_idx = cxgb4_tp_smt_idx(cdev->lldi.adapter_type,
						 cxgb4_port_viid(ndev));
		step = cdev->lldi.ntxq /
		       cdev->lldi.nports;
		csk->txq_idx = (port_id * step) +
			       (cdev->selectq[port_id][0]++ % step);
		csk->ctrlq_idx = cxgb4_port_idx(ndev);
		step = cdev->lldi.nrxq /
		       cdev->lldi.nports;
		rxq_idx = (port_id * step) +
			  (cdev->selectq[port_id][1]++ % step);
		csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
		csk->port_id = port_id;
		cxgbit_set_tcp_window(csk,
				      (struct port_info *)netdev_priv(ndev));
	}
	ret = 0;
out:
	rcu_read_unlock();
	neigh_release(n);
	return ret;
}
int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	int ret = 0;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		kfree_skb(skb);
		pr_err("%s - device not up - dropping\n", __func__);
		return -EIO;
	}

	ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
	if (ret < 0)
		kfree_skb(skb);
	return ret < 0 ? ret : 0;
}

static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
{
	u32 len = roundup(sizeof(struct cpl_tid_release), 16);
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;

	cxgb_mk_tid_release(skb, len, tid, 0);
	cxgbit_ofld_send(cdev, skb);
}

int
cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
		struct l2t_entry *l2e)
{
	int ret = 0;

	if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
		kfree_skb(skb);
		pr_err("%s - device not up - dropping\n", __func__);
		return -EIO;
	}

	ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
	if (ret < 0)
		kfree_skb(skb);
	return ret < 0 ? ret : 0;
}

static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	if (csk->com.state != CSK_STATE_ESTABLISHED) {
		__kfree_skb(skb);
		return;
	}

	cxgbit_ofld_send(csk->com.cdev, skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message.
 * Returns the number of credits sent.
 */
int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len = roundup(sizeof(struct cpl_rx_data_ack), 16);
	u32 credit_dack;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -1;

	credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(1) |
		      RX_CREDITS_V(csk->rx_credits);

	cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
			    credit_dack);

	csk->rx_credits = 0;

	spin_lock_bh(&csk->lock);
	if (csk->lock_owner) {
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
		__skb_queue_tail(&csk->backlogq, skb);
		spin_unlock_bh(&csk->lock);
		return 0;
	}

	cxgbit_send_rx_credits(csk, skb);
	spin_unlock_bh(&csk->lock);

	return 0;
}
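
/*
 * Received payload consumes hardware RX credits that must be returned
 * explicitly: the accumulated csk->rx_credits count is folded into a
 * CPL_RX_DATA_ACK (RX_DACK_CHANGE_F / RX_DACK_MODE_V selecting the
 * delayed-ack behaviour) and then reset.  If another thread owns the
 * connection lock, the ack skb takes the same backlogq deferral path
 * as cxgbit_abort_conn().
 */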
#define FLOWC_WR_NPARAMS_MIN	9
#define FLOWC_WR_NPARAMS_MAX	11

static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	u32 len, flowclen;
	u8 i;

	flowclen = offsetof(struct fw_flowc_wr,
			    mnemval[FLOWC_WR_NPARAMS_MAX]);

	len = max_t(u32, sizeof(struct cpl_abort_req),
		    sizeof(struct cpl_abort_rpl));

	len = max(len, flowclen);
	len = roundup(len, 16);

	for (i = 0; i < 3; i++) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (!skb)
			goto out;
		__skb_queue_tail(&csk->skbq, skb);
	}

	skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
	if (!skb)
		goto out;

	memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
	csk->lro_hskb = skb;

	return 0;
out:
	__skb_queue_purge(&csk->skbq);
	return -ENOMEM;
}
static void
cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
{
	struct sk_buff *skb;
	const struct tcphdr *tcph;
	struct cpl_t5_pass_accept_rpl *rpl5;
	struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
	unsigned int len = roundup(sizeof(*rpl5), 16);
	unsigned int mtu_idx;
	u64 opt0;
	u32 opt2, hlen;
	u32 wscale;
	u32 win;

	pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb) {
		cxgbit_put_csk(csk);
		return;
	}

	rpl5 = __skb_put_zero(skb, len);

	INIT_TP_WR(rpl5, csk->tid);
	OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
						     csk->tid));
	cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
		      req->tcpopt.tstamp,
		      (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
	wscale = cxgb_compute_wscale(csk->rcv_win);
	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = csk->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;
	opt0 =  TCAM_BYPASS_F |
		WND_SCALE_V(wscale) |
		MSS_IDX_V(mtu_idx) |
		L2T_IDX_V(csk->l2t->idx) |
		TX_CHAN_V(csk->tx_chan) |
		SMAC_SEL_V(csk->smac_idx) |
		DSCP_V(csk->tos >> 2) |
		ULP_MODE_V(ULP_MODE_ISCSI) |
		RCV_BUFSIZ_V(win);

	opt2 = RX_CHANNEL_V(0) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);

	if (!is_t5(lldi->adapter_type))
		opt2 |= RX_FC_DISABLE_F;

	if (req->tcpopt.tstamp)
		opt2 |= TSTAMPS_EN_F;
	if (req->tcpopt.sack)
		opt2 |= SACK_EN_F;
	if (wscale)
		opt2 |= WND_SCALE_EN_F;

	hlen = ntohl(req->hdr_len);

	if (is_t5(lldi->adapter_type))
		tcph = (struct tcphdr *)((u8 *)(req + 1) +
		       ETH_HDR_LEN_G(hlen) + IP_HDR_LEN_G(hlen));
	else
		tcph = (struct tcphdr *)((u8 *)(req + 1) +
		       T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));

	if (tcph->ece && tcph->cwr)
		opt2 |= CCTRL_ECN_V(1);

	opt2 |= RX_COALESCE_V(3);
	opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);

	opt2 |= T5_ISS_F;
	rpl5->iss = cpu_to_be32((prandom_u32() & ~7UL) - 1);

	opt2 |= T5_OPT_2_VALID_F;

	rpl5->opt0 = cpu_to_be64(opt0);
	rpl5->opt2 = cpu_to_be32(opt2);
	set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
	t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard);
	cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}
static void
cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk = NULL;
	struct cxgbit_np *cnp;
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
	struct tid_info *t = cdev->lldi.tids;
	unsigned int tid = GET_TID(req);
	u16 peer_mss = ntohs(req->tcpopt.mss);
	unsigned short hdrs;

	struct dst_entry *dst;
	__u8 local_ip[16], peer_ip[16];
	__be16 local_port, peer_port;
	int ret;
	int iptype;

	pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
		 __func__, cdev, stid, tid);

	cnp = lookup_stid(t, stid);
	if (!cnp) {
		pr_err("%s connect request on invalid stid %d\n",
		       __func__, stid);
		goto rel_skb;
	}

	if (cnp->com.state != CSK_STATE_LISTEN) {
		pr_err("%s - listening parent not in CSK_STATE_LISTEN\n",
		       __func__);
		goto reject;
	}

	csk = lookup_tid(t, tid);
	if (csk) {
		pr_err("%s csk not null tid %u\n",
		       __func__, tid);
		goto rel_skb;
	}

	cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
			peer_ip, &local_port, &peer_port);

	/* Find output route */
	if (iptype == 4) {
		pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 "
			 "lport %d rport %d peer_mss %d\n"
			 , __func__, cnp, tid,
			 local_ip, peer_ip, ntohs(local_port),
			 ntohs(peer_port), peer_mss);
		dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
				      *(__be32 *)local_ip,
				      *(__be32 *)peer_ip,
				      local_port, peer_port,
				      PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
	} else {
		pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
			 "lport %d rport %d peer_mss %d\n"
			 , __func__, cnp, tid,
			 local_ip, peer_ip, ntohs(local_port),
			 ntohs(peer_port), peer_mss);
		dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
				       local_ip, peer_ip,
				       local_port, peer_port,
				       PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
				       ((struct sockaddr_in6 *)
					&cnp->com.local_addr)->sin6_scope_id);
	}
	if (!dst) {
		pr_err("%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}

	csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
	if (!csk) {
		dst_release(dst);
		goto rel_skb;
	}

	ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
				  dst, cdev);
	if (ret) {
		pr_err("%s - failed to allocate l2t entry!\n",
		       __func__);
		dst_release(dst);
		kfree(csk);
		goto reject;
	}

	kref_init(&csk->kref);
	init_completion(&csk->com.wr_wait.completion);

	INIT_LIST_HEAD(&csk->accept_node);

	hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
		sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0);
	if (peer_mss && csk->mtu > (peer_mss + hdrs))
		csk->mtu = peer_mss + hdrs;

	csk->com.state = CSK_STATE_CONNECTING;
	csk->com.cdev = cdev;
	csk->cnp = cnp;
	csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
	csk->dst = dst;
	csk->tid = tid;
	csk->wr_cred = cdev->lldi.wr_cred -
		       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
	csk->wr_max_cred = csk->wr_cred;
	csk->wr_una_cred = 0;

	if (iptype == 4) {
		struct sockaddr_in *sin = (struct sockaddr_in *)
					  &csk->com.local_addr;
		sin->sin_family = AF_INET;
		sin->sin_port = local_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&csk->com.remote_addr;
		sin->sin_family = AF_INET;
		sin->sin_port = peer_port;
		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
	} else {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
					    &csk->com.local_addr;

		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = local_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
		cxgb4_clip_get(cdev->lldi.ports[0],
			       (const u32 *)&sin6->sin6_addr.s6_addr,
			       1);

		sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = peer_port;
		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
	}

	skb_queue_head_init(&csk->rxq);
	skb_queue_head_init(&csk->txq);
	skb_queue_head_init(&csk->ppodq);
	skb_queue_head_init(&csk->backlogq);
	skb_queue_head_init(&csk->skbq);
	cxgbit_sock_reset_wr_list(csk);
	spin_lock_init(&csk->lock);
	init_waitqueue_head(&csk->waitq);
	init_waitqueue_head(&csk->ack_waitq);
	csk->lock_owner = false;

	if (cxgbit_alloc_csk_skb(csk)) {
		dst_release(dst);
		kfree(csk);
		goto rel_skb;
	}

	cxgbit_get_cnp(cnp);
	cxgbit_get_cdev(cdev);

	spin_lock(&cdev->cskq.lock);
	list_add_tail(&csk->list, &cdev->cskq.list);
	spin_unlock(&cdev->cskq.lock);

	cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family);
	cxgbit_pass_accept_rpl(csk, req);
	goto rel_skb;

reject:
	cxgbit_release_tid(cdev, tid);
rel_skb:
	__kfree_skb(skb);
}
static u32
cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
			   u32 *flowclenp)
{
	u32 nparams, flowclen16, flowclen;

	nparams = FLOWC_WR_NPARAMS_MIN;

	if (csk->snd_wscale)
		nparams++;

#ifdef CONFIG_CHELSIO_T4_DCB
	nparams++;
#endif

	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;
	/*
	 * Return the number of 16-byte credits used by the flowc request.
	 * Pass back the nparams and actual flowc length if requested.
	 */
	if (nparamsp)
		*nparamsp = nparams;
	if (flowclenp)
		*flowclenp = flowclen;
	return flowclen16;
}
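
/*
 * Worked example of the credit math, assuming the usual cxgb4 layout
 * (8-byte fw_flowc_wr header followed by 8-byte fw_flowc_mnemval
 * entries): with the minimum nparams = 9,
 *
 *	flowclen   = offsetof(struct fw_flowc_wr, mnemval[9])
 *		   = 8 + 9 * 8 = 80 bytes
 *	flowclen16 = DIV_ROUND_UP(80, 16) = 5 credits
 *	flowclen   = 5 * 16 = 80 bytes on the wire
 *
 * and with all 11 parameters, 8 + 11 * 8 = 96 bytes -> 6 credits.
 */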
u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
{
	struct cxgbit_device *cdev = csk->com.cdev;
	struct fw_flowc_wr *flowc;
	u32 nparams, flowclen16, flowclen;
	struct sk_buff *skb;
	u8 index;

#ifdef CONFIG_CHELSIO_T4_DCB
	u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif

	flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);

	skb = __skb_dequeue(&csk->skbq);
	flowc = __skb_put_zero(skb, flowclen);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
					  FW_WR_FLOWID_V(csk->tid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (csk->com.cdev->lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(csk->emss);

	flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
		flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD);
	else
		flowc->mnemval[8].val = cpu_to_be32(16384);

	index = 9;

	if (csk->snd_wscale) {
		flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
		flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
		index++;
	}

#ifdef CONFIG_CHELSIO_T4_DCB
	flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
	if (vlan == VLAN_NONE) {
		pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
		flowc->mnemval[index].val = cpu_to_be32(0);
	} else
		flowc->mnemval[index].val = cpu_to_be32(
				(vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
#endif

	pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
		 " rcv_seq = %u; snd_win = %u; emss = %u\n",
		 __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
		 csk->rcv_nxt, csk->snd_win, csk->emss);
	set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
	cxgbit_ofld_send(csk->com.cdev, skb);
	return flowclen16;
}
int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;
	u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
	u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
	unsigned int len = roundup(sizeof(*req), 16);
	int ret;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* set up ulp submode */
	req = __skb_put_zero(skb, len);

	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 4);
	req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
				(dcrc ? ULP_CRC_DATA : 0)) << 4);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);

	cxgbit_get_csk(csk);
	cxgbit_init_wr_wait(&csk->com.wr_wait);

	cxgbit_ofld_send(csk->com.cdev, skb);

	ret = cxgbit_wait_for_reply(csk->com.cdev,
				    &csk->com.wr_wait,
				    csk->tid, 5, __func__);
	if (ret)
		return -1;

	return 0;
}

int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
{
	struct sk_buff *skb;
	struct cpl_set_tcb_field *req;
	unsigned int len = roundup(sizeof(*req), 16);
	int ret;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = __skb_put_zero(skb, len);

	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
	req->word_cookie = htons(0);
	req->mask = cpu_to_be64(0x3 << 8);
	req->val = cpu_to_be64(pg_idx << 8);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);

	cxgbit_get_csk(csk);
	cxgbit_init_wr_wait(&csk->com.wr_wait);

	cxgbit_ofld_send(csk->com.cdev, skb);

	ret = cxgbit_wait_for_reply(csk->com.cdev,
				    &csk->com.wr_wait,
				    csk->tid, 5, __func__);
	if (ret)
		return -1;

	return 0;
}
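
/*
 * Both helpers above program the connection's TCB via
 * CPL_SET_TCB_FIELD: mask selects the bits of the TCB word being
 * written and val supplies their new contents.  The digest setup
 * writes bits 5:4 (mask 0x3 << 4), so enabling both iSCSI CRCs yields
 * val = (ULP_CRC_HEADER | ULP_CRC_DATA) << 4, while the DDP page-size
 * index occupies bits 9:8 (mask 0x3 << 8).  NO_REPLY_V(0) leaves the
 * reply enabled so the caller can block in cxgbit_wait_for_reply().
 */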
static void
cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
	struct tid_info *t = cdev->lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct cxgbit_np *cnp = lookup_stid(t, stid);

	pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
		 __func__, cnp, stid, rpl->status);

	if (!cnp) {
		pr_info("%s stid %d lookup failure\n", __func__, stid);
		goto rel_skb;
	}

	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
	cxgbit_put_cnp(cnp);
rel_skb:
	__kfree_skb(skb);
}

static void
cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
	struct tid_info *t = cdev->lldi.tids;
	unsigned int stid = GET_TID(rpl);
	struct cxgbit_np *cnp = lookup_stid(t, stid);

	pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
		 __func__, cnp, stid, rpl->status);

	if (!cnp) {
		pr_info("%s stid %d lookup failure\n", __func__, stid);
		goto rel_skb;
	}

	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
	cxgbit_put_cnp(cnp);
rel_skb:
	__kfree_skb(skb);
}

static void
cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cpl_pass_establish *req = cplhdr(skb);
	struct tid_info *t = cdev->lldi.tids;
	unsigned int tid = GET_TID(req);
	struct cxgbit_sock *csk;
	struct cxgbit_np *cnp;
	u16 tcp_opt = be16_to_cpu(req->tcp_opt);
	u32 snd_isn = be32_to_cpu(req->snd_isn);
	u32 rcv_isn = be32_to_cpu(req->rcv_isn);

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}
	cnp = csk->cnp;

	pr_debug("%s: csk %p; tid %u; cnp %p\n",
		 __func__, csk, tid, cnp);

	csk->write_seq = snd_isn;
	csk->snd_una = snd_isn;
	csk->snd_nxt = snd_isn;
	csk->rcv_nxt = rcv_isn;

	if (csk->rcv_win > (RCV_BUFSIZ_M << 10))
		csk->rx_credits = (csk->rcv_win - (RCV_BUFSIZ_M << 10));

	csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
	cxgbit_set_emss(csk, tcp_opt);
	dst_confirm(csk->dst);
	csk->com.state = CSK_STATE_ESTABLISHED;
	spin_lock_bh(&cnp->np_accept_lock);
	list_add_tail(&csk->accept_node, &cnp->np_accept_list);
	spin_unlock_bh(&cnp->np_accept_lock);
	complete(&cnp->accept_comp);
rel_skb:
	__kfree_skb(skb);
}
  1362. static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
  1363. {
  1364. cxgbit_skcb_flags(skb) = 0;
  1365. spin_lock_bh(&csk->rxq.lock);
  1366. __skb_queue_tail(&csk->rxq, skb);
  1367. spin_unlock_bh(&csk->rxq.lock);
  1368. wake_up(&csk->waitq);
  1369. }
  1370. static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
  1371. {
  1372. pr_debug("%s: csk %p; tid %u; state %d\n",
  1373. __func__, csk, csk->tid, csk->com.state);
  1374. switch (csk->com.state) {
  1375. case CSK_STATE_ESTABLISHED:
  1376. csk->com.state = CSK_STATE_CLOSING;
  1377. cxgbit_queue_rx_skb(csk, skb);
  1378. return;
  1379. case CSK_STATE_CLOSING:
  1380. /* simultaneous close */
  1381. csk->com.state = CSK_STATE_MORIBUND;
  1382. break;
  1383. case CSK_STATE_MORIBUND:
  1384. csk->com.state = CSK_STATE_DEAD;
  1385. cxgbit_put_csk(csk);
  1386. break;
  1387. case CSK_STATE_ABORTING:
  1388. break;
  1389. default:
  1390. pr_info("%s: cpl_peer_close in bad state %d\n",
  1391. __func__, csk->com.state);
  1392. }
  1393. __kfree_skb(skb);
  1394. }
  1395. static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
  1396. {
  1397. pr_debug("%s: csk %p; tid %u; state %d\n",
  1398. __func__, csk, csk->tid, csk->com.state);
  1399. switch (csk->com.state) {
  1400. case CSK_STATE_CLOSING:
  1401. csk->com.state = CSK_STATE_MORIBUND;
  1402. break;
  1403. case CSK_STATE_MORIBUND:
  1404. csk->com.state = CSK_STATE_DEAD;
  1405. cxgbit_put_csk(csk);
  1406. break;
  1407. case CSK_STATE_ABORTING:
  1408. case CSK_STATE_DEAD:
  1409. break;
  1410. default:
  1411. pr_info("%s: cpl_close_con_rpl in bad state %d\n",
  1412. __func__, csk->com.state);
  1413. }
  1414. __kfree_skb(skb);
  1415. }
static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *hdr = cplhdr(skb);
	unsigned int tid = GET_TID(hdr);
	struct sk_buff *rpl_skb;
	bool release = false;
	bool wakeup_thread = false;
	u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);

	pr_debug("%s: csk %p; tid %u; state %d\n",
		 __func__, csk, tid, csk->com.state);

	if (cxgb_is_neg_adv(hdr->status)) {
		pr_err("%s: got neg advice %d on tid %u\n",
		       __func__, hdr->status, tid);
		goto rel_skb;
	}

	switch (csk->com.state) {
	case CSK_STATE_CONNECTING:
	case CSK_STATE_MORIBUND:
		csk->com.state = CSK_STATE_DEAD;
		release = true;
		break;
	case CSK_STATE_ESTABLISHED:
		csk->com.state = CSK_STATE_DEAD;
		wakeup_thread = true;
		break;
	case CSK_STATE_CLOSING:
		csk->com.state = CSK_STATE_DEAD;
		if (!csk->conn)
			release = true;
		break;
	case CSK_STATE_ABORTING:
		break;
	default:
		pr_info("%s: cpl_abort_req_rss in bad state %d\n",
			__func__, csk->com.state);
		csk->com.state = CSK_STATE_DEAD;
	}

	__skb_queue_purge(&csk->txq);

	if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
		cxgbit_send_tx_flowc_wr(csk);

	rpl_skb = __skb_dequeue(&csk->skbq);

	cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx);
	cxgbit_ofld_send(csk->com.cdev, rpl_skb);

	if (wakeup_thread) {
		cxgbit_queue_rx_skb(csk, skb);
		return;
	}

	if (release)
		cxgbit_put_csk(csk);
rel_skb:
	__kfree_skb(skb);
}
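
/*
 * CPL_ABORT_RPL_RSS: hardware acknowledgment of an abort we initiated.
 * Completes any waiter blocked on the abort (CSK_ABORT_RPL_WAIT) with
 * the hardware status and drops the reference taken for the abort.
 */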
static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);

	pr_debug("%s: csk %p; tid %u; state %d\n",
		 __func__, csk, csk->tid, csk->com.state);

	switch (csk->com.state) {
	case CSK_STATE_ABORTING:
		csk->com.state = CSK_STATE_DEAD;
		if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
			cxgbit_wake_up(&csk->com.wr_wait, __func__,
				       rpl->status);
		cxgbit_put_csk(csk);
		break;
	default:
		pr_info("%s: cpl_abort_rpl_rss in state %d\n",
			__func__, csk->com.state);
	}

	__kfree_skb(skb);
}
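
/*
 * Sanity check the tx credit accounting: the credits currently held
 * (wr_cred) plus the credits consumed by work requests still pending
 * on the wr queue must equal the hardware total (wr_max_cred). Each
 * pending skb caches its credit cost in skb->csum. Returns true if the
 * invariant is violated.
 */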
static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
{
	const struct sk_buff *skb = csk->wr_pending_head;
	u32 credit = 0;

	if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
		pr_err("csk 0x%p, tid %u, credit %u > %u\n",
		       csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
		return true;
	}

	while (skb) {
		credit += (__force u32)skb->csum;
		skb = cxgbit_skcb_tx_wr_next(skb);
	}

	if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
		pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
		       csk, csk->tid, csk->wr_cred,
		       credit, csk->wr_max_cred);
		return true;
	}

	return false;
}
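
/*
 * CPL_FW4_ACK: the firmware returns tx credits as work requests
 * complete and, when CPL_FW4_ACK_FLAGS_SEQVAL is set, reports the
 * current TCP snd_una. Returned credits are matched against the
 * per-skb costs on the pending wr queue, completed skbs are freed, and
 * any backlogged tx is pushed out. On a credit accounting error the
 * skb is handed to the rx thread for error handling instead.
 */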
static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
	u32 credits = rpl->credits;
	u32 snd_una = ntohl(rpl->snd_una);

	csk->wr_cred += credits;
	if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

	while (credits) {
		struct sk_buff *p = cxgbit_sock_peek_wr(csk);
		u32 csum;

		if (unlikely(!p)) {
			pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
			       csk, csk->tid, credits,
			       csk->wr_cred, csk->wr_una_cred);
			break;
		}

		csum = (__force u32)p->csum;
		if (unlikely(credits < csum)) {
			pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
				csk, csk->tid,
				credits, csk->wr_cred, csk->wr_una_cred,
				csum);
			p->csum = (__force __wsum)(csum - credits);
			break;
		}

		cxgbit_sock_dequeue_wr(csk);
		credits -= csum;
		kfree_skb(p);
	}

	if (unlikely(cxgbit_credit_err(csk))) {
		cxgbit_queue_rx_skb(csk, skb);
		return;
	}

	if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
		if (unlikely(before(snd_una, csk->snd_una))) {
			pr_warn("csk 0x%p,%u, snd_una %u/%u.",
				csk, csk->tid, snd_una,
				csk->snd_una);
			goto rel_skb;
		}

		if (csk->snd_una != snd_una) {
			csk->snd_una = snd_una;
			dst_confirm(csk->dst);
			wake_up(&csk->ack_waitq);
		}
	}

	if (skb_queue_len(&csk->txq))
		cxgbit_push_tx_frames(csk);

rel_skb:
	__kfree_skb(skb);
}
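
/*
 * CPL_SET_TCB_RPL: completion for a TCB update issued earlier; wakes
 * the waiter with the hardware status and releases the reference held
 * across the update.
 */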
static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk;
	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
	cxgbit_put_csk(csk);
rel_skb:
	__kfree_skb(skb);
}
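
/*
 * CPL_RX_DATA: inbound payload. Resolve the tid to its connection and
 * defer the actual processing to the rx thread.
 */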
static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk;
	struct cpl_rx_data *cpl = cplhdr(skb);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	cxgbit_queue_rx_skb(csk, skb);
	return;
rel_skb:
	__kfree_skb(skb);
}
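
/*
 * Run a CPL handler under csk->lock. If another context currently owns
 * the socket (lock_owner), park the skb on the backlog queue instead;
 * each queued skb carries its handler in the control block so it can
 * be replayed once the socket is released.
 */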
static void
__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	spin_lock(&csk->lock);
	if (csk->lock_owner) {
		__skb_queue_tail(&csk->backlogq, skb);
		spin_unlock(&csk->lock);
		return;
	}

	cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
	spin_unlock(&csk->lock);
}

static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	cxgbit_get_csk(csk);
	__cxgbit_process_rx_cpl(csk, skb);
	cxgbit_put_csk(csk);
}
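
/*
 * Dispatch a connection-scoped CPL to its handler. The handler is
 * recorded in the skb control block so it can also be run later from
 * the backlog queue. CPL_FW4_ACK is processed without an extra csk
 * reference (ref = false) since its handler never drops one; for the
 * close/abort CPLs a temporary reference is held across the handler,
 * which may itself drop the connection's last reference.
 */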
static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk;
	struct cpl_tx_data *cpl = cplhdr(skb);
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct tid_info *t = lldi->tids;
	unsigned int tid = GET_TID(cpl);
	u8 opcode = cxgbit_skcb_rx_opcode(skb);
	bool ref = true;

	switch (opcode) {
	case CPL_FW4_ACK:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
		ref = false;
		break;
	case CPL_PEER_CLOSE:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
		break;
	case CPL_CLOSE_CON_RPL:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
		break;
	case CPL_ABORT_REQ_RSS:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
		break;
	case CPL_ABORT_RPL_RSS:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
		break;
	default:
		goto rel_skb;
	}

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	if (ref)
		cxgbit_process_rx_cpl(csk, skb);
	else
		__cxgbit_process_rx_cpl(csk, skb);

	return;
rel_skb:
	__kfree_skb(skb);
}
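
/*
 * CPL opcode to handler dispatch table. Listener and connection setup
 * CPLs are handled directly; per-connection CPLs that must be
 * serialized against the socket lock go through cxgbit_rx_cpl.
 */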
cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_PASS_OPEN_RPL]	= cxgbit_pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL]	= cxgbit_close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ]	= cxgbit_pass_accept_req,
	[CPL_PASS_ESTABLISH]	= cxgbit_pass_establish,
	[CPL_SET_TCB_RPL]	= cxgbit_set_tcb_rpl,
	[CPL_RX_DATA]		= cxgbit_rx_data,
	[CPL_FW4_ACK]		= cxgbit_rx_cpl,
	[CPL_PEER_CLOSE]	= cxgbit_rx_cpl,
	[CPL_CLOSE_CON_RPL]	= cxgbit_rx_cpl,
	[CPL_ABORT_REQ_RSS]	= cxgbit_rx_cpl,
	[CPL_ABORT_RPL_RSS]	= cxgbit_rx_cpl,
};