cxgbit_cm.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <libcxgb_cm.h>
#include "cxgbit.h"
#include "clip_tbl.h"

static void cxgbit_init_wr_wait(struct cxgbit_wr_wait *wr_waitp)
{
        wr_waitp->ret = 0;
        reinit_completion(&wr_waitp->completion);
}

static void
cxgbit_wake_up(struct cxgbit_wr_wait *wr_waitp, const char *func, u8 ret)
{
        if (ret == CPL_ERR_NONE)
                wr_waitp->ret = 0;
        else
                wr_waitp->ret = -EIO;

        if (wr_waitp->ret)
                pr_err("%s: err:%u\n", func, ret);

        complete(&wr_waitp->completion);
}

static int
cxgbit_wait_for_reply(struct cxgbit_device *cdev,
                      struct cxgbit_wr_wait *wr_waitp, u32 tid, u32 timeout,
                      const char *func)
{
        int ret;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                wr_waitp->ret = -EIO;
                goto out;
        }

        ret = wait_for_completion_timeout(&wr_waitp->completion, timeout * HZ);
        if (!ret) {
                pr_info("%s - Device %s not responding tid %u\n",
                        func, pci_name(cdev->lldi.pdev), tid);
                wr_waitp->ret = -ETIMEDOUT;
        }
out:
        if (wr_waitp->ret)
                pr_info("%s: FW reply %d tid %u\n",
                        pci_name(cdev->lldi.pdev), wr_waitp->ret, tid);
        return wr_waitp->ret;
}

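/*
 * Per-device hash table mapping a listening cxgbit_np to the server tid
 * (stid) allocated for it on that adapter. The pointer value of the
 * cxgbit_np itself is hashed into NP_INFO_HASH_SIZE buckets.
 */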
static int cxgbit_np_hashfn(const struct cxgbit_np *cnp)
{
        return ((unsigned long)cnp >> 10) & (NP_INFO_HASH_SIZE - 1);
}

static struct np_info *
cxgbit_np_hash_add(struct cxgbit_device *cdev, struct cxgbit_np *cnp,
                   unsigned int stid)
{
        struct np_info *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (p) {
                int bucket = cxgbit_np_hashfn(cnp);

                p->cnp = cnp;
                p->stid = stid;
                spin_lock(&cdev->np_lock);
                p->next = cdev->np_hash_tab[bucket];
                cdev->np_hash_tab[bucket] = p;
                spin_unlock(&cdev->np_lock);
        }

        return p;
}

static int
cxgbit_np_hash_find(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid = -1, bucket = cxgbit_np_hashfn(cnp);
        struct np_info *p;

        spin_lock(&cdev->np_lock);
        for (p = cdev->np_hash_tab[bucket]; p; p = p->next) {
                if (p->cnp == cnp) {
                        stid = p->stid;
                        break;
                }
        }
        spin_unlock(&cdev->np_lock);

        return stid;
}

static int cxgbit_np_hash_del(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid = -1, bucket = cxgbit_np_hashfn(cnp);
        struct np_info *p, **prev = &cdev->np_hash_tab[bucket];

        spin_lock(&cdev->np_lock);
        for (p = *prev; p; prev = &p->next, p = p->next) {
                if (p->cnp == cnp) {
                        stid = p->stid;
                        *prev = p->next;
                        kfree(p);
                        break;
                }
        }
        spin_unlock(&cdev->np_lock);

        return stid;
}

void _cxgbit_free_cnp(struct kref *kref)
{
        struct cxgbit_np *cnp;

        cnp = container_of(kref, struct cxgbit_np, kref);
        kfree(cnp);
}

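/*
 * Start a hardware IPv6 listening server on stid. A non-wildcard local
 * address must first be installed in the adapter's CLIP (Compressed
 * Local IP) table before the server can be created.
 */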
static int
cxgbit_create_server6(struct cxgbit_device *cdev, unsigned int stid,
                      struct cxgbit_np *cnp)
{
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
                                    &cnp->com.local_addr;
        int addr_type;
        int ret;

        pr_debug("%s: dev = %s; stid = %u; sin6_port = %u\n",
                 __func__, cdev->lldi.ports[0]->name, stid, sin6->sin6_port);

        addr_type = ipv6_addr_type((const struct in6_addr *)
                                   &sin6->sin6_addr);
        if (addr_type != IPV6_ADDR_ANY) {
                ret = cxgb4_clip_get(cdev->lldi.ports[0],
                                     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
                if (ret) {
                        pr_err("Unable to find clip table entry. laddr %pI6. Error:%d.\n",
                               sin6->sin6_addr.s6_addr, ret);
                        return -ENOMEM;
                }
        }

        cxgbit_get_cnp(cnp);
        cxgbit_init_wr_wait(&cnp->com.wr_wait);

        ret = cxgb4_create_server6(cdev->lldi.ports[0],
                                   stid, &sin6->sin6_addr,
                                   sin6->sin6_port,
                                   cdev->lldi.rxq_ids[0]);
        if (!ret)
                ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
                                            0, 10, __func__);
        else if (ret > 0)
                ret = net_xmit_errno(ret);
        else
                cxgbit_put_cnp(cnp);

        if (ret) {
                if (ret != -ETIMEDOUT)
                        cxgb4_clip_release(cdev->lldi.ports[0],
                                           (const u32 *)&sin6->sin6_addr.s6_addr, 1);

                pr_err("create server6 err %d stid %d laddr %pI6 lport %d\n",
                       ret, stid, sin6->sin6_addr.s6_addr,
                       ntohs(sin6->sin6_port));
        }

        return ret;
}

static int
cxgbit_create_server4(struct cxgbit_device *cdev, unsigned int stid,
                      struct cxgbit_np *cnp)
{
        struct sockaddr_in *sin = (struct sockaddr_in *)
                                  &cnp->com.local_addr;
        int ret;

        pr_debug("%s: dev = %s; stid = %u; sin_port = %u\n",
                 __func__, cdev->lldi.ports[0]->name, stid, sin->sin_port);

        cxgbit_get_cnp(cnp);
        cxgbit_init_wr_wait(&cnp->com.wr_wait);

        ret = cxgb4_create_server(cdev->lldi.ports[0],
                                  stid, sin->sin_addr.s_addr,
                                  sin->sin_port, 0,
                                  cdev->lldi.rxq_ids[0]);
        if (!ret)
                ret = cxgbit_wait_for_reply(cdev,
                                            &cnp->com.wr_wait,
                                            0, 10, __func__);
        else if (ret > 0)
                ret = net_xmit_errno(ret);
        else
                cxgbit_put_cnp(cnp);

        if (ret)
                pr_err("create server failed err %d stid %d laddr %pI4 lport %d\n",
                       ret, stid, &sin->sin_addr, ntohs(sin->sin_port));
        return ret;
}

struct cxgbit_device *cxgbit_find_device(struct net_device *ndev, u8 *port_id)
{
        struct cxgbit_device *cdev;
        u8 i;

        list_for_each_entry(cdev, &cdev_list_head, list) {
                struct cxgb4_lld_info *lldi = &cdev->lldi;

                for (i = 0; i < lldi->nports; i++) {
                        if (lldi->ports[i] == ndev) {
                                if (port_id)
                                        *port_id = i;
                                return cdev;
                        }
                }
        }

        return NULL;
}

static struct net_device *cxgbit_get_real_dev(struct net_device *ndev)
{
        if (ndev->priv_flags & IFF_BONDING) {
                pr_err("Bond devices are not supported. Interface:%s\n",
                       ndev->name);
                return NULL;
        }

        if (is_vlan_dev(ndev))
                return vlan_dev_real_dev(ndev);

        return ndev;
}

static struct net_device *cxgbit_ipv4_netdev(__be32 saddr)
{
        struct net_device *ndev;

        ndev = __ip_dev_find(&init_net, saddr, false);
        if (!ndev)
                return NULL;

        return cxgbit_get_real_dev(ndev);
}

static struct net_device *cxgbit_ipv6_netdev(struct in6_addr *addr6)
{
        struct net_device *ndev = NULL;
        bool found = false;

        if (IS_ENABLED(CONFIG_IPV6)) {
                for_each_netdev_rcu(&init_net, ndev)
                        if (ipv6_chk_addr(&init_net, addr6, ndev, 1)) {
                                found = true;
                                break;
                        }
        }
        if (!found)
                return NULL;
        return cxgbit_get_real_dev(ndev);
}

static struct cxgbit_device *cxgbit_find_np_cdev(struct cxgbit_np *cnp)
{
        struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
        int ss_family = sockaddr->ss_family;
        struct net_device *ndev = NULL;
        struct cxgbit_device *cdev = NULL;

        rcu_read_lock();
        if (ss_family == AF_INET) {
                struct sockaddr_in *sin;

                sin = (struct sockaddr_in *)sockaddr;
                ndev = cxgbit_ipv4_netdev(sin->sin_addr.s_addr);
        } else if (ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)sockaddr;
                ndev = cxgbit_ipv6_netdev(&sin6->sin6_addr);
        }
        if (!ndev)
                goto out;

        cdev = cxgbit_find_device(ndev, NULL);
out:
        rcu_read_unlock();
        return cdev;
}

static bool cxgbit_inaddr_any(struct cxgbit_np *cnp)
{
        struct sockaddr_storage *sockaddr = &cnp->com.local_addr;
        int ss_family = sockaddr->ss_family;
        int addr_type;

        if (ss_family == AF_INET) {
                struct sockaddr_in *sin;

                sin = (struct sockaddr_in *)sockaddr;
                if (sin->sin_addr.s_addr == htonl(INADDR_ANY))
                        return true;
        } else if (ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)sockaddr;
                addr_type = ipv6_addr_type((const struct in6_addr *)
                                           &sin6->sin6_addr);
                if (addr_type == IPV6_ADDR_ANY)
                        return true;
        }
        return false;
}

static int
__cxgbit_setup_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid, ret;
        int ss_family = cnp->com.local_addr.ss_family;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags))
                return -EINVAL;

        stid = cxgb4_alloc_stid(cdev->lldi.tids, ss_family, cnp);
        if (stid < 0)
                return -EINVAL;

        if (!cxgbit_np_hash_add(cdev, cnp, stid)) {
                cxgb4_free_stid(cdev->lldi.tids, stid, ss_family);
                return -EINVAL;
        }

        if (ss_family == AF_INET)
                ret = cxgbit_create_server4(cdev, stid, cnp);
        else
                ret = cxgbit_create_server6(cdev, stid, cnp);

        if (ret) {
                if (ret != -ETIMEDOUT)
                        cxgb4_free_stid(cdev->lldi.tids, stid,
                                        ss_family);
                cxgbit_np_hash_del(cdev, cnp);
                return ret;
        }
        return ret;
}

static int cxgbit_setup_cdev_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret = -1;

        mutex_lock(&cdev_list_lock);
        cdev = cxgbit_find_np_cdev(cnp);
        if (!cdev)
                goto out;

        if (cxgbit_np_hash_find(cdev, cnp) >= 0)
                goto out;

        if (__cxgbit_setup_cdev_np(cdev, cnp))
                goto out;

        cnp->com.cdev = cdev;
        ret = 0;
out:
        mutex_unlock(&cdev_list_lock);
        return ret;
}

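/*
 * For a wildcard (INADDR_ANY/IPV6_ADDR_ANY) listener, start a server on
 * every registered adapter. Success is reported if at least one adapter
 * accepted the server; an -ETIMEDOUT from any adapter aborts the loop.
 */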
static int cxgbit_setup_all_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret;
        u32 count = 0;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                if (cxgbit_np_hash_find(cdev, cnp) >= 0) {
                        mutex_unlock(&cdev_list_lock);
                        return -1;
                }
        }

        list_for_each_entry(cdev, &cdev_list_head, list) {
                ret = __cxgbit_setup_cdev_np(cdev, cnp);
                if (ret == -ETIMEDOUT)
                        break;
                if (ret != 0)
                        continue;
                count++;
        }
        mutex_unlock(&cdev_list_lock);

        return count ? 0 : -1;
}

int cxgbit_setup_np(struct iscsi_np *np, struct sockaddr_storage *ksockaddr)
{
        struct cxgbit_np *cnp;
        int ret;

        if ((ksockaddr->ss_family != AF_INET) &&
            (ksockaddr->ss_family != AF_INET6))
                return -EINVAL;

        cnp = kzalloc(sizeof(*cnp), GFP_KERNEL);
        if (!cnp)
                return -ENOMEM;

        init_waitqueue_head(&cnp->accept_wait);
        init_completion(&cnp->com.wr_wait.completion);
        init_completion(&cnp->accept_comp);
        INIT_LIST_HEAD(&cnp->np_accept_list);
        spin_lock_init(&cnp->np_accept_lock);
        kref_init(&cnp->kref);

        memcpy(&np->np_sockaddr, ksockaddr,
               sizeof(struct sockaddr_storage));
        memcpy(&cnp->com.local_addr, &np->np_sockaddr,
               sizeof(cnp->com.local_addr));

        cnp->np = np;
        cnp->com.cdev = NULL;

        if (cxgbit_inaddr_any(cnp))
                ret = cxgbit_setup_all_np(cnp);
        else
                ret = cxgbit_setup_cdev_np(cnp);

        if (ret) {
                cxgbit_put_cnp(cnp);
                return -EINVAL;
        }

        np->np_context = cnp;
        cnp->com.state = CSK_STATE_LISTEN;
        return 0;
}

static void
cxgbit_set_conn_info(struct iscsi_np *np, struct iscsit_conn *conn,
                     struct cxgbit_sock *csk)
{
        conn->login_family = np->np_sockaddr.ss_family;
        conn->login_sockaddr = csk->com.remote_addr;
        conn->local_sockaddr = csk->com.local_addr;
}

int cxgbit_accept_np(struct iscsi_np *np, struct iscsit_conn *conn)
{
        struct cxgbit_np *cnp = np->np_context;
        struct cxgbit_sock *csk;
        int ret = 0;

accept_wait:
        ret = wait_for_completion_interruptible(&cnp->accept_comp);
        if (ret)
                return -ENODEV;

        spin_lock_bh(&np->np_thread_lock);
        if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
                spin_unlock_bh(&np->np_thread_lock);
                /**
                 * No point in stalling here when np_thread
                 * is in state RESET/SHUTDOWN/EXIT - bail
                 **/
                return -ENODEV;
        }
        spin_unlock_bh(&np->np_thread_lock);

        spin_lock_bh(&cnp->np_accept_lock);
        if (list_empty(&cnp->np_accept_list)) {
                spin_unlock_bh(&cnp->np_accept_lock);
                goto accept_wait;
        }

        csk = list_first_entry(&cnp->np_accept_list,
                               struct cxgbit_sock,
                               accept_node);

        list_del_init(&csk->accept_node);
        spin_unlock_bh(&cnp->np_accept_lock);
        conn->context = csk;
        csk->conn = conn;

        cxgbit_set_conn_info(np, conn, csk);
        return 0;
}

static int
__cxgbit_free_cdev_np(struct cxgbit_device *cdev, struct cxgbit_np *cnp)
{
        int stid, ret;
        bool ipv6 = false;

        stid = cxgbit_np_hash_del(cdev, cnp);
        if (stid < 0)
                return -EINVAL;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags))
                return -EINVAL;

        if (cnp->np->np_sockaddr.ss_family == AF_INET6)
                ipv6 = true;

        cxgbit_get_cnp(cnp);
        cxgbit_init_wr_wait(&cnp->com.wr_wait);
        ret = cxgb4_remove_server(cdev->lldi.ports[0], stid,
                                  cdev->lldi.rxq_ids[0], ipv6);

        if (ret > 0)
                ret = net_xmit_errno(ret);

        if (ret) {
                cxgbit_put_cnp(cnp);
                return ret;
        }

        ret = cxgbit_wait_for_reply(cdev, &cnp->com.wr_wait,
                                    0, 10, __func__);
        if (ret == -ETIMEDOUT)
                return ret;

        if (ipv6 && cnp->com.cdev) {
                struct sockaddr_in6 *sin6;

                sin6 = (struct sockaddr_in6 *)&cnp->com.local_addr;
                cxgb4_clip_release(cdev->lldi.ports[0],
                                   (const u32 *)&sin6->sin6_addr.s6_addr,
                                   1);
        }

        cxgb4_free_stid(cdev->lldi.tids, stid,
                        cnp->com.local_addr.ss_family);
        return 0;
}

static void cxgbit_free_all_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        int ret;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                ret = __cxgbit_free_cdev_np(cdev, cnp);
                if (ret == -ETIMEDOUT)
                        break;
        }
        mutex_unlock(&cdev_list_lock);
}

static void cxgbit_free_cdev_np(struct cxgbit_np *cnp)
{
        struct cxgbit_device *cdev;
        bool found = false;

        mutex_lock(&cdev_list_lock);
        list_for_each_entry(cdev, &cdev_list_head, list) {
                if (cdev == cnp->com.cdev) {
                        found = true;
                        break;
                }
        }
        if (!found)
                goto out;

        __cxgbit_free_cdev_np(cdev, cnp);
out:
        mutex_unlock(&cdev_list_lock);
}

static void __cxgbit_free_conn(struct cxgbit_sock *csk);

void cxgbit_free_np(struct iscsi_np *np)
{
        struct cxgbit_np *cnp = np->np_context;
        struct cxgbit_sock *csk, *tmp;

        cnp->com.state = CSK_STATE_DEAD;

        if (cnp->com.cdev)
                cxgbit_free_cdev_np(cnp);
        else
                cxgbit_free_all_np(cnp);

        spin_lock_bh(&cnp->np_accept_lock);
        list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
                list_del_init(&csk->accept_node);
                __cxgbit_free_conn(csk);
        }
        spin_unlock_bh(&cnp->np_accept_lock);

        np->np_context = NULL;
        cxgbit_put_cnp(cnp);
}

static void cxgbit_send_halfclose(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len = roundup(sizeof(struct cpl_close_con_req), 16);

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;

        cxgb_mk_close_con_req(skb, len, csk->tid, csk->txq_idx,
                              NULL, NULL);

        cxgbit_skcb_flags(skb) |= SKCBF_TX_FLAG_COMPL;
        __skb_queue_tail(&csk->txq, skb);
        cxgbit_push_tx_frames(csk);
}

static void cxgbit_arp_failure_discard(void *handle, struct sk_buff *skb)
{
        struct cxgbit_sock *csk = handle;

        pr_debug("%s cxgbit_device %p\n", __func__, handle);
        kfree_skb(skb);
        cxgbit_put_csk(csk);
}

static void cxgbit_abort_arp_failure(void *handle, struct sk_buff *skb)
{
        struct cxgbit_device *cdev = handle;
        struct cpl_abort_req *req = cplhdr(skb);

        pr_debug("%s cdev %p\n", __func__, cdev);
        req->cmd = CPL_ABORT_NO_RST;
        cxgbit_ofld_send(cdev, skb);
}

static int cxgbit_send_abort_req(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len = roundup(sizeof(struct cpl_abort_req), 16);

        pr_debug("%s: csk %p tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);

        __skb_queue_purge(&csk->txq);

        if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
                cxgbit_send_tx_flowc_wr(csk);

        skb = __skb_dequeue(&csk->skbq);
        cxgb_mk_abort_req(skb, len, csk->tid, csk->txq_idx,
                          csk->com.cdev, cxgbit_abort_arp_failure);

        return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

static void
__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        __kfree_skb(skb);

        if (csk->com.state != CSK_STATE_ESTABLISHED)
                goto no_abort;

        set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
        csk->com.state = CSK_STATE_ABORTING;

        cxgbit_send_abort_req(csk);

        return;

no_abort:
        cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
        cxgbit_put_csk(csk);
}

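/*
 * Abort an established connection and wait for the ABORT_RPL. If another
 * context currently owns csk->lock, the abort is deferred by queueing a
 * zero-length skb on the backlog queue with __cxgbit_abort_conn() as its
 * backlog handler; the lock owner runs it when it drains the backlog.
 */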
void cxgbit_abort_conn(struct cxgbit_sock *csk)
{
        struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);

        cxgbit_get_csk(csk);
        cxgbit_init_wr_wait(&csk->com.wr_wait);

        spin_lock_bh(&csk->lock);
        if (csk->lock_owner) {
                cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
                __skb_queue_tail(&csk->backlogq, skb);
        } else {
                __cxgbit_abort_conn(csk, skb);
        }
        spin_unlock_bh(&csk->lock);

        cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
                              csk->tid, 600, __func__);
}

static void __cxgbit_free_conn(struct cxgbit_sock *csk)
{
        struct iscsit_conn *conn = csk->conn;
        bool release = false;

        pr_debug("%s: state %d\n",
                 __func__, csk->com.state);

        spin_lock_bh(&csk->lock);
        switch (csk->com.state) {
        case CSK_STATE_ESTABLISHED:
                if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
                        csk->com.state = CSK_STATE_CLOSING;
                        cxgbit_send_halfclose(csk);
                } else {
                        csk->com.state = CSK_STATE_ABORTING;
                        cxgbit_send_abort_req(csk);
                }
                break;
        case CSK_STATE_CLOSING:
                csk->com.state = CSK_STATE_MORIBUND;
                cxgbit_send_halfclose(csk);
                break;
        case CSK_STATE_DEAD:
                release = true;
                break;
        default:
                pr_err("%s: csk %p; state %d\n",
                       __func__, csk, csk->com.state);
        }
        spin_unlock_bh(&csk->lock);

        if (release)
                cxgbit_put_csk(csk);
}

void cxgbit_free_conn(struct iscsit_conn *conn)
{
        __cxgbit_free_conn(conn->context);
}

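/*
 * Derive the effective MSS from the hardware MTU index reported in the
 * TCP option word: subtract the IP and TCP header sizes, and the padded
 * timestamp option if it was negotiated. Clamp to a floor of 128 bytes.
 */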
static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
{
        csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
                    ((csk->com.remote_addr.ss_family == AF_INET) ?
                     sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
                    sizeof(struct tcphdr);
        csk->mss = csk->emss;
        if (TCPOPT_TSTAMP_G(opt))
                csk->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
        if (csk->emss < 128)
                csk->emss = 128;
        if (csk->emss & 7)
                pr_info("Warning: misaligned mtu idx %u mss %u emss=%u\n",
                        TCPOPT_MSS_G(opt), csk->mss, csk->emss);
        pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
                 csk->mss, csk->emss);
}

static void cxgbit_free_skb(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;

        __skb_queue_purge(&csk->txq);
        __skb_queue_purge(&csk->rxq);
        __skb_queue_purge(&csk->backlogq);
        __skb_queue_purge(&csk->ppodq);
        __skb_queue_purge(&csk->skbq);

        while ((skb = cxgbit_sock_dequeue_wr(csk)))
                kfree_skb(skb);

        __kfree_skb(csk->lro_hskb);
}

void _cxgbit_free_csk(struct kref *kref)
{
        struct cxgbit_sock *csk;
        struct cxgbit_device *cdev;

        csk = container_of(kref, struct cxgbit_sock, kref);

        pr_debug("%s csk %p state %d\n", __func__, csk, csk->com.state);

        if (csk->com.local_addr.ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
                                            &csk->com.local_addr;
                cxgb4_clip_release(csk->com.cdev->lldi.ports[0],
                                   (const u32 *)
                                   &sin6->sin6_addr.s6_addr, 1);
        }

        cxgb4_remove_tid(csk->com.cdev->lldi.tids, 0, csk->tid,
                         csk->com.local_addr.ss_family);
        dst_release(csk->dst);
        cxgb4_l2t_release(csk->l2t);

        cdev = csk->com.cdev;
        spin_lock_bh(&cdev->cskq.lock);
        list_del(&csk->list);
        spin_unlock_bh(&cdev->cskq.lock);

        cxgbit_free_skb(csk);
        cxgbit_put_cnp(csk->cnp);
        cxgbit_put_cdev(cdev);

        kfree(csk);
}

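/*
 * Size the TCP send and receive windows from the port's link speed:
 * 256KB per 10Gbps of link speed, capped at what RCV_BUFSIZ can encode
 * (rcv_win) and at 512KB (snd_win).
 */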
static void cxgbit_set_tcp_window(struct cxgbit_sock *csk, struct port_info *pi)
{
        unsigned int linkspeed;
        u8 scale;

        linkspeed = pi->link_cfg.speed;
        scale = linkspeed / SPEED_10000;

#define CXGBIT_10G_RCV_WIN (256 * 1024)
        csk->rcv_win = CXGBIT_10G_RCV_WIN;
        if (scale)
                csk->rcv_win *= scale;
        csk->rcv_win = min(csk->rcv_win, RCV_BUFSIZ_M << 10);

#define CXGBIT_10G_SND_WIN (256 * 1024)
        csk->snd_win = CXGBIT_10G_SND_WIN;
        if (scale)
                csk->snd_win *= scale;
        csk->snd_win = min(csk->snd_win, 512U * 1024);

        pr_debug("%s snd_win %d rcv_win %d\n",
                 __func__, csk->snd_win, csk->rcv_win);
}

#ifdef CONFIG_CHELSIO_T4_DCB
static u8 cxgbit_get_iscsi_dcb_state(struct net_device *ndev)
{
        return ndev->dcbnl_ops->getstate(ndev);
}

static int cxgbit_select_priority(int pri_mask)
{
        if (!pri_mask)
                return 0;

        return (ffs(pri_mask) - 1);
}

static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
{
        int ret;
        u8 caps;

        struct dcb_app iscsi_dcb_app = {
                .protocol = local_port
        };

        ret = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
        if (ret)
                return 0;

        if (caps & DCB_CAP_DCBX_VER_IEEE) {
                iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM;
                ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
                if (!ret) {
                        iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
                        ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
                }
        } else if (caps & DCB_CAP_DCBX_VER_CEE) {
                iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;

                ret = dcb_getapp(ndev, &iscsi_dcb_app);
        }

        pr_info("iSCSI priority is set to %u\n", cxgbit_select_priority(ret));

        return cxgbit_select_priority(ret);
}
#endif

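/*
 * Resolve the L2 next hop for the new connection and fill in the csk's
 * offload parameters: L2T entry, MTU, TX channel, SMAC index, TX/RX
 * queue indices and TCP windows. Loopback and real devices take
 * different paths because queue selection differs between them.
 */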
static int
cxgbit_offload_init(struct cxgbit_sock *csk, int iptype, __u8 *peer_ip,
                    u16 local_port, struct dst_entry *dst,
                    struct cxgbit_device *cdev)
{
        struct neighbour *n;
        int ret, step;
        struct net_device *ndev;
        u16 rxq_idx, port_id;
#ifdef CONFIG_CHELSIO_T4_DCB
        u8 priority = 0;
#endif

        n = dst_neigh_lookup(dst, peer_ip);
        if (!n)
                return -ENODEV;

        rcu_read_lock();
        if (!(n->nud_state & NUD_VALID))
                neigh_event_send(n, NULL);

        ret = -ENOMEM;
        if (n->dev->flags & IFF_LOOPBACK) {
                if (iptype == 4)
                        ndev = cxgbit_ipv4_netdev(*(__be32 *)peer_ip);
                else if (IS_ENABLED(CONFIG_IPV6))
                        ndev = cxgbit_ipv6_netdev((struct in6_addr *)peer_ip);
                else
                        ndev = NULL;

                if (!ndev) {
                        ret = -ENODEV;
                        goto out;
                }

                csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t,
                                         n, ndev, 0);
                if (!csk->l2t)
                        goto out;
                csk->mtu = ndev->mtu;
                csk->tx_chan = cxgb4_port_chan(ndev);
                csk->smac_idx =
                        ((struct port_info *)netdev_priv(ndev))->smt_idx;
                step = cdev->lldi.ntxq /
                       cdev->lldi.nchan;
                csk->txq_idx = cxgb4_port_idx(ndev) * step;
                step = cdev->lldi.nrxq /
                       cdev->lldi.nchan;
                csk->ctrlq_idx = cxgb4_port_idx(ndev);
                csk->rss_qid = cdev->lldi.rxq_ids[
                                cxgb4_port_idx(ndev) * step];
                csk->port_id = cxgb4_port_idx(ndev);
                cxgbit_set_tcp_window(csk,
                                      (struct port_info *)netdev_priv(ndev));
        } else {
                ndev = cxgbit_get_real_dev(n->dev);
                if (!ndev) {
                        ret = -ENODEV;
                        goto out;
                }

#ifdef CONFIG_CHELSIO_T4_DCB
                if (cxgbit_get_iscsi_dcb_state(ndev))
                        priority = cxgbit_get_iscsi_dcb_priority(ndev,
                                                                 local_port);

                csk->dcb_priority = priority;

                csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, priority);
#else
                csk->l2t = cxgb4_l2t_get(cdev->lldi.l2t, n, ndev, 0);
#endif
                if (!csk->l2t)
                        goto out;
                port_id = cxgb4_port_idx(ndev);
                csk->mtu = dst_mtu(dst);
                csk->tx_chan = cxgb4_port_chan(ndev);
                csk->smac_idx =
                        ((struct port_info *)netdev_priv(ndev))->smt_idx;
                step = cdev->lldi.ntxq /
                       cdev->lldi.nports;
                csk->txq_idx = (port_id * step) +
                               (cdev->selectq[port_id][0]++ % step);
                csk->ctrlq_idx = cxgb4_port_idx(ndev);
                step = cdev->lldi.nrxq /
                       cdev->lldi.nports;
                rxq_idx = (port_id * step) +
                          (cdev->selectq[port_id][1]++ % step);
                csk->rss_qid = cdev->lldi.rxq_ids[rxq_idx];
                csk->port_id = port_id;
                cxgbit_set_tcp_window(csk,
                                      (struct port_info *)netdev_priv(ndev));
        }
        ret = 0;
out:
        rcu_read_unlock();
        neigh_release(n);
        return ret;
}

int cxgbit_ofld_send(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        int ret = 0;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                kfree_skb(skb);
                pr_err("%s - device not up - dropping\n", __func__);
                return -EIO;
        }

        ret = cxgb4_ofld_send(cdev->lldi.ports[0], skb);
        if (ret < 0)
                kfree_skb(skb);
        return ret < 0 ? ret : 0;
}

static void cxgbit_release_tid(struct cxgbit_device *cdev, u32 tid)
{
        u32 len = roundup(sizeof(struct cpl_tid_release), 16);
        struct sk_buff *skb;

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb)
                return;

        cxgb_mk_tid_release(skb, len, tid, 0);
        cxgbit_ofld_send(cdev, skb);
}

int
cxgbit_l2t_send(struct cxgbit_device *cdev, struct sk_buff *skb,
                struct l2t_entry *l2e)
{
        int ret = 0;

        if (!test_bit(CDEV_STATE_UP, &cdev->flags)) {
                kfree_skb(skb);
                pr_err("%s - device not up - dropping\n", __func__);
                return -EIO;
        }

        ret = cxgb4_l2t_send(cdev->lldi.ports[0], skb, l2e);
        if (ret < 0)
                kfree_skb(skb);
        return ret < 0 ? ret : 0;
}

static void cxgbit_send_rx_credits(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        if (csk->com.state != CSK_STATE_ESTABLISHED) {
                __kfree_skb(skb);
                return;
        }

        cxgbit_ofld_send(csk->com.cdev, skb);
}

/*
 * CPL connection rx data ack: host ->
 * Send RX credits through an RX_DATA_ACK CPL message.
 * Returns the number of credits sent.
 */
int cxgbit_rx_data_ack(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len = roundup(sizeof(struct cpl_rx_data_ack), 16);
        u32 credit_dack;

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -1;

        credit_dack = RX_DACK_CHANGE_F | RX_DACK_MODE_V(3) |
                      RX_CREDITS_V(csk->rx_credits);

        cxgb_mk_rx_data_ack(skb, len, csk->tid, csk->ctrlq_idx,
                            credit_dack);

        csk->rx_credits = 0;

        spin_lock_bh(&csk->lock);
        if (csk->lock_owner) {
                cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_send_rx_credits;
                __skb_queue_tail(&csk->backlogq, skb);
                spin_unlock_bh(&csk->lock);
                return 0;
        }

        cxgbit_send_rx_credits(csk, skb);
        spin_unlock_bh(&csk->lock);

        return 0;
}

#define FLOWC_WR_NPARAMS_MIN 9
#define FLOWC_WR_NPARAMS_MAX 11

static int cxgbit_alloc_csk_skb(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        u32 len, flowclen;
        u8 i;

        flowclen = offsetof(struct fw_flowc_wr,
                            mnemval[FLOWC_WR_NPARAMS_MAX]);

        len = max_t(u32, sizeof(struct cpl_abort_req),
                    sizeof(struct cpl_abort_rpl));

        len = max(len, flowclen);
        len = roundup(len, 16);

        for (i = 0; i < 3; i++) {
                skb = alloc_skb(len, GFP_ATOMIC);
                if (!skb)
                        goto out;
                __skb_queue_tail(&csk->skbq, skb);
        }

        skb = alloc_skb(LRO_SKB_MIN_HEADROOM, GFP_ATOMIC);
        if (!skb)
                goto out;

        memset(skb->data, 0, LRO_SKB_MIN_HEADROOM);
        csk->lro_hskb = skb;

        return 0;
out:
        __skb_queue_purge(&csk->skbq);
        return -ENOMEM;
}

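/*
 * Build and send the CPL_PASS_ACCEPT_RPL that completes the passive
 * open: encode window scale, MSS index, L2T index, TX channel and
 * receive window in opt0/opt2, and mirror the peer's timestamp, SACK
 * and ECN options.
 */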
static void
cxgbit_pass_accept_rpl(struct cxgbit_sock *csk, struct cpl_pass_accept_req *req)
{
        struct sk_buff *skb;
        const struct tcphdr *tcph;
        struct cpl_t5_pass_accept_rpl *rpl5;
        struct cxgb4_lld_info *lldi = &csk->com.cdev->lldi;
        unsigned int len = roundup(sizeof(*rpl5), 16);
        unsigned int mtu_idx;
        u64 opt0;
        u32 opt2, hlen;
        u32 wscale;
        u32 win;

        pr_debug("%s csk %p tid %u\n", __func__, csk, csk->tid);

        skb = alloc_skb(len, GFP_ATOMIC);
        if (!skb) {
                cxgbit_put_csk(csk);
                return;
        }

        rpl5 = __skb_put_zero(skb, len);

        INIT_TP_WR(rpl5, csk->tid);
        OPCODE_TID(rpl5) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
                                                     csk->tid));
        cxgb_best_mtu(csk->com.cdev->lldi.mtus, csk->mtu, &mtu_idx,
                      req->tcpopt.tstamp,
                      (csk->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
        wscale = cxgb_compute_wscale(csk->rcv_win);
        /*
         * Specify the largest window that will fit in opt0. The
         * remainder will be specified in the rx_data_ack.
         */
        win = csk->rcv_win >> 10;
        if (win > RCV_BUFSIZ_M)
                win = RCV_BUFSIZ_M;
        opt0 =  TCAM_BYPASS_F |
                WND_SCALE_V(wscale) |
                MSS_IDX_V(mtu_idx) |
                L2T_IDX_V(csk->l2t->idx) |
                TX_CHAN_V(csk->tx_chan) |
                SMAC_SEL_V(csk->smac_idx) |
                DSCP_V(csk->tos >> 2) |
                ULP_MODE_V(ULP_MODE_ISCSI) |
                RCV_BUFSIZ_V(win);

        opt2 = RX_CHANNEL_V(0) |
                RSS_QUEUE_VALID_F | RSS_QUEUE_V(csk->rss_qid);

        if (!is_t5(lldi->adapter_type))
                opt2 |= RX_FC_DISABLE_F;

        if (req->tcpopt.tstamp)
                opt2 |= TSTAMPS_EN_F;
        if (req->tcpopt.sack)
                opt2 |= SACK_EN_F;
        if (wscale)
                opt2 |= WND_SCALE_EN_F;

        hlen = ntohl(req->hdr_len);

        if (is_t5(lldi->adapter_type))
                tcph = (struct tcphdr *)((u8 *)(req + 1) +
                       ETH_HDR_LEN_G(hlen) + IP_HDR_LEN_G(hlen));
        else
                tcph = (struct tcphdr *)((u8 *)(req + 1) +
                       T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen));

        if (tcph->ece && tcph->cwr)
                opt2 |= CCTRL_ECN_V(1);

        opt2 |= CONG_CNTRL_V(CONG_ALG_NEWRENO);

        opt2 |= T5_ISS_F;
        rpl5->iss = cpu_to_be32((get_random_u32() & ~7UL) - 1);

        opt2 |= T5_OPT_2_VALID_F;

        rpl5->opt0 = cpu_to_be64(opt0);
        rpl5->opt2 = cpu_to_be32(opt2);
        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->ctrlq_idx);
        t4_set_arp_err_handler(skb, csk, cxgbit_arp_failure_discard);
        cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}

static void
cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cxgbit_sock *csk = NULL;
        struct cxgbit_np *cnp;
        struct cpl_pass_accept_req *req = cplhdr(skb);
        unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
        struct tid_info *t = cdev->lldi.tids;
        unsigned int tid = GET_TID(req);
        u16 peer_mss = ntohs(req->tcpopt.mss);
        unsigned short hdrs;
        struct dst_entry *dst;
        __u8 local_ip[16], peer_ip[16];
        __be16 local_port, peer_port;
        int ret;
        int iptype;

        pr_debug("%s: cdev = %p; stid = %u; tid = %u\n",
                 __func__, cdev, stid, tid);

        cnp = lookup_stid(t, stid);
        if (!cnp) {
                pr_err("%s connect request on invalid stid %d\n",
                       __func__, stid);
                goto rel_skb;
        }

        if (cnp->com.state != CSK_STATE_LISTEN) {
                pr_err("%s - listening parent not in CSK_STATE_LISTEN\n",
                       __func__);
                goto reject;
        }

        csk = lookup_tid(t, tid);
        if (csk) {
                pr_err("%s csk not null tid %u\n",
                       __func__, tid);
                goto rel_skb;
        }

        cxgb_get_4tuple(req, cdev->lldi.adapter_type, &iptype, local_ip,
                        peer_ip, &local_port, &peer_port);

        /* Find output route */
        if (iptype == 4) {
                pr_debug("%s parent sock %p tid %u laddr %pI4 raddr %pI4 "
                         "lport %d rport %d peer_mss %d\n"
                         , __func__, cnp, tid,
                         local_ip, peer_ip, ntohs(local_port),
                         ntohs(peer_port), peer_mss);
                dst = cxgb_find_route(&cdev->lldi, cxgbit_get_real_dev,
                                      *(__be32 *)local_ip,
                                      *(__be32 *)peer_ip,
                                      local_port, peer_port,
                                      PASS_OPEN_TOS_G(ntohl(req->tos_stid)));
        } else {
                pr_debug("%s parent sock %p tid %u laddr %pI6 raddr %pI6 "
                         "lport %d rport %d peer_mss %d\n"
                         , __func__, cnp, tid,
                         local_ip, peer_ip, ntohs(local_port),
                         ntohs(peer_port), peer_mss);
                dst = cxgb_find_route6(&cdev->lldi, cxgbit_get_real_dev,
                                       local_ip, peer_ip,
                                       local_port, peer_port,
                                       PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
                                       ((struct sockaddr_in6 *)
                                        &cnp->com.local_addr)->sin6_scope_id);
        }
        if (!dst) {
                pr_err("%s - failed to find dst entry!\n",
                       __func__);
                goto reject;
        }

        csk = kzalloc(sizeof(*csk), GFP_ATOMIC);
        if (!csk) {
                dst_release(dst);
                goto rel_skb;
        }

        ret = cxgbit_offload_init(csk, iptype, peer_ip, ntohs(local_port),
                                  dst, cdev);
        if (ret) {
                pr_err("%s - failed to allocate l2t entry!\n",
                       __func__);
                dst_release(dst);
                kfree(csk);
                goto reject;
        }

        kref_init(&csk->kref);
        init_completion(&csk->com.wr_wait.completion);

        INIT_LIST_HEAD(&csk->accept_node);

        hdrs = (iptype == 4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
                sizeof(struct tcphdr) + (req->tcpopt.tstamp ? 12 : 0);
        if (peer_mss && csk->mtu > (peer_mss + hdrs))
                csk->mtu = peer_mss + hdrs;

        csk->com.state = CSK_STATE_CONNECTING;
        csk->com.cdev = cdev;
        csk->cnp = cnp;
        csk->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
        csk->dst = dst;
        csk->tid = tid;
        csk->wr_cred = cdev->lldi.wr_cred -
                       DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
        csk->wr_max_cred = csk->wr_cred;
        csk->wr_una_cred = 0;

        if (iptype == 4) {
                struct sockaddr_in *sin = (struct sockaddr_in *)
                                          &csk->com.local_addr;
                sin->sin_family = AF_INET;
                sin->sin_port = local_port;
                sin->sin_addr.s_addr = *(__be32 *)local_ip;

                sin = (struct sockaddr_in *)&csk->com.remote_addr;
                sin->sin_family = AF_INET;
                sin->sin_port = peer_port;
                sin->sin_addr.s_addr = *(__be32 *)peer_ip;
        } else {
                struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
                                            &csk->com.local_addr;

                sin6->sin6_family = PF_INET6;
                sin6->sin6_port = local_port;
                memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
                cxgb4_clip_get(cdev->lldi.ports[0],
                               (const u32 *)&sin6->sin6_addr.s6_addr,
                               1);

                sin6 = (struct sockaddr_in6 *)&csk->com.remote_addr;
                sin6->sin6_family = PF_INET6;
                sin6->sin6_port = peer_port;
                memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
        }

        skb_queue_head_init(&csk->rxq);
        skb_queue_head_init(&csk->txq);
        skb_queue_head_init(&csk->ppodq);
        skb_queue_head_init(&csk->backlogq);
        skb_queue_head_init(&csk->skbq);
        cxgbit_sock_reset_wr_list(csk);
        spin_lock_init(&csk->lock);
        init_waitqueue_head(&csk->waitq);
        csk->lock_owner = false;

        if (cxgbit_alloc_csk_skb(csk)) {
                dst_release(dst);
                kfree(csk);
                goto rel_skb;
        }

        cxgbit_get_cnp(cnp);
        cxgbit_get_cdev(cdev);

        spin_lock(&cdev->cskq.lock);
        list_add_tail(&csk->list, &cdev->cskq.list);
        spin_unlock(&cdev->cskq.lock);

        cxgb4_insert_tid(t, csk, tid, csk->com.local_addr.ss_family);
        cxgbit_pass_accept_rpl(csk, req);
        goto rel_skb;

reject:
        cxgbit_release_tid(cdev, tid);
rel_skb:
        __kfree_skb(skb);
}

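/*
 * Compute the size of the FW_FLOWC_WR for this connection. The WR
 * carries FLOWC_WR_NPARAMS_MIN mnemonic/value pairs, plus one for the
 * receive window scale if negotiated, plus one for the DCB priority
 * when CONFIG_CHELSIO_T4_DCB is enabled. The WR length is rounded up
 * to a whole number of 16-byte credits.
 */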
static u32
cxgbit_tx_flowc_wr_credits(struct cxgbit_sock *csk, u32 *nparamsp,
                           u32 *flowclenp)
{
        u32 nparams, flowclen16, flowclen;

        nparams = FLOWC_WR_NPARAMS_MIN;

        if (csk->snd_wscale)
                nparams++;

#ifdef CONFIG_CHELSIO_T4_DCB
        nparams++;
#endif
        flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
        flowclen16 = DIV_ROUND_UP(flowclen, 16);
        flowclen = flowclen16 * 16;
        /*
         * Return the number of 16-byte credits used by the flowc request.
         * Pass back the nparams and actual flowc length if requested.
         */
        if (nparamsp)
                *nparamsp = nparams;
        if (flowclenp)
                *flowclenp = flowclen;
        return flowclen16;
}

u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *csk)
{
        struct cxgbit_device *cdev = csk->com.cdev;
        struct fw_flowc_wr *flowc;
        u32 nparams, flowclen16, flowclen;
        struct sk_buff *skb;
        u8 index;

#ifdef CONFIG_CHELSIO_T4_DCB
        u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
#endif

        flowclen16 = cxgbit_tx_flowc_wr_credits(csk, &nparams, &flowclen);

        skb = __skb_dequeue(&csk->skbq);
        flowc = __skb_put_zero(skb, flowclen);

        flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
                                           FW_FLOWC_WR_NPARAMS_V(nparams));
        flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) |
                                          FW_WR_FLOWID_V(csk->tid));
        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
        flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
                                            (csk->com.cdev->lldi.pf));
        flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
        flowc->mnemval[1].val = cpu_to_be32(csk->tx_chan);
        flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
        flowc->mnemval[2].val = cpu_to_be32(csk->tx_chan);
        flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
        flowc->mnemval[3].val = cpu_to_be32(csk->rss_qid);
        flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
        flowc->mnemval[4].val = cpu_to_be32(csk->snd_nxt);
        flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
        flowc->mnemval[5].val = cpu_to_be32(csk->rcv_nxt);
        flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
        flowc->mnemval[6].val = cpu_to_be32(csk->snd_win);
        flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
        flowc->mnemval[7].val = cpu_to_be32(csk->emss);
        flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
        if (test_bit(CDEV_ISO_ENABLE, &cdev->flags))
                flowc->mnemval[8].val = cpu_to_be32(CXGBIT_MAX_ISO_PAYLOAD);
        else
                flowc->mnemval[8].val = cpu_to_be32(16384);

        index = 9;

        if (csk->snd_wscale) {
                flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_RCV_SCALE;
                flowc->mnemval[index].val = cpu_to_be32(csk->snd_wscale);
                index++;
        }

#ifdef CONFIG_CHELSIO_T4_DCB
        flowc->mnemval[index].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
        if (vlan == VLAN_NONE) {
                pr_warn("csk %u without VLAN Tag on DCB Link\n", csk->tid);
                flowc->mnemval[index].val = cpu_to_be32(0);
        } else
                flowc->mnemval[index].val = cpu_to_be32(
                                (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT);
#endif

        pr_debug("%s: csk %p; tx_chan = %u; rss_qid = %u; snd_seq = %u;"
                 " rcv_seq = %u; snd_win = %u; emss = %u\n",
                 __func__, csk, csk->tx_chan, csk->rss_qid, csk->snd_nxt,
                 csk->rcv_nxt, csk->snd_win, csk->emss);
        set_wr_txq(skb, CPL_PRIORITY_DATA, csk->txq_idx);
        cxgbit_ofld_send(csk->com.cdev, skb);
        return flowclen16;
}

static int
cxgbit_send_tcb_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        spin_lock_bh(&csk->lock);
        if (unlikely(csk->com.state != CSK_STATE_ESTABLISHED)) {
                spin_unlock_bh(&csk->lock);
                pr_err("%s: csk 0x%p, tid %u, state %u\n",
                       __func__, csk, csk->tid, csk->com.state);
                __kfree_skb(skb);
                return -1;
        }

        cxgbit_get_csk(csk);
        cxgbit_init_wr_wait(&csk->com.wr_wait);
        cxgbit_ofld_send(csk->com.cdev, skb);
        spin_unlock_bh(&csk->lock);

        return 0;
}

int cxgbit_setup_conn_digest(struct cxgbit_sock *csk)
{
        struct sk_buff *skb;
        struct cpl_set_tcb_field *req;
        u8 hcrc = csk->submode & CXGBIT_SUBMODE_HCRC;
        u8 dcrc = csk->submode & CXGBIT_SUBMODE_DCRC;
        unsigned int len = roundup(sizeof(*req), 16);
        int ret;

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        /* set up ulp submode */
        req = __skb_put_zero(skb, len);

        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
        req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
        req->word_cookie = htons(0);
        req->mask = cpu_to_be64(0x3 << 4);
        req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
                                (dcrc ? ULP_CRC_DATA : 0)) << 4);
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);

        if (cxgbit_send_tcb_skb(csk, skb))
                return -1;

        ret = cxgbit_wait_for_reply(csk->com.cdev,
                                    &csk->com.wr_wait,
                                    csk->tid, 5, __func__);
        if (ret)
                return -1;

        return 0;
}

int cxgbit_setup_conn_pgidx(struct cxgbit_sock *csk, u32 pg_idx)
{
        struct sk_buff *skb;
        struct cpl_set_tcb_field *req;
        unsigned int len = roundup(sizeof(*req), 16);
        int ret;

        skb = alloc_skb(len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        req = __skb_put_zero(skb, len);

        INIT_TP_WR(req, csk->tid);
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
        req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
        req->word_cookie = htons(0);
        req->mask = cpu_to_be64(0x3 << 8);
        req->val = cpu_to_be64(pg_idx << 8);
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, csk->ctrlq_idx);

        if (cxgbit_send_tcb_skb(csk, skb))
                return -1;

        ret = cxgbit_wait_for_reply(csk->com.cdev,
                                    &csk->com.wr_wait,
                                    csk->tid, 5, __func__);
        if (ret)
                return -1;

        return 0;
}

static void
cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cpl_pass_open_rpl *rpl = cplhdr(skb);
        struct tid_info *t = cdev->lldi.tids;
        unsigned int stid = GET_TID(rpl);
        struct cxgbit_np *cnp = lookup_stid(t, stid);

        pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
                 __func__, cnp, stid, rpl->status);

        if (!cnp) {
                pr_info("%s stid %d lookup failure\n", __func__, stid);
                goto rel_skb;
        }

        cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
        cxgbit_put_cnp(cnp);
rel_skb:
        __kfree_skb(skb);
}

static void
cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
        struct tid_info *t = cdev->lldi.tids;
        unsigned int stid = GET_TID(rpl);
        struct cxgbit_np *cnp = lookup_stid(t, stid);

        pr_debug("%s: cnp = %p; stid = %u; status = %d\n",
                 __func__, cnp, stid, rpl->status);

        if (!cnp) {
                pr_info("%s stid %d lookup failure\n", __func__, stid);
                goto rel_skb;
        }

        cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
        cxgbit_put_cnp(cnp);
rel_skb:
        __kfree_skb(skb);
}

static void
cxgbit_pass_establish(struct cxgbit_device *cdev, struct sk_buff *skb)
{
        struct cpl_pass_establish *req = cplhdr(skb);
        struct tid_info *t = cdev->lldi.tids;
        unsigned int tid = GET_TID(req);
        struct cxgbit_sock *csk;
        struct cxgbit_np *cnp;
        u16 tcp_opt = be16_to_cpu(req->tcp_opt);
        u32 snd_isn = be32_to_cpu(req->snd_isn);
        u32 rcv_isn = be32_to_cpu(req->rcv_isn);

        csk = lookup_tid(t, tid);
        if (unlikely(!csk)) {
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }
        cnp = csk->cnp;

        pr_debug("%s: csk %p; tid %u; cnp %p\n",
                 __func__, csk, tid, cnp);

        csk->write_seq = snd_isn;
        csk->snd_una = snd_isn;
        csk->snd_nxt = snd_isn;
        csk->rcv_nxt = rcv_isn;

        csk->snd_wscale = TCPOPT_SND_WSCALE_G(tcp_opt);
        cxgbit_set_emss(csk, tcp_opt);
        dst_confirm(csk->dst);
        csk->com.state = CSK_STATE_ESTABLISHED;
        spin_lock_bh(&cnp->np_accept_lock);
        list_add_tail(&csk->accept_node, &cnp->np_accept_list);
        spin_unlock_bh(&cnp->np_accept_lock);
        complete(&cnp->accept_comp);
rel_skb:
        __kfree_skb(skb);
}

static void cxgbit_queue_rx_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        cxgbit_skcb_flags(skb) = 0;
        spin_lock_bh(&csk->rxq.lock);
        __skb_queue_tail(&csk->rxq, skb);
        spin_unlock_bh(&csk->rxq.lock);
        wake_up(&csk->waitq);
}

static void cxgbit_peer_close(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        pr_debug("%s: csk %p; tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);

        switch (csk->com.state) {
        case CSK_STATE_ESTABLISHED:
                csk->com.state = CSK_STATE_CLOSING;
                cxgbit_queue_rx_skb(csk, skb);
                return;
        case CSK_STATE_CLOSING:
                /* simultaneous close */
                csk->com.state = CSK_STATE_MORIBUND;
                break;
        case CSK_STATE_MORIBUND:
                csk->com.state = CSK_STATE_DEAD;
                cxgbit_put_csk(csk);
                break;
        case CSK_STATE_ABORTING:
                break;
        default:
                pr_info("%s: cpl_peer_close in bad state %d\n",
                        __func__, csk->com.state);
        }

        __kfree_skb(skb);
}

static void cxgbit_close_con_rpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
        pr_debug("%s: csk %p; tid %u; state %d\n",
                 __func__, csk, csk->tid, csk->com.state);

        switch (csk->com.state) {
        case CSK_STATE_CLOSING:
                csk->com.state = CSK_STATE_MORIBUND;
                break;
        case CSK_STATE_MORIBUND:
                csk->com.state = CSK_STATE_DEAD;
                cxgbit_put_csk(csk);
                break;
        case CSK_STATE_ABORTING:
        case CSK_STATE_DEAD:
                break;
        default:
                pr_info("%s: cpl_close_con_rpl in bad state %d\n",
                        __func__, csk->com.state);
        }

        __kfree_skb(skb);
}

static void cxgbit_abort_req_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *hdr = cplhdr(skb);
	unsigned int tid = GET_TID(hdr);
	struct sk_buff *rpl_skb;
	bool release = false;
	bool wakeup_thread = false;
	u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);

	pr_debug("%s: csk %p; tid %u; state %d\n",
		 __func__, csk, tid, csk->com.state);

	if (cxgb_is_neg_adv(hdr->status)) {
		pr_err("%s: got neg advise %d on tid %u\n",
		       __func__, hdr->status, tid);
		goto rel_skb;
	}

	switch (csk->com.state) {
	case CSK_STATE_CONNECTING:
	case CSK_STATE_MORIBUND:
		csk->com.state = CSK_STATE_DEAD;
		release = true;
		break;
	case CSK_STATE_ESTABLISHED:
		csk->com.state = CSK_STATE_DEAD;
		wakeup_thread = true;
		break;
	case CSK_STATE_CLOSING:
		csk->com.state = CSK_STATE_DEAD;
		if (!csk->conn)
			release = true;
		break;
	case CSK_STATE_ABORTING:
		break;
	default:
		pr_info("%s: cpl_abort_req_rss in bad state %d\n",
			__func__, csk->com.state);
		csk->com.state = CSK_STATE_DEAD;
	}

	__skb_queue_purge(&csk->txq);

	/* If no tx data (and hence no flowc WR) has gone out on this tid
	 * yet, send the flowc WR before the abort reply.
	 */
	if (!test_and_set_bit(CSK_TX_DATA_SENT, &csk->com.flags))
		cxgbit_send_tx_flowc_wr(csk);

	rpl_skb = __skb_dequeue(&csk->skbq);

	cxgb_mk_abort_rpl(rpl_skb, len, csk->tid, csk->txq_idx);
	cxgbit_ofld_send(csk->com.cdev, rpl_skb);

	if (wakeup_thread) {
		cxgbit_queue_rx_skb(csk, skb);
		return;
	}

	if (release)
		cxgbit_put_csk(csk);
rel_skb:
	__kfree_skb(skb);
}
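
/*
 * Handle CPL_ABORT_RPL_RSS: completion of an abort we initiated. Wake
 * any waiter blocked on CSK_ABORT_RPL_WAIT and drop a reference.
 */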
static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);

	pr_debug("%s: csk %p; tid %u; state %d\n",
		 __func__, csk, csk->tid, csk->com.state);

	switch (csk->com.state) {
	case CSK_STATE_ABORTING:
		csk->com.state = CSK_STATE_DEAD;
		if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
			cxgbit_wake_up(&csk->com.wr_wait, __func__,
				       rpl->status);
		cxgbit_put_csk(csk);
		break;
	default:
		pr_info("%s: cpl_abort_rpl_rss in state %d\n",
			__func__, csk->com.state);
	}

	__kfree_skb(skb);
}
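
/*
 * Sanity-check tx credit accounting: the credits currently available
 * plus those still charged to pending WRs must equal the connection's
 * maximum. Returns true if the books don't balance.
 */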
static bool cxgbit_credit_err(const struct cxgbit_sock *csk)
{
	const struct sk_buff *skb = csk->wr_pending_head;
	u32 credit = 0;

	if (unlikely(csk->wr_cred > csk->wr_max_cred)) {
		pr_err("csk 0x%p, tid %u, credit %u > %u\n",
		       csk, csk->tid, csk->wr_cred, csk->wr_max_cred);
		return true;
	}

	while (skb) {
		credit += (__force u32)skb->csum;
		skb = cxgbit_skcb_tx_wr_next(skb);
	}

	if (unlikely((csk->wr_cred + credit) != csk->wr_max_cred)) {
		pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
		       csk, csk->tid, csk->wr_cred,
		       credit, csk->wr_max_cred);
		return true;
	}

	return false;
}
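
/*
 * Handle CPL_FW4_ACK: firmware has completed tx WRs and is returning
 * their credits. Each pending skb's csum field caches the credits its
 * WR consumed. Free fully-acked WRs, advance snd_una when the sequence
 * number is valid, and push any queued tx now that credits are back.
 */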
static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	struct cpl_fw4_ack *rpl = (struct cpl_fw4_ack *)cplhdr(skb);
	u32 credits = rpl->credits;
	u32 snd_una = ntohl(rpl->snd_una);

	csk->wr_cred += credits;
	if (csk->wr_una_cred > (csk->wr_max_cred - csk->wr_cred))
		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

	while (credits) {
		struct sk_buff *p = cxgbit_sock_peek_wr(csk);
		u32 csum;

		if (unlikely(!p)) {
			pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
			       csk, csk->tid, credits,
			       csk->wr_cred, csk->wr_una_cred);
			break;
		}

		csum = (__force u32)p->csum;
		if (unlikely(credits < csum)) {
			/* WR only partially acked; remember the
			 * remaining credits and stop.
			 */
			pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
				csk, csk->tid,
				credits, csk->wr_cred, csk->wr_una_cred,
				csum);
			p->csum = (__force __wsum)(csum - credits);
			break;
		}

		cxgbit_sock_dequeue_wr(csk);
		credits -= csum;
		kfree_skb(p);
	}

	if (unlikely(cxgbit_credit_err(csk))) {
		/* accounting is inconsistent; hand the skb to the rx
		 * thread to deal with it.
		 */
		cxgbit_queue_rx_skb(csk, skb);
		return;
	}

	if (rpl->seq_vld & CPL_FW4_ACK_FLAGS_SEQVAL) {
		if (unlikely(before(snd_una, csk->snd_una))) {
			pr_warn("csk 0x%p,%u, snd_una %u/%u.",
				csk, csk->tid, snd_una,
				csk->snd_una);
			goto rel_skb;
		}

		if (csk->snd_una != snd_una) {
			csk->snd_una = snd_una;
			dst_confirm(csk->dst);
		}
	}

	if (skb_queue_len(&csk->txq))
		cxgbit_push_tx_frames(csk);

rel_skb:
	__kfree_skb(skb);
}
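
/*
 * Handle CPL_SET_TCB_RPL: completion of a TCB update. Wake the waiter
 * with the returned status and drop a csk reference.
 */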
static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk;
	struct cpl_set_tcb_rpl *rpl = (struct cpl_set_tcb_rpl *)skb->data;
	unsigned int tid = GET_TID(rpl);
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find connection for tid %u.\n", tid);
		goto rel_skb;
	}

	cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
	cxgbit_put_csk(csk);
rel_skb:
	__kfree_skb(skb);
}
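
/*
 * Handle CPL_RX_DATA: ingress payload. Look up the connection by tid
 * and queue the skb for the rx thread.
 */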
static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk;
	struct cpl_rx_data *cpl = cplhdr(skb);
	unsigned int tid = GET_TID(cpl);
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct tid_info *t = lldi->tids;

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	cxgbit_queue_rx_skb(csk, skb);
	return;
rel_skb:
	__kfree_skb(skb);
}
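
/*
 * Run a CPL's backlog handler under csk->lock. If another context owns
 * the csk, defer the skb to the backlog queue instead; the owner
 * processes deferred skbs when it releases the lock.
 */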
static void
__cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	spin_lock(&csk->lock);
	if (csk->lock_owner) {
		__skb_queue_tail(&csk->backlogq, skb);
		spin_unlock(&csk->lock);
		return;
	}

	cxgbit_skcb_rx_backlog_fn(skb)(csk, skb);
	spin_unlock(&csk->lock);
}
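
/* As above, but holds a temporary csk reference across the call. */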
static void cxgbit_process_rx_cpl(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	cxgbit_get_csk(csk);
	__cxgbit_process_rx_cpl(csk, skb);
	cxgbit_put_csk(csk);
}
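
/*
 * Demultiplex connection-scoped CPL messages to their backlog
 * handlers. CPL_FW4_ACK is dispatched without taking an extra csk
 * reference; the other events take one around the handler call.
 */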
static void cxgbit_rx_cpl(struct cxgbit_device *cdev, struct sk_buff *skb)
{
	struct cxgbit_sock *csk;
	struct cpl_tx_data *cpl = cplhdr(skb);
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct tid_info *t = lldi->tids;
	unsigned int tid = GET_TID(cpl);
	u8 opcode = cxgbit_skcb_rx_opcode(skb);
	bool ref = true;

	switch (opcode) {
	case CPL_FW4_ACK:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_fw4_ack;
		ref = false;
		break;
	case CPL_PEER_CLOSE:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_peer_close;
		break;
	case CPL_CLOSE_CON_RPL:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_close_con_rpl;
		break;
	case CPL_ABORT_REQ_RSS:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_req_rss;
		break;
	case CPL_ABORT_RPL_RSS:
		cxgbit_skcb_rx_backlog_fn(skb) = cxgbit_abort_rpl_rss;
		break;
	default:
		goto rel_skb;
	}

	csk = lookup_tid(t, tid);
	if (unlikely(!csk)) {
		pr_err("can't find conn. for tid %u.\n", tid);
		goto rel_skb;
	}

	if (ref)
		cxgbit_process_rx_cpl(csk, skb);
	else
		__cxgbit_process_rx_cpl(csk, skb);

	return;
rel_skb:
	__kfree_skb(skb);
}
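
/* CPL message handlers, indexed by CPL opcode. */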
cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS] = {
	[CPL_PASS_OPEN_RPL]	= cxgbit_pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL]	= cxgbit_close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ]	= cxgbit_pass_accept_req,
	[CPL_PASS_ESTABLISH]	= cxgbit_pass_establish,
	[CPL_SET_TCB_RPL]	= cxgbit_set_tcb_rpl,
	[CPL_RX_DATA]		= cxgbit_rx_data,
	[CPL_FW4_ACK]		= cxgbit_rx_cpl,
	[CPL_PEER_CLOSE]	= cxgbit_rx_cpl,
	[CPL_CLOSE_CON_RPL]	= cxgbit_rx_cpl,
	[CPL_ABORT_REQ_RSS]	= cxgbit_rx_cpl,
	[CPL_ABORT_RPL_RSS]	= cxgbit_rx_cpl,
};