chcr_ipsec.c

/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Atul Gupta (atul.gupta@chelsio.com)
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <net/esp.h>
#include <net/xfrm.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256
#define GCM_ESP_IV_SIZE     8

static int chcr_xfrm_add_state(struct xfrm_state *x);
static void chcr_xfrm_del_state(struct xfrm_state *x);
static void chcr_xfrm_free_state(struct xfrm_state *x);
static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);

static const struct xfrmdev_ops chcr_xfrmdev_ops = {
	.xdo_dev_state_add      = chcr_xfrm_add_state,
	.xdo_dev_state_delete   = chcr_xfrm_del_state,
	.xdo_dev_state_free     = chcr_xfrm_free_state,
	.xdo_dev_offload_ok     = chcr_ipsec_offload_ok,
};

/* Add offload xfrms to Chelsio Interface */
void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
{
	struct net_device *netdev = NULL;
	int i;

	for (i = 0; i < lld->nports; i++) {
		netdev = lld->ports[i];
		if (!netdev)
			continue;
		netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
		netdev->hw_enc_features |= NETIF_F_HW_ESP;
		netdev->features |= NETIF_F_HW_ESP;
		rtnl_lock();
		netdev_change_features(netdev);
		rtnl_unlock();
	}
}
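
/*
 * Illustrative usage (hypothetical addresses, SPI and key; assumes an
 * iproute2 build that supports the "offload" keyword): once the flags
 * above are set on a port, an SA added as below reaches
 * chcr_xfrm_add_state() through .xdo_dev_state_add:
 *
 *	ip xfrm state add src 10.0.0.1 dst 10.0.0.2 proto esp spi 0x1000 \
 *		mode transport aead 'rfc4106(gcm(aes))' \
 *		0x0123456789abcdef0123456789abcdef01234567 128 \
 *		offload dev eth0 dir out
 */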
static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
					 struct ipsec_sa_entry *sa_entry)
{
	int hmac_ctrl;
	int authsize = x->aead->alg_icv_len / 8;

	sa_entry->authsize = authsize;

	switch (authsize) {
	case ICV_8:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		break;
	case ICV_12:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		break;
	case ICV_16:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		break;
	default:
		return -EINVAL;
	}
	return hmac_ctrl;
}
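
/*
 * Worked example (for illustration only): an SA negotiated with a 96-bit
 * GCM ICV has x->aead->alg_icv_len == 96, so authsize becomes 12 bytes
 * and the function returns CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT, i.e. the
 * tag is truncated to the IPsec-standard 96 bits.
 */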
static inline int chcr_ipsec_setkey(struct xfrm_state *x,
				    struct ipsec_sa_entry *sa_entry)
{
	struct crypto_cipher *cipher;
	int keylen = (x->aead->alg_key_len + 7) / 8;
	unsigned char *key = x->aead->alg_key;
	int ck_size, key_ctx_size = 0;
	unsigned char ghash_h[AEAD_H_SIZE];
	int ret = 0;

	if (keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(sa_entry->salt, key + keylen, 4);
	}

	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(sa_entry->key, key, keylen);
	sa_entry->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) +
		       ((DIV_ROUND_UP(keylen, 16)) << 4) +
		       AEAD_H_SIZE;

	sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						 CHCR_KEYCTX_MAC_KEY_SIZE_128,
						 0, 0,
						 key_ctx_size >> 4);

	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in key context
	 */
	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
	if (IS_ERR(cipher)) {
		sa_entry->enckey_len = 0;
		ret = -ENOMEM;
		goto out;
	}

	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret) {
		sa_entry->enckey_len = 0;
		goto out1;
	}
	memset(ghash_h, 0, AEAD_H_SIZE);
	crypto_cipher_encrypt_one(cipher, ghash_h, ghash_h);
	memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
	       16), ghash_h, AEAD_H_SIZE);
	sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
			      AEAD_H_SIZE;
out1:
	crypto_free_cipher(cipher);
out:
	return ret;
}
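
/*
 * Worked example (illustrative): for rfc4106(gcm(aes)) with a 128-bit key,
 * x->aead->alg_key_len is 160 bits (128-bit AES key + 32-bit salt), so
 * keylen starts at 20 bytes, the trailing 4 bytes are saved as the salt
 * and keylen drops to AES_KEYSIZE_128 (16).  The key context then holds
 * the cipher key rounded up to a 16-byte multiple, followed by the
 * 16-byte GHASH subkey H = AES_K(0^128), giving kctx_len == 32 and
 * key_ctx_size == sizeof(struct _key_ctx) + 32.
 */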
/*
 * chcr_xfrm_add_state
 * returns 0 on success, negative error if failed to send message to FPGA
 * positive error if FPGA returned a bad response
 */
static int chcr_xfrm_add_state(struct xfrm_state *x)
{
	struct ipsec_sa_entry *sa_entry;
	int res = 0;

	if (x->props.aalgo != SADB_AALG_NONE) {
		pr_debug("CHCR: Cannot offload authenticated xfrm states\n");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		pr_debug("CHCR: Cannot offload compressed xfrm states\n");
		return -EINVAL;
	}
	if (x->props.flags & XFRM_STATE_ESN) {
		pr_debug("CHCR: Cannot offload ESN xfrm states\n");
		return -EINVAL;
	}
	if (x->props.family != AF_INET &&
	    x->props.family != AF_INET6) {
		pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
		return -EINVAL;
	}
	if (x->props.mode != XFRM_MODE_TRANSPORT &&
	    x->props.mode != XFRM_MODE_TUNNEL) {
		pr_debug("CHCR: Only transport and tunnel xfrm offload\n");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		pr_debug("CHCR: Only ESP xfrm state offloaded\n");
		return -EINVAL;
	}
	if (x->encap) {
		pr_debug("CHCR: Encapsulated xfrm state not offloaded\n");
		return -EINVAL;
	}
	if (!x->aead) {
		pr_debug("CHCR: Cannot offload xfrm states without aead\n");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128 &&
	    x->aead->alg_icv_len != 96) {
		pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
		return -EINVAL;
	}
	if ((x->aead->alg_key_len != 128 + 32) &&
	    (x->aead->alg_key_len != 256 + 32)) {
		pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
		return -EINVAL;
	}
	if (x->tfcpad) {
		pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n");
		return -EINVAL;
	}
	if (!x->geniv) {
		pr_debug("CHCR: Cannot offload xfrm states without geniv\n");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n");
		return -EINVAL;
	}

	sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
	if (!sa_entry) {
		res = -ENOMEM;
		goto out;
	}

	sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
	chcr_ipsec_setkey(x, sa_entry);
	x->xso.offload_handle = (unsigned long)sa_entry;
	try_module_get(THIS_MODULE);
out:
	return res;
}
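
/*
 * In short, the only SAs accepted for offload are ESP states using
 * AES-GCM (rfc4106-style 128/256-bit key plus 32-bit salt, 96- or
 * 128-bit ICV, seqiv IV generation) in transport or tunnel mode, without
 * ESN, UDP encapsulation or TFC padding.  Anything else is rejected with
 * -EINVAL and is not offloaded to the adapter.
 */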
static void chcr_xfrm_del_state(struct xfrm_state *x)
{
	/* do nothing */
	if (!x->xso.offload_handle)
		return;
}

static void chcr_xfrm_free_state(struct xfrm_state *x)
{
	struct ipsec_sa_entry *sa_entry;

	if (!x->xso.offload_handle)
		return;

	sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
	kfree(sa_entry);
	module_put(THIS_MODULE);
}

static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	/* Offload with IP options is not supported yet */
	if (ip_hdr(skb)->ihl > 5)
		return false;

	return true;
}

static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
{
	int hdrlen;

	hdrlen = sizeof(struct fw_ulptx_wr) +
		 sizeof(struct chcr_ipsec_req) + kctx_len;

	hdrlen += sizeof(struct cpl_tx_pkt);
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}
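
/*
 * is_eth_imm() decides whether the whole Ethernet frame can be copied
 * inline into the work request: if the WR header, the IPsec request
 * header, the key context and the CPL_TX_PKT header plus the frame all
 * fit within MAX_IMM_TX_PKT_LEN (256 bytes), it returns the header
 * length so the caller sends immediate data; otherwise it returns 0 and
 * the frame is attached as a scatter/gather list instead.
 */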
static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
					     unsigned int kctx_len)
{
	unsigned int flits;
	int hdrlen = is_eth_imm(skb, kctx_len);

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data.  In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */
	if (hdrlen)
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments.  We also include the flits necessary
	 * for the TX Packet Work Request and CPL.  We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits += (sizeof(struct fw_ulptx_wr) +
		  sizeof(struct chcr_ipsec_req) +
		  kctx_len +
		  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
	return flits;
}
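
/*
 * A "flit" here is one 64-bit (8-byte) unit of Tx descriptor space, hence
 * the divisions by sizeof(__be64).  In the immediate case the frame plus
 * headers is simply rounded up to whole flits; in the SGL case sgl_len()
 * accounts for the ulptx_sgl needed to describe skb->data and each page
 * fragment, and the fixed header block is added on top.
 */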
inline void *copy_cpltx_pktxt(struct sk_buff *skb,
			      struct net_device *dev,
			      void *pos)
{
	struct cpl_tx_pkt_core *cpl;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	u32 ctrl0, qidx;
	u64 cntrl = 0;
	int left;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	left = (void *)q->q.stat - pos;
	if (!left)
		pos = q->q.desc;

	cpl = (struct cpl_tx_pkt_core *)pos;

	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
			       TXPKT_PF_V(adap->pf);
	if (skb_vlan_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
	}

	cpl->ctrl0 = htonl(ctrl0);
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	pos += sizeof(struct cpl_tx_pkt_core);
	return pos;
}

inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
				  struct net_device *dev,
				  void *pos,
				  struct ipsec_sa_entry *sa_entry)
{
	struct _key_ctx *key_ctx;
	int left, eoq, key_len;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	unsigned int qidx;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];
	key_len = sa_entry->kctx_len;

	/* end of queue, reset pos to start of queue */
	eoq = (void *)q->q.stat - pos;
	left = eoq;
	if (!eoq) {
		pos = q->q.desc;
		left = 64 * q->q.size;
	}

	/* Copy the Key context header */
	key_ctx = (struct _key_ctx *)pos;
	key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
	memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
	pos += sizeof(struct _key_ctx);
	left -= sizeof(struct _key_ctx);

	if (likely(key_len <= left)) {
		memcpy(key_ctx->key, sa_entry->key, key_len);
		pos += key_len;
	} else {
		memcpy(pos, sa_entry->key, left);
		memcpy(q->q.desc, sa_entry->key + left,
		       key_len - left);
		pos = (u8 *)q->q.desc + (key_len - left);
	}
	/* Copy CPL TX PKT XT */
	pos = copy_cpltx_pktxt(skb, dev, pos);

	return pos;
}
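
/*
 * Both helpers have to cope with the Tx descriptor ring wrapping:
 * q->q.stat marks the end of the ring, and each descriptor is 64 bytes
 * (hence "left = 64 * q->q.size" when restarting at q->q.desc).  If the
 * key material straddles the end of the ring, the tail is copied to the
 * start of the ring and "pos" continues from there before the
 * CPL_TX_PKT_XT header is appended.
 */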
inline void *chcr_crypto_wreq(struct sk_buff *skb,
			      struct net_device *dev,
			      void *pos,
			      int credits,
			      struct ipsec_sa_entry *sa_entry)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	unsigned int immdatalen = 0;
	unsigned int ivsize = GCM_ESP_IV_SIZE;
	struct chcr_ipsec_wr *wr;
	unsigned int flits;
	u32 wr_mid;
	int qidx = skb_get_queue_mapping(skb);
	struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
	unsigned int kctx_len = sa_entry->kctx_len;
	int qid = q->q.cntxt_id;

	atomic_inc(&adap->chcr_stats.ipsec_cnt);

	flits = calc_tx_sec_flits(skb, kctx_len);

	if (is_eth_imm(skb, kctx_len))
		immdatalen = skb->len;

	/* WR Header */
	wr = (struct chcr_ipsec_wr *)pos;
	wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
	wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(flits, 2));

	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		netif_tx_stop_queue(q->txq);
		q->q.stops++;
		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}
	wr_mid |= FW_ULPTX_WR_DATA_F;
	wr->wreq.flowid_len16 = htonl(wr_mid);

	/* ULPTX */
	wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
	wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1);

	/* Sub-command */
	wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen);
	wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					 sizeof(wr->req.key_ctx) +
					 kctx_len +
					 sizeof(struct cpl_tx_pkt_core) +
					 immdatalen);

	/* CPL_SEC_PDU */
	wr->req.sec_cpl.op_ivinsrtofst = htonl(
				CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
				CPL_TX_SEC_PDU_CPLLEN_V(2) |
				CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
				CPL_TX_SEC_PDU_IVINSRTOFST_V(
				(skb_transport_offset(skb) +
				 sizeof(struct ip_esp_hdr) + 1)));

	wr->req.sec_cpl.pldlen = htonl(skb->len);

	wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
				(skb_transport_offset(skb) + 1),
				(skb_transport_offset(skb) +
				 sizeof(struct ip_esp_hdr)),
				(skb_transport_offset(skb) +
				 sizeof(struct ip_esp_hdr) +
				 GCM_ESP_IV_SIZE + 1), 0);

	wr->req.sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, skb_transport_offset(skb) +
					   sizeof(struct ip_esp_hdr) +
					   GCM_ESP_IV_SIZE + 1,
					   sa_entry->authsize,
					   sa_entry->authsize);
	wr->req.sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
					 CHCR_SCMD_CIPHER_MODE_AES_GCM,
					 CHCR_SCMD_AUTH_MODE_GHASH,
					 sa_entry->hmac_ctrl,
					 ivsize >> 1);
	wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
								 0, 0, 0);

	pos += sizeof(struct fw_ulptx_wr) +
	       sizeof(struct ulp_txpkt) +
	       sizeof(struct ulptx_idata) +
	       sizeof(struct cpl_tx_sec_pdu);

	pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);

	return pos;
}
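
/*
 * The work request built above, and completed by the caller, is laid out
 * roughly as:
 *
 *	FW_ULPTX_WR | ULP_TXPKT | ULPTX_IDATA | CPL_TX_SEC_PDU |
 *	key context (header + salt + key + GHASH H) | CPL_TX_PKT_XT |
 *	packet (immediate data or scatter/gather list)
 *
 * The CPL_TX_SEC_PDU offsets tell the crypto engine that the AAD is the
 * ESP header (SPI and sequence number), the 8-byte GCM IV sits right
 * after it, encryption starts after the IV, and an ICV of
 * sa_entry->authsize bytes is inserted at the end of the ciphertext.
 */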
/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Returns the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	WARN_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}
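
/*
 * One SGE Tx descriptor holds 8 flits (64 bytes), which is why both the
 * WARN_ON bound and the rounding divide by 8.  For example, a 20-flit
 * (160-byte) work request occupies DIV_ROUND_UP(20, 8) = 3 descriptors.
 */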
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}

static void eth_txq_stop(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);
	q->q.stops++;
}

static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}

/*
 * chcr_ipsec_xmit called from ULD Tx handler
 */
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ipsec_sa_entry *sa_entry;
	u64 *pos, *end, *before, *sgl;
	int qidx, left, credits;
	unsigned int flits = 0, ndesc, kctx_len;
	struct adapter *adap;
	struct sge_eth_txq *q;
	struct port_info *pi;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
	bool immediate = false;

	if (!x->xso.offload_handle)
		return NETDEV_TX_BUSY;

	sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
	kctx_len = sa_entry->kctx_len;

	if (skb->sp->len != 1) {
out_free:       dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	cxgb4_reclaim_completed_tx(adap, &q->q, true);

	flits = calc_tx_sec_flits(skb, sa_entry->kctx_len);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
			dev->name, qidx, credits, ndesc, txq_avail(&q->q),
			flits);
		return NETDEV_TX_BUSY;
	}

	if (is_eth_imm(skb, kctx_len))
		immediate = true;

	if (!immediate &&
	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
		q->mapping_err++;
		goto out_free;
	}

	pos = (u64 *)&q->q.desc[q->q.pidx];
	before = (u64 *)pos;
	end = (u64 *)pos + flits;
	/* Setup IPSec CPL */
	pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
				       credits, sa_entry);
	if (before > (u64 *)pos) {
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
	}
	if (pos == (u64 *)q->q.stat) {
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
		pos = (void *)q->q.desc;
	}

	sgl = (void *)pos;
	if (immediate) {
		cxgb4_inline_tx_skb(skb, &q->q, sgl);
		dev_consume_skb_any(skb);
	} else {
		int last_desc;

		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
				0, addr);
		skb_orphan(skb);

		last_desc = q->q.pidx + ndesc - 1;
		if (last_desc >= q->q.size)
			last_desc -= q->q.size;
		q->q.sdesc[last_desc].skb = skb;
		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
	}
	txq_advance(&q->q, ndesc);

	cxgb4_ring_tx_db(adap, &q->q, ndesc);
	return NETDEV_TX_OK;
}
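
/*
 * Transmit path summary: reclaim completed descriptors, make sure enough
 * descriptors are free for this work request, DMA-map the skb unless it
 * can be sent as immediate data, build the crypto work request in place
 * in the ring (handling ring wrap), record the skb against its last
 * descriptor for later completion in the SGL case, then advance the
 * producer index and ring the doorbell.
 */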