/* chtls_cm.h - connection management definitions for the Chelsio TLS driver */
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __CHTLS_CM_H__
#define __CHTLS_CM_H__
  10. /*
  11. * TCB settings
  12. */
  13. /* 3:0 */
  14. #define TCB_ULP_TYPE_W 0
  15. #define TCB_ULP_TYPE_S 0
  16. #define TCB_ULP_TYPE_M 0xfULL
  17. #define TCB_ULP_TYPE_V(x) ((x) << TCB_ULP_TYPE_S)
  18. /* 11:4 */
  19. #define TCB_ULP_RAW_W 0
  20. #define TCB_ULP_RAW_S 4
  21. #define TCB_ULP_RAW_M 0xffULL
  22. #define TCB_ULP_RAW_V(x) ((x) << TCB_ULP_RAW_S)
  23. #define TF_TLS_KEY_SIZE_S 7
  24. #define TF_TLS_KEY_SIZE_V(x) ((x) << TF_TLS_KEY_SIZE_S)
  25. #define TF_TLS_CONTROL_S 2
  26. #define TF_TLS_CONTROL_V(x) ((x) << TF_TLS_CONTROL_S)
  27. #define TF_TLS_ACTIVE_S 1
  28. #define TF_TLS_ACTIVE_V(x) ((x) << TF_TLS_ACTIVE_S)
  29. #define TF_TLS_ENABLE_S 0
  30. #define TF_TLS_ENABLE_V(x) ((x) << TF_TLS_ENABLE_S)
  31. #define TF_RX_QUIESCE_S 15
  32. #define TF_RX_QUIESCE_V(x) ((x) << TF_RX_QUIESCE_S)
  33. /*
  34. * Max receive window supported by HW in bytes. Only a small part of it can
  35. * be set through option0, the rest needs to be set through RX_DATA_ACK.
  36. */
  37. #define MAX_RCV_WND ((1U << 27) - 1)
  38. #define MAX_MSS 65536
  39. /*
  40. * Min receive window. We want it to be large enough to accommodate receive
  41. * coalescing, handle jumbo frames, and not trigger sender SWS avoidance.
  42. */
  43. #define MIN_RCV_WND (24 * 1024U)
  44. #define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000))
  45. /* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
  46. #define TX_HEADER_LEN \
  47. (sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
  48. #define TX_TLSHDR_LEN \
  49. (sizeof(struct fw_tlstx_data_wr) + sizeof(struct cpl_tx_tls_sfo) + \
  50. sizeof(struct sge_opaque_hdr))
  51. #define TXDATA_SKB_LEN 128
  52. enum {
  53. CPL_TX_TLS_SFO_TYPE_CCS,
  54. CPL_TX_TLS_SFO_TYPE_ALERT,
  55. CPL_TX_TLS_SFO_TYPE_HANDSHAKE,
  56. CPL_TX_TLS_SFO_TYPE_DATA,
  57. CPL_TX_TLS_SFO_TYPE_HEARTBEAT,
  58. };
  59. enum {
  60. TLS_HDR_TYPE_CCS = 20,
  61. TLS_HDR_TYPE_ALERT,
  62. TLS_HDR_TYPE_HANDSHAKE,
  63. TLS_HDR_TYPE_RECORD,
  64. TLS_HDR_TYPE_HEARTBEAT,
  65. };
  66. typedef void (*defer_handler_t)(struct chtls_dev *dev, struct sk_buff *skb);
  67. extern struct request_sock_ops chtls_rsk_ops;
  68. struct deferred_skb_cb {
  69. defer_handler_t handler;
  70. struct chtls_dev *dev;
  71. };
  72. #define DEFERRED_SKB_CB(skb) ((struct deferred_skb_cb *)(skb)->cb)
  73. #define failover_flowc_wr_len offsetof(struct fw_flowc_wr, mnemval[3])
  74. #define WR_SKB_CB(skb) ((struct wr_skb_cb *)(skb)->cb)
  75. #define ACCEPT_QUEUE(sk) (&inet_csk(sk)->icsk_accept_queue.rskq_accept_head)
  76. #define SND_WSCALE(tp) ((tp)->rx_opt.snd_wscale)
  77. #define RCV_WSCALE(tp) ((tp)->rx_opt.rcv_wscale)
  78. #define USER_MSS(tp) ((tp)->rx_opt.user_mss)
  79. #define TS_RECENT_STAMP(tp) ((tp)->rx_opt.ts_recent_stamp)
  80. #define WSCALE_OK(tp) ((tp)->rx_opt.wscale_ok)
  81. #define TSTAMP_OK(tp) ((tp)->rx_opt.tstamp_ok)
  82. #define SACK_OK(tp) ((tp)->rx_opt.sack_ok)
  83. #define INC_ORPHAN_COUNT(sk) percpu_counter_inc((sk)->sk_prot->orphan_count)
  84. /* TLS SKB */
  85. #define skb_ulp_tls_inline(skb) (ULP_SKB_CB(skb)->ulp.tls.ofld)
  86. #define skb_ulp_tls_iv_imm(skb) (ULP_SKB_CB(skb)->ulp.tls.iv)
  87. void chtls_defer_reply(struct sk_buff *skb, struct chtls_dev *dev,
  88. defer_handler_t handler);
  89. /*
  90. * Returns true if the socket is in one of the supplied states.
  91. */
  92. static inline unsigned int sk_in_state(const struct sock *sk,
  93. unsigned int states)
  94. {
  95. return states & (1 << sk->sk_state);
  96. }
  97. static void chtls_rsk_destructor(struct request_sock *req)
  98. {
  99. /* do nothing */
  100. }
  101. static inline void chtls_init_rsk_ops(struct proto *chtls_tcp_prot,
  102. struct request_sock_ops *chtls_tcp_ops,
  103. struct proto *tcp_prot, int family)
  104. {
  105. memset(chtls_tcp_ops, 0, sizeof(*chtls_tcp_ops));
  106. chtls_tcp_ops->family = family;
  107. chtls_tcp_ops->obj_size = sizeof(struct tcp_request_sock);
  108. chtls_tcp_ops->destructor = chtls_rsk_destructor;
  109. chtls_tcp_ops->slab = tcp_prot->rsk_prot->slab;
  110. chtls_tcp_prot->rsk_prot = chtls_tcp_ops;
  111. }
  112. static inline void chtls_reqsk_free(struct request_sock *req)
  113. {
  114. if (req->rsk_listener)
  115. sock_put(req->rsk_listener);
  116. kmem_cache_free(req->rsk_ops->slab, req);
  117. }
  118. #define DECLARE_TASK_FUNC(task, task_param) \
  119. static void task(struct work_struct *task_param)
  120. static inline void sk_wakeup_sleepers(struct sock *sk, bool interruptable)
  121. {
  122. struct socket_wq *wq;
  123. rcu_read_lock();
  124. wq = rcu_dereference(sk->sk_wq);
  125. if (skwq_has_sleeper(wq)) {
  126. if (interruptable)
  127. wake_up_interruptible(sk_sleep(sk));
  128. else
  129. wake_up_all(sk_sleep(sk));
  130. }
  131. rcu_read_unlock();
  132. }
  133. static inline void chtls_set_req_port(struct request_sock *oreq,
  134. __be16 source, __be16 dest)
  135. {
  136. inet_rsk(oreq)->ir_rmt_port = source;
  137. inet_rsk(oreq)->ir_num = ntohs(dest);
  138. }
  139. static inline void chtls_set_req_addr(struct request_sock *oreq,
  140. __be32 local_ip, __be32 peer_ip)
  141. {
  142. inet_rsk(oreq)->ir_loc_addr = local_ip;
  143. inet_rsk(oreq)->ir_rmt_addr = peer_ip;
  144. }
  145. static inline void chtls_free_skb(struct sock *sk, struct sk_buff *skb)
  146. {
  147. skb_dst_set(skb, NULL);
  148. __skb_unlink(skb, &sk->sk_receive_queue);
  149. __kfree_skb(skb);
  150. }
  151. static inline void chtls_kfree_skb(struct sock *sk, struct sk_buff *skb)
  152. {
  153. skb_dst_set(skb, NULL);
  154. __skb_unlink(skb, &sk->sk_receive_queue);
  155. kfree_skb(skb);
  156. }
  157. static inline void chtls_reset_wr_list(struct chtls_sock *csk)
  158. {
  159. csk->wr_skb_head = NULL;
  160. csk->wr_skb_tail = NULL;
  161. }
  162. static inline void enqueue_wr(struct chtls_sock *csk, struct sk_buff *skb)
  163. {
  164. WR_SKB_CB(skb)->next_wr = NULL;
  165. skb_get(skb);
  166. if (!csk->wr_skb_head)
  167. csk->wr_skb_head = skb;
  168. else
  169. WR_SKB_CB(csk->wr_skb_tail)->next_wr = skb;
  170. csk->wr_skb_tail = skb;
  171. }
  172. static inline struct sk_buff *dequeue_wr(struct sock *sk)
  173. {
  174. struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
  175. struct sk_buff *skb = NULL;
  176. skb = csk->wr_skb_head;
  177. if (likely(skb)) {
  178. /* Don't bother clearing the tail */
  179. csk->wr_skb_head = WR_SKB_CB(skb)->next_wr;
  180. WR_SKB_CB(skb)->next_wr = NULL;
  181. }
  182. return skb;
  183. }
#endif /* __CHTLS_CM_H__ */