tcpv6_offload.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 *
 *	TCPv6 GSO/GRO support
 */
#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/inet6_hashtables.h>
#include <net/gro.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>
#include "ip6_offload.h"
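
/* Decide whether an incoming skb should be aggregated as a fraglist GRO
 * flow.  If the device advertises NETIF_F_GRO_FRAGLIST, inherit the
 * decision from an already-tracked flow when one exists; otherwise mark
 * the skb for fraglist GRO only when no established local socket matches,
 * i.e. the flow is likely being forwarded rather than locally terminated.
 */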
static void tcp6_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
				    struct tcphdr *th)
{
#if IS_ENABLED(CONFIG_IPV6)
	const struct ipv6hdr *hdr;
	struct sk_buff *p;
	struct sock *sk;
	struct net *net;
	int iif, sdif;

	if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST)))
		return;

	p = tcp_gro_lookup(head, th);
	if (p) {
		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
		return;
	}

	inet6_get_iif_sdif(skb, &iif, &sdif);
	hdr = skb_gro_network_header(skb);
	net = dev_net(skb->dev);
	sk = __inet6_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					iif, sdif);
	NAPI_GRO_CB(skb)->is_flist = !sk;
	if (sk)
		sock_put(sk);
#endif /* IS_ENABLED(CONFIG_IPV6) */
}
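
/* GRO receive handler for TCPv6: validate the checksum against the IPv6
 * pseudo-header, pull the TCP header, and hand the skb to the generic
 * TCP GRO engine.  Any failure marks the skb to be flushed.
 */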
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct tcphdr *th;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      ip6_gro_compute_pseudo))
		goto flush;

	th = tcp_gro_pull_header(skb);
	if (!th)
		goto flush;

	tcp6_check_fraglist_gro(head, skb, th);

	return tcp_gro_receive(head, skb, th);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}
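
/* Finalize an aggregated TCPv6 skb.  Fraglist packets only need their GSO
 * metadata and checksum state updated; for regular GRO packets, recompute
 * the pseudo-header checksum and let tcp_gro_complete() do the rest.
 */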
INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
{
	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
	const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
	struct tcphdr *th = tcp_hdr(skb);

	if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
		skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV6;
		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

		__skb_incr_checksum_unnecessary(skb);

		return 0;
	}

	th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
				  &iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;

	tcp_gro_complete(skb);
	return 0;
}
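
/* Rewrite one TCP port in a segment, patching the checksum incrementally
 * rather than recomputing it from scratch.
 */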
static void __tcpv6_gso_segment_csum(struct sk_buff *seg,
				     __be16 *oldport, __be16 newport)
{
	struct tcphdr *th;

	if (*oldport == newport)
		return;

	th = tcp_hdr(seg);
	inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
	*oldport = newport;
}
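
/* After skb_segment_list(), trailing segments still carry the headers they
 * arrived with.  If the head's addresses or ports differ (e.g. the packet
 * was NATed while aggregated), propagate the head segment's addresses and
 * ports to all following segments, fixing up their checksums as we go.
 */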
static struct sk_buff *__tcpv6_gso_segment_list_csum(struct sk_buff *segs)
{
	const struct tcphdr *th;
	const struct ipv6hdr *iph;
	struct sk_buff *seg;
	struct tcphdr *th2;
	struct ipv6hdr *iph2;

	seg = segs;
	th = tcp_hdr(seg);
	iph = ipv6_hdr(seg);
	th2 = tcp_hdr(seg->next);
	iph2 = ipv6_hdr(seg->next);

	if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
	    ipv6_addr_equal(&iph->saddr, &iph2->saddr) &&
	    ipv6_addr_equal(&iph->daddr, &iph2->daddr))
		return segs;

	while ((seg = seg->next)) {
		th2 = tcp_hdr(seg);
		iph2 = ipv6_hdr(seg);

		iph2->saddr = iph->saddr;
		iph2->daddr = iph->daddr;
		__tcpv6_gso_segment_csum(seg, &th2->source, th->source);
		__tcpv6_gso_segment_csum(seg, &th2->dest, th->dest);
	}

	return segs;
}
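
/* Split a fraglist GSO skb back into its original packets and make the
 * trailing segments' headers consistent with the head segment.
 */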
static struct sk_buff *__tcp6_gso_segment_list(struct sk_buff *skb,
					       netdev_features_t features)
{
	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
	if (IS_ERR(skb))
		return skb;

	return __tcpv6_gso_segment_list_csum(skb);
}
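
/* GSO segmentation entry point for TCPv6.  Fraglist skbs whose head holds
 * exactly one MSS of payload take the list-segmentation path; everything
 * else falls through to the generic tcp_gso_segment(), computing the
 * pseudo-header checksum first if the stack has not already set up
 * CHECKSUM_PARTIAL.
 */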
static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct tcphdr *th;

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*th)))
		return ERR_PTR(-EINVAL);

	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
		struct tcphdr *th = tcp_hdr(skb);

		if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
			return __tcp6_gso_segment_list(skb, features);

		skb->ip_summed = CHECKSUM_NONE;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up pseudo header, usually expect stack to have done
		 * this.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	}

	return tcp_gso_segment(skb, features);
}
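
/* Register the TCPv6 GSO/GRO callbacks with the IPv6 offload layer for
 * IPPROTO_TCP.
 */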
int __init tcpv6_offload_init(void)
{
	net_hotdata.tcpv6_offload = (struct net_offload) {
		.callbacks = {
			.gso_segment	= tcp6_gso_segment,
			.gro_receive	= tcp6_gro_receive,
			.gro_complete	= tcp6_gro_complete,
		},
	};
	return inet6_add_offload(&net_hotdata.tcpv6_offload, IPPROTO_TCP);
}