gso.c
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <net/gso.h>
#include <net/gro.h>

/**
 * skb_eth_gso_segment - segmentation handler for ethernet protocols.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 * @type: Ethernet Protocol ID
 */
struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
				    netdev_features_t features, __be16 type)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	return segs;
}
EXPORT_SYMBOL(skb_eth_gso_segment);
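
/*
 * Illustrative sketch (not part of the original file): a protocol GSO
 * handler can delegate to skb_eth_gso_segment() once it knows the inner
 * EtherType, in the style of the MPLS and NSH offloads. The function
 * name here is an assumption for the example.
 */
static struct sk_buff *example_gso_segment(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 inner_proto)
{
	/* Delegate to whatever packet_offload is registered for the
	 * inner EtherType; ERR_PTR(-EPROTONOSUPPORT) comes back if
	 * nothing matches.
	 */
	return skb_eth_gso_segment(skb, features, inner_proto);
}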

/**
 * skb_mac_gso_segment - mac layer segmentation handler.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &net_hotdata.offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);

/* openvswitch calls this on rx path, so we need a different check.
 */
static bool skb_needs_check(const struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL &&
		       skb->ip_summed != CHECKSUM_UNNECESSARY;

	return skb->ip_summed == CHECKSUM_NONE;
}

/**
 * __skb_gso_segment - Perform segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 * @tx_path: whether it is called in TX path
 *
 * This function segments the given skb and returns a list of segments.
 *
 * It may return NULL if the skb requires no segmentation. This is
 * only possible when GSO is used for verifying header integrity.
 *
 * Segmentation preserves SKB_GSO_CB_OFFSET bytes of previous skb cb.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	struct sk_buff *segs;

	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		/* We're going to init ->check field in TCP or UDP header */
		err = skb_cow_head(skb, 0);
		if (err < 0)
			return ERR_PTR(err);
	}

	/* Only report GSO partial support if it will enable us to
	 * support segmentation on this frame without needing additional
	 * work.
	 */
	if (features & NETIF_F_GSO_PARTIAL) {
		netdev_features_t partial_features = NETIF_F_GSO_ROBUST;
		struct net_device *dev = skb->dev;

		partial_features |= dev->features & dev->gso_partial_features;
		if (!skb_gso_ok(skb, features | partial_features))
			features &= ~NETIF_F_GSO_PARTIAL;
	}

	BUILD_BUG_ON(SKB_GSO_CB_OFFSET +
		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	SKB_GSO_CB(skb)->encap_level = 0;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	segs = skb_mac_gso_segment(skb, features);

	if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs)))
		skb_warn_bad_offload(skb);

	return segs;
}
EXPORT_SYMBOL(__skb_gso_segment);
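
/*
 * Illustrative sketch (assumption, not part of the original file): the
 * usual calling pattern. skb_gso_segment() in <net/gso.h> wraps
 * __skb_gso_segment(skb, features, true); the caller receives a
 * ->next-linked list of segments and still owns the original skb.
 */
static int example_segment_and_count(struct sk_buff *skb,
				     netdev_features_t features)
{
	struct sk_buff *segs = skb_gso_segment(skb, features);
	int n = 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	while (segs) {
		struct sk_buff *next = segs->next;

		segs->next = NULL;
		n++;
		kfree_skb(segs);	/* a real caller would transmit it */
		segs = next;
	}
	return n;
}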

/**
 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_transport_seglen is used to determine the real size of the
 * individual segments, including Layer4 headers (TCP/UDP).
 *
 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
 */
static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int thlen = 0;

	if (skb->encapsulation) {
		thlen = skb_inner_transport_header(skb) -
			skb_transport_header(skb);

		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			thlen += inner_tcp_hdrlen(skb);
	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
		thlen = tcp_hdrlen(skb);
	} else if (unlikely(skb_is_gso_sctp(skb))) {
		thlen = sizeof(struct sctphdr);
	} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
		thlen = sizeof(struct udphdr);
	}

	/* UFO sets gso_size to the size of the fragmentation
	 * payload, i.e. the size of the L4 (UDP) header is already
	 * accounted for.
	 */
	return thlen + shinfo->gso_size;
}
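
/*
 * Worked example (illustrative, not part of the original file): for a
 * non-encapsulated TCPv4 GSO skb with a 32-byte TCP header (20 bytes of
 * base header plus 12 bytes of timestamp options) and gso_size = 1448,
 * the function above returns 32 + 1448 = 1480: the transport-layer size
 * of each segment produced by splitting.
 */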

/**
 * skb_gso_network_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_network_seglen is used to determine the real size of the
 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
 *
 * The MAC/L2 header is not accounted for.
 */
static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) -
			       skb_network_header(skb);

	return hdr_len + skb_gso_transport_seglen(skb);
}

/**
 * skb_gso_mac_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_mac_seglen is used to determine the real size of the
 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
 * headers (TCP/UDP).
 */
static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

	return hdr_len + skb_gso_transport_seglen(skb);
}

/**
 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
 *
 * There are a couple of instances where we have a GSO skb, and we
 * want to determine what size it would be after it is segmented.
 *
 * We might want to check:
 * - L3+L4+payload size (e.g. IP forwarding)
 * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
 *
 * This is a helper to do that correctly considering GSO_BY_FRAGS.
 *
 * @skb: GSO skb
 *
 * @seg_len: The segmented length (from skb_gso_*_seglen). In the
 *           GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
 *
 * @max_len: The maximum permissible length.
 *
 * Returns true if the segmented length <= max length.
 */
static inline bool skb_gso_size_check(const struct sk_buff *skb,
				      unsigned int seg_len,
				      unsigned int max_len)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	const struct sk_buff *iter;

	if (shinfo->gso_size != GSO_BY_FRAGS)
		return seg_len <= max_len;

	/* Undo this so we can re-use header sizes */
	seg_len -= GSO_BY_FRAGS;

	skb_walk_frags(skb, iter) {
		if (seg_len + skb_headlen(iter) > max_len)
			return false;
	}

	return true;
}
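
/*
 * Note (illustrative, not part of the original file): GSO_BY_FRAGS is
 * used by SCTP, where each frag_list member becomes its own segment, so
 * no single gso_size applies. The seglen helpers above then return
 * [header sizes + GSO_BY_FRAGS]; subtracting GSO_BY_FRAGS recovers the
 * header size, which is checked against each fragment's head length.
 */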

/**
 * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
 *
 * @skb: GSO skb
 * @mtu: MTU to validate against
 *
 * skb_gso_validate_network_len validates if a given skb will fit a
 * wanted MTU once split. It considers L3 headers, L4 headers, and the
 * payload.
 */
bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
{
	return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
}
EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
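
/*
 * Illustrative sketch (assumption, modeled on the IPv4 forwarding
 * path's MTU check): a GSO packet only "exceeds" the MTU if it would
 * still not fit once segmented, which is exactly what
 * skb_gso_validate_network_len() decides.
 */
static bool example_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	/* A GSO packet larger than the MTU is fine as long as each
	 * resulting segment fits.
	 */
	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}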

/**
 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
 *
 * @skb: GSO skb
 * @len: length to validate against
 *
 * skb_gso_validate_mac_len validates if a given skb will fit a wanted
 * length once split, including L2, L3 and L4 headers and the payload.
 */
bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
{
	return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
}
EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
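
/*
 * Illustrative sketch (assumption, in the style of a qdisc with a hard
 * per-packet size cap): an oversized GSO skb is acceptable as long as
 * every segment, including its L2 header, stays within the cap.
 */
static bool example_fits_size_cap(const struct sk_buff *skb,
				  unsigned int max_size)
{
	if (skb->len <= max_size)
		return true;

	return skb_is_gso(skb) && skb_gso_validate_mac_len(skb, max_size);
}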