net_test.c

// SPDX-License-Identifier: GPL-2.0-or-later

#include <kunit/test.h>

/* GSO */

#include <linux/skbuff.h>

static const char hdr[] = "abcdefgh";
#define GSO_TEST_SIZE 1000

static void __init_skb(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	memcpy(skb_mac_header(skb), hdr, sizeof(hdr));

	/* skb_segment expects skb->data at start of payload */
	skb_pull(skb, sizeof(hdr));
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	/* proto is arbitrary, as long as not ETH_P_TEB or vlan */
	skb->protocol = htons(ETH_P_ATALK);
	skb_shinfo(skb)->gso_size = GSO_TEST_SIZE;
}

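/*
 * Case identifiers for the parameterized GSO tests below. A few ids get
 * special handling in gso_test_func(): GSO_TEST_GSO_PARTIAL enables
 * NETIF_F_GSO_PARTIAL, GSO_TEST_FRAG_LIST_NON_UNIFORM clears NETIF_F_SG,
 * and GSO_TEST_GSO_BY_FRAGS sets gso_size to GSO_BY_FRAGS.
 */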
enum gso_test_nr {
	GSO_TEST_LINEAR,
	GSO_TEST_NO_GSO,
	GSO_TEST_FRAGS,
	GSO_TEST_FRAGS_PURE,
	GSO_TEST_GSO_PARTIAL,
	GSO_TEST_FRAG_LIST,
	GSO_TEST_FRAG_LIST_PURE,
	GSO_TEST_FRAG_LIST_NON_UNIFORM,
	GSO_TEST_GSO_BY_FRAGS,
};

struct gso_test_case {
	enum gso_test_nr id;
	const char *name;

	/* input */
	unsigned int linear_len;
	unsigned int nr_frags;
	const unsigned int *frags;
	unsigned int nr_frag_skbs;
	const unsigned int *frag_skbs;

	/* output as expected */
	unsigned int nr_segs;
	const unsigned int *segs;
};

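/*
 * Each case describes the input skb layout (linear bytes, page frags and/or
 * frag_list skbs) and the segment lengths expected back from skb_segment().
 */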
static struct gso_test_case cases[] = {
	{
		.id = GSO_TEST_NO_GSO,
		.name = "no_gso",
		.linear_len = GSO_TEST_SIZE,
		.nr_segs = 1,
		.segs = (const unsigned int[]) { GSO_TEST_SIZE },
	},
	{
		.id = GSO_TEST_LINEAR,
		.name = "linear",
		.linear_len = GSO_TEST_SIZE + GSO_TEST_SIZE + 1,
		.nr_segs = 3,
		.segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, 1 },
	},
	{
		.id = GSO_TEST_FRAGS,
		.name = "frags",
		.linear_len = GSO_TEST_SIZE,
		.nr_frags = 2,
		.frags = (const unsigned int[]) { GSO_TEST_SIZE, 1 },
		.nr_segs = 3,
		.segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, 1 },
	},
	{
		.id = GSO_TEST_FRAGS_PURE,
		.name = "frags_pure",
		.nr_frags = 3,
		.frags = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, 2 },
		.nr_segs = 3,
		.segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, 2 },
	},
	{
		.id = GSO_TEST_GSO_PARTIAL,
		.name = "gso_partial",
		.linear_len = GSO_TEST_SIZE,
		.nr_frags = 2,
		.frags = (const unsigned int[]) { GSO_TEST_SIZE, 3 },
		.nr_segs = 2,
		.segs = (const unsigned int[]) { 2 * GSO_TEST_SIZE, 3 },
	},
	{
		/* commit 89319d3801d1: frag_list on mss boundaries */
		.id = GSO_TEST_FRAG_LIST,
		.name = "frag_list",
		.linear_len = GSO_TEST_SIZE,
		.nr_frag_skbs = 2,
		.frag_skbs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE },
		.nr_segs = 3,
		.segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, GSO_TEST_SIZE },
	},
	{
		.id = GSO_TEST_FRAG_LIST_PURE,
		.name = "frag_list_pure",
		.nr_frag_skbs = 2,
		.frag_skbs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE },
		.nr_segs = 2,
		.segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE },
	},
	{
		/* commit 43170c4e0ba7: GRO of frag_list trains */
		.id = GSO_TEST_FRAG_LIST_NON_UNIFORM,
		.name = "frag_list_non_uniform",
		.linear_len = GSO_TEST_SIZE,
		.nr_frag_skbs = 4,
		.frag_skbs = (const unsigned int[]) { GSO_TEST_SIZE, 1, GSO_TEST_SIZE, 2 },
		.nr_segs = 4,
		.segs = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, GSO_TEST_SIZE, 3 },
	},
	{
		/* commit 3953c46c3ac7 ("sk_buff: allow segmenting based on frag sizes") and
		 * commit 90017accff61 ("sctp: Add GSO support")
		 *
		 * "there will be a cover skb with protocol headers and
		 * children ones containing the actual segments"
		 */
		.id = GSO_TEST_GSO_BY_FRAGS,
		.name = "gso_by_frags",
		.nr_frag_skbs = 4,
		.frag_skbs = (const unsigned int[]) { 100, 200, 300, 400 },
		.nr_segs = 4,
		.segs = (const unsigned int[]) { 100, 200, 300, 400 },
	},
};

static void gso_test_case_to_desc(struct gso_test_case *t, char *desc)
{
	sprintf(desc, "%s", t->name);
}
KUNIT_ARRAY_PARAM(gso_test, cases, gso_test_case_to_desc);

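/*
 * Build an skb according to the current case (linear area, page frags,
 * frag_list children), hand it to skb_segment() and verify each returned
 * segment: length, mac/network header placement and copied header bytes.
 */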
static void gso_test_func(struct kunit *test)
{
	const int shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct sk_buff *skb, *segs, *cur, *next, *last;
	const struct gso_test_case *tcase;
	netdev_features_t features;
	struct page *page;
	int i;

	tcase = test->param_value;

	page = alloc_page(GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, page);
	skb = build_skb(page_address(page), sizeof(hdr) + tcase->linear_len + shinfo_size);
	KUNIT_ASSERT_NOT_NULL(test, skb);
	__skb_put(skb, sizeof(hdr) + tcase->linear_len);

	__init_skb(skb);

	if (tcase->nr_frags) {
		unsigned int pg_off = 0;

		page = alloc_page(GFP_KERNEL);
		KUNIT_ASSERT_NOT_NULL(test, page);
		page_ref_add(page, tcase->nr_frags - 1);

		for (i = 0; i < tcase->nr_frags; i++) {
			skb_fill_page_desc(skb, i, page, pg_off, tcase->frags[i]);
			pg_off += tcase->frags[i];
		}
		KUNIT_ASSERT_LE(test, pg_off, PAGE_SIZE);

		skb->data_len = pg_off;
		skb->len += skb->data_len;
		skb->truesize += skb->data_len;
	}

	if (tcase->frag_skbs) {
		unsigned int total_size = 0, total_true_size = 0;
		struct sk_buff *frag_skb, *prev = NULL;

		for (i = 0; i < tcase->nr_frag_skbs; i++) {
			unsigned int frag_size;

			page = alloc_page(GFP_KERNEL);
			KUNIT_ASSERT_NOT_NULL(test, page);

			frag_size = tcase->frag_skbs[i];
			frag_skb = build_skb(page_address(page),
					     frag_size + shinfo_size);
			KUNIT_ASSERT_NOT_NULL(test, frag_skb);
			__skb_put(frag_skb, frag_size);

			if (prev)
				prev->next = frag_skb;
			else
				skb_shinfo(skb)->frag_list = frag_skb;
			prev = frag_skb;

			total_size += frag_size;
			total_true_size += frag_skb->truesize;
		}

		skb->len += total_size;
		skb->data_len += total_size;
		skb->truesize += total_true_size;

		if (tcase->id == GSO_TEST_GSO_BY_FRAGS)
			skb_shinfo(skb)->gso_size = GSO_BY_FRAGS;
	}

	features = NETIF_F_SG | NETIF_F_HW_CSUM;
	if (tcase->id == GSO_TEST_GSO_PARTIAL)
		features |= NETIF_F_GSO_PARTIAL;

	/* TODO: this should also work with SG,
	 * rather than hit BUG_ON(i >= nfrags)
	 */
	if (tcase->id == GSO_TEST_FRAG_LIST_NON_UNIFORM)
		features &= ~NETIF_F_SG;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs)) {
		KUNIT_FAIL(test, "segs error %pe", segs);
		goto free_gso_skb;
	} else if (!segs) {
		KUNIT_FAIL(test, "no segments");
		goto free_gso_skb;
	}

	last = segs->prev;
	for (cur = segs, i = 0; cur; cur = next, i++) {
		next = cur->next;

		KUNIT_ASSERT_EQ(test, cur->len, sizeof(hdr) + tcase->segs[i]);

		/* segs have skb->data pointing to the mac header */
		KUNIT_ASSERT_PTR_EQ(test, skb_mac_header(cur), cur->data);
		KUNIT_ASSERT_PTR_EQ(test, skb_network_header(cur), cur->data + sizeof(hdr));

		/* header was copied to all segs */
		KUNIT_ASSERT_EQ(test, memcmp(skb_mac_header(cur), hdr, sizeof(hdr)), 0);

		/* last seg can be found through segs->prev pointer */
		if (!next)
			KUNIT_ASSERT_PTR_EQ(test, cur, last);

		consume_skb(cur);
	}

	KUNIT_ASSERT_EQ(test, i, tcase->nr_segs);

free_gso_skb:
	consume_skb(skb);
}

/* IP tunnel flags */

#include <net/ip_tunnels.h>

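/*
 * Exercise the helpers that convert between the legacy __be16 tunnel flags
 * and the bitmap representation declared with IP_TUNNEL_DECLARE_FLAGS():
 * ip_tunnel_flags_is_be16_compat(), ip_tunnel_flags_to_be16() and
 * ip_tunnel_flags_from_be16().
 */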
struct ip_tunnel_flags_test {
	const char *name;

	const u16 *src_bits;
	const u16 *exp_bits;
	u8 src_num;
	u8 exp_num;

	__be16 exp_val;
	bool exp_comp;
};

#define IP_TUNNEL_FLAGS_TEST(n, src, comp, eval, exp) {	\
	.name = (n),					\
	.src_bits = (src),				\
	.src_num = ARRAY_SIZE(src),			\
	.exp_comp = (comp),				\
	.exp_val = (eval),				\
	.exp_bits = (exp),				\
	.exp_num = ARRAY_SIZE(exp),			\
}

/* These are __be16-compatible and can be compared as is */
static const u16 ip_tunnel_flags_1[] = {
	IP_TUNNEL_KEY_BIT,
	IP_TUNNEL_STRICT_BIT,
	IP_TUNNEL_ERSPAN_OPT_BIT,
};

/* Due to the previous flags design limitation, setting either
 * ``IP_TUNNEL_CSUM_BIT`` (on Big Endian) or ``IP_TUNNEL_DONT_FRAGMENT_BIT``
 * (on Little) also sets VTI/ISATAP bit. In the bitmap implementation, they
 * correspond to ``BIT(16)``, which is bigger than ``U16_MAX``, but still is
 * backward-compatible.
 */
#ifdef __LITTLE_ENDIAN
#define IP_TUNNEL_CONFLICT_BIT	IP_TUNNEL_DONT_FRAGMENT_BIT
#else
#define IP_TUNNEL_CONFLICT_BIT	IP_TUNNEL_CSUM_BIT
#endif

static const u16 ip_tunnel_flags_2_src[] = {
	IP_TUNNEL_CONFLICT_BIT,
};

static const u16 ip_tunnel_flags_2_exp[] = {
	IP_TUNNEL_CONFLICT_BIT,
	IP_TUNNEL_SIT_ISATAP_BIT,
};

/* Bits 17 and higher are not compatible with __be16 flags */
static const u16 ip_tunnel_flags_3_src[] = {
	IP_TUNNEL_VXLAN_OPT_BIT,
	17,
	18,
	20,
};

static const u16 ip_tunnel_flags_3_exp[] = {
	IP_TUNNEL_VXLAN_OPT_BIT,
};

static const struct ip_tunnel_flags_test ip_tunnel_flags_test[] = {
	IP_TUNNEL_FLAGS_TEST("compat", ip_tunnel_flags_1, true,
			     cpu_to_be16(BIT(IP_TUNNEL_KEY_BIT) |
					 BIT(IP_TUNNEL_STRICT_BIT) |
					 BIT(IP_TUNNEL_ERSPAN_OPT_BIT)),
			     ip_tunnel_flags_1),
	IP_TUNNEL_FLAGS_TEST("conflict", ip_tunnel_flags_2_src, true,
			     VTI_ISVTI, ip_tunnel_flags_2_exp),
	IP_TUNNEL_FLAGS_TEST("new", ip_tunnel_flags_3_src, false,
			     cpu_to_be16(BIT(IP_TUNNEL_VXLAN_OPT_BIT)),
			     ip_tunnel_flags_3_exp),
};

static void
ip_tunnel_flags_test_case_to_desc(const struct ip_tunnel_flags_test *t,
				  char *desc)
{
	strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
}
KUNIT_ARRAY_PARAM(ip_tunnel_flags_test, ip_tunnel_flags_test,
		  ip_tunnel_flags_test_case_to_desc);

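/*
 * Set the source bits in a fresh bitmap, check that the helpers report the
 * expected __be16 compatibility and value, and verify that converting the
 * expected __be16 value back yields the expected bitmap.
 */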
static void ip_tunnel_flags_test_run(struct kunit *test)
{
	const struct ip_tunnel_flags_test *t = test->param_value;
	IP_TUNNEL_DECLARE_FLAGS(src) = { };
	IP_TUNNEL_DECLARE_FLAGS(exp) = { };
	IP_TUNNEL_DECLARE_FLAGS(out);

	for (u32 j = 0; j < t->src_num; j++)
		__set_bit(t->src_bits[j], src);
	for (u32 j = 0; j < t->exp_num; j++)
		__set_bit(t->exp_bits[j], exp);

	KUNIT_ASSERT_EQ(test, t->exp_comp,
			ip_tunnel_flags_is_be16_compat(src));
	KUNIT_ASSERT_EQ(test, (__force u16)t->exp_val,
			(__force u16)ip_tunnel_flags_to_be16(src));

	ip_tunnel_flags_from_be16(out, t->exp_val);
	KUNIT_ASSERT_TRUE(test, __ipt_flag_op(bitmap_equal, exp, out));
}

static struct kunit_case net_test_cases[] = {
	KUNIT_CASE_PARAM(gso_test_func, gso_test_gen_params),
	KUNIT_CASE_PARAM(ip_tunnel_flags_test_run,
			 ip_tunnel_flags_test_gen_params),
	{ },
};

static struct kunit_suite net_test_suite = {
	.name = "net_core",
	.test_cases = net_test_cases,
};
kunit_test_suite(net_test_suite);

MODULE_DESCRIPTION("KUnit tests for networking core");
MODULE_LICENSE("GPL");