  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <kunit/test.h>
  3. #include "utils.h"
/* Test wrapper around struct mctp_route: packets sent through the route's
 * ->output handler are captured on @pkts for inspection, rather than being
 * transmitted on a real device.
 */
struct mctp_test_route {
	struct mctp_route	rt;
	struct sk_buff_head	pkts;
};
  8. static int mctp_test_route_output(struct mctp_route *rt, struct sk_buff *skb)
  9. {
  10. struct mctp_test_route *test_rt = container_of(rt, struct mctp_test_route, rt);
  11. skb_queue_tail(&test_rt->pkts, skb);
  12. return 0;
  13. }
  14. /* local version of mctp_route_alloc() */
  15. static struct mctp_test_route *mctp_route_test_alloc(void)
  16. {
  17. struct mctp_test_route *rt;
  18. rt = kzalloc(sizeof(*rt), GFP_KERNEL);
  19. if (!rt)
  20. return NULL;
  21. INIT_LIST_HEAD(&rt->rt.list);
  22. refcount_set(&rt->rt.refs, 1);
  23. rt->rt.output = mctp_test_route_output;
  24. skb_queue_head_init(&rt->pkts);
  25. return rt;
  26. }
  27. static struct mctp_test_route *mctp_test_create_route(struct net *net,
  28. struct mctp_dev *dev,
  29. mctp_eid_t eid,
  30. unsigned int mtu)
  31. {
  32. struct mctp_test_route *rt;
  33. rt = mctp_route_test_alloc();
  34. if (!rt)
  35. return NULL;
  36. rt->rt.min = eid;
  37. rt->rt.max = eid;
  38. rt->rt.mtu = mtu;
  39. rt->rt.type = RTN_UNSPEC;
  40. if (dev)
  41. mctp_dev_hold(dev);
  42. rt->rt.dev = dev;
  43. list_add_rcu(&rt->rt.list, &net->mctp.routes);
  44. return rt;
  45. }
/* Tear down a route created by mctp_test_create_route(): unlink it from
 * the netns route list, drop queued packets and the device reference, and
 * assert that the test holds the only remaining route reference before
 * freeing.
 */
static void mctp_test_route_destroy(struct kunit *test,
				    struct mctp_test_route *rt)
{
	unsigned int refs;

	/* route-list modification requires RTNL, matching the real core */
	rtnl_lock();
	list_del_rcu(&rt->rt.list);
	rtnl_unlock();

	skb_queue_purge(&rt->pkts);
	if (rt->rt.dev)
		mctp_dev_put(rt->rt.dev);

	/* any imbalance here means a test leaked or over-dropped a ref */
	refs = refcount_read(&rt->rt.refs);
	KUNIT_ASSERT_EQ_MSG(test, refs, 1, "route ref imbalance");

	/* defer the actual free until concurrent RCU readers are done */
	kfree_rcu(&rt->rt, rcu);
}
  60. static void mctp_test_skb_set_dev(struct sk_buff *skb,
  61. struct mctp_test_dev *dev)
  62. {
  63. struct mctp_skb_cb *cb;
  64. cb = mctp_cb(skb);
  65. cb->net = READ_ONCE(dev->mdev->net);
  66. skb->dev = dev->ndev;
  67. }
  68. static struct sk_buff *mctp_test_create_skb(const struct mctp_hdr *hdr,
  69. unsigned int data_len)
  70. {
  71. size_t hdr_len = sizeof(*hdr);
  72. struct sk_buff *skb;
  73. unsigned int i;
  74. u8 *buf;
  75. skb = alloc_skb(hdr_len + data_len, GFP_KERNEL);
  76. if (!skb)
  77. return NULL;
  78. __mctp_cb(skb);
  79. memcpy(skb_put(skb, hdr_len), hdr, hdr_len);
  80. buf = skb_put(skb, data_len);
  81. for (i = 0; i < data_len; i++)
  82. buf[i] = i & 0xff;
  83. return skb;
  84. }
/* Build an skb containing @hdr followed by @data_len bytes copied from
 * @data. Returns NULL on allocation failure.
 */
static struct sk_buff *__mctp_test_create_skb_data(const struct mctp_hdr *hdr,
						   const void *data,
						   size_t data_len)
{
	size_t hdr_len = sizeof(*hdr);
	struct sk_buff *skb;

	skb = alloc_skb(hdr_len + data_len, GFP_KERNEL);
	if (!skb)
		return NULL;

	__mctp_cb(skb);

	memcpy(skb_put(skb, hdr_len), hdr, hdr_len);
	memcpy(skb_put(skb, data_len), data, data_len);

	return skb;
}

/* convenience wrapper: infer the payload length from the type of @d */
#define mctp_test_create_skb_data(h, d) \
	__mctp_test_create_skb_data(h, d, sizeof(*d))
/* parameters for one fragmentation test case */
struct mctp_frag_test {
	unsigned int mtu;	/* route MTU to fragment against */
	unsigned int msgsize;	/* payload size of the original message */
	unsigned int n_frags;	/* expected number of output fragments */
};
/* Fragment a message over a route with the given MTU, and verify the
 * resulting fragments: addressing and tag fields preserved, SOM/EOM set
 * only on the first/last fragment, consecutive sequence numbers, and
 * every fragment bounded by the MTU (filled exactly, except the last).
 */
static void mctp_test_fragment(struct kunit *test)
{
	const struct mctp_frag_test *params;
	int rc, i, n, mtu, msgsize;
	struct mctp_test_route *rt;
	struct sk_buff *skb;
	struct mctp_hdr hdr;
	u8 seq;

	params = test->param_value;
	mtu = params->mtu;
	msgsize = params->msgsize;

	hdr.ver = 1;
	hdr.src = 8;
	hdr.dest = 10;
	hdr.flags_seq_tag = MCTP_HDR_FLAG_TO;

	skb = mctp_test_create_skb(&hdr, msgsize);
	KUNIT_ASSERT_TRUE(test, skb);

	rt = mctp_test_create_route(&init_net, NULL, 10, mtu);
	KUNIT_ASSERT_TRUE(test, rt);

	rc = mctp_do_fragment_route(&rt->rt, skb, mtu, MCTP_TAG_OWNER);
	KUNIT_EXPECT_FALSE(test, rc);

	n = rt->pkts.qlen;
	KUNIT_EXPECT_EQ(test, n, params->n_frags);

	/* walk every fragment queued on the test route */
	for (i = 0;; i++) {
		struct mctp_hdr *hdr2;
		struct sk_buff *skb2;
		u8 tag_mask, seq2;
		bool first, last;

		first = i == 0;
		last = i == (n - 1);

		skb2 = skb_dequeue(&rt->pkts);

		if (!skb2)
			break;

		hdr2 = mctp_hdr(skb2);

		tag_mask = MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO;

		/* addressing and tag must match the original header */
		KUNIT_EXPECT_EQ(test, hdr2->ver, hdr.ver);
		KUNIT_EXPECT_EQ(test, hdr2->src, hdr.src);
		KUNIT_EXPECT_EQ(test, hdr2->dest, hdr.dest);
		KUNIT_EXPECT_EQ(test, hdr2->flags_seq_tag & tag_mask,
				hdr.flags_seq_tag & tag_mask);

		/* SOM only on the first fragment, EOM only on the last */
		KUNIT_EXPECT_EQ(test,
				!!(hdr2->flags_seq_tag & MCTP_HDR_FLAG_SOM), first);
		KUNIT_EXPECT_EQ(test,
				!!(hdr2->flags_seq_tag & MCTP_HDR_FLAG_EOM), last);

		/* sequence numbers increment, wrapping in the field width */
		seq2 = (hdr2->flags_seq_tag >> MCTP_HDR_SEQ_SHIFT) &
			MCTP_HDR_SEQ_MASK;

		if (first) {
			seq = seq2;
		} else {
			seq++;
			KUNIT_EXPECT_EQ(test, seq2, seq & MCTP_HDR_SEQ_MASK);
		}

		/* all but the final fragment fill the MTU exactly */
		if (!last)
			KUNIT_EXPECT_EQ(test, skb2->len, mtu);
		else
			KUNIT_EXPECT_LE(test, skb2->len, mtu);

		kfree_skb(skb2);
	}

	mctp_test_route_destroy(test, rt);
}
/* fragmentation cases around the MTU boundary: a 68-byte MTU leaves
 * 64 bytes of payload per fragment after the 4-byte MCTP header
 */
static const struct mctp_frag_test mctp_frag_tests[] = {
	{.mtu = 68, .msgsize = 63, .n_frags = 1},
	{.mtu = 68, .msgsize = 64, .n_frags = 1},
	{.mtu = 68, .msgsize = 65, .n_frags = 2},
	{.mtu = 68, .msgsize = 66, .n_frags = 2},
	{.mtu = 68, .msgsize = 127, .n_frags = 2},
	{.mtu = 68, .msgsize = 128, .n_frags = 2},
	{.mtu = 68, .msgsize = 129, .n_frags = 3},
	{.mtu = 68, .msgsize = 130, .n_frags = 3},
};
  176. static void mctp_frag_test_to_desc(const struct mctp_frag_test *t, char *desc)
  177. {
  178. sprintf(desc, "mtu %d len %d -> %d frags",
  179. t->msgsize, t->mtu, t->n_frags);
  180. }
  181. KUNIT_ARRAY_PARAM(mctp_frag, mctp_frag_tests, mctp_frag_test_to_desc);
/* one packet-input case: header to inject, and whether we expect it to
 * be accepted and routed
 */
struct mctp_rx_input_test {
	struct mctp_hdr hdr;
	bool input;
};
/* Inject a single packet through mctp_pkttype_receive() and check whether
 * it was routed — i.e., whether it landed on our test route's queue —
 * according to the test case's expectation.
 */
static void mctp_test_rx_input(struct kunit *test)
{
	const struct mctp_rx_input_test *params;
	struct mctp_test_route *rt;
	struct mctp_test_dev *dev;
	struct sk_buff *skb;

	params = test->param_value;

	dev = mctp_test_create_dev();
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

	rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);

	skb = mctp_test_create_skb(&params->hdr, 1);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);

	mctp_pkttype_receive(skb, dev->ndev, &mctp_packet_type, NULL);

	/* routed packets are captured on the test route's queue */
	KUNIT_EXPECT_EQ(test, !!rt->pkts.qlen, params->input);

	mctp_test_route_destroy(test, rt);
	mctp_test_destroy_dev(dev);
}
/* build an MCTP header initializer from version, src/dest EIDs, and the
 * flags/seq/tag byte
 */
#define RX_HDR(_ver, _src, _dest, _fst) \
	{ .ver = _ver, .src = _src, .dest = _dest, .flags_seq_tag = _fst }

/* we have a route for EID 8 only */
static const struct mctp_rx_input_test mctp_rx_input_tests[] = {
	{ .hdr = RX_HDR(1, 10, 8, 0), .input = true },
	{ .hdr = RX_HDR(1, 10, 9, 0), .input = false }, /* no input route */
	{ .hdr = RX_HDR(2, 10, 8, 0), .input = false }, /* invalid version */
};
/* describe a packet-input case by its raw header fields */
static void mctp_rx_input_test_to_desc(const struct mctp_rx_input_test *t,
				       char *desc)
{
	sprintf(desc, "{%x,%x,%x,%x}", t->hdr.ver, t->hdr.src, t->hdr.dest,
		t->hdr.flags_seq_tag);
}

KUNIT_ARRAY_PARAM(mctp_rx_input, mctp_rx_input_tests,
		  mctp_rx_input_test_to_desc);
/* set up a local dev, route on EID 8, and a socket listening on type 0.
 * The device is placed on network @netid (unless MCTP_NET_ANY), and the
 * socket bound to the same network. Created objects are returned through
 * the out-parameters; release with __mctp_route_test_fini().
 */
static void __mctp_route_test_init(struct kunit *test,
				   struct mctp_test_dev **devp,
				   struct mctp_test_route **rtp,
				   struct socket **sockp,
				   unsigned int netid)
{
	struct sockaddr_mctp addr = {0};
	struct mctp_test_route *rt;
	struct mctp_test_dev *dev;
	struct socket *sock;
	int rc;

	dev = mctp_test_create_dev();
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
	if (netid != MCTP_NET_ANY)
		WRITE_ONCE(dev->mdev->net, netid);

	rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);

	rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock);
	KUNIT_ASSERT_EQ(test, rc, 0);

	/* bind: local EID 8, message type 0, on the requested network */
	addr.smctp_family = AF_MCTP;
	addr.smctp_network = netid;
	addr.smctp_addr.s_addr = 8;
	addr.smctp_type = 0;
	rc = kernel_bind(sock, (struct sockaddr *)&addr, sizeof(addr));
	KUNIT_ASSERT_EQ(test, rc, 0);

	*rtp = rt;
	*devp = dev;
	*sockp = sock;
}
/* undo __mctp_route_test_init(): release the socket, route and device,
 * in reverse order of creation
 */
static void __mctp_route_test_fini(struct kunit *test,
				   struct mctp_test_dev *dev,
				   struct mctp_test_route *rt,
				   struct socket *sock)
{
	sock_release(sock);
	mctp_test_route_destroy(test, rt);
	mctp_test_destroy_dev(dev);
}
/* one socket-delivery case: header and message type to inject, and
 * whether the packet should reach the bound socket
 */
struct mctp_route_input_sk_test {
	struct mctp_hdr hdr;
	u8 type;
	bool deliver;
};
/* Inject a single packet via mctp_route_input() and check whether it is
 * (or is not) delivered to the socket bound by __mctp_route_test_init().
 */
static void mctp_test_route_input_sk(struct kunit *test)
{
	const struct mctp_route_input_sk_test *params;
	struct sk_buff *skb, *skb2;
	struct mctp_test_route *rt;
	struct mctp_test_dev *dev;
	struct socket *sock;
	int rc;

	params = test->param_value;

	__mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);

	skb = mctp_test_create_skb_data(&params->hdr, &params->type);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);

	mctp_test_skb_set_dev(skb, dev);

	rc = mctp_route_input(&rt->rt, skb);

	if (params->deliver) {
		/* expect the one-byte payload on the socket queue */
		KUNIT_EXPECT_EQ(test, rc, 0);

		skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
		KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2);
		KUNIT_EXPECT_EQ(test, skb2->len, 1);

		skb_free_datagram(sock->sk, skb2);

	} else {
		/* input fails, and nothing is queued on the socket */
		KUNIT_EXPECT_NE(test, rc, 0);
		skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);
		KUNIT_EXPECT_NULL(test, skb2);
	}

	__mctp_route_test_fini(test, dev, rt, sock);
}
/* shorthands for the flags_seq_tag byte */
#define FL_S	(MCTP_HDR_FLAG_SOM)
#define FL_E	(MCTP_HDR_FLAG_EOM)
#define FL_TO	(MCTP_HDR_FLAG_TO)
#define FL_T(t)	((t) & MCTP_HDR_TAG_MASK)

/* only a complete (SOM|EOM), TO-flagged, type-0 message is deliverable
 * to our type-0-bound socket
 */
static const struct mctp_route_input_sk_test mctp_route_input_sk_tests[] = {
	{ .hdr = RX_HDR(1, 10, 8, FL_S | FL_E | FL_TO), .type = 0, .deliver = true },
	{ .hdr = RX_HDR(1, 10, 8, FL_S | FL_E | FL_TO), .type = 1, .deliver = false },
	{ .hdr = RX_HDR(1, 10, 8, FL_S | FL_E), .type = 0, .deliver = false },
	{ .hdr = RX_HDR(1, 10, 8, FL_E | FL_TO), .type = 0, .deliver = false },
	{ .hdr = RX_HDR(1, 10, 8, FL_TO), .type = 0, .deliver = false },
	{ .hdr = RX_HDR(1, 10, 8, 0), .type = 0, .deliver = false },
};
/* describe a socket-delivery case by header fields and message type */
static void mctp_route_input_sk_to_desc(const struct mctp_route_input_sk_test *t,
					char *desc)
{
	sprintf(desc, "{%x,%x,%x,%x} type %d", t->hdr.ver, t->hdr.src,
		t->hdr.dest, t->hdr.flags_seq_tag, t->type);
}

KUNIT_ARRAY_PARAM(mctp_route_input_sk, mctp_route_input_sk_tests,
		  mctp_route_input_sk_to_desc);
/* one reassembly case: up to four fragment headers to inject in order,
 * and the expected reassembled length (0: no delivery expected)
 */
struct mctp_route_input_sk_reasm_test {
	const char *name;
	struct mctp_hdr hdrs[4];
	int n_hdrs;
	int rx_len;
};
/* Inject a sequence of fragments (one payload byte each, counting up per
 * fragment) and check the reassembled message — or its absence — on the
 * bound socket.
 */
static void mctp_test_route_input_sk_reasm(struct kunit *test)
{
	const struct mctp_route_input_sk_reasm_test *params;
	struct sk_buff *skb, *skb2;
	struct mctp_test_route *rt;
	struct mctp_test_dev *dev;
	struct socket *sock;
	int i, rc;
	u8 c;

	params = test->param_value;

	__mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);

	for (i = 0; i < params->n_hdrs; i++) {
		c = i;
		skb = mctp_test_create_skb_data(&params->hdrs[i], &c);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);

		mctp_test_skb_set_dev(skb, dev);

		/* per-fragment return codes are not asserted here; we only
		 * check the final delivery outcome below
		 */
		rc = mctp_route_input(&rt->rt, skb);
	}

	skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);

	if (params->rx_len) {
		KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2);
		KUNIT_EXPECT_EQ(test, skb2->len, params->rx_len);
		skb_free_datagram(sock->sk, skb2);

	} else {
		KUNIT_EXPECT_NULL(test, skb2);
	}

	__mctp_route_test_fini(test, dev, rt, sock);
}
/* fragment header: TO-flagged, src 10 -> dest 8, with flags @f and
 * sequence number @s
 */
#define RX_FRAG(f, s) RX_HDR(1, 10, 8, FL_TO | (f) | ((s) << MCTP_HDR_SEQ_SHIFT))

static const struct mctp_route_input_sk_reasm_test mctp_route_input_sk_reasm_tests[] = {
	{
		.name = "single packet",
		.hdrs = {
			RX_FRAG(FL_S | FL_E, 0),
		},
		.n_hdrs = 1,
		.rx_len = 1,
	},
	{
		.name = "single packet, offset seq",
		.hdrs = {
			RX_FRAG(FL_S | FL_E, 1),
		},
		.n_hdrs = 1,
		.rx_len = 1,
	},
	{
		.name = "start & end packets",
		.hdrs = {
			RX_FRAG(FL_S, 0),
			RX_FRAG(FL_E, 1),
		},
		.n_hdrs = 2,
		.rx_len = 2,
	},
	{
		.name = "start & end packets, offset seq",
		.hdrs = {
			RX_FRAG(FL_S, 1),
			RX_FRAG(FL_E, 2),
		},
		.n_hdrs = 2,
		.rx_len = 2,
	},
	{
		.name = "start & end packets, out of order",
		.hdrs = {
			RX_FRAG(FL_E, 1),
			RX_FRAG(FL_S, 0),
		},
		.n_hdrs = 2,
		.rx_len = 0,
	},
	{
		.name = "start, middle & end packets",
		.hdrs = {
			RX_FRAG(FL_S, 0),
			RX_FRAG(0,    1),
			RX_FRAG(FL_E, 2),
		},
		.n_hdrs = 3,
		.rx_len = 3,
	},
	{
		.name = "missing seq",
		.hdrs = {
			RX_FRAG(FL_S, 0),
			RX_FRAG(FL_E, 2),
		},
		.n_hdrs = 2,
		.rx_len = 0,
	},
	{
		.name = "seq wrap",
		.hdrs = {
			RX_FRAG(FL_S, 3),
			RX_FRAG(FL_E, 0),
		},
		.n_hdrs = 2,
		.rx_len = 2,
	},
};
/* reassembly cases are identified by their explicit name */
static void mctp_route_input_sk_reasm_to_desc(
				const struct mctp_route_input_sk_reasm_test *t,
				char *desc)
{
	sprintf(desc, "%s", t->name);
}

KUNIT_ARRAY_PARAM(mctp_route_input_sk_reasm, mctp_route_input_sk_reasm_tests,
		  mctp_route_input_sk_reasm_to_desc);
/* one key-match case: the key's (peer, local, tag) configuration, the
 * header to inject, and whether the packet should be matched & delivered
 */
struct mctp_route_input_sk_keys_test {
	const char	*name;
	mctp_eid_t	key_peer_addr;
	mctp_eid_t	key_local_addr;
	u8		key_tag;
	struct mctp_hdr hdr;
	bool		deliver;
};
/* test packet rx in the presence of various key configurations */
static void mctp_test_route_input_sk_keys(struct kunit *test)
{
	const struct mctp_route_input_sk_keys_test *params;
	struct mctp_test_route *rt;
	struct sk_buff *skb, *skb2;
	struct mctp_test_dev *dev;
	struct mctp_sk_key *key;
	struct netns_mctp *mns;
	struct mctp_sock *msk;
	struct socket *sock;
	unsigned long flags;
	unsigned int net;
	int rc;
	u8 c;

	params = test->param_value;

	dev = mctp_test_create_dev();
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
	net = READ_ONCE(dev->mdev->net);

	rt = mctp_test_create_route(&init_net, dev->mdev, 8, 68);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);

	/* NB: this socket is not bound; only the key below should match */
	rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock);
	KUNIT_ASSERT_EQ(test, rc, 0);

	msk = container_of(sock->sk, struct mctp_sock, sk);
	mns = &sock_net(sock->sk)->mctp;

	/* set the incoming tag according to test params */
	key = mctp_key_alloc(msk, net, params->key_local_addr,
			     params->key_peer_addr, params->key_tag,
			     GFP_KERNEL);

	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, key);

	/* reservation must be performed under the netns keys lock */
	spin_lock_irqsave(&mns->keys_lock, flags);
	mctp_reserve_tag(&init_net, key, msk);
	spin_unlock_irqrestore(&mns->keys_lock, flags);

	/* create packet and route */
	c = 0;
	skb = mctp_test_create_skb_data(&params->hdr, &c);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);

	mctp_test_skb_set_dev(skb, dev);

	rc = mctp_route_input(&rt->rt, skb);

	/* (potentially) receive message */
	skb2 = skb_recv_datagram(sock->sk, MSG_DONTWAIT, &rc);

	if (params->deliver)
		KUNIT_EXPECT_NOT_ERR_OR_NULL(test, skb2);
	else
		KUNIT_EXPECT_PTR_EQ(test, skb2, NULL);

	if (skb2)
		skb_free_datagram(sock->sk, skb2);

	mctp_key_unref(key);
	__mctp_route_test_fini(test, dev, rt, sock);
}
/* key-match cases: a reserved key matches on (local, peer, tag) with
 * ANY wildcards allowed, and only for responses (TO flag clear)
 */
static const struct mctp_route_input_sk_keys_test mctp_route_input_sk_keys_tests[] = {
	{
		.name = "direct match",
		.key_peer_addr = 9,
		.key_local_addr = 8,
		.key_tag = 1,
		.hdr = RX_HDR(1, 9, 8, FL_S | FL_E | FL_T(1)),
		.deliver = true,
	},
	{
		.name = "flipped src/dest",
		.key_peer_addr = 8,
		.key_local_addr = 9,
		.key_tag = 1,
		.hdr = RX_HDR(1, 9, 8, FL_S | FL_E | FL_T(1)),
		.deliver = false,
	},
	{
		.name = "peer addr mismatch",
		.key_peer_addr = 9,
		.key_local_addr = 8,
		.key_tag = 1,
		.hdr = RX_HDR(1, 10, 8, FL_S | FL_E | FL_T(1)),
		.deliver = false,
	},
	{
		.name = "tag value mismatch",
		.key_peer_addr = 9,
		.key_local_addr = 8,
		.key_tag = 1,
		.hdr = RX_HDR(1, 9, 8, FL_S | FL_E | FL_T(2)),
		.deliver = false,
	},
	{
		.name = "TO mismatch",
		.key_peer_addr = 9,
		.key_local_addr = 8,
		.key_tag = 1,
		.hdr = RX_HDR(1, 9, 8, FL_S | FL_E | FL_T(1) | FL_TO),
		.deliver = false,
	},
	{
		.name = "broadcast response",
		.key_peer_addr = MCTP_ADDR_ANY,
		.key_local_addr = 8,
		.key_tag = 1,
		.hdr = RX_HDR(1, 11, 8, FL_S | FL_E | FL_T(1)),
		.deliver = true,
	},
	{
		.name = "any local match",
		.key_peer_addr = 12,
		.key_local_addr = MCTP_ADDR_ANY,
		.key_tag = 1,
		.hdr = RX_HDR(1, 12, 8, FL_S | FL_E | FL_T(1)),
		.deliver = true,
	},
};
/* key-match cases are identified by their explicit name */
static void mctp_route_input_sk_keys_to_desc(
				const struct mctp_route_input_sk_keys_test *t,
				char *desc)
{
	sprintf(desc, "%s", t->name);
}

KUNIT_ARRAY_PARAM(mctp_route_input_sk_keys, mctp_route_input_sk_keys_tests,
		  mctp_route_input_sk_keys_to_desc);
/* per-network fixture for the multiple-nets tests: one dev/route/socket
 * set on network @netid, plus the skb injected and (optionally) the key
 * reserved for it. The msg payload carries the netid so delivery to the
 * right socket can be verified.
 */
struct test_net {
	unsigned int netid;
	struct mctp_test_dev *dev;
	struct mctp_test_route *rt;
	struct socket *sock;
	struct sk_buff *skb;
	struct mctp_sk_key *key;
	struct {
		u8 type;
		unsigned int data;
	} msg;
};
/* set up one net's dev/route/socket, and prepare an incoming skb whose
 * payload encodes the net's id (for the bind-based delivery test)
 */
static void
mctp_test_route_input_multiple_nets_bind_init(struct kunit *test,
					      struct test_net *t)
{
	struct mctp_hdr hdr = RX_HDR(1, 9, 8, FL_S | FL_E | FL_T(1) | FL_TO);

	t->msg.data = t->netid;

	__mctp_route_test_init(test, &t->dev, &t->rt, &t->sock, t->netid);

	t->skb = mctp_test_create_skb_data(&hdr, &t->msg);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->skb);
	mctp_test_skb_set_dev(t->skb, t->dev);
}
/* release one net's fixture created by the bind_init helper */
static void
mctp_test_route_input_multiple_nets_bind_fini(struct kunit *test,
					      struct test_net *t)
{
	__mctp_route_test_fini(test, t->dev, t->rt, t->sock);
}
/* Test that skbs from different nets (otherwise identical) get routed to their
 * corresponding socket via the sockets' bind()
 */
static void mctp_test_route_input_multiple_nets_bind(struct kunit *test)
{
	struct sk_buff *rx_skb1, *rx_skb2;
	struct test_net t1, t2;
	int rc;

	t1.netid = 1;
	t2.netid = 2;

	t1.msg.type = 0;
	t2.msg.type = 0;

	mctp_test_route_input_multiple_nets_bind_init(test, &t1);
	mctp_test_route_input_multiple_nets_bind_init(test, &t2);

	rc = mctp_route_input(&t1.rt->rt, t1.skb);
	KUNIT_ASSERT_EQ(test, rc, 0);
	rc = mctp_route_input(&t2.rt->rt, t2.skb);
	KUNIT_ASSERT_EQ(test, rc, 0);

	/* each socket should see exactly its own net's payload; the netid
	 * embedded after the type byte proves which message arrived
	 */
	rx_skb1 = skb_recv_datagram(t1.sock->sk, MSG_DONTWAIT, &rc);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb1);
	KUNIT_EXPECT_EQ(test, rx_skb1->len, sizeof(t1.msg));
	KUNIT_EXPECT_EQ(test,
			*(unsigned int *)skb_pull(rx_skb1, sizeof(t1.msg.data)),
			t1.netid);
	kfree_skb(rx_skb1);

	rx_skb2 = skb_recv_datagram(t2.sock->sk, MSG_DONTWAIT, &rc);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb2);
	KUNIT_EXPECT_EQ(test, rx_skb2->len, sizeof(t2.msg));
	KUNIT_EXPECT_EQ(test,
			*(unsigned int *)skb_pull(rx_skb2, sizeof(t2.msg.data)),
			t2.netid);
	kfree_skb(rx_skb2);

	mctp_test_route_input_multiple_nets_bind_fini(test, &t1);
	mctp_test_route_input_multiple_nets_bind_fini(test, &t2);
}
  615. static void
  616. mctp_test_route_input_multiple_nets_key_init(struct kunit *test,
  617. struct test_net *t)
  618. {
  619. struct mctp_hdr hdr = RX_HDR(1, 9, 8, FL_S | FL_E | FL_T(1));
  620. struct mctp_sock *msk;
  621. struct netns_mctp *mns;
  622. unsigned long flags;
  623. t->msg.data = t->netid;
  624. __mctp_route_test_init(test, &t->dev, &t->rt, &t->sock, t->netid);
  625. msk = container_of(t->sock->sk, struct mctp_sock, sk);
  626. t->key = mctp_key_alloc(msk, t->netid, hdr.dest, hdr.src, 1, GFP_KERNEL);
  627. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->key);
  628. mns = &sock_net(t->sock->sk)->mctp;
  629. spin_lock_irqsave(&mns->keys_lock, flags);
  630. mctp_reserve_tag(&init_net, t->key, msk);
  631. spin_unlock_irqrestore(&mns->keys_lock, flags);
  632. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->key);
  633. t->skb = mctp_test_create_skb_data(&hdr, &t->msg);
  634. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, t->skb);
  635. mctp_test_skb_set_dev(t->skb, t->dev);
  636. }
/* release one net's fixture created by the key_init helper, dropping
 * the test's key reference first
 */
static void
mctp_test_route_input_multiple_nets_key_fini(struct kunit *test,
					     struct test_net *t)
{
	mctp_key_unref(t->key);
	__mctp_route_test_fini(test, t->dev, t->rt, t->sock);
}
/* test that skbs from different nets (otherwise identical) get routed to their
 * corresponding socket via the sk_key
 */
static void mctp_test_route_input_multiple_nets_key(struct kunit *test)
{
	struct sk_buff *rx_skb1, *rx_skb2;
	struct test_net t1, t2;
	int rc;

	t1.netid = 1;
	t2.netid = 2;

	/* use type 1 which is not bound */
	t1.msg.type = 1;
	t2.msg.type = 1;

	mctp_test_route_input_multiple_nets_key_init(test, &t1);
	mctp_test_route_input_multiple_nets_key_init(test, &t2);

	rc = mctp_route_input(&t1.rt->rt, t1.skb);
	KUNIT_ASSERT_EQ(test, rc, 0);
	rc = mctp_route_input(&t2.rt->rt, t2.skb);
	KUNIT_ASSERT_EQ(test, rc, 0);

	/* each socket should see exactly its own net's payload; the netid
	 * embedded after the type byte proves which message arrived
	 */
	rx_skb1 = skb_recv_datagram(t1.sock->sk, MSG_DONTWAIT, &rc);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb1);
	KUNIT_EXPECT_EQ(test, rx_skb1->len, sizeof(t1.msg));
	KUNIT_EXPECT_EQ(test,
			*(unsigned int *)skb_pull(rx_skb1, sizeof(t1.msg.data)),
			t1.netid);
	kfree_skb(rx_skb1);

	rx_skb2 = skb_recv_datagram(t2.sock->sk, MSG_DONTWAIT, &rc);
	KUNIT_EXPECT_NOT_ERR_OR_NULL(test, rx_skb2);
	KUNIT_EXPECT_EQ(test, rx_skb2->len, sizeof(t2.msg));
	KUNIT_EXPECT_EQ(test,
			*(unsigned int *)skb_pull(rx_skb2, sizeof(t2.msg.data)),
			t2.netid);
	kfree_skb(rx_skb2);

	mctp_test_route_input_multiple_nets_key_fini(test, &t1);
	mctp_test_route_input_multiple_nets_key_fini(test, &t2);
}
/* Input route to socket, using a single-packet message, where sock delivery
 * fails. Ensure we're handling the failure appropriately.
 */
static void mctp_test_route_input_sk_fail_single(struct kunit *test)
{
	const struct mctp_hdr hdr = RX_HDR(1, 10, 8, FL_S | FL_E | FL_TO);
	struct mctp_test_route *rt;
	struct mctp_test_dev *dev;
	struct socket *sock;
	struct sk_buff *skb;
	int rc;

	__mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);

	/* No rcvbuf space, so delivery should fail. __sock_set_rcvbuf will
	 * clamp the minimum to SOCK_MIN_RCVBUF, so we open-code this.
	 */
	lock_sock(sock->sk);
	WRITE_ONCE(sock->sk->sk_rcvbuf, 0);
	release_sock(sock->sk);

	skb = mctp_test_create_skb(&hdr, 10);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb);
	/* extra ref so the skb survives the failed input for inspection */
	skb_get(skb);

	mctp_test_skb_set_dev(skb, dev);

	/* do route input, which should fail */
	rc = mctp_route_input(&rt->rt, skb);
	KUNIT_EXPECT_NE(test, rc, 0);

	/* we should hold the only reference to skb */
	KUNIT_EXPECT_EQ(test, refcount_read(&skb->users), 1);
	kfree_skb(skb);

	__mctp_route_test_fini(test, dev, rt, sock);
}
/* Input route to socket, using a fragmented message, where sock delivery fails.
 */
static void mctp_test_route_input_sk_fail_frag(struct kunit *test)
{
	const struct mctp_hdr hdrs[2] = { RX_FRAG(FL_S, 0), RX_FRAG(FL_E, 1) };
	struct mctp_test_route *rt;
	struct mctp_test_dev *dev;
	struct sk_buff *skbs[2];
	struct socket *sock;
	unsigned int i;
	int rc;

	__mctp_route_test_init(test, &dev, &rt, &sock, MCTP_NET_ANY);

	/* as in the single-packet case: zero rcvbuf to force delivery failure */
	lock_sock(sock->sk);
	WRITE_ONCE(sock->sk->sk_rcvbuf, 0);
	release_sock(sock->sk);

	for (i = 0; i < ARRAY_SIZE(skbs); i++) {
		skbs[i] = mctp_test_create_skb(&hdrs[i], 10);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skbs[i]);
		/* extra ref so the skbs survive for refcount inspection */
		skb_get(skbs[i]);

		mctp_test_skb_set_dev(skbs[i], dev);
	}

	/* first route input should succeed, we're only queueing to the
	 * frag list
	 */
	rc = mctp_route_input(&rt->rt, skbs[0]);
	KUNIT_EXPECT_EQ(test, rc, 0);

	/* final route input should fail to deliver to the socket */
	rc = mctp_route_input(&rt->rt, skbs[1]);
	KUNIT_EXPECT_NE(test, rc, 0);

	/* we should hold the only reference to both skbs */
	KUNIT_EXPECT_EQ(test, refcount_read(&skbs[0]->users), 1);
	kfree_skb(skbs[0]);

	KUNIT_EXPECT_EQ(test, refcount_read(&skbs[1]->users), 1);
	kfree_skb(skbs[1]);

	__mctp_route_test_fini(test, dev, rt, sock);
}
  746. #if IS_ENABLED(CONFIG_MCTP_FLOWS)
/* Set up dev/route/socket and a @len-byte output skb with headroom for
 * the MCTP header, for the flow-extension tests.
 */
static void mctp_test_flow_init(struct kunit *test,
				struct mctp_test_dev **devp,
				struct mctp_test_route **rtp,
				struct socket **sock,
				struct sk_buff **skbp,
				unsigned int len)
{
	struct mctp_test_route *rt;
	struct mctp_test_dev *dev;
	struct sk_buff *skb;

	/* we have a slightly odd routing setup here; the test route
	 * is for EID 8, which is our local EID. We don't do a routing
	 * lookup, so that's fine - all we require is a path through
	 * mctp_local_output, which will call rt->output on whatever
	 * route we provide
	 */
	__mctp_route_test_init(test, &dev, &rt, sock, MCTP_NET_ANY);

	/* Assign a single EID. ->addrs is freed on mctp netdev release */
	dev->mdev->addrs = kmalloc(sizeof(u8), GFP_KERNEL);
	dev->mdev->num_addrs = 1;
	dev->mdev->addrs[0] = 8;

	skb = alloc_skb(len + sizeof(struct mctp_hdr) + 1, GFP_KERNEL);
	KUNIT_ASSERT_TRUE(test, skb);
	__mctp_cb(skb);
	/* leave room for the header (+1, presumably for the physical-layer
	 * byte — TODO confirm against mctp_local_output's expectations)
	 */
	skb_reserve(skb, sizeof(struct mctp_hdr) + 1);
	memset(skb_put(skb, len), 0, len);

	/* take a ref for the route, we'll decrement in local output */
	refcount_inc(&rt->rt.refs);

	*devp = dev;
	*rtp = rt;
	*skbp = skb;
}
/* release the fixture created by mctp_test_flow_init() */
static void mctp_test_flow_fini(struct kunit *test,
				struct mctp_test_dev *dev,
				struct mctp_test_route *rt,
				struct socket *sock)
{
	__mctp_route_test_fini(test, dev, rt, sock);
}
/* test that an outgoing skb has the correct MCTP extension data set */
static void mctp_test_packet_flow(struct kunit *test)
{
	struct sk_buff *skb, *skb2;
	struct mctp_test_route *rt;
	struct mctp_test_dev *dev;
	struct mctp_flow *flow;
	struct socket *sock;
	u8 dst = 8;
	int n, rc;

	mctp_test_flow_init(test, &dev, &rt, &sock, &skb, 30);

	rc = mctp_local_output(sock->sk, &rt->rt, skb, dst, MCTP_TAG_OWNER);
	KUNIT_ASSERT_EQ(test, rc, 0);

	/* 30 bytes fits in one packet; expect exactly one on the route */
	n = rt->pkts.qlen;
	KUNIT_ASSERT_EQ(test, n, 1);

	skb2 = skb_dequeue(&rt->pkts);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, skb2);

	/* the flow extension should reference our socket's key */
	flow = skb_ext_find(skb2, SKB_EXT_MCTP);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flow);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flow->key);
	KUNIT_ASSERT_PTR_EQ(test, flow->key->sk, sock->sk);

	kfree_skb(skb2);
	mctp_test_flow_fini(test, dev, rt, sock);
}
  810. /* test that outgoing skbs, after fragmentation, all have the correct MCTP
  811. * extension data set.
  812. */
  813. static void mctp_test_fragment_flow(struct kunit *test)
  814. {
  815. struct mctp_flow *flows[2];
  816. struct sk_buff *tx_skbs[2];
  817. struct mctp_test_route *rt;
  818. struct mctp_test_dev *dev;
  819. struct sk_buff *skb;
  820. struct socket *sock;
  821. u8 dst = 8;
  822. int n, rc;
  823. mctp_test_flow_init(test, &dev, &rt, &sock, &skb, 100);
  824. rc = mctp_local_output(sock->sk, &rt->rt, skb, dst, MCTP_TAG_OWNER);
  825. KUNIT_ASSERT_EQ(test, rc, 0);
  826. n = rt->pkts.qlen;
  827. KUNIT_ASSERT_EQ(test, n, 2);
  828. /* both resulting packets should have the same flow data */
  829. tx_skbs[0] = skb_dequeue(&rt->pkts);
  830. tx_skbs[1] = skb_dequeue(&rt->pkts);
  831. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tx_skbs[0]);
  832. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, tx_skbs[1]);
  833. flows[0] = skb_ext_find(tx_skbs[0], SKB_EXT_MCTP);
  834. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]);
  835. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]->key);
  836. KUNIT_ASSERT_PTR_EQ(test, flows[0]->key->sk, sock->sk);
  837. flows[1] = skb_ext_find(tx_skbs[1], SKB_EXT_MCTP);
  838. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[1]);
  839. KUNIT_ASSERT_PTR_EQ(test, flows[1]->key, flows[0]->key);
  840. kfree_skb(tx_skbs[0]);
  841. kfree_skb(tx_skbs[1]);
  842. mctp_test_flow_fini(test, dev, rt, sock);
  843. }
  844. #else
/* stub: the flow tests rely on the SKB_EXT_MCTP extension, which only
 * exists when CONFIG_MCTP_FLOWS is enabled
 */
static void mctp_test_packet_flow(struct kunit *test)
{
	kunit_skip(test, "Requires CONFIG_MCTP_FLOWS=y");
}
/* stub: see mctp_test_packet_flow above */
static void mctp_test_fragment_flow(struct kunit *test)
{
	kunit_skip(test, "Requires CONFIG_MCTP_FLOWS=y");
}
  853. #endif
  854. /* Test that outgoing skbs cause a suitable tag to be created */
  855. static void mctp_test_route_output_key_create(struct kunit *test)
  856. {
  857. const unsigned int netid = 50;
  858. const u8 dst = 26, src = 15;
  859. struct mctp_test_route *rt;
  860. struct mctp_test_dev *dev;
  861. struct mctp_sk_key *key;
  862. struct netns_mctp *mns;
  863. unsigned long flags;
  864. struct socket *sock;
  865. struct sk_buff *skb;
  866. bool empty, single;
  867. const int len = 2;
  868. int rc;
  869. dev = mctp_test_create_dev();
  870. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
  871. WRITE_ONCE(dev->mdev->net, netid);
  872. rt = mctp_test_create_route(&init_net, dev->mdev, dst, 68);
  873. KUNIT_ASSERT_NOT_ERR_OR_NULL(test, rt);
  874. rc = sock_create_kern(&init_net, AF_MCTP, SOCK_DGRAM, 0, &sock);
  875. KUNIT_ASSERT_EQ(test, rc, 0);
  876. dev->mdev->addrs = kmalloc(sizeof(u8), GFP_KERNEL);
  877. dev->mdev->num_addrs = 1;
  878. dev->mdev->addrs[0] = src;
  879. skb = alloc_skb(sizeof(struct mctp_hdr) + 1 + len, GFP_KERNEL);
  880. KUNIT_ASSERT_TRUE(test, skb);
  881. __mctp_cb(skb);
  882. skb_reserve(skb, sizeof(struct mctp_hdr) + 1 + len);
  883. memset(skb_put(skb, len), 0, len);
  884. refcount_inc(&rt->rt.refs);
  885. mns = &sock_net(sock->sk)->mctp;
  886. /* We assume we're starting from an empty keys list, which requires
  887. * preceding tests to clean up correctly!
  888. */
  889. spin_lock_irqsave(&mns->keys_lock, flags);
  890. empty = hlist_empty(&mns->keys);
  891. spin_unlock_irqrestore(&mns->keys_lock, flags);
  892. KUNIT_ASSERT_TRUE(test, empty);
  893. rc = mctp_local_output(sock->sk, &rt->rt, skb, dst, MCTP_TAG_OWNER);
  894. KUNIT_ASSERT_EQ(test, rc, 0);
  895. key = NULL;
  896. single = false;
  897. spin_lock_irqsave(&mns->keys_lock, flags);
  898. if (!hlist_empty(&mns->keys)) {
  899. key = hlist_entry(mns->keys.first, struct mctp_sk_key, hlist);
  900. single = hlist_is_singular_node(&key->hlist, &mns->keys);
  901. }
  902. spin_unlock_irqrestore(&mns->keys_lock, flags);
  903. KUNIT_ASSERT_NOT_NULL(test, key);
  904. KUNIT_ASSERT_TRUE(test, single);
  905. KUNIT_EXPECT_EQ(test, key->net, netid);
  906. KUNIT_EXPECT_EQ(test, key->local_addr, src);
  907. KUNIT_EXPECT_EQ(test, key->peer_addr, dst);
  908. /* key has incoming tag, so inverse of what we sent */
  909. KUNIT_EXPECT_FALSE(test, key->tag & MCTP_TAG_OWNER);
  910. sock_release(sock);
  911. mctp_test_route_destroy(test, rt);
  912. mctp_test_destroy_dev(dev);
  913. }
/* Test case table; parameterised cases take a generator for their inputs.
 * Must be NULL-terminated (the trailing {}).
 */
static struct kunit_case mctp_test_cases[] = {
	KUNIT_CASE_PARAM(mctp_test_fragment, mctp_frag_gen_params),
	KUNIT_CASE_PARAM(mctp_test_rx_input, mctp_rx_input_gen_params),
	KUNIT_CASE_PARAM(mctp_test_route_input_sk, mctp_route_input_sk_gen_params),
	KUNIT_CASE_PARAM(mctp_test_route_input_sk_reasm,
			 mctp_route_input_sk_reasm_gen_params),
	KUNIT_CASE_PARAM(mctp_test_route_input_sk_keys,
			 mctp_route_input_sk_keys_gen_params),
	KUNIT_CASE(mctp_test_route_input_sk_fail_single),
	KUNIT_CASE(mctp_test_route_input_sk_fail_frag),
	KUNIT_CASE(mctp_test_route_input_multiple_nets_bind),
	KUNIT_CASE(mctp_test_route_input_multiple_nets_key),
	KUNIT_CASE(mctp_test_packet_flow),
	KUNIT_CASE(mctp_test_fragment_flow),
	KUNIT_CASE(mctp_test_route_output_key_create),
	{}
};
/* Suite registration; kunit_test_suite() hooks the suite into the KUnit
 * runner at module init / boot.
 */
static struct kunit_suite mctp_test_suite = {
	.name = "mctp",
	.test_cases = mctp_test_cases,
};

kunit_test_suite(mctp_test_suite);