xdpsock_user.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2018 Intel Corporation. */

#include <assert.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <linux/bpf.h>
#include <linux/if_link.h>
#include <linux/if_xdp.h>
#include <linux/if_ether.h>
#include <net/if.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <net/ethernet.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <time.h>
#include <unistd.h>
#include <pthread.h>
#include <locale.h>
#include <sys/types.h>
#include <poll.h>

#include "bpf/libbpf.h"
#include "bpf_util.h"
#include <bpf/bpf.h>

#include "xdpsock.h"
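
/*
 * Fallback definitions for builds against UAPI headers that predate
 * AF_XDP: SOL_XDP, AF_XDP and PF_XDP carry the values the kernel
 * assigned when the address family was merged.
 */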
#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#ifndef AF_XDP
#define AF_XDP 44
#endif

#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif
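
/*
 * The UMEM is carved into NUM_FRAMES fixed-size frames; FRAME_SIZE
 * equals 1 << FRAME_SHIFT so a frame address can be produced by
 * shifting a frame index. All ring sizes are powers of two, which lets
 * the code index rings with a cheap "& mask" instead of a modulo.
 */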
#define NUM_FRAMES 131072
#define FRAME_HEADROOM 0
#define FRAME_SHIFT 11
#define FRAME_SIZE 2048
#define NUM_DESCS 1024
#define BATCH_SIZE 16

#define FQ_NUM_DESCS 1024
#define CQ_NUM_DESCS 1024

#define DEBUG_HEXDUMP 0

typedef __u64 u64;
typedef __u32 u32;

static unsigned long prev_time;

enum benchmark_type {
	BENCH_RXDROP = 0,
	BENCH_TXONLY = 1,
	BENCH_L2FWD = 2,
};

static enum benchmark_type opt_bench = BENCH_RXDROP;
static u32 opt_xdp_flags;
static const char *opt_if = "";
static int opt_ifindex;
static int opt_queue;
static int opt_poll;
static int opt_shared_packet_buffer;
static int opt_interval = 1;
static u32 opt_xdp_bind_flags;

struct xdp_umem_uqueue {
	u32 cached_prod;
	u32 cached_cons;
	u32 mask;
	u32 size;
	u32 *producer;
	u32 *consumer;
	u64 *ring;
	void *map;
};

struct xdp_umem {
	char *frames;
	struct xdp_umem_uqueue fq;
	struct xdp_umem_uqueue cq;
	int fd;
};

struct xdp_uqueue {
	u32 cached_prod;
	u32 cached_cons;
	u32 mask;
	u32 size;
	u32 *producer;
	u32 *consumer;
	struct xdp_desc *ring;
	void *map;
};

struct xdpsock {
	struct xdp_uqueue rx;
	struct xdp_uqueue tx;
	int sfd;
	struct xdp_umem *umem;
	u32 outstanding_tx;
	unsigned long rx_npkts;
	unsigned long tx_npkts;
	unsigned long prev_rx_npkts;
	unsigned long prev_tx_npkts;
};

#define MAX_SOCKS 4
static int num_socks;
struct xdpsock *xsks[MAX_SOCKS];

static unsigned long get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000UL + ts.tv_nsec;
}

static void dump_stats(void);

#define lassert(expr)							\
	do {								\
		if (!(expr)) {						\
			fprintf(stderr, "%s:%s:%i: Assertion failed: "	\
				#expr ": errno: %d/\"%s\"\n",		\
				__FILE__, __func__, __LINE__,		\
				errno, strerror(errno));		\
			dump_stats();					\
			exit(EXIT_FAILURE);				\
		}							\
	} while (0)
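
/*
 * The producer/consumer indices are shared with the kernel, so ring
 * accesses need explicit ordering. On arm64 real DMB instructions are
 * used (load ordering on the consumer side, store ordering on the
 * producer side); on other architectures this sample falls back to a
 * compiler barrier, which is only sufficient on strongly ordered CPUs
 * such as x86.
 */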
#define barrier() __asm__ __volatile__("": : :"memory")
#ifdef __aarch64__
#define u_smp_rmb() __asm__ __volatile__("dmb ishld": : :"memory")
#define u_smp_wmb() __asm__ __volatile__("dmb ishst": : :"memory")
#else
#define u_smp_rmb() barrier()
#define u_smp_wmb() barrier()
#endif
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
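
/*
 * Canned 60-byte test frame used by the txonly benchmark: an Ethernet
 * header (type 0x0800) followed by an IPv4/UDP packet with a fixed
 * payload.
 */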
static const char pkt_data[] =
	"\x3c\xfd\xfe\x9e\x7f\x71\xec\xb1\xd7\x98\x3a\xc0\x08\x00\x45\x00"
	"\x00\x2e\x00\x00\x00\x00\x40\x11\x88\x97\x05\x08\x07\x08\xc8\x14"
	"\x1e\x04\x10\x92\x10\x92\x00\x1a\x6d\xa3\x34\x33\x1f\x69\x40\x6b"
	"\x54\x59\xb6\x14\x2d\x11\x44\xbf\xaf\xd9\xbe\xaa";
static inline u32 umem_nb_free(struct xdp_umem_uqueue *q, u32 nb)
{
	u32 free_entries = q->cached_cons - q->cached_prod;

	if (free_entries >= nb)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cached_cons = *q->consumer + q->size;

	return q->cached_cons - q->cached_prod;
}

static inline u32 xq_nb_free(struct xdp_uqueue *q, u32 ndescs)
{
	u32 free_entries = q->cached_cons - q->cached_prod;

	if (free_entries >= ndescs)
		return free_entries;

	/* Refresh the local tail pointer */
	q->cached_cons = *q->consumer + q->size;

	return q->cached_cons - q->cached_prod;
}

static inline u32 umem_nb_avail(struct xdp_umem_uqueue *q, u32 nb)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries == 0) {
		q->cached_prod = *q->producer;
		entries = q->cached_prod - q->cached_cons;
	}

	return (entries > nb) ? nb : entries;
}

static inline u32 xq_nb_avail(struct xdp_uqueue *q, u32 ndescs)
{
	u32 entries = q->cached_prod - q->cached_cons;

	if (entries == 0) {
		q->cached_prod = *q->producer;
		entries = q->cached_prod - q->cached_cons;
	}

	return (entries > ndescs) ? ndescs : entries;
}

static inline int umem_fill_to_kernel_ex(struct xdp_umem_uqueue *fq,
					 struct xdp_desc *d,
					 size_t nb)
{
	u32 i;

	if (umem_nb_free(fq, nb) < nb)
		return -ENOSPC;

	for (i = 0; i < nb; i++) {
		u32 idx = fq->cached_prod++ & fq->mask;

		fq->ring[idx] = d[i].addr;
	}

	u_smp_wmb();

	*fq->producer = fq->cached_prod;

	return 0;
}

static inline int umem_fill_to_kernel(struct xdp_umem_uqueue *fq, u64 *d,
				      size_t nb)
{
	u32 i;

	if (umem_nb_free(fq, nb) < nb)
		return -ENOSPC;

	for (i = 0; i < nb; i++) {
		u32 idx = fq->cached_prod++ & fq->mask;

		fq->ring[idx] = d[i];
	}

	u_smp_wmb();

	*fq->producer = fq->cached_prod;

	return 0;
}

static inline size_t umem_complete_from_kernel(struct xdp_umem_uqueue *cq,
					       u64 *d, size_t nb)
{
	u32 idx, i, entries = umem_nb_avail(cq, nb);

	u_smp_rmb();

	for (i = 0; i < entries; i++) {
		idx = cq->cached_cons++ & cq->mask;
		d[i] = cq->ring[idx];
	}

	if (entries > 0) {
		u_smp_wmb();

		*cq->consumer = cq->cached_cons;
	}

	return entries;
}
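
/*
 * Descriptor addresses handed back by the kernel are byte offsets into
 * the UMEM, so turning one into a pointer is a single index into the
 * frames area.
 */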
static inline void *xq_get_data(struct xdpsock *xsk, u64 addr)
{
	return &xsk->umem->frames[addr];
}

static inline int xq_enq(struct xdp_uqueue *uq,
			 const struct xdp_desc *descs,
			 unsigned int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int i;

	if (xq_nb_free(uq, ndescs) < ndescs)
		return -ENOSPC;

	for (i = 0; i < ndescs; i++) {
		u32 idx = uq->cached_prod++ & uq->mask;

		r[idx].addr = descs[i].addr;
		r[idx].len = descs[i].len;
	}

	u_smp_wmb();

	*uq->producer = uq->cached_prod;
	return 0;
}

static inline int xq_enq_tx_only(struct xdp_uqueue *uq,
				 unsigned int id, unsigned int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int i;

	if (xq_nb_free(uq, ndescs) < ndescs)
		return -ENOSPC;

	for (i = 0; i < ndescs; i++) {
		u32 idx = uq->cached_prod++ & uq->mask;

		r[idx].addr = (id + i) << FRAME_SHIFT;
		r[idx].len = sizeof(pkt_data) - 1;
	}

	u_smp_wmb();

	*uq->producer = uq->cached_prod;
	return 0;
}

static inline int xq_deq(struct xdp_uqueue *uq,
			 struct xdp_desc *descs,
			 int ndescs)
{
	struct xdp_desc *r = uq->ring;
	unsigned int idx;
	int i, entries;

	entries = xq_nb_avail(uq, ndescs);

	u_smp_rmb();

	for (i = 0; i < entries; i++) {
		idx = uq->cached_cons++ & uq->mask;
		descs[i] = r[idx];
	}

	if (entries > 0) {
		u_smp_wmb();

		*uq->consumer = uq->cached_cons;
	}

	return entries;
}
static void swap_mac_addresses(void *data)
{
	struct ether_header *eth = (struct ether_header *)data;
	struct ether_addr *src_addr = (struct ether_addr *)&eth->ether_shost;
	struct ether_addr *dst_addr = (struct ether_addr *)&eth->ether_dhost;
	struct ether_addr tmp;

	tmp = *src_addr;
	*src_addr = *dst_addr;
	*dst_addr = tmp;
}

static void hex_dump(void *pkt, size_t length, u64 addr)
{
	const unsigned char *address = (unsigned char *)pkt;
	const unsigned char *line = address;
	size_t line_size = 32;
	unsigned char c;
	char buf[32];
	int i = 0;

	if (!DEBUG_HEXDUMP)
		return;

	sprintf(buf, "addr=%llu", addr);
	printf("length = %zu\n", length);
	printf("%s | ", buf);
	while (length-- > 0) {
		printf("%02X ", *address++);
		if (!(++i % line_size) || (length == 0 && i % line_size)) {
			if (length == 0) {
				while (i++ % line_size)
					printf("__ ");
			}
			printf(" | ");	/* right close */
			while (line < address) {
				c = *line++;
				printf("%c", (c < 33 || c == 255) ? 0x2E : c);
			}
			printf("\n");
			if (length > 0)
				printf("%s | ", buf);
		}
	}
	printf("\n");
}

static size_t gen_eth_frame(char *frame)
{
	memcpy(frame, pkt_data, sizeof(pkt_data) - 1);
	return sizeof(pkt_data) - 1;
}
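
/*
 * UMEM setup follows the AF_XDP contract: register a page-aligned
 * buffer area with XDP_UMEM_REG, size the fill and completion rings
 * with setsockopt(), query the ring layout via XDP_MMAP_OFFSETS, and
 * then mmap() each ring at its well-known page offset. A fresh fill
 * ring starts out entirely free from the user side, hence
 * cached_cons = FQ_NUM_DESCS below.
 */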
static struct xdp_umem *xdp_umem_configure(int sfd)
{
	int fq_size = FQ_NUM_DESCS, cq_size = CQ_NUM_DESCS;
	struct xdp_mmap_offsets off;
	struct xdp_umem_reg mr;
	struct xdp_umem *umem;
	socklen_t optlen;
	void *bufs;

	umem = calloc(1, sizeof(*umem));
	lassert(umem);

	lassert(posix_memalign(&bufs, getpagesize(), /* PAGE_SIZE aligned */
			       NUM_FRAMES * FRAME_SIZE) == 0);

	mr.addr = (__u64)bufs;
	mr.len = NUM_FRAMES * FRAME_SIZE;
	mr.chunk_size = FRAME_SIZE;
	mr.headroom = FRAME_HEADROOM;

	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_FILL_RING, &fq_size,
			   sizeof(int)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &cq_size,
			   sizeof(int)) == 0);
	optlen = sizeof(off);
	lassert(getsockopt(sfd, SOL_XDP, XDP_MMAP_OFFSETS, &off,
			   &optlen) == 0);

	umem->fq.map = mmap(0, off.fr.desc +
			    FQ_NUM_DESCS * sizeof(u64),
			    PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_POPULATE, sfd,
			    XDP_UMEM_PGOFF_FILL_RING);
	lassert(umem->fq.map != MAP_FAILED);

	umem->fq.mask = FQ_NUM_DESCS - 1;
	umem->fq.size = FQ_NUM_DESCS;
	umem->fq.producer = umem->fq.map + off.fr.producer;
	umem->fq.consumer = umem->fq.map + off.fr.consumer;
	umem->fq.ring = umem->fq.map + off.fr.desc;
	umem->fq.cached_cons = FQ_NUM_DESCS;

	umem->cq.map = mmap(0, off.cr.desc +
			    CQ_NUM_DESCS * sizeof(u64),
			    PROT_READ | PROT_WRITE,
			    MAP_SHARED | MAP_POPULATE, sfd,
			    XDP_UMEM_PGOFF_COMPLETION_RING);
	lassert(umem->cq.map != MAP_FAILED);

	umem->cq.mask = CQ_NUM_DESCS - 1;
	umem->cq.size = CQ_NUM_DESCS;
	umem->cq.producer = umem->cq.map + off.cr.producer;
	umem->cq.consumer = umem->cq.map + off.cr.consumer;
	umem->cq.ring = umem->cq.map + off.cr.desc;

	umem->frames = bufs;
	umem->fd = sfd;

	if (opt_bench == BENCH_TXONLY) {
		int i;

		for (i = 0; i < NUM_FRAMES * FRAME_SIZE; i += FRAME_SIZE)
			(void)gen_eth_frame(&umem->frames[i]);
	}

	return umem;
}
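
/*
 * Create one AF_XDP socket: either back it with a freshly configured
 * UMEM, or (when a umem is passed in) bind with XDP_SHARED_UMEM so that
 * several sockets share the first socket's packet buffer. Each socket
 * still gets its own rx and tx descriptor rings, and a fresh UMEM's
 * fill ring is pre-populated so the kernel has frames to receive into.
 */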
static struct xdpsock *xsk_configure(struct xdp_umem *umem)
{
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	int sfd, ndescs = NUM_DESCS;
	struct xdpsock *xsk;
	bool shared = true;
	socklen_t optlen;
	u64 i;

	sfd = socket(PF_XDP, SOCK_RAW, 0);
	lassert(sfd >= 0);

	xsk = calloc(1, sizeof(*xsk));
	lassert(xsk);

	xsk->sfd = sfd;
	xsk->outstanding_tx = 0;

	if (!umem) {
		shared = false;
		xsk->umem = xdp_umem_configure(sfd);
	} else {
		xsk->umem = umem;
	}

	lassert(setsockopt(sfd, SOL_XDP, XDP_RX_RING,
			   &ndescs, sizeof(int)) == 0);
	lassert(setsockopt(sfd, SOL_XDP, XDP_TX_RING,
			   &ndescs, sizeof(int)) == 0);
	optlen = sizeof(off);
	lassert(getsockopt(sfd, SOL_XDP, XDP_MMAP_OFFSETS, &off,
			   &optlen) == 0);

	/* Rx */
	xsk->rx.map = mmap(NULL,
			   off.rx.desc +
			   NUM_DESCS * sizeof(struct xdp_desc),
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_POPULATE, sfd,
			   XDP_PGOFF_RX_RING);
	lassert(xsk->rx.map != MAP_FAILED);

	if (!shared) {
		for (i = 0; i < NUM_DESCS * FRAME_SIZE; i += FRAME_SIZE)
			lassert(umem_fill_to_kernel(&xsk->umem->fq, &i, 1)
				== 0);
	}

	/* Tx */
	xsk->tx.map = mmap(NULL,
			   off.tx.desc +
			   NUM_DESCS * sizeof(struct xdp_desc),
			   PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_POPULATE, sfd,
			   XDP_PGOFF_TX_RING);
	lassert(xsk->tx.map != MAP_FAILED);

	xsk->rx.mask = NUM_DESCS - 1;
	xsk->rx.size = NUM_DESCS;
	xsk->rx.producer = xsk->rx.map + off.rx.producer;
	xsk->rx.consumer = xsk->rx.map + off.rx.consumer;
	xsk->rx.ring = xsk->rx.map + off.rx.desc;

	xsk->tx.mask = NUM_DESCS - 1;
	xsk->tx.size = NUM_DESCS;
	xsk->tx.producer = xsk->tx.map + off.tx.producer;
	xsk->tx.consumer = xsk->tx.map + off.tx.consumer;
	xsk->tx.ring = xsk->tx.map + off.tx.desc;
	xsk->tx.cached_cons = NUM_DESCS;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = opt_ifindex;
	sxdp.sxdp_queue_id = opt_queue;

	if (shared) {
		sxdp.sxdp_flags = XDP_SHARED_UMEM;
		sxdp.sxdp_shared_umem_fd = umem->fd;
	} else {
		sxdp.sxdp_flags = opt_xdp_bind_flags;
	}

	lassert(bind(sfd, (struct sockaddr *)&sxdp, sizeof(sxdp)) == 0);

	return xsk;
}
static void print_benchmark(bool running)
{
	const char *bench_str = "INVALID";

	if (opt_bench == BENCH_RXDROP)
		bench_str = "rxdrop";
	else if (opt_bench == BENCH_TXONLY)
		bench_str = "txonly";
	else if (opt_bench == BENCH_L2FWD)
		bench_str = "l2fwd";

	printf("%s:%d %s ", opt_if, opt_queue, bench_str);
	if (opt_xdp_flags & XDP_FLAGS_SKB_MODE)
		printf("xdp-skb ");
	else if (opt_xdp_flags & XDP_FLAGS_DRV_MODE)
		printf("xdp-drv ");
	else
		printf(" ");

	if (opt_poll)
		printf("poll() ");

	if (running) {
		printf("running...");
		fflush(stdout);
	}
}

static void dump_stats(void)
{
	unsigned long now = get_nsecs();
	long dt = now - prev_time;
	int i;

	prev_time = now;

	for (i = 0; i < num_socks; i++) {
		char *fmt = "%-15s %'-11.0f %'-11lu\n";
		double rx_pps, tx_pps;

		rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) *
			 1000000000. / dt;
		tx_pps = (xsks[i]->tx_npkts - xsks[i]->prev_tx_npkts) *
			 1000000000. / dt;

		printf("\n sock%d@", i);
		print_benchmark(false);
		printf("\n");
		printf("%-15s %-11s %-11s %-11.2f\n", "", "pps", "pkts",
		       dt / 1000000000.);
		printf(fmt, "rx", rx_pps, xsks[i]->rx_npkts);
		printf(fmt, "tx", tx_pps, xsks[i]->tx_npkts);

		xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts;
		xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts;
	}
}
static void *poller(void *arg)
{
	(void)arg;
	for (;;) {
		sleep(opt_interval);
		dump_stats();
	}

	return NULL;
}

static void int_exit(int sig)
{
	(void)sig;
	dump_stats();
	bpf_set_link_xdp_fd(opt_ifindex, -1, opt_xdp_flags);
	exit(EXIT_SUCCESS);
}

static struct option long_options[] = {
	{"rxdrop", no_argument, 0, 'r'},
	{"txonly", no_argument, 0, 't'},
	{"l2fwd", no_argument, 0, 'l'},
	{"interface", required_argument, 0, 'i'},
	{"queue", required_argument, 0, 'q'},
	{"poll", no_argument, 0, 'p'},
	{"shared-buffer", no_argument, 0, 's'},
	{"xdp-skb", no_argument, 0, 'S'},
	{"xdp-native", no_argument, 0, 'N'},
	{"interval", required_argument, 0, 'n'},
	{0, 0, 0, 0}
};
static void usage(const char *prog)
{
	const char *str =
		"  Usage: %s [OPTIONS]\n"
		"  Options:\n"
		"  -r, --rxdrop		Discard all incoming packets (default)\n"
		"  -t, --txonly		Only send packets\n"
		"  -l, --l2fwd		MAC swap L2 forwarding\n"
		"  -i, --interface=n	Run on interface n\n"
		"  -q, --queue=n	Use queue n (default 0)\n"
		"  -p, --poll		Use poll syscall\n"
		"  -s, --shared-buffer	Use shared packet buffer\n"
		"  -S, --xdp-skb	Use XDP skb (generic) mode\n"
		"  -N, --xdp-native	Enforce XDP native mode\n"
		"  -n, --interval=n	Specify statistics update interval (default 1 sec).\n"
		"\n";
	fprintf(stderr, str, prog);
	exit(EXIT_FAILURE);
}
static void parse_command_line(int argc, char **argv)
{
	int option_index, c;

	opterr = 0;

	for (;;) {
		c = getopt_long(argc, argv, "rtli:q:psSNn:", long_options,
				&option_index);
		if (c == -1)
			break;

		switch (c) {
		case 'r':
			opt_bench = BENCH_RXDROP;
			break;
		case 't':
			opt_bench = BENCH_TXONLY;
			break;
		case 'l':
			opt_bench = BENCH_L2FWD;
			break;
		case 'i':
			opt_if = optarg;
			break;
		case 'q':
			opt_queue = atoi(optarg);
			break;
		case 's':
			opt_shared_packet_buffer = 1;
			break;
		case 'p':
			opt_poll = 1;
			break;
		case 'S':
			opt_xdp_flags |= XDP_FLAGS_SKB_MODE;
			opt_xdp_bind_flags |= XDP_COPY;
			break;
		case 'N':
			opt_xdp_flags |= XDP_FLAGS_DRV_MODE;
			break;
		case 'n':
			opt_interval = atoi(optarg);
			break;
		default:
			usage(basename(argv[0]));
		}
	}

	opt_ifindex = if_nametoindex(opt_if);
	if (!opt_ifindex) {
		fprintf(stderr, "ERROR: interface \"%s\" does not exist\n",
			opt_if);
		usage(basename(argv[0]));
	}
}
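
/*
 * Poke the kernel to start transmitting: a zero-length sendto() on an
 * AF_XDP socket triggers processing of the tx ring. ENOBUFS, EAGAIN and
 * EBUSY just mean the kernel is temporarily out of resources or still
 * busy with the previous kick, so they are treated as non-fatal.
 */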
static void kick_tx(int fd)
{
	int ret;

	ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
	if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY)
		return;
	lassert(0);
}
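
/*
 * TX completion handling. For l2fwd every completed frame is recycled
 * straight back into the fill ring so it can be received again; for
 * txonly the completions are simply drained and counted, since the
 * frame contents never change.
 */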
static inline void complete_tx_l2fwd(struct xdpsock *xsk)
{
	u64 descs[BATCH_SIZE];
	unsigned int rcvd;
	size_t ndescs;

	if (!xsk->outstanding_tx)
		return;

	kick_tx(xsk->sfd);
	ndescs = (xsk->outstanding_tx > BATCH_SIZE) ? BATCH_SIZE :
		 xsk->outstanding_tx;

	/* re-add completed Tx buffers */
	rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, ndescs);
	if (rcvd > 0) {
		umem_fill_to_kernel(&xsk->umem->fq, descs, rcvd);
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}

static inline void complete_tx_only(struct xdpsock *xsk)
{
	u64 descs[BATCH_SIZE];
	unsigned int rcvd;

	if (!xsk->outstanding_tx)
		return;

	kick_tx(xsk->sfd);

	rcvd = umem_complete_from_kernel(&xsk->umem->cq, descs, BATCH_SIZE);
	if (rcvd > 0) {
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
}
static void rx_drop(struct xdpsock *xsk)
{
	struct xdp_desc descs[BATCH_SIZE];
	unsigned int rcvd, i;

	rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
	if (!rcvd)
		return;

	for (i = 0; i < rcvd; i++) {
		char *pkt = xq_get_data(xsk, descs[i].addr);

		hex_dump(pkt, descs[i].len, descs[i].addr);
	}

	xsk->rx_npkts += rcvd;

	umem_fill_to_kernel_ex(&xsk->umem->fq, descs, rcvd);
}
static void rx_drop_all(void)
{
	struct pollfd fds[MAX_SOCKS + 1];
	int i, ret, timeout = 1000; /* 1 second */

	memset(fds, 0, sizeof(fds));

	for (i = 0; i < num_socks; i++) {
		fds[i].fd = xsks[i]->sfd;
		fds[i].events = POLLIN;
	}

	for (;;) {
		if (opt_poll) {
			/* wait on every configured socket, not just the first */
			ret = poll(fds, num_socks, timeout);
			if (ret <= 0)
				continue;
		}

		for (i = 0; i < num_socks; i++)
			rx_drop(xsks[i]);
	}
}
static void tx_only(struct xdpsock *xsk)
{
	int timeout, ret, nfds = 1;
	struct pollfd fds[nfds + 1];
	unsigned int idx = 0;

	memset(fds, 0, sizeof(fds));
	fds[0].fd = xsk->sfd;
	fds[0].events = POLLOUT;
	timeout = 1000; /* 1 second */

	for (;;) {
		if (opt_poll) {
			ret = poll(fds, nfds, timeout);
			if (ret <= 0)
				continue;

			if (fds[0].fd != xsk->sfd ||
			    !(fds[0].revents & POLLOUT))
				continue;
		}

		if (xq_nb_free(&xsk->tx, BATCH_SIZE) >= BATCH_SIZE) {
			lassert(xq_enq_tx_only(&xsk->tx, idx, BATCH_SIZE) == 0);

			xsk->outstanding_tx += BATCH_SIZE;
			idx += BATCH_SIZE;
			idx %= NUM_FRAMES;
		}

		complete_tx_only(xsk);
	}
}
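
/*
 * l2fwd busy-polls: it spins on the rx ring (reaping tx completions
 * while it waits), swaps the Ethernet source and destination MACs in
 * place, and re-enqueues the very same umem frames on the tx ring, so
 * packets are forwarded without a single copy.
 */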
static void l2fwd(struct xdpsock *xsk)
{
	for (;;) {
		struct xdp_desc descs[BATCH_SIZE];
		unsigned int rcvd, i;
		int ret;

		for (;;) {
			complete_tx_l2fwd(xsk);
			rcvd = xq_deq(&xsk->rx, descs, BATCH_SIZE);
			if (rcvd > 0)
				break;
		}

		for (i = 0; i < rcvd; i++) {
			char *pkt = xq_get_data(xsk, descs[i].addr);

			swap_mac_addresses(pkt);

			hex_dump(pkt, descs[i].len, descs[i].addr);
		}

		xsk->rx_npkts += rcvd;

		ret = xq_enq(&xsk->tx, descs, rcvd);
		lassert(ret == 0);
		xsk->outstanding_tx += rcvd;
	}
}
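
/*
 * main(): raise the locked-memory limit (the umem is pinned), load the
 * companion XDP program (<prog>_kern.o) and attach it to the chosen
 * interface, create the AF_XDP socket(s) and publish their fds in
 * xsks_map so the XDP program can redirect packets into them, then run
 * the selected benchmark with a side thread printing statistics.
 * Building with -DRR_LB=1 creates MAX_SOCKS sockets sharing one umem,
 * which the companion kernel program is expected to load-balance
 * across in round-robin fashion.
 */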
int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	struct bpf_prog_load_attr prog_load_attr = {
		.prog_type = BPF_PROG_TYPE_XDP,
	};
	int prog_fd, qidconf_map, xsks_map;
	struct bpf_object *obj;
	char xdp_filename[256];
	struct bpf_map *map;
	int i, ret, key = 0;
	pthread_t pt;

	parse_command_line(argc, argv);

	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		fprintf(stderr, "ERROR: setrlimit(RLIMIT_MEMLOCK) \"%s\"\n",
			strerror(errno));
		exit(EXIT_FAILURE);
	}

	snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv[0]);
	prog_load_attr.file = xdp_filename;

	if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
		exit(EXIT_FAILURE);
	if (prog_fd < 0) {
		fprintf(stderr, "ERROR: no program found: %s\n",
			strerror(prog_fd));
		exit(EXIT_FAILURE);
	}

	map = bpf_object__find_map_by_name(obj, "qidconf_map");
	qidconf_map = bpf_map__fd(map);
	if (qidconf_map < 0) {
		fprintf(stderr, "ERROR: no qidconf map found: %s\n",
			strerror(qidconf_map));
		exit(EXIT_FAILURE);
	}

	map = bpf_object__find_map_by_name(obj, "xsks_map");
	xsks_map = bpf_map__fd(map);
	if (xsks_map < 0) {
		fprintf(stderr, "ERROR: no xsks map found: %s\n",
			strerror(xsks_map));
		exit(EXIT_FAILURE);
	}

	if (bpf_set_link_xdp_fd(opt_ifindex, prog_fd, opt_xdp_flags) < 0) {
		fprintf(stderr, "ERROR: link set xdp fd failed\n");
		exit(EXIT_FAILURE);
	}

	ret = bpf_map_update_elem(qidconf_map, &key, &opt_queue, 0);
	if (ret) {
		fprintf(stderr, "ERROR: bpf_map_update_elem qidconf\n");
		exit(EXIT_FAILURE);
	}

	/* Create sockets... */
	xsks[num_socks++] = xsk_configure(NULL);

#if RR_LB
	for (i = 0; i < MAX_SOCKS - 1; i++)
		xsks[num_socks++] = xsk_configure(xsks[0]->umem);
#endif

	/* ...and insert them into the map. */
	for (i = 0; i < num_socks; i++) {
		key = i;
		ret = bpf_map_update_elem(xsks_map, &key, &xsks[i]->sfd, 0);
		if (ret) {
			fprintf(stderr, "ERROR: bpf_map_update_elem %d\n", i);
			exit(EXIT_FAILURE);
		}
	}

	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);
	signal(SIGABRT, int_exit);

	setlocale(LC_ALL, "");

	ret = pthread_create(&pt, NULL, poller, NULL);
	lassert(ret == 0);

	prev_time = get_nsecs();

	if (opt_bench == BENCH_RXDROP)
		rx_drop_all();
	else if (opt_bench == BENCH_TXONLY)
		tx_only(xsks[0]);
	else
		l2fwd(xsks[0]);

	return 0;
}