/* xdp_monitor_user.c */
/* SPDX-License-Identifier: GPL-2.0
 * Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc.
 */

/* Tool description: printed by usage() and as a header by stats_poll() */
static const char *__doc__=
"XDP monitor tool, based on tracepoints\n"
;

/* Extra notice printed when running in the default errors-only mode */
static const char *__doc_err_only__=
" NOTICE: Only tracking XDP redirect errors\n"
" Enable TX success stats via '--stats'\n"
" (which comes with a per packet processing overhead)\n"
;
  12. #include <errno.h>
  13. #include <stdio.h>
  14. #include <stdlib.h>
  15. #include <stdbool.h>
  16. #include <stdint.h>
  17. #include <string.h>
  18. #include <ctype.h>
  19. #include <unistd.h>
  20. #include <locale.h>
  21. #include <sys/resource.h>
  22. #include <getopt.h>
  23. #include <net/if.h>
  24. #include <time.h>
  25. #include <bpf/bpf.h>
  26. #include "bpf_load.h"
  27. #include "bpf_util.h"
/* Runtime toggles, set while parsing the command line in main() */
static int verbose = 1;		/* print doc/map headers in stats_poll() */
static bool debug = false;	/* -D / --debug: dump prog/map/event fds */

/* getopt_long() option table; usage() also walks it for --help output */
static const struct option long_options[] = {
	{"help",	no_argument,		NULL, 'h' },
	{"debug",	no_argument,		NULL, 'D' },
	{"stats",	no_argument,		NULL, 'S' },
	{"sec",		required_argument,	NULL, 's' },
	{0, 0, NULL, 0 }
};

/* C standard specifies two constants, EXIT_SUCCESS(0) and EXIT_FAILURE(1) */
#define EXIT_FAIL_MEM 5
  39. static void usage(char *argv[])
  40. {
  41. int i;
  42. printf("\nDOCUMENTATION:\n%s\n", __doc__);
  43. printf("\n");
  44. printf(" Usage: %s (options-see-below)\n",
  45. argv[0]);
  46. printf(" Listing options:\n");
  47. for (i = 0; long_options[i].name != 0; i++) {
  48. printf(" --%-15s", long_options[i].name);
  49. if (long_options[i].flag != NULL)
  50. printf(" flag (internal value:%d)",
  51. *long_options[i].flag);
  52. else
  53. printf("short-option: -%c",
  54. long_options[i].val);
  55. printf("\n");
  56. }
  57. printf("\n");
  58. }
  59. #define NANOSEC_PER_SEC 1000000000 /* 10^9 */
  60. static __u64 gettime(void)
  61. {
  62. struct timespec t;
  63. int res;
  64. res = clock_gettime(CLOCK_MONOTONIC, &t);
  65. if (res < 0) {
  66. fprintf(stderr, "Error with gettimeofday! (%i)\n", res);
  67. exit(EXIT_FAILURE);
  68. }
  69. return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
  70. }
  71. enum {
  72. REDIR_SUCCESS = 0,
  73. REDIR_ERROR = 1,
  74. };
  75. #define REDIR_RES_MAX 2
  76. static const char *redir_names[REDIR_RES_MAX] = {
  77. [REDIR_SUCCESS] = "Success",
  78. [REDIR_ERROR] = "Error",
  79. };
  80. static const char *err2str(int err)
  81. {
  82. if (err < REDIR_RES_MAX)
  83. return redir_names[err];
  84. return NULL;
  85. }
  86. /* enum xdp_action */
  87. #define XDP_UNKNOWN XDP_REDIRECT + 1
  88. #define XDP_ACTION_MAX (XDP_UNKNOWN + 1)
  89. static const char *xdp_action_names[XDP_ACTION_MAX] = {
  90. [XDP_ABORTED] = "XDP_ABORTED",
  91. [XDP_DROP] = "XDP_DROP",
  92. [XDP_PASS] = "XDP_PASS",
  93. [XDP_TX] = "XDP_TX",
  94. [XDP_REDIRECT] = "XDP_REDIRECT",
  95. [XDP_UNKNOWN] = "XDP_UNKNOWN",
  96. };
  97. static const char *action2str(int action)
  98. {
  99. if (action < XDP_ACTION_MAX)
  100. return xdp_action_names[action];
  101. return NULL;
  102. }
/* Common stats data record shared with _kern.c */
struct datarec {
	__u64 processed;
	__u64 dropped;
	__u64 info;
	__u64 err;
};
#define MAX_CPUS 64

/* Userspace structs for collection of stats from maps */
struct record {
	__u64 timestamp;	/* gettime() taken when the map was read */
	struct datarec total;	/* sum over all CPUs */
	struct datarec *cpu;	/* per-CPU values; nr_cpus entries */
};

struct u64rec {
	__u64 processed;
};

struct record_u64 {
	/* record for _kern side __u64 values */
	__u64 timestamp;	/* gettime() taken when the map was read */
	struct u64rec total;	/* sum over all CPUs */
	struct u64rec *cpu;	/* per-CPU values; nr_cpus entries */
};

/* One snapshot of every tracepoint counter this tool tracks; two of
 * these are kept and swapped so deltas can be computed per interval.
 */
struct stats_record {
	struct record_u64 xdp_redirect[REDIR_RES_MAX];
	struct record_u64 xdp_exception[XDP_ACTION_MAX];
	struct record xdp_cpumap_kthread;
	struct record xdp_cpumap_enqueue[MAX_CPUS];
	struct record xdp_devmap_xmit;
};
  133. static bool map_collect_record(int fd, __u32 key, struct record *rec)
  134. {
  135. /* For percpu maps, userspace gets a value per possible CPU */
  136. unsigned int nr_cpus = bpf_num_possible_cpus();
  137. struct datarec values[nr_cpus];
  138. __u64 sum_processed = 0;
  139. __u64 sum_dropped = 0;
  140. __u64 sum_info = 0;
  141. __u64 sum_err = 0;
  142. int i;
  143. if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
  144. fprintf(stderr,
  145. "ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
  146. return false;
  147. }
  148. /* Get time as close as possible to reading map contents */
  149. rec->timestamp = gettime();
  150. /* Record and sum values from each CPU */
  151. for (i = 0; i < nr_cpus; i++) {
  152. rec->cpu[i].processed = values[i].processed;
  153. sum_processed += values[i].processed;
  154. rec->cpu[i].dropped = values[i].dropped;
  155. sum_dropped += values[i].dropped;
  156. rec->cpu[i].info = values[i].info;
  157. sum_info += values[i].info;
  158. rec->cpu[i].err = values[i].err;
  159. sum_err += values[i].err;
  160. }
  161. rec->total.processed = sum_processed;
  162. rec->total.dropped = sum_dropped;
  163. rec->total.info = sum_info;
  164. rec->total.err = sum_err;
  165. return true;
  166. }
  167. static bool map_collect_record_u64(int fd, __u32 key, struct record_u64 *rec)
  168. {
  169. /* For percpu maps, userspace gets a value per possible CPU */
  170. unsigned int nr_cpus = bpf_num_possible_cpus();
  171. struct u64rec values[nr_cpus];
  172. __u64 sum_total = 0;
  173. int i;
  174. if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
  175. fprintf(stderr,
  176. "ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
  177. return false;
  178. }
  179. /* Get time as close as possible to reading map contents */
  180. rec->timestamp = gettime();
  181. /* Record and sum values from each CPU */
  182. for (i = 0; i < nr_cpus; i++) {
  183. rec->cpu[i].processed = values[i].processed;
  184. sum_total += values[i].processed;
  185. }
  186. rec->total.processed = sum_total;
  187. return true;
  188. }
  189. static double calc_period(struct record *r, struct record *p)
  190. {
  191. double period_ = 0;
  192. __u64 period = 0;
  193. period = r->timestamp - p->timestamp;
  194. if (period > 0)
  195. period_ = ((double) period / NANOSEC_PER_SEC);
  196. return period_;
  197. }
  198. static double calc_period_u64(struct record_u64 *r, struct record_u64 *p)
  199. {
  200. double period_ = 0;
  201. __u64 period = 0;
  202. period = r->timestamp - p->timestamp;
  203. if (period > 0)
  204. period_ = ((double) period / NANOSEC_PER_SEC);
  205. return period_;
  206. }
  207. static double calc_pps(struct datarec *r, struct datarec *p, double period)
  208. {
  209. __u64 packets = 0;
  210. double pps = 0;
  211. if (period > 0) {
  212. packets = r->processed - p->processed;
  213. pps = packets / period;
  214. }
  215. return pps;
  216. }
  217. static double calc_pps_u64(struct u64rec *r, struct u64rec *p, double period)
  218. {
  219. __u64 packets = 0;
  220. double pps = 0;
  221. if (period > 0) {
  222. packets = r->processed - p->processed;
  223. pps = packets / period;
  224. }
  225. return pps;
  226. }
  227. static double calc_drop(struct datarec *r, struct datarec *p, double period)
  228. {
  229. __u64 packets = 0;
  230. double pps = 0;
  231. if (period > 0) {
  232. packets = r->dropped - p->dropped;
  233. pps = packets / period;
  234. }
  235. return pps;
  236. }
  237. static double calc_info(struct datarec *r, struct datarec *p, double period)
  238. {
  239. __u64 packets = 0;
  240. double pps = 0;
  241. if (period > 0) {
  242. packets = r->info - p->info;
  243. pps = packets / period;
  244. }
  245. return pps;
  246. }
  247. static double calc_err(struct datarec *r, struct datarec *p, double period)
  248. {
  249. __u64 packets = 0;
  250. double pps = 0;
  251. if (period > 0) {
  252. packets = r->err - p->err;
  253. pps = packets / period;
  254. }
  255. return pps;
  256. }
/* Print one interval's worth of statistics, computed as deltas
 * between the current (@stats_rec) and previous (@stats_prev)
 * snapshots.
 * @err_only: skip the REDIR_SUCCESS row of the redirect section.
 * Rows with a zero rate are suppressed, except the XDP_REDIRECT and
 * cpumap-kthread/devmap-xmit "total" lines, which always print.
 */
static void stats_print(struct stats_record *stats_rec,
			struct stats_record *stats_prev,
			bool err_only)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	int rec_i = 0, i, to_cpu;
	double t = 0, pps = 0;

	/* Header */
	printf("%-15s %-7s %-12s %-12s %-9s\n",
	       "XDP-event", "CPU:to", "pps", "drop-pps", "extra-info");

	/* tracepoint: xdp:xdp_redirect_* */
	if (err_only)
		rec_i = REDIR_ERROR;

	for (; rec_i < REDIR_RES_MAX; rec_i++) {
		struct record_u64 *rec, *prev;
		char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %s\n";
		char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %s\n";

		rec = &stats_rec->xdp_redirect[rec_i];
		prev = &stats_prev->xdp_redirect[rec_i];
		t = calc_period_u64(rec, prev);

		for (i = 0; i < nr_cpus; i++) {
			struct u64rec *r = &rec->cpu[i];
			struct u64rec *p = &prev->cpu[i];

			pps = calc_pps_u64(r, p, t);
			/* success rate goes in "pps", errors in "drop-pps" */
			if (pps > 0)
				printf(fmt1, "XDP_REDIRECT", i,
				       rec_i ? 0.0: pps, rec_i ? pps : 0.0,
				       err2str(rec_i));
		}
		pps = calc_pps_u64(&rec->total, &prev->total, t);
		printf(fmt2, "XDP_REDIRECT", "total",
		       rec_i ? 0.0: pps, rec_i ? pps : 0.0, err2str(rec_i));
	}

	/* tracepoint: xdp:xdp_exception */
	for (rec_i = 0; rec_i < XDP_ACTION_MAX; rec_i++) {
		struct record_u64 *rec, *prev;
		char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %s\n";
		char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %s\n";

		rec = &stats_rec->xdp_exception[rec_i];
		prev = &stats_prev->xdp_exception[rec_i];
		t = calc_period_u64(rec, prev);

		for (i = 0; i < nr_cpus; i++) {
			struct u64rec *r = &rec->cpu[i];
			struct u64rec *p = &prev->cpu[i];

			pps = calc_pps_u64(r, p, t);
			if (pps > 0)
				printf(fmt1, "Exception", i,
				       0.0, pps, action2str(rec_i));
		}
		pps = calc_pps_u64(&rec->total, &prev->total, t);
		if (pps > 0)
			printf(fmt2, "Exception", "total",
			       0.0, pps, action2str(rec_i));
	}

	/* cpumap enqueue stats */
	for (to_cpu = 0; to_cpu < MAX_CPUS; to_cpu++) {
		char *fmt1 = "%-15s %3d:%-3d %'-12.0f %'-12.0f %'-10.2f %s\n";
		char *fmt2 = "%-15s %3s:%-3d %'-12.0f %'-12.0f %'-10.2f %s\n";
		struct record *rec, *prev;
		char *info_str = "";
		double drop, info;

		rec = &stats_rec->xdp_cpumap_enqueue[to_cpu];
		prev = &stats_prev->xdp_cpumap_enqueue[to_cpu];
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps = calc_pps(r, p, t);
			drop = calc_drop(r, p, t);
			info = calc_info(r, p, t);
			if (info > 0) {
				info_str = "bulk-average";
				info = pps / info; /* calc average bulk size */
			}
			if (pps > 0)
				printf(fmt1, "cpumap-enqueue",
				       i, to_cpu, pps, drop, info, info_str);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		if (pps > 0) {
			drop = calc_drop(&rec->total, &prev->total, t);
			info = calc_info(&rec->total, &prev->total, t);
			if (info > 0) {
				info_str = "bulk-average";
				info = pps / info; /* calc average bulk size */
			}
			printf(fmt2, "cpumap-enqueue",
			       "sum", to_cpu, pps, drop, info, info_str);
		}
	}

	/* cpumap kthread stats */
	{
		char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %'-10.0f %s\n";
		char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %'-10.0f %s\n";
		struct record *rec, *prev;
		double drop, info;
		char *i_str = "";

		rec = &stats_rec->xdp_cpumap_kthread;
		prev = &stats_prev->xdp_cpumap_kthread;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps = calc_pps(r, p, t);
			drop = calc_drop(r, p, t);
			info = calc_info(r, p, t);
			if (info > 0)
				i_str = "sched";
			if (pps > 0 || drop > 0)
				printf(fmt1, "cpumap-kthread",
				       i, pps, drop, info, i_str);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop(&rec->total, &prev->total, t);
		info = calc_info(&rec->total, &prev->total, t);
		if (info > 0)
			i_str = "sched-sum";
		printf(fmt2, "cpumap-kthread", "total", pps, drop, info, i_str);
	}

	/* devmap ndo_xdp_xmit stats */
	{
		char *fmt1 = "%-15s %-7d %'-12.0f %'-12.0f %'-10.2f %s %s\n";
		char *fmt2 = "%-15s %-7s %'-12.0f %'-12.0f %'-10.2f %s %s\n";
		struct record *rec, *prev;
		double drop, info, err;
		char *i_str = "";
		char *err_str = "";

		rec = &stats_rec->xdp_devmap_xmit;
		prev = &stats_prev->xdp_devmap_xmit;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps = calc_pps(r, p, t);
			drop = calc_drop(r, p, t);
			info = calc_info(r, p, t);
			err = calc_err(r, p, t);
			if (info > 0) {
				i_str = "bulk-average";
				info = (pps+drop) / info; /* calc avg bulk */
			}
			if (err > 0)
				err_str = "drv-err";
			if (pps > 0 || drop > 0)
				printf(fmt1, "devmap-xmit",
				       i, pps, drop, info, i_str, err_str);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop(&rec->total, &prev->total, t);
		info = calc_info(&rec->total, &prev->total, t);
		err = calc_err(&rec->total, &prev->total, t);
		if (info > 0) {
			i_str = "bulk-average";
			info = (pps+drop) / info; /* calc avg bulk */
		}
		if (err > 0)
			err_str = "drv-err";
		printf(fmt2, "devmap-xmit", "total", pps, drop,
		       info, i_str, err_str);
	}

	printf("\n");
}
/* Fill @rec with a fresh snapshot of every tracepoint counter map.
 * The hard-coded map_data[] indices must match the map definition
 * order in the _kern.c object loaded by load_bpf_file().
 * Lookup failures are reported by the collect helpers but not
 * propagated; this always returns true.
 */
static bool stats_collect(struct stats_record *rec)
{
	int fd;
	int i;

	/* TODO: Detect if someone unloaded the perf event_fd's, as
	 * this can happen by someone running perf-record -e
	 */

	fd = map_data[0].fd; /* map0: redirect_err_cnt */
	for (i = 0; i < REDIR_RES_MAX; i++)
		map_collect_record_u64(fd, i, &rec->xdp_redirect[i]);

	fd = map_data[1].fd; /* map1: exception_cnt */
	for (i = 0; i < XDP_ACTION_MAX; i++) {
		map_collect_record_u64(fd, i, &rec->xdp_exception[i]);
	}

	fd = map_data[2].fd; /* map2: cpumap_enqueue_cnt */
	for (i = 0; i < MAX_CPUS; i++)
		map_collect_record(fd, i, &rec->xdp_cpumap_enqueue[i]);

	fd = map_data[3].fd; /* map3: cpumap_kthread_cnt */
	map_collect_record(fd, 0, &rec->xdp_cpumap_kthread);

	fd = map_data[4].fd; /* map4: devmap_xmit_cnt */
	map_collect_record(fd, 0, &rec->xdp_devmap_xmit);

	return true;
}
  442. static void *alloc_rec_per_cpu(int record_size)
  443. {
  444. unsigned int nr_cpus = bpf_num_possible_cpus();
  445. void *array;
  446. size_t size;
  447. size = record_size * nr_cpus;
  448. array = malloc(size);
  449. memset(array, 0, size);
  450. if (!array) {
  451. fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
  452. exit(EXIT_FAIL_MEM);
  453. }
  454. return array;
  455. }
  456. static struct stats_record *alloc_stats_record(void)
  457. {
  458. struct stats_record *rec;
  459. int rec_sz;
  460. int i;
  461. /* Alloc main stats_record structure */
  462. rec = malloc(sizeof(*rec));
  463. memset(rec, 0, sizeof(*rec));
  464. if (!rec) {
  465. fprintf(stderr, "Mem alloc error\n");
  466. exit(EXIT_FAIL_MEM);
  467. }
  468. /* Alloc stats stored per CPU for each record */
  469. rec_sz = sizeof(struct u64rec);
  470. for (i = 0; i < REDIR_RES_MAX; i++)
  471. rec->xdp_redirect[i].cpu = alloc_rec_per_cpu(rec_sz);
  472. for (i = 0; i < XDP_ACTION_MAX; i++)
  473. rec->xdp_exception[i].cpu = alloc_rec_per_cpu(rec_sz);
  474. rec_sz = sizeof(struct datarec);
  475. rec->xdp_cpumap_kthread.cpu = alloc_rec_per_cpu(rec_sz);
  476. rec->xdp_devmap_xmit.cpu = alloc_rec_per_cpu(rec_sz);
  477. for (i = 0; i < MAX_CPUS; i++)
  478. rec->xdp_cpumap_enqueue[i].cpu = alloc_rec_per_cpu(rec_sz);
  479. return rec;
  480. }
  481. static void free_stats_record(struct stats_record *r)
  482. {
  483. int i;
  484. for (i = 0; i < REDIR_RES_MAX; i++)
  485. free(r->xdp_redirect[i].cpu);
  486. for (i = 0; i < XDP_ACTION_MAX; i++)
  487. free(r->xdp_exception[i].cpu);
  488. free(r->xdp_cpumap_kthread.cpu);
  489. free(r->xdp_devmap_xmit.cpu);
  490. for (i = 0; i < MAX_CPUS; i++)
  491. free(r->xdp_cpumap_enqueue[i].cpu);
  492. free(r);
  493. }
  494. /* Pointer swap trick */
  495. static inline void swap(struct stats_record **a, struct stats_record **b)
  496. {
  497. struct stats_record *tmp;
  498. tmp = *a;
  499. *a = *b;
  500. *b = tmp;
  501. }
/* Main reporting loop: snapshot all maps, print deltas, sleep
 * @interval seconds, repeat forever.
 * @err_only: print the errors-only notice and restrict redirect rows.
 * NOTE(review): the while(1) never exits, so the free_stats_record()
 * calls below it are unreachable; they document ownership only.
 */
static void stats_poll(int interval, bool err_only)
{
	struct stats_record *rec, *prev;

	rec = alloc_stats_record();
	prev = alloc_stats_record();
	stats_collect(rec);	/* baseline snapshot for the first delta */

	if (err_only)
		printf("\n%s\n", __doc_err_only__);

	/* Trick to pretty printf with thousands separators use %' */
	setlocale(LC_NUMERIC, "en_US");

	/* Header */
	if (verbose)
		printf("\n%s", __doc__);

	/* TODO Need more advanced stats on error types */
	if (verbose) {
		printf(" - Stats map0: %s\n", map_data[0].name);
		printf(" - Stats map1: %s\n", map_data[1].name);
		printf("\n");
	}
	fflush(stdout);

	while (1) {
		swap(&prev, &rec);	/* current snapshot becomes previous */
		stats_collect(rec);
		stats_print(rec, prev, err_only);
		fflush(stdout);
		sleep(interval);
	}

	/* NOTE(review): unreachable — the loop above never terminates */
	free_stats_record(rec);
	free_stats_record(prev);
}
  532. static void print_bpf_prog_info(void)
  533. {
  534. int i;
  535. /* Prog info */
  536. printf("Loaded BPF prog have %d bpf program(s)\n", prog_cnt);
  537. for (i = 0; i < prog_cnt; i++) {
  538. printf(" - prog_fd[%d] = fd(%d)\n", i, prog_fd[i]);
  539. }
  540. /* Maps info */
  541. printf("Loaded BPF prog have %d map(s)\n", map_data_count);
  542. for (i = 0; i < map_data_count; i++) {
  543. char *name = map_data[i].name;
  544. int fd = map_data[i].fd;
  545. printf(" - map_data[%d] = fd(%d) name:%s\n", i, fd, name);
  546. }
  547. /* Event info */
  548. printf("Searching for (max:%d) event file descriptor(s)\n", prog_cnt);
  549. for (i = 0; i < prog_cnt; i++) {
  550. if (event_fd[i] != -1)
  551. printf(" - event_fd[%d] = fd(%d)\n", i, event_fd[i]);
  552. }
  553. }
/* Entry point: parse options, raise RLIMIT_MEMLOCK, load the BPF
 * object "<argv0>_kern.o", optionally close the success-path
 * tracepoint fds, then poll and print stats forever via stats_poll().
 */
int main(int argc, char **argv)
{
	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
	int longindex = 0, opt;
	int ret = EXIT_SUCCESS;
	char bpf_obj_file[256];

	/* Default settings: */
	bool errors_only = true;	/* --stats / -S turns this off */
	int interval = 2;		/* seconds between reports */

	/* BPF object is expected next to this binary: <argv0>_kern.o */
	snprintf(bpf_obj_file, sizeof(bpf_obj_file), "%s_kern.o", argv[0]);

	/* Parse commands line args */
	while ((opt = getopt_long(argc, argv, "hDSs:",
				  long_options, &longindex)) != -1) {
		switch (opt) {
		case 'D':
			debug = true;
			break;
		case 'S':
			errors_only = false;
			break;
		case 's':
			/* NOTE(review): atoi() silently yields 0 on bad
			 * input, giving a busy zero-second poll loop
			 */
			interval = atoi(optarg);
			break;
		case 'h':
		default:
			usage(argv);
			return EXIT_FAILURE;
		}
	}

	/* Lift the locked-memory limit so BPF map creation can succeed */
	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		perror("setrlimit(RLIMIT_MEMLOCK)");
		return EXIT_FAILURE;
	}

	if (load_bpf_file(bpf_obj_file)) {
		printf("ERROR - bpf_log_buf: %s", bpf_log_buf);
		return EXIT_FAILURE;
	}
	if (!prog_fd[0]) {
		printf("ERROR - load_bpf_file: %s\n", strerror(errno));
		return EXIT_FAILURE;
	}

	if (debug) {
		print_bpf_prog_info();
	}

	/* Unload/stop tracepoint event by closing fd's */
	if (errors_only) {
		/* The prog_fd[i] and event_fd[i] depend on the
		 * order the functions was defined in _kern.c
		 */
		close(event_fd[2]); /* tracepoint/xdp/xdp_redirect */
		close(prog_fd[2]); /* func: trace_xdp_redirect */
		close(event_fd[3]); /* tracepoint/xdp/xdp_redirect_map */
		close(prog_fd[3]); /* func: trace_xdp_redirect_map */
	}

	/* Never returns under normal operation */
	stats_poll(interval, errors_only);

	return ret;
}