test_progs.c

  1. /* Copyright (c) 2017 Facebook
  2. *
  3. * This program is free software; you can redistribute it and/or
  4. * modify it under the terms of version 2 of the GNU General Public
  5. * License as published by the Free Software Foundation.
  6. */
  7. #include <stdio.h>
  8. #include <unistd.h>
  9. #include <errno.h>
  10. #include <string.h>
  11. #include <assert.h>
  12. #include <stdlib.h>
  13. #include <time.h>
  14. #include <linux/types.h>
  15. typedef __u16 __sum16;
  16. #include <arpa/inet.h>
  17. #include <linux/if_ether.h>
  18. #include <linux/if_packet.h>
  19. #include <linux/ip.h>
  20. #include <linux/ipv6.h>
  21. #include <linux/tcp.h>
  22. #include <linux/filter.h>
  23. #include <linux/perf_event.h>
  24. #include <linux/unistd.h>
  25. #include <sys/ioctl.h>
  26. #include <sys/wait.h>
  27. #include <sys/types.h>
  28. #include <fcntl.h>
  29. #include <linux/bpf.h>
  30. #include <linux/err.h>
  31. #include <bpf/bpf.h>
  32. #include <bpf/libbpf.h>
  33. #include "test_iptunnel_common.h"
  34. #include "bpf_util.h"
  35. #include "bpf_endian.h"
  36. #include "bpf_rlimit.h"
  37. #include "trace_helpers.h"
  38. static int error_cnt, pass_cnt;
  39. static bool jit_enabled;
  40. #define MAGIC_BYTES 123
  41. /* ipv4 test vector */
  42. static struct {
  43. struct ethhdr eth;
  44. struct iphdr iph;
  45. struct tcphdr tcp;
  46. } __packed pkt_v4 = {
  47. .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
  48. .iph.ihl = 5,
  49. .iph.protocol = 6,
  50. .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
  51. .tcp.urg_ptr = 123,
  52. };
  53. /* ipv6 test vector */
  54. static struct {
  55. struct ethhdr eth;
  56. struct ipv6hdr iph;
  57. struct tcphdr tcp;
  58. } __packed pkt_v6 = {
  59. .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
  60. .iph.nexthdr = 6,
  61. .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
  62. .tcp.urg_ptr = 123,
  63. };
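/* CHECK() treats a non-zero condition as a failure: it bumps error_cnt and
 * prints the formatted message; otherwise it bumps pass_cnt. Note that it
 * relies on a local __u32 'duration' being in scope in the caller.
 */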
  64. #define CHECK(condition, tag, format...) ({ \
  65. int __ret = !!(condition); \
  66. if (__ret) { \
  67. error_cnt++; \
  68. printf("%s:FAIL:%s ", __func__, tag); \
  69. printf(format); \
  70. } else { \
  71. pass_cnt++; \
  72. printf("%s:PASS:%s %d nsec\n", __func__, tag, duration);\
  73. } \
  74. __ret; \
  75. })
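/* Look up a map by name in a loaded bpf_object and return its fd;
 * on failure print an error, bump error_cnt and return -1.
 */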
  76. static int bpf_find_map(const char *test, struct bpf_object *obj,
  77. const char *name)
  78. {
  79. struct bpf_map *map;
  80. map = bpf_object__find_map_by_name(obj, name);
  81. if (!map) {
  82. printf("%s:FAIL:map '%s' not found\n", test, name);
  83. error_cnt++;
  84. return -1;
  85. }
  86. return bpf_map__fd(map);
  87. }
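/* Load test_pkt_access.o as SCHED_CLS and run it over the ipv4 and ipv6
 * test packets with bpf_prog_test_run(); a zero return value is expected
 * for both.
 */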
  88. static void test_pkt_access(void)
  89. {
  90. const char *file = "./test_pkt_access.o";
  91. struct bpf_object *obj;
  92. __u32 duration, retval;
  93. int err, prog_fd;
  94. err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
  95. if (err) {
  96. error_cnt++;
  97. return;
  98. }
  99. err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4),
  100. NULL, NULL, &retval, &duration);
  101. CHECK(err || errno || retval, "ipv4",
  102. "err %d errno %d retval %d duration %d\n",
  103. err, errno, retval, duration);
  104. err = bpf_prog_test_run(prog_fd, 100000, &pkt_v6, sizeof(pkt_v6),
  105. NULL, NULL, &retval, &duration);
  106. CHECK(err || errno || retval, "ipv6",
  107. "err %d errno %d retval %d duration %d\n",
  108. err, errno, retval, duration);
  109. bpf_object__close(obj);
  110. }
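/* Load test_xdp.o, seed the vip2tnl map with an ipv4 and an ipv6 tunnel
 * entry, and expect XDP_TX with the encapsulated packet sizes (74 bytes
 * for the IPIP case, 114 bytes for IPv6-in-IPv6).
 */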
  111. static void test_xdp(void)
  112. {
  113. struct vip key4 = {.protocol = 6, .family = AF_INET};
  114. struct vip key6 = {.protocol = 6, .family = AF_INET6};
  115. struct iptnl_info value4 = {.family = AF_INET};
  116. struct iptnl_info value6 = {.family = AF_INET6};
  117. const char *file = "./test_xdp.o";
  118. struct bpf_object *obj;
  119. char buf[128];
  120. struct ipv6hdr *iph6 = (void *)buf + sizeof(struct ethhdr);
  121. struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
  122. __u32 duration, retval, size;
  123. int err, prog_fd, map_fd;
  124. err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
  125. if (err) {
  126. error_cnt++;
  127. return;
  128. }
  129. map_fd = bpf_find_map(__func__, obj, "vip2tnl");
  130. if (map_fd < 0)
  131. goto out;
  132. bpf_map_update_elem(map_fd, &key4, &value4, 0);
  133. bpf_map_update_elem(map_fd, &key6, &value6, 0);
  134. err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
  135. buf, &size, &retval, &duration);
  136. CHECK(err || errno || retval != XDP_TX || size != 74 ||
  137. iph->protocol != IPPROTO_IPIP, "ipv4",
  138. "err %d errno %d retval %d size %d\n",
  139. err, errno, retval, size);
  140. err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
  141. buf, &size, &retval, &duration);
  142. CHECK(err || errno || retval != XDP_TX || size != 114 ||
  143. iph6->nexthdr != IPPROTO_IPV6, "ipv6",
  144. "err %d errno %d retval %d size %d\n",
  145. err, errno, retval, size);
  146. out:
  147. bpf_object__close(obj);
  148. }
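/* test_adjust_tail.o is expected to drop the ipv4 packet (XDP_DROP) and to
 * trim the ipv6 packet's tail, returning XDP_TX with 54 bytes left.
 */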
  149. static void test_xdp_adjust_tail(void)
  150. {
  151. const char *file = "./test_adjust_tail.o";
  152. struct bpf_object *obj;
  153. char buf[128];
  154. __u32 duration, retval, size;
  155. int err, prog_fd;
  156. err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
  157. if (err) {
  158. error_cnt++;
  159. return;
  160. }
  161. err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
  162. buf, &size, &retval, &duration);
  163. CHECK(err || errno || retval != XDP_DROP,
  164. "ipv4", "err %d errno %d retval %d size %d\n",
  165. err, errno, retval, size);
  166. err = bpf_prog_test_run(prog_fd, 1, &pkt_v6, sizeof(pkt_v6),
  167. buf, &size, &retval, &duration);
  168. CHECK(err || errno || retval != XDP_TX || size != 54,
  169. "ipv6", "err %d errno %d retval %d size %d\n",
  170. err, errno, retval, size);
  171. bpf_object__close(obj);
  172. }
  173. #define MAGIC_VAL 0x1234
  174. #define NUM_ITER 100000
  175. #define VIP_NUM 5
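/* Shared body for the l4lb tests: seed vip_map, ch_rings and reals, run the
 * program NUM_ITER times over both test packets, expect TC_ACT_REDIRECT and
 * the MAGIC_VAL marker at the start of the output buffer, then cross-check
 * the per-cpu "stats" map against MAGIC_BYTES * NUM_ITER * 2 bytes.
 */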
  176. static void test_l4lb(const char *file)
  177. {
  178. unsigned int nr_cpus = bpf_num_possible_cpus();
  179. struct vip key = {.protocol = 6};
  180. struct vip_meta {
  181. __u32 flags;
  182. __u32 vip_num;
  183. } value = {.vip_num = VIP_NUM};
  184. __u32 stats_key = VIP_NUM;
  185. struct vip_stats {
  186. __u64 bytes;
  187. __u64 pkts;
  188. } stats[nr_cpus];
  189. struct real_definition {
  190. union {
  191. __be32 dst;
  192. __be32 dstv6[4];
  193. };
  194. __u8 flags;
  195. } real_def = {.dst = MAGIC_VAL};
  196. __u32 ch_key = 11, real_num = 3;
  197. __u32 duration, retval, size;
  198. int err, i, prog_fd, map_fd;
  199. __u64 bytes = 0, pkts = 0;
  200. struct bpf_object *obj;
  201. char buf[128];
  202. u32 *magic = (u32 *)buf;
  203. err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
  204. if (err) {
  205. error_cnt++;
  206. return;
  207. }
  208. map_fd = bpf_find_map(__func__, obj, "vip_map");
  209. if (map_fd < 0)
  210. goto out;
  211. bpf_map_update_elem(map_fd, &key, &value, 0);
  212. map_fd = bpf_find_map(__func__, obj, "ch_rings");
  213. if (map_fd < 0)
  214. goto out;
  215. bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
  216. map_fd = bpf_find_map(__func__, obj, "reals");
  217. if (map_fd < 0)
  218. goto out;
  219. bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
  220. err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
  221. buf, &size, &retval, &duration);
  222. CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 54 ||
  223. *magic != MAGIC_VAL, "ipv4",
  224. "err %d errno %d retval %d size %d magic %x\n",
  225. err, errno, retval, size, *magic);
  226. err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
  227. buf, &size, &retval, &duration);
  228. CHECK(err || errno || retval != 7/*TC_ACT_REDIRECT*/ || size != 74 ||
  229. *magic != MAGIC_VAL, "ipv6",
  230. "err %d errno %d retval %d size %d magic %x\n",
  231. err, errno, retval, size, *magic);
  232. map_fd = bpf_find_map(__func__, obj, "stats");
  233. if (map_fd < 0)
  234. goto out;
  235. bpf_map_lookup_elem(map_fd, &stats_key, stats);
  236. for (i = 0; i < nr_cpus; i++) {
  237. bytes += stats[i].bytes;
  238. pkts += stats[i].pkts;
  239. }
  240. if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
  241. error_cnt++;
  242. printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts);
  243. }
  244. out:
  245. bpf_object__close(obj);
  246. }
  247. static void test_l4lb_all(void)
  248. {
  249. const char *file1 = "./test_l4lb.o";
  250. const char *file2 = "./test_l4lb_noinline.o";
  251. test_l4lb(file1);
  252. test_l4lb(file2);
  253. }
  254. static void test_xdp_noinline(void)
  255. {
  256. const char *file = "./test_xdp_noinline.o";
  257. unsigned int nr_cpus = bpf_num_possible_cpus();
  258. struct vip key = {.protocol = 6};
  259. struct vip_meta {
  260. __u32 flags;
  261. __u32 vip_num;
  262. } value = {.vip_num = VIP_NUM};
  263. __u32 stats_key = VIP_NUM;
  264. struct vip_stats {
  265. __u64 bytes;
  266. __u64 pkts;
  267. } stats[nr_cpus];
  268. struct real_definition {
  269. union {
  270. __be32 dst;
  271. __be32 dstv6[4];
  272. };
  273. __u8 flags;
  274. } real_def = {.dst = MAGIC_VAL};
  275. __u32 ch_key = 11, real_num = 3;
  276. __u32 duration, retval, size;
  277. int err, i, prog_fd, map_fd;
  278. __u64 bytes = 0, pkts = 0;
  279. struct bpf_object *obj;
  280. char buf[128];
  281. u32 *magic = (u32 *)buf;
  282. err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
  283. if (err) {
  284. error_cnt++;
  285. return;
  286. }
  287. map_fd = bpf_find_map(__func__, obj, "vip_map");
  288. if (map_fd < 0)
  289. goto out;
  290. bpf_map_update_elem(map_fd, &key, &value, 0);
  291. map_fd = bpf_find_map(__func__, obj, "ch_rings");
  292. if (map_fd < 0)
  293. goto out;
  294. bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
  295. map_fd = bpf_find_map(__func__, obj, "reals");
  296. if (map_fd < 0)
  297. goto out;
  298. bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
  299. err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
  300. buf, &size, &retval, &duration);
  301. CHECK(err || errno || retval != 1 || size != 54 ||
  302. *magic != MAGIC_VAL, "ipv4",
  303. "err %d errno %d retval %d size %d magic %x\n",
  304. err, errno, retval, size, *magic);
  305. err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
  306. buf, &size, &retval, &duration);
  307. CHECK(err || errno || retval != 1 || size != 74 ||
  308. *magic != MAGIC_VAL, "ipv6",
  309. "err %d errno %d retval %d size %d magic %x\n",
  310. err, errno, retval, size, *magic);
  311. map_fd = bpf_find_map(__func__, obj, "stats");
  312. if (map_fd < 0)
  313. goto out;
  314. bpf_map_lookup_elem(map_fd, &stats_key, stats);
  315. for (i = 0; i < nr_cpus; i++) {
  316. bytes += stats[i].bytes;
  317. pkts += stats[i].pkts;
  318. }
  319. if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) {
  320. error_cnt++;
  321. printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts);
  322. }
  323. out:
  324. bpf_object__close(obj);
  325. }
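/* test_tcp_estats.o only needs to load successfully; nothing is attached
 * or run here.
 */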
  326. static void test_tcp_estats(void)
  327. {
  328. const char *file = "./test_tcp_estats.o";
  329. int err, prog_fd;
  330. struct bpf_object *obj;
  331. __u32 duration = 0;
  332. err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
  333. CHECK(err, "", "err %d errno %d\n", err, errno);
  334. if (err) {
  335. error_cnt++;
  336. return;
  337. }
  338. bpf_object__close(obj);
  339. }
  340. static inline __u64 ptr_to_u64(const void *ptr)
  341. {
  342. return (__u64) (unsigned long) ptr;
  343. }
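/* Read /proc/sys/net/core/bpf_jit_enable: any first character other than
 * '0' means the JIT is on. The result gates the jited_prog_len checks in
 * test_bpf_obj_id() and the kernel stack checks in the get_stack test.
 */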
  344. static bool is_jit_enabled(void)
  345. {
  346. const char *jit_sysctl = "/proc/sys/net/core/bpf_jit_enable";
  347. bool enabled = false;
  348. int sysctl_fd;
  349. sysctl_fd = open(jit_sysctl, O_RDONLY);
  350. if (sysctl_fd != -1) {
  351. char tmpc;
  352. if (read(sysctl_fd, &tmpc, sizeof(tmpc)) == 1)
  353. enabled = (tmpc != '0');
  354. close(sysctl_fd);
  355. }
  356. return enabled;
  357. }
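/* Exercise prog/map id introspection: load test_obj_id.o twice, validate
 * bpf_obj_get_info_by_fd() for each prog and map (including info_len and
 * load_time sanity), then walk the global id space with
 * bpf_prog_get_next_id()/bpf_map_get_next_id() and make sure both loaded
 * instances are found again.
 */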
  358. static void test_bpf_obj_id(void)
  359. {
  360. const __u64 array_magic_value = 0xfaceb00c;
  361. const __u32 array_key = 0;
  362. const int nr_iters = 2;
  363. const char *file = "./test_obj_id.o";
  364. const char *expected_prog_name = "test_obj_id";
  365. const char *expected_map_name = "test_map_id";
  366. const __u64 nsec_per_sec = 1000000000;
  367. struct bpf_object *objs[nr_iters];
  368. int prog_fds[nr_iters], map_fds[nr_iters];
  369. /* +1 to test for the info_len returned by kernel */
  370. struct bpf_prog_info prog_infos[nr_iters + 1];
  371. struct bpf_map_info map_infos[nr_iters + 1];
  372. /* Each prog only uses one map. +1 to test nr_map_ids
  373. * returned by kernel.
  374. */
  375. __u32 map_ids[nr_iters + 1];
  376. char jited_insns[128], xlated_insns[128], zeros[128];
  377. __u32 i, next_id, info_len, nr_id_found, duration = 0;
  378. struct timespec real_time_ts, boot_time_ts;
  379. int err = 0;
  380. __u64 array_value;
  381. uid_t my_uid = getuid();
  382. time_t now, load_time;
  383. err = bpf_prog_get_fd_by_id(0);
  384. CHECK(err >= 0 || errno != ENOENT,
  385. "get-fd-by-notexist-prog-id", "err %d errno %d\n", err, errno);
  386. err = bpf_map_get_fd_by_id(0);
  387. CHECK(err >= 0 || errno != ENOENT,
  388. "get-fd-by-notexist-map-id", "err %d errno %d\n", err, errno);
  389. for (i = 0; i < nr_iters; i++)
  390. objs[i] = NULL;
  391. /* Check bpf_obj_get_info_by_fd() */
  392. bzero(zeros, sizeof(zeros));
  393. for (i = 0; i < nr_iters; i++) {
  394. now = time(NULL);
  395. err = bpf_prog_load(file, BPF_PROG_TYPE_SOCKET_FILTER,
  396. &objs[i], &prog_fds[i]);
  397. /* test_obj_id.o is a dumb prog. It should never fail
  398. * to load.
  399. */
  400. if (err)
  401. error_cnt++;
  402. assert(!err);
  403. /* Insert a magic value to the map */
  404. map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id");
  405. assert(map_fds[i] >= 0);
  406. err = bpf_map_update_elem(map_fds[i], &array_key,
  407. &array_magic_value, 0);
  408. assert(!err);
  409. /* Check getting map info */
  410. info_len = sizeof(struct bpf_map_info) * 2;
  411. bzero(&map_infos[i], info_len);
  412. err = bpf_obj_get_info_by_fd(map_fds[i], &map_infos[i],
  413. &info_len);
  414. if (CHECK(err ||
  415. map_infos[i].type != BPF_MAP_TYPE_ARRAY ||
  416. map_infos[i].key_size != sizeof(__u32) ||
  417. map_infos[i].value_size != sizeof(__u64) ||
  418. map_infos[i].max_entries != 1 ||
  419. map_infos[i].map_flags != 0 ||
  420. info_len != sizeof(struct bpf_map_info) ||
  421. strcmp((char *)map_infos[i].name, expected_map_name),
  422. "get-map-info(fd)",
  423. "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
  424. err, errno,
  425. map_infos[i].type, BPF_MAP_TYPE_ARRAY,
  426. info_len, sizeof(struct bpf_map_info),
  427. map_infos[i].key_size,
  428. map_infos[i].value_size,
  429. map_infos[i].max_entries,
  430. map_infos[i].map_flags,
  431. map_infos[i].name, expected_map_name))
  432. goto done;
  433. /* Check getting prog info */
  434. info_len = sizeof(struct bpf_prog_info) * 2;
  435. bzero(&prog_infos[i], info_len);
  436. bzero(jited_insns, sizeof(jited_insns));
  437. bzero(xlated_insns, sizeof(xlated_insns));
  438. prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns);
  439. prog_infos[i].jited_prog_len = sizeof(jited_insns);
  440. prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns);
  441. prog_infos[i].xlated_prog_len = sizeof(xlated_insns);
  442. prog_infos[i].map_ids = ptr_to_u64(map_ids + i);
  443. prog_infos[i].nr_map_ids = 2;
  444. err = clock_gettime(CLOCK_REALTIME, &real_time_ts);
  445. assert(!err);
  446. err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts);
  447. assert(!err);
  448. err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i],
  449. &info_len);
  450. load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec)
  451. + (prog_infos[i].load_time / nsec_per_sec);
  452. if (CHECK(err ||
  453. prog_infos[i].type != BPF_PROG_TYPE_SOCKET_FILTER ||
  454. info_len != sizeof(struct bpf_prog_info) ||
  455. (jit_enabled && !prog_infos[i].jited_prog_len) ||
  456. (jit_enabled &&
  457. !memcmp(jited_insns, zeros, sizeof(zeros))) ||
  458. !prog_infos[i].xlated_prog_len ||
  459. !memcmp(xlated_insns, zeros, sizeof(zeros)) ||
  460. load_time < now - 60 || load_time > now + 60 ||
  461. prog_infos[i].created_by_uid != my_uid ||
  462. prog_infos[i].nr_map_ids != 1 ||
  463. *(int *)prog_infos[i].map_ids != map_infos[i].id ||
  464. strcmp((char *)prog_infos[i].name, expected_prog_name),
  465. "get-prog-info(fd)",
  466. "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
  467. err, errno, i,
  468. prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
  469. info_len, sizeof(struct bpf_prog_info),
  470. jit_enabled,
  471. prog_infos[i].jited_prog_len,
  472. prog_infos[i].xlated_prog_len,
  473. !!memcmp(jited_insns, zeros, sizeof(zeros)),
  474. !!memcmp(xlated_insns, zeros, sizeof(zeros)),
  475. load_time, now,
  476. prog_infos[i].created_by_uid, my_uid,
  477. prog_infos[i].nr_map_ids, 1,
  478. *(int *)prog_infos[i].map_ids, map_infos[i].id,
  479. prog_infos[i].name, expected_prog_name))
  480. goto done;
  481. }
  482. /* Check bpf_prog_get_next_id() */
  483. nr_id_found = 0;
  484. next_id = 0;
  485. while (!bpf_prog_get_next_id(next_id, &next_id)) {
  486. struct bpf_prog_info prog_info = {};
  487. __u32 saved_map_id;
  488. int prog_fd;
  489. info_len = sizeof(prog_info);
  490. prog_fd = bpf_prog_get_fd_by_id(next_id);
  491. if (prog_fd < 0 && errno == ENOENT)
  492. /* The bpf_prog is in the dead row */
  493. continue;
  494. if (CHECK(prog_fd < 0, "get-prog-fd(next_id)",
  495. "prog_fd %d next_id %d errno %d\n",
  496. prog_fd, next_id, errno))
  497. break;
  498. for (i = 0; i < nr_iters; i++)
  499. if (prog_infos[i].id == next_id)
  500. break;
  501. if (i == nr_iters)
  502. continue;
  503. nr_id_found++;
  504. /* Negative test:
  505. * prog_info.nr_map_ids = 1
  506. * prog_info.map_ids = NULL
  507. */
  508. prog_info.nr_map_ids = 1;
  509. err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
  510. if (CHECK(!err || errno != EFAULT,
  511. "get-prog-fd-bad-nr-map-ids", "err %d errno %d(%d)",
  512. err, errno, EFAULT))
  513. break;
  514. bzero(&prog_info, sizeof(prog_info));
  515. info_len = sizeof(prog_info);
  516. saved_map_id = *(int *)(prog_infos[i].map_ids);
  517. prog_info.map_ids = prog_infos[i].map_ids;
  518. prog_info.nr_map_ids = 2;
  519. err = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &info_len);
  520. prog_infos[i].jited_prog_insns = 0;
  521. prog_infos[i].xlated_prog_insns = 0;
  522. CHECK(err || info_len != sizeof(struct bpf_prog_info) ||
  523. memcmp(&prog_info, &prog_infos[i], info_len) ||
  524. *(int *)prog_info.map_ids != saved_map_id,
  525. "get-prog-info(next_id->fd)",
  526. "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
  527. err, errno, info_len, sizeof(struct bpf_prog_info),
  528. memcmp(&prog_info, &prog_infos[i], info_len),
  529. *(int *)prog_info.map_ids, saved_map_id);
  530. close(prog_fd);
  531. }
  532. CHECK(nr_id_found != nr_iters,
  533. "check total prog id found by get_next_id",
  534. "nr_id_found %u(%u)\n",
  535. nr_id_found, nr_iters);
  536. /* Check bpf_map_get_next_id() */
  537. nr_id_found = 0;
  538. next_id = 0;
  539. while (!bpf_map_get_next_id(next_id, &next_id)) {
  540. struct bpf_map_info map_info = {};
  541. int map_fd;
  542. info_len = sizeof(map_info);
  543. map_fd = bpf_map_get_fd_by_id(next_id);
  544. if (map_fd < 0 && errno == ENOENT)
  545. /* The bpf_map is in the dead row */
  546. continue;
  547. if (CHECK(map_fd < 0, "get-map-fd(next_id)",
  548. "map_fd %d next_id %u errno %d\n",
  549. map_fd, next_id, errno))
  550. break;
  551. for (i = 0; i < nr_iters; i++)
  552. if (map_infos[i].id == next_id)
  553. break;
  554. if (i == nr_iters)
  555. continue;
  556. nr_id_found++;
  557. err = bpf_map_lookup_elem(map_fd, &array_key, &array_value);
  558. assert(!err);
  559. err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len);
  560. CHECK(err || info_len != sizeof(struct bpf_map_info) ||
  561. memcmp(&map_info, &map_infos[i], info_len) ||
  562. array_value != array_magic_value,
  563. "check get-map-info(next_id->fd)",
  564. "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
  565. err, errno, info_len, sizeof(struct bpf_map_info),
  566. memcmp(&map_info, &map_infos[i], info_len),
  567. array_value, array_magic_value);
  568. close(map_fd);
  569. }
  570. CHECK(nr_id_found != nr_iters,
  571. "check total map id found by get_next_id",
  572. "nr_id_found %u(%u)\n",
  573. nr_id_found, nr_iters);
  574. done:
  575. for (i = 0; i < nr_iters; i++)
  576. bpf_object__close(objs[i]);
  577. }
  578. static void test_pkt_md_access(void)
  579. {
  580. const char *file = "./test_pkt_md_access.o";
  581. struct bpf_object *obj;
  582. __u32 duration, retval;
  583. int err, prog_fd;
  584. err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
  585. if (err) {
  586. error_cnt++;
  587. return;
  588. }
  589. err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4),
  590. NULL, NULL, &retval, &duration);
  591. CHECK(err || retval, "",
  592. "err %d errno %d retval %d duration %d\n",
  593. err, errno, retval, duration);
  594. bpf_object__close(obj);
  595. }
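/* Feed a few object names directly to the BPF_PROG_LOAD and BPF_MAP_CREATE
 * commands: names of up to 15 characters plus the terminating NUL are
 * accepted, while longer names and names with unexpected characters must
 * fail with EINVAL.
 */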
  596. static void test_obj_name(void)
  597. {
  598. struct {
  599. const char *name;
  600. int success;
  601. int expected_errno;
  602. } tests[] = {
  603. { "", 1, 0 },
  604. { "_123456789ABCDE", 1, 0 },
  605. { "_123456789ABCDEF", 0, EINVAL },
  606. { "_123456789ABCD\n", 0, EINVAL },
  607. };
  608. struct bpf_insn prog[] = {
  609. BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0),
  610. BPF_EXIT_INSN(),
  611. };
  612. __u32 duration = 0;
  613. int i;
  614. for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
  615. size_t name_len = strlen(tests[i].name) + 1;
  616. union bpf_attr attr;
  617. size_t ncopy;
  618. int fd;
  619. /* test different attr.prog_name during BPF_PROG_LOAD */
  620. ncopy = name_len < sizeof(attr.prog_name) ?
  621. name_len : sizeof(attr.prog_name);
  622. bzero(&attr, sizeof(attr));
  623. attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
  624. attr.insn_cnt = 2;
  625. attr.insns = ptr_to_u64(prog);
  626. attr.license = ptr_to_u64("");
  627. memcpy(attr.prog_name, tests[i].name, ncopy);
  628. fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
  629. CHECK((tests[i].success && fd < 0) ||
  630. (!tests[i].success && fd != -1) ||
  631. (!tests[i].success && errno != tests[i].expected_errno),
  632. "check-bpf-prog-name",
  633. "fd %d(%d) errno %d(%d)\n",
  634. fd, tests[i].success, errno, tests[i].expected_errno);
  635. if (fd != -1)
  636. close(fd);
  637. /* test different attr.map_name during BPF_MAP_CREATE */
  638. ncopy = name_len < sizeof(attr.map_name) ?
  639. name_len : sizeof(attr.map_name);
  640. bzero(&attr, sizeof(attr));
  641. attr.map_type = BPF_MAP_TYPE_ARRAY;
  642. attr.key_size = 4;
  643. attr.value_size = 4;
  644. attr.max_entries = 1;
  645. attr.map_flags = 0;
  646. memcpy(attr.map_name, tests[i].name, ncopy);
  647. fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
  648. CHECK((tests[i].success && fd < 0) ||
  649. (!tests[i].success && fd != -1) ||
  650. (!tests[i].success && errno != tests[i].expected_errno),
  651. "check-bpf-map-name",
  652. "fd %d(%d) errno %d(%d)\n",
  653. fd, tests[i].success, errno, tests[i].expected_errno);
  654. if (fd != -1)
  655. close(fd);
  656. }
  657. }
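/* Attach up to three tracepoint programs to perf events on sched_switch and
 * verify PERF_EVENT_IOC_QUERY_BPF reports the right prog count and ids,
 * including the error paths (bad query pointer, too-small ids array).
 */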
  658. static void test_tp_attach_query(void)
  659. {
  660. const int num_progs = 3;
  661. int i, j, bytes, efd, err, prog_fd[num_progs], pmu_fd[num_progs];
  662. __u32 duration = 0, info_len, saved_prog_ids[num_progs];
  663. const char *file = "./test_tracepoint.o";
  664. struct perf_event_query_bpf *query;
  665. struct perf_event_attr attr = {};
  666. struct bpf_object *obj[num_progs];
  667. struct bpf_prog_info prog_info;
  668. char buf[256];
  669. snprintf(buf, sizeof(buf),
  670. "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
  671. efd = open(buf, O_RDONLY, 0);
  672. if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
  673. return;
  674. bytes = read(efd, buf, sizeof(buf));
  675. close(efd);
  676. if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
  677. "read", "bytes %d errno %d\n", bytes, errno))
  678. return;
  679. attr.config = strtol(buf, NULL, 0);
  680. attr.type = PERF_TYPE_TRACEPOINT;
  681. attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
  682. attr.sample_period = 1;
  683. attr.wakeup_events = 1;
  684. query = malloc(sizeof(*query) + sizeof(__u32) * num_progs);
  685. for (i = 0; i < num_progs; i++) {
  686. err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj[i],
  687. &prog_fd[i]);
  688. if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
  689. goto cleanup1;
  690. bzero(&prog_info, sizeof(prog_info));
  691. prog_info.jited_prog_len = 0;
  692. prog_info.xlated_prog_len = 0;
  693. prog_info.nr_map_ids = 0;
  694. info_len = sizeof(prog_info);
  695. err = bpf_obj_get_info_by_fd(prog_fd[i], &prog_info, &info_len);
  696. if (CHECK(err, "bpf_obj_get_info_by_fd", "err %d errno %d\n",
  697. err, errno))
  698. goto cleanup1;
  699. saved_prog_ids[i] = prog_info.id;
  700. pmu_fd[i] = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
  701. 0 /* cpu 0 */, -1 /* group id */,
  702. 0 /* flags */);
  703. if (CHECK(pmu_fd[i] < 0, "perf_event_open", "err %d errno %d\n",
  704. pmu_fd[i], errno))
  705. goto cleanup2;
  706. err = ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE, 0);
  707. if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
  708. err, errno))
  709. goto cleanup3;
  710. if (i == 0) {
  711. /* check NULL prog array query */
  712. query->ids_len = num_progs;
  713. err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
  714. if (CHECK(err || query->prog_cnt != 0,
  715. "perf_event_ioc_query_bpf",
  716. "err %d errno %d query->prog_cnt %u\n",
  717. err, errno, query->prog_cnt))
  718. goto cleanup3;
  719. }
  720. err = ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[i]);
  721. if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
  722. err, errno))
  723. goto cleanup3;
  724. if (i == 1) {
  725. /* try to get # of programs only */
  726. query->ids_len = 0;
  727. err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
  728. if (CHECK(err || query->prog_cnt != 2,
  729. "perf_event_ioc_query_bpf",
  730. "err %d errno %d query->prog_cnt %u\n",
  731. err, errno, query->prog_cnt))
  732. goto cleanup3;
  733. /* try a few negative tests */
  734. /* invalid query pointer */
  735. err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF,
  736. (struct perf_event_query_bpf *)0x1);
  737. if (CHECK(!err || errno != EFAULT,
  738. "perf_event_ioc_query_bpf",
  739. "err %d errno %d\n", err, errno))
  740. goto cleanup3;
  741. /* not enough space */
  742. query->ids_len = 1;
  743. err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
  744. if (CHECK(!err || errno != ENOSPC || query->prog_cnt != 2,
  745. "perf_event_ioc_query_bpf",
  746. "err %d errno %d query->prog_cnt %u\n",
  747. err, errno, query->prog_cnt))
  748. goto cleanup3;
  749. }
  750. query->ids_len = num_progs;
  751. err = ioctl(pmu_fd[i], PERF_EVENT_IOC_QUERY_BPF, query);
  752. if (CHECK(err || query->prog_cnt != (i + 1),
  753. "perf_event_ioc_query_bpf",
  754. "err %d errno %d query->prog_cnt %u\n",
  755. err, errno, query->prog_cnt))
  756. goto cleanup3;
  757. for (j = 0; j < i + 1; j++)
  758. if (CHECK(saved_prog_ids[j] != query->ids[j],
  759. "perf_event_ioc_query_bpf",
  760. "#%d saved_prog_id %x query prog_id %x\n",
  761. j, saved_prog_ids[j], query->ids[j]))
  762. goto cleanup3;
  763. }
  764. i = num_progs - 1;
  765. for (; i >= 0; i--) {
  766. cleanup3:
  767. ioctl(pmu_fd[i], PERF_EVENT_IOC_DISABLE);
  768. cleanup2:
  769. close(pmu_fd[i]);
  770. cleanup1:
  771. bpf_object__close(obj[i]);
  772. }
  773. free(query);
  774. }
  775. static int compare_map_keys(int map1_fd, int map2_fd)
  776. {
  777. __u32 key, next_key;
  778. char val_buf[PERF_MAX_STACK_DEPTH *
  779. sizeof(struct bpf_stack_build_id)];
  780. int err;
  781. err = bpf_map_get_next_key(map1_fd, NULL, &key);
  782. if (err)
  783. return err;
  784. err = bpf_map_lookup_elem(map2_fd, &key, val_buf);
  785. if (err)
  786. return err;
  787. while (bpf_map_get_next_key(map1_fd, &key, &next_key) == 0) {
  788. err = bpf_map_lookup_elem(map2_fd, &next_key, val_buf);
  789. if (err)
  790. return err;
  791. key = next_key;
  792. }
  793. if (errno != ENOENT)
  794. return -1;
  795. return 0;
  796. }
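/* Walk every key of smap_fd and require that amap_fd holds a byte-identical
 * stack trace for the same key; returns non-zero on the first mismatch or
 * lookup failure.
 */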
  797. static int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len)
  798. {
  799. __u32 key, next_key, *cur_key_p, *next_key_p;
  800. char *val_buf1, *val_buf2;
  801. int i, err = 0;
  802. val_buf1 = malloc(stack_trace_len);
  803. val_buf2 = malloc(stack_trace_len);
  804. cur_key_p = NULL;
  805. next_key_p = &key;
  806. while (bpf_map_get_next_key(smap_fd, cur_key_p, next_key_p) == 0) {
  807. err = bpf_map_lookup_elem(smap_fd, next_key_p, val_buf1);
  808. if (err)
  809. goto out;
  810. err = bpf_map_lookup_elem(amap_fd, next_key_p, val_buf2);
  811. if (err)
  812. goto out;
  813. for (i = 0; i < stack_trace_len; i++) {
  814. if (val_buf1[i] != val_buf2[i]) {
  815. err = -1;
  816. goto out;
  817. }
  818. }
  819. key = *next_key_p;
  820. cur_key_p = &key;
  821. next_key_p = &next_key;
  822. }
  823. if (errno != ENOENT)
  824. err = -1;
  825. out:
  826. free(val_buf1);
  827. free(val_buf2);
  828. return err;
  829. }
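/* Attach test_stacktrace_map.o to the sched_switch tracepoint via a perf
 * event, let it collect stack ids for a second, then check that
 * stackid_hmap, stackmap and stack_amap all agree with each other.
 */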
  830. static void test_stacktrace_map()
  831. {
  832. int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
  833. const char *file = "./test_stacktrace_map.o";
  834. int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
  835. struct perf_event_attr attr = {};
  836. __u32 key, val, duration = 0;
  837. struct bpf_object *obj;
  838. char buf[256];
  839. err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
  840. if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
  841. return;
  842. /* Get the ID for the sched/sched_switch tracepoint */
  843. snprintf(buf, sizeof(buf),
  844. "/sys/kernel/debug/tracing/events/sched/sched_switch/id");
  845. efd = open(buf, O_RDONLY, 0);
  846. if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
  847. goto close_prog;
  848. bytes = read(efd, buf, sizeof(buf));
  849. close(efd);
  850. if (bytes <= 0 || bytes >= sizeof(buf))
  851. goto close_prog;
  852. /* Open the perf event and attach bpf program */
  853. attr.config = strtol(buf, NULL, 0);
  854. attr.type = PERF_TYPE_TRACEPOINT;
  855. attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
  856. attr.sample_period = 1;
  857. attr.wakeup_events = 1;
  858. pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
  859. 0 /* cpu 0 */, -1 /* group id */,
  860. 0 /* flags */);
  861. if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
  862. pmu_fd, errno))
  863. goto close_prog;
  864. err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
  865. if (err)
  866. goto disable_pmu;
  867. err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
  868. if (err)
  869. goto disable_pmu;
  870. /* find map fds */
  871. control_map_fd = bpf_find_map(__func__, obj, "control_map");
  872. if (control_map_fd < 0)
  873. goto disable_pmu;
  874. stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
  875. if (stackid_hmap_fd < 0)
  876. goto disable_pmu;
  877. stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
  878. if (stackmap_fd < 0)
  879. goto disable_pmu;
  880. stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
  881. if (stack_amap_fd < 0)
  882. goto disable_pmu;
  884. /* give the bpf program some time to run */
  884. sleep(1);
  885. /* disable stack trace collection */
  886. key = 0;
  887. val = 1;
  888. bpf_map_update_elem(control_map_fd, &key, &val, 0);
  889. /* for every element in stackid_hmap, we can find a corresponding one
  890. * in stackmap, and vice versa.
  891. */
  892. err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
  893. if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
  894. "err %d errno %d\n", err, errno))
  895. goto disable_pmu_noerr;
  896. err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
  897. if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
  898. "err %d errno %d\n", err, errno))
  899. goto disable_pmu_noerr;
  900. stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64);
  901. err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
  902. if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
  903. "err %d errno %d\n", err, errno))
  904. goto disable_pmu_noerr;
  905. goto disable_pmu_noerr;
  906. disable_pmu:
  907. error_cnt++;
  908. disable_pmu_noerr:
  909. ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
  910. close(pmu_fd);
  911. close_prog:
  912. bpf_object__close(obj);
  913. }
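/* Same map consistency checks as above, but with the program attached as a
 * raw tracepoint via bpf_raw_tracepoint_open() instead of a perf event.
 */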
  914. static void test_stacktrace_map_raw_tp()
  915. {
  916. int control_map_fd, stackid_hmap_fd, stackmap_fd;
  917. const char *file = "./test_stacktrace_map.o";
  918. int efd, err, prog_fd;
  919. __u32 key, val, duration = 0;
  920. struct bpf_object *obj;
  921. err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
  922. if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
  923. return;
  924. efd = bpf_raw_tracepoint_open("sched_switch", prog_fd);
  925. if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
  926. goto close_prog;
  927. /* find map fds */
  928. control_map_fd = bpf_find_map(__func__, obj, "control_map");
  929. if (control_map_fd < 0)
  930. goto close_prog;
  931. stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
  932. if (stackid_hmap_fd < 0)
  933. goto close_prog;
  934. stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
  935. if (stackmap_fd < 0)
  936. goto close_prog;
  938. /* give the bpf program some time to run */
  938. sleep(1);
  939. /* disable stack trace collection */
  940. key = 0;
  941. val = 1;
  942. bpf_map_update_elem(control_map_fd, &key, &val, 0);
  943. /* for every element in stackid_hmap, we can find a corresponding one
  944. * in stackmap, and vice versa.
  945. */
  946. err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
  947. if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
  948. "err %d errno %d\n", err, errno))
  949. goto close_prog;
  950. err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
  951. if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
  952. "err %d errno %d\n", err, errno))
  953. goto close_prog;
  954. goto close_prog_noerr;
  955. close_prog:
  956. error_cnt++;
  957. close_prog_noerr:
  958. bpf_object__close(obj);
  959. }
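/* Shell out to readelf to grab the "Build ID" line of ./urandom_read; the
 * result is used as the reference value for the build-id stack map tests.
 */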
  960. static int extract_build_id(char *build_id, size_t size)
  961. {
  962. FILE *fp;
  963. char *line = NULL;
  964. size_t len = 0;
  965. fp = popen("readelf -n ./urandom_read | grep 'Build ID'", "r");
  966. if (fp == NULL)
  967. return -1;
  968. if (getline(&line, &len, fp) == -1)
  969. goto err;
  970. pclose(fp);
  971. if (len > size)
  972. len = size;
  973. memcpy(build_id, line, len);
  974. build_id[len] = '\0';
  975. free(line);
  976. return 0;
  977. err:
  978. pclose(fp);
  979. return -1;
  980. }
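/* Attach test_stacktrace_build_id.o to the random/urandom_read tracepoint,
 * trigger it by running ./urandom_read, and check that at least one
 * collected user stack frame carries the build id reported by readelf.
 * Retried once because stack_map_get_build_id_offset() is racy.
 */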
  981. static void test_stacktrace_build_id(void)
  982. {
  983. int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
  984. const char *file = "./test_stacktrace_build_id.o";
  985. int bytes, efd, err, pmu_fd, prog_fd, stack_trace_len;
  986. struct perf_event_attr attr = {};
  987. __u32 key, previous_key, val, duration = 0;
  988. struct bpf_object *obj;
  989. char buf[256];
  990. int i, j;
  991. struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
  992. int build_id_matches = 0;
  993. int retry = 1;
  994. retry:
  995. err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
  996. if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
  997. goto out;
  998. /* Get the ID for the sched/sched_switch tracepoint */
  999. snprintf(buf, sizeof(buf),
  1000. "/sys/kernel/debug/tracing/events/random/urandom_read/id");
  1001. efd = open(buf, O_RDONLY, 0);
  1002. if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
  1003. goto close_prog;
  1004. bytes = read(efd, buf, sizeof(buf));
  1005. close(efd);
  1006. if (CHECK(bytes <= 0 || bytes >= sizeof(buf),
  1007. "read", "bytes %d errno %d\n", bytes, errno))
  1008. goto close_prog;
  1009. /* Open the perf event and attach bpf program */
  1010. attr.config = strtol(buf, NULL, 0);
  1011. attr.type = PERF_TYPE_TRACEPOINT;
  1012. attr.sample_type = PERF_SAMPLE_RAW | PERF_SAMPLE_CALLCHAIN;
  1013. attr.sample_period = 1;
  1014. attr.wakeup_events = 1;
  1015. pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
  1016. 0 /* cpu 0 */, -1 /* group id */,
  1017. 0 /* flags */);
  1018. if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n",
  1019. pmu_fd, errno))
  1020. goto close_prog;
  1021. err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
  1022. if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
  1023. err, errno))
  1024. goto close_pmu;
  1025. err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
  1026. if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
  1027. err, errno))
  1028. goto disable_pmu;
  1029. /* find map fds */
  1030. control_map_fd = bpf_find_map(__func__, obj, "control_map");
  1031. if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
  1032. "err %d errno %d\n", err, errno))
  1033. goto disable_pmu;
  1034. stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
  1035. if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
  1036. "err %d errno %d\n", err, errno))
  1037. goto disable_pmu;
  1038. stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
  1039. if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
  1040. err, errno))
  1041. goto disable_pmu;
  1042. stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
  1043. if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
  1044. "err %d errno %d\n", err, errno))
  1045. goto disable_pmu;
  1046. assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
  1047. == 0);
  1048. assert(system("./urandom_read") == 0);
  1049. /* disable stack trace collection */
  1050. key = 0;
  1051. val = 1;
  1052. bpf_map_update_elem(control_map_fd, &key, &val, 0);
  1053. /* for every element in stackid_hmap, we can find a corresponding one
  1054. * in stackmap, and vice versa.
  1055. */
  1056. err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
  1057. if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
  1058. "err %d errno %d\n", err, errno))
  1059. goto disable_pmu;
  1060. err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
  1061. if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
  1062. "err %d errno %d\n", err, errno))
  1063. goto disable_pmu;
  1064. err = extract_build_id(buf, 256);
  1065. if (CHECK(err, "get build_id with readelf",
  1066. "err %d errno %d\n", err, errno))
  1067. goto disable_pmu;
  1068. err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
  1069. if (CHECK(err, "get_next_key from stackmap",
  1070. "err %d, errno %d\n", err, errno))
  1071. goto disable_pmu;
  1072. do {
  1073. char build_id[64];
  1074. err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
  1075. if (CHECK(err, "lookup_elem from stackmap",
  1076. "err %d, errno %d\n", err, errno))
  1077. goto disable_pmu;
  1078. for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
  1079. if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
  1080. id_offs[i].offset != 0) {
  1081. for (j = 0; j < 20; ++j)
  1082. sprintf(build_id + 2 * j, "%02x",
  1083. id_offs[i].build_id[j] & 0xff);
  1084. if (strstr(buf, build_id) != NULL)
  1085. build_id_matches = 1;
  1086. }
  1087. previous_key = key;
  1088. } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
  1089. /* stack_map_get_build_id_offset() is racy and sometimes can return
  1090. * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
  1091. * try it one more time.
  1092. */
  1093. if (build_id_matches < 1 && retry--) {
  1094. ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
  1095. close(pmu_fd);
  1096. bpf_object__close(obj);
  1097. printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
  1098. __func__);
  1099. goto retry;
  1100. }
  1101. if (CHECK(build_id_matches < 1, "build id match",
  1102. "Didn't find expected build ID from the map\n"))
  1103. goto disable_pmu;
  1104. stack_trace_len = PERF_MAX_STACK_DEPTH
  1105. * sizeof(struct bpf_stack_build_id);
  1106. err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len);
  1107. CHECK(err, "compare_stack_ips stackmap vs. stack_amap",
  1108. "err %d errno %d\n", err, errno);
  1109. disable_pmu:
  1110. ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
  1111. close_pmu:
  1112. close(pmu_fd);
  1113. close_prog:
  1114. bpf_object__close(obj);
  1115. out:
  1116. return;
  1117. }
  1118. static void test_stacktrace_build_id_nmi(void)
  1119. {
  1120. int control_map_fd, stackid_hmap_fd, stackmap_fd, stack_amap_fd;
  1121. const char *file = "./test_stacktrace_build_id.o";
  1122. int err, pmu_fd, prog_fd;
  1123. struct perf_event_attr attr = {
  1124. .sample_freq = 5000,
  1125. .freq = 1,
  1126. .type = PERF_TYPE_HARDWARE,
  1127. .config = PERF_COUNT_HW_CPU_CYCLES,
  1128. };
  1129. __u32 key, previous_key, val, duration = 0;
  1130. struct bpf_object *obj;
  1131. char buf[256];
  1132. int i, j;
  1133. struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
  1134. int build_id_matches = 0;
  1135. int retry = 1;
  1136. retry:
  1137. err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
  1138. if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
  1139. return;
  1140. pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
  1141. 0 /* cpu 0 */, -1 /* group id */,
  1142. 0 /* flags */);
  1143. if (CHECK(pmu_fd < 0, "perf_event_open",
  1144. "err %d errno %d. Does the test host support PERF_COUNT_HW_CPU_CYCLES?\n",
  1145. pmu_fd, errno))
  1146. goto close_prog;
  1147. err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
  1148. if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n",
  1149. err, errno))
  1150. goto close_pmu;
  1151. err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
  1152. if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n",
  1153. err, errno))
  1154. goto disable_pmu;
  1155. /* find map fds */
  1156. control_map_fd = bpf_find_map(__func__, obj, "control_map");
  1157. if (CHECK(control_map_fd < 0, "bpf_find_map control_map",
  1158. "err %d errno %d\n", err, errno))
  1159. goto disable_pmu;
  1160. stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap");
  1161. if (CHECK(stackid_hmap_fd < 0, "bpf_find_map stackid_hmap",
  1162. "err %d errno %d\n", err, errno))
  1163. goto disable_pmu;
  1164. stackmap_fd = bpf_find_map(__func__, obj, "stackmap");
  1165. if (CHECK(stackmap_fd < 0, "bpf_find_map stackmap", "err %d errno %d\n",
  1166. err, errno))
  1167. goto disable_pmu;
  1168. stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap");
  1169. if (CHECK(stack_amap_fd < 0, "bpf_find_map stack_amap",
  1170. "err %d errno %d\n", err, errno))
  1171. goto disable_pmu;
  1172. assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null")
  1173. == 0);
  1174. assert(system("taskset 0x1 ./urandom_read 100000") == 0);
  1175. /* disable stack trace collection */
  1176. key = 0;
  1177. val = 1;
  1178. bpf_map_update_elem(control_map_fd, &key, &val, 0);
  1179. /* for every element in stackid_hmap, we can find a corresponding one
  1180. * in stackmap, and vice versa.
  1181. */
  1182. err = compare_map_keys(stackid_hmap_fd, stackmap_fd);
  1183. if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap",
  1184. "err %d errno %d\n", err, errno))
  1185. goto disable_pmu;
  1186. err = compare_map_keys(stackmap_fd, stackid_hmap_fd);
  1187. if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap",
  1188. "err %d errno %d\n", err, errno))
  1189. goto disable_pmu;
  1190. err = extract_build_id(buf, 256);
  1191. if (CHECK(err, "get build_id with readelf",
  1192. "err %d errno %d\n", err, errno))
  1193. goto disable_pmu;
  1194. err = bpf_map_get_next_key(stackmap_fd, NULL, &key);
  1195. if (CHECK(err, "get_next_key from stackmap",
  1196. "err %d, errno %d\n", err, errno))
  1197. goto disable_pmu;
  1198. do {
  1199. char build_id[64];
  1200. err = bpf_map_lookup_elem(stackmap_fd, &key, id_offs);
  1201. if (CHECK(err, "lookup_elem from stackmap",
  1202. "err %d, errno %d\n", err, errno))
  1203. goto disable_pmu;
  1204. for (i = 0; i < PERF_MAX_STACK_DEPTH; ++i)
  1205. if (id_offs[i].status == BPF_STACK_BUILD_ID_VALID &&
  1206. id_offs[i].offset != 0) {
  1207. for (j = 0; j < 20; ++j)
  1208. sprintf(build_id + 2 * j, "%02x",
  1209. id_offs[i].build_id[j] & 0xff);
  1210. if (strstr(buf, build_id) != NULL)
  1211. build_id_matches = 1;
  1212. }
  1213. previous_key = key;
  1214. } while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
  1215. /* stack_map_get_build_id_offset() is racy and sometimes can return
  1216. * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
  1217. * try it one more time.
  1218. */
  1219. if (build_id_matches < 1 && retry--) {
  1220. ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
  1221. close(pmu_fd);
  1222. bpf_object__close(obj);
  1223. printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
  1224. __func__);
  1225. goto retry;
  1226. }
  1227. if (CHECK(build_id_matches < 1, "build id match",
  1228. "Didn't find expected build ID from the map\n"))
  1229. goto disable_pmu;
  1230. /*
  1231. * We intentionally skip compare_stack_ips(). This is because we
  1232. * only support one in_nmi() ips-to-build_id translation per cpu
  1233. * at any time, thus stack_amap here will always fall back to
  1234. * BPF_STACK_BUILD_ID_IP;
  1235. */
  1236. disable_pmu:
  1237. ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
  1238. close_pmu:
  1239. close(pmu_fd);
  1240. close_prog:
  1241. bpf_object__close(obj);
  1242. }
  1243. #define MAX_CNT_RAWTP 10ull
  1244. #define MAX_STACK_RAWTP 100
  1245. struct get_stack_trace_t {
  1246. int pid;
  1247. int kern_stack_size;
  1248. int user_stack_size;
  1249. int user_stack_buildid_size;
  1250. __u64 kern_stack[MAX_STACK_RAWTP];
  1251. __u64 user_stack[MAX_STACK_RAWTP];
  1252. struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
  1253. };
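/* Perf-event callback for the raw_tp get-stack test: for each sample,
 * sanity-check the kernel stack (look for ___bpf_prog_run when the JIT is
 * off, otherwise just require a non-empty stack) and require non-empty
 * user stack / build-id data, stopping after MAX_CNT_RAWTP samples.
 */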
  1254. static int get_stack_print_output(void *data, int size)
  1255. {
  1256. bool good_kern_stack = false, good_user_stack = false;
  1257. const char *nonjit_func = "___bpf_prog_run";
  1258. struct get_stack_trace_t *e = data;
  1259. int i, num_stack;
  1260. static __u64 cnt;
  1261. struct ksym *ks;
  1262. cnt++;
  1263. if (size < sizeof(struct get_stack_trace_t)) {
  1264. __u64 *raw_data = data;
  1265. bool found = false;
  1266. num_stack = size / sizeof(__u64);
  1267. /* If jit is enabled, we do not have a good way to
  1268. * verify the sanity of the kernel stack. So we
  1269. * just assume it is good if the stack is not empty.
  1270. * This could be improved in the future.
  1271. */
  1272. if (jit_enabled) {
  1273. found = num_stack > 0;
  1274. } else {
  1275. for (i = 0; i < num_stack; i++) {
  1276. ks = ksym_search(raw_data[i]);
  1277. if (strcmp(ks->name, nonjit_func) == 0) {
  1278. found = true;
  1279. break;
  1280. }
  1281. }
  1282. }
  1283. if (found) {
  1284. good_kern_stack = true;
  1285. good_user_stack = true;
  1286. }
  1287. } else {
  1288. num_stack = e->kern_stack_size / sizeof(__u64);
  1289. if (jit_enabled) {
  1290. good_kern_stack = num_stack > 0;
  1291. } else {
  1292. for (i = 0; i < num_stack; i++) {
  1293. ks = ksym_search(e->kern_stack[i]);
  1294. if (strcmp(ks->name, nonjit_func) == 0) {
  1295. good_kern_stack = true;
  1296. break;
  1297. }
  1298. }
  1299. }
  1300. if (e->user_stack_size > 0 && e->user_stack_buildid_size > 0)
  1301. good_user_stack = true;
  1302. }
  1303. if (!good_kern_stack || !good_user_stack)
  1304. return LIBBPF_PERF_EVENT_ERROR;
  1305. if (cnt == MAX_CNT_RAWTP)
  1306. return LIBBPF_PERF_EVENT_DONE;
  1307. return LIBBPF_PERF_EVENT_CONT;
  1308. }
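/* Attach test_get_stack_rawtp.o to the sys_enter raw tracepoint, stream its
 * perf event output through get_stack_print_output() above, and trigger it
 * with a handful of nanosleep() calls.
 */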
  1309. static void test_get_stack_raw_tp(void)
  1310. {
  1311. const char *file = "./test_get_stack_rawtp.o";
  1312. int i, efd, err, prog_fd, pmu_fd, perfmap_fd;
  1313. struct perf_event_attr attr = {};
  1314. struct timespec tv = {0, 10};
  1315. __u32 key = 0, duration = 0;
  1316. struct bpf_object *obj;
  1317. err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
  1318. if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
  1319. return;
  1320. efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
  1321. if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
  1322. goto close_prog;
  1323. perfmap_fd = bpf_find_map(__func__, obj, "perfmap");
  1324. if (CHECK(perfmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
  1325. perfmap_fd, errno))
  1326. goto close_prog;
  1327. err = load_kallsyms();
  1328. if (CHECK(err < 0, "load_kallsyms", "err %d errno %d\n", err, errno))
  1329. goto close_prog;
  1330. attr.sample_type = PERF_SAMPLE_RAW;
  1331. attr.type = PERF_TYPE_SOFTWARE;
  1332. attr.config = PERF_COUNT_SW_BPF_OUTPUT;
  1333. pmu_fd = syscall(__NR_perf_event_open, &attr, getpid()/*pid*/, -1/*cpu*/,
  1334. -1/*group_fd*/, 0);
  1335. if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
  1336. errno))
  1337. goto close_prog;
  1338. err = bpf_map_update_elem(perfmap_fd, &key, &pmu_fd, BPF_ANY);
  1339. if (CHECK(err < 0, "bpf_map_update_elem", "err %d errno %d\n", err,
  1340. errno))
  1341. goto close_prog;
  1342. err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
  1343. if (CHECK(err < 0, "ioctl PERF_EVENT_IOC_ENABLE", "err %d errno %d\n",
  1344. err, errno))
  1345. goto close_prog;
  1346. err = perf_event_mmap(pmu_fd);
  1347. if (CHECK(err < 0, "perf_event_mmap", "err %d errno %d\n", err, errno))
  1348. goto close_prog;
  1349. /* trigger some syscall action */
  1350. for (i = 0; i < MAX_CNT_RAWTP; i++)
  1351. nanosleep(&tv, NULL);
  1352. err = perf_event_poller(pmu_fd, get_stack_print_output);
  1353. if (CHECK(err < 0, "perf_event_poller", "err %d errno %d\n", err, errno))
  1354. goto close_prog;
  1355. goto close_prog_noerr;
  1356. close_prog:
  1357. error_cnt++;
  1358. close_prog_noerr:
  1359. bpf_object__close(obj);
  1360. }
  1361. static void test_task_fd_query_rawtp(void)
  1362. {
  1363. const char *file = "./test_get_stack_rawtp.o";
  1364. __u64 probe_offset, probe_addr;
  1365. __u32 len, prog_id, fd_type;
  1366. struct bpf_object *obj;
  1367. int efd, err, prog_fd;
  1368. __u32 duration = 0;
  1369. char buf[256];
  1370. err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
  1371. if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
  1372. return;
  1373. efd = bpf_raw_tracepoint_open("sys_enter", prog_fd);
  1374. if (CHECK(efd < 0, "raw_tp_open", "err %d errno %d\n", efd, errno))
  1375. goto close_prog;
  1376. /* query (getpid(), efd) */
  1377. len = sizeof(buf);
  1378. err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
  1379. &fd_type, &probe_offset, &probe_addr);
  1380. if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
  1381. errno))
  1382. goto close_prog;
  1383. err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
  1384. strcmp(buf, "sys_enter") == 0;
  1385. if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
  1386. fd_type, buf))
  1387. goto close_prog;
  1388. /* test zero len */
  1389. len = 0;
  1390. err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
  1391. &fd_type, &probe_offset, &probe_addr);
  1392. if (CHECK(err < 0, "bpf_task_fd_query (len = 0)", "err %d errno %d\n",
  1393. err, errno))
  1394. goto close_prog;
  1395. err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
  1396. len == strlen("sys_enter");
  1397. if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
  1398. goto close_prog;
  1399. /* test empty buffer */
  1400. len = sizeof(buf);
  1401. err = bpf_task_fd_query(getpid(), efd, 0, 0, &len, &prog_id,
  1402. &fd_type, &probe_offset, &probe_addr);
  1403. if (CHECK(err < 0, "bpf_task_fd_query (buf = 0)", "err %d errno %d\n",
  1404. err, errno))
  1405. goto close_prog;
  1406. err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
  1407. len == strlen("sys_enter");
  1408. if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
  1409. goto close_prog;
  1410. /* test smaller buffer */
  1411. len = 3;
  1412. err = bpf_task_fd_query(getpid(), efd, 0, buf, &len, &prog_id,
  1413. &fd_type, &probe_offset, &probe_addr);
  1414. if (CHECK(err >= 0 || errno != ENOSPC, "bpf_task_fd_query (len = 3)",
  1415. "err %d errno %d\n", err, errno))
  1416. goto close_prog;
  1417. err = fd_type == BPF_FD_TYPE_RAW_TRACEPOINT &&
  1418. len == strlen("sys_enter") &&
  1419. strcmp(buf, "sy") == 0;
  1420. if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len))
  1421. goto close_prog;
  1422. goto close_prog_noerr;
  1423. close_prog:
  1424. error_cnt++;
  1425. close_prog_noerr:
  1426. bpf_object__close(obj);
  1427. }
  1428. static void test_task_fd_query_tp_core(const char *probe_name,
  1429. const char *tp_name)
  1430. {
  1431. const char *file = "./test_tracepoint.o";
  1432. int err, bytes, efd, prog_fd, pmu_fd;
  1433. struct perf_event_attr attr = {};
  1434. __u64 probe_offset, probe_addr;
  1435. __u32 len, prog_id, fd_type;
  1436. struct bpf_object *obj;
  1437. __u32 duration = 0;
  1438. char buf[256];
  1439. err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
  1440. if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
  1441. goto close_prog;
  1442. snprintf(buf, sizeof(buf),
  1443. "/sys/kernel/debug/tracing/events/%s/id", probe_name);
  1444. efd = open(buf, O_RDONLY, 0);
  1445. if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
  1446. goto close_prog;
  1447. bytes = read(efd, buf, sizeof(buf));
  1448. close(efd);
  1449. if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
  1450. "bytes %d errno %d\n", bytes, errno))
  1451. goto close_prog;
  1452. attr.config = strtol(buf, NULL, 0);
  1453. attr.type = PERF_TYPE_TRACEPOINT;
  1454. attr.sample_type = PERF_SAMPLE_RAW;
  1455. attr.sample_period = 1;
  1456. attr.wakeup_events = 1;
  1457. pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
  1458. 0 /* cpu 0 */, -1 /* group id */,
  1459. 0 /* flags */);
  1460. if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd, errno))
  1461. goto close_pmu;
  1462. err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
  1463. if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
  1464. errno))
  1465. goto close_pmu;
  1466. err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
  1467. if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
  1468. errno))
  1469. goto close_pmu;
  1470. /* query (getpid(), pmu_fd) */
  1471. len = sizeof(buf);
  1472. err = bpf_task_fd_query(getpid(), pmu_fd, 0, buf, &len, &prog_id,
  1473. &fd_type, &probe_offset, &probe_addr);
  1474. if (CHECK(err < 0, "bpf_task_fd_query", "err %d errno %d\n", err,
  1475. errno))
  1476. goto close_pmu;
  1477. err = (fd_type == BPF_FD_TYPE_TRACEPOINT) && !strcmp(buf, tp_name);
  1478. if (CHECK(!err, "check_results", "fd_type %d tp_name %s\n",
  1479. fd_type, buf))
  1480. goto close_pmu;
  1481. close(pmu_fd);
  1482. goto close_prog_noerr;
  1483. close_pmu:
  1484. close(pmu_fd);
  1485. close_prog:
  1486. error_cnt++;
  1487. close_prog_noerr:
  1488. bpf_object__close(obj);
  1489. }
  1490. static void test_task_fd_query_tp(void)
  1491. {
  1492. test_task_fd_query_tp_core("sched/sched_switch",
  1493. "sched_switch");
  1494. test_task_fd_query_tp_core("syscalls/sys_enter_read",
  1495. "sys_enter_read");
  1496. }
  1497. int main(void)
  1498. {
  1499. jit_enabled = is_jit_enabled();
  1500. test_pkt_access();
  1501. test_xdp();
  1502. test_xdp_adjust_tail();
  1503. test_l4lb_all();
  1504. test_xdp_noinline();
  1505. test_tcp_estats();
  1506. test_bpf_obj_id();
  1507. test_pkt_md_access();
  1508. test_obj_name();
  1509. test_tp_attach_query();
  1510. test_stacktrace_map();
  1511. test_stacktrace_build_id();
  1512. test_stacktrace_build_id_nmi();
  1513. test_stacktrace_map_raw_tp();
  1514. test_get_stack_raw_tp();
  1515. test_task_fd_query_rawtp();
  1516. test_task_fd_query_tp();
  1517. printf("Summary: %d PASSED, %d FAILED\n", pass_cnt, error_cnt);
  1518. return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS;
  1519. }