  1. // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
  2. /*
  3. * common eBPF ELF operations.
  4. *
  5. * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
  6. * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
  7. * Copyright (C) 2015 Huawei Inc.
  8. *
  9. * This program is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation;
  12. * version 2.1 of the License (not later!)
  13. *
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with this program; if not, see <http://www.gnu.org/licenses>
  21. */
  22. #include <stdlib.h>
  23. #include <string.h>
  24. #include <memory.h>
  25. #include <unistd.h>
  26. #include <asm/unistd.h>
  27. #include <errno.h>
  28. #include <linux/bpf.h>
  29. #include <linux/filter.h>
  30. #include <linux/kernel.h>
  31. #include <limits.h>
  32. #include <sys/resource.h>
  33. #include "bpf.h"
  34. #include "libbpf.h"
  35. #include "libbpf_internal.h"
  36. /*
  37. * When building perf, unistd.h is overridden. __NR_bpf is
  38. * required to be defined explicitly.
  39. */
#ifndef __NR_bpf
# if defined(__i386__)
# define __NR_bpf 357
# elif defined(__x86_64__)
# define __NR_bpf 321
# elif defined(__aarch64__)
# define __NR_bpf 280
# elif defined(__sparc__)
# define __NR_bpf 349
# elif defined(__s390__)
# define __NR_bpf 351
# elif defined(__arc__)
# define __NR_bpf 280
/* MIPS has three ABIs with distinct syscall number bases (o32/n32/n64) */
# elif defined(__mips__) && defined(_ABIO32)
# define __NR_bpf 4355
# elif defined(__mips__) && defined(_ABIN32)
# define __NR_bpf 6319
# elif defined(__mips__) && defined(_ABI64)
# define __NR_bpf 5315
# else
# error __NR_bpf not defined. libbpf does not support your arch.
# endif
#endif
  63. static inline __u64 ptr_to_u64(const void *ptr)
  64. {
  65. return (__u64) (unsigned long) ptr;
  66. }
/* Raw bpf(2) syscall invocation; returns the kernel's result directly,
 * with errno set on failure.
 */
static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
			  unsigned int size)
{
	return syscall(__NR_bpf, cmd, attr, size);
}
  72. static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
  73. unsigned int size)
  74. {
  75. int fd;
  76. fd = sys_bpf(cmd, attr, size);
  77. return ensure_good_fd(fd);
  78. }
  79. int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
  80. {
  81. int fd;
  82. do {
  83. fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size);
  84. } while (fd < 0 && errno == EAGAIN && --attempts > 0);
  85. return fd;
  86. }
  87. /* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to
  88. * memcg-based memory accounting for BPF maps and progs. This was done in [0].
  89. * We use the support for bpf_ktime_get_coarse_ns() helper, which was added in
  90. * the same 5.11 Linux release ([1]), to detect memcg-based accounting for BPF.
  91. *
  92. * [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/
  93. * [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper")
  94. */
int probe_memcg_account(int token_fd)
{
	const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
	struct bpf_insn insns[] = {
		BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
		BPF_EXIT_INSN(),
	};
	size_t insn_cnt = ARRAY_SIZE(insns);
	union bpf_attr attr;
	int prog_fd;

	/* attempt loading a trivial socket filter calling
	 * bpf_ktime_get_coarse_ns(); that helper landed in the same 5.11
	 * release as memcg-based accounting, so success implies memcg mode
	 */
	memset(&attr, 0, attr_sz);
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = insn_cnt;
	attr.license = ptr_to_u64("GPL");
	attr.prog_token_fd = token_fd;
	if (token_fd)
		attr.prog_flags |= BPF_F_TOKEN_FD;

	prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, attr_sz);
	if (prog_fd >= 0) {
		/* probe program is not needed, only its loadability */
		close(prog_fd);
		return 1;
	}
	return 0;
}
/* set once bump_rlimit_memlock() has run (successfully or not) */
static bool memlock_bumped;
/* RLIMIT_MEMLOCK value to set on bump; 0 disables auto-bumping */
static rlim_t memlock_rlim = RLIM_INFINITY;

/* Override the RLIMIT_MEMLOCK value libbpf would set. Must be called
 * before the first map/prog load; returns -EBUSY afterwards.
 */
int libbpf_set_memlock_rlim(size_t memlock_bytes)
{
	if (memlock_bumped)
		return libbpf_err(-EBUSY);

	memlock_rlim = memlock_bytes;
	return 0;
}
/* Best-effort, once-only bump of RLIMIT_MEMLOCK for kernels that still
 * charge BPF objects against the memlock limit.
 */
int bump_rlimit_memlock(void)
{
	struct rlimit rlim;

	/* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
	if (memlock_bumped || feat_supported(NULL, FEAT_MEMCG_ACCOUNT))
		return 0;

	/* mark done up front so we only ever attempt this once */
	memlock_bumped = true;

	/* zero memlock_rlim disables auto-bumping RLIMIT_MEMLOCK */
	if (memlock_rlim == 0)
		return 0;

	rlim.rlim_cur = rlim.rlim_max = memlock_rlim;
	if (setrlimit(RLIMIT_MEMLOCK, &rlim))
		return -errno;

	return 0;
}
/* Create a BPF map with the given type and geometry; all optional
 * attributes (BTF IDs, flags, NUMA node, token FD, ...) come from @opts.
 * Returns a new map FD, or a negative error.
 */
int bpf_map_create(enum bpf_map_type map_type,
		   const char *map_name,
		   __u32 key_size,
		   __u32 value_size,
		   __u32 max_entries,
		   const struct bpf_map_create_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, map_token_fd);
	union bpf_attr attr;
	int fd;

	/* best-effort RLIMIT_MEMLOCK bump for pre-memcg-accounting kernels */
	bump_rlimit_memlock();

	memset(&attr, 0, attr_sz);

	if (!OPTS_VALID(opts, bpf_map_create_opts))
		return libbpf_err(-EINVAL);

	attr.map_type = map_type;
	/* only pass the name if the kernel understands object names */
	if (map_name && feat_supported(NULL, FEAT_PROG_NAME))
		libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;

	attr.btf_fd = OPTS_GET(opts, btf_fd, 0);
	attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0);
	attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0);
	attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0);
	attr.value_type_btf_obj_fd = OPTS_GET(opts, value_type_btf_obj_fd, 0);

	attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0);
	attr.map_flags = OPTS_GET(opts, map_flags, 0);
	attr.map_extra = OPTS_GET(opts, map_extra, 0);
	attr.numa_node = OPTS_GET(opts, numa_node, 0);
	attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0);
	attr.map_token_fd = OPTS_GET(opts, token_fd, 0);

	fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz);
	return libbpf_err_errno(fd);
}
  179. static void *
  180. alloc_zero_tailing_info(const void *orecord, __u32 cnt,
  181. __u32 actual_rec_size, __u32 expected_rec_size)
  182. {
  183. __u64 info_len = (__u64)actual_rec_size * cnt;
  184. void *info, *nrecord;
  185. int i;
  186. info = malloc(info_len);
  187. if (!info)
  188. return NULL;
  189. /* zero out bytes kernel does not understand */
  190. nrecord = info;
  191. for (i = 0; i < cnt; i++) {
  192. memcpy(nrecord, orecord, expected_rec_size);
  193. memset(nrecord + expected_rec_size, 0,
  194. actual_rec_size - expected_rec_size);
  195. orecord += actual_rec_size;
  196. nrecord += actual_rec_size;
  197. }
  198. return info;
  199. }
/* Low-level BPF_PROG_LOAD wrapper. Retries on EAGAIN, re-tries with
 * zero-padded func/line info records when the kernel reports E2BIG due to
 * larger userspace record sizes, and, when the caller supplied a log buffer
 * with log_level == 0, retries once more with log_level == 1 to capture the
 * verifier log on failure.
 * Returns a program FD, or a negative error.
 */
int bpf_prog_load(enum bpf_prog_type prog_type,
		  const char *prog_name, const char *license,
		  const struct bpf_insn *insns, size_t insn_cnt,
		  struct bpf_prog_load_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, prog_token_fd);
	void *finfo = NULL, *linfo = NULL;
	const char *func_info, *line_info;
	__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
	__u32 func_info_rec_size, line_info_rec_size;
	int fd, attempts;
	union bpf_attr attr;
	char *log_buf;

	bump_rlimit_memlock();

	if (!OPTS_VALID(opts, bpf_prog_load_opts))
		return libbpf_err(-EINVAL);

	attempts = OPTS_GET(opts, attempts, 0);
	if (attempts < 0)
		return libbpf_err(-EINVAL);
	if (attempts == 0)
		attempts = PROG_LOAD_ATTEMPTS;

	memset(&attr, 0, attr_sz);

	attr.prog_type = prog_type;
	attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0);
	attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0);
	attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
	attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
	attr.kern_version = OPTS_GET(opts, kern_version, 0);
	attr.prog_token_fd = OPTS_GET(opts, token_fd, 0);

	/* only pass the name if the kernel understands object names */
	if (prog_name && feat_supported(NULL, FEAT_PROG_NAME))
		libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
	attr.license = ptr_to_u64(license);

	/* insn_cnt travels through a 32-bit attr field */
	if (insn_cnt > UINT_MAX)
		return libbpf_err(-E2BIG);

	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = (__u32)insn_cnt;

	attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
	attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0);

	/* attach target prog FD and BTF object FD are mutually exclusive */
	if (attach_prog_fd && attach_btf_obj_fd)
		return libbpf_err(-EINVAL);

	attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0);
	if (attach_prog_fd)
		attr.attach_prog_fd = attach_prog_fd;
	else
		attr.attach_btf_obj_fd = attach_btf_obj_fd;

	log_buf = OPTS_GET(opts, log_buf, NULL);
	log_size = OPTS_GET(opts, log_size, 0);
	log_level = OPTS_GET(opts, log_level, 0);

	/* log_buf and log_size must be provided (or omitted) together */
	if (!!log_buf != !!log_size)
		return libbpf_err(-EINVAL);

	func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
	func_info = OPTS_GET(opts, func_info, NULL);
	attr.func_info_rec_size = func_info_rec_size;
	attr.func_info = ptr_to_u64(func_info);
	attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0);

	line_info_rec_size = OPTS_GET(opts, line_info_rec_size, 0);
	line_info = OPTS_GET(opts, line_info, NULL);
	attr.line_info_rec_size = line_info_rec_size;
	attr.line_info = ptr_to_u64(line_info);
	attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);

	attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));

	if (log_level) {
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_size;
		attr.log_level = log_level;
	}

	fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
	OPTS_SET(opts, log_true_size, attr.log_true_size);
	if (fd >= 0)
		return fd;

	/* After bpf_prog_load, the kernel may modify certain attributes
	 * to give user space a hint how to deal with loading failure.
	 * Check to see whether we can make some changes and load again.
	 */
	while (errno == E2BIG && (!finfo || !linfo)) {
		if (!finfo && attr.func_info_cnt &&
		    attr.func_info_rec_size < func_info_rec_size) {
			/* try with corrected func info records */
			finfo = alloc_zero_tailing_info(func_info,
							attr.func_info_cnt,
							func_info_rec_size,
							attr.func_info_rec_size);
			if (!finfo) {
				errno = E2BIG;
				goto done;
			}

			attr.func_info = ptr_to_u64(finfo);
			attr.func_info_rec_size = func_info_rec_size;
		} else if (!linfo && attr.line_info_cnt &&
			   attr.line_info_rec_size < line_info_rec_size) {
			/* try with corrected line info records */
			linfo = alloc_zero_tailing_info(line_info,
							attr.line_info_cnt,
							line_info_rec_size,
							attr.line_info_rec_size);
			if (!linfo) {
				errno = E2BIG;
				goto done;
			}

			attr.line_info = ptr_to_u64(linfo);
			attr.line_info_rec_size = line_info_rec_size;
		} else {
			break;
		}

		fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
		OPTS_SET(opts, log_true_size, attr.log_true_size);
		if (fd >= 0)
			goto done;
	}

	if (log_level == 0 && log_buf) {
		/* log_level == 0 with non-NULL log_buf requires retrying on error
		 * with log_level == 1 and log_buf/log_buf_size set, to get details of
		 * failure
		 */
		attr.log_buf = ptr_to_u64(log_buf);
		attr.log_size = log_size;
		attr.log_level = 1;

		fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
		OPTS_SET(opts, log_true_size, attr.log_true_size);
	}
done:
	/* free() doesn't affect errno, so we don't need to restore it */
	free(finfo);
	free(linfo);
	return libbpf_err_errno(fd);
}
  325. int bpf_map_update_elem(int fd, const void *key, const void *value,
  326. __u64 flags)
  327. {
  328. const size_t attr_sz = offsetofend(union bpf_attr, flags);
  329. union bpf_attr attr;
  330. int ret;
  331. memset(&attr, 0, attr_sz);
  332. attr.map_fd = fd;
  333. attr.key = ptr_to_u64(key);
  334. attr.value = ptr_to_u64(value);
  335. attr.flags = flags;
  336. ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz);
  337. return libbpf_err_errno(ret);
  338. }
  339. int bpf_map_lookup_elem(int fd, const void *key, void *value)
  340. {
  341. const size_t attr_sz = offsetofend(union bpf_attr, flags);
  342. union bpf_attr attr;
  343. int ret;
  344. memset(&attr, 0, attr_sz);
  345. attr.map_fd = fd;
  346. attr.key = ptr_to_u64(key);
  347. attr.value = ptr_to_u64(value);
  348. ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz);
  349. return libbpf_err_errno(ret);
  350. }
  351. int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
  352. {
  353. const size_t attr_sz = offsetofend(union bpf_attr, flags);
  354. union bpf_attr attr;
  355. int ret;
  356. memset(&attr, 0, attr_sz);
  357. attr.map_fd = fd;
  358. attr.key = ptr_to_u64(key);
  359. attr.value = ptr_to_u64(value);
  360. attr.flags = flags;
  361. ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz);
  362. return libbpf_err_errno(ret);
  363. }
  364. int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
  365. {
  366. const size_t attr_sz = offsetofend(union bpf_attr, flags);
  367. union bpf_attr attr;
  368. int ret;
  369. memset(&attr, 0, attr_sz);
  370. attr.map_fd = fd;
  371. attr.key = ptr_to_u64(key);
  372. attr.value = ptr_to_u64(value);
  373. ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz);
  374. return libbpf_err_errno(ret);
  375. }
  376. int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags)
  377. {
  378. const size_t attr_sz = offsetofend(union bpf_attr, flags);
  379. union bpf_attr attr;
  380. int ret;
  381. memset(&attr, 0, attr_sz);
  382. attr.map_fd = fd;
  383. attr.key = ptr_to_u64(key);
  384. attr.value = ptr_to_u64(value);
  385. attr.flags = flags;
  386. ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz);
  387. return libbpf_err_errno(ret);
  388. }
  389. int bpf_map_delete_elem(int fd, const void *key)
  390. {
  391. const size_t attr_sz = offsetofend(union bpf_attr, flags);
  392. union bpf_attr attr;
  393. int ret;
  394. memset(&attr, 0, attr_sz);
  395. attr.map_fd = fd;
  396. attr.key = ptr_to_u64(key);
  397. ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
  398. return libbpf_err_errno(ret);
  399. }
  400. int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags)
  401. {
  402. const size_t attr_sz = offsetofend(union bpf_attr, flags);
  403. union bpf_attr attr;
  404. int ret;
  405. memset(&attr, 0, attr_sz);
  406. attr.map_fd = fd;
  407. attr.key = ptr_to_u64(key);
  408. attr.flags = flags;
  409. ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
  410. return libbpf_err_errno(ret);
  411. }
  412. int bpf_map_get_next_key(int fd, const void *key, void *next_key)
  413. {
  414. const size_t attr_sz = offsetofend(union bpf_attr, next_key);
  415. union bpf_attr attr;
  416. int ret;
  417. memset(&attr, 0, attr_sz);
  418. attr.map_fd = fd;
  419. attr.key = ptr_to_u64(key);
  420. attr.next_key = ptr_to_u64(next_key);
  421. ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, attr_sz);
  422. return libbpf_err_errno(ret);
  423. }
  424. int bpf_map_freeze(int fd)
  425. {
  426. const size_t attr_sz = offsetofend(union bpf_attr, map_fd);
  427. union bpf_attr attr;
  428. int ret;
  429. memset(&attr, 0, attr_sz);
  430. attr.map_fd = fd;
  431. ret = sys_bpf(BPF_MAP_FREEZE, &attr, attr_sz);
  432. return libbpf_err_errno(ret);
  433. }
/* Shared implementation for all BPF_MAP_*_BATCH commands. On return,
 * *count is overwritten with the number of elements the kernel actually
 * processed, which may be smaller than requested (even on failure).
 */
static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
				void *out_batch, void *keys, void *values,
				__u32 *count,
				const struct bpf_map_batch_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, batch);
	union bpf_attr attr;
	int ret;

	if (!OPTS_VALID(opts, bpf_map_batch_opts))
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.batch.map_fd = fd;
	attr.batch.in_batch = ptr_to_u64(in_batch);
	attr.batch.out_batch = ptr_to_u64(out_batch);
	attr.batch.keys = ptr_to_u64(keys);
	attr.batch.values = ptr_to_u64(values);
	attr.batch.count = *count;
	attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0);
	attr.batch.flags = OPTS_GET(opts, flags, 0);

	ret = sys_bpf(cmd, &attr, attr_sz);
	/* report back how many elements were processed, even on error */
	*count = attr.batch.count;

	return libbpf_err_errno(ret);
}
  457. int bpf_map_delete_batch(int fd, const void *keys, __u32 *count,
  458. const struct bpf_map_batch_opts *opts)
  459. {
  460. return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL,
  461. NULL, (void *)keys, NULL, count, opts);
  462. }
  463. int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys,
  464. void *values, __u32 *count,
  465. const struct bpf_map_batch_opts *opts)
  466. {
  467. return bpf_map_batch_common(BPF_MAP_LOOKUP_BATCH, fd, in_batch,
  468. out_batch, keys, values, count, opts);
  469. }
  470. int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch,
  471. void *keys, void *values, __u32 *count,
  472. const struct bpf_map_batch_opts *opts)
  473. {
  474. return bpf_map_batch_common(BPF_MAP_LOOKUP_AND_DELETE_BATCH,
  475. fd, in_batch, out_batch, keys, values,
  476. count, opts);
  477. }
  478. int bpf_map_update_batch(int fd, const void *keys, const void *values, __u32 *count,
  479. const struct bpf_map_batch_opts *opts)
  480. {
  481. return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL,
  482. (void *)keys, (void *)values, count, opts);
  483. }
  484. int bpf_obj_pin_opts(int fd, const char *pathname, const struct bpf_obj_pin_opts *opts)
  485. {
  486. const size_t attr_sz = offsetofend(union bpf_attr, path_fd);
  487. union bpf_attr attr;
  488. int ret;
  489. if (!OPTS_VALID(opts, bpf_obj_pin_opts))
  490. return libbpf_err(-EINVAL);
  491. memset(&attr, 0, attr_sz);
  492. attr.path_fd = OPTS_GET(opts, path_fd, 0);
  493. attr.pathname = ptr_to_u64((void *)pathname);
  494. attr.file_flags = OPTS_GET(opts, file_flags, 0);
  495. attr.bpf_fd = fd;
  496. ret = sys_bpf(BPF_OBJ_PIN, &attr, attr_sz);
  497. return libbpf_err_errno(ret);
  498. }
  499. int bpf_obj_pin(int fd, const char *pathname)
  500. {
  501. return bpf_obj_pin_opts(fd, pathname, NULL);
  502. }
  503. int bpf_obj_get(const char *pathname)
  504. {
  505. return bpf_obj_get_opts(pathname, NULL);
  506. }
  507. int bpf_obj_get_opts(const char *pathname, const struct bpf_obj_get_opts *opts)
  508. {
  509. const size_t attr_sz = offsetofend(union bpf_attr, path_fd);
  510. union bpf_attr attr;
  511. int fd;
  512. if (!OPTS_VALID(opts, bpf_obj_get_opts))
  513. return libbpf_err(-EINVAL);
  514. memset(&attr, 0, attr_sz);
  515. attr.path_fd = OPTS_GET(opts, path_fd, 0);
  516. attr.pathname = ptr_to_u64((void *)pathname);
  517. attr.file_flags = OPTS_GET(opts, file_flags, 0);
  518. fd = sys_bpf_fd(BPF_OBJ_GET, &attr, attr_sz);
  519. return libbpf_err_errno(fd);
  520. }
  521. int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
  522. unsigned int flags)
  523. {
  524. DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, opts,
  525. .flags = flags,
  526. );
  527. return bpf_prog_attach_opts(prog_fd, target_fd, type, &opts);
  528. }
/* Attach @prog_fd to @target for attach @type via BPF_PROG_ATTACH.
 * For multi-prog attach points, @opts may carry relative_fd OR relative_id
 * (mutually exclusive) plus expected_revision for race-free updates.
 */
int bpf_prog_attach_opts(int prog_fd, int target, enum bpf_attach_type type,
			 const struct bpf_prog_attach_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, expected_revision);
	__u32 relative_id, flags;
	int ret, relative_fd;
	union bpf_attr attr;

	if (!OPTS_VALID(opts, bpf_prog_attach_opts))
		return libbpf_err(-EINVAL);

	relative_id = OPTS_GET(opts, relative_id, 0);
	relative_fd = OPTS_GET(opts, relative_fd, 0);
	flags = OPTS_GET(opts, flags, 0);

	/* validate we don't have unexpected combinations of non-zero fields */
	if (relative_fd && relative_id)
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.target_fd = target;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;
	attr.replace_bpf_fd = OPTS_GET(opts, replace_fd, 0);
	attr.expected_revision = OPTS_GET(opts, expected_revision, 0);

	if (relative_id) {
		/* BPF_F_ID tells the kernel the relative field holds an ID, not an FD */
		attr.attach_flags = flags | BPF_F_ID;
		attr.relative_id = relative_id;
	} else {
		attr.attach_flags = flags;
		attr.relative_fd = relative_fd;
	}

	ret = sys_bpf(BPF_PROG_ATTACH, &attr, attr_sz);
	return libbpf_err_errno(ret);
}
/* Detach @prog_fd from @target for attach @type via BPF_PROG_DETACH.
 * Mirrors bpf_prog_attach_opts() for relative_fd/relative_id and
 * expected_revision handling.
 */
int bpf_prog_detach_opts(int prog_fd, int target, enum bpf_attach_type type,
			 const struct bpf_prog_detach_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, expected_revision);
	__u32 relative_id, flags;
	int ret, relative_fd;
	union bpf_attr attr;

	if (!OPTS_VALID(opts, bpf_prog_detach_opts))
		return libbpf_err(-EINVAL);

	relative_id = OPTS_GET(opts, relative_id, 0);
	relative_fd = OPTS_GET(opts, relative_fd, 0);
	flags = OPTS_GET(opts, flags, 0);

	/* validate we don't have unexpected combinations of non-zero fields */
	if (relative_fd && relative_id)
		return libbpf_err(-EINVAL);

	memset(&attr, 0, attr_sz);
	attr.target_fd = target;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;
	attr.expected_revision = OPTS_GET(opts, expected_revision, 0);

	if (relative_id) {
		/* BPF_F_ID tells the kernel the relative field holds an ID, not an FD */
		attr.attach_flags = flags | BPF_F_ID;
		attr.relative_id = relative_id;
	} else {
		attr.attach_flags = flags;
		attr.relative_fd = relative_fd;
	}

	ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz);
	return libbpf_err_errno(ret);
}
  590. int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
  591. {
  592. return bpf_prog_detach_opts(0, target_fd, type, NULL);
  593. }
  594. int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
  595. {
  596. return bpf_prog_detach_opts(prog_fd, target_fd, type, NULL);
  597. }
/* Create a BPF link attaching @prog_fd to @target_fd for @attach_type.
 * Per-attach-type options are validated so that fields belonging to other
 * attach types must be zero. For tracing-style attach types, falls back to
 * BPF_RAW_TRACEPOINT_OPEN on kernels where BPF_LINK_CREATE rejects them
 * with EINVAL, provided no link-create-only options were used.
 */
int bpf_link_create(int prog_fd, int target_fd,
		    enum bpf_attach_type attach_type,
		    const struct bpf_link_create_opts *opts)
{
	const size_t attr_sz = offsetofend(union bpf_attr, link_create);
	__u32 target_btf_id, iter_info_len, relative_id;
	int fd, err, relative_fd;
	union bpf_attr attr;

	if (!OPTS_VALID(opts, bpf_link_create_opts))
		return libbpf_err(-EINVAL);

	iter_info_len = OPTS_GET(opts, iter_info_len, 0);
	target_btf_id = OPTS_GET(opts, target_btf_id, 0);

	/* validate we don't have unexpected combinations of non-zero fields */
	if (iter_info_len || target_btf_id) {
		if (iter_info_len && target_btf_id)
			return libbpf_err(-EINVAL);
		if (!OPTS_ZEROED(opts, target_btf_id))
			return libbpf_err(-EINVAL);
	}

	memset(&attr, 0, attr_sz);
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = target_fd;
	attr.link_create.attach_type = attach_type;
	attr.link_create.flags = OPTS_GET(opts, flags, 0);

	/* BTF-ID-based target bypasses the per-attach-type option handling */
	if (target_btf_id) {
		attr.link_create.target_btf_id = target_btf_id;
		goto proceed;
	}

	switch (attach_type) {
	case BPF_TRACE_ITER:
		attr.link_create.iter_info = ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0));
		attr.link_create.iter_info_len = iter_info_len;
		break;
	case BPF_PERF_EVENT:
		attr.link_create.perf_event.bpf_cookie = OPTS_GET(opts, perf_event.bpf_cookie, 0);
		if (!OPTS_ZEROED(opts, perf_event))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_KPROBE_MULTI:
	case BPF_TRACE_KPROBE_SESSION:
		attr.link_create.kprobe_multi.flags = OPTS_GET(opts, kprobe_multi.flags, 0);
		attr.link_create.kprobe_multi.cnt = OPTS_GET(opts, kprobe_multi.cnt, 0);
		attr.link_create.kprobe_multi.syms = ptr_to_u64(OPTS_GET(opts, kprobe_multi.syms, 0));
		attr.link_create.kprobe_multi.addrs = ptr_to_u64(OPTS_GET(opts, kprobe_multi.addrs, 0));
		attr.link_create.kprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, kprobe_multi.cookies, 0));
		if (!OPTS_ZEROED(opts, kprobe_multi))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_UPROBE_MULTI:
		attr.link_create.uprobe_multi.flags = OPTS_GET(opts, uprobe_multi.flags, 0);
		attr.link_create.uprobe_multi.cnt = OPTS_GET(opts, uprobe_multi.cnt, 0);
		attr.link_create.uprobe_multi.path = ptr_to_u64(OPTS_GET(opts, uprobe_multi.path, 0));
		attr.link_create.uprobe_multi.offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.offsets, 0));
		attr.link_create.uprobe_multi.ref_ctr_offsets = ptr_to_u64(OPTS_GET(opts, uprobe_multi.ref_ctr_offsets, 0));
		attr.link_create.uprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, uprobe_multi.cookies, 0));
		attr.link_create.uprobe_multi.pid = OPTS_GET(opts, uprobe_multi.pid, 0);
		if (!OPTS_ZEROED(opts, uprobe_multi))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TRACE_RAW_TP:
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_MODIFY_RETURN:
	case BPF_LSM_MAC:
		attr.link_create.tracing.cookie = OPTS_GET(opts, tracing.cookie, 0);
		if (!OPTS_ZEROED(opts, tracing))
			return libbpf_err(-EINVAL);
		break;
	case BPF_NETFILTER:
		attr.link_create.netfilter.pf = OPTS_GET(opts, netfilter.pf, 0);
		attr.link_create.netfilter.hooknum = OPTS_GET(opts, netfilter.hooknum, 0);
		attr.link_create.netfilter.priority = OPTS_GET(opts, netfilter.priority, 0);
		attr.link_create.netfilter.flags = OPTS_GET(opts, netfilter.flags, 0);
		if (!OPTS_ZEROED(opts, netfilter))
			return libbpf_err(-EINVAL);
		break;
	case BPF_TCX_INGRESS:
	case BPF_TCX_EGRESS:
		relative_fd = OPTS_GET(opts, tcx.relative_fd, 0);
		relative_id = OPTS_GET(opts, tcx.relative_id, 0);
		/* relative FD and relative ID are mutually exclusive */
		if (relative_fd && relative_id)
			return libbpf_err(-EINVAL);
		if (relative_id) {
			/* BPF_F_ID marks the relative field as an ID, not an FD */
			attr.link_create.tcx.relative_id = relative_id;
			attr.link_create.flags |= BPF_F_ID;
		} else {
			attr.link_create.tcx.relative_fd = relative_fd;
		}
		attr.link_create.tcx.expected_revision = OPTS_GET(opts, tcx.expected_revision, 0);
		if (!OPTS_ZEROED(opts, tcx))
			return libbpf_err(-EINVAL);
		break;
	case BPF_NETKIT_PRIMARY:
	case BPF_NETKIT_PEER:
		relative_fd = OPTS_GET(opts, netkit.relative_fd, 0);
		relative_id = OPTS_GET(opts, netkit.relative_id, 0);
		/* relative FD and relative ID are mutually exclusive */
		if (relative_fd && relative_id)
			return libbpf_err(-EINVAL);
		if (relative_id) {
			attr.link_create.netkit.relative_id = relative_id;
			attr.link_create.flags |= BPF_F_ID;
		} else {
			attr.link_create.netkit.relative_fd = relative_fd;
		}
		attr.link_create.netkit.expected_revision = OPTS_GET(opts, netkit.expected_revision, 0);
		if (!OPTS_ZEROED(opts, netkit))
			return libbpf_err(-EINVAL);
		break;
	default:
		if (!OPTS_ZEROED(opts, flags))
			return libbpf_err(-EINVAL);
		break;
	}
proceed:
	fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, attr_sz);
	if (fd >= 0)
		return fd;
	/* we'll get EINVAL if LINK_CREATE doesn't support attaching fentry
	 * and other similar programs
	 */
	err = -errno;
	if (err != -EINVAL)
		return libbpf_err(err);

	/* if user used features not supported by
	 * BPF_RAW_TRACEPOINT_OPEN command, then just give up immediately
	 */
	if (attr.link_create.target_fd || attr.link_create.target_btf_id)
		return libbpf_err(err);
	if (!OPTS_ZEROED(opts, sz))
		return libbpf_err(err);

	/* otherwise, for few select kinds of programs that can be
	 * attached using BPF_RAW_TRACEPOINT_OPEN command, try that as
	 * a fallback for older kernels
	 */
	switch (attach_type) {
	case BPF_TRACE_RAW_TP:
	case BPF_LSM_MAC:
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
	case BPF_MODIFY_RETURN:
		return bpf_raw_tracepoint_open(NULL, prog_fd);
	default:
		return libbpf_err(err);
	}
}
  743. int bpf_link_detach(int link_fd)
  744. {
  745. const size_t attr_sz = offsetofend(union bpf_attr, link_detach);
  746. union bpf_attr attr;
  747. int ret;
  748. memset(&attr, 0, attr_sz);
  749. attr.link_detach.link_fd = link_fd;
  750. ret = sys_bpf(BPF_LINK_DETACH, &attr, attr_sz);
  751. return libbpf_err_errno(ret);
  752. }
  753. int bpf_link_update(int link_fd, int new_prog_fd,
  754. const struct bpf_link_update_opts *opts)
  755. {
  756. const size_t attr_sz = offsetofend(union bpf_attr, link_update);
  757. union bpf_attr attr;
  758. int ret;
  759. if (!OPTS_VALID(opts, bpf_link_update_opts))
  760. return libbpf_err(-EINVAL);
  761. if (OPTS_GET(opts, old_prog_fd, 0) && OPTS_GET(opts, old_map_fd, 0))
  762. return libbpf_err(-EINVAL);
  763. memset(&attr, 0, attr_sz);
  764. attr.link_update.link_fd = link_fd;
  765. attr.link_update.new_prog_fd = new_prog_fd;
  766. attr.link_update.flags = OPTS_GET(opts, flags, 0);
  767. if (OPTS_GET(opts, old_prog_fd, 0))
  768. attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
  769. else if (OPTS_GET(opts, old_map_fd, 0))
  770. attr.link_update.old_map_fd = OPTS_GET(opts, old_map_fd, 0);
  771. ret = sys_bpf(BPF_LINK_UPDATE, &attr, attr_sz);
  772. return libbpf_err_errno(ret);
  773. }
  774. int bpf_iter_create(int link_fd)
  775. {
  776. const size_t attr_sz = offsetofend(union bpf_attr, iter_create);
  777. union bpf_attr attr;
  778. int fd;
  779. memset(&attr, 0, attr_sz);
  780. attr.iter_create.link_fd = link_fd;
  781. fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, attr_sz);
  782. return libbpf_err_errno(fd);
  783. }
  784. int bpf_prog_query_opts(int target, enum bpf_attach_type type,
  785. struct bpf_prog_query_opts *opts)
  786. {
  787. const size_t attr_sz = offsetofend(union bpf_attr, query);
  788. union bpf_attr attr;
  789. int ret;
  790. if (!OPTS_VALID(opts, bpf_prog_query_opts))
  791. return libbpf_err(-EINVAL);
  792. memset(&attr, 0, attr_sz);
  793. attr.query.target_fd = target;
  794. attr.query.attach_type = type;
  795. attr.query.query_flags = OPTS_GET(opts, query_flags, 0);
  796. attr.query.count = OPTS_GET(opts, count, 0);
  797. attr.query.prog_ids = ptr_to_u64(OPTS_GET(opts, prog_ids, NULL));
  798. attr.query.link_ids = ptr_to_u64(OPTS_GET(opts, link_ids, NULL));
  799. attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL));
  800. attr.query.link_attach_flags = ptr_to_u64(OPTS_GET(opts, link_attach_flags, NULL));
  801. ret = sys_bpf(BPF_PROG_QUERY, &attr, attr_sz);
  802. OPTS_SET(opts, attach_flags, attr.query.attach_flags);
  803. OPTS_SET(opts, revision, attr.query.revision);
  804. OPTS_SET(opts, count, attr.query.count);
  805. return libbpf_err_errno(ret);
  806. }
  807. int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
  808. __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
  809. {
  810. LIBBPF_OPTS(bpf_prog_query_opts, opts);
  811. int ret;
  812. opts.query_flags = query_flags;
  813. opts.prog_ids = prog_ids;
  814. opts.prog_cnt = *prog_cnt;
  815. ret = bpf_prog_query_opts(target_fd, type, &opts);
  816. if (attach_flags)
  817. *attach_flags = opts.attach_flags;
  818. *prog_cnt = opts.prog_cnt;
  819. return libbpf_err_errno(ret);
  820. }
  821. int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
  822. {
  823. const size_t attr_sz = offsetofend(union bpf_attr, test);
  824. union bpf_attr attr;
  825. int ret;
  826. if (!OPTS_VALID(opts, bpf_test_run_opts))
  827. return libbpf_err(-EINVAL);
  828. memset(&attr, 0, attr_sz);
  829. attr.test.prog_fd = prog_fd;
  830. attr.test.batch_size = OPTS_GET(opts, batch_size, 0);
  831. attr.test.cpu = OPTS_GET(opts, cpu, 0);
  832. attr.test.flags = OPTS_GET(opts, flags, 0);
  833. attr.test.repeat = OPTS_GET(opts, repeat, 0);
  834. attr.test.duration = OPTS_GET(opts, duration, 0);
  835. attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0);
  836. attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0);
  837. attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0);
  838. attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0);
  839. attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL));
  840. attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL));
  841. attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL));
  842. attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL));
  843. ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, attr_sz);
  844. OPTS_SET(opts, data_size_out, attr.test.data_size_out);
  845. OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out);
  846. OPTS_SET(opts, duration, attr.test.duration);
  847. OPTS_SET(opts, retval, attr.test.retval);
  848. return libbpf_err_errno(ret);
  849. }
  850. static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
  851. {
  852. const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
  853. union bpf_attr attr;
  854. int err;
  855. memset(&attr, 0, attr_sz);
  856. attr.start_id = start_id;
  857. err = sys_bpf(cmd, &attr, attr_sz);
  858. if (!err)
  859. *next_id = attr.next_id;
  860. return libbpf_err_errno(err);
  861. }
  862. int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
  863. {
  864. return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID);
  865. }
  866. int bpf_map_get_next_id(__u32 start_id, __u32 *next_id)
  867. {
  868. return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID);
  869. }
  870. int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id)
  871. {
  872. return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
  873. }
  874. int bpf_link_get_next_id(__u32 start_id, __u32 *next_id)
  875. {
  876. return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID);
  877. }
  878. int bpf_prog_get_fd_by_id_opts(__u32 id,
  879. const struct bpf_get_fd_by_id_opts *opts)
  880. {
  881. const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
  882. union bpf_attr attr;
  883. int fd;
  884. if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
  885. return libbpf_err(-EINVAL);
  886. memset(&attr, 0, attr_sz);
  887. attr.prog_id = id;
  888. attr.open_flags = OPTS_GET(opts, open_flags, 0);
  889. fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, attr_sz);
  890. return libbpf_err_errno(fd);
  891. }
  892. int bpf_prog_get_fd_by_id(__u32 id)
  893. {
  894. return bpf_prog_get_fd_by_id_opts(id, NULL);
  895. }
  896. int bpf_map_get_fd_by_id_opts(__u32 id,
  897. const struct bpf_get_fd_by_id_opts *opts)
  898. {
  899. const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
  900. union bpf_attr attr;
  901. int fd;
  902. if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
  903. return libbpf_err(-EINVAL);
  904. memset(&attr, 0, attr_sz);
  905. attr.map_id = id;
  906. attr.open_flags = OPTS_GET(opts, open_flags, 0);
  907. fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz);
  908. return libbpf_err_errno(fd);
  909. }
  910. int bpf_map_get_fd_by_id(__u32 id)
  911. {
  912. return bpf_map_get_fd_by_id_opts(id, NULL);
  913. }
  914. int bpf_btf_get_fd_by_id_opts(__u32 id,
  915. const struct bpf_get_fd_by_id_opts *opts)
  916. {
  917. const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
  918. union bpf_attr attr;
  919. int fd;
  920. if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
  921. return libbpf_err(-EINVAL);
  922. memset(&attr, 0, attr_sz);
  923. attr.btf_id = id;
  924. attr.open_flags = OPTS_GET(opts, open_flags, 0);
  925. fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, attr_sz);
  926. return libbpf_err_errno(fd);
  927. }
  928. int bpf_btf_get_fd_by_id(__u32 id)
  929. {
  930. return bpf_btf_get_fd_by_id_opts(id, NULL);
  931. }
  932. int bpf_link_get_fd_by_id_opts(__u32 id,
  933. const struct bpf_get_fd_by_id_opts *opts)
  934. {
  935. const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
  936. union bpf_attr attr;
  937. int fd;
  938. if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
  939. return libbpf_err(-EINVAL);
  940. memset(&attr, 0, attr_sz);
  941. attr.link_id = id;
  942. attr.open_flags = OPTS_GET(opts, open_flags, 0);
  943. fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, attr_sz);
  944. return libbpf_err_errno(fd);
  945. }
  946. int bpf_link_get_fd_by_id(__u32 id)
  947. {
  948. return bpf_link_get_fd_by_id_opts(id, NULL);
  949. }
  950. int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
  951. {
  952. const size_t attr_sz = offsetofend(union bpf_attr, info);
  953. union bpf_attr attr;
  954. int err;
  955. memset(&attr, 0, attr_sz);
  956. attr.info.bpf_fd = bpf_fd;
  957. attr.info.info_len = *info_len;
  958. attr.info.info = ptr_to_u64(info);
  959. err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, attr_sz);
  960. if (!err)
  961. *info_len = attr.info.info_len;
  962. return libbpf_err_errno(err);
  963. }
  964. int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, __u32 *info_len)
  965. {
  966. return bpf_obj_get_info_by_fd(prog_fd, info, info_len);
  967. }
  968. int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len)
  969. {
  970. return bpf_obj_get_info_by_fd(map_fd, info, info_len);
  971. }
  972. int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u32 *info_len)
  973. {
  974. return bpf_obj_get_info_by_fd(btf_fd, info, info_len);
  975. }
  976. int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info_len)
  977. {
  978. return bpf_obj_get_info_by_fd(link_fd, info, info_len);
  979. }
  980. int bpf_raw_tracepoint_open_opts(int prog_fd, struct bpf_raw_tp_opts *opts)
  981. {
  982. const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint);
  983. union bpf_attr attr;
  984. int fd;
  985. if (!OPTS_VALID(opts, bpf_raw_tp_opts))
  986. return libbpf_err(-EINVAL);
  987. memset(&attr, 0, attr_sz);
  988. attr.raw_tracepoint.prog_fd = prog_fd;
  989. attr.raw_tracepoint.name = ptr_to_u64(OPTS_GET(opts, tp_name, NULL));
  990. attr.raw_tracepoint.cookie = OPTS_GET(opts, cookie, 0);
  991. fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz);
  992. return libbpf_err_errno(fd);
  993. }
  994. int bpf_raw_tracepoint_open(const char *name, int prog_fd)
  995. {
  996. LIBBPF_OPTS(bpf_raw_tp_opts, opts, .tp_name = name);
  997. return bpf_raw_tracepoint_open_opts(prog_fd, &opts);
  998. }
  999. int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts)
  1000. {
  1001. const size_t attr_sz = offsetofend(union bpf_attr, btf_token_fd);
  1002. union bpf_attr attr;
  1003. char *log_buf;
  1004. size_t log_size;
  1005. __u32 log_level;
  1006. int fd;
  1007. bump_rlimit_memlock();
  1008. memset(&attr, 0, attr_sz);
  1009. if (!OPTS_VALID(opts, bpf_btf_load_opts))
  1010. return libbpf_err(-EINVAL);
  1011. log_buf = OPTS_GET(opts, log_buf, NULL);
  1012. log_size = OPTS_GET(opts, log_size, 0);
  1013. log_level = OPTS_GET(opts, log_level, 0);
  1014. if (log_size > UINT_MAX)
  1015. return libbpf_err(-EINVAL);
  1016. if (log_size && !log_buf)
  1017. return libbpf_err(-EINVAL);
  1018. attr.btf = ptr_to_u64(btf_data);
  1019. attr.btf_size = btf_size;
  1020. attr.btf_flags = OPTS_GET(opts, btf_flags, 0);
  1021. attr.btf_token_fd = OPTS_GET(opts, token_fd, 0);
  1022. /* log_level == 0 and log_buf != NULL means "try loading without
  1023. * log_buf, but retry with log_buf and log_level=1 on error", which is
  1024. * consistent across low-level and high-level BTF and program loading
  1025. * APIs within libbpf and provides a sensible behavior in practice
  1026. */
  1027. if (log_level) {
  1028. attr.btf_log_buf = ptr_to_u64(log_buf);
  1029. attr.btf_log_size = (__u32)log_size;
  1030. attr.btf_log_level = log_level;
  1031. }
  1032. fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
  1033. if (fd < 0 && log_buf && log_level == 0) {
  1034. attr.btf_log_buf = ptr_to_u64(log_buf);
  1035. attr.btf_log_size = (__u32)log_size;
  1036. attr.btf_log_level = 1;
  1037. fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
  1038. }
  1039. OPTS_SET(opts, log_true_size, attr.btf_log_true_size);
  1040. return libbpf_err_errno(fd);
  1041. }
  1042. int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
  1043. __u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
  1044. __u64 *probe_addr)
  1045. {
  1046. const size_t attr_sz = offsetofend(union bpf_attr, task_fd_query);
  1047. union bpf_attr attr;
  1048. int err;
  1049. memset(&attr, 0, attr_sz);
  1050. attr.task_fd_query.pid = pid;
  1051. attr.task_fd_query.fd = fd;
  1052. attr.task_fd_query.flags = flags;
  1053. attr.task_fd_query.buf = ptr_to_u64(buf);
  1054. attr.task_fd_query.buf_len = *buf_len;
  1055. err = sys_bpf(BPF_TASK_FD_QUERY, &attr, attr_sz);
  1056. *buf_len = attr.task_fd_query.buf_len;
  1057. *prog_id = attr.task_fd_query.prog_id;
  1058. *fd_type = attr.task_fd_query.fd_type;
  1059. *probe_offset = attr.task_fd_query.probe_offset;
  1060. *probe_addr = attr.task_fd_query.probe_addr;
  1061. return libbpf_err_errno(err);
  1062. }
  1063. int bpf_enable_stats(enum bpf_stats_type type)
  1064. {
  1065. const size_t attr_sz = offsetofend(union bpf_attr, enable_stats);
  1066. union bpf_attr attr;
  1067. int fd;
  1068. memset(&attr, 0, attr_sz);
  1069. attr.enable_stats.type = type;
  1070. fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, attr_sz);
  1071. return libbpf_err_errno(fd);
  1072. }
  1073. int bpf_prog_bind_map(int prog_fd, int map_fd,
  1074. const struct bpf_prog_bind_opts *opts)
  1075. {
  1076. const size_t attr_sz = offsetofend(union bpf_attr, prog_bind_map);
  1077. union bpf_attr attr;
  1078. int ret;
  1079. if (!OPTS_VALID(opts, bpf_prog_bind_opts))
  1080. return libbpf_err(-EINVAL);
  1081. memset(&attr, 0, attr_sz);
  1082. attr.prog_bind_map.prog_fd = prog_fd;
  1083. attr.prog_bind_map.map_fd = map_fd;
  1084. attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);
  1085. ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, attr_sz);
  1086. return libbpf_err_errno(ret);
  1087. }
  1088. int bpf_token_create(int bpffs_fd, struct bpf_token_create_opts *opts)
  1089. {
  1090. const size_t attr_sz = offsetofend(union bpf_attr, token_create);
  1091. union bpf_attr attr;
  1092. int fd;
  1093. if (!OPTS_VALID(opts, bpf_token_create_opts))
  1094. return libbpf_err(-EINVAL);
  1095. memset(&attr, 0, attr_sz);
  1096. attr.token_create.bpffs_fd = bpffs_fd;
  1097. attr.token_create.flags = OPTS_GET(opts, flags, 0);
  1098. fd = sys_bpf_fd(BPF_TOKEN_CREATE, &attr, attr_sz);
  1099. return libbpf_err_errno(fd);
  1100. }