  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2021. Huawei Technologies Co., Ltd
  4. */
  5. #include <linux/kernel.h>
  6. #include <linux/bpf_verifier.h>
  7. #include <linux/bpf.h>
  8. #include <linux/btf.h>
  9. static struct bpf_struct_ops bpf_bpf_dummy_ops;
  10. /* A common type for test_N with return value in bpf_dummy_ops */
  11. typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *state, ...);
  12. static int dummy_ops_test_ret_function(struct bpf_dummy_ops_state *state, ...)
  13. {
  14. return 0;
  15. }
  16. struct bpf_dummy_ops_test_args {
  17. u64 args[MAX_BPF_FUNC_ARGS];
  18. struct bpf_dummy_ops_state state;
  19. };
  20. static struct btf *bpf_dummy_ops_btf;
  21. static struct bpf_dummy_ops_test_args *
  22. dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr)
  23. {
  24. __u32 size_in;
  25. struct bpf_dummy_ops_test_args *args;
  26. void __user *ctx_in;
  27. void __user *u_state;
  28. size_in = kattr->test.ctx_size_in;
  29. if (size_in != sizeof(u64) * nr)
  30. return ERR_PTR(-EINVAL);
  31. args = kzalloc(sizeof(*args), GFP_KERNEL);
  32. if (!args)
  33. return ERR_PTR(-ENOMEM);
  34. ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
  35. if (copy_from_user(args->args, ctx_in, size_in))
  36. goto out;
  37. /* args[0] is 0 means state argument of test_N will be NULL */
  38. u_state = u64_to_user_ptr(args->args[0]);
  39. if (u_state && copy_from_user(&args->state, u_state,
  40. sizeof(args->state)))
  41. goto out;
  42. return args;
  43. out:
  44. kfree(args);
  45. return ERR_PTR(-EFAULT);
  46. }
  47. static int dummy_ops_copy_args(struct bpf_dummy_ops_test_args *args)
  48. {
  49. void __user *u_state;
  50. u_state = u64_to_user_ptr(args->args[0]);
  51. if (u_state && copy_to_user(u_state, &args->state, sizeof(args->state)))
  52. return -EFAULT;
  53. return 0;
  54. }
  55. static int dummy_ops_call_op(void *image, struct bpf_dummy_ops_test_args *args)
  56. {
  57. dummy_ops_test_ret_fn test = (void *)image + cfi_get_offset();
  58. struct bpf_dummy_ops_state *state = NULL;
  59. /* state needs to be NULL if args[0] is 0 */
  60. if (args->args[0])
  61. state = &args->state;
  62. return test(state, args->args[1], args->args[2],
  63. args->args[3], args->args[4]);
  64. }
  65. static const struct bpf_ctx_arg_aux *find_ctx_arg_info(struct bpf_prog_aux *aux, int offset)
  66. {
  67. int i;
  68. for (i = 0; i < aux->ctx_arg_info_size; i++)
  69. if (aux->ctx_arg_info[i].offset == offset)
  70. return &aux->ctx_arg_info[i];
  71. return NULL;
  72. }
  73. /* There is only one check at the moment:
  74. * - zero should not be passed for pointer parameters not marked as nullable.
  75. */
  76. static int check_test_run_args(struct bpf_prog *prog, struct bpf_dummy_ops_test_args *args)
  77. {
  78. const struct btf_type *func_proto = prog->aux->attach_func_proto;
  79. for (u32 arg_no = 0; arg_no < btf_type_vlen(func_proto) ; ++arg_no) {
  80. const struct btf_param *param = &btf_params(func_proto)[arg_no];
  81. const struct bpf_ctx_arg_aux *info;
  82. const struct btf_type *t;
  83. int offset;
  84. if (args->args[arg_no] != 0)
  85. continue;
  86. /* Program is validated already, so there is no need
  87. * to check if t is NULL.
  88. */
  89. t = btf_type_skip_modifiers(bpf_dummy_ops_btf, param->type, NULL);
  90. if (!btf_type_is_ptr(t))
  91. continue;
  92. offset = btf_ctx_arg_offset(bpf_dummy_ops_btf, func_proto, arg_no);
  93. info = find_ctx_arg_info(prog->aux, offset);
  94. if (info && type_may_be_null(info->reg_type))
  95. continue;
  96. return -EINVAL;
  97. }
  98. return 0;
  99. }
  100. extern const struct bpf_link_ops bpf_struct_ops_link_lops;
  101. int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
  102. union bpf_attr __user *uattr)
  103. {
  104. const struct bpf_struct_ops *st_ops = &bpf_bpf_dummy_ops;
  105. const struct btf_type *func_proto;
  106. struct bpf_dummy_ops_test_args *args;
  107. struct bpf_tramp_links *tlinks = NULL;
  108. struct bpf_tramp_link *link = NULL;
  109. void *image = NULL;
  110. unsigned int op_idx;
  111. u32 image_off = 0;
  112. int prog_ret;
  113. s32 type_id;
  114. int err;
  115. type_id = btf_find_by_name_kind(bpf_dummy_ops_btf,
  116. bpf_bpf_dummy_ops.name,
  117. BTF_KIND_STRUCT);
  118. if (type_id < 0)
  119. return -EINVAL;
  120. if (prog->aux->attach_btf_id != type_id)
  121. return -EOPNOTSUPP;
  122. func_proto = prog->aux->attach_func_proto;
  123. args = dummy_ops_init_args(kattr, btf_type_vlen(func_proto));
  124. if (IS_ERR(args))
  125. return PTR_ERR(args);
  126. err = check_test_run_args(prog, args);
  127. if (err)
  128. goto out;
  129. tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
  130. if (!tlinks) {
  131. err = -ENOMEM;
  132. goto out;
  133. }
  134. link = kzalloc(sizeof(*link), GFP_USER);
  135. if (!link) {
  136. err = -ENOMEM;
  137. goto out;
  138. }
  139. /* prog doesn't take the ownership of the reference from caller */
  140. bpf_prog_inc(prog);
  141. bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_link_lops, prog);
  142. op_idx = prog->expected_attach_type;
  143. err = bpf_struct_ops_prepare_trampoline(tlinks, link,
  144. &st_ops->func_models[op_idx],
  145. &dummy_ops_test_ret_function,
  146. &image, &image_off,
  147. true);
  148. if (err < 0)
  149. goto out;
  150. err = arch_protect_bpf_trampoline(image, PAGE_SIZE);
  151. if (err)
  152. goto out;
  153. prog_ret = dummy_ops_call_op(image, args);
  154. err = dummy_ops_copy_args(args);
  155. if (err)
  156. goto out;
  157. if (put_user(prog_ret, &uattr->test.retval))
  158. err = -EFAULT;
  159. out:
  160. kfree(args);
  161. bpf_struct_ops_image_free(image);
  162. if (link)
  163. bpf_link_put(&link->link);
  164. kfree(tlinks);
  165. return err;
  166. }
  167. static int bpf_dummy_init(struct btf *btf)
  168. {
  169. bpf_dummy_ops_btf = btf;
  170. return 0;
  171. }
  172. static bool bpf_dummy_ops_is_valid_access(int off, int size,
  173. enum bpf_access_type type,
  174. const struct bpf_prog *prog,
  175. struct bpf_insn_access_aux *info)
  176. {
  177. return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
  178. }
  179. static int bpf_dummy_ops_check_member(const struct btf_type *t,
  180. const struct btf_member *member,
  181. const struct bpf_prog *prog)
  182. {
  183. u32 moff = __btf_member_bit_offset(t, member) / 8;
  184. switch (moff) {
  185. case offsetof(struct bpf_dummy_ops, test_sleepable):
  186. break;
  187. default:
  188. if (prog->sleepable)
  189. return -EINVAL;
  190. }
  191. return 0;
  192. }
  193. static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
  194. const struct bpf_reg_state *reg,
  195. int off, int size)
  196. {
  197. const struct btf_type *state;
  198. const struct btf_type *t;
  199. s32 type_id;
  200. type_id = btf_find_by_name_kind(reg->btf, "bpf_dummy_ops_state",
  201. BTF_KIND_STRUCT);
  202. if (type_id < 0)
  203. return -EINVAL;
  204. t = btf_type_by_id(reg->btf, reg->btf_id);
  205. state = btf_type_by_id(reg->btf, type_id);
  206. if (t != state) {
  207. bpf_log(log, "only access to bpf_dummy_ops_state is supported\n");
  208. return -EACCES;
  209. }
  210. if (off + size > sizeof(struct bpf_dummy_ops_state)) {
  211. bpf_log(log, "write access at off %d with size %d\n", off, size);
  212. return -EACCES;
  213. }
  214. return NOT_INIT;
  215. }
  216. static const struct bpf_verifier_ops bpf_dummy_verifier_ops = {
  217. .is_valid_access = bpf_dummy_ops_is_valid_access,
  218. .btf_struct_access = bpf_dummy_ops_btf_struct_access,
  219. };
  220. static int bpf_dummy_init_member(const struct btf_type *t,
  221. const struct btf_member *member,
  222. void *kdata, const void *udata)
  223. {
  224. return -EOPNOTSUPP;
  225. }
  226. static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
  227. {
  228. return -EOPNOTSUPP;
  229. }
  230. static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
  231. {
  232. }
  233. static int bpf_dummy_ops__test_1(struct bpf_dummy_ops_state *cb__nullable)
  234. {
  235. return 0;
  236. }
  237. static int bpf_dummy_test_2(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
  238. char a3, unsigned long a4)
  239. {
  240. return 0;
  241. }
  242. static int bpf_dummy_test_sleepable(struct bpf_dummy_ops_state *cb)
  243. {
  244. return 0;
  245. }
  246. static struct bpf_dummy_ops __bpf_bpf_dummy_ops = {
  247. .test_1 = bpf_dummy_ops__test_1,
  248. .test_2 = bpf_dummy_test_2,
  249. .test_sleepable = bpf_dummy_test_sleepable,
  250. };
  251. static struct bpf_struct_ops bpf_bpf_dummy_ops = {
  252. .verifier_ops = &bpf_dummy_verifier_ops,
  253. .init = bpf_dummy_init,
  254. .check_member = bpf_dummy_ops_check_member,
  255. .init_member = bpf_dummy_init_member,
  256. .reg = bpf_dummy_reg,
  257. .unreg = bpf_dummy_unreg,
  258. .name = "bpf_dummy_ops",
  259. .cfi_stubs = &__bpf_bpf_dummy_ops,
  260. .owner = THIS_MODULE,
  261. };
  262. static int __init bpf_dummy_struct_ops_init(void)
  263. {
  264. return register_bpf_struct_ops(&bpf_bpf_dummy_ops, bpf_dummy_ops);
  265. }
  266. late_initcall(bpf_dummy_struct_ops_init);