/* net/sched/act_bpf.c — TC "bpf" action implementation. */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
  4. */
  5. #include <linux/module.h>
  6. #include <linux/init.h>
  7. #include <linux/kernel.h>
  8. #include <linux/skbuff.h>
  9. #include <linux/rtnetlink.h>
  10. #include <linux/filter.h>
  11. #include <linux/bpf.h>
  12. #include <net/netlink.h>
  13. #include <net/sock.h>
  14. #include <net/pkt_sched.h>
  15. #include <net/pkt_cls.h>
  16. #include <linux/tc_act/tc_bpf.h>
  17. #include <net/tc_act/tc_bpf.h>
  18. #include <net/tc_wrapper.h>
  19. #define ACT_BPF_NAME_LEN 256
/* Parsed program configuration, staged before being committed to a
 * struct tcf_bpf under its lock.  Exactly one of the two program
 * flavors is populated, as flagged by @is_ebpf.
 */
struct tcf_bpf_cfg {
	struct bpf_prog *filter;	/* loaded program (classic or eBPF) */
	struct sock_filter *bpf_ops;	/* raw cBPF insns (NULL for eBPF) */
	const char *bpf_name;		/* optional eBPF program name */
	u16 bpf_num_ops;		/* number of cBPF instructions */
	bool is_ebpf;			/* selects put vs destroy on cleanup */
};

/* Forward declaration: ops are referenced before their definition below. */
static struct tc_action_ops act_bpf_ops;
/* Datapath entry point: run the attached BPF program on @skb and
 * translate its return value into a TC action verdict.
 * Called under RCU from the qdisc/classifier fast path.
 */
TC_INDIRECT_SCOPE int tcf_bpf_act(struct sk_buff *skb,
				  const struct tc_action *act,
				  struct tcf_result *res)
{
	bool at_ingress = skb_at_tc_ingress(skb);
	struct tcf_bpf *prog = to_bpf(act);
	struct bpf_prog *filter;
	int action, filter_res;

	tcf_lastuse_update(&prog->tcf_tm);
	bstats_update(this_cpu_ptr(prog->common.cpu_bstats), skb);

	filter = rcu_dereference(prog->filter);
	if (at_ingress) {
		/* On ingress the MAC header is already pulled; expose it to
		 * the program, then restore skb->data afterwards.
		 */
		__skb_push(skb, skb->mac_len);
		bpf_compute_data_pointers(skb);
		filter_res = bpf_prog_run(filter, skb);
		__skb_pull(skb, skb->mac_len);
	} else {
		bpf_compute_data_pointers(skb);
		filter_res = bpf_prog_run(filter, skb);
	}
	/* The program may have zeroed the timestamp while tstamp_type still
	 * advertises a non-default clock; reset the type so the pair stays
	 * consistent for later consumers.
	 */
	if (unlikely(!skb->tstamp && skb->tstamp_type))
		skb->tstamp_type = SKB_CLOCK_REALTIME;
	/* Drop the prefetched socket reference unless the packet is being
	 * accepted as-is.
	 */
	if (skb_sk_is_prefetched(skb) && filter_res != TC_ACT_OK)
		skb_orphan(skb);

	/* A BPF program may overwrite the default action opcode.
	 * Similarly as in cls_bpf, if filter_res == -1 we use the
	 * default action specified from tc.
	 *
	 * In case a different well-known TC_ACT opcode has been
	 * returned, it will overwrite the default one.
	 *
	 * For everything else that is unknown, TC_ACT_UNSPEC is
	 * returned.
	 */
	switch (filter_res) {
	case TC_ACT_PIPE:
	case TC_ACT_RECLASSIFY:
	case TC_ACT_OK:
	case TC_ACT_REDIRECT:
		action = filter_res;
		break;
	case TC_ACT_SHOT:
		action = filter_res;
		qstats_drop_inc(this_cpu_ptr(prog->common.cpu_qstats));
		break;
	case TC_ACT_UNSPEC:
		/* -1: fall back to the action configured from tc. */
		action = prog->tcf_action;
		break;
	default:
		action = TC_ACT_UNSPEC;
		break;
	}

	return action;
}
  82. static bool tcf_bpf_is_ebpf(const struct tcf_bpf *prog)
  83. {
  84. return !prog->bpf_ops;
  85. }
/* Dump classic-BPF specifics: instruction count plus the raw opcode
 * array.  Returns 0 on success, -EMSGSIZE if @skb lacks tailroom.
 */
static int tcf_bpf_dump_bpf_info(const struct tcf_bpf *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_ACT_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	/* Reserve the attribute first, then copy the insns into it. */
	nla = nla_reserve(skb, TCA_ACT_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}
/* Dump eBPF specifics: optional name, program id, and program tag.
 * Returns 0 on success, -EMSGSIZE if @skb lacks tailroom.
 */
static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	/* Name is optional; it is only present if userspace supplied one. */
	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_ACT_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_ACT_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}
/* .dump callback: serialize action parameters, program info, and
 * timestamps into @skb.  Returns the message length on success or -1
 * on failure (with any partial attributes trimmed off).
 */
static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
			int bind, int ref)
{
	/* Remember the tail so a failed dump can be rolled back. */
	unsigned char *tp = skb_tail_pointer(skb);
	struct tcf_bpf *prog = to_bpf(act);
	struct tc_act_bpf opt = {
		.index   = prog->tcf_index,
		.refcnt  = refcount_read(&prog->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&prog->tcf_bindcnt) - bind,
	};
	struct tcf_t tm;
	int ret;

	/* tcf_lock keeps action/program fields stable while dumping. */
	spin_lock_bh(&prog->tcf_lock);
	opt.action = prog->tcf_action;
	if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (tcf_bpf_is_ebpf(prog))
		ret = tcf_bpf_dump_ebpf_info(prog, skb);
	else
		ret = tcf_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	tcf_tm_dump(&tm, &prog->tcf_tm);
	if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm,
			  TCA_ACT_BPF_PAD))
		goto nla_put_failure;

	spin_unlock_bh(&prog->tcf_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&prog->tcf_lock);
	nlmsg_trim(skb, tp);
	return -1;
}
/* Netlink attribute policy for TCA_ACT_BPF_* attributes. */
static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
	[TCA_ACT_BPF_PARMS]	= { .len = sizeof(struct tc_act_bpf) },
	[TCA_ACT_BPF_FD]	= { .type = NLA_U32 },
	[TCA_ACT_BPF_NAME]	= { .type = NLA_NUL_STRING,
				    .len = ACT_BPF_NAME_LEN },
	[TCA_ACT_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_ACT_BPF_OPS]	= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};
  156. static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
  157. {
  158. struct sock_filter *bpf_ops;
  159. struct sock_fprog_kern fprog_tmp;
  160. struct bpf_prog *fp;
  161. u16 bpf_size, bpf_num_ops;
  162. int ret;
  163. bpf_num_ops = nla_get_u16(tb[TCA_ACT_BPF_OPS_LEN]);
  164. if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
  165. return -EINVAL;
  166. bpf_size = bpf_num_ops * sizeof(*bpf_ops);
  167. if (bpf_size != nla_len(tb[TCA_ACT_BPF_OPS]))
  168. return -EINVAL;
  169. bpf_ops = kmemdup(nla_data(tb[TCA_ACT_BPF_OPS]), bpf_size, GFP_KERNEL);
  170. if (bpf_ops == NULL)
  171. return -ENOMEM;
  172. fprog_tmp.len = bpf_num_ops;
  173. fprog_tmp.filter = bpf_ops;
  174. ret = bpf_prog_create(&fp, &fprog_tmp);
  175. if (ret < 0) {
  176. kfree(bpf_ops);
  177. return ret;
  178. }
  179. cfg->bpf_ops = bpf_ops;
  180. cfg->bpf_num_ops = bpf_num_ops;
  181. cfg->filter = fp;
  182. cfg->is_ebpf = false;
  183. return 0;
  184. }
  185. static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
  186. {
  187. struct bpf_prog *fp;
  188. char *name = NULL;
  189. u32 bpf_fd;
  190. bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]);
  191. fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_ACT);
  192. if (IS_ERR(fp))
  193. return PTR_ERR(fp);
  194. if (tb[TCA_ACT_BPF_NAME]) {
  195. name = nla_memdup(tb[TCA_ACT_BPF_NAME], GFP_KERNEL);
  196. if (!name) {
  197. bpf_prog_put(fp);
  198. return -ENOMEM;
  199. }
  200. }
  201. cfg->bpf_name = name;
  202. cfg->filter = fp;
  203. cfg->is_ebpf = true;
  204. return 0;
  205. }
  206. static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
  207. {
  208. struct bpf_prog *filter = cfg->filter;
  209. if (filter) {
  210. if (cfg->is_ebpf)
  211. bpf_prog_put(filter);
  212. else
  213. bpf_prog_destroy(filter);
  214. }
  215. kfree(cfg->bpf_ops);
  216. kfree(cfg->bpf_name);
  217. }
/* Snapshot the program state of @prog into @cfg so it can be released
 * later via tcf_bpf_cfg_cleanup() (e.g. after a replace or on destroy).
 */
static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
				  struct tcf_bpf_cfg *cfg)
{
	cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
	/* updates to prog->filter are prevented, since it's called either
	 * with tcf lock or during final cleanup in rcu callback
	 */
	cfg->filter = rcu_dereference_protected(prog->filter, 1);
	cfg->bpf_ops = prog->bpf_ops;
	cfg->bpf_name = prog->bpf_name;
}
/* .init callback: create a new bpf action or replace the program of an
 * existing one.  Returns ACT_P_CREATED, ACT_P_BOUND, 0 (replaced), or a
 * negative errno.
 */
static int tcf_bpf_init(struct net *net, struct nlattr *nla,
			struct nlattr *est, struct tc_action **act,
			struct tcf_proto *tp, u32 flags,
			struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, act_bpf_ops.net_id);
	bool bind = flags & TCA_ACT_FLAGS_BIND;
	struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tcf_bpf_cfg cfg, old;
	struct tc_act_bpf *parm;
	struct tcf_bpf *prog;
	bool is_bpf, is_ebpf;
	int ret, res = 0;
	u32 index;

	if (!nla)
		return -EINVAL;

	ret = nla_parse_nested_deprecated(tb, TCA_ACT_BPF_MAX, nla,
					  act_bpf_policy, NULL);
	if (ret < 0)
		return ret;

	if (!tb[TCA_ACT_BPF_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
	index = parm->index;
	/* Look up an existing action by index or reserve a new slot. */
	ret = tcf_idr_check_alloc(tn, &index, act, bind);
	if (!ret) {
		/* Index was free: create a fresh action instance. */
		ret = tcf_idr_create(tn, index, est, act,
				     &act_bpf_ops, bind, true, flags);
		if (ret < 0) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		res = ACT_P_CREATED;
	} else if (ret > 0) {
		/* Existing action found. */
		/* Don't override defaults. */
		if (bind)
			return ACT_P_BOUND;

		if (!(flags & TCA_ACT_FLAGS_REPLACE)) {
			tcf_idr_release(*act, bind);
			return -EEXIST;
		}
	} else {
		return ret;
	}

	ret = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (ret < 0)
		goto release_idr;

	/* Exactly one program flavor must be supplied, never both. */
	is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
	is_ebpf = tb[TCA_ACT_BPF_FD];

	if (is_bpf == is_ebpf) {
		ret = -EINVAL;
		goto put_chain;
	}

	memset(&cfg, 0, sizeof(cfg));

	ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
		       tcf_bpf_init_from_efd(tb, &cfg);
	if (ret < 0)
		goto put_chain;

	prog = to_bpf(*act);

	/* Swap in the new program under the action lock; keep the old
	 * state in @old so it can be freed after readers are done.
	 */
	spin_lock_bh(&prog->tcf_lock);
	if (res != ACT_P_CREATED)
		tcf_bpf_prog_fill_cfg(prog, &old);

	prog->bpf_ops = cfg.bpf_ops;
	prog->bpf_name = cfg.bpf_name;

	if (cfg.bpf_num_ops)
		prog->bpf_num_ops = cfg.bpf_num_ops;

	goto_ch = tcf_action_set_ctrlact(*act, parm->action, goto_ch);
	rcu_assign_pointer(prog->filter, cfg.filter);
	spin_unlock_bh(&prog->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (res != ACT_P_CREATED) {
		/* make sure the program being replaced is no longer executing */
		synchronize_rcu();
		tcf_bpf_cfg_cleanup(&old);
	}

	return res;

put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

release_idr:
	tcf_idr_release(*act, bind);
	return ret;
}
  314. static void tcf_bpf_cleanup(struct tc_action *act)
  315. {
  316. struct tcf_bpf_cfg tmp;
  317. tcf_bpf_prog_fill_cfg(to_bpf(act), &tmp);
  318. tcf_bpf_cfg_cleanup(&tmp);
  319. }
/* Operations vector registered with the TC action subsystem. */
static struct tc_action_ops act_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.id		=	TCA_ID_BPF,
	.owner		=	THIS_MODULE,
	.act		=	tcf_bpf_act,
	.dump		=	tcf_bpf_dump,
	.cleanup	=	tcf_bpf_cleanup,
	.init		=	tcf_bpf_init,
	.size		=	sizeof(struct tcf_bpf),
};
MODULE_ALIAS_NET_ACT("bpf");
  331. static __net_init int bpf_init_net(struct net *net)
  332. {
  333. struct tc_action_net *tn = net_generic(net, act_bpf_ops.net_id);
  334. return tc_action_net_init(net, tn, &act_bpf_ops);
  335. }
/* Per-netns batched exit: tear down the action tables for all
 * namespaces in @net_list.
 */
static void __net_exit bpf_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, act_bpf_ops.net_id);
}
/* Per-network-namespace lifecycle hooks; .id is filled in at
 * registration and stored in act_bpf_ops.net_id.
 */
static struct pernet_operations bpf_net_ops = {
	.init = bpf_init_net,
	.exit_batch = bpf_exit_net,
	.id   = &act_bpf_ops.net_id,
	.size = sizeof(struct tc_action_net),
};
/* Module load: register the action ops and pernet hooks. */
static int __init bpf_init_module(void)
{
	return tcf_register_action(&act_bpf_ops, &bpf_net_ops);
}
/* Module unload: unregister the action ops and pernet hooks. */
static void __exit bpf_cleanup_module(void)
{
	tcf_unregister_action(&act_bpf_ops, &bpf_net_ops);
}

module_init(bpf_init_module);
module_exit(bpf_cleanup_module);
  356. MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
  357. MODULE_DESCRIPTION("TC BPF based action");
  358. MODULE_LICENSE("GPL v2");