netkit.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2023 Isovalent */

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/netfilter_netdev.h>
#include <linux/bpf_mprog.h>
#include <linux/indirect_call_wrapper.h>

#include <net/netkit.h>
#include <net/dst.h>
#include <net/tcx.h>

#define DRV_NAME "netkit"
struct netkit {
	/* Needed in fast-path */
	struct net_device __rcu *peer;
	struct bpf_mprog_entry __rcu *active;
	enum netkit_action policy;
	enum netkit_scrub scrub;
	struct bpf_mprog_bundle bundle;

	/* Needed in slow-path */
	enum netkit_mode mode;
	bool primary;
	u32 headroom;
};

struct netkit_link {
	struct bpf_link link;
	struct net_device *dev;
	u32 location;
};
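
/* Run the attached BPF programs in order. The first program returning a
 * verdict other than NETKIT_NEXT decides the action; if no program is
 * attached or all of them return NETKIT_NEXT, the device policy passed
 * in via @ret applies.
 */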
static __always_inline int
netkit_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb,
	   enum netkit_action ret)
{
	const struct bpf_mprog_fp *fp;
	const struct bpf_prog *prog;

	bpf_mprog_foreach_prog(entry, fp, prog) {
		bpf_compute_data_pointers(skb);
		ret = bpf_prog_run(prog, skb);
		if (ret != NETKIT_NEXT)
			break;
	}
	return ret;
}
static void netkit_xnet(struct sk_buff *skb)
{
	skb->priority = 0;
	skb->mark = 0;
}

static void netkit_prep_forward(struct sk_buff *skb,
				bool xnet, bool xnet_scrub)
{
	skb_scrub_packet(skb, false);
	nf_skip_egress(skb, true);
	skb_reset_mac_header(skb);

	if (!xnet)
		return;
	ipvs_reset(skb);
	skb_clear_tstamp(skb);
	if (xnet_scrub)
		netkit_xnet(skb);
}
static struct netkit *netkit_priv(const struct net_device *dev)
{
	return netdev_priv(dev);
}
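
/* ndo_start_xmit: deliver the skb directly into the peer device via
 * __netif_rx(), which may live in another network namespace. The BPF
 * verdict (or the default policy when no program is attached) selects
 * between pass, redirect and drop.
 */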
static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
	struct netkit *nk = netkit_priv(dev);
	enum netkit_action ret = READ_ONCE(nk->policy);
	netdev_tx_t ret_dev = NET_XMIT_SUCCESS;
	const struct bpf_mprog_entry *entry;
	struct net_device *peer;
	int len = skb->len;

	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
	rcu_read_lock();
	peer = rcu_dereference(nk->peer);
	if (unlikely(!peer || !(peer->flags & IFF_UP) ||
		     !pskb_may_pull(skb, ETH_HLEN) ||
		     skb_orphan_frags(skb, GFP_ATOMIC)))
		goto drop;
	netkit_prep_forward(skb, !net_eq(dev_net(dev), dev_net(peer)),
			    nk->scrub);
	eth_skb_pkt_type(skb, peer);
	skb->dev = peer;
	entry = rcu_dereference(nk->active);
	if (entry)
		ret = netkit_run(entry, skb, ret);
	switch (ret) {
	case NETKIT_NEXT:
	case NETKIT_PASS:
		eth_skb_pull_mac(skb);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
			dev_sw_netstats_tx_add(dev, 1, len);
			dev_sw_netstats_rx_add(peer, len);
		} else {
			goto drop_stats;
		}
		break;
	case NETKIT_REDIRECT:
		dev_sw_netstats_tx_add(dev, 1, len);
		skb_do_redirect(skb);
		break;
	case NETKIT_DROP:
	default:
drop:
		kfree_skb(skb);
drop_stats:
		dev_core_stats_tx_dropped_inc(dev);
		ret_dev = NET_XMIT_DROP;
		break;
	}
	rcu_read_unlock();
	bpf_net_ctx_clear(bpf_net_ctx);
	return ret_dev;
}
static int netkit_open(struct net_device *dev)
{
	struct netkit *nk = netkit_priv(dev);
	struct net_device *peer = rtnl_dereference(nk->peer);

	if (!peer)
		return -ENOTCONN;
	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}
	return 0;
}

static int netkit_close(struct net_device *dev)
{
	struct netkit *nk = netkit_priv(dev);
	struct net_device *peer = rtnl_dereference(nk->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);
	return 0;
}
static int netkit_get_iflink(const struct net_device *dev)
{
	struct netkit *nk = netkit_priv(dev);
	struct net_device *peer;
	int iflink = 0;

	rcu_read_lock();
	peer = rcu_dereference(nk->peer);
	if (peer)
		iflink = READ_ONCE(peer->ifindex);
	rcu_read_unlock();
	return iflink;
}

static void netkit_set_multicast(struct net_device *dev)
{
	/* Nothing to do, we receive whatever gets pushed to us! */
}

static int netkit_set_macaddr(struct net_device *dev, void *sa)
{
	struct netkit *nk = netkit_priv(dev);

	if (nk->mode != NETKIT_L2)
		return -EOPNOTSUPP;

	return eth_mac_addr(dev, sa);
}

static void netkit_set_headroom(struct net_device *dev, int headroom)
{
	struct netkit *nk = netkit_priv(dev), *nk2;
	struct net_device *peer;

	if (headroom < 0)
		headroom = NET_SKB_PAD;

	rcu_read_lock();
	peer = rcu_dereference(nk->peer);
	if (unlikely(!peer))
		goto out;

	nk2 = netkit_priv(peer);
	nk->headroom = headroom;
	headroom = max(nk->headroom, nk2->headroom);

	peer->needed_headroom = headroom;
	dev->needed_headroom = headroom;
out:
	rcu_read_unlock();
}

INDIRECT_CALLABLE_SCOPE struct net_device *netkit_peer_dev(struct net_device *dev)
{
	return rcu_dereference(netkit_priv(dev)->peer);
}

static void netkit_get_stats(struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	dev_fetch_sw_netstats(stats, dev->tstats);
	stats->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
}
static void netkit_uninit(struct net_device *dev);

static const struct net_device_ops netkit_netdev_ops = {
	.ndo_open		= netkit_open,
	.ndo_stop		= netkit_close,
	.ndo_start_xmit		= netkit_xmit,
	.ndo_set_rx_mode	= netkit_set_multicast,
	.ndo_set_rx_headroom	= netkit_set_headroom,
	.ndo_set_mac_address	= netkit_set_macaddr,
	.ndo_get_iflink		= netkit_get_iflink,
	.ndo_get_peer_dev	= netkit_peer_dev,
	.ndo_get_stats64	= netkit_get_stats,
	.ndo_uninit		= netkit_uninit,
	.ndo_features_check	= passthru_features_check,
};

static void netkit_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}

static const struct ethtool_ops netkit_ethtool_ops = {
	.get_drvinfo		= netkit_get_drvinfo,
};
static void netkit_setup(struct net_device *dev)
{
	static const netdev_features_t netkit_features_hw_vlan =
		NETIF_F_HW_VLAN_CTAG_TX |
		NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_HW_VLAN_STAG_TX |
		NETIF_F_HW_VLAN_STAG_RX;
	static const netdev_features_t netkit_features =
		netkit_features_hw_vlan |
		NETIF_F_SG |
		NETIF_F_FRAGLIST |
		NETIF_F_HW_CSUM |
		NETIF_F_RXCSUM |
		NETIF_F_SCTP_CRC |
		NETIF_F_HIGHDMA |
		NETIF_F_GSO_SOFTWARE |
		NETIF_F_GSO_ENCAP_ALL;

	ether_setup(dev);
	dev->max_mtu = ETH_MAX_MTU;
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;

	dev->flags |= IFF_NOARP;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->priv_flags |= IFF_PHONY_HEADROOM;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_DISABLE_NETPOLL;
	dev->lltx = true;

	dev->ethtool_ops = &netkit_ethtool_ops;
	dev->netdev_ops  = &netkit_netdev_ops;

	dev->features |= netkit_features;
	dev->hw_features = netkit_features;
	dev->hw_enc_features = netkit_features;
	dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
	dev->vlan_features = dev->features & ~netkit_features_hw_vlan;

	dev->needs_free_netdev = true;

	netif_set_tso_max_size(dev, GSO_MAX_SIZE);
}
static struct net *netkit_get_link_net(const struct net_device *dev)
{
	struct netkit *nk = netkit_priv(dev);
	struct net_device *peer = rtnl_dereference(nk->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

static int netkit_check_policy(int policy, struct nlattr *tb,
			       struct netlink_ext_ack *extack)
{
	switch (policy) {
	case NETKIT_PASS:
	case NETKIT_DROP:
		return 0;
	default:
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "Provided default xmit policy not supported");
		return -EINVAL;
	}
}

static int netkit_check_mode(int mode, struct nlattr *tb,
			     struct netlink_ext_ack *extack)
{
	switch (mode) {
	case NETKIT_L2:
	case NETKIT_L3:
		return 0;
	default:
		NL_SET_ERR_MSG_ATTR(extack, tb,
				    "Provided device mode can only be L2 or L3");
		return -EINVAL;
	}
}

static int netkit_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct nlattr *attr = tb[IFLA_ADDRESS];

	if (!attr)
		return 0;
	if (nla_len(attr) != ETH_ALEN)
		return -EINVAL;
	if (!is_valid_ether_addr(nla_data(attr)))
		return -EADDRNOTAVAIL;
	return 0;
}
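
/* A netkit pair is created in a single netlink request: the peer device
 * is set up and registered first (optionally in a different netns and
 * with attributes taken from IFLA_NETKIT_PEER_INFO), then the primary
 * device, and finally both sides are cross-linked through their RCU
 * peer pointers.
 */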
static struct rtnl_link_ops netkit_link_ops;

static int netkit_new_link(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb, *attr;
	enum netkit_action policy_prim = NETKIT_PASS;
	enum netkit_action policy_peer = NETKIT_PASS;
	enum netkit_scrub scrub_prim = NETKIT_SCRUB_DEFAULT;
	enum netkit_scrub scrub_peer = NETKIT_SCRUB_DEFAULT;
	enum netkit_mode mode = NETKIT_L3;
	unsigned char ifname_assign_type;
	struct ifinfomsg *ifmp = NULL;
	struct net_device *peer;
	char ifname[IFNAMSIZ];
	struct netkit *nk;
	struct net *net;
	int err;

	if (data) {
		if (data[IFLA_NETKIT_MODE]) {
			attr = data[IFLA_NETKIT_MODE];
			mode = nla_get_u32(attr);
			err = netkit_check_mode(mode, attr, extack);
			if (err < 0)
				return err;
		}
		if (data[IFLA_NETKIT_PEER_INFO]) {
			attr = data[IFLA_NETKIT_PEER_INFO];
			ifmp = nla_data(attr);
			err = rtnl_nla_parse_ifinfomsg(peer_tb, attr, extack);
			if (err < 0)
				return err;
			err = netkit_validate(peer_tb, NULL, extack);
			if (err < 0)
				return err;
			tbp = peer_tb;
		}
		if (data[IFLA_NETKIT_SCRUB])
			scrub_prim = nla_get_u32(data[IFLA_NETKIT_SCRUB]);
		if (data[IFLA_NETKIT_PEER_SCRUB])
			scrub_peer = nla_get_u32(data[IFLA_NETKIT_PEER_SCRUB]);
		if (data[IFLA_NETKIT_POLICY]) {
			attr = data[IFLA_NETKIT_POLICY];
			policy_prim = nla_get_u32(attr);
			err = netkit_check_policy(policy_prim, attr, extack);
			if (err < 0)
				return err;
		}
		if (data[IFLA_NETKIT_PEER_POLICY]) {
			attr = data[IFLA_NETKIT_PEER_POLICY];
			policy_peer = nla_get_u32(attr);
			err = netkit_check_policy(policy_peer, attr, extack);
			if (err < 0)
				return err;
		}
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		ifname_assign_type = NET_NAME_USER;
	} else {
		strscpy(ifname, "nk%d", IFNAMSIZ);
		ifname_assign_type = NET_NAME_ENUM;
	}
	if (mode != NETKIT_L2 &&
	    (tb[IFLA_ADDRESS] || tbp[IFLA_ADDRESS]))
		return -EOPNOTSUPP;

	net = rtnl_link_get_net(src_net, tbp);
	if (IS_ERR(net))
		return PTR_ERR(net);

	peer = rtnl_create_link(net, ifname, ifname_assign_type,
				&netkit_link_ops, tbp, extack);
	if (IS_ERR(peer)) {
		put_net(net);
		return PTR_ERR(peer);
	}

	netif_inherit_tso_max(peer, dev);

	if (mode == NETKIT_L2 && !(ifmp && tbp[IFLA_ADDRESS]))
		eth_hw_addr_random(peer);
	if (ifmp && dev->ifindex)
		peer->ifindex = ifmp->ifi_index;

	nk = netkit_priv(peer);
	nk->primary = false;
	nk->policy = policy_peer;
	nk->scrub = scrub_peer;
	nk->mode = mode;
	bpf_mprog_bundle_init(&nk->bundle);

	err = register_netdevice(peer);
	put_net(net);
	if (err < 0)
		goto err_register_peer;
	netif_carrier_off(peer);
	if (mode == NETKIT_L2)
		dev_change_flags(peer, peer->flags & ~IFF_NOARP, NULL);

	err = rtnl_configure_link(peer, NULL, 0, NULL);
	if (err < 0)
		goto err_configure_peer;

	if (mode == NETKIT_L2 && !tb[IFLA_ADDRESS])
		eth_hw_addr_random(dev);
	if (tb[IFLA_IFNAME])
		nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		strscpy(dev->name, "nk%d", IFNAMSIZ);

	nk = netkit_priv(dev);
	nk->primary = true;
	nk->policy = policy_prim;
	nk->scrub = scrub_prim;
	nk->mode = mode;
	bpf_mprog_bundle_init(&nk->bundle);

	err = register_netdevice(dev);
	if (err < 0)
		goto err_configure_peer;
	netif_carrier_off(dev);
	if (mode == NETKIT_L2)
		dev_change_flags(dev, dev->flags & ~IFF_NOARP, NULL);

	rcu_assign_pointer(netkit_priv(dev)->peer, peer);
	rcu_assign_pointer(netkit_priv(peer)->peer, dev);
	return 0;
err_configure_peer:
	unregister_netdevice(peer);
	return err;
err_register_peer:
	free_netdev(peer);
	return err;
}
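
/* BPF program management. Each netkit device carries its own bpf_mprog
 * bundle; attachment is only permitted through the primary device, and
 * BPF_NETKIT_PEER resolves to the peer. The active entry is republished
 * under RTNL, followed by an RCU sync before the previous entry is
 * committed.
 */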
static struct bpf_mprog_entry *netkit_entry_fetch(struct net_device *dev,
						  bool bundle_fallback)
{
	struct netkit *nk = netkit_priv(dev);
	struct bpf_mprog_entry *entry;

	ASSERT_RTNL();
	entry = rcu_dereference_rtnl(nk->active);
	if (entry)
		return entry;
	if (bundle_fallback)
		return &nk->bundle.a;
	return NULL;
}

static void netkit_entry_update(struct net_device *dev,
				struct bpf_mprog_entry *entry)
{
	struct netkit *nk = netkit_priv(dev);

	ASSERT_RTNL();
	rcu_assign_pointer(nk->active, entry);
}

static void netkit_entry_sync(void)
{
	synchronize_rcu();
}

static struct net_device *netkit_dev_fetch(struct net *net, u32 ifindex, u32 which)
{
	struct net_device *dev;
	struct netkit *nk;

	ASSERT_RTNL();
	switch (which) {
	case BPF_NETKIT_PRIMARY:
	case BPF_NETKIT_PEER:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);
	if (dev->netdev_ops != &netkit_netdev_ops)
		return ERR_PTR(-ENXIO);

	nk = netkit_priv(dev);
	if (!nk->primary)
		return ERR_PTR(-EACCES);
	if (which == BPF_NETKIT_PEER) {
		dev = rcu_dereference_rtnl(nk->peer);
		if (!dev)
			return ERR_PTR(-ENODEV);
	}
	return dev;
}
int netkit_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_mprog_entry *entry, *entry_new;
	struct bpf_prog *replace_prog = NULL;
	struct net_device *dev;
	int ret;

	rtnl_lock();
	dev = netkit_dev_fetch(current->nsproxy->net_ns, attr->target_ifindex,
			       attr->attach_type);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	entry = netkit_entry_fetch(dev, true);
	if (attr->attach_flags & BPF_F_REPLACE) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd,
						 prog->type);
		if (IS_ERR(replace_prog)) {
			ret = PTR_ERR(replace_prog);
			replace_prog = NULL;
			goto out;
		}
	}
	ret = bpf_mprog_attach(entry, &entry_new, prog, NULL, replace_prog,
			       attr->attach_flags, attr->relative_fd,
			       attr->expected_revision);
	if (!ret) {
		if (entry != entry_new) {
			netkit_entry_update(dev, entry_new);
			netkit_entry_sync();
		}
		bpf_mprog_commit(entry);
	}
out:
	if (replace_prog)
		bpf_prog_put(replace_prog);
	rtnl_unlock();
	return ret;
}

int netkit_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_mprog_entry *entry, *entry_new;
	struct net_device *dev;
	int ret;

	rtnl_lock();
	dev = netkit_dev_fetch(current->nsproxy->net_ns, attr->target_ifindex,
			       attr->attach_type);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	entry = netkit_entry_fetch(dev, false);
	if (!entry) {
		ret = -ENOENT;
		goto out;
	}
	ret = bpf_mprog_detach(entry, &entry_new, prog, NULL, attr->attach_flags,
			       attr->relative_fd, attr->expected_revision);
	if (!ret) {
		if (!bpf_mprog_total(entry_new))
			entry_new = NULL;
		netkit_entry_update(dev, entry_new);
		netkit_entry_sync();
		bpf_mprog_commit(entry);
	}
out:
	rtnl_unlock();
	return ret;
}

int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
{
	struct net_device *dev;
	int ret;

	rtnl_lock();
	dev = netkit_dev_fetch(current->nsproxy->net_ns,
			       attr->query.target_ifindex,
			       attr->query.attach_type);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	ret = bpf_mprog_query(attr, uattr, netkit_entry_fetch(dev, false));
out:
	rtnl_unlock();
	return ret;
}
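
/* bpf_link based attachment. The link keeps a back-pointer to the
 * device; netkit_release_all() clears it when the device is torn down
 * before the link itself is released.
 */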
static struct netkit_link *netkit_link(const struct bpf_link *link)
{
	return container_of(link, struct netkit_link, link);
}

static int netkit_link_prog_attach(struct bpf_link *link, u32 flags,
				   u32 id_or_fd, u64 revision)
{
	struct netkit_link *nkl = netkit_link(link);
	struct bpf_mprog_entry *entry, *entry_new;
	struct net_device *dev = nkl->dev;
	int ret;

	ASSERT_RTNL();
	entry = netkit_entry_fetch(dev, true);
	ret = bpf_mprog_attach(entry, &entry_new, link->prog, link, NULL, flags,
			       id_or_fd, revision);
	if (!ret) {
		if (entry != entry_new) {
			netkit_entry_update(dev, entry_new);
			netkit_entry_sync();
		}
		bpf_mprog_commit(entry);
	}
	return ret;
}

static void netkit_link_release(struct bpf_link *link)
{
	struct netkit_link *nkl = netkit_link(link);
	struct bpf_mprog_entry *entry, *entry_new;
	struct net_device *dev;
	int ret = 0;

	rtnl_lock();
	dev = nkl->dev;
	if (!dev)
		goto out;
	entry = netkit_entry_fetch(dev, false);
	if (!entry) {
		ret = -ENOENT;
		goto out;
	}
	ret = bpf_mprog_detach(entry, &entry_new, link->prog, link, 0, 0, 0);
	if (!ret) {
		if (!bpf_mprog_total(entry_new))
			entry_new = NULL;
		netkit_entry_update(dev, entry_new);
		netkit_entry_sync();
		bpf_mprog_commit(entry);
		nkl->dev = NULL;
	}
out:
	WARN_ON_ONCE(ret);
	rtnl_unlock();
}

static int netkit_link_update(struct bpf_link *link, struct bpf_prog *nprog,
			      struct bpf_prog *oprog)
{
	struct netkit_link *nkl = netkit_link(link);
	struct bpf_mprog_entry *entry, *entry_new;
	struct net_device *dev;
	int ret = 0;

	rtnl_lock();
	dev = nkl->dev;
	if (!dev) {
		ret = -ENOLINK;
		goto out;
	}
	if (oprog && link->prog != oprog) {
		ret = -EPERM;
		goto out;
	}
	oprog = link->prog;
	if (oprog == nprog) {
		bpf_prog_put(nprog);
		goto out;
	}
	entry = netkit_entry_fetch(dev, false);
	if (!entry) {
		ret = -ENOENT;
		goto out;
	}
	ret = bpf_mprog_attach(entry, &entry_new, nprog, link, oprog,
			       BPF_F_REPLACE | BPF_F_ID,
			       link->prog->aux->id, 0);
	if (!ret) {
		WARN_ON_ONCE(entry != entry_new);
		oprog = xchg(&link->prog, nprog);
		bpf_prog_put(oprog);
		bpf_mprog_commit(entry);
	}
out:
	rtnl_unlock();
	return ret;
}

static void netkit_link_dealloc(struct bpf_link *link)
{
	kfree(netkit_link(link));
}

static void netkit_link_fdinfo(const struct bpf_link *link, struct seq_file *seq)
{
	const struct netkit_link *nkl = netkit_link(link);
	u32 ifindex = 0;

	rtnl_lock();
	if (nkl->dev)
		ifindex = nkl->dev->ifindex;
	rtnl_unlock();

	seq_printf(seq, "ifindex:\t%u\n", ifindex);
	seq_printf(seq, "attach_type:\t%u (%s)\n",
		   nkl->location,
		   nkl->location == BPF_NETKIT_PRIMARY ? "primary" : "peer");
}

static int netkit_link_fill_info(const struct bpf_link *link,
				 struct bpf_link_info *info)
{
	const struct netkit_link *nkl = netkit_link(link);
	u32 ifindex = 0;

	rtnl_lock();
	if (nkl->dev)
		ifindex = nkl->dev->ifindex;
	rtnl_unlock();

	info->netkit.ifindex = ifindex;
	info->netkit.attach_type = nkl->location;
	return 0;
}

static int netkit_link_detach(struct bpf_link *link)
{
	netkit_link_release(link);
	return 0;
}

static const struct bpf_link_ops netkit_link_lops = {
	.release		= netkit_link_release,
	.detach			= netkit_link_detach,
	.dealloc		= netkit_link_dealloc,
	.update_prog		= netkit_link_update,
	.show_fdinfo		= netkit_link_fdinfo,
	.fill_link_info		= netkit_link_fill_info,
};

static int netkit_link_init(struct netkit_link *nkl,
			    struct bpf_link_primer *link_primer,
			    const union bpf_attr *attr,
			    struct net_device *dev,
			    struct bpf_prog *prog)
{
	bpf_link_init(&nkl->link, BPF_LINK_TYPE_NETKIT,
		      &netkit_link_lops, prog);
	nkl->location = attr->link_create.attach_type;
	nkl->dev = dev;
	return bpf_link_prime(&nkl->link, link_primer);
}
int netkit_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct netkit_link *nkl;
	struct net_device *dev;
	int ret;

	rtnl_lock();
	dev = netkit_dev_fetch(current->nsproxy->net_ns,
			       attr->link_create.target_ifindex,
			       attr->link_create.attach_type);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	nkl = kzalloc(sizeof(*nkl), GFP_KERNEL_ACCOUNT);
	if (!nkl) {
		ret = -ENOMEM;
		goto out;
	}
	ret = netkit_link_init(nkl, &link_primer, attr, dev, prog);
	if (ret) {
		kfree(nkl);
		goto out;
	}
	ret = netkit_link_prog_attach(&nkl->link,
				      attr->link_create.flags,
				      attr->link_create.netkit.relative_fd,
				      attr->link_create.netkit.expected_revision);
	if (ret) {
		nkl->dev = NULL;
		bpf_link_cleanup(&link_primer);
		goto out;
	}
	ret = bpf_link_settle(&link_primer);
out:
	rtnl_unlock();
	return ret;
}
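
/* Device teardown: unpublish the active entry, wait for RCU readers,
 * then drop program references and sever any links still pointing at
 * this device.
 */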
static void netkit_release_all(struct net_device *dev)
{
	struct bpf_mprog_entry *entry;
	struct bpf_tuple tuple = {};
	struct bpf_mprog_fp *fp;
	struct bpf_mprog_cp *cp;

	entry = netkit_entry_fetch(dev, false);
	if (!entry)
		return;
	netkit_entry_update(dev, NULL);
	netkit_entry_sync();

	bpf_mprog_foreach_tuple(entry, fp, cp, tuple) {
		if (tuple.link)
			netkit_link(tuple.link)->dev = NULL;
		else
			bpf_prog_put(tuple.prog);
	}
}

static void netkit_uninit(struct net_device *dev)
{
	netkit_release_all(dev);
}

static void netkit_del_link(struct net_device *dev, struct list_head *head)
{
	struct netkit *nk = netkit_priv(dev);
	struct net_device *peer = rtnl_dereference(nk->peer);

	RCU_INIT_POINTER(nk->peer, NULL);
	unregister_netdevice_queue(dev, head);
	if (peer) {
		nk = netkit_priv(peer);
		RCU_INIT_POINTER(nk->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}
static int netkit_change_link(struct net_device *dev, struct nlattr *tb[],
			      struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	struct netkit *nk = netkit_priv(dev);
	struct net_device *peer = rtnl_dereference(nk->peer);
	enum netkit_action policy;
	struct nlattr *attr;
	int err;

	if (!nk->primary) {
		NL_SET_ERR_MSG(extack,
			       "netkit link settings can be changed only through the primary device");
		return -EACCES;
	}

	if (data[IFLA_NETKIT_MODE]) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_MODE],
				    "netkit link operating mode cannot be changed after device creation");
		return -EACCES;
	}

	if (data[IFLA_NETKIT_SCRUB]) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_SCRUB],
				    "netkit scrubbing cannot be changed after device creation");
		return -EACCES;
	}

	if (data[IFLA_NETKIT_PEER_SCRUB]) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_SCRUB],
				    "netkit scrubbing cannot be changed after device creation");
		return -EACCES;
	}

	if (data[IFLA_NETKIT_PEER_INFO]) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_PEER_INFO],
				    "netkit peer info cannot be changed after device creation");
		return -EINVAL;
	}

	if (data[IFLA_NETKIT_POLICY]) {
		attr = data[IFLA_NETKIT_POLICY];
		policy = nla_get_u32(attr);
		err = netkit_check_policy(policy, attr, extack);
		if (err)
			return err;
		WRITE_ONCE(nk->policy, policy);
	}

	if (data[IFLA_NETKIT_PEER_POLICY]) {
		err = -EOPNOTSUPP;
		attr = data[IFLA_NETKIT_PEER_POLICY];
		policy = nla_get_u32(attr);
		if (peer)
			err = netkit_check_policy(policy, attr, extack);
		if (err)
			return err;
		nk = netkit_priv(peer);
		WRITE_ONCE(nk->policy, policy);
	}

	return 0;
}
static size_t netkit_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_POLICY */
	       nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_PEER_POLICY */
	       nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_SCRUB */
	       nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_PEER_SCRUB */
	       nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_MODE */
	       nla_total_size(sizeof(u8))  + /* IFLA_NETKIT_PRIMARY */
	       0;
}

static int netkit_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct netkit *nk = netkit_priv(dev);
	struct net_device *peer = rtnl_dereference(nk->peer);

	if (nla_put_u8(skb, IFLA_NETKIT_PRIMARY, nk->primary))
		return -EMSGSIZE;
	if (nla_put_u32(skb, IFLA_NETKIT_POLICY, nk->policy))
		return -EMSGSIZE;
	if (nla_put_u32(skb, IFLA_NETKIT_MODE, nk->mode))
		return -EMSGSIZE;
	if (nla_put_u32(skb, IFLA_NETKIT_SCRUB, nk->scrub))
		return -EMSGSIZE;

	if (peer) {
		nk = netkit_priv(peer);
		if (nla_put_u32(skb, IFLA_NETKIT_PEER_POLICY, nk->policy))
			return -EMSGSIZE;
		if (nla_put_u32(skb, IFLA_NETKIT_PEER_SCRUB, nk->scrub))
			return -EMSGSIZE;
	}

	return 0;
}
static const struct nla_policy netkit_policy[IFLA_NETKIT_MAX + 1] = {
	[IFLA_NETKIT_PEER_INFO]		= { .len = sizeof(struct ifinfomsg) },
	[IFLA_NETKIT_MODE]		= { .type = NLA_U32 },
	[IFLA_NETKIT_POLICY]		= { .type = NLA_U32 },
	[IFLA_NETKIT_PEER_POLICY]	= { .type = NLA_U32 },
	[IFLA_NETKIT_SCRUB]		= NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT),
	[IFLA_NETKIT_PEER_SCRUB]	= NLA_POLICY_MAX(NLA_U32, NETKIT_SCRUB_DEFAULT),
	[IFLA_NETKIT_PRIMARY]		= { .type = NLA_REJECT,
					    .reject_message = "Primary attribute is read-only" },
};

static struct rtnl_link_ops netkit_link_ops = {
	.kind			= DRV_NAME,
	.priv_size		= sizeof(struct netkit),
	.setup			= netkit_setup,
	.newlink		= netkit_new_link,
	.dellink		= netkit_del_link,
	.changelink		= netkit_change_link,
	.get_link_net		= netkit_get_link_net,
	.get_size		= netkit_get_size,
	.fill_info		= netkit_fill_info,
	.policy			= netkit_policy,
	.validate		= netkit_validate,
	.maxtype		= IFLA_NETKIT_MAX,
};

static __init int netkit_init(void)
{
	BUILD_BUG_ON((int)NETKIT_NEXT != (int)TCX_NEXT ||
		     (int)NETKIT_PASS != (int)TCX_PASS ||
		     (int)NETKIT_DROP != (int)TCX_DROP ||
		     (int)NETKIT_REDIRECT != (int)TCX_REDIRECT);

	return rtnl_link_register(&netkit_link_ops);
}

static __exit void netkit_exit(void)
{
	rtnl_link_unregister(&netkit_link_ops);
}

module_init(netkit_init);
module_exit(netkit_exit);

MODULE_DESCRIPTION("BPF-programmable network device");
MODULE_AUTHOR("Daniel Borkmann <daniel@iogearbox.net>");
MODULE_AUTHOR("Nikolay Aleksandrov <razor@blackwall.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);