// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Handle incoming frames
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */
  9. #include <linux/slab.h>
  10. #include <linux/kernel.h>
  11. #include <linux/netdevice.h>
  12. #include <linux/etherdevice.h>
  13. #include <linux/netfilter_bridge.h>
  14. #ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
  15. #include <net/netfilter/nf_queue.h>
  16. #endif
  17. #include <linux/neighbour.h>
  18. #include <net/arp.h>
  19. #include <net/dsa.h>
  20. #include <linux/export.h>
  21. #include <linux/rculist.h>
  22. #include "br_private.h"
  23. #include "br_private_tunnel.h"
  24. static int
  25. br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
  26. {
  27. br_drop_fake_rtable(skb);
  28. return netif_receive_skb(skb);
  29. }
/* Deliver a frame to the bridge device itself, i.e. up to the local
 * network stack.
 *
 * @skb:     frame to deliver; consumed on every path.
 * @promisc: whether the bridge device was in promiscuous mode at receive
 *           time; stashed in the skb control block for the LOCAL_IN
 *           hook/okfn to inspect.
 *
 * Returns NET_RX_DROP on filtering/allocation failure, otherwise the
 * NF_HOOK() result for NF_BR_LOCAL_IN.
 */
static int br_pass_frame_up(struct sk_buff *skb, bool promisc)
{
	struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(brdev);
	struct net_bridge_vlan_group *vg;

	/* account the frame against the bridge device's software stats */
	dev_sw_netstats_rx_add(brdev, skb->len);

	vg = br_vlan_group_rcu(br);

	/* Reset the offload_fwd_mark because there could be a stacked
	 * bridge above, and it should not think this bridge it doing
	 * that bridge's work forwarding out its ports.
	 */
	br_switchdev_frame_unmark(skb);

	/* Bridge is just like any other port.  Make sure the
	 * packet is allowed except in promisc mode when someone
	 * may be running packet capture.
	 */
	if (!(brdev->flags & IFF_PROMISC) &&
	    !br_allowed_egress(vg, skb)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* remember the ingress port device; the hook wants it as indev */
	indev = skb->dev;
	skb->dev = brdev;
	/* br_handle_vlan() may free the skb (returns NULL on failure) */
	skb = br_handle_vlan(br, NULL, vg, skb);
	if (!skb)
		return NET_RX_DROP;
	/* update the multicast stats if the packet is IGMP/MLD */
	br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb),
			   BR_MCAST_DIR_TX);

	BR_INPUT_SKB_CB(skb)->promisc = promisc;

	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
		       dev_net(indev), NULL, skb, indev, NULL,
		       br_netif_receive_skb);
}
/* note: already called with rcu_read_lock */
/* Main bridge input decision function (okfn of NF_BR_PRE_ROUTING):
 * classifies the frame (unicast/multicast/broadcast), runs ingress
 * VLAN filtering, port locking (incl. MAB), FDB learning and
 * ARP/ND proxy suppression, then either forwards to a known FDB
 * destination, floods, and/or passes the frame up to the local stack.
 * Always consumes @skb.  Returns 0.
 */
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
	enum br_pkt_type pkt_type = BR_PKT_UNICAST;
	struct net_bridge_fdb_entry *dst = NULL;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mdb_entry *mdst;
	bool local_rcv, mcast_hit = false;
	struct net_bridge_mcast *brmctx;
	struct net_bridge_vlan *vlan;
	struct net_bridge *br;
	bool promisc;
	u16 vid = 0;
	u8 state;

	/* device may have been detached from the bridge */
	if (!p)
		goto drop;

	br = p->br;

	if (br_mst_is_enabled(br)) {
		/* with MST the per-VLAN state is resolved later, during
		 * ingress filtering; start from FORWARDING here
		 */
		state = BR_STATE_FORWARDING;
	} else {
		if (p->state == BR_STATE_DISABLED)
			goto drop;
		state = p->state;
	}

	brmctx = &p->br->multicast_ctx;
	pmctx = &p->multicast_ctx;
	/* VLAN ingress filter; may rewrite vid/state and select the
	 * per-VLAN multicast context via @vlan
	 */
	if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid,
				&state, &vlan))
		goto out;

	if (p->flags & BR_PORT_LOCKED) {
		struct net_bridge_fdb_entry *fdb_src =
			br_fdb_find_rcu(br, eth_hdr(skb)->h_source, vid);

		if (!fdb_src) {
			/* FDB miss. Create locked FDB entry if MAB is enabled
			 * and drop the packet.
			 */
			if (p->flags & BR_PORT_MAB)
				br_fdb_update(br, p, eth_hdr(skb)->h_source,
					      vid, BIT(BR_FDB_LOCKED));
			goto drop;
		} else if (READ_ONCE(fdb_src->dst) != p ||
			   test_bit(BR_FDB_LOCAL, &fdb_src->flags)) {
			/* FDB mismatch. Drop the packet without roaming. */
			goto drop;
		} else if (test_bit(BR_FDB_LOCKED, &fdb_src->flags)) {
			/* FDB match, but entry is locked. Refresh it and drop
			 * the packet.
			 */
			br_fdb_update(br, p, eth_hdr(skb)->h_source, vid,
				      BIT(BR_FDB_LOCKED));
			goto drop;
		}
	}

	nbp_switchdev_frame_mark(p, skb);

	/* insert into forwarding database after filtering to avoid spoofing */
	if (p->flags & BR_LEARNING)
		br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, 0);

	promisc = !!(br->dev->flags & IFF_PROMISC);
	local_rcv = promisc;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
		/* by definition the broadcast is also a multicast address */
		if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
			pkt_type = BR_PKT_BROADCAST;
			local_rcv = true;
		} else {
			pkt_type = BR_PKT_MULTICAST;
			/* IGMP/MLD snooping; nonzero means consumed/invalid */
			if (br_multicast_rcv(&brmctx, &pmctx, vlan, skb, vid))
				goto drop;
		}
	}

	/* learning-only state: FDB was updated above, but never forward */
	if (state == BR_STATE_LEARNING)
		goto drop;

	BR_INPUT_SKB_CB(skb)->brdev = br->dev;
	BR_INPUT_SKB_CB(skb)->src_port_isolated = !!(p->flags & BR_ISOLATED);

	/* proxy/suppression of neighbour discovery traffic (ARP and ND) */
	if (IS_ENABLED(CONFIG_INET) &&
	    (skb->protocol == htons(ETH_P_ARP) ||
	     skb->protocol == htons(ETH_P_RARP))) {
		br_do_proxy_suppress_arp(skb, br, vid, p);
	} else if (IS_ENABLED(CONFIG_IPV6) &&
		   skb->protocol == htons(ETH_P_IPV6) &&
		   br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
		   pskb_may_pull(skb, sizeof(struct ipv6hdr) +
				 sizeof(struct nd_msg)) &&
		   ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
			struct nd_msg *msg, _msg;

			msg = br_is_nd_neigh_msg(skb, &_msg);
			if (msg)
				br_do_suppress_nd(skb, br, vid, p, msg);
	}

	switch (pkt_type) {
	case BR_PKT_MULTICAST:
		mdst = br_mdb_entry_skb_get(brmctx, skb, vid);
		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
		    br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst)) {
			/* snooping active: deliver locally only if the host
			 * joined the group or we are a multicast router
			 */
			if ((mdst && mdst->host_joined) ||
			    br_multicast_is_router(brmctx, skb)) {
				local_rcv = true;
				DEV_STATS_INC(br->dev, multicast);
			}
			mcast_hit = true;
		} else {
			/* no querier/MDB info: flood and receive locally */
			local_rcv = true;
			DEV_STATS_INC(br->dev, multicast);
		}
		break;
	case BR_PKT_UNICAST:
		dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid);
		break;
	default:
		break;
	}

	if (dst) {
		unsigned long now = jiffies;

		/* destined to the bridge itself: deliver up, never forward */
		if (test_bit(BR_FDB_LOCAL, &dst->flags))
			return br_pass_frame_up(skb, false);

		/* refresh FDB entry use time without dirtying the cacheline
		 * on every packet
		 */
		if (now != dst->used)
			dst->used = now;
		br_forward(dst->dst, skb, local_rcv, false);
	} else {
		if (!mcast_hit)
			br_flood(br, skb, pkt_type, local_rcv, false, vid);
		else
			br_multicast_flood(mdst, skb, brmctx, local_rcv, false);
	}

	if (local_rcv)
		return br_pass_frame_up(skb, promisc);

out:
	return 0;
drop:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL_GPL(br_handle_frame_finish);
  198. static void __br_handle_local_finish(struct sk_buff *skb)
  199. {
  200. struct net_bridge_port *p = br_port_get_rcu(skb->dev);
  201. u16 vid = 0;
  202. /* check if vlan is allowed, to avoid spoofing */
  203. if ((p->flags & BR_LEARNING) &&
  204. nbp_state_should_learn(p) &&
  205. !br_opt_get(p->br, BROPT_NO_LL_LEARN) &&
  206. br_should_learn(p, skb, &vid))
  207. br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, 0);
  208. }
/* note: already called with rcu_read_lock */
/* okfn for the LOCAL_IN hook taken on link-local frames: learn the
 * source address, then let the caller keep the skb.
 */
static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	__br_handle_local_finish(skb);

	/* return 1 to signal the okfn() was called so it's ok to use the skb */
	return 1;
}
/* Run the NF_BR_PRE_ROUTING hook chain by hand and then finish bridge
 * input processing.  The chain is open-coded (instead of using NF_HOOK)
 * so that an NF_ACCEPT verdict with br_netfilter_broute set can return
 * the frame to the normal stack (RX_HANDLER_PASS) instead of bridging it.
 *
 * Returns an RX_HANDLER_* disposition for br_handle_frame().
 */
static int nf_hook_bridge_pre(struct sk_buff *skb, struct sk_buff **pskb)
{
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	struct nf_hook_entries *e = NULL;
	struct nf_hook_state state;
	unsigned int verdict, i;
	struct net *net;
	int ret;

	net = dev_net(skb->dev);
#ifdef HAVE_JUMP_LABEL
	/* fast path: no bridge PRE_ROUTING hooks registered anywhere */
	if (!static_key_false(&nf_hooks_needed[NFPROTO_BRIDGE][NF_BR_PRE_ROUTING]))
		goto frame_finish;
#endif

	e = rcu_dereference(net->nf.hooks_bridge[NF_BR_PRE_ROUTING]);
	if (!e)
		goto frame_finish;

	nf_hook_state_init(&state, NF_BR_PRE_ROUTING,
			   NFPROTO_BRIDGE, skb->dev, NULL, NULL,
			   net, br_handle_frame_finish);

	for (i = 0; i < e->num_hook_entries; i++) {
		verdict = nf_hook_entry_hookfn(&e->hooks[i], skb, &state);
		switch (verdict & NF_VERDICT_MASK) {
		case NF_ACCEPT:
			/* broute: hand the frame back to the stack untouched */
			if (BR_INPUT_SKB_CB(skb)->br_netfilter_broute) {
				*pskb = skb;
				return RX_HANDLER_PASS;
			}
			break;
		case NF_DROP:
			kfree_skb(skb);
			return RX_HANDLER_CONSUMED;
		case NF_QUEUE:
			/* ret == 1 means the queue was bypassed; keep going */
			ret = nf_queue(skb, &state, i, verdict);
			if (ret == 1)
				continue;
			return RX_HANDLER_CONSUMED;
		default: /* STOLEN */
			return RX_HANDLER_CONSUMED;
		}
	}
frame_finish:
	net = dev_net(skb->dev);
	br_handle_frame_finish(net, NULL, skb);
#else
	br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
#endif
	return RX_HANDLER_CONSUMED;
}
  264. /* Return 0 if the frame was not processed otherwise 1
  265. * note: already called with rcu_read_lock
  266. */
  267. static int br_process_frame_type(struct net_bridge_port *p,
  268. struct sk_buff *skb)
  269. {
  270. struct br_frame_type *tmp;
  271. hlist_for_each_entry_rcu(tmp, &p->br->frame_type_list, list)
  272. if (unlikely(tmp->type == skb->protocol))
  273. return tmp->frame_handler(p, skb);
  274. return 0;
  275. }
/*
 * Return NULL if skb is handled
 * note: already called with rcu_read_lock
 */
/* rx_handler installed on every bridge port device.  Sanity-checks the
 * frame, handles reserved IEEE 802.1D link-local addresses (STP, Pause,
 * LLDP, ...) including selective group forwarding, and hands ordinary
 * traffic to the PRE_ROUTING hook / br_handle_frame_finish() path.
 */
static rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
{
	struct net_bridge_port *p;
	struct sk_buff *skb = *pskb;
	const unsigned char *dest = eth_hdr(skb)->h_dest;

	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	/* drop frames with a multicast/zero source MAC */
	if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
		goto drop;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	memset(skb->cb, 0, sizeof(struct br_input_skb_cb));
	br_tc_skb_miss_set(skb, false);

	p = br_port_get_rcu(skb->dev);
	if (p->flags & BR_VLAN_TUNNEL)
		br_handle_ingress_vlan_tunnel(skb, p, nbp_vlan_group_rcu(p));

	if (unlikely(is_link_local_ether_addr(dest))) {
		u16 fwd_mask = p->br->group_fwd_mask_required;

		/*
		 * See IEEE 802.1D Table 7-10 Reserved addresses
		 *
		 * Assignment		 	Value
		 * Bridge Group Address		01-80-C2-00-00-00
		 * (MAC Control) 802.3		01-80-C2-00-00-01
		 * (Link Aggregation) 802.3	01-80-C2-00-00-02
		 * 802.1X PAE address		01-80-C2-00-00-03
		 *
		 * 802.1AB LLDP 		01-80-C2-00-00-0E
		 *
		 * Others reserved for future standardization
		 */
		fwd_mask |= p->group_fwd_mask;
		switch (dest[5]) {
		case 0x00:	/* Bridge Group Address */
			/* If STP is turned off,
			   then must forward to keep loop detection */
			if (p->br->stp_enabled == BR_NO_STP ||
			    fwd_mask & (1u << dest[5]))
				goto forward;
			*pskb = skb;
			__br_handle_local_finish(skb);
			return RX_HANDLER_PASS;

		case 0x01:	/* IEEE MAC (Pause) */
			goto drop;

		case 0x0E:	/* 802.1AB LLDP */
			fwd_mask |= p->br->group_fwd_mask;
			if (fwd_mask & (1u << dest[5]))
				goto forward;
			*pskb = skb;
			__br_handle_local_finish(skb);
			return RX_HANDLER_PASS;

		default:
			/* Allow selective forwarding for most other protocols */
			fwd_mask |= p->br->group_fwd_mask;
			if (fwd_mask & (1u << dest[5]))
				goto forward;
		}

		BR_INPUT_SKB_CB(skb)->promisc = false;

		/* The else clause should be hit when nf_hook():
		 *   - returns < 0 (drop/error)
		 *   - returns = 0 (stolen/nf_queue)
		 * Thus return 1 from the okfn() to signal the skb is ok to pass
		 */
		if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
			    dev_net(skb->dev), NULL, skb, skb->dev, NULL,
			    br_handle_local_finish) == 1) {
			return RX_HANDLER_PASS;
		} else {
			return RX_HANDLER_CONSUMED;
		}
	}

	/* per-protocol handlers (e.g. registered frame types) get a shot */
	if (unlikely(br_process_frame_type(p, skb)))
		return RX_HANDLER_PASS;

forward:
	/* with MST the per-VLAN state is checked later in the finish path */
	if (br_mst_is_enabled(p->br))
		goto defer_stp_filtering;

	switch (p->state) {
	case BR_STATE_FORWARDING:
	case BR_STATE_LEARNING:
defer_stp_filtering:
		if (ether_addr_equal(p->br->dev->dev_addr, dest))
			skb->pkt_type = PACKET_HOST;

		return nf_hook_bridge_pre(skb, pskb);
	default:
drop:
		kfree_skb(skb);
	}
	return RX_HANDLER_CONSUMED;
}
/* This function has no purpose other than to appease the br_port_get_rcu/rtnl
 * helpers which identify bridged ports according to the rx_handler installed
 * on them (so there _needs_ to be a bridge rx_handler even if we don't need it
 * to do anything useful). This bridge won't support traffic to/from the stack,
 * but only hardware bridging. So return RX_HANDLER_PASS so we don't steal
 * frames from the ETH_P_XDSA packet_type handler.
 */
static rx_handler_result_t br_handle_frame_dummy(struct sk_buff **pskb)
{
	return RX_HANDLER_PASS;
}
  381. rx_handler_func_t *br_get_rx_handler(const struct net_device *dev)
  382. {
  383. if (netdev_uses_dsa(dev))
  384. return br_handle_frame_dummy;
  385. return br_handle_frame;
  386. }
/* Register a per-protocol frame handler on @br.  Entries are matched
 * against skb->protocol by br_process_frame_type() on the RX path,
 * hence the RCU list insertion.  NOTE(review): writer-side locking
 * (presumably RTNL) is assumed from the _rcu usage — confirm at callers.
 */
void br_add_frame(struct net_bridge *br, struct br_frame_type *ft)
{
	hlist_add_head_rcu(&ft->list, &br->frame_type_list);
}
  391. void br_del_frame(struct net_bridge *br, struct br_frame_type *ft)
  392. {
  393. struct br_frame_type *tmp;
  394. hlist_for_each_entry(tmp, &br->frame_type_list, list)
  395. if (ft == tmp) {
  396. hlist_del_rcu(&ft->list);
  397. return;
  398. }
  399. }