/*
 * br_vlan.c - bridge vlan functionality
 * (web-scrape navigation/line-number residue removed; it was not valid C)
 */
  1. #include <linux/kernel.h>
  2. #include <linux/netdevice.h>
  3. #include <linux/rtnetlink.h>
  4. #include <linux/slab.h>
  5. #include <net/switchdev.h>
  6. #include "br_private.h"
  7. #include "br_private_tunnel.h"
  8. static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
  9. const void *ptr)
  10. {
  11. const struct net_bridge_vlan *vle = ptr;
  12. u16 vid = *(u16 *)arg->key;
  13. return vle->vid != vid;
  14. }
/* rhashtable layout/tuning shared by every per-bridge and per-port vlan
 * hash table; entries are keyed by the vid stored in a u16.
 */
static const struct rhashtable_params br_vlan_rht_params = {
	.head_offset = offsetof(struct net_bridge_vlan, vnode),
	.key_offset = offsetof(struct net_bridge_vlan, vid),
	.key_len = sizeof(u16),
	.nelem_hint = 3,		/* most configs use only a few vlans */
	.locks_mul = 1,
	.max_size = VLAN_N_VID,		/* vid space is bounded at 4096 */
	.obj_cmpfn = br_vlan_cmp,
	.automatic_shrinking = true,
};
  25. static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
  26. {
  27. return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
  28. }
/* Make @vid the group's PVID.
 * Returns true if the pvid actually changed, false if it already was @vid.
 */
static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid == vid)
		return false;

	/* order prior vlan-entry publication before the pvid store so
	 * lockless readers that see the new pvid also see the entry;
	 * NOTE(review): pairs with a read-side barrier in br_get_pvid() —
	 * confirm in br_private.h
	 */
	smp_wmb();
	vg->pvid = vid;
	return true;
}
/* Clear the group's PVID if it is currently @vid.
 * Returns true if the pvid was cleared, false if @vid was not the pvid.
 */
static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
{
	if (vg->pvid != vid)
		return false;

	/* write barrier mirrors __vlan_add_pvid() for lockless readers */
	smp_wmb();
	vg->pvid = 0;
	return true;
}
  45. /* return true if anything changed, false otherwise */
  46. static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
  47. {
  48. struct net_bridge_vlan_group *vg;
  49. u16 old_flags = v->flags;
  50. bool ret;
  51. if (br_vlan_is_master(v))
  52. vg = br_vlan_group(v->br);
  53. else
  54. vg = nbp_vlan_group(v->port);
  55. if (flags & BRIDGE_VLAN_INFO_PVID)
  56. ret = __vlan_add_pvid(vg, v->vid);
  57. else
  58. ret = __vlan_delete_pvid(vg, v->vid);
  59. if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
  60. v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
  61. else
  62. v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
  63. return ret || !!(old_flags ^ v->flags);
  64. }
  65. static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
  66. u16 vid, u16 flags)
  67. {
  68. int err;
  69. /* Try switchdev op first. In case it is not supported, fallback to
  70. * 8021q add.
  71. */
  72. err = br_switchdev_port_vlan_add(dev, vid, flags);
  73. if (err == -EOPNOTSUPP)
  74. return vlan_vid_add(dev, br->vlan_proto, vid);
  75. return err;
  76. }
/* Insert @v into its group's vlan list, which is kept sorted by ascending
 * vid. The list is walked from the tail so the common case of adding a
 * new highest vid terminates immediately.
 */
static void __vlan_add_list(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan_group *vg;
	struct list_head *headp, *hpos;
	struct net_bridge_vlan *vent;

	if (br_vlan_is_master(v))
		vg = br_vlan_group(v->br);
	else
		vg = nbp_vlan_group(v->port);
	headp = &vg->vlan_list;
	list_for_each_prev(hpos, headp) {
		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
		if (v->vid < vent->vid)
			continue;
		else
			break;	/* found last entry with vid <= v->vid */
	}
	/* RCU insert after @hpos (list head when the list was empty) */
	list_add_rcu(&v->vlist, hpos);
}
/* Unlink @v from its group's sorted vlan list. RCU readers may still be
 * traversing, so callers must only free @v after a grace period (see the
 * kfree_rcu()/call_rcu() at the call sites).
 */
static void __vlan_del_list(struct net_bridge_vlan *v)
{
	list_del_rcu(&v->vlist);
}
  100. static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
  101. u16 vid)
  102. {
  103. int err;
  104. /* Try switchdev op first. In case it is not supported, fallback to
  105. * 8021q del.
  106. */
  107. err = br_switchdev_port_vlan_del(dev, vid);
  108. if (err == -EOPNOTSUPP) {
  109. vlan_vid_del(dev, br->vlan_proto, vid);
  110. return 0;
  111. }
  112. return err;
  113. }
/* Returns a master vlan, if it didn't exist it gets created. In all cases
 * a reference is taken to the master vlan before returning.
 */
static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *masterv;

	vg = br_vlan_group(br);
	masterv = br_vlan_find(vg, vid);
	if (!masterv) {
		bool changed;

		/* missing global ctx, create it now */
		if (br_vlan_add(br, vid, 0, &changed))
			return NULL;
		masterv = br_vlan_find(vg, vid);
		if (WARN_ON(!masterv))
			return NULL;
		/* first reference is handed to the caller */
		refcount_set(&masterv->refcnt, 1);
		return masterv;
	}
	refcount_inc(&masterv->refcnt);
	return masterv;
}
  137. static void br_master_vlan_rcu_free(struct rcu_head *rcu)
  138. {
  139. struct net_bridge_vlan *v;
  140. v = container_of(rcu, struct net_bridge_vlan, rcu);
  141. WARN_ON(!br_vlan_is_master(v));
  142. free_percpu(v->stats);
  143. v->stats = NULL;
  144. kfree(v);
  145. }
/* Drop a reference on master vlan @masterv; on the last put the entry is
 * removed from the hash and list and freed after an RCU grace period.
 * No-op for non-master (port) vlans, which are not refcounted.
 */
static void br_vlan_put_master(struct net_bridge_vlan *masterv)
{
	struct net_bridge_vlan_group *vg;

	if (!br_vlan_is_master(masterv))
		return;

	vg = br_vlan_group(masterv->br);
	if (refcount_dec_and_test(&masterv->refcnt)) {
		/* unpublish before scheduling the RCU free */
		rhashtable_remove_fast(&vg->vlan_hash,
				       &masterv->vnode, br_vlan_rht_params);
		__vlan_del_list(masterv);
		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
	}
}
/* This is the shared VLAN add function which works for both ports and bridge
 * devices. There are four possible calls to this function in terms of the
 * vlan entry type:
 * 1. vlan is being added on a port (no master flags, global entry exists)
 * 2. vlan is being added on a bridge (both master and brentry flags)
 * 3. vlan is being added on a port, but a global entry didn't exist which
 *    is being created right now (master flag set, brentry flag unset), the
 *    global entry is used for global per-vlan features, but not for filtering
 * 4. same as 3 but with both master and brentry flags set so the entry
 *    will be used for filtering in both the port and the bridge
 *
 * On failure every partial step is unwound via the goto ladder below.
 */
static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
{
	struct net_bridge_vlan *masterv = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_device *dev;
	struct net_bridge *br;
	int err;

	if (br_vlan_is_master(v)) {
		br = v->br;
		dev = br->dev;
		vg = br_vlan_group(br);
	} else {
		p = v->port;
		br = p->br;
		dev = p->dev;
		vg = nbp_vlan_group(p);
	}

	if (p) {
		/* Add VLAN to the device filter if it is supported.
		 * This ensures tagged traffic enters the bridge when
		 * promiscuous mode is disabled by br_manage_promisc().
		 */
		err = __vlan_vid_add(dev, br, v->vid, flags);
		if (err)
			goto out;

		/* need to work on the master vlan too */
		if (flags & BRIDGE_VLAN_INFO_MASTER) {
			bool changed;

			err = br_vlan_add(br, v->vid,
					  flags | BRIDGE_VLAN_INFO_BRENTRY,
					  &changed);
			if (err)
				goto out_filt;
		}

		/* a port vlan pins its master entry via a reference and
		 * shares the master's per-cpu stats (dropped in __vlan_del)
		 */
		masterv = br_vlan_get_master(br, v->vid);
		if (!masterv) {
			err = -ENOMEM;
			goto out_filt;
		}
		v->brvlan = masterv;
		v->stats = masterv->stats;
	} else {
		/* bridge (master) vlan: only notify the hardware */
		err = br_switchdev_port_vlan_add(dev, v->vid, flags);
		if (err && err != -EOPNOTSUPP)
			goto out;
	}

	/* Add the dev mac and count the vlan only if it's usable */
	if (br_vlan_should_use(v)) {
		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
		if (err) {
			br_err(br, "failed insert local address into bridge forwarding table\n");
			goto out_filt;
		}
		vg->num_vlans++;
	}

	/* publish the entry; fails if the vid already exists in the table */
	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
					    br_vlan_rht_params);
	if (err)
		goto out_fdb_insert;

	__vlan_add_list(v);
	__vlan_add_flags(v, flags);
out:
	return err;

out_fdb_insert:
	if (br_vlan_should_use(v)) {
		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
		vg->num_vlans--;
	}

out_filt:
	if (p) {
		__vlan_vid_del(dev, br, v->vid);
		if (masterv) {
			br_vlan_put_master(masterv);
			v->brvlan = NULL;
		}
	} else {
		br_switchdev_port_vlan_del(dev, v->vid);
	}

	goto out;
}
/* Shared VLAN delete for port and bridge entries: unwinds the hardware
 * and 8021q filters, the pvid, the usable-vlan count and (for port vlans)
 * the reference held on the master entry. Caller holds RTNL.
 * Returns 0 on success or a negative errno from the filter removal.
 */
static int __vlan_del(struct net_bridge_vlan *v)
{
	struct net_bridge_vlan *masterv = v;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	int err = 0;

	if (br_vlan_is_master(v)) {
		vg = br_vlan_group(v->br);
	} else {
		p = v->port;
		vg = nbp_vlan_group(v->port);
		masterv = v->brvlan;
	}

	__vlan_delete_pvid(vg, v->vid);
	if (p) {
		err = __vlan_vid_del(p->dev, p->br, v->vid);
		if (err)
			goto out;
	} else {
		err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
		if (err && err != -EOPNOTSUPP)
			goto out;
		err = 0;
	}

	if (br_vlan_should_use(v)) {
		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans--;
	}

	/* only free port entries here; a master entry may still be pinned
	 * by other port vlans and is freed by br_vlan_put_master() below
	 * when the last reference goes away
	 */
	if (masterv != v) {
		vlan_tunnel_info_del(vg, v);
		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
				       br_vlan_rht_params);
		__vlan_del_list(v);
		kfree_rcu(v, rcu);
	}

	br_vlan_put_master(masterv);
out:
	return err;
}
/* Final teardown of a vlan group. Must run only after the group has been
 * unpublished and an RCU grace period has elapsed (see br_vlan_flush()),
 * so no readers can still reference it; the list must already be empty.
 */
static void __vlan_group_free(struct net_bridge_vlan_group *vg)
{
	WARN_ON(!list_empty(&vg->vlan_list));
	rhashtable_destroy(&vg->vlan_hash);
	vlan_tunnel_deinit(vg);
	kfree(vg);
}
/* Delete every vlan in @vg and clear its pvid. Caller holds RTNL.
 * Uses the _safe iterator because __vlan_del() unlinks entries.
 */
static void __vlan_flush(struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *vlan, *tmp;

	__vlan_delete_pvid(vg, vg->pvid);
	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
		__vlan_del(vlan);
}
/* Egress vlan handling for @skb leaving via port @p (NULL when leaving
 * via the bridge device itself): strip the tag for untagged-egress vlans,
 * account tx stats and apply egress tunnel mangling.
 * Returns the (possibly modified) skb, or NULL when it was dropped —
 * in that case the skb has been freed here.
 */
struct sk_buff *br_handle_vlan(struct net_bridge *br,
			       const struct net_bridge_port *p,
			       struct net_bridge_vlan_group *vg,
			       struct sk_buff *skb)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	u16 vid;

	/* If this packet was not filtered at input, let it pass */
	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
		goto out;

	/* At this point, we know that the frame was filtered and contains
	 * a valid vlan id. If the vlan id has untagged flag set,
	 * send untagged; otherwise, send tagged.
	 */
	br_vlan_get_tag(skb, &vid);
	v = br_vlan_find(vg, vid);
	/* Vlan entry must be configured at this point. The
	 * only exception is the bridge is set in promisc mode and the
	 * packet is destined for the bridge device. In this case
	 * pass the packet as is.
	 */
	if (!v || !br_vlan_should_use(v)) {
		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
			goto out;
		} else {
			kfree_skb(skb);
			return NULL;
		}
	}
	if (br->vlan_stats_enabled) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_bytes += skb->len;
		stats->tx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	/* egress untagged: drop the hwaccel tag before transmission */
	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
		skb->vlan_tci = 0;

	/* tunnel mapping may consume the skb on error */
	if (p && (p->flags & BR_VLAN_TUNNEL) &&
	    br_handle_egress_vlan_tunnel(skb, v)) {
		kfree_skb(skb);
		return NULL;
	}
out:
	return skb;
}
/* Called under RCU.
 * Ingress vlan filtering core: normalizes the skb's vlan tag, resolves
 * untagged/priority-tagged frames to the port's pvid, and accepts the
 * frame only if the resulting vid has a usable vlan entry in @vg.
 * Returns true to accept (with *vid set); on false the skb was freed.
 */
static bool __allowed_ingress(const struct net_bridge *br,
			      struct net_bridge_vlan_group *vg,
			      struct sk_buff *skb, u16 *vid)
{
	struct br_vlan_stats *stats;
	struct net_bridge_vlan *v;
	bool tagged;

	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
	/* If vlan tx offload is disabled on bridge device and frame was
	 * sent from vlan device on the bridge device, it does not have
	 * HW accelerated vlan tag.
	 */
	if (unlikely(!skb_vlan_tag_present(skb) &&
		     skb->protocol == br->vlan_proto)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			return false;
	}

	if (!br_vlan_get_tag(skb, vid)) {
		/* Tagged frame */
		if (skb->vlan_proto != br->vlan_proto) {
			/* Protocol-mismatch, empty out vlan_tci for new tag */
			skb_push(skb, ETH_HLEN);
			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
							skb_vlan_tag_get(skb));
			if (unlikely(!skb))
				return false;

			skb_pull(skb, ETH_HLEN);
			skb_reset_mac_len(skb);
			*vid = 0;
			tagged = false;
		} else {
			tagged = true;
		}
	} else {
		/* Untagged frame */
		tagged = false;
	}

	if (!*vid) {
		u16 pvid = br_get_pvid(vg);

		/* Frame had a tag with VID 0 or did not have a tag.
		 * See if pvid is set on this port. That tells us which
		 * vlan untagged or priority-tagged traffic belongs to.
		 */
		if (!pvid)
			goto drop;

		/* PVID is set on this port. Any untagged or priority-tagged
		 * ingress frame is considered to belong to this vlan.
		 */
		*vid = pvid;
		if (likely(!tagged))
			/* Untagged Frame. */
			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
		else
			/* Priority-tagged Frame.
			 * At this point, We know that skb->vlan_tci had
			 * VLAN_TAG_PRESENT bit and its VID field was 0x000.
			 * We update only VID field and preserve PCP field.
			 */
			skb->vlan_tci |= pvid;

		/* if stats are disabled we can avoid the lookup */
		if (!br->vlan_stats_enabled)
			return true;
	}
	v = br_vlan_find(vg, *vid);
	if (!v || !br_vlan_should_use(v))
		goto drop;

	if (br->vlan_stats_enabled) {
		stats = this_cpu_ptr(v->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->rx_bytes += skb->len;
		stats->rx_packets++;
		u64_stats_update_end(&stats->syncp);
	}

	return true;

drop:
	kfree_skb(skb);
	return false;
}
  431. bool br_allowed_ingress(const struct net_bridge *br,
  432. struct net_bridge_vlan_group *vg, struct sk_buff *skb,
  433. u16 *vid)
  434. {
  435. /* If VLAN filtering is disabled on the bridge, all packets are
  436. * permitted.
  437. */
  438. if (!br->vlan_enabled) {
  439. BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
  440. return true;
  441. }
  442. return __allowed_ingress(br, vg, skb, vid);
  443. }
  444. /* Called under RCU. */
  445. bool br_allowed_egress(struct net_bridge_vlan_group *vg,
  446. const struct sk_buff *skb)
  447. {
  448. const struct net_bridge_vlan *v;
  449. u16 vid;
  450. /* If this packet was not filtered at input, let it pass */
  451. if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
  452. return true;
  453. br_vlan_get_tag(skb, &vid);
  454. v = br_vlan_find(vg, vid);
  455. if (v && br_vlan_should_use(v))
  456. return true;
  457. return false;
  458. }
  459. /* Called under RCU */
  460. bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
  461. {
  462. struct net_bridge_vlan_group *vg;
  463. struct net_bridge *br = p->br;
  464. /* If filtering was disabled at input, let it pass. */
  465. if (!br->vlan_enabled)
  466. return true;
  467. vg = nbp_vlan_group_rcu(p);
  468. if (!vg || !vg->num_vlans)
  469. return false;
  470. if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
  471. *vid = 0;
  472. if (!*vid) {
  473. *vid = br_get_pvid(vg);
  474. if (!*vid)
  475. return false;
  476. return true;
  477. }
  478. if (br_vlan_find(vg, *vid))
  479. return true;
  480. return false;
  481. }
/* Update an already-existing bridge vlan entry with new @flags. A master
 * entry that was only kept alive for port vlans may be promoted to a real
 * bridge entry when BRIDGE_VLAN_INFO_BRENTRY is requested; attempting any
 * other flag change on such a non-brentry fails with -EINVAL.
 * *changed is set true if anything was modified.
 */
static int br_vlan_add_existing(struct net_bridge *br,
				struct net_bridge_vlan_group *vg,
				struct net_bridge_vlan *vlan,
				u16 flags, bool *changed)
{
	int err;

	/* notify hardware first so we can bail out before touching state */
	err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags);
	if (err && err != -EOPNOTSUPP)
		return err;

	if (!br_vlan_is_brentry(vlan)) {
		/* Trying to change flags of non-existent bridge vlan */
		if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
			err = -EINVAL;
			goto err_flags;
		}
		/* It was only kept for port vlans, now make it real */
		err = br_fdb_insert(br, NULL, br->dev->dev_addr,
				    vlan->vid);
		if (err) {
			br_err(br, "failed to insert local address into bridge forwarding table\n");
			goto err_fdb_insert;
		}

		refcount_inc(&vlan->refcnt);
		vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
		vg->num_vlans++;
		*changed = true;
	}

	if (__vlan_add_flags(vlan, flags))
		*changed = true;

	return 0;

err_fdb_insert:
err_flags:
	/* roll back the hardware notification */
	br_switchdev_port_vlan_del(br->dev, vlan->vid);
	return err;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 * changed must be true only if the vlan was created or updated
 */
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	int ret;

	ASSERT_RTNL();

	*changed = false;
	vg = br_vlan_group(br);
	vlan = br_vlan_find(vg, vid);
	if (vlan)
		return br_vlan_add_existing(br, vg, vlan, flags, changed);

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
	if (!vlan->stats) {
		kfree(vlan);
		return -ENOMEM;
	}
	vlan->vid = vid;
	/* entries created on the bridge are always masters; PVID is
	 * managed separately via __vlan_add_flags()
	 */
	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
	vlan->br = br;
	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
		/* bridge's own reference for a real brentry */
		refcount_set(&vlan->refcnt, 1);
	ret = __vlan_add(vlan, flags);
	if (ret) {
		free_percpu(vlan->stats);
		kfree(vlan);
	} else {
		*changed = true;
	}

	return ret;
}
/* Must be protected by RTNL.
 * Must be called with vid in range from 1 to 4094 inclusive.
 */
int br_vlan_delete(struct net_bridge *br, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;

	ASSERT_RTNL();

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	/* only entries configured on the bridge itself can be deleted here */
	if (!v || !br_vlan_is_brentry(v))
		return -ENOENT;

	/* drop the bridge's local mac and anything learned on this vid */
	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
	br_fdb_delete_by_port(br, NULL, vid, 0);

	vlan_tunnel_info_del(vg, v);

	return __vlan_del(v);
}
/* Remove all vlans from the bridge and free its vlan group. The group
 * pointer is unpublished and a grace period elapses before the final
 * free so RCU readers never see a dying group. Caller holds RTNL.
 */
void br_vlan_flush(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;

	ASSERT_RTNL();

	/* delete auto-added default pvid local fdb before flushing vlans
	 * otherwise it will be leaked on bridge device init failure
	 */
	br_fdb_delete_by_port(br, NULL, 0, 1);

	vg = br_vlan_group(br);
	__vlan_flush(vg);
	RCU_INIT_POINTER(br->vlgrp, NULL);
	synchronize_rcu();
	__vlan_group_free(vg);
}
  586. struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
  587. {
  588. if (!vg)
  589. return NULL;
  590. return br_vlan_lookup(&vg->vlan_hash, vid);
  591. }
  592. /* Must be protected by RTNL. */
  593. static void recalculate_group_addr(struct net_bridge *br)
  594. {
  595. if (br->group_addr_set)
  596. return;
  597. spin_lock_bh(&br->lock);
  598. if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
  599. /* Bridge Group Address */
  600. br->group_addr[5] = 0x00;
  601. } else { /* vlan_enabled && ETH_P_8021AD */
  602. /* Provider Bridge Group Address */
  603. br->group_addr[5] = 0x08;
  604. }
  605. spin_unlock_bh(&br->lock);
  606. }
  607. /* Must be protected by RTNL. */
  608. void br_recalculate_fwd_mask(struct net_bridge *br)
  609. {
  610. if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
  611. br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
  612. else /* vlan_enabled && ETH_P_8021AD */
  613. br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
  614. ~(1u << br->group_addr[5]);
  615. }
/* Enable/disable vlan filtering on @br. Notifies switchdev drivers first
 * (tolerating lack of support), then recomputes promiscuity, the group
 * address and the forward mask, all of which depend on filtering state.
 * Caller holds RTNL.
 */
int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	struct switchdev_attr attr = {
		.orig_dev = br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = val,
	};
	int err;

	if (br->vlan_enabled == val)
		return 0;

	err = switchdev_port_attr_set(br->dev, &attr);
	if (err && err != -EOPNOTSUPP)
		return err;

	br->vlan_enabled = val;
	br_manage_promisc(br);
	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	return 0;
}
/* Public entry point for toggling vlan filtering (sysfs/netlink). */
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
	return __br_vlan_filter_toggle(br, val);
}
  640. bool br_vlan_enabled(const struct net_device *dev)
  641. {
  642. struct net_bridge *br = netdev_priv(dev);
  643. return !!br->vlan_enabled;
  644. }
  645. EXPORT_SYMBOL_GPL(br_vlan_enabled);
/* Switch the bridge's vlan protocol (802.1Q <-> 802.1AD). Two-phase:
 * first add every configured vid under the new proto to each port's
 * device filter (rolling everything back on failure), then commit the
 * proto and remove the old-proto filter entries. Caller holds RTNL.
 */
int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
{
	int err = 0;
	struct net_bridge_port *p;
	struct net_bridge_vlan *vlan;
	struct net_bridge_vlan_group *vg;
	__be16 oldproto;

	if (br->vlan_proto == proto)
		return 0;

	/* Add VLANs for the new proto to the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
			err = vlan_vid_add(p->dev, proto, vlan->vid);
			if (err)
				goto err_filt;
		}
	}

	oldproto = br->vlan_proto;
	br->vlan_proto = proto;

	recalculate_group_addr(br);
	br_recalculate_fwd_mask(br);

	/* Delete VLANs for the old proto from the device filter. */
	list_for_each_entry(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, oldproto, vlan->vid);
	}

	return 0;

err_filt:
	/* undo the partially-filled port first, then every earlier port */
	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
		vlan_vid_del(p->dev, proto, vlan->vid);

	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		vg = nbp_vlan_group(p);
		list_for_each_entry(vlan, &vg->vlan_list, vlist)
			vlan_vid_del(p->dev, proto, vlan->vid);
	}

	return err;
}
  685. int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
  686. {
  687. if (val != ETH_P_8021Q && val != ETH_P_8021AD)
  688. return -EPROTONOSUPPORT;
  689. return __br_vlan_set_proto(br, htons(val));
  690. }
  691. int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
  692. {
  693. switch (val) {
  694. case 0:
  695. case 1:
  696. br->vlan_stats_enabled = val;
  697. break;
  698. default:
  699. return -EINVAL;
  700. }
  701. return 0;
  702. }
  703. static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  704. {
  705. struct net_bridge_vlan *v;
  706. if (vid != vg->pvid)
  707. return false;
  708. v = br_vlan_lookup(&vg->vlan_hash, vid);
  709. if (v && br_vlan_should_use(v) &&
  710. (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
  711. return true;
  712. return false;
  713. }
/* Turn off the default pvid feature: delete the default pvid vlan from
 * the bridge and every port where the user has not customized it, then
 * record that no default pvid is configured. Caller holds RTNL.
 */
static void br_vlan_disable_default_pvid(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u16 pvid = br->default_pvid;

	/* Disable default_pvid on all ports where it is still
	 * configured.
	 */
	if (vlan_default_pvid(br_vlan_group(br), pvid))
		br_vlan_delete(br, pvid);

	list_for_each_entry(p, &br->port_list, list) {
		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
			nbp_vlan_delete(p, pvid);
	}

	br->default_pvid = 0;
}
/* Change the bridge's default pvid to @pvid (0 disables the feature).
 * The new pvid vlan is installed on the bridge and on every port whose
 * old default pvid was untouched by the user; the per-port "changed"
 * bitmap drives a full rollback if any port fails. Caller holds RTNL.
 */
int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
{
	const struct net_bridge_vlan *pvent;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p;
	unsigned long *changed;
	bool vlchange;
	u16 old_pvid;
	int err = 0;

	if (!pvid) {
		br_vlan_disable_default_pvid(br);
		return 0;
	}

	/* bit 0 tracks the bridge itself, bit port_no each port */
	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			  GFP_KERNEL);
	if (!changed)
		return -ENOMEM;

	old_pvid = br->default_pvid;

	/* Update default_pvid config only if we do not conflict with
	 * user configuration.
	 */
	vg = br_vlan_group(br);
	pvent = br_vlan_find(vg, pvid);
	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
	    (!pvent || !br_vlan_should_use(pvent))) {
		err = br_vlan_add(br, pvid,
				  BRIDGE_VLAN_INFO_PVID |
				  BRIDGE_VLAN_INFO_UNTAGGED |
				  BRIDGE_VLAN_INFO_BRENTRY,
				  &vlchange);
		if (err)
			goto out;
		br_vlan_delete(br, old_pvid);
		set_bit(0, changed);
	}

	list_for_each_entry(p, &br->port_list, list) {
		/* Update default_pvid config only if we do not conflict with
		 * user configuration.
		 */
		vg = nbp_vlan_group(p);
		if ((old_pvid &&
		     !vlan_default_pvid(vg, old_pvid)) ||
		    br_vlan_find(vg, pvid))
			continue;

		err = nbp_vlan_add(p, pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &vlchange);
		if (err)
			goto err_port;
		nbp_vlan_delete(p, old_pvid);
		set_bit(p->port_no, changed);
	}

	br->default_pvid = pvid;

out:
	kfree(changed);
	return err;

err_port:
	/* restore the old pvid on every port (and the bridge) we touched */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!test_bit(p->port_no, changed))
			continue;

		if (old_pvid)
			nbp_vlan_add(p, old_pvid,
				     BRIDGE_VLAN_INFO_PVID |
				     BRIDGE_VLAN_INFO_UNTAGGED,
				     &vlchange);
		nbp_vlan_delete(p, pvid);
	}

	if (test_bit(0, changed)) {
		if (old_pvid)
			br_vlan_add(br, old_pvid,
				    BRIDGE_VLAN_INFO_PVID |
				    BRIDGE_VLAN_INFO_UNTAGGED |
				    BRIDGE_VLAN_INFO_BRENTRY,
				    &vlchange);
		br_vlan_delete(br, pvid);
	}

	goto out;
}
  808. int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
  809. {
  810. u16 pvid = val;
  811. int err = 0;
  812. if (val >= VLAN_VID_MASK)
  813. return -EINVAL;
  814. if (pvid == br->default_pvid)
  815. goto out;
  816. /* Only allow default pvid change when filtering is disabled */
  817. if (br->vlan_enabled) {
  818. pr_info_once("Please disable vlan filtering to change default_pvid\n");
  819. err = -EPERM;
  820. goto out;
  821. }
  822. err = __br_vlan_set_default_pvid(br, pvid);
  823. out:
  824. return err;
  825. }
/* Initialize the bridge's vlan group at device creation: hash table,
 * tunnel state, sorted list, default protocol (802.1Q) and the default
 * pvid vlan 1. Each failure point unwinds everything done so far.
 */
int br_vlan_init(struct net_bridge *br)
{
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;
	bool changed;

	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
	if (!vg)
		goto out;
	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	br->vlan_proto = htons(ETH_P_8021Q);
	br->default_pvid = 1;
	/* publish the group before adding the default vlan to it */
	rcu_assign_pointer(br->vlgrp, vg);
	ret = br_vlan_add(br, 1,
			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
			  BRIDGE_VLAN_INFO_BRENTRY, &changed);
	if (ret)
		goto err_vlan_add;

out:
	return ret;

err_vlan_add:
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
	kfree(vg);

	goto out;
}
/* Initialize a port's vlan group when it joins the bridge: propagate the
 * bridge's filtering state to the underlying switchdev device, set up the
 * hash table/tunnel state, and inherit the bridge's default pvid (if any).
 * On failure every completed step is unwound; the group pointer is
 * unpublished with a grace period before freeing so RCU readers are safe.
 */
int nbp_vlan_init(struct net_bridge_port *p)
{
	struct switchdev_attr attr = {
		.orig_dev = p->br->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
		.u.vlan_filtering = p->br->vlan_enabled,
	};
	struct net_bridge_vlan_group *vg;
	int ret = -ENOMEM;

	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
	if (!vg)
		goto out;

	ret = switchdev_port_attr_set(p->dev, &attr);
	if (ret && ret != -EOPNOTSUPP)
		goto err_vlan_enabled;

	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
	if (ret)
		goto err_rhtbl;
	ret = vlan_tunnel_init(vg);
	if (ret)
		goto err_tunnel_init;
	INIT_LIST_HEAD(&vg->vlan_list);
	/* publish the group before adding the default pvid vlan to it */
	rcu_assign_pointer(p->vlgrp, vg);
	if (p->br->default_pvid) {
		bool changed;

		ret = nbp_vlan_add(p, p->br->default_pvid,
				   BRIDGE_VLAN_INFO_PVID |
				   BRIDGE_VLAN_INFO_UNTAGGED,
				   &changed);
		if (ret)
			goto err_vlan_add;
	}
out:
	return ret;

err_vlan_add:
	RCU_INIT_POINTER(p->vlgrp, NULL);
	synchronize_rcu();
	vlan_tunnel_deinit(vg);
err_tunnel_init:
	rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
err_vlan_enabled:
	kfree(vg);

	goto out;
}
  905. /* Must be protected by RTNL.
  906. * Must be called with vid in range from 1 to 4094 inclusive.
  907. * changed must be true only if the vlan was created or updated
  908. */
  909. int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
  910. bool *changed)
  911. {
  912. struct net_bridge_vlan *vlan;
  913. int ret;
  914. ASSERT_RTNL();
  915. *changed = false;
  916. vlan = br_vlan_find(nbp_vlan_group(port), vid);
  917. if (vlan) {
  918. /* Pass the flags to the hardware bridge */
  919. ret = br_switchdev_port_vlan_add(port->dev, vid, flags);
  920. if (ret && ret != -EOPNOTSUPP)
  921. return ret;
  922. *changed = __vlan_add_flags(vlan, flags);
  923. return 0;
  924. }
  925. vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
  926. if (!vlan)
  927. return -ENOMEM;
  928. vlan->vid = vid;
  929. vlan->port = port;
  930. ret = __vlan_add(vlan, flags);
  931. if (ret)
  932. kfree(vlan);
  933. else
  934. *changed = true;
  935. return ret;
  936. }
  937. /* Must be protected by RTNL.
  938. * Must be called with vid in range from 1 to 4094 inclusive.
  939. */
  940. int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
  941. {
  942. struct net_bridge_vlan *v;
  943. ASSERT_RTNL();
  944. v = br_vlan_find(nbp_vlan_group(port), vid);
  945. if (!v)
  946. return -ENOENT;
  947. br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
  948. br_fdb_delete_by_port(port->br, port, vid, 0);
  949. return __vlan_del(v);
  950. }
  951. void nbp_vlan_flush(struct net_bridge_port *port)
  952. {
  953. struct net_bridge_vlan_group *vg;
  954. ASSERT_RTNL();
  955. vg = nbp_vlan_group(port);
  956. __vlan_flush(vg);
  957. RCU_INIT_POINTER(port->vlgrp, NULL);
  958. synchronize_rcu();
  959. __vlan_group_free(vg);
  960. }
  961. void br_vlan_get_stats(const struct net_bridge_vlan *v,
  962. struct br_vlan_stats *stats)
  963. {
  964. int i;
  965. memset(stats, 0, sizeof(*stats));
  966. for_each_possible_cpu(i) {
  967. u64 rxpackets, rxbytes, txpackets, txbytes;
  968. struct br_vlan_stats *cpu_stats;
  969. unsigned int start;
  970. cpu_stats = per_cpu_ptr(v->stats, i);
  971. do {
  972. start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
  973. rxpackets = cpu_stats->rx_packets;
  974. rxbytes = cpu_stats->rx_bytes;
  975. txbytes = cpu_stats->tx_bytes;
  976. txpackets = cpu_stats->tx_packets;
  977. } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
  978. stats->rx_packets += rxpackets;
  979. stats->rx_bytes += rxbytes;
  980. stats->tx_bytes += txbytes;
  981. stats->tx_packets += txpackets;
  982. }
  983. }
  984. int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
  985. {
  986. struct net_bridge_vlan_group *vg;
  987. ASSERT_RTNL();
  988. if (netif_is_bridge_master(dev))
  989. vg = br_vlan_group(netdev_priv(dev));
  990. else
  991. return -EINVAL;
  992. *p_pvid = br_get_pvid(vg);
  993. return 0;
  994. }
  995. EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
  996. int br_vlan_get_info(const struct net_device *dev, u16 vid,
  997. struct bridge_vlan_info *p_vinfo)
  998. {
  999. struct net_bridge_vlan_group *vg;
  1000. struct net_bridge_vlan *v;
  1001. struct net_bridge_port *p;
  1002. ASSERT_RTNL();
  1003. p = br_port_get_check_rtnl(dev);
  1004. if (p)
  1005. vg = nbp_vlan_group(p);
  1006. else if (netif_is_bridge_master(dev))
  1007. vg = br_vlan_group(netdev_priv(dev));
  1008. else
  1009. return -EINVAL;
  1010. v = br_vlan_find(vg, vid);
  1011. if (!v)
  1012. return -ENOENT;
  1013. p_vinfo->vid = vid;
  1014. p_vinfo->flags = v->flags;
  1015. return 0;
  1016. }
  1017. EXPORT_SYMBOL_GPL(br_vlan_get_info);