br_fdb.c 39 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Forwarding database
  4. * Linux ethernet bridge
  5. *
  6. * Authors:
  7. * Lennert Buytenhek <buytenh@gnu.org>
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/init.h>
  11. #include <linux/rculist.h>
  12. #include <linux/spinlock.h>
  13. #include <linux/times.h>
  14. #include <linux/netdevice.h>
  15. #include <linux/etherdevice.h>
  16. #include <linux/jhash.h>
  17. #include <linux/random.h>
  18. #include <linux/slab.h>
  19. #include <linux/atomic.h>
  20. #include <linux/unaligned.h>
  21. #include <linux/if_vlan.h>
  22. #include <net/switchdev.h>
  23. #include <trace/events/bridge.h>
  24. #include "br_private.h"
/* rhashtable configuration for the FDB: entries are keyed by the
 * {MAC address, vlan_id} pair embedded in each entry as
 * struct net_bridge_fdb_key.
 */
static const struct rhashtable_params br_fdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_fdb_entry, key),
	.key_len = sizeof(struct net_bridge_fdb_key),
	.automatic_shrinking = true,
};

/* slab cache for FDB entries, shared by all bridges in the system */
static struct kmem_cache *br_fdb_cache __read_mostly;
  32. int __init br_fdb_init(void)
  33. {
  34. br_fdb_cache = KMEM_CACHE(net_bridge_fdb_entry, SLAB_HWCACHE_ALIGN);
  35. if (!br_fdb_cache)
  36. return -ENOMEM;
  37. return 0;
  38. }
/* Module exit: destroy the FDB entry slab cache. */
void br_fdb_fini(void)
{
	kmem_cache_destroy(br_fdb_cache);
}
/* Initialize a bridge's per-device FDB hash table. */
int br_fdb_hash_init(struct net_bridge *br)
{
	return rhashtable_init(&br->fdb_hash_tbl, &br_fdb_rht_params);
}
/* Tear down a bridge's FDB hash table. */
void br_fdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->fdb_hash_tbl);
}
  51. /* if topology_changing then use forward_delay (default 15 sec)
  52. * otherwise keep longer (default 5 minutes)
  53. */
  54. static inline unsigned long hold_time(const struct net_bridge *br)
  55. {
  56. return br->topology_change ? br->forward_delay : br->ageing_time;
  57. }
  58. static inline int has_expired(const struct net_bridge *br,
  59. const struct net_bridge_fdb_entry *fdb)
  60. {
  61. return !test_bit(BR_FDB_STATIC, &fdb->flags) &&
  62. !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags) &&
  63. time_before_eq(fdb->updated + hold_time(br), jiffies);
  64. }
/* RCU callback: return an unlinked FDB entry to the slab cache once
 * all lockless readers are guaranteed to be done with it.
 */
static void fdb_rcu_free(struct rcu_head *head)
{
	struct net_bridge_fdb_entry *ent
		= container_of(head, struct net_bridge_fdb_entry, rcu);
	kmem_cache_free(br_fdb_cache, ent);
}
  71. static int fdb_to_nud(const struct net_bridge *br,
  72. const struct net_bridge_fdb_entry *fdb)
  73. {
  74. if (test_bit(BR_FDB_LOCAL, &fdb->flags))
  75. return NUD_PERMANENT;
  76. else if (test_bit(BR_FDB_STATIC, &fdb->flags))
  77. return NUD_NOARP;
  78. else if (has_expired(br, fdb))
  79. return NUD_STALE;
  80. else
  81. return NUD_REACHABLE;
  82. }
/* Fill one RTM_*NEIGH netlink message describing @fdb.
 * Returns 0 on success or -EMSGSIZE when @skb runs out of room, in
 * which case the partially built message is cancelled.
 */
static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
			 const struct net_bridge_fdb_entry *fdb,
			 u32 portid, u32 seq, int type, unsigned int flags)
{
	const struct net_bridge_port *dst = READ_ONCE(fdb->dst);
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	u32 ext_flags = 0;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = 0;
	ndm->ndm_type	 = 0;
	/* entries without a destination port belong to the bridge itself */
	ndm->ndm_ifindex = dst ? dst->dev->ifindex : br->dev->ifindex;
	ndm->ndm_state   = fdb_to_nud(br, fdb);

	if (test_bit(BR_FDB_OFFLOADED, &fdb->flags))
		ndm->ndm_flags |= NTF_OFFLOADED;
	if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
		ndm->ndm_flags |= NTF_EXT_LEARNED;
	if (test_bit(BR_FDB_STICKY, &fdb->flags))
		ndm->ndm_flags |= NTF_STICKY;
	/* "locked" has no bit in ndm_flags, it goes via NDA_FLAGS_EXT */
	if (test_bit(BR_FDB_LOCKED, &fdb->flags))
		ext_flags |= NTF_EXT_LOCKED;

	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
		goto nla_put_failure;
	if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
		goto nla_put_failure;
	if (nla_put_u32(skb, NDA_FLAGS_EXT, ext_flags))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;
	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	/* vlan 0 means "no vlan" and is omitted from the message */
	if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16),
					&fdb->key.vlan_id))
		goto nla_put_failure;

	if (test_bit(BR_FDB_NOTIFY, &fdb->flags)) {
		struct nlattr *nest = nla_nest_start(skb, NDA_FDB_EXT_ATTRS);
		u8 notify_bits = FDB_NOTIFY_BIT;

		if (!nest)
			goto nla_put_failure;
		if (test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
			notify_bits |= FDB_NOTIFY_INACTIVE_BIT;

		if (nla_put_u8(skb, NFEA_ACTIVITY_NOTIFY, notify_bits)) {
			nla_nest_cancel(skb, nest);
			goto nla_put_failure;
		}

		nla_nest_end(skb, nest);
	}

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
/* Worst-case payload size for one FDB netlink notification; must
 * cover every attribute fdb_fill_info() can emit, or fdb_notify()
 * will WARN on -EMSGSIZE.
 */
static inline size_t fdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(u32)) /* NDA_MASTER */
		+ nla_total_size(sizeof(u32)) /* NDA_FLAGS_EXT */
		+ nla_total_size(sizeof(u16)) /* NDA_VLAN */
		+ nla_total_size(sizeof(struct nda_cacheinfo))
		+ nla_total_size(0) /* NDA_FDB_EXT_ATTRS */
		+ nla_total_size(sizeof(u8)); /* NFEA_ACTIVITY_NOTIFY */
}
/* Send an RTM_NEWNEIGH/RTM_DELNEIGH notification for @fdb to the
 * RTNLGRP_NEIGH multicast group, and optionally (when @swdev_notify)
 * to switchdev listeners first. Allocation failures are reported via
 * rtnl_set_sk_err() rather than propagated.
 */
static void fdb_notify(struct net_bridge *br,
		       const struct net_bridge_fdb_entry *fdb, int type,
		       bool swdev_notify)
{
	struct net *net = dev_net(br->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	if (swdev_notify)
		br_switchdev_fdb_notify(br, fdb, type);

	skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
  181. static struct net_bridge_fdb_entry *fdb_find_rcu(struct rhashtable *tbl,
  182. const unsigned char *addr,
  183. __u16 vid)
  184. {
  185. struct net_bridge_fdb_key key;
  186. WARN_ON_ONCE(!rcu_read_lock_held());
  187. key.vlan_id = vid;
  188. memcpy(key.addr.addr, addr, sizeof(key.addr.addr));
  189. return rhashtable_lookup(tbl, &key, br_fdb_rht_params);
  190. }
/* requires bridge hash_lock */
static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
						const unsigned char *addr,
						__u16 vid)
{
	struct net_bridge_fdb_entry *fdb;

	/* hash_lock keeps the entry alive after rcu_read_unlock() */
	lockdep_assert_held_once(&br->hash_lock);

	rcu_read_lock();
	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
	rcu_read_unlock();

	return fdb;
}
/* Look up the port device currently holding {addr, vid} behind
 * @br_dev. Returns NULL if @br_dev is not a bridge master or the
 * address is unknown/unassociated. Called under RTNL, which keeps
 * the returned device from disappearing.
 */
struct net_device *br_fdb_find_port(const struct net_device *br_dev,
				    const unsigned char *addr,
				    __u16 vid)
{
	struct net_bridge_fdb_entry *f;
	struct net_device *dev = NULL;
	struct net_bridge *br;

	ASSERT_RTNL();

	if (!netif_is_bridge_master(br_dev))
		return NULL;

	br = netdev_priv(br_dev);
	rcu_read_lock();
	f = br_fdb_find_rcu(br, addr, vid);
	if (f && f->dst)
		dev = f->dst->dev;
	rcu_read_unlock();

	return dev;
}
EXPORT_SYMBOL_GPL(br_fdb_find_port);
/* RCU variant of the FDB lookup; caller must hold rcu_read_lock(). */
struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
					     const unsigned char *addr,
					     __u16 vid)
{
	return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
}
/* When a static FDB entry is added, the mac address from the entry is
 * added to the bridge private HW address list and all required ports
 * are then updated with the new information.
 * Called under RTNL.
 */
static void fdb_add_hw_addr(struct net_bridge *br, const unsigned char *addr)
{
	int err;
	struct net_bridge_port *p;

	ASSERT_RTNL();

	/* promiscuous ports see everything already and need no filter entry */
	list_for_each_entry(p, &br->port_list, list) {
		if (!br_promisc_port(p)) {
			err = dev_uc_add(p->dev, addr);
			if (err)
				goto undo;
		}
	}

	return;
undo:
	/* roll back the ports updated so far, in reverse order */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!br_promisc_port(p))
			dev_uc_del(p->dev, addr);
	}
}
  252. /* When a static FDB entry is deleted, the HW address from that entry is
  253. * also removed from the bridge private HW address list and updates all
  254. * the ports with needed information.
  255. * Called under RTNL.
  256. */
  257. static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
  258. {
  259. struct net_bridge_port *p;
  260. ASSERT_RTNL();
  261. list_for_each_entry(p, &br->port_list, list) {
  262. if (!br_promisc_port(p))
  263. dev_uc_del(p->dev, addr);
  264. }
  265. }
/* Unlink @f from the FDB and schedule its RCU-deferred free.
 * Caller holds br->hash_lock. Freeing via call_rcu() lets lockless
 * readers still traversing the lists finish safely.
 */
static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f,
		       bool swdev_notify)
{
	trace_fdb_delete(br, f);

	/* static entries also live in the ports' HW address lists */
	if (test_bit(BR_FDB_STATIC, &f->flags))
		fdb_del_hw_addr(br, f->key.addr.addr);

	hlist_del_init_rcu(&f->fdb_node);
	rhashtable_remove_fast(&br->fdb_hash_tbl, &f->rhnode,
			       br_fdb_rht_params);
	/* keep the dynamically-learned entry count in sync */
	if (test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &f->flags))
		atomic_dec(&br->fdb_n_learned);
	fdb_notify(br, f, RTM_DELNEIGH, swdev_notify);
	call_rcu(&f->rcu, fdb_rcu_free);
}
/* Delete a local entry if no other port had the same address.
 *
 * This function should only be called on entries with BR_FDB_LOCAL set,
 * so even with BR_FDB_ADDED_BY_USER cleared we never need to increase
 * the accounting for dynamically learned entries again.
 */
static void fdb_delete_local(struct net_bridge *br,
			     const struct net_bridge_port *p,
			     struct net_bridge_fdb_entry *f)
{
	const unsigned char *addr = f->key.addr.addr;
	struct net_bridge_vlan_group *vg;
	const struct net_bridge_vlan *v;
	struct net_bridge_port *op;
	u16 vid = f->key.vlan_id;

	/* Maybe another port has same hw addr? If so, re-point the
	 * entry at that port instead of deleting it.
	 */
	list_for_each_entry(op, &br->port_list, list) {
		vg = nbp_vlan_group(op);
		if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
		    (!vid || br_vlan_find(vg, vid))) {
			f->dst = op;
			clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
			return;
		}
	}

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	/* Maybe bridge device has same hw addr? Then keep the entry as
	 * a bridge-local one (dst == NULL).
	 */
	if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
	    (!vid || (v && br_vlan_should_use(v)))) {
		f->dst = NULL;
		clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
		return;
	}

	/* no other owner for this address: really delete it */
	fdb_delete(br, f, true);
}
/* Delete (or re-home) the local FDB entry {addr, vid} for port @p,
 * but only if it was auto-created (not added by the user).
 */
void br_fdb_find_delete_local(struct net_bridge *br,
			      const struct net_bridge_port *p,
			      const unsigned char *addr, u16 vid)
{
	struct net_bridge_fdb_entry *f;

	spin_lock_bh(&br->hash_lock);
	f = br_fdb_find(br, addr, vid);
	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
	    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags) && f->dst == p)
		fdb_delete_local(br, p, f);
	spin_unlock_bh(&br->hash_lock);
}
/* Allocate and insert a new FDB entry for {addr, vid} on @source.
 * Returns NULL on allocation failure, duplicate key, or when the
 * configured learned-entry limit has been reached.
 * Caller holds br->hash_lock.
 */
static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
					       struct net_bridge_port *source,
					       const unsigned char *addr,
					       __u16 vid,
					       unsigned long flags)
{
	/* an entry counts against the learned limit only if it is
	 * neither user-added nor a local (bridge/port own MAC) entry
	 */
	bool learned = !test_bit(BR_FDB_ADDED_BY_USER, &flags) &&
		       !test_bit(BR_FDB_LOCAL, &flags);
	u32 max_learned = READ_ONCE(br->fdb_max_learned);
	struct net_bridge_fdb_entry *fdb;
	int err;

	if (likely(learned)) {
		int n_learned = atomic_read(&br->fdb_n_learned);

		/* NOTE(review): read-then-insert is not atomic, so the
		 * limit appears to be enforced approximately under
		 * concurrent learning — confirm this is intentional.
		 */
		if (unlikely(max_learned && n_learned >= max_learned))
			return NULL;
		__set_bit(BR_FDB_DYNAMIC_LEARNED, &flags);
	}

	fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
	if (!fdb)
		return NULL;

	memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
	WRITE_ONCE(fdb->dst, source);
	fdb->key.vlan_id = vid;
	fdb->flags = flags;
	fdb->updated = fdb->used = jiffies;
	err = rhashtable_lookup_insert_fast(&br->fdb_hash_tbl, &fdb->rhnode,
					    br_fdb_rht_params);
	if (err) {
		/* duplicate key or table error: drop the new entry */
		kmem_cache_free(br_fdb_cache, fdb);
		return NULL;
	}

	if (likely(learned))
		atomic_inc(&br->fdb_n_learned);

	hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list);

	return fdb;
}
/* Install a local (own MAC) entry for {addr, vid}, replacing any
 * previously learned non-local entry for the same key.
 * Caller holds br->hash_lock. Returns 0, -EINVAL, or -ENOMEM.
 */
static int fdb_add_local(struct net_bridge *br, struct net_bridge_port *source,
			 const unsigned char *addr, u16 vid)
{
	struct net_bridge_fdb_entry *fdb;

	if (!is_valid_ether_addr(addr))
		return -EINVAL;

	fdb = br_fdb_find(br, addr, vid);
	if (fdb) {
		/* it is okay to have multiple ports with same
		 * address, just use the first one.
		 */
		if (test_bit(BR_FDB_LOCAL, &fdb->flags))
			return 0;
		br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
			source ? source->dev->name : br->dev->name, addr, vid);
		fdb_delete(br, fdb, true);
	}

	fdb = fdb_create(br, source, addr, vid,
			 BIT(BR_FDB_LOCAL) | BIT(BR_FDB_STATIC));
	if (!fdb)
		return -ENOMEM;

	/* static entries are mirrored into the ports' HW filters */
	fdb_add_hw_addr(br, addr);
	fdb_notify(br, fdb, RTM_NEWNEIGH, true);
	return 0;
}
/* Port @p changed its MAC address to @newaddr: drop the old
 * auto-created local entries for the port and insert new ones for
 * vlan 0 plus every vlan configured on the port. Runs under RTNL.
 */
void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_fdb_entry *f;
	struct net_bridge *br = p->br;
	struct net_bridge_vlan *v;

	spin_lock_bh(&br->hash_lock);
	vg = nbp_vlan_group(p);
	hlist_for_each_entry(f, &br->fdb_list, fdb_node) {
		if (f->dst == p && test_bit(BR_FDB_LOCAL, &f->flags) &&
		    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) {
			/* delete old one */
			fdb_delete_local(br, p, f);

			/* if this port has no vlan information
			 * configured, we can safely be done at
			 * this point.
			 */
			if (!vg || !vg->num_vlans)
				goto insert;
		}
	}

insert:
	/* insert new address,  may fail if invalid address or dup. */
	fdb_add_local(br, p, newaddr, 0);

	if (!vg || !vg->num_vlans)
		goto done;
	/* Now add entries for every VLAN configured on the port.
	 * This function runs under RTNL so the bitmap will not change
	 * from under us.
	 */
	list_for_each_entry(v, &vg->vlan_list, vlist)
		fdb_add_local(br, p, newaddr, v->vid);

done:
	spin_unlock_bh(&br->hash_lock);
}
/* The bridge device itself changed MAC: replace the bridge-local
 * (dst == NULL) entries for vlan 0 and every vlan used by the bridge.
 * Runs under RTNL.
 */
void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_fdb_entry *f;
	struct net_bridge_vlan *v;

	spin_lock_bh(&br->hash_lock);

	/* If old entry was unassociated with any port, then delete it. */
	f = br_fdb_find(br, br->dev->dev_addr, 0);
	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
	    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
		fdb_delete_local(br, NULL, f);

	fdb_add_local(br, NULL, newaddr, 0);
	vg = br_vlan_group(br);
	if (!vg || !vg->num_vlans)
		goto out;
	/* Now remove and add entries for every VLAN configured on the
	 * bridge.  This function runs under RTNL so the bitmap will not
	 * change from under us.
	 */
	list_for_each_entry(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;
		f = br_fdb_find(br, br->dev->dev_addr, v->vid);
		if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
		    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
			fdb_delete_local(br, NULL, f);
		fdb_add_local(br, NULL, newaddr, v->vid);
	}
out:
	spin_unlock_bh(&br->hash_lock);
}
/* Delayed-work garbage collector: expire aged-out dynamic entries and
 * emit inactivity notifications for entries with BR_FDB_NOTIFY set,
 * then re-arm itself for the next soonest deadline.
 */
void br_fdb_cleanup(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     gc_work.work);
	struct net_bridge_fdb_entry *f = NULL;
	unsigned long delay = hold_time(br);
	unsigned long work_delay = delay;
	unsigned long now = jiffies;

	/* this part is tricky, in order to avoid blocking learning and
	 * consequently forwarding, we rely on rcu to delete objects with
	 * delayed freeing allowing us to continue traversing
	 */
	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		unsigned long this_timer = f->updated + delay;

		/* static/externally learned entries never age out, but
		 * may still need an "inactive" activity notification
		 */
		if (test_bit(BR_FDB_STATIC, &f->flags) ||
		    test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags)) {
			if (test_bit(BR_FDB_NOTIFY, &f->flags)) {
				if (time_after(this_timer, now))
					work_delay = min(work_delay,
							 this_timer - now);
				else if (!test_and_set_bit(BR_FDB_NOTIFY_INACTIVE,
							   &f->flags))
					fdb_notify(br, f, RTM_NEWNEIGH, false);
			}
			continue;
		}

		if (time_after(this_timer, now)) {
			work_delay = min(work_delay, this_timer - now);
		} else {
			spin_lock_bh(&br->hash_lock);
			/* re-check under the lock: a concurrent deleter
			 * may have unhashed the entry already
			 */
			if (!hlist_unhashed(&f->fdb_node))
				fdb_delete(br, f, true);
			spin_unlock_bh(&br->hash_lock);
		}
	}
	rcu_read_unlock();

	/* Cleanup minimum 10 milliseconds apart */
	work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10));
	mod_delayed_work(system_long_wq, &br->gc_work, work_delay);
}
  496. static bool __fdb_flush_matches(const struct net_bridge *br,
  497. const struct net_bridge_fdb_entry *f,
  498. const struct net_bridge_fdb_flush_desc *desc)
  499. {
  500. const struct net_bridge_port *dst = READ_ONCE(f->dst);
  501. int port_ifidx = dst ? dst->dev->ifindex : br->dev->ifindex;
  502. if (desc->vlan_id && desc->vlan_id != f->key.vlan_id)
  503. return false;
  504. if (desc->port_ifindex && desc->port_ifindex != port_ifidx)
  505. return false;
  506. if (desc->flags_mask && (f->flags & desc->flags_mask) != desc->flags)
  507. return false;
  508. return true;
  509. }
/* Flush forwarding database entries matching the description */
void br_fdb_flush(struct net_bridge *br,
		  const struct net_bridge_fdb_flush_desc *desc)
{
	struct net_bridge_fdb_entry *f;

	/* walk locklessly under RCU; take hash_lock only per-entry so
	 * learning is not blocked for the whole flush
	 */
	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		if (!__fdb_flush_matches(br, f, desc))
			continue;

		spin_lock_bh(&br->hash_lock);
		/* re-check under the lock against concurrent deletion */
		if (!hlist_unhashed(&f->fdb_node))
			fdb_delete(br, f, true);
		spin_unlock_bh(&br->hash_lock);
	}
	rcu_read_unlock();
}
  526. static unsigned long __ndm_state_to_fdb_flags(u16 ndm_state)
  527. {
  528. unsigned long flags = 0;
  529. if (ndm_state & NUD_PERMANENT)
  530. __set_bit(BR_FDB_LOCAL, &flags);
  531. if (ndm_state & NUD_NOARP)
  532. __set_bit(BR_FDB_STATIC, &flags);
  533. return flags;
  534. }
  535. static unsigned long __ndm_flags_to_fdb_flags(u8 ndm_flags)
  536. {
  537. unsigned long flags = 0;
  538. if (ndm_flags & NTF_USE)
  539. __set_bit(BR_FDB_ADDED_BY_USER, &flags);
  540. if (ndm_flags & NTF_EXT_LEARNED)
  541. __set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &flags);
  542. if (ndm_flags & NTF_OFFLOADED)
  543. __set_bit(BR_FDB_OFFLOADED, &flags);
  544. if (ndm_flags & NTF_STICKY)
  545. __set_bit(BR_FDB_STICKY, &flags);
  546. return flags;
  547. }
/* Validate the NDA_IFINDEX given to a bulk flush: it must name either
 * this bridge device or one of this bridge's ports. Runs under RTNL
 * (required for __dev_get_by_index() and br_port_get_rtnl()).
 */
static int __fdb_flush_validate_ifindex(const struct net_bridge *br,
					int ifindex,
					struct netlink_ext_ack *extack)
{
	const struct net_device *dev;

	dev = __dev_get_by_index(dev_net(br->dev), ifindex);
	if (!dev) {
		NL_SET_ERR_MSG_MOD(extack, "Unknown flush device ifindex");
		return -ENODEV;
	}
	if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Flush device is not a bridge or bridge port");
		return -EINVAL;
	}
	if (netif_is_bridge_master(dev) && dev != br->dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Flush bridge device does not match target bridge device");
		return -EINVAL;
	}
	if (netif_is_bridge_port(dev)) {
		struct net_bridge_port *p = br_port_get_rtnl(dev);

		if (p->br != br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
	}

	return 0;
}
/* netlink attribute policy for bulk FDB delete (RTM_DELNEIGH with
 * NLM_F_BULK); vlan range excludes 0 and the reserved 4095.
 */
static const struct nla_policy br_fdb_del_bulk_policy[NDA_MAX + 1] = {
	[NDA_VLAN]	= NLA_POLICY_RANGE(NLA_U16, 1, VLAN_N_VID - 2),
	[NDA_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 1),
	[NDA_NDM_STATE_MASK] = { .type = NLA_U16 },
	[NDA_NDM_FLAGS_MASK] = { .type = NLA_U8 },
};
/* Handle a bulk FDB delete request: translate the ndmsg state/flags
 * and optional attributes into a flush descriptor and run the flush.
 * @dev may be the bridge itself or one of its ports. Runs under RTNL.
 */
int br_fdb_delete_bulk(struct nlmsghdr *nlh, struct net_device *dev,
		       struct netlink_ext_ack *extack)
{
	struct net_bridge_fdb_flush_desc desc = {};
	struct ndmsg *ndm = nlmsg_data(nlh);
	struct net_bridge_port *p = NULL;
	struct nlattr *tb[NDA_MAX + 1];
	struct net_bridge *br;
	u8 ndm_flags;
	int err;

	/* some ndm flag bits are accepted but deliberately ignored */
	ndm_flags = ndm->ndm_flags & ~FDB_FLUSH_IGNORED_NDM_FLAGS;

	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX,
			  br_fdb_del_bulk_policy, extack);
	if (err)
		return err;

	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
	} else {
		p = br_port_get_rtnl(dev);
		if (!p) {
			NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge port");
			return -EINVAL;
		}
		br = p->br;
	}

	if (tb[NDA_VLAN])
		desc.vlan_id = nla_get_u16(tb[NDA_VLAN]);

	if (ndm_flags & ~FDB_FLUSH_ALLOWED_NDM_FLAGS) {
		NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm flag bits set");
		return -EINVAL;
	}
	if (ndm->ndm_state & ~FDB_FLUSH_ALLOWED_NDM_STATES) {
		NL_SET_ERR_MSG(extack, "Unsupported fdb flush ndm state bits set");
		return -EINVAL;
	}

	desc.flags |= __ndm_state_to_fdb_flags(ndm->ndm_state);
	desc.flags |= __ndm_flags_to_fdb_flags(ndm_flags);
	if (tb[NDA_NDM_STATE_MASK]) {
		u16 ndm_state_mask = nla_get_u16(tb[NDA_NDM_STATE_MASK]);

		desc.flags_mask |= __ndm_state_to_fdb_flags(ndm_state_mask);
	}
	if (tb[NDA_NDM_FLAGS_MASK]) {
		u8 ndm_flags_mask = nla_get_u8(tb[NDA_NDM_FLAGS_MASK]);

		desc.flags_mask |= __ndm_flags_to_fdb_flags(ndm_flags_mask);
	}
	if (tb[NDA_IFINDEX]) {
		int ifidx = nla_get_s32(tb[NDA_IFINDEX]);

		err = __fdb_flush_validate_ifindex(br, ifidx, extack);
		if (err)
			return err;
		desc.port_ifindex = ifidx;
	} else if (p) {
		/* flush was invoked with port device and NTF_MASTER */
		desc.port_ifindex = p->dev->ifindex;
	}

	br_debug(br, "flushing port ifindex: %d vlan id: %u flags: 0x%lx flags mask: 0x%lx\n",
		 desc.port_ifindex, desc.vlan_id, desc.flags, desc.flags_mask);

	br_fdb_flush(br, &desc);

	return 0;
}
/* Flush all entries referring to a specific port.
 * if do_all is set also flush static entries
 * if vid is set delete all entries that match the vlan_id
 */
void br_fdb_delete_by_port(struct net_bridge *br,
			   const struct net_bridge_port *p,
			   u16 vid,
			   int do_all)
{
	struct net_bridge_fdb_entry *f;
	struct hlist_node *tmp;

	spin_lock_bh(&br->hash_lock);
	/* _safe variant: fdb_delete() unlinks the current node */
	hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
		if (f->dst != p)
			continue;

		if (!do_all)
			/* keep static entries, non-offloaded external
			 * entries, and (with vid set) other vlans
			 */
			if (test_bit(BR_FDB_STATIC, &f->flags) ||
			    (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags) &&
			     !test_bit(BR_FDB_OFFLOADED, &f->flags)) ||
			    (vid && f->key.vlan_id != vid))
				continue;

		if (test_bit(BR_FDB_LOCAL, &f->flags))
			fdb_delete_local(br, p, f);
		else
			fdb_delete(br, f, true);
	}
	spin_unlock_bh(&br->hash_lock);
}
#if IS_ENABLED(CONFIG_ATM_LANE)
/* Interface used by ATM LANE hook to test
 * if an addr is on some other bridge port */
int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
{
	struct net_bridge_fdb_entry *fdb;
	struct net_bridge_port *port;
	int ret;

	rcu_read_lock();
	port = br_port_get_rcu(dev);
	if (!port)
		ret = 0;
	else {
		const struct net_bridge_port *dst = NULL;

		/* lookup is vlan-unaware (vid 0) */
		fdb = br_fdb_find_rcu(port->br, addr, 0);
		if (fdb)
			dst = READ_ONCE(fdb->dst);

		/* true only if the address lives on a *different*
		 * forwarding port of the same bridge
		 */
		ret = dst && dst->dev != dev &&
		      dst->state == BR_STATE_FORWARDING;
	}
	rcu_read_unlock();

	return ret;
}
#endif /* CONFIG_ATM_LANE */
/*
 * Fill buffer with forwarding table records in
 * the API format.
 */
int br_fdb_fillbuf(struct net_bridge *br, void *buf,
		   unsigned long maxnum, unsigned long skip)
{
	struct net_bridge_fdb_entry *f;
	struct __fdb_entry *fe = buf;
	int num = 0;

	memset(buf, 0, maxnum*sizeof(struct __fdb_entry));

	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		if (num >= maxnum)
			break;

		if (has_expired(br, f))
			continue;

		/* ignore pseudo entry for local MAC address */
		if (!f->dst)
			continue;

		/* @skip counts matching entries, not list positions */
		if (skip) {
			--skip;
			continue;
		}

		/* convert from internal format to API */
		memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN);

		/* due to ABI compat need to split into hi/lo */
		fe->port_no = f->dst->port_no;
		fe->port_hi = f->dst->port_no >> 8;

		fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
		if (!test_bit(BR_FDB_STATIC, &f->flags))
			fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
		++fe;
		++num;
	}
	rcu_read_unlock();

	return num;
}
/* Add entry for local address of interface */
int br_fdb_add_local(struct net_bridge *br, struct net_bridge_port *source,
		     const unsigned char *addr, u16 vid)
{
	int ret;

	/* locked wrapper around fdb_add_local() */
	spin_lock_bh(&br->hash_lock);
	ret = fdb_add_local(br, source, addr, vid);
	spin_unlock_bh(&br->hash_lock);
	return ret;
}
  742. /* returns true if the fdb was modified */
  743. static bool __fdb_mark_active(struct net_bridge_fdb_entry *fdb)
  744. {
  745. return !!(test_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags) &&
  746. test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags));
  747. }
/* Learn or refresh the entry for @addr/@vid seen on @source.
 * Runs lockless under RCU (see the NTF_USE path in __br_fdb_add());
 * the hash lock is only taken when a brand new entry must be created.
 */
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
		   const unsigned char *addr, u16 vid, unsigned long flags)
{
	struct net_bridge_fdb_entry *fdb;

	/* some users want to always flood. */
	if (hold_time(br) == 0)
		return;

	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
	if (likely(fdb)) {
		/* attempt to update an entry for a local interface */
		if (unlikely(test_bit(BR_FDB_LOCAL, &fdb->flags))) {
			if (net_ratelimit())
				br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n",
					source->dev->name, addr, vid);
		} else {
			unsigned long now = jiffies;
			bool fdb_modified = false;

			/* only touch ->updated when it would change, to
			 * avoid dirtying the cacheline on every packet
			 */
			if (now != fdb->updated) {
				fdb->updated = now;
				fdb_modified = __fdb_mark_active(fdb);
			}

			/* fastpath: update of existing entry */
			if (unlikely(source != READ_ONCE(fdb->dst) &&
				     !test_bit(BR_FDB_STICKY, &fdb->flags))) {
				/* station moved ports: tell switchdev the old
				 * location is gone before repointing ->dst
				 */
				br_switchdev_fdb_notify(br, fdb, RTM_DELNEIGH);
				WRITE_ONCE(fdb->dst, source);
				fdb_modified = true;

				/* Take over HW learned entry */
				if (unlikely(test_bit(BR_FDB_ADDED_BY_EXT_LEARN,
						      &fdb->flags)))
					clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
						  &fdb->flags);

				/* Clear locked flag when roaming to an
				 * unlocked port.
				 */
				if (unlikely(test_bit(BR_FDB_LOCKED, &fdb->flags)))
					clear_bit(BR_FDB_LOCKED, &fdb->flags);
			}

			if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags))) {
				set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
				/* user-owned entries stop counting against
				 * the learned-entry limit
				 */
				if (test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED,
						       &fdb->flags))
					atomic_dec(&br->fdb_n_learned);
			}
			if (unlikely(fdb_modified)) {
				trace_br_fdb_update(br, source, addr, vid, flags);
				fdb_notify(br, fdb, RTM_NEWNEIGH, true);
			}
		}
	} else {
		spin_lock(&br->hash_lock);
		fdb = fdb_create(br, source, addr, vid, flags);
		if (fdb) {
			trace_br_fdb_update(br, source, addr, vid, flags);
			fdb_notify(br, fdb, RTM_NEWNEIGH, true);
		}
		/* else we lose race and someone else inserts
		 * it first, don't bother updating
		 */
		spin_unlock(&br->hash_lock);
	}
}
/* Dump information about entries, in response to GETNEIGH.
 * @idx is the resume cursor shared with the netlink dump machinery;
 * cb->args[2] holds the index to resume from.
 */
int br_fdb_dump(struct sk_buff *skb,
		struct netlink_callback *cb,
		struct net_device *dev,
		struct net_device *filter_dev,
		int *idx)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_fdb_entry *f;
	int err = 0;

	/* only the bridge master device owns the fdb table */
	if (!netif_is_bridge_master(dev))
		return err;

	if (!filter_dev) {
		/* also dump the device's own uc/mc addresses */
		err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
		if (err < 0)
			return err;
	}

	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		/* fast-forward past entries dumped in a previous call */
		if (*idx < cb->args[2])
			goto skip;
		if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) {
			if (filter_dev != dev)
				goto skip;
			/* !f->dst is a special case for bridge
			 * It means the MAC belongs to the bridge
			 * Therefore need a little more filtering
			 * we only want to dump the !f->dst case
			 */
			if (f->dst)
				goto skip;
		}
		/* no filter: skip per-port entries here — NOTE(review):
		 * presumably they are produced when the ports themselves
		 * are dumped; confirm against rtnl_fdb_dump()
		 */
		if (!filter_dev && f->dst)
			goto skip;

		err = fdb_fill_info(skb, br, f,
				    NETLINK_CB(cb->skb).portid,
				    cb->nlh->nlmsg_seq,
				    RTM_NEWNEIGH,
				    NLM_F_MULTI);
		if (err < 0)
			break;
skip:
		*idx += 1;
	}
	rcu_read_unlock();

	return err;
}
  857. int br_fdb_get(struct sk_buff *skb,
  858. struct nlattr *tb[],
  859. struct net_device *dev,
  860. const unsigned char *addr,
  861. u16 vid, u32 portid, u32 seq,
  862. struct netlink_ext_ack *extack)
  863. {
  864. struct net_bridge *br = netdev_priv(dev);
  865. struct net_bridge_fdb_entry *f;
  866. int err = 0;
  867. rcu_read_lock();
  868. f = br_fdb_find_rcu(br, addr, vid);
  869. if (!f) {
  870. NL_SET_ERR_MSG(extack, "Fdb entry not found");
  871. err = -ENOENT;
  872. goto errout;
  873. }
  874. err = fdb_fill_info(skb, br, f, portid, seq,
  875. RTM_NEWNEIGH, 0);
  876. errout:
  877. rcu_read_unlock();
  878. return err;
  879. }
  880. /* returns true if the fdb is modified */
  881. static bool fdb_handle_notify(struct net_bridge_fdb_entry *fdb, u8 notify)
  882. {
  883. bool modified = false;
  884. /* allow to mark an entry as inactive, usually done on creation */
  885. if ((notify & FDB_NOTIFY_INACTIVE_BIT) &&
  886. !test_and_set_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags))
  887. modified = true;
  888. if ((notify & FDB_NOTIFY_BIT) &&
  889. !test_and_set_bit(BR_FDB_NOTIFY, &fdb->flags)) {
  890. /* enabled activity tracking */
  891. modified = true;
  892. } else if (!(notify & FDB_NOTIFY_BIT) &&
  893. test_and_clear_bit(BR_FDB_NOTIFY, &fdb->flags)) {
  894. /* disabled activity tracking, clear notify state */
  895. clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags);
  896. modified = true;
  897. }
  898. return modified;
  899. }
/* Update (create or replace) forwarding database entry.
 * Called with br->hash_lock held (see __br_fdb_add()).
 * Returns 0 on success or a negative errno.
 */
static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
			 const u8 *addr, struct ndmsg *ndm, u16 flags, u16 vid,
			 struct nlattr *nfea_tb[])
{
	bool is_sticky = !!(ndm->ndm_flags & NTF_STICKY);
	bool refresh = !nfea_tb[NFEA_DONT_REFRESH];
	struct net_bridge_fdb_entry *fdb;
	u16 state = ndm->ndm_state;
	bool modified = false;
	u8 notify = 0;

	/* If the port cannot learn allow only local and static entries */
	if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
	    !(source->state == BR_STATE_LEARNING ||
	      source->state == BR_STATE_FORWARDING))
		return -EPERM;

	/* entries pointing at the bridge itself (no source port) must be
	 * permanent
	 */
	if (!source && !(state & NUD_PERMANENT)) {
		pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
			br->dev->name);
		return -EINVAL;
	}

	/* sticky only makes sense for non-permanent entries */
	if (is_sticky && (state & NUD_PERMANENT))
		return -EINVAL;

	if (nfea_tb[NFEA_ACTIVITY_NOTIFY]) {
		notify = nla_get_u8(nfea_tb[NFEA_ACTIVITY_NOTIFY]);
		/* reject unknown bits, and "inactive" without "notify" */
		if ((notify & ~BR_FDB_NOTIFY_SETTABLE_BITS) ||
		    (notify & BR_FDB_NOTIFY_SETTABLE_BITS) == FDB_NOTIFY_INACTIVE_BIT)
			return -EINVAL;
	}

	fdb = br_fdb_find(br, addr, vid);
	if (fdb == NULL) {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		fdb = fdb_create(br, source, addr, vid,
				 BIT(BR_FDB_ADDED_BY_USER));
		if (!fdb)
			return -ENOMEM;

		modified = true;
	} else {
		if (flags & NLM_F_EXCL)
			return -EEXIST;

		if (READ_ONCE(fdb->dst) != source) {
			WRITE_ONCE(fdb->dst, source);
			modified = true;
		}

		set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
		/* user-owned entries no longer count against the
		 * dynamically-learned entry limit
		 */
		if (test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &fdb->flags))
			atomic_dec(&br->fdb_n_learned);
	}

	/* translate the requested NUD state into LOCAL/STATIC flags and
	 * keep the device's HW address list in sync with STATIC changes
	 */
	if (fdb_to_nud(br, fdb) != state) {
		if (state & NUD_PERMANENT) {
			set_bit(BR_FDB_LOCAL, &fdb->flags);
			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
				fdb_add_hw_addr(br, addr);
		} else if (state & NUD_NOARP) {
			clear_bit(BR_FDB_LOCAL, &fdb->flags);
			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
				fdb_add_hw_addr(br, addr);
		} else {
			clear_bit(BR_FDB_LOCAL, &fdb->flags);
			if (test_and_clear_bit(BR_FDB_STATIC, &fdb->flags))
				fdb_del_hw_addr(br, addr);
		}

		modified = true;
	}

	if (is_sticky != test_bit(BR_FDB_STICKY, &fdb->flags)) {
		change_bit(BR_FDB_STICKY, &fdb->flags);
		modified = true;
	}

	/* a user update of a locked entry releases the lock */
	if (test_and_clear_bit(BR_FDB_LOCKED, &fdb->flags))
		modified = true;

	if (fdb_handle_notify(fdb, notify))
		modified = true;

	fdb->used = jiffies;
	if (modified) {
		/* NFEA_DONT_REFRESH suppresses bumping the ageing clock */
		if (refresh)
			fdb->updated = jiffies;
		fdb_notify(br, fdb, RTM_NEWNEIGH, true);
	}

	return 0;
}
/* Dispatch one RTM_NEWNEIGH request for a single VLAN to the right
 * handler, based on the ndm flags.
 */
static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
			struct net_bridge_port *p, const unsigned char *addr,
			u16 nlh_flags, u16 vid, struct nlattr *nfea_tb[],
			struct netlink_ext_ack *extack)
{
	int err = 0;

	if (ndm->ndm_flags & NTF_USE) {
		/* NTF_USE: learn/refresh as if a packet with this source
		 * address had arrived on the port
		 */
		if (!p) {
			pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n",
				br->dev->name);
			return -EINVAL;
		}
		if (!nbp_state_should_learn(p))
			return 0;

		/* emulate the fast-path context br_fdb_update() runs in:
		 * BHs off and inside an RCU read-side section
		 */
		local_bh_disable();
		rcu_read_lock();
		br_fdb_update(br, p, addr, vid, BIT(BR_FDB_ADDED_BY_USER));
		rcu_read_unlock();
		local_bh_enable();
	} else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
		/* externally learned entry (e.g. installed by a driver) */
		if (!p && !(ndm->ndm_state & NUD_PERMANENT)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "FDB entry towards bridge must be permanent");
			return -EINVAL;
		}
		err = br_fdb_external_learn_add(br, p, addr, vid, false, true);
	} else {
		/* regular user-configured entry */
		spin_lock_bh(&br->hash_lock);
		err = fdb_add_entry(br, p, addr, ndm, nlh_flags, vid, nfea_tb);
		spin_unlock_bh(&br->hash_lock);
	}

	return err;
}
/* netlink policy for the NDA_FDB_EXT_ATTRS nested attributes */
static const struct nla_policy br_nda_fdb_pol[NFEA_MAX + 1] = {
	[NFEA_ACTIVITY_NOTIFY]	= { .type = NLA_U8 },
	[NFEA_DONT_REFRESH]	= { .type = NLA_FLAG },
};
/* Add new permanent fdb entry with RTM_NEWNEIGH.
 * @dev may be the bridge itself or one of its ports; when no VLAN is
 * given and VLANs are configured, the entry is installed in every
 * usable VLAN on the port.
 */
int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
	       struct net_device *dev,
	       const unsigned char *addr, u16 vid, u16 nlh_flags,
	       struct netlink_ext_ack *extack)
{
	struct nlattr *nfea_tb[NFEA_MAX + 1], *attr;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan *v;
	struct net_bridge *br = NULL;
	u32 ext_flags = 0;
	int err = 0;

	trace_br_fdb_add(ndm, dev, addr, vid, nlh_flags);

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
		pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
		return -EINVAL;
	}

	if (is_zero_ether_addr(addr)) {
		pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
		return -EINVAL;
	}

	/* resolve bridge and vlan group from either the master device or
	 * one of its ports
	 */
	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (!p) {
			pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
				dev->name);
			return -EINVAL;
		}
		br = p->br;
		vg = nbp_vlan_group(p);
	}

	if (tb[NDA_FLAGS_EXT])
		ext_flags = nla_get_u32(tb[NDA_FLAGS_EXT]);

	/* locked entries can only be created via MAB, not by the user */
	if (ext_flags & NTF_EXT_LOCKED) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add FDB entry with \"locked\" flag set");
		return -EINVAL;
	}

	if (tb[NDA_FDB_EXT_ATTRS]) {
		attr = tb[NDA_FDB_EXT_ATTRS];
		err = nla_parse_nested(nfea_tb, NFEA_MAX, attr,
				       br_nda_fdb_pol, extack);
		if (err)
			return err;
	} else {
		memset(nfea_tb, 0, sizeof(struct nlattr *) * (NFEA_MAX + 1));
	}

	if (vid) {
		v = br_vlan_find(vg, vid);
		if (!v || !br_vlan_should_use(v)) {
			pr_info("bridge: RTM_NEWNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
			return -EINVAL;
		}

		/* VID was specified, so use it. */
		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid, nfea_tb,
				   extack);
	} else {
		/* vid 0 == untagged/no-vlan entry */
		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0, nfea_tb,
				   extack);
		if (err || !vg || !vg->num_vlans)
			goto out;

		/* We have vlans configured on this port and user didn't
		 * specify a VLAN.  To be nice, add/update entry for every
		 * vlan on this port.
		 */
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			if (!br_vlan_should_use(v))
				continue;
			err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid,
					   nfea_tb, extack);
			if (err)
				goto out;
		}
	}

out:
	return err;
}
  1098. static int fdb_delete_by_addr_and_port(struct net_bridge *br,
  1099. const struct net_bridge_port *p,
  1100. const u8 *addr, u16 vlan)
  1101. {
  1102. struct net_bridge_fdb_entry *fdb;
  1103. fdb = br_fdb_find(br, addr, vlan);
  1104. if (!fdb || READ_ONCE(fdb->dst) != p)
  1105. return -ENOENT;
  1106. fdb_delete(br, fdb, true);
  1107. return 0;
  1108. }
/* Locked wrapper around fdb_delete_by_addr_and_port() for one VLAN. */
static int __br_fdb_delete(struct net_bridge *br,
			   const struct net_bridge_port *p,
			   const unsigned char *addr, u16 vid)
{
	int err;

	spin_lock_bh(&br->hash_lock);
	err = fdb_delete_by_addr_and_port(br, p, addr, vid);
	spin_unlock_bh(&br->hash_lock);

	return err;
}
/* Remove neighbor entry with RTM_DELNEIGH.
 * With no VLAN given, attempts the delete in vid 0 and in every usable
 * configured VLAN; the overall result is success if any delete succeeds.
 */
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
		  struct net_device *dev,
		  const unsigned char *addr, u16 vid,
		  struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	if (netif_is_bridge_master(dev)) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (!p) {
			pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
				dev->name);
			return -EINVAL;
		}
		vg = nbp_vlan_group(p);
		br = p->br;
	}

	if (vid) {
		v = br_vlan_find(vg, vid);
		if (!v) {
			pr_info("bridge: RTM_DELNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
			return -EINVAL;
		}

		err = __br_fdb_delete(br, p, addr, vid);
	} else {
		/* err accumulates by bitwise AND: it starts as -ENOENT
		 * (all bits set) and becomes 0 as soon as any per-vlan
		 * delete returns 0, so "at least one deleted" == success
		 */
		err = -ENOENT;
		err &= __br_fdb_delete(br, p, addr, 0);
		if (!vg || !vg->num_vlans)
			return err;

		list_for_each_entry(v, &vg->vlan_list, vlist) {
			if (!br_vlan_should_use(v))
				continue;
			err &= __br_fdb_delete(br, p, addr, v->vid);
		}
	}

	return err;
}
/* Push all static fdb addresses into @p's device unicast filter.
 * On failure, roll back the addresses added so far and return the error.
 */
int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
{
	struct net_bridge_fdb_entry *f, *tmp;
	int err = 0;

	ASSERT_RTNL();

	/* the key here is that static entries change only under rtnl */
	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		/* We only care for static entries */
		if (!test_bit(BR_FDB_STATIC, &f->flags))
			continue;
		err = dev_uc_add(p->dev, f->key.addr.addr);
		if (err)
			goto rollback;
	}
done:
	rcu_read_unlock();
	return err;

rollback:
	/* undo every successful dev_uc_add() up to (not including) the
	 * entry that failed; f still points at the failing entry
	 */
	hlist_for_each_entry_rcu(tmp, &br->fdb_list, fdb_node) {
		/* We only care for static entries */
		if (!test_bit(BR_FDB_STATIC, &tmp->flags))
			continue;
		if (tmp == f)
			break;
		dev_uc_del(p->dev, tmp->key.addr.addr);
	}
	goto done;
}
  1192. void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
  1193. {
  1194. struct net_bridge_fdb_entry *f;
  1195. ASSERT_RTNL();
  1196. rcu_read_lock();
  1197. hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
  1198. /* We only care for static entries */
  1199. if (!test_bit(BR_FDB_STATIC, &f->flags))
  1200. continue;
  1201. dev_uc_del(p->dev, f->key.addr.addr);
  1202. }
  1203. rcu_read_unlock();
  1204. }
/* Add or refresh an entry learned outside the bridge (e.g. by a
 * switchdev driver or via netlink NTF_EXT_LEARNED).
 * @locked entries require a port with MAB enabled.
 * @swdev_notify controls whether switchdev is notified in turn and
 * whether the entry is treated as user-added.
 */
int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
			      const unsigned char *addr, u16 vid, bool locked,
			      bool swdev_notify)
{
	struct net_bridge_fdb_entry *fdb;
	bool modified = false;
	int err = 0;

	trace_br_fdb_external_learn_add(br, p, addr, vid);

	/* locked entries only make sense on a MAB-enabled port */
	if (locked && (!p || !(p->flags & BR_PORT_MAB)))
		return -EINVAL;

	spin_lock_bh(&br->hash_lock);

	fdb = br_fdb_find(br, addr, vid);
	if (!fdb) {
		unsigned long flags = BIT(BR_FDB_ADDED_BY_EXT_LEARN);

		if (swdev_notify)
			flags |= BIT(BR_FDB_ADDED_BY_USER);

		/* no port means the address belongs to the bridge itself */
		if (!p)
			flags |= BIT(BR_FDB_LOCAL);

		if (locked)
			flags |= BIT(BR_FDB_LOCKED);

		fdb = fdb_create(br, p, addr, vid, flags);
		if (!fdb) {
			err = -ENOMEM;
			goto err_unlock;
		}
		fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
	} else {
		/* refusing to replace an existing unlocked (or roamed)
		 * entry with a locked one
		 */
		if (locked &&
		    (!test_bit(BR_FDB_LOCKED, &fdb->flags) ||
		     READ_ONCE(fdb->dst) != p)) {
			err = -EINVAL;
			goto err_unlock;
		}

		fdb->updated = jiffies;

		if (READ_ONCE(fdb->dst) != p) {
			WRITE_ONCE(fdb->dst, p);
			modified = true;
		}

		if (test_and_set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
			/* Refresh entry */
			fdb->used = jiffies;
		} else {
			modified = true;
		}

		if (locked != test_bit(BR_FDB_LOCKED, &fdb->flags)) {
			change_bit(BR_FDB_LOCKED, &fdb->flags);
			modified = true;
		}

		if (swdev_notify)
			set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);

		if (!p)
			set_bit(BR_FDB_LOCAL, &fdb->flags);

		/* user/bridge-owned entries don't count against the
		 * dynamically-learned entry limit
		 */
		if ((swdev_notify || !p) &&
		    test_and_clear_bit(BR_FDB_DYNAMIC_LEARNED, &fdb->flags))
			atomic_dec(&br->fdb_n_learned);

		if (modified)
			fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
	}

err_unlock:
	spin_unlock_bh(&br->hash_lock);

	return err;
}
  1267. int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
  1268. const unsigned char *addr, u16 vid,
  1269. bool swdev_notify)
  1270. {
  1271. struct net_bridge_fdb_entry *fdb;
  1272. int err = 0;
  1273. spin_lock_bh(&br->hash_lock);
  1274. fdb = br_fdb_find(br, addr, vid);
  1275. if (fdb && test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
  1276. fdb_delete(br, fdb, swdev_notify);
  1277. else
  1278. err = -ENOENT;
  1279. spin_unlock_bh(&br->hash_lock);
  1280. return err;
  1281. }
  1282. void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
  1283. const unsigned char *addr, u16 vid, bool offloaded)
  1284. {
  1285. struct net_bridge_fdb_entry *fdb;
  1286. spin_lock_bh(&br->hash_lock);
  1287. fdb = br_fdb_find(br, addr, vid);
  1288. if (fdb && offloaded != test_bit(BR_FDB_OFFLOADED, &fdb->flags))
  1289. change_bit(BR_FDB_OFFLOADED, &fdb->flags);
  1290. spin_unlock_bh(&br->hash_lock);
  1291. }
  1292. void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
  1293. {
  1294. struct net_bridge_fdb_entry *f;
  1295. struct net_bridge_port *p;
  1296. ASSERT_RTNL();
  1297. p = br_port_get_rtnl(dev);
  1298. if (!p)
  1299. return;
  1300. spin_lock_bh(&p->br->hash_lock);
  1301. hlist_for_each_entry(f, &p->br->fdb_list, fdb_node) {
  1302. if (f->dst == p && f->key.vlan_id == vid)
  1303. clear_bit(BR_FDB_OFFLOADED, &f->flags);
  1304. }
  1305. spin_unlock_bh(&p->br->hash_lock);
  1306. }
  1307. EXPORT_SYMBOL_GPL(br_fdb_clear_offload);