// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"
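/* Helpers to query a port's multicast router state: each reports the
 * remaining IPv4/IPv6 router timer value and returns true if the port
 * is currently linked into the corresponding router port list.
 */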
static bool
br_ip4_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
	*timer = br_timer_value(&pmctx->ip4_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip4_rlist);
}

static bool
br_ip6_rports_get_timer(struct net_bridge_mcast_port *pmctx,
			unsigned long *timer)
{
#if IS_ENABLED(CONFIG_IPV6)
	*timer = br_timer_value(&pmctx->ip6_mc_router_timer);
	return !hlist_unhashed(&pmctx->ip6_rlist);
#else
	*timer = 0;
	return false;
#endif
}

static size_t __br_rports_one_size(void)
{
	return nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PORT */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_TIMER */
	       nla_total_size(sizeof(u8)) +  /* MDBA_ROUTER_PATTR_TYPE */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET_TIMER */
	       nla_total_size(sizeof(u32)) + /* MDBA_ROUTER_PATTR_INET6_TIMER */
	       nla_total_size(sizeof(u32)); /* MDBA_ROUTER_PATTR_VID */
}

size_t br_rports_size(const struct net_bridge_mcast *brmctx)
{
	struct net_bridge_mcast_port *pmctx;
	size_t size = nla_total_size(0); /* MDBA_ROUTER */

	rcu_read_lock();
	hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
				 ip4_rlist)
		size += __br_rports_one_size();
#if IS_ENABLED(CONFIG_IPV6)
	hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
				 ip6_rlist)
		size += __br_rports_one_size();
#endif
	rcu_read_unlock();

	return size;
}
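/* Fill the MDBA_ROUTER nested attribute with one MDBA_ROUTER_PORT entry
 * per multicast router port of the given bridge (or per-vlan) context.
 * The bridge port list is walked under RCU.
 */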
int br_rports_fill_info(struct sk_buff *skb,
			const struct net_bridge_mcast *brmctx)
{
	u16 vid = brmctx->vlan ? brmctx->vlan->vid : 0;
	bool have_ip4_mc_rtr, have_ip6_mc_rtr;
	unsigned long ip4_timer, ip6_timer;
	struct nlattr *nest, *port_nest;
	struct net_bridge_port *p;

	if (!brmctx->multicast_router || !br_rports_have_mc_router(brmctx))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	list_for_each_entry_rcu(p, &brmctx->br->port_list, list) {
		struct net_bridge_mcast_port *pmctx;

		if (vid) {
			struct net_bridge_vlan *v;

			v = br_vlan_find(nbp_vlan_group(p), vid);
			if (!v)
				continue;
			pmctx = &v->port_mcast_ctx;
		} else {
			pmctx = &p->multicast_ctx;
		}

		have_ip4_mc_rtr = br_ip4_rports_get_timer(pmctx, &ip4_timer);
		have_ip6_mc_rtr = br_ip6_rports_get_timer(pmctx, &ip6_timer);

		if (!have_ip4_mc_rtr && !have_ip6_mc_rtr)
			continue;

		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;

		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				max(ip4_timer, ip6_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_ctx.multicast_router) ||
		    (have_ip4_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET_TIMER,
				 ip4_timer)) ||
		    (have_ip6_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET6_TIMER,
				 ip6_timer)) ||
		    (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid))) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}

		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
	if (flags & MDB_PG_FLAGS_FAST_LEAVE)
		e->flags |= MDB_FLAGS_FAST_LEAVE;
	if (flags & MDB_PG_FLAGS_STAR_EXCL)
		e->flags |= MDB_FLAGS_STAR_EXCL;
	if (flags & MDB_PG_FLAGS_BLOCKED)
		e->flags |= MDB_FLAGS_BLOCKED;
}
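/* Translate a user-space br_mdb_entry (plus an optional MDBE_ATTR_SOURCE
 * attribute) into the br_ip key used for MDB lookups. Non-IP protocols
 * are treated as L2 entries keyed by destination MAC.
 */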
static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
				 struct nlattr **mdb_attrs)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	switch (ip->proto) {
	case htons(ETH_P_IP):
		ip->dst.ip4 = entry->addr.u.ip4;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip->dst.ip6 = entry->addr.u.ip6;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#endif
	default:
		ether_addr_copy(ip->dst.mac_addr, entry->addr.u.mac_addr);
	}
}

static int __mdb_fill_srcs(struct sk_buff *skb,
			   struct net_bridge_port_group *p)
{
	struct net_bridge_group_src *ent;
	struct nlattr *nest, *nest_ent;

	if (hlist_empty(&p->src_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
	if (!nest)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(ent, &p->src_list, node,
				 lockdep_is_held(&p->key.port->br->multicast_lock)) {
		nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
		if (!nest_ent)
			goto out_cancel_err;
		switch (ent->addr.proto) {
		case htons(ETH_P_IP):
			if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					    ent->addr.src.ip4)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					     &ent->addr.src.ip6)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#endif
		default:
			nla_nest_cancel(skb, nest_ent);
			continue;
		}
		if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
				br_timer_value(&ent->timer))) {
			nla_nest_cancel(skb, nest_ent);
			goto out_cancel_err;
		}
		nla_nest_end(skb, nest_ent);
	}

	nla_nest_end(skb, nest);

	return 0;

out_cancel_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
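/* Dump a single MDBA_MDB_ENTRY_INFO attribute. @p is the port group to
 * dump, or NULL for the host-joined instance of the entry. Source lists
 * and filter mode are only dumped for IGMPv3/MLDv2 contexts.
 */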
static int __mdb_fill_info(struct sk_buff *skb,
			   struct net_bridge_mdb_entry *mp,
			   struct net_bridge_port_group *p)
{
	bool dump_srcs_mode = false;
	struct timer_list *mtimer;
	struct nlattr *nest_ent;
	struct br_mdb_entry e;
	u8 flags = 0;
	int ifindex;

	memset(&e, 0, sizeof(e));
	if (p) {
		ifindex = p->key.port->dev->ifindex;
		mtimer = &p->timer;
		flags = p->flags;
	} else {
		ifindex = mp->br->dev->ifindex;
		mtimer = &mp->timer;
	}

	__mdb_entry_fill_flags(&e, flags);
	e.ifindex = ifindex;
	e.vid = mp->addr.vid;
	if (mp->addr.proto == htons(ETH_P_IP)) {
		e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (mp->addr.proto == htons(ETH_P_IPV6)) {
		e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
	} else {
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
		e.state = MDB_PERMANENT;
	}
	e.addr.proto = mp->addr.proto;
	nest_ent = nla_nest_start_noflag(skb,
					 MDBA_MDB_ENTRY_INFO);
	if (!nest_ent)
		return -EMSGSIZE;

	if (nla_put_nohdr(skb, sizeof(e), &e) ||
	    nla_put_u32(skb,
			MDBA_MDB_EATTR_TIMER,
			br_timer_value(mtimer)))
		goto nest_err;

	switch (mp->addr.proto) {
	case htons(ETH_P_IP):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_igmp_version == 3);
		if (mp->addr.src.ip4) {
			if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
					    mp->addr.src.ip4))
				goto nest_err;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		dump_srcs_mode = !!(mp->br->multicast_ctx.multicast_mld_version == 2);
		if (!ipv6_addr_any(&mp->addr.src.ip6)) {
			if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
					     &mp->addr.src.ip6))
				goto nest_err;
			break;
		}
		break;
#endif
	default:
		ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
	}
	if (p) {
		if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
			goto nest_err;
		if (dump_srcs_mode &&
		    (__mdb_fill_srcs(skb, p) ||
		     nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
				p->filter_mode)))
			goto nest_err;
	}
	nla_nest_end(skb, nest_ent);

	return 0;

nest_err:
	nla_nest_cancel(skb, nest_ent);
	return -EMSGSIZE;
}
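/* Walk the bridge's MDB and fill entries into a dump skb, resuming from
 * the entry/port-group indexes that a previous partial dump stored in
 * cb->args[1] and cb->args[2].
 */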
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		if (!s_pidx && mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
			if (err) {
				nla_nest_cancel(skb, nest2);
				break;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (!p->key.port)
				continue;
			if (pidx < s_pidx)
				goto skip_pg;

			err = __mdb_fill_info(skb, mp, p);
			if (err) {
				nla_nest_end(skb, nest2);
				goto out;
			}
skip_pg:
			pidx++;
		}
		pidx = 0;
		s_pidx = 0;
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	cb->args[2] = pidx;
	nla_nest_end(skb, nest);
	return err;
}
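/* RTM_GETMDB dump handler: emits the MDB entries of the bridge followed
 * by its multicast router ports, under rcu_read_lock().
 */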
int br_mdb_dump(struct net_device *dev, struct sk_buff *skb,
		struct netlink_callback *cb)
{
	struct net_bridge *br = netdev_priv(dev);
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			cb->nlh->nlmsg_seq, RTM_GETMDB, sizeof(*bpm),
			NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->ifindex = dev->ifindex;

	rcu_read_lock();

	err = br_mdb_fill_info(skb, cb, dev);
	if (err)
		goto out;
	err = br_rports_fill_info(skb, &br->multicast_ctx);
	if (err)
		goto out;

out:
	rcu_read_unlock();
	nlmsg_end(skb, nlh);
	return err;
}

static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   int type)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (__mdb_fill_info(skb, mp, pg))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static size_t rtnl_mdb_nlmsg_pg_size(const struct net_bridge_port_group *pg)
{
	struct net_bridge_group_src *ent;
	size_t nlmsg_size, addr_size = 0;

	/* MDBA_MDB_ENTRY_INFO */
	nlmsg_size = nla_total_size(sizeof(struct br_mdb_entry)) +
		     /* MDBA_MDB_EATTR_TIMER */
		     nla_total_size(sizeof(u32));
	if (!pg)
		goto out;

	/* MDBA_MDB_EATTR_RTPROT */
	nlmsg_size += nla_total_size(sizeof(u8));

	switch (pg->key.addr.proto) {
	case htons(ETH_P_IP):
		/* MDBA_MDB_EATTR_SOURCE */
		if (pg->key.addr.src.ip4)
			nlmsg_size += nla_total_size(sizeof(__be32));
		if (pg->key.port->br->multicast_ctx.multicast_igmp_version == 2)
			goto out;
		addr_size = sizeof(__be32);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MDBA_MDB_EATTR_SOURCE */
		if (!ipv6_addr_any(&pg->key.addr.src.ip6))
			nlmsg_size += nla_total_size(sizeof(struct in6_addr));
		if (pg->key.port->br->multicast_ctx.multicast_mld_version == 1)
			goto out;
		addr_size = sizeof(struct in6_addr);
		break;
#endif
	}

	/* MDBA_MDB_EATTR_GROUP_MODE */
	nlmsg_size += nla_total_size(sizeof(u8));

	/* MDBA_MDB_EATTR_SRC_LIST nested attr */
	if (!hlist_empty(&pg->src_list))
		nlmsg_size += nla_total_size(0);

	hlist_for_each_entry(ent, &pg->src_list, node) {
		/* MDBA_MDB_SRCLIST_ENTRY nested attr +
		 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
		 */
		nlmsg_size += nla_total_size(0) +
			      nla_total_size(addr_size) +
			      nla_total_size(sizeof(u32));
	}

out:
	return nlmsg_size;
}

static size_t rtnl_mdb_nlmsg_size(const struct net_bridge_port_group *pg)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg)) +
	       /* MDBA_MDB */
	       nla_total_size(0) +
	       /* MDBA_MDB_ENTRY */
	       nla_total_size(0) +
	       /* Port group entry */
	       rtnl_mdb_nlmsg_pg_size(pg);
}
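/* Notify listeners on the RTNLGRP_MDB group (and switchdev drivers)
 * about an MDB entry change. @pg is NULL for host-joined entries.
 */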
void br_mdb_notify(struct net_device *dev,
		   struct net_bridge_mdb_entry *mp,
		   struct net_bridge_port_group *pg,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	br_switchdev_mdb_notify(dev, mp, pg, type);

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u16 vid, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct nlattr *nest, *port_nest;
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
	if (!port_nest)
		goto end;
	if (nla_put_nohdr(skb, sizeof(u32), &ifindex)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	if (vid && nla_put_u16(skb, MDBA_ROUTER_PATTR_VID, vid)) {
		nla_nest_cancel(skb, port_nest);
		goto end;
	}
	nla_nest_end(skb, port_nest);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32))
		+ nla_total_size(sizeof(u16));
}
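/* Notify userspace about a change in a port's multicast router state;
 * an ifindex of 0 is reported when @pmctx is NULL.
 */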
void br_rtr_notify(struct net_device *dev, struct net_bridge_mcast_port *pmctx,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;
	u16 vid;

	ifindex = pmctx ? pmctx->port->dev->ifindex : 0;
	vid = pmctx && br_multicast_port_ctx_is_vlan(pmctx) ? pmctx->vlan->vid :
							      0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, vid, 0, 0, type,
				      NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

static const struct nla_policy
br_mdbe_src_list_entry_pol[MDBE_SRCATTR_MAX + 1] = {
	[MDBE_SRCATTR_ADDRESS] = NLA_POLICY_RANGE(NLA_BINARY,
						  sizeof(struct in_addr),
						  sizeof(struct in6_addr)),
};

static const struct nla_policy
br_mdbe_src_list_pol[MDBE_SRC_LIST_MAX + 1] = {
	[MDBE_SRC_LIST_ENTRY] = NLA_POLICY_NESTED(br_mdbe_src_list_entry_pol),
};

static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
	[MDBE_ATTR_GROUP_MODE] = NLA_POLICY_RANGE(NLA_U8, MCAST_EXCLUDE,
						  MCAST_INCLUDE),
	[MDBE_ATTR_SRC_LIST] = NLA_POLICY_NESTED(br_mdbe_src_list_pol),
	[MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC),
};

static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
				struct netlink_ext_ack *extack)
{
	switch (proto) {
	case htons(ETH_P_IP):
		if (nla_len(attr) != sizeof(struct in_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
			return false;
		}
		if (ipv4_is_multicast(nla_get_in_addr(attr))) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
			return false;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr src;

		if (nla_len(attr) != sizeof(struct in6_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
			return false;
		}
		src = nla_get_in6_addr(attr);
		if (ipv6_addr_is_multicast(&src)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
			return false;
		}
		break;
	}
#endif
	default:
		NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
		return false;
	}

	return true;
}
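/* Pick the multicast context a new entry should be installed in: the
 * global bridge context, or the vlan's own context when per-vlan
 * multicast snooping is enabled.
 */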
static struct net_bridge_mcast *
__br_mdb_choose_context(struct net_bridge *br,
			const struct br_mdb_entry *entry,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_mcast *brmctx = NULL;
	struct net_bridge_vlan *v;

	if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		brmctx = &br->multicast_ctx;
		goto out;
	}

	if (!entry->vid) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add an entry without a vlan when vlan snooping is enabled");
		goto out;
	}

	v = br_vlan_find(br_vlan_group(br), entry->vid);
	if (!v) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan is not configured");
		goto out;
	}
	if (br_multicast_ctx_vlan_global_disabled(&v->br_mcast_ctx)) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan's multicast processing is disabled");
		goto out;
	}
	brmctx = &v->br_mcast_ctx;
out:
	return brmctx;
}

static int br_mdb_replace_group_sg(const struct br_mdb_config *cfg,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   struct net_bridge_mcast *brmctx,
				   unsigned char flags)
{
	unsigned long now = jiffies;

	pg->flags = flags;
	pg->rt_protocol = cfg->rt_protocol;
	if (!(flags & MDB_PG_FLAGS_PERMANENT) && !cfg->src_entry)
		mod_timer(&pg->timer,
			  now + brmctx->multicast_membership_interval);
	else
		del_timer(&pg->timer);

	br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);

	return 0;
}
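/* Add an (S, G) entry for a port, or update an existing one when
 * NLM_F_REPLACE is given. Port groups are kept ordered by port pointer
 * value, so insertion stops at the first smaller port.
 */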
static int br_mdb_add_group_sg(const struct br_mdb_config *cfg,
			       struct net_bridge_mdb_entry *mp,
			       struct net_bridge_mcast *brmctx,
			       unsigned char flags,
			       struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	unsigned long now = jiffies;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, cfg->br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == cfg->p) {
			if (!(cfg->nlflags & NLM_F_REPLACE)) {
				NL_SET_ERR_MSG_MOD(extack, "(S, G) group is already joined by port");
				return -EEXIST;
			}
			return br_mdb_replace_group_sg(cfg, mp, p, brmctx,
						       flags);
		}
		if ((unsigned long)p->key.port < (unsigned long)cfg->p)
			break;
	}

	p = br_multicast_new_port_group(cfg->p, &cfg->group, *pp, flags, NULL,
					MCAST_INCLUDE, cfg->rt_protocol, extack);
	if (unlikely(!p))
		return -ENOMEM;

	rcu_assign_pointer(*pp, p);
	if (!(flags & MDB_PG_FLAGS_PERMANENT) && !cfg->src_entry)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);
	br_mdb_notify(cfg->br->dev, mp, p, RTM_NEWMDB);

	/* All of (*, G) EXCLUDE ports need to be added to the new (S, G) for
	 * proper replication.
	 */
	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto)) {
		struct net_bridge_mdb_entry *star_mp;
		struct br_ip star_group;

		star_group = p->key.addr;
		memset(&star_group.src, 0, sizeof(star_group.src));
		star_mp = br_mdb_ip_get(cfg->br, &star_group);
		if (star_mp)
			br_multicast_sg_add_exclude_ports(star_mp, p);
	}

	return 0;
}

static int br_mdb_add_group_src_fwd(const struct br_mdb_config *cfg,
				    struct br_ip *src_ip,
				    struct net_bridge_mcast *brmctx,
				    struct netlink_ext_ack *extack)
{
	struct net_bridge_mdb_entry *sgmp;
	struct br_mdb_config sg_cfg;
	struct br_ip sg_ip;
	u8 flags = 0;

	sg_ip = cfg->group;
	sg_ip.src = src_ip->src;
	sgmp = br_multicast_new_group(cfg->br, &sg_ip);
	if (IS_ERR(sgmp)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to add (S, G) MDB entry");
		return PTR_ERR(sgmp);
	}

	if (cfg->entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;
	if (cfg->filter_mode == MCAST_EXCLUDE)
		flags |= MDB_PG_FLAGS_BLOCKED;

	memset(&sg_cfg, 0, sizeof(sg_cfg));
	sg_cfg.br = cfg->br;
	sg_cfg.p = cfg->p;
	sg_cfg.entry = cfg->entry;
	sg_cfg.group = sg_ip;
	sg_cfg.src_entry = true;
	sg_cfg.filter_mode = MCAST_INCLUDE;
	sg_cfg.rt_protocol = cfg->rt_protocol;
	sg_cfg.nlflags = cfg->nlflags;
	return br_mdb_add_group_sg(&sg_cfg, sgmp, brmctx, flags, extack);
}

static int br_mdb_add_group_src(const struct br_mdb_config *cfg,
				struct net_bridge_port_group *pg,
				struct net_bridge_mcast *brmctx,
				struct br_mdb_src_entry *src,
				struct netlink_ext_ack *extack)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	int err;

	ent = br_multicast_find_group_src(pg, &src->addr);
	if (!ent) {
		ent = br_multicast_new_group_src(pg, &src->addr);
		if (!ent) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to add new source entry");
			return -ENOSPC;
		}
	} else if (!(cfg->nlflags & NLM_F_REPLACE)) {
		NL_SET_ERR_MSG_MOD(extack, "Source entry already exists");
		return -EEXIST;
	}

	if (cfg->filter_mode == MCAST_INCLUDE &&
	    cfg->entry->state == MDB_TEMPORARY)
		mod_timer(&ent->timer, now + br_multicast_gmi(brmctx));
	else
		del_timer(&ent->timer);

	/* Install a (S, G) forwarding entry for the source. */
	err = br_mdb_add_group_src_fwd(cfg, &src->addr, brmctx, extack);
	if (err)
		goto err_del_sg;

	ent->flags = BR_SGRP_F_INSTALLED | BR_SGRP_F_USER_ADDED;

	return 0;

err_del_sg:
	__br_multicast_del_group_src(ent);
	return err;
}

static void br_mdb_del_group_src(struct net_bridge_port_group *pg,
				 struct br_mdb_src_entry *src)
{
	struct net_bridge_group_src *ent;

	ent = br_multicast_find_group_src(pg, &src->addr);
	if (WARN_ON_ONCE(!ent))
		return;
	br_multicast_del_group_src(ent, false);
}

static int br_mdb_add_group_srcs(const struct br_mdb_config *cfg,
				 struct net_bridge_port_group *pg,
				 struct net_bridge_mcast *brmctx,
				 struct netlink_ext_ack *extack)
{
	int i, err;

	for (i = 0; i < cfg->num_src_entries; i++) {
		err = br_mdb_add_group_src(cfg, pg, brmctx,
					   &cfg->src_entries[i], extack);
		if (err)
			goto err_del_group_srcs;
	}

	return 0;

err_del_group_srcs:
	for (i--; i >= 0; i--)
		br_mdb_del_group_src(pg, &cfg->src_entries[i]);
	return err;
}

static int br_mdb_replace_group_srcs(const struct br_mdb_config *cfg,
				     struct net_bridge_port_group *pg,
				     struct net_bridge_mcast *brmctx,
				     struct netlink_ext_ack *extack)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;
	int err;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	err = br_mdb_add_group_srcs(cfg, pg, brmctx, extack);
	if (err)
		goto err_clear_delete;

	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_DELETE)
			br_multicast_del_group_src(ent, false);
	}

	return 0;

err_clear_delete:
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_DELETE;
	return err;
}

static int br_mdb_replace_group_star_g(const struct br_mdb_config *cfg,
				       struct net_bridge_mdb_entry *mp,
				       struct net_bridge_port_group *pg,
				       struct net_bridge_mcast *brmctx,
				       unsigned char flags,
				       struct netlink_ext_ack *extack)
{
	unsigned long now = jiffies;
	int err;

	err = br_mdb_replace_group_srcs(cfg, pg, brmctx, extack);
	if (err)
		return err;

	pg->flags = flags;
	pg->filter_mode = cfg->filter_mode;
	pg->rt_protocol = cfg->rt_protocol;
	if (!(flags & MDB_PG_FLAGS_PERMANENT) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		mod_timer(&pg->timer,
			  now + brmctx->multicast_membership_interval);
	else
		del_timer(&pg->timer);

	br_mdb_notify(cfg->br->dev, mp, pg, RTM_NEWMDB);

	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto))
		br_multicast_star_g_handle_mode(pg, cfg->filter_mode);

	return 0;
}
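/* Add a (*, G) entry for a port, or update an existing one when
 * NLM_F_REPLACE is given, installing the user-supplied source list and
 * an (S, G) forwarding entry for each source.
 */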
static int br_mdb_add_group_star_g(const struct br_mdb_config *cfg,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_mcast *brmctx,
				   unsigned char flags,
				   struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	unsigned long now = jiffies;
	int err;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, cfg->br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == cfg->p) {
			if (!(cfg->nlflags & NLM_F_REPLACE)) {
				NL_SET_ERR_MSG_MOD(extack, "(*, G) group is already joined by port");
				return -EEXIST;
			}
			return br_mdb_replace_group_star_g(cfg, mp, p, brmctx,
							   flags, extack);
		}
		if ((unsigned long)p->key.port < (unsigned long)cfg->p)
			break;
	}

	p = br_multicast_new_port_group(cfg->p, &cfg->group, *pp, flags, NULL,
					cfg->filter_mode, cfg->rt_protocol,
					extack);
	if (unlikely(!p))
		return -ENOMEM;

	err = br_mdb_add_group_srcs(cfg, p, brmctx, extack);
	if (err)
		goto err_del_port_group;

	rcu_assign_pointer(*pp, p);
	if (!(flags & MDB_PG_FLAGS_PERMANENT) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);
	br_mdb_notify(cfg->br->dev, mp, p, RTM_NEWMDB);

	/* If we are adding a new EXCLUDE port group (*, G), it needs to be
	 * also added to all (S, G) entries for proper replication.
	 */
	if (br_multicast_should_handle_mode(brmctx, cfg->group.proto) &&
	    cfg->filter_mode == MCAST_EXCLUDE)
		br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);

	return 0;

err_del_port_group:
	br_multicast_del_port_group(p);
	return err;
}
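/* Common add path: host join when no port is given, otherwise dispatch
 * to the (*, G) or (S, G) handler. Called with multicast_lock held.
 */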
static int br_mdb_add_group(const struct br_mdb_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = cfg->entry;
	struct net_bridge_port *port = cfg->p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge *br = cfg->br;
	struct net_bridge_mcast *brmctx;
	struct br_ip group = cfg->group;
	unsigned char flags = 0;

	brmctx = __br_mdb_choose_context(br, entry, extack);
	if (!brmctx)
		return -EINVAL;

	mp = br_multicast_new_group(br, &group);
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	/* host join */
	if (!port) {
		if (mp->host_joined) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
			return -EEXIST;
		}

		br_multicast_host_join(brmctx, mp, false);
		br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);

		return 0;
	}

	if (entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;

	if (br_multicast_is_star_g(&group))
		return br_mdb_add_group_star_g(cfg, mp, brmctx, flags, extack);
	else
		return br_mdb_add_group_sg(cfg, mp, brmctx, flags, extack);
}

static int __br_mdb_add(const struct br_mdb_config *cfg,
			struct netlink_ext_ack *extack)
{
	int ret;

	spin_lock_bh(&cfg->br->multicast_lock);
	ret = br_mdb_add_group(cfg, extack);
	spin_unlock_bh(&cfg->br->multicast_lock);

	return ret;
}

static int br_mdb_config_src_entry_init(struct nlattr *src_entry,
					struct br_mdb_src_entry *src,
					__be16 proto,
					struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MDBE_SRCATTR_MAX + 1];
	int err;

	err = nla_parse_nested(tb, MDBE_SRCATTR_MAX, src_entry,
			       br_mdbe_src_list_entry_pol, extack);
	if (err)
		return err;

	if (NL_REQ_ATTR_CHECK(extack, src_entry, tb, MDBE_SRCATTR_ADDRESS))
		return -EINVAL;

	if (!is_valid_mdb_source(tb[MDBE_SRCATTR_ADDRESS], proto, extack))
		return -EINVAL;

	src->addr.proto = proto;
	nla_memcpy(&src->addr.src, tb[MDBE_SRCATTR_ADDRESS],
		   nla_len(tb[MDBE_SRCATTR_ADDRESS]));

	return 0;
}

static int br_mdb_config_src_list_init(struct nlattr *src_list,
				       struct br_mdb_config *cfg,
				       struct netlink_ext_ack *extack)
{
	struct nlattr *src_entry;
	int rem, err;
	int i = 0;

	nla_for_each_nested(src_entry, src_list, rem)
		cfg->num_src_entries++;

	if (cfg->num_src_entries >= PG_SRC_ENT_LIMIT) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "Exceeded maximum number of source entries (%u)",
				       PG_SRC_ENT_LIMIT - 1);
		return -EINVAL;
	}

	cfg->src_entries = kcalloc(cfg->num_src_entries,
				   sizeof(struct br_mdb_src_entry), GFP_KERNEL);
	if (!cfg->src_entries)
		return -ENOMEM;

	nla_for_each_nested(src_entry, src_list, rem) {
		err = br_mdb_config_src_entry_init(src_entry,
						   &cfg->src_entries[i],
						   cfg->entry->addr.proto,
						   extack);
		if (err)
			goto err_src_entry_init;
		i++;
	}

	return 0;

err_src_entry_init:
	kfree(cfg->src_entries);
	return err;
}

static void br_mdb_config_src_list_fini(struct br_mdb_config *cfg)
{
	kfree(cfg->src_entries);
}

static int br_mdb_config_attrs_init(struct nlattr *set_attrs,
				    struct br_mdb_config *cfg,
				    struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	int err;

	err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX, set_attrs,
			       br_mdbe_attrs_pol, extack);
	if (err)
		return err;

	if (mdb_attrs[MDBE_ATTR_SOURCE] &&
	    !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
				 cfg->entry->addr.proto, extack))
		return -EINVAL;

	__mdb_entry_to_br_ip(cfg->entry, &cfg->group, mdb_attrs);

	if (mdb_attrs[MDBE_ATTR_GROUP_MODE]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Filter mode cannot be set for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&cfg->group)) {
			NL_SET_ERR_MSG_MOD(extack, "Filter mode can only be set for (*, G) entries");
			return -EINVAL;
		}
		cfg->filter_mode = nla_get_u8(mdb_attrs[MDBE_ATTR_GROUP_MODE]);
	} else {
		cfg->filter_mode = MCAST_EXCLUDE;
	}

	if (mdb_attrs[MDBE_ATTR_SRC_LIST]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Source list cannot be set for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&cfg->group)) {
			NL_SET_ERR_MSG_MOD(extack, "Source list can only be set for (*, G) entries");
			return -EINVAL;
		}
		if (!mdb_attrs[MDBE_ATTR_GROUP_MODE]) {
			NL_SET_ERR_MSG_MOD(extack, "Source list cannot be set without filter mode");
			return -EINVAL;
		}
		err = br_mdb_config_src_list_init(mdb_attrs[MDBE_ATTR_SRC_LIST],
						  cfg, extack);
		if (err)
			return err;
	}

	if (!cfg->num_src_entries && cfg->filter_mode == MCAST_INCLUDE) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot add (*, G) INCLUDE with an empty source list");
		return -EINVAL;
	}

	if (mdb_attrs[MDBE_ATTR_RTPROT]) {
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Protocol cannot be set for host groups");
			return -EINVAL;
		}
		cfg->rt_protocol = nla_get_u8(mdb_attrs[MDBE_ATTR_RTPROT]);
	}

	return 0;
}
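/* Parse and validate an RTM_NEWMDB/RTM_DELMDB request into a
 * br_mdb_config, resolving the target bridge port when the entry's
 * ifindex is not the bridge itself.
 */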
static int br_mdb_config_init(struct br_mdb_config *cfg, struct net_device *dev,
			      struct nlattr *tb[], u16 nlmsg_flags,
			      struct netlink_ext_ack *extack)
{
	struct net *net = dev_net(dev);

	memset(cfg, 0, sizeof(*cfg));
	cfg->filter_mode = MCAST_EXCLUDE;
	cfg->rt_protocol = RTPROT_STATIC;
	cfg->nlflags = nlmsg_flags;

	cfg->br = netdev_priv(dev);

	if (!netif_running(cfg->br->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
		return -EINVAL;
	}

	if (!br_opt_get(cfg->br, BROPT_MULTICAST_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
		return -EINVAL;
	}

	cfg->entry = nla_data(tb[MDBA_SET_ENTRY]);

	if (cfg->entry->ifindex != cfg->br->dev->ifindex) {
		struct net_device *pdev;

		pdev = __dev_get_by_index(net, cfg->entry->ifindex);
		if (!pdev) {
			NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
			return -ENODEV;
		}

		cfg->p = br_port_get_rtnl(pdev);
		if (!cfg->p) {
			NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
			return -EINVAL;
		}

		if (cfg->p->br != cfg->br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
	}

	if (cfg->entry->addr.proto == htons(ETH_P_IP) &&
	    ipv4_is_zeronet(cfg->entry->addr.u.ip4)) {
		NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address 0.0.0.0 is not allowed");
		return -EINVAL;
	}

	if (tb[MDBA_SET_ENTRY_ATTRS])
		return br_mdb_config_attrs_init(tb[MDBA_SET_ENTRY_ATTRS], cfg,
						extack);
	else
		__mdb_entry_to_br_ip(cfg->entry, &cfg->group, NULL);

	return 0;
}

static void br_mdb_config_fini(struct br_mdb_config *cfg)
{
	br_mdb_config_src_list_fini(cfg);
}
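/* RTM_NEWMDB handler (e.g. iproute2's "bridge mdb add dev br0 port swp1
 * grp 239.1.1.1 permanent"; the device names here are illustrative).
 * With vlan filtering enabled and no VID given, the entry is installed
 * on every vlan configured on the port.
 */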
int br_mdb_add(struct net_device *dev, struct nlattr *tb[], u16 nlmsg_flags,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct br_mdb_config cfg;
	int err;

	err = br_mdb_config_init(&cfg, dev, tb, nlmsg_flags, extack);
	if (err)
		return err;

	err = -EINVAL;
	/* host join errors which can happen before creating the group */
	if (!cfg.p && !br_group_is_l2(&cfg.group)) {
		/* don't allow any flags for host-joined IP groups */
		if (cfg.entry->state) {
			NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
			goto out;
		}
		if (!br_multicast_is_star_g(&cfg.group)) {
			NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
			goto out;
		}
	}

	if (br_group_is_l2(&cfg.group) && cfg.entry->state != MDB_PERMANENT) {
		NL_SET_ERR_MSG_MOD(extack, "Only permanent L2 entries allowed");
		goto out;
	}

	if (cfg.p) {
		if (cfg.p->state == BR_STATE_DISABLED && cfg.entry->state != MDB_PERMANENT) {
			NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state and entry is not permanent");
			goto out;
		}
		vg = nbp_vlan_group(cfg.p);
	} else {
		vg = br_vlan_group(cfg.br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(cfg.br->dev) && vg && cfg.entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			cfg.entry->vid = v->vid;
			cfg.group.vid = v->vid;
			err = __br_mdb_add(&cfg, extack);
			if (err)
				break;
		}
	} else {
		err = __br_mdb_add(&cfg, extack);
	}

out:
	br_mdb_config_fini(&cfg);
	return err;
}

static int __br_mdb_del(const struct br_mdb_config *cfg)
{
	struct br_mdb_entry *entry = cfg->entry;
	struct net_bridge *br = cfg->br;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip = cfg->group;
	int err = -EINVAL;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
			continue;

		br_multicast_del_pg(mp, p, pp);
		err = 0;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}
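/* RTM_DELMDB handler, mirroring br_mdb_add(): with vlan filtering
 * enabled and no VID given, the entry is deleted on all vlans.
 */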
int br_mdb_del(struct net_device *dev, struct nlattr *tb[],
	       struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct br_mdb_config cfg;
	int err;

	err = br_mdb_config_init(&cfg, dev, tb, 0, extack);
	if (err)
		return err;

	if (cfg.p)
		vg = nbp_vlan_group(cfg.p);
	else
		vg = br_vlan_group(cfg.br);

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(cfg.br->dev) && vg && cfg.entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			cfg.entry->vid = v->vid;
			cfg.group.vid = v->vid;
			err = __br_mdb_del(&cfg);
		}
	} else {
		err = __br_mdb_del(&cfg);
	}

	br_mdb_config_fini(&cfg);
	return err;
}

struct br_mdb_flush_desc {
	u32 port_ifindex;
	u16 vid;
	u8 rt_protocol;
	u8 state;
	u8 state_mask;
};

static const struct nla_policy br_mdbe_attrs_del_bulk_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_RTPROT] = NLA_POLICY_MIN(NLA_U8, RTPROT_STATIC),
	[MDBE_ATTR_STATE_MASK] = NLA_POLICY_MASK(NLA_U8, MDB_PERMANENT),
};

static int br_mdb_flush_desc_init(struct br_mdb_flush_desc *desc,
				  struct nlattr *tb[],
				  struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(tb[MDBA_SET_ENTRY]);
	struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
	int err;

	desc->port_ifindex = entry->ifindex;
	desc->vid = entry->vid;
	desc->state = entry->state;

	if (!tb[MDBA_SET_ENTRY_ATTRS])
		return 0;

	err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
			       tb[MDBA_SET_ENTRY_ATTRS],
			       br_mdbe_attrs_del_bulk_pol, extack);
	if (err)
		return err;

	if (mdbe_attrs[MDBE_ATTR_STATE_MASK])
		desc->state_mask = nla_get_u8(mdbe_attrs[MDBE_ATTR_STATE_MASK]);

	if (mdbe_attrs[MDBE_ATTR_RTPROT])
		desc->rt_protocol = nla_get_u8(mdbe_attrs[MDBE_ATTR_RTPROT]);

	return 0;
}

static void br_mdb_flush_host(struct net_bridge *br,
			      struct net_bridge_mdb_entry *mp,
			      const struct br_mdb_flush_desc *desc)
{
	u8 state;

	if (desc->port_ifindex && desc->port_ifindex != br->dev->ifindex)
		return;

	if (desc->rt_protocol)
		return;

	state = br_group_is_l2(&mp->addr) ? MDB_PERMANENT : 0;
	if (desc->state_mask && (state & desc->state_mask) != desc->state)
		return;

	br_multicast_host_leave(mp, true);
	if (!mp->ports && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}

static void br_mdb_flush_pgs(struct net_bridge *br,
			     struct net_bridge_mdb_entry *mp,
			     const struct br_mdb_flush_desc *desc)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;

	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;) {
		u8 state;

		if (desc->port_ifindex &&
		    desc->port_ifindex != p->key.port->dev->ifindex) {
			pp = &p->next;
			continue;
		}

		if (desc->rt_protocol && desc->rt_protocol != p->rt_protocol) {
			pp = &p->next;
			continue;
		}

		state = p->flags & MDB_PG_FLAGS_PERMANENT ? MDB_PERMANENT : 0;
		if (desc->state_mask &&
		    (state & desc->state_mask) != desc->state) {
			pp = &p->next;
			continue;
		}

		br_multicast_del_pg(mp, p, pp);
	}
}
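/* Flush all MDB entries matching a bulk-delete descriptor (the backend
 * of iproute2's "bridge mdb flush"). Host entries and port group
 * entries are matched on vid, port ifindex, rt_protocol and (masked)
 * state.
 */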
static void br_mdb_flush(struct net_bridge *br,
			 const struct br_mdb_flush_desc *desc)
{
	struct net_bridge_mdb_entry *mp;

	spin_lock_bh(&br->multicast_lock);

	/* Safe variant is not needed because entries are removed from the list
	 * upon group timer expiration or bridge deletion.
	 */
	hlist_for_each_entry(mp, &br->mdb_list, mdb_node) {
		if (desc->vid && desc->vid != mp->addr.vid)
			continue;

		br_mdb_flush_host(br, mp, desc);
		br_mdb_flush_pgs(br, mp, desc);
	}

	spin_unlock_bh(&br->multicast_lock);
}

int br_mdb_del_bulk(struct net_device *dev, struct nlattr *tb[],
		    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	struct br_mdb_flush_desc desc = {};
	int err;

	err = br_mdb_flush_desc_init(&desc, tb, extack);
	if (err)
		return err;

	br_mdb_flush(br, &desc);

	return 0;
}

static const struct nla_policy br_mdbe_attrs_get_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
};

static int br_mdb_get_parse(struct net_device *dev, struct nlattr *tb[],
			    struct br_ip *group, struct netlink_ext_ack *extack)
{
	struct br_mdb_entry *entry = nla_data(tb[MDBA_GET_ENTRY]);
	struct nlattr *mdbe_attrs[MDBE_ATTR_MAX + 1];
	int err;

	if (!tb[MDBA_GET_ENTRY_ATTRS]) {
		__mdb_entry_to_br_ip(entry, group, NULL);
		return 0;
	}

	err = nla_parse_nested(mdbe_attrs, MDBE_ATTR_MAX,
			       tb[MDBA_GET_ENTRY_ATTRS], br_mdbe_attrs_get_pol,
			       extack);
	if (err)
		return err;

	if (mdbe_attrs[MDBE_ATTR_SOURCE] &&
	    !is_valid_mdb_source(mdbe_attrs[MDBE_ATTR_SOURCE],
				 entry->addr.proto, extack))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, group, mdbe_attrs);

	return 0;
}

static struct sk_buff *
br_mdb_get_reply_alloc(const struct net_bridge_mdb_entry *mp)
{
	struct net_bridge_port_group *pg;
	size_t nlmsg_size;

	nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
		     /* MDBA_MDB */
		     nla_total_size(0) +
		     /* MDBA_MDB_ENTRY */
		     nla_total_size(0);

	if (mp->host_joined)
		nlmsg_size += rtnl_mdb_nlmsg_pg_size(NULL);

	for (pg = mlock_dereference(mp->ports, mp->br); pg;
	     pg = mlock_dereference(pg->next, mp->br))
		nlmsg_size += rtnl_mdb_nlmsg_pg_size(pg);

	return nlmsg_new(nlmsg_size, GFP_ATOMIC);
}

static int br_mdb_get_reply_fill(struct sk_buff *skb,
				 struct net_bridge_mdb_entry *mp, u32 portid,
				 u32 seq)
{
	struct nlattr *mdb_nest, *mdb_entry_nest;
	struct net_bridge_port_group *pg;
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWMDB, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = mp->br->dev->ifindex;
	mdb_nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (!mdb_nest) {
		err = -EMSGSIZE;
		goto cancel;
	}
	mdb_entry_nest = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (!mdb_entry_nest) {
		err = -EMSGSIZE;
		goto cancel;
	}

	if (mp->host_joined) {
		err = __mdb_fill_info(skb, mp, NULL);
		if (err)
			goto cancel;
	}

	for (pg = mlock_dereference(mp->ports, mp->br); pg;
	     pg = mlock_dereference(pg->next, mp->br)) {
		err = __mdb_fill_info(skb, mp, pg);
		if (err)
			goto cancel;
	}

	nla_nest_end(skb, mdb_entry_nest);
	nla_nest_end(skb, mdb_nest);
	nlmsg_end(skb, nlh);

	return 0;

cancel:
	nlmsg_cancel(skb, nlh);
	return err;
}
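/* RTM_GETMDB (non-dump) handler: look up one MDB entry and unicast a
 * reply covering its host-joined state and all port groups. The
 * multicast lock keeps the entry stable between sizing and filling
 * the reply.
 */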
int br_mdb_get(struct net_device *dev, struct nlattr *tb[], u32 portid, u32 seq,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct sk_buff *skb;
	struct br_ip group;
	int err;

	err = br_mdb_get_parse(dev, tb, &group, extack);
	if (err)
		return err;

	/* Hold the multicast lock to ensure that the MDB entry does not change
	 * between the time the reply size is determined and when the reply is
	 * filled in.
	 */
	spin_lock_bh(&br->multicast_lock);

	mp = br_mdb_ip_get(br, &group);
	if (!mp || (!mp->ports && !mp->host_joined)) {
		NL_SET_ERR_MSG_MOD(extack, "MDB entry not found");
		err = -ENOENT;
		goto unlock;
	}

	skb = br_mdb_get_reply_alloc(mp);
	if (!skb) {
		err = -ENOMEM;
		goto unlock;
	}

	err = br_mdb_get_reply_fill(skb, mp, portid, seq);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to fill MDB get reply");
		goto free;
	}

	spin_unlock_bh(&br->multicast_lock);

	return rtnl_unicast(skb, dev_net(dev), portid);

free:
	kfree_skb(skb);
unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}