// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/if_ether.h>
#include <linux/igmp.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/netdevice.h>
#include <linux/netfilter_bridge.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/inetdevice.h>
#include <linux/mroute.h>
#include <net/ip.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#endif
#include <trace/events/bridge.h>

#include "br_private.h"
#include "br_private_mcast_eht.h"

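/* Two rhashtables back the snooping state: the MDB table hashes
 * net_bridge_mdb_entry by its br_ip address (group/source/proto/vid), and
 * the S,G port table hashes net_bridge_port_group by its (port, addr)
 * key so source-specific entries can be looked up per port.
 */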
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};

static const struct rhashtable_params br_sg_port_rht_params = {
	.head_offset = offsetof(struct net_bridge_port_group, rhnode),
	.key_offset = offsetof(struct net_bridge_port_group, key),
	.key_len = sizeof(struct net_bridge_port_group_sg_key),
	.automatic_shrinking = true,
};

static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query);
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);
static void br_multicast_port_group_rexmit(struct timer_list *t);

static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted);
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked);
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg);
static void __br_multicast_stop(struct net_bridge_mcast *brmctx);
static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack);

static struct net_bridge_port_group *
br_sg_port_find(struct net_bridge *br,
		struct net_bridge_port_group_sg_key *sg_p)
{
	lockdep_assert_held_once(&br->multicast_lock);

	return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
				      br_sg_port_rht_params);
}
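
/* br_mdb_ip_get_rcu() may run from the data path under rcu_read_lock();
 * br_mdb_ip_get() requires multicast_lock and takes a short RCU section
 * itself, so the rhashtable walk is safe either way.
 */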
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}

struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
					   struct br_ip *dst)
{
	struct net_bridge_mdb_entry *ent;

	lockdep_assert_held_once(&br->multicast_lock);

	rcu_read_lock();
	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
	rcu_read_unlock();

	return ent;
}

static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
						   __be32 dst, __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip4 = dst;
	br_dst.proto = htons(ETH_P_IP);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}

#if IS_ENABLED(CONFIG_IPV6)
static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
						   const struct in6_addr *dst,
						   __u16 vid)
{
	struct br_ip br_dst;

	memset(&br_dst, 0, sizeof(br_dst));
	br_dst.dst.ip6 = *dst;
	br_dst.proto = htons(ETH_P_IPV6);
	br_dst.vid = vid;

	return br_mdb_ip_get(br, &br_dst);
}
#endif
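
/* Data-path MDB lookup for a forwarded skb: with IGMPv3/MLDv2 snooping an
 * S,G match (group plus the packet's source address) is tried first, then
 * the lookup falls back to the *,G entry, and for non-IP traffic to the
 * destination MAC address.
 */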
struct net_bridge_mdb_entry *
br_mdb_entry_skb_get(struct net_bridge_mcast *brmctx, struct sk_buff *skb,
		     u16 vid)
{
	struct net_bridge *br = brmctx->br;
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.dst.ip4 = ip_hdr(skb)->daddr;
		if (brmctx->multicast_igmp_version == 3) {
			struct net_bridge_mdb_entry *mdb;

			ip.src.ip4 = ip_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			ip.src.ip4 = 0;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.dst.ip6 = ipv6_hdr(skb)->daddr;
		if (brmctx->multicast_mld_version == 2) {
			struct net_bridge_mdb_entry *mdb;

			ip.src.ip6 = ipv6_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
		}
		break;
#endif
	default:
		ip.proto = 0;
		ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
	}

	return br_mdb_ip_get_rcu(br, &ip);
}

/* IMPORTANT: this function must be used only when the contexts cannot be
 * passed down (e.g. timer) and must be used for read-only purposes because
 * the vlan snooping option can change, so it can return any context
 * (non-vlan or vlan). Its initial intended purpose is to read timer values
 * from the *current* context based on the option. At worst that could lead
 * to inconsistent timers when the contexts are changed, i.e. a src timer
 * that needs to re-arm with a specific delay taken from the old context.
 */
static struct net_bridge_mcast_port *
br_multicast_pg_to_port_ctx(const struct net_bridge_port_group *pg)
{
	struct net_bridge_mcast_port *pmctx = &pg->key.port->multicast_ctx;
	struct net_bridge_vlan *vlan;

	lockdep_assert_held_once(&pg->key.port->br->multicast_lock);

	/* if vlan snooping is disabled use the port's multicast context */
	if (!pg->key.addr.vid ||
	    !br_opt_get(pg->key.port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		goto out;

	/* locking is tricky here, due to different rules for multicast and
	 * vlans we need to take rcu to find the vlan and make sure it has
	 * the BR_VLFLAG_MCAST_ENABLED flag set, it can only change under
	 * multicast_lock which must be already held here, so the vlan's pmctx
	 * can safely be used on return
	 */
	rcu_read_lock();
	vlan = br_vlan_find(nbp_vlan_group_rcu(pg->key.port), pg->key.addr.vid);
	if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
		pmctx = &vlan->port_mcast_ctx;
	else
		pmctx = NULL;
	rcu_read_unlock();
out:
	return pmctx;
}

static struct net_bridge_mcast_port *
br_multicast_port_vid_to_port_ctx(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_mcast_port *pmctx = NULL;
	struct net_bridge_vlan *vlan;

	lockdep_assert_held_once(&port->br->multicast_lock);

	if (!br_opt_get(port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		return NULL;

	/* Take RCU to access the vlan. */
	rcu_read_lock();

	vlan = br_vlan_find(nbp_vlan_group_rcu(port), vid);
	if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
		pmctx = &vlan->port_mcast_ctx;

	rcu_read_unlock();

	return pmctx;
}

/* when snooping we need to check if the contexts should be used
 * in the following order:
 * - if pmctx is non-NULL (port), check if it should be used
 * - if pmctx is NULL (bridge), check if brmctx should be used
 */
static bool
br_multicast_ctx_should_use(const struct net_bridge_mcast *brmctx,
			    const struct net_bridge_mcast_port *pmctx)
{
	if (!netif_running(brmctx->br->dev))
		return false;

	if (pmctx)
		return !br_multicast_port_ctx_state_disabled(pmctx);
	else
		return !br_multicast_ctx_vlan_disabled(brmctx);
}

static bool br_port_group_equal(struct net_bridge_port_group *p,
				struct net_bridge_port *port,
				const unsigned char *src)
{
	if (p->key.port != port)
		return false;

	if (!(port->flags & BR_MULTICAST_TO_UNICAST))
		return true;

	return ether_addr_equal(src, p->eth_addr);
}

static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *src_pg;
	struct net_bridge_mcast *brmctx;

	memset(&sg_key, 0, sizeof(sg_key));
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	if (br_sg_port_find(brmctx->br, &sg_key))
		return;

	src_pg = __br_multicast_add_group(brmctx, pmctx,
					  sg_ip, pg->eth_addr,
					  MCAST_INCLUDE, false, false);
	if (IS_ERR_OR_NULL(src_pg) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
}

static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *src_pg;

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	src_pg = br_sg_port_find(br, &sg_key);
	if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	br_multicast_find_del_pg(br, src_pg);
}

/* When a port group transitions to (or is added as) EXCLUDE we need to add it
 * to all other ports' S,G entries which are not blocked by the current group
 * for proper replication, the assumption is that any S,G blocked entries
 * are already added so the S,G,port lookup should skip them.
 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
 * deleted we need to remove it from all ports' S,G entries where it was
 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
 */
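/* For example, assuming port A reports *,G in EXCLUDE mode while port B
 * has an installed source S for the same group: an (S,G) entry flagged
 * MDB_PG_FLAGS_STAR_EXCL is auto-created on port A so traffic from S is
 * still replicated to it, and it is torn down again once A moves back to
 * INCLUDE mode or leaves.
 */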
void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
				     u8 filter_mode)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *pg_lst;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
		return;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (!mp)
		return;
	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;

	for (pg_lst = mlock_dereference(mp->ports, br);
	     pg_lst;
	     pg_lst = mlock_dereference(pg_lst->next, br)) {
		struct net_bridge_group_src *src_ent;

		if (pg_lst == pg)
			continue;
		hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			switch (filter_mode) {
			case MCAST_INCLUDE:
				__fwd_del_star_excl(pg, &sg_ip);
				break;
			case MCAST_EXCLUDE:
				__fwd_add_star_excl(pmctx, pg, &sg_ip);
				break;
			}
		}
	}
}

/* called when adding a new S,G with host_joined == false by default */
static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_mdb_entry *sg_mp;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;
	if (!star_mp->host_joined)
		return;

	sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
	if (!sg_mp)
		return;
	sg_mp->host_joined = true;
}

/* set the host_joined state of all of *,G's S,G entries */
static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
{
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mdb_entry *sg_mp;
	struct net_bridge_port_group *pg;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = star_mp->addr;
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_group_src *src_ent;

		hlist_for_each_entry(src_ent, &pg->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			sg_mp = br_mdb_ip_get(br, &sg_ip);
			if (!sg_mp)
				continue;
			sg_mp->host_joined = star_mp->host_joined;
		}
	}
}

static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;

	/* *,G exclude ports are only added to S,G entries */
	if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
		return;

	/* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports
	 * we should ignore perm entries since they're managed by user-space
	 */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;
	     pp = &p->next)
		if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
				  MDB_PG_FLAGS_PERMANENT)))
			return;

	/* currently the host can only have joined the *,G which means
	 * we treat it as EXCLUDE {}, so for an S,G it's considered a
	 * STAR_EXCLUDE entry and we can safely leave it
	 */
	sgmp->host_joined = false;
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
		if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(sgmp, p, pp);
		else
			pp = &p->next;
	}
}

void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *pg;
	struct net_bridge_mcast *brmctx;

	if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
		return;
	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	br_multicast_sg_host_state(star_mp, sg);
	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = sg->key.addr;
	/* we need to add all exclude ports to the S,G */
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_port_group *src_pg;

		if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
			continue;

		sg_key.port = pg->key.port;
		if (br_sg_port_find(br, &sg_key))
			continue;

		pmctx = br_multicast_pg_to_port_ctx(pg);
		if (!pmctx)
			continue;
		brmctx = br_multicast_port_ctx_get_global(pmctx);

		src_pg = __br_multicast_add_group(brmctx, pmctx,
						  &sg->key.addr,
						  sg->eth_addr,
						  MCAST_INCLUDE, false, false);
		if (IS_ERR_OR_NULL(src_pg) ||
		    src_pg->rt_protocol != RTPROT_KERNEL)
			continue;
		src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
	}
}
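
/* Install the S,G derived from a group source entry: the new port group is
 * created blocked if the source timer is not running, and entries installed
 * here are owned by the kernel (RTPROT_KERNEL) rather than by user-space.
 */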
static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
{
	struct net_bridge_mdb_entry *star_mp;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *sg;
	struct net_bridge_mcast *brmctx;
	struct br_ip sg_ip;

	if (src->flags & BR_SGRP_F_INSTALLED)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	pmctx = br_multicast_pg_to_port_ctx(src->pg);
	if (!pmctx)
		return;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_ip = src->pg->key.addr;
	sg_ip.src = src->addr.src;

	sg = __br_multicast_add_group(brmctx, pmctx, &sg_ip,
				      src->pg->eth_addr, MCAST_INCLUDE, false,
				      !timer_pending(&src->timer));
	if (IS_ERR_OR_NULL(sg))
		return;
	src->flags |= BR_SGRP_F_INSTALLED;
	sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;

	/* if it was added by user-space as perm we can skip next steps */
	if (sg->rt_protocol != RTPROT_KERNEL &&
	    (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* the kernel is now responsible for removing this S,G */
	del_timer(&sg->timer);
	star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
	if (!star_mp)
		return;

	br_multicast_sg_add_exclude_ports(star_mp, sg);
}

static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src,
					bool fastleave)
{
	struct net_bridge_port_group *p, *pg = src->pg;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;
	sg_ip.src = src->addr.src;

	mp = br_mdb_ip_get(src->br, &sg_ip);
	if (!mp)
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, src->br)) != NULL;
	     pp = &p->next) {
		if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
			continue;

		if (p->rt_protocol != RTPROT_KERNEL &&
		    (p->flags & MDB_PG_FLAGS_PERMANENT) &&
		    !(src->flags & BR_SGRP_F_USER_ADDED))
			break;

		if (fastleave)
			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_del_pg(mp, p, pp);
		break;
	}
	src->flags &= ~BR_SGRP_F_INSTALLED;
}

/* install S,G and based on src's timer enable or disable forwarding */
static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *sg;
	u8 old_flags;

	br_multicast_fwd_src_add(src);

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = src->pg->key.addr;
	sg_key.addr.src = src->addr.src;
	sg_key.port = src->pg->key.port;

	sg = br_sg_port_find(src->br, &sg_key);
	if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	old_flags = sg->flags;
	if (timer_pending(&src->timer))
		sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
	else
		sg->flags |= MDB_PG_FLAGS_BLOCKED;

	if (old_flags != sg->flags) {
		struct net_bridge_mdb_entry *sg_mp;

		sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
		if (!sg_mp)
			return;
		br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
	}
}

static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_mdb_entry *mp;

	mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
	WARN_ON(!hlist_unhashed(&mp->mdb_node));
	WARN_ON(mp->ports);

	timer_shutdown_sync(&mp->timer);
	kfree_rcu(mp, rcu);
}

static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
{
	struct net_bridge *br = mp->br;

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_init_rcu(&mp->mdb_node);
	hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}

static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
	    timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);

	if (mp->ports)
		goto out;
	br_multicast_del_mdb_entry(mp);
out:
	spin_unlock(&br->multicast_lock);
}

static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_src *src;

	src = container_of(gc, struct net_bridge_group_src, mcast_gc);
	WARN_ON(!hlist_unhashed(&src->node));

	timer_shutdown_sync(&src->timer);
	kfree_rcu(src, rcu);
}

void __br_multicast_del_group_src(struct net_bridge_group_src *src)
{
	struct net_bridge *br = src->pg->key.port->br;

	hlist_del_init_rcu(&src->node);
	src->pg->src_ents--;
	hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}

void br_multicast_del_group_src(struct net_bridge_group_src *src,
				bool fastleave)
{
	br_multicast_fwd_src_remove(src, fastleave);
	__br_multicast_del_group_src(src);
}
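
/* Group-count accounting for the mcast_max_groups limit: counts are kept
 * per port and, when a VID is involved and per-VLAN snooping is active,
 * also per port-VLAN context, so either limit can reject a new entry.
 */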
static int
br_multicast_port_ngroups_inc_one(struct net_bridge_mcast_port *pmctx,
				  struct netlink_ext_ack *extack,
				  const char *what)
{
	u32 max = READ_ONCE(pmctx->mdb_max_entries);
	u32 n = READ_ONCE(pmctx->mdb_n_entries);

	if (max && n >= max) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "%s is already in %u groups, and mcast_max_groups=%u",
				       what, n, max);
		return -E2BIG;
	}

	WRITE_ONCE(pmctx->mdb_n_entries, n + 1);
	return 0;
}

static void br_multicast_port_ngroups_dec_one(struct net_bridge_mcast_port *pmctx)
{
	u32 n = READ_ONCE(pmctx->mdb_n_entries);

	WARN_ON_ONCE(n == 0);
	WRITE_ONCE(pmctx->mdb_n_entries, n - 1);
}

static int br_multicast_port_ngroups_inc(struct net_bridge_port *port,
					 const struct br_ip *group,
					 struct netlink_ext_ack *extack)
{
	struct net_bridge_mcast_port *pmctx;
	int err;

	lockdep_assert_held_once(&port->br->multicast_lock);

	/* Always count on the port context. */
	err = br_multicast_port_ngroups_inc_one(&port->multicast_ctx, extack,
						"Port");
	if (err) {
		trace_br_mdb_full(port->dev, group);
		return err;
	}

	/* Only count on the VLAN context if VID is given, and if snooping on
	 * that VLAN is enabled.
	 */
	if (!group->vid)
		return 0;

	pmctx = br_multicast_port_vid_to_port_ctx(port, group->vid);
	if (!pmctx)
		return 0;

	err = br_multicast_port_ngroups_inc_one(pmctx, extack, "Port-VLAN");
	if (err) {
		trace_br_mdb_full(port->dev, group);
		goto dec_one_out;
	}

	return 0;

dec_one_out:
	br_multicast_port_ngroups_dec_one(&port->multicast_ctx);
	return err;
}

static void br_multicast_port_ngroups_dec(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_mcast_port *pmctx;

	lockdep_assert_held_once(&port->br->multicast_lock);

	if (vid) {
		pmctx = br_multicast_port_vid_to_port_ctx(port, vid);
		if (pmctx)
			br_multicast_port_ngroups_dec_one(pmctx);
	}
	br_multicast_port_ngroups_dec_one(&port->multicast_ctx);
}

u32 br_multicast_ngroups_get(const struct net_bridge_mcast_port *pmctx)
{
	return READ_ONCE(pmctx->mdb_n_entries);
}

void br_multicast_ngroups_set_max(struct net_bridge_mcast_port *pmctx, u32 max)
{
	WRITE_ONCE(pmctx->mdb_max_entries, max);
}

u32 br_multicast_ngroups_get_max(const struct net_bridge_mcast_port *pmctx)
{
	return READ_ONCE(pmctx->mdb_max_entries);
}

static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_port_group *pg;

	pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
	WARN_ON(!hlist_unhashed(&pg->mglist));
	WARN_ON(!hlist_empty(&pg->src_list));

	timer_shutdown_sync(&pg->rexmit_timer);
	timer_shutdown_sync(&pg->timer);
	kfree_rcu(pg, rcu);
}

void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
			 struct net_bridge_port_group *pg,
			 struct net_bridge_port_group __rcu **pp)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;

	rcu_assign_pointer(*pp, pg->next);
	hlist_del_init(&pg->mglist);
	br_multicast_eht_clean_sets(pg);
	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		br_multicast_del_group_src(ent, false);
	br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
	if (!br_multicast_is_star_g(&mp->addr)) {
		rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
				       br_sg_port_rht_params);
		br_multicast_sg_del_exclude_ports(mp);
	} else {
		br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
	}
	br_multicast_port_ngroups_dec(pg->key.port, pg->key.addr.vid);
	hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);

	if (!mp->ports && !mp->host_joined && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}

static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		br_multicast_del_pg(mp, pg, pp);
		return;
	}

	WARN_ON(1);
}

static void br_multicast_port_group_expired(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, timer);
	struct net_bridge_group_src *src_ent;
	struct net_bridge *br = pg->key.port->br;
	struct hlist_node *tmp;
	bool changed;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
	    hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
		goto out;

	changed = !!(pg->filter_mode == MCAST_EXCLUDE);
	pg->filter_mode = MCAST_INCLUDE;
	hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
		if (!timer_pending(&src_ent->timer)) {
			br_multicast_del_group_src(src_ent, false);
			changed = true;
		}
	}

	if (hlist_empty(&pg->src_list)) {
		br_multicast_find_del_pg(br, pg);
	} else if (changed) {
		struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);

		if (changed && br_multicast_is_star_g(&pg->key.addr))
			br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);

		if (WARN_ON(!mp))
			goto out;
		br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
	}
out:
	spin_unlock(&br->multicast_lock);
}
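
/* Deferred destruction: expired entries are unlinked under multicast_lock,
 * queued on br->mcast_gc_list, and freed here from the mcast_gc_work
 * workqueue, where sleeping in timer_shutdown_sync() is safe.
 */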
static void br_multicast_gc(struct hlist_head *head)
{
	struct net_bridge_mcast_gc *gcent;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
		hlist_del_init(&gcent->gc_node);
		gcent->destroy(gcent);
	}
}

static void __br_multicast_query_handle_vlan(struct net_bridge_mcast *brmctx,
					     struct net_bridge_mcast_port *pmctx,
					     struct sk_buff *skb)
{
	struct net_bridge_vlan *vlan = NULL;

	if (pmctx && br_multicast_port_ctx_is_vlan(pmctx))
		vlan = pmctx->vlan;
	else if (br_multicast_ctx_is_vlan(brmctx))
		vlan = brmctx->vlan;

	if (vlan && !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
		u16 vlan_proto;

		if (br_vlan_get_proto(brmctx->br->dev, &vlan_proto) != 0)
			return;
		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan->vid);
	}
}
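
/* Build an IGMP query to send from the bridge: Ethernet + IPv4 header with
 * the Router Alert option (hence ihl = 6 and the extra 4 bytes in pkt_size)
 * followed by an IGMPv2 or IGMPv3 query. For v3 retransmissions with
 * sources, only sources whose timers fall on the requested side of the
 * last member query time and still have pending rexmit counts are included.
 */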
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs, bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (brmctx->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			lmqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(brmctx->br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(brmctx->br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (brmctx->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ih->code = (group ? brmctx->multicast_last_member_interval :
				    brmctx->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? brmctx->multicast_last_member_interval :
				      brmctx->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = brmctx->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
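/* IPv6 counterpart: Ethernet + IPv6 header with a Hop-by-Hop options
 * extension carrying the Router Alert TLV (the extra 8 bytes), followed by
 * an MLDv1 or MLDv2 query. The source address is picked with
 * ipv6_dev_get_saddr(); on failure BROPT_HAS_IPV6_ADDR is cleared and the
 * query is not sent.
 */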
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (brmctx->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			llqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ip6h->daddr = *ip6_dst;
	if (ipv6_dev_get_saddr(dev_net(brmctx->br->dev), brmctx->br->dev,
			       &ip6h->daddr, 0, &ip6h->saddr)) {
		kfree_skb(skb);
		br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	interval = ipv6_addr_any(group) ?
			brmctx->multicast_query_response_interval :
			brmctx->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (brmctx->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;
		if (!pg || !with_srcs)
			break;

		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
				IPPROTO_ICMPV6,
				csum_partial(csum_start, mld_hdr_size, 0));
	skb_put(skb, mld_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif
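
/* Protocol-agnostic query builder: dispatches on group->proto to the
 * IGMP (IPv4) or MLD (IPv6) allocator above. When no explicit
 * destination is given, general queries fall back to 224.0.0.1
 * (all-hosts) resp. ff02::1 (all-nodes).
 */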
static struct sk_buff *br_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						struct net_bridge_mcast_port *pmctx,
						struct net_bridge_port_group *pg,
						struct br_ip *ip_dst,
						struct br_ip *group,
						bool with_srcs, bool over_lmqt,
						u8 sflag, u8 *igmp_type,
						bool *need_rexmit)
{
	__be32 ip4_dst;

	switch (group->proto) {
	case htons(ETH_P_IP):
		ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
		return br_ip4_multicast_alloc_query(brmctx, pmctx, pg,
						    ip4_dst, group->dst.ip4,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr ip6_dst;

		if (ip_dst)
			ip6_dst = ip_dst->dst.ip6;
		else
			ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
				      htonl(1));

		return br_ip6_multicast_alloc_query(brmctx, pmctx, pg,
						    &ip6_dst, &group->dst.ip6,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}
#endif
	}
	return NULL;
}
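
/* Look up or create the MDB entry for @group. Callers are expected to
 * hold br->multicast_lock (note the GFP_ATOMIC allocation). When the
 * hash table is at hash_max, snooping is switched off and -E2BIG is
 * returned.
 */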
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (mp)
		return mp;

	if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
		trace_br_mdb_full(br->dev, group);
		br_mc_disabled_update(br->dev, false, NULL);
		br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
		return ERR_PTR(-E2BIG);
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);
	if (err) {
		kfree(mp);
		mp = ERR_PTR(err);
	} else {
		hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
	}

	return mp;
}

static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
	    timer_pending(&src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src, false);
		if (!hlist_empty(&pg->src_list))
			goto out;
		br_multicast_find_del_pg(br, pg);
	} else {
		br_multicast_fwd_src_handle(src);
	}

out:
	spin_unlock(&br->multicast_lock);
}

struct net_bridge_group_src *
br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
{
	struct net_bridge_group_src *ent;

	switch (ip->proto) {
	case htons(ETH_P_IP):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (ip->src.ip4 == ent->addr.src.ip4)
				return ent;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
				return ent;
		break;
#endif
	}

	return NULL;
}

struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
	struct net_bridge_group_src *grp_src;

	if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
		return NULL;

	switch (src_ip->proto) {
	case htons(ETH_P_IP):
		if (ipv4_is_zeronet(src_ip->src.ip4) ||
		    ipv4_is_multicast(src_ip->src.ip4))
			return NULL;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (ipv6_addr_any(&src_ip->src.ip6) ||
		    ipv6_addr_is_multicast(&src_ip->src.ip6))
			return NULL;
		break;
#endif
	}

	grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
	if (unlikely(!grp_src))
		return NULL;

	grp_src->pg = pg;
	grp_src->br = pg->key.port->br;
	grp_src->addr = *src_ip;
	grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
	timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);

	hlist_add_head_rcu(&grp_src->node, &pg->src_list);
	pg->src_ents++;

	return grp_src;
}

struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			const struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode,
			u8 rt_protocol,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group *p;
	int err;

	err = br_multicast_port_ngroups_inc(port, group, extack);
	if (err)
		return NULL;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
		goto dec_out;
	}

	p->key.addr = *group;
	p->key.port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	p->rt_protocol = rt_protocol;
	p->eht_host_tree = RB_ROOT;
	p->eht_set_tree = RB_ROOT;
	p->mcast_gc.destroy = br_multicast_destroy_port_group;
	INIT_HLIST_HEAD(&p->src_list);

	if (!br_multicast_is_star_g(group) &&
	    rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
					  br_sg_port_rht_params)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't insert new port group");
		goto free_out;
	}

	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(&p->mglist, &port->mglist);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(p->eth_addr);

	return p;

free_out:
	kfree(p);
dec_out:
	br_multicast_port_ngroups_dec(port, group->vid);
	return NULL;
}

void br_multicast_del_port_group(struct net_bridge_port_group *p)
{
	struct net_bridge_port *port = p->key.port;
	__u16 vid = p->key.addr.vid;

	hlist_del_init(&p->mglist);
	if (!br_multicast_is_star_g(&p->key.addr))
		rhashtable_remove_fast(&port->br->sg_port_tbl, &p->rhnode,
				       br_sg_port_rht_params);
	kfree(p);
	br_multicast_port_ngroups_dec(port, vid);
}
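
/* host_joined tracks whether the bridge device itself is a receiver
 * for @mp. Joining (re)arms the group membership timer, except for L2
 * (non-IP) groups, whose timer is never armed so they do not expire.
 */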
void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
			    struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined) {
		mp->host_joined = true;
		if (br_multicast_is_star_g(&mp->addr))
			br_multicast_star_g_host_state(mp);
		if (notify)
			br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
	}

	if (br_group_is_l2(&mp->addr))
		return;

	mod_timer(&mp->timer, jiffies + brmctx->multicast_membership_interval);
}

void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined)
		return;

	mp->host_joined = false;
	if (br_multicast_is_star_g(&mp->addr))
		br_multicast_star_g_host_state(mp);
	if (notify)
		br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
}

static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p = NULL;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_multicast_new_group(brmctx->br, group);
	if (IS_ERR(mp))
		return ERR_CAST(mp);

	if (!pmctx) {
		br_multicast_host_join(brmctx, mp, true);
		goto out;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, pmctx->port, src))
			goto found;
		if ((unsigned long)p->key.port < (unsigned long)pmctx->port)
			break;
	}

	p = br_multicast_new_port_group(pmctx->port, group, *pp, 0, src,
					filter_mode, RTPROT_KERNEL, NULL);
	if (unlikely(!p)) {
		p = ERR_PTR(-ENOMEM);
		goto out;
	}
	rcu_assign_pointer(*pp, p);
	if (blocked)
		p->flags |= MDB_PG_FLAGS_BLOCKED;
	br_mdb_notify(brmctx->br->dev, mp, p, RTM_NEWMDB);

found:
	if (igmpv2_mldv1)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);

out:
	return p;
}

static int br_multicast_add_group(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct br_ip *group,
				  const unsigned char *src,
				  u8 filter_mode,
				  bool igmpv2_mldv1)
{
	struct net_bridge_port_group *pg;
	int err;

	spin_lock(&brmctx->br->multicast_lock);
	pg = __br_multicast_add_group(brmctx, pmctx, group, src, filter_mode,
				      igmpv2_mldv1, false);
	/* NULL is considered valid for host joined groups */
	err = PTR_ERR_OR_ZERO(pg);
	spin_unlock(&brmctx->br->multicast_lock);

	return err;
}

static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      __be32 group,
				      __u16 vid,
				      const unsigned char *src,
				      bool igmpv2)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv4_is_local_multicast(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;
	filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      filter_mode, igmpv2);
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      const struct in6_addr *group,
				      __u16 vid,
				      const unsigned char *src,
				      bool mldv1)
{
	struct br_ip br_group;
	u8 filter_mode;

	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;
	filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      filter_mode, mldv1);
}
#endif

static bool br_multicast_rport_del(struct hlist_node *rlist)
{
	if (hlist_unhashed(rlist))
		return false;

	hlist_del_init_rcu(rlist);
	return true;
}

static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
	return br_multicast_rport_del(&pmctx->ip4_rlist);
}

static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	return br_multicast_rport_del(&pmctx->ip6_rlist);
#else
	return false;
#endif
}

static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx,
					struct timer_list *t,
					struct hlist_node *rlist)
{
	struct net_bridge *br = pmctx->port->br;
	bool del;

	spin_lock(&br->multicast_lock);
	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(t))
		goto out;

	del = br_multicast_rport_del(rlist);
	br_multicast_rport_del_notify(pmctx, del);
out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist);
}
#endif

static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}

static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx,
					      struct timer_list *timer)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    brmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    br_ip4_multicast_is_router(brmctx) ||
	    br_ip6_multicast_is_router(brmctx))
		goto out;

	br_mc_router_state_change(brmctx->br, false);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
#endif

static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (!netif_running(brmctx->br->dev) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(brmctx, query);

out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query);
}
#endif

static void br_multicast_query_delay_expired(struct timer_list *t)
{
}

static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
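
/* Build and send one query. When a group-and-source query is sent with
 * the suppress flag (sflag) set, the source list is split at LMQT: the
 * first pass covers sources whose timers run beyond LMQT, then the
 * again_under_lmqt loop sends a second query for the remaining ones.
 */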
static void __br_multicast_send_query(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

again_under_lmqt:
	skb = br_multicast_alloc_query(brmctx, pmctx, pg, ip_dst, group,
				       with_srcs, over_lmqt, sflag, &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (pmctx) {
		skb->dev = pmctx->port->dev;
		br_multicast_count(brmctx->br, pmctx->port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(pmctx->port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(brmctx, group, skb);
		br_multicast_count(brmctx->br, NULL, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}

static void br_multicast_read_querier(const struct bridge_mcast_querier *querier,
				      struct bridge_mcast_querier *dest)
{
	unsigned int seq;

	memset(dest, 0, sizeof(*dest));
	do {
		seq = read_seqcount_begin(&querier->seq);
		dest->port_ifidx = querier->port_ifidx;
		memcpy(&dest->addr, &querier->addr, sizeof(struct br_ip));
	} while (read_seqcount_retry(&querier->seq, seq));
}

static void br_multicast_update_querier(struct net_bridge_mcast *brmctx,
					struct bridge_mcast_querier *querier,
					int ifindex,
					struct br_ip *saddr)
{
	write_seqcount_begin(&querier->seq);
	querier->port_ifidx = ifindex;
	memcpy(&querier->addr, saddr, sizeof(*saddr));
	write_seqcount_end(&querier->seq);
}

static void br_multicast_send_query(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct bridge_mcast_querier *querier;
	struct br_ip br_group;
	unsigned long time;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    !brmctx->multicast_querier)
		return;

	memset(&br_group.dst, 0, sizeof(br_group.dst));

	if (pmctx ? (own_query == &pmctx->ip4_own_query) :
		    (own_query == &brmctx->ip4_own_query)) {
		querier = &brmctx->ip4_querier;
		other_query = &brmctx->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		querier = &brmctx->ip6_querier;
		other_query = &brmctx->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	if (!other_query || timer_pending(&other_query->timer))
		return;

	/* we're about to select ourselves as querier */
	if (!pmctx && querier->port_ifidx) {
		struct br_ip zeroip = {};

		br_multicast_update_querier(brmctx, querier, 0, &zeroip);
	}

	__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &br_group, false,
				  0, NULL);

	time = jiffies;
	time += own_query->startup_sent < brmctx->multicast_startup_query_count ?
		brmctx->multicast_startup_query_interval :
		brmctx->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}

static void
br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	spin_lock(&br->multicast_lock);
	if (br_multicast_port_ctx_state_stopped(pmctx))
		goto out;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, pmctx, query);

out:
	spin_unlock(&br->multicast_lock);
}

static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query);
}
#endif

static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	bool need_rexmit = false;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		goto out;

	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		goto out;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!brmctx->multicast_querier)
		goto out;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (!other_query || timer_pending(&other_query->timer))
		goto out;

	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 1, NULL);
	}
	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 0, &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(&pg->rexmit_timer, jiffies +
			  brmctx->multicast_last_member_interval);
out:
	spin_unlock(&br->multicast_lock);
}

static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
		.flags = SWITCHDEV_F_DEFER,
		.u.mc_disabled = !value,
	};

	return switchdev_port_attr_set(dev, &attr, extack);
}

void br_multicast_port_ctx_init(struct net_bridge_port *port,
				struct net_bridge_vlan *vlan,
				struct net_bridge_mcast_port *pmctx)
{
	pmctx->port = port;
	pmctx->vlan = vlan;
	pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	timer_setup(&pmctx->ip4_mc_router_timer,
		    br_ip4_multicast_router_expired, 0);
	timer_setup(&pmctx->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&pmctx->ip6_mc_router_timer,
		    br_ip6_multicast_router_expired, 0);
	timer_setup(&pmctx->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
}

void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&pmctx->ip6_mc_router_timer);
#endif
	del_timer_sync(&pmctx->ip4_mc_router_timer);
}

int br_multicast_add_port(struct net_bridge_port *port)
{
	int err;

	port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
	br_multicast_port_ctx_init(port, NULL, &port->multicast_ctx);

	err = br_mc_disabled_update(port->dev,
				    br_opt_get(port->br,
					       BROPT_MULTICAST_ENABLED),
				    NULL);
	if (err && err != -EOPNOTSUPP)
		return err;

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}

void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
	spin_unlock_bh(&br->multicast_lock);
	flush_work(&br->mcast_gc_work);
	br_multicast_port_ctx_deinit(&port->multicast_ctx);
	free_percpu(port->mcast_stats);
}

static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}

static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !netif_running(br->dev))
		return;

	br_multicast_enable(&pmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&pmctx->ip6_own_query);
#endif
	if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) {
		br_ip4_multicast_add_router(brmctx, pmctx);
		br_ip6_multicast_add_router(brmctx, pmctx);
	}

	if (br_multicast_port_ctx_is_vlan(pmctx)) {
		struct net_bridge_port_group *pg;
		u32 n = 0;

		/* The mcast_n_groups counter might be wrong. First,
		 * BR_VLFLAG_MCAST_ENABLED is toggled before temporary entries
		 * are flushed, thus mcast_n_groups after the toggle does not
		 * reflect the true values. And second, permanent entries added
		 * while BR_VLFLAG_MCAST_ENABLED was disabled, are not reflected
		 * either. Thus we have to refresh the counter.
		 */
		hlist_for_each_entry(pg, &pmctx->port->mglist, mglist) {
			if (pg->key.addr.vid == pmctx->vlan->vid)
				n++;
		}
		WRITE_ONCE(pmctx->mdb_n_entries, n);
	}
}

void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock_bh(&br->multicast_lock);
	__br_multicast_enable_port_ctx(&port->multicast_ctx);
	spin_unlock_bh(&br->multicast_lock);
}

static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge_port_group *pg;
	struct hlist_node *n;
	bool del = false;

	hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) &&
		    (!br_multicast_port_ctx_is_vlan(pmctx) ||
		     pg->key.addr.vid == pmctx->vlan->vid))
			br_multicast_find_del_pg(pmctx->port->br, pg);

	del |= br_ip4_multicast_rport_del(pmctx);
	del_timer(&pmctx->ip4_mc_router_timer);
	del_timer(&pmctx->ip4_own_query.timer);
	del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&pmctx->ip6_mc_router_timer);
	del_timer(&pmctx->ip6_own_query.timer);
#endif
	br_multicast_rport_del_notify(pmctx, del);
}

void br_multicast_disable_port(struct net_bridge_port *port)
{
	spin_lock_bh(&port->br->multicast_lock);
	__br_multicast_disable_port_ctx(&port->multicast_ctx);
	spin_unlock_bh(&port->br->multicast_lock);
}

static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;
	int deleted = 0;

	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		if (ent->flags & BR_SGRP_F_DELETE) {
			br_multicast_del_group_src(ent, false);
			deleted++;
		}

	return deleted;
}

static void __grp_src_mod_timer(struct net_bridge_group_src *src,
				unsigned long expires)
{
	mod_timer(&src->timer, expires);
	br_multicast_fwd_src_handle(src);
}

static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx,
					      struct net_bridge_mcast_port *pmctx,
					      struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	u32 lmqc = brmctx->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	lmqt = now + br_multicast_lmqt(brmctx);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			if (ent->timer.expires > lmqt) {
				if (brmctx->multicast_querier &&
				    other_query &&
				    !timer_pending(&other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				__grp_src_mod_timer(ent, lmqt);
			}
		}
	}

	if (!brmctx->multicast_querier ||
	    !other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 1, NULL);

	lmi = now + brmctx->multicast_last_member_interval;
	if (!timer_pending(&pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(&pg->rexmit_timer, lmi);
}

static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	unsigned long now = jiffies, lmi;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (brmctx->multicast_querier &&
	    other_query && !timer_pending(&other_query->timer)) {
		lmi = now + brmctx->multicast_last_member_interval;
		pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 0, NULL);
		if (!timer_pending(&pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(&pg->rexmit_timer, lmi);
	}

	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(&pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx))))
		mod_timer(&pg->timer, now + br_multicast_lmqt(brmctx));
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)     (B)=GMI
 * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)     (B)=GMI
 * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 */
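/* Example: a port group in INCLUDE {S1} receiving ALLOW (S2) ends up
 * in INCLUDE {S1,S2} with S2's source timer armed to GMI, per the
 * table above.
 */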
static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx,
				     struct net_bridge_port_group *pg, void *h_addr,
				     void *srcs, u32 nsrcs, size_t addr_size,
				     int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}

		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Group Timer=GMI
 */
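/* Example: INCLUDE {S1,S2} receiving IS_EX (S2,S3) becomes
 * EXCLUDE ({S2},{S3}): S1 is deleted, S3 is created with a zero timer
 * (blocked), and the group timer is set to GMI by the caller.
 */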
static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	struct br_ip src_ip;
	u32 src_idx;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent)
			ent->flags &= ~BR_SGRP_F_DELETE;
		else
			ent = br_multicast_new_group_src(pg, &src_ip);
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Group Timer=GMI
 */
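/* Example: EXCLUDE ({S1},{S2}) receiving IS_EX (S2,S3) becomes
 * EXCLUDE ({S3},{S2}): S1 (X-A) is deleted, S2 stays blocked and S3
 * (A-X-Y) is created with its timer set to GMI.
 */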
static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent,
						    now + br_multicast_gmi(brmctx));
				changed = true;
			}
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}

static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size,
			       int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				     grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs,
					       addr_size, grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)     (B)=GMI
 *                                                Send Q(G,A-B)
 */
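/* Example: INCLUDE {S1,S2} receiving TO_IN (S2) re-arms S2 to GMI and
 * triggers a group-and-source query Q(G,{S1}) for the sources no
 * longer listed.
 */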
static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_SEND;
			to_send--;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 *                                                Send Q(G,X-A)
 *                                                Send Q(G)
 */
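/* Example: EXCLUDE ({S1},{S2}) receiving TO_IN (S3) becomes
 * EXCLUDE ({S1,S3},{S2}) with S3=GMI, and sends Q(G,{S1}) plus a
 * group query Q(G).
 */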
static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		if (timer_pending(&ent->timer))
			ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			if (timer_pending(&ent->timer)) {
				ent->flags &= ~BR_SGRP_F_SEND;
				to_send--;
			}
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	__grp_send_query_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

static bool br_multicast_toin(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	if (br_multicast_eht_should_del_pg(pg)) {
		pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Send Q(G,A*B)
 *                                                Group Timer=GMI
 */
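/* Example: INCLUDE {S1,S2} receiving TO_EX (S2,S3) becomes
 * EXCLUDE ({S2},{S3}): S1 is deleted, S3 is created blocked, and
 * Q(G,{S2}) is sent for the still-requested sources (A*B).
 */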
static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
				     BR_SGRP_F_SEND;
			to_send++;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
		}
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Send Q(G,A-Y)
 *                                                Group Timer=GMI
 */
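/* Example: EXCLUDE ({S1},{S2}) receiving TO_EX (S2,S3) becomes
 * EXCLUDE ({S3},{S2}): S1 is deleted, S3 inherits the group timer,
 * and Q(G,{S3}) is sent for the sources not already blocked (A-Y).
 */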
static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

static bool br_multicast_toex(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_toex_incl(brmctx, pmctx, pg, h_addr, srcs, nsrcs,
				    addr_size, grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toex_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}

/* State          Msg type      New state         Actions
 * INCLUDE (A)    BLOCK (B)     INCLUDE (A)       Send Q(G,A*B)
 */
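/* Example: INCLUDE {S1,S2} receiving BLOCK (S2,S3) stays in
 * INCLUDE {S1,S2} but sends Q(G,{S2}) to verify whether anyone still
 * wants the blocked source (A*B).
 */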
static bool __grp_src_block_incl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  BLOCK (A)     EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
 *                                                Send Q(G,A-Y)
 */
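/* Example: EXCLUDE ({S1},{S2}) receiving BLOCK (S2,S3) becomes
 * EXCLUDE ({S1,S3},{S2}): S3 is added with the group timer and
 * Q(G,{S3}) is sent for the sources not already blocked (A-Y).
 */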
static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}

static bool br_multicast_block(struct net_bridge_mcast *brmctx,
			       struct net_bridge_mcast_port *pmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_block_excl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	}

	if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
	    br_multicast_eht_should_del_pg(pg)) {
		if (br_multicast_eht_should_del_pg(pg))
			pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}

static struct net_bridge_port_group *
br_multicast_find_port(struct net_bridge_mdb_entry *mp,
		       struct net_bridge_port *p,
		       const unsigned char *src)
{
	struct net_bridge *br __maybe_unused = mp->br;
	struct net_bridge_port_group *pg;

	for (pg = mlock_dereference(mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br))
		if (br_port_group_equal(pg, p, src))
			return pg;

	return NULL;
}
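
/* Walk all group records in an IGMPv3 report. Each record is first
 * bounds-checked, then either treated as a leave (TO_IN/IS_IN with no
 * sources) or added as a group, and finally - for IGMPv3-enabled
 * contexts - run through the per-record source-list state machines
 * above under multicast_lock.
 */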
  2361. static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
  2362. struct net_bridge_mcast_port *pmctx,
  2363. struct sk_buff *skb,
  2364. u16 vid)
  2365. {
  2366. bool igmpv2 = brmctx->multicast_igmp_version == 2;
  2367. struct net_bridge_mdb_entry *mdst;
  2368. struct net_bridge_port_group *pg;
  2369. const unsigned char *src;
  2370. struct igmpv3_report *ih;
  2371. struct igmpv3_grec *grec;
  2372. int i, len, num, type;
  2373. __be32 group, *h_addr;
  2374. bool changed = false;
  2375. int err = 0;
  2376. u16 nsrcs;
  2377. ih = igmpv3_report_hdr(skb);
  2378. num = ntohs(ih->ngrec);
  2379. len = skb_transport_offset(skb) + sizeof(*ih);
  2380. for (i = 0; i < num; i++) {
  2381. len += sizeof(*grec);
  2382. if (!ip_mc_may_pull(skb, len))
  2383. return -EINVAL;
  2384. grec = (void *)(skb->data + len - sizeof(*grec));
  2385. group = grec->grec_mca;
  2386. type = grec->grec_type;
  2387. nsrcs = ntohs(grec->grec_nsrcs);
  2388. len += nsrcs * 4;
  2389. if (!ip_mc_may_pull(skb, len))
  2390. return -EINVAL;
  2391. switch (type) {
  2392. case IGMPV3_MODE_IS_INCLUDE:
  2393. case IGMPV3_MODE_IS_EXCLUDE:
  2394. case IGMPV3_CHANGE_TO_INCLUDE:
  2395. case IGMPV3_CHANGE_TO_EXCLUDE:
  2396. case IGMPV3_ALLOW_NEW_SOURCES:
  2397. case IGMPV3_BLOCK_OLD_SOURCES:
  2398. break;
  2399. default:
  2400. continue;
  2401. }
  2402. src = eth_hdr(skb)->h_source;
  2403. if (nsrcs == 0 &&
  2404. (type == IGMPV3_CHANGE_TO_INCLUDE ||
  2405. type == IGMPV3_MODE_IS_INCLUDE)) {
  2406. if (!pmctx || igmpv2) {
  2407. br_ip4_multicast_leave_group(brmctx, pmctx,
  2408. group, vid, src);
  2409. continue;
  2410. }
  2411. } else {
  2412. err = br_ip4_multicast_add_group(brmctx, pmctx, group,
  2413. vid, src, igmpv2);
  2414. if (err)
  2415. break;
  2416. }
  2417. if (!pmctx || igmpv2)
  2418. continue;
  2419. spin_lock(&brmctx->br->multicast_lock);
  2420. if (!br_multicast_ctx_should_use(brmctx, pmctx))
  2421. goto unlock_continue;
  2422. mdst = br_mdb_ip4_get(brmctx->br, group, vid);
  2423. if (!mdst)
  2424. goto unlock_continue;
  2425. pg = br_multicast_find_port(mdst, pmctx->port, src);
  2426. if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
  2427. goto unlock_continue;
  2428. /* reload grec and host addr */
  2429. grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
  2430. h_addr = &ip_hdr(skb)->saddr;
  2431. switch (type) {
  2432. case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32),
							   type);
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32),
							   type);
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32),
						     type);
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32),
						    type);
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32),
						    type);
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32),
						     type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = brmctx->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	struct mld2_report *mld2r;
	const unsigned char *src;
	struct in6_addr *h_addr;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*mld2r)))
		return -EINVAL;

	mld2r = (struct mld2_report *)icmp6_hdr(skb);
	num = ntohs(mld2r->mld2r_ngrec);
	len = skb_transport_offset(skb) + sizeof(*mld2r);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			if (!pmctx || mldv1) {
				br_ip6_multicast_leave_group(brmctx, pmctx,
							     &grec->grec_mca,
							     vid, src);
				continue;
			}
		} else {
			err = br_ip6_multicast_add_group(brmctx, pmctx,
							 &grec->grec_mca, vid,
							 src, mldv1);
			if (err)
				break;
		}

		if (!pmctx || mldv1)
			continue;

		spin_lock(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip6_get(brmctx->br, &grec->grec_mca, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		h_addr = &ipv6_hdr(skb)->saddr;
		switch (grec->grec_type) {
		case MLD2_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		case MLD2_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}
#endif
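
/* Querier election (RFC 2236 / RFC 2710): the querier with the numerically
 * lowest source address wins. br_multicast_select_querier() below therefore
 * only replaces the currently tracked querier when the new source address
 * compares lower-or-equal, or when no election is in progress (neither our
 * own nor the other-querier timer is pending). For example, if the tracked
 * IPv4 querier is 192.0.2.9 and a query arrives from 192.0.2.1, the tracked
 * querier is updated; one from 192.0.2.50 is ignored while the timers run.
 * The addresses are purely illustrative.
 */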
static bool br_multicast_select_querier(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct br_ip *saddr)
{
	int port_ifidx = pmctx ? pmctx->port->dev->ifindex : 0;
	struct timer_list *own_timer, *other_timer;
	struct bridge_mcast_querier *querier;

	switch (saddr->proto) {
	case htons(ETH_P_IP):
		querier = &brmctx->ip4_querier;
		own_timer = &brmctx->ip4_own_query.timer;
		other_timer = &brmctx->ip4_other_query.timer;
		if (!querier->addr.src.ip4 ||
		    ntohl(saddr->src.ip4) <= ntohl(querier->addr.src.ip4))
			goto update;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		querier = &brmctx->ip6_querier;
		own_timer = &brmctx->ip6_own_query.timer;
		other_timer = &brmctx->ip6_other_query.timer;
		if (ipv6_addr_cmp(&saddr->src.ip6, &querier->addr.src.ip6) <= 0)
			goto update;
		break;
#endif
	default:
		return false;
	}

	if (!timer_pending(own_timer) && !timer_pending(other_timer))
		goto update;

	return false;

update:
	br_multicast_update_querier(brmctx, querier, port_ifidx, saddr);

	return true;
}

static struct net_bridge_port *
__br_multicast_get_querier_port(struct net_bridge *br,
				const struct bridge_mcast_querier *querier)
{
	int port_ifidx = READ_ONCE(querier->port_ifidx);
	struct net_bridge_port *p;
	struct net_device *dev;

	if (port_ifidx == 0)
		return NULL;

	dev = dev_get_by_index_rcu(dev_net(br->dev), port_ifidx);
	if (!dev)
		return NULL;

	p = br_port_get_rtnl_rcu(dev);
	if (!p || p->br != br)
		return NULL;

	return p;
}
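
/* Note: __br_multicast_get_querier_port() resolves the querier's ifindex
 * under RCU (see dev_get_by_index_rcu() and br_port_get_rtnl_rcu() above),
 * so the returned port is only valid within the caller's RCU read-side
 * section, as in br_multicast_dump_querier_state() below.
 */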
size_t br_multicast_querier_state_size(void)
{
	return nla_total_size(0) +		/* nest attribute */
	       nla_total_size(sizeof(__be32)) + /* BRIDGE_QUERIER_IP_ADDRESS */
	       nla_total_size(sizeof(int)) +	/* BRIDGE_QUERIER_IP_PORT */
	       nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IP_OTHER_TIMER */
#if IS_ENABLED(CONFIG_IPV6)
	       nla_total_size(sizeof(struct in6_addr)) + /* BRIDGE_QUERIER_IPV6_ADDRESS */
	       nla_total_size(sizeof(int)) +		 /* BRIDGE_QUERIER_IPV6_PORT */
	       nla_total_size_64bit(sizeof(u64)) +	 /* BRIDGE_QUERIER_IPV6_OTHER_TIMER */
#endif
	       0;
}

/* protected by rtnl or rcu */
int br_multicast_dump_querier_state(struct sk_buff *skb,
				    const struct net_bridge_mcast *brmctx,
				    int nest_attr)
{
	struct bridge_mcast_querier querier = {};
	struct net_bridge_port *p;
	struct nlattr *nest;

	if (!br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return 0;

	nest = nla_nest_start(skb, nest_attr);
	if (!nest)
		return -EMSGSIZE;

	rcu_read_lock();
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip4_other_query.timer))
		goto out_v6;

	br_multicast_read_querier(&brmctx->ip4_querier, &querier);
	if (nla_put_in_addr(skb, BRIDGE_QUERIER_IP_ADDRESS,
			    querier.addr.src.ip4)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	if (timer_pending(&brmctx->ip4_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IP_OTHER_TIMER,
			       br_timer_value(&brmctx->ip4_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IP_PORT, p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}

out_v6:
#if IS_ENABLED(CONFIG_IPV6)
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip6_other_query.timer))
		goto out;

	br_multicast_read_querier(&brmctx->ip6_querier, &querier);
	if (nla_put_in6_addr(skb, BRIDGE_QUERIER_IPV6_ADDRESS,
			     &querier.addr.src.ip6)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	if (timer_pending(&brmctx->ip6_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IPV6_OTHER_TIMER,
			       br_timer_value(&brmctx->ip6_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IPV6_PORT,
			       p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}
out:
#endif
	rcu_read_unlock();
	nla_nest_end(skb, nest);
	if (!nla_len(nest))
		nla_nest_cancel(skb, nest);

	return 0;

out_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
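
/* "Other querier present" bookkeeping: when a query from another, winning
 * querier is seen, the other-querier timer below is re-armed for
 * multicast_querier_interval. If that timer wasn't already pending, the
 * delay timer is additionally armed for the query's max response time; see
 * br_multicast_query_delay_expired() (set up in br_multicast_ctx_init())
 * for what happens when it fires. This is a hedged reading of the code,
 * loosely matching RFC 3376's Other Querier Present Interval behaviour.
 */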
static void
br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		mod_timer(&query->delay_timer, jiffies + max_delay);

	mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
}

static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}

static struct net_bridge_port *
br_multicast_rport_from_node(struct net_bridge_mcast *brmctx,
			     struct hlist_head *mc_router_list,
			     struct hlist_node *rlist)
{
	struct net_bridge_mcast_port *pmctx;

#if IS_ENABLED(CONFIG_IPV6)
	if (mc_router_list == &brmctx->ip6_mc_router_list)
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip6_rlist);
	else
#endif
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip4_rlist);

	return pmctx->port;
}

static struct hlist_node *
br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx,
			    struct net_bridge_port *port,
			    struct hlist_head *mc_router_list)
{
	struct hlist_node *slot = NULL;
	struct net_bridge_port *p;
	struct hlist_node *rlist;

	hlist_for_each(rlist, mc_router_list) {
		p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist);

		if ((unsigned long)port >= (unsigned long)p)
			break;

		slot = rlist;
	}

	return slot;
}
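
/* The router lists are kept sorted by descending net_bridge_port pointer
 * value (an arbitrary but stable total order), and
 * br_multicast_get_rport_slot() returns the node to insert behind. E.g.
 * with router ports at (illustrative) addresses 0x...900 > 0x...500 >
 * 0x...100, a port at 0x...700 is inserted behind the 0x...900 node; a
 * port at 0x...950 gets NULL back and is added at the head.
 */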
static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
					   struct hlist_node *rnode)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (rnode != &pmctx->ip6_rlist)
		return hlist_unhashed(&pmctx->ip6_rlist);
	else
		return hlist_unhashed(&pmctx->ip4_rlist);
#else
	return true;
#endif
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct hlist_node *rlist,
				    struct hlist_head *mc_router_list)
{
	struct hlist_node *slot;

	if (!hlist_unhashed(rlist))
		return;

	slot = br_multicast_get_rport_slot(brmctx, pmctx->port, mc_router_list);

	if (slot)
		hlist_add_behind_rcu(rlist, slot);
	else
		hlist_add_head_rcu(rlist, mc_router_list);

	/* For backwards compatibility for now, only notify if we
	 * switched from no IPv4/IPv6 multicast router to a new
	 * IPv4 or IPv6 multicast router.
	 */
	if (br_multicast_no_router_otherpf(pmctx, rlist)) {
		br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_NEWMDB);
		br_port_mc_router_state_change(pmctx->port, true);
	}
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip4_rlist,
				&brmctx->ip4_mc_router_list);
}

/* Add port to router_list
 *  list is maintained ordered by pointer value
 *  and locked by br->multicast_lock and RCU
 */
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip6_rlist,
				&brmctx->ip6_mc_router_list);
#endif
}

static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct timer_list *timer,
				     struct hlist_node *rlist,
				     struct hlist_head *mc_router_list)
{
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		return;

	if (!pmctx) {
		if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			if (!br_ip4_multicast_is_router(brmctx) &&
			    !br_ip6_multicast_is_router(brmctx))
				br_mc_router_state_change(brmctx->br, true);
			mod_timer(timer, now + brmctx->multicast_querier_interval);
		}
		return;
	}

	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
	mod_timer(timer, now + brmctx->multicast_querier_interval);
}
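
/* br_multicast_mark_router() in short: with no port context the bridge
 * itself is (re)marked as a router for multicast_querier_interval, but only
 * in the default MDB_RTR_TYPE_TEMP_QUERY mode. For ports, DISABLED and PERM
 * modes are left untouched (PERM ports are already on the list, DISABLED
 * ports never join it); any other port is added as a temporary router port
 * and its expiry timer refreshed.
 */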
static void br_ip4_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
	struct timer_list *timer = &brmctx->ip4_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip4_mc_router_timer;
		rlist = &pmctx->ip4_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip4_mc_router_list);
}

static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct timer_list *timer = &brmctx->ip6_mc_router_timer;
	struct hlist_node *rlist = NULL;

	if (pmctx) {
		timer = &pmctx->ip6_mc_router_timer;
		rlist = &pmctx->ip6_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip6_mc_router_list);
#endif
}

static void
br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip4_multicast_mark_router(brmctx, pmctx);
}

#if IS_ENABLED(CONFIG_IPV6)
static void
br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip6_multicast_mark_router(brmctx, pmctx);
}
#endif

static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
				   struct net_bridge_mcast_port *pmctx,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs ||
		    (brmctx->multicast_igmp_version == 3 && group &&
		     ih3->suppress))
			goto out;

		max_delay = ih3->code ?
			IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		saddr.proto = htons(ETH_P_IP);
		saddr.src.ip4 = iph->saddr;

		br_ip4_multicast_query_received(brmctx, pmctx,
						&brmctx->ip4_other_query,
						&saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;

	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
}
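
/* Worked example for the max response time above: an IGMPv2 query carries
 * it in units of 1/10 s (IGMP_TIMER_SCALE == 10), so ih->code == 100 yields
 * max_delay = 100 * (HZ / 10) = 10 * HZ, i.e. 10 seconds. For a
 * group-specific query the membership timers are then only shortened to
 * now + 2 * 10 s = now + 20 s (default multicast_last_member_count of 2)
 * when they would otherwise fire later than that.
 */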
#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	if (transport_len == sizeof(*mld)) {
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		if (brmctx->multicast_mld_version == 2 &&
		    !ipv6_addr_any(&mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 = ipv6_hdr(skb)->saddr;

		br_ip6_multicast_query_received(brmctx, pmctx,
						&brmctx->ip6_other_query,
						&saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
	return err;
}
#endif

static void
br_multicast_leave_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_mdb_ip_get(brmctx->br, group);
	if (!mp)
		goto out;

	if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	if (timer_pending(&other_query->timer))
		goto out;

	if (brmctx->multicast_querier) {
		__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + brmctx->multicast_last_member_count *
				 brmctx->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, brmctx->br);
		     p != NULL && pmctx != NULL;
		     p = mlock_dereference(p->next, brmctx->br)) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + brmctx->multicast_last_member_count *
		     brmctx->multicast_last_member_interval;

	if (!pmctx) {
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	for (p = mlock_dereference(mp->ports, brmctx->br);
	     p != NULL;
	     p = mlock_dereference(p->next, brmctx->br)) {
		if (p->key.port != pmctx->port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
}
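
/* Leave handling in short: with BR_MULTICAST_FAST_LEAVE set on the port,
 * the matching non-permanent port group is deleted immediately (useful when
 * exactly one host sits behind the port). Otherwise, if we are the active
 * querier, a group-specific query is sent and the group/port timers are
 * shortened to last_member_count * last_member_interval (2 * 1 s with the
 * defaults), so forwarding stops quickly unless another listener reports.
 */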
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv4_is_local_multicast(group))
		return;

	own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip4_other_query,
				 own_query, src);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct br_ip br_group;
	struct bridge_mcast_own_query *own_query;

	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	own_query = pmctx ? &pmctx->ip6_own_query : &brmctx->ip6_own_query;

	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip6_other_query,
				 own_query, src);
}
#endif

static void br_multicast_err_count(const struct net_bridge *br,
				   const struct net_bridge_port *p,
				   __be16 proto)
{
	struct bridge_mcast_stats __percpu *stats;
	struct bridge_mcast_stats *pstats;

	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	pstats = this_cpu_ptr(stats);

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		pstats->mstats.igmp_parse_errors++;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		pstats->mstats.mld_parse_errors++;
		break;
#endif
	}
	u64_stats_update_end(&pstats->syncp);
}

static void br_multicast_pim(struct net_bridge_mcast *brmctx,
			     struct net_bridge_mcast_port *pmctx,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}

static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct sk_buff *skb)
{
	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
		return -ENOMSG;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);

	return 0;
}

static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(brmctx, pmctx, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(brmctx, pmctx, ih->group, vid,
						 src, true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(brmctx, pmctx, ih->group, vid, src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct sk_buff *skb)
{
	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip6_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}

static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG || err == -ENODATA) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		if (err == -ENODATA &&
		    ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
			br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb);

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(brmctx, pmctx, &mld->mld_mca,
						 vid, src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(brmctx, pmctx, &mld->mld_mca, vid,
					     src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif

int br_multicast_rcv(struct net_bridge_mcast **brmctx,
		     struct net_bridge_mcast_port **pmctx,
		     struct net_bridge_vlan *vlan,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get((*brmctx)->br, BROPT_MULTICAST_ENABLED))
		return 0;

	if (br_opt_get((*brmctx)->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) && vlan) {
		const struct net_bridge_vlan *masterv;

		/* the vlan has the master flag set only when transmitting
		 * through the bridge device
		 */
		if (br_vlan_is_master(vlan)) {
			masterv = vlan;
			*brmctx = &vlan->br_mcast_ctx;
			*pmctx = NULL;
		} else {
			masterv = vlan->brvlan;
			*brmctx = &vlan->brvlan->br_mcast_ctx;
			*pmctx = &vlan->port_mcast_ctx;
		}

		if (!(masterv->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
			return 0;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(*brmctx, *pmctx, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(*brmctx, *pmctx, skb, vid);
		break;
#endif
	}

	return ret;
}
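
/* br_multicast_rcv() is the snooping entry point, called from the bridge
 * input path (br_handle_frame_finish() in br_input.c) before the frame is
 * forwarded. On return, BR_INPUT_SKB_CB(skb)->mrouters_only tells the
 * forwarding code whether the frame should only reach router ports. When
 * per-vlan snooping is active, the context pointers are rewritten here to
 * the vlan's own bridge/port multicast contexts, which the caller then
 * continues to use.
 */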
static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (br_multicast_ctx_vlan_disabled(brmctx))
		goto out;

	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, NULL, query);

out:
	spin_unlock(&brmctx->br->multicast_lock);
}

static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip4_own_query,
				   &brmctx->ip4_querier);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip6_own_query,
				   &brmctx->ip6_querier);
}
#endif

static void br_multicast_gc_work(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     mcast_gc_work);
	HLIST_HEAD(deleted_head);

	spin_lock_bh(&br->multicast_lock);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_gc(&deleted_head);
}

void br_multicast_ctx_init(struct net_bridge *br,
			   struct net_bridge_vlan *vlan,
			   struct net_bridge_mcast *brmctx)
{
	brmctx->br = br;
	brmctx->vlan = vlan;
	brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	brmctx->multicast_last_member_count = 2;
	brmctx->multicast_startup_query_count = 2;

	brmctx->multicast_last_member_interval = HZ;
	brmctx->multicast_query_response_interval = 10 * HZ;
	brmctx->multicast_startup_query_interval = 125 * HZ / 4;
	brmctx->multicast_query_interval = 125 * HZ;
	brmctx->multicast_querier_interval = 255 * HZ;
	brmctx->multicast_membership_interval = 260 * HZ;

	brmctx->ip4_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
	brmctx->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	brmctx->multicast_mld_version = 1;
	brmctx->ip6_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
#endif

	timer_setup(&brmctx->ip4_mc_router_timer,
		    br_ip4_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip4_other_query.delay_timer,
		    br_multicast_query_delay_expired, 0);
	timer_setup(&brmctx->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&brmctx->ip6_mc_router_timer,
		    br_ip6_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip6_other_query.delay_timer,
		    br_multicast_query_delay_expired, 0);
	timer_setup(&brmctx->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
}
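
/* The defaults above follow the IGMPv2/v3 timer relationships (RFC 2236 /
 * RFC 3376): with robustness 2, query interval 125 s and query response
 * interval 10 s,
 *
 *	querier interval    = 2 * 125 s + 10 s / 2 = 255 s
 *	membership interval = 2 * 125 s + 10 s     = 260 s
 *
 * and the startup query interval is a quarter of the query interval
 * (125 s / 4 = 31.25 s).
 */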
void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
{
	__br_multicast_stop(brmctx);
}

void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br_multicast_ctx_init(br, NULL, &br->multicast_ctx);

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->mcast_gc_list);
	INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}

static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (!in_dev)
		return;

	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif

void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}

static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}

#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif

void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}

static void __br_multicast_open_query(struct net_bridge *br,
				      struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	mod_timer(&query->timer, jiffies);
}

static void __br_multicast_open(struct net_bridge_mcast *brmctx)
{
	__br_multicast_open_query(brmctx->br, &brmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open_query(brmctx->br, &brmctx->ip6_own_query);
#endif
}

void br_multicast_open(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_open(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_open(&br->multicast_ctx);
	}
}

static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
{
	del_timer_sync(&brmctx->ip4_mc_router_timer);
	del_timer_sync(&brmctx->ip4_other_query.timer);
	del_timer_sync(&brmctx->ip4_other_query.delay_timer);
	del_timer_sync(&brmctx->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&brmctx->ip6_mc_router_timer);
	del_timer_sync(&brmctx->ip6_other_query.timer);
	del_timer_sync(&brmctx->ip6_other_query.delay_timer);
	del_timer_sync(&brmctx->ip6_own_query.timer);
#endif
}

void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge *br;

	/* it's okay to check for the flag without the multicast lock because it
	 * can only change under RTNL -> multicast_lock, we need the latter to
	 * sync with timers and packets
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED))
		return;

	if (br_vlan_is_master(vlan)) {
		br = vlan->br;

		if (!br_vlan_is_brentry(vlan) ||
		    (on &&
		     br_multicast_ctx_vlan_global_disabled(&vlan->br_mcast_ctx)))
			return;

		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		spin_unlock_bh(&br->multicast_lock);

		if (on)
			__br_multicast_open(&vlan->br_mcast_ctx);
		else
			__br_multicast_stop(&vlan->br_mcast_ctx);
	} else {
		struct net_bridge_mcast *brmctx;

		brmctx = br_multicast_port_ctx_get_global(&vlan->port_mcast_ctx);
		if (on && br_multicast_ctx_vlan_global_disabled(brmctx))
			return;

		br = vlan->port->br;
		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		if (on)
			__br_multicast_enable_port_ctx(&vlan->port_mcast_ctx);
		else
			__br_multicast_disable_port_ctx(&vlan->port_mcast_ctx);
		spin_unlock_bh(&br->multicast_lock);
	}
}

static void br_multicast_toggle_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge_port *p;

	if (WARN_ON_ONCE(!br_vlan_is_master(vlan)))
		return;

	list_for_each_entry(p, &vlan->br->port_list, list) {
		struct net_bridge_vlan *vport;

		vport = br_vlan_find(nbp_vlan_group(p), vlan->vid);
		if (!vport)
			continue;
		br_multicast_toggle_one_vlan(vport, on);
	}

	if (br_vlan_is_brentry(vlan))
		br_multicast_toggle_one_vlan(vlan, on);
}

int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
				      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	struct net_bridge_port *p;

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) == on)
		return 0;

	if (on && !br_opt_get(br, BROPT_VLAN_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot enable multicast vlan snooping with vlan filtering disabled");
		return -EINVAL;
	}

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	br_opt_toggle(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED, on);

	/* disable/enable non-vlan mcast contexts based on vlan snooping */
	if (on)
		__br_multicast_stop(&br->multicast_ctx);
	else
		__br_multicast_open(&br->multicast_ctx);
	list_for_each_entry(p, &br->port_list, list) {
		if (on)
			br_multicast_disable_port(p);
		else
			br_multicast_enable_port(p);
	}

	list_for_each_entry(vlan, &vg->vlan_list, vlist)
		br_multicast_toggle_vlan(vlan, on);

	return 0;
}

bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on)
{
	ASSERT_RTNL();

	/* BR_VLFLAG_GLOBAL_MCAST_ENABLED relies on eventual consistency and
	 * requires only RTNL to change
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
		return false;

	vlan->priv_flags ^= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	br_multicast_toggle_vlan(vlan, on);

	return true;
}

void br_multicast_stop(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_stop(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_stop(&br->multicast_ctx);
	}
}

void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	HLIST_HEAD(deleted_head);
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
		br_multicast_del_mdb_entry(mp);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_ctx_deinit(&br->multicast_ctx);
	br_multicast_gc(&deleted_head);
	cancel_work_sync(&br->mcast_gc_work);

	rcu_barrier();
}
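
/* The rcu_barrier() above waits for RCU callbacks queued during the mdb
 * teardown to complete before the bridge device itself is torn down; a
 * hedged reading of why the barrier sits at the end of
 * br_multicast_dev_del() rather than earlier.
 */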
int br_multicast_set_router(struct net_bridge_mcast *brmctx, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&brmctx->br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		br_mc_router_state_change(brmctx->br, val == MDB_RTR_TYPE_PERM);
		del_timer(&brmctx->ip4_mc_router_timer);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&brmctx->ip6_mc_router_timer);
#endif
		brmctx->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(brmctx->br, false);
		brmctx->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&brmctx->br->multicast_lock);

	return err;
}

static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted)
{
	if (!deleted)
		return;

	/* For backwards compatibility for now, only notify if there is
	 * no multicast router anymore for both IPv4 and IPv6.
	 */
	if (!hlist_unhashed(&pmctx->ip4_rlist))
		return;
#if IS_ENABLED(CONFIG_IPV6)
	if (!hlist_unhashed(&pmctx->ip6_rlist))
		return;
#endif

	br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_DELMDB);
	br_port_mc_router_state_change(pmctx->port, false);

	/* don't allow timer refresh */
	if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP)
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}

int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx,
				 unsigned long val)
{
	struct net_bridge_mcast *brmctx;
	unsigned long now = jiffies;
	int err = -EINVAL;
	bool del = false;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	spin_lock_bh(&brmctx->br->multicast_lock);
	if (pmctx->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) {
			mod_timer(&pmctx->ip4_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#if IS_ENABLED(CONFIG_IPV6)
			mod_timer(&pmctx->ip6_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#endif
		}
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		pmctx->multicast_router = MDB_RTR_TYPE_DISABLED;
		del |= br_ip4_multicast_rport_del(pmctx);
		del_timer(&pmctx->ip4_mc_router_timer);
		del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&pmctx->ip6_mc_router_timer);
#endif
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		del |= br_ip4_multicast_rport_del(pmctx);
		del |= br_ip6_multicast_rport_del(pmctx);
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_PERM:
		pmctx->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&pmctx->ip4_mc_router_timer);
		br_ip4_multicast_add_router(brmctx, pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&pmctx->ip6_mc_router_timer);
#endif
		br_ip6_multicast_add_router(brmctx, pmctx);
		break;
	case MDB_RTR_TYPE_TEMP:
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP;
		br_ip4_multicast_mark_router(brmctx, pmctx);
		br_ip6_multicast_mark_router(brmctx, pmctx);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return err;
}

int br_multicast_set_vlan_router(struct net_bridge_vlan *v, u8 mcast_router)
{
	int err;

	if (br_vlan_is_master(v))
		err = br_multicast_set_router(&v->br_mcast_ctx, mcast_router);
	else
		err = br_multicast_set_port_router(&v->port_mcast_ctx,
						   mcast_router);

	return err;
}

static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	if (!br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

	__br_multicast_open_query(brmctx->br, query);

	rcu_read_lock();
	list_for_each_entry_rcu(port, &brmctx->br->port_list, list) {
		struct bridge_mcast_own_query *ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
		struct bridge_mcast_own_query *ip6_own_query;
#endif

		if (br_multicast_port_ctx_state_stopped(&port->multicast_ctx))
			continue;

		if (br_multicast_ctx_is_vlan(brmctx)) {
			struct net_bridge_vlan *vlan;

			vlan = br_vlan_find(nbp_vlan_group_rcu(port),
					    brmctx->vlan->vid);
			if (!vlan ||
			    br_multicast_port_ctx_state_stopped(&vlan->port_mcast_ctx))
				continue;

			ip4_own_query = &vlan->port_mcast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &vlan->port_mcast_ctx.ip6_own_query;
#endif
		} else {
			ip4_own_query = &port->multicast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &port->multicast_ctx.ip6_own_query;
#endif
		}

		if (query == &brmctx->ip4_own_query)
			br_multicast_enable(ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(ip6_own_query);
#endif
	}
	rcu_read_unlock();
}

int br_multicast_toggle(struct net_bridge *br, unsigned long val,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_port *port;
	bool change_snoopers = false;
	int err = 0;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	err = br_mc_disabled_update(br->dev, val, extack);
	if (err == -EOPNOTSUPP)
		err = 0;
	if (err)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		change_snoopers = true;
		goto unlock;
	}

	if (!netif_running(br->dev))
		goto unlock;

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port_ctx(&port->multicast_ctx);

	change_snoopers = true;

unlock:
	spin_unlock_bh(&br->multicast_lock);

	/* br_multicast_join_snoopers has the potential to cause
	 * an MLD Report/Leave to be delivered to br_multicast_rcv,
	 * which would in turn call br_multicast_add_group, which would
	 * attempt to acquire multicast_lock. This function should be
	 * called after the lock has been released to avoid deadlocks on
	 * multicast_lock.
	 *
	 * br_multicast_leave_snoopers does not have the problem since
	 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
	 * returns without calling br_multicast_ipv4/6_rcv if it's not
	 * enabled. Moved both functions out just for symmetry.
	 */
	if (change_snoopers) {
		if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
			br_multicast_join_snoopers(br);
		else
			br_multicast_leave_snoopers(br);
	}

	return err;
}

bool br_multicast_enabled(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
}
EXPORT_SYMBOL_GPL(br_multicast_enabled);

bool br_multicast_router(const struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	bool is_router;

	spin_lock_bh(&br->multicast_lock);
	is_router = br_multicast_is_router(&br->multicast_ctx, NULL);
	spin_unlock_bh(&br->multicast_lock);

	return is_router;
}
EXPORT_SYMBOL_GPL(br_multicast_router);

int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&brmctx->br->multicast_lock);
	if (brmctx->multicast_querier == val)
		goto unlock;

	WRITE_ONCE(brmctx->multicast_querier, val);
	if (!val)
		goto unlock;

	max_delay = brmctx->multicast_query_response_interval;

	if (!timer_pending(&brmctx->ip4_other_query.timer))
		mod_timer(&brmctx->ip4_other_query.delay_timer,
			  jiffies + max_delay);

	br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&brmctx->ip6_other_query.timer))
		mod_timer(&brmctx->ip6_other_query.delay_timer,
			  jiffies + max_delay);

	br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}
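
/* Enabling the querier arms the other-query delay timer for one query
 * response interval (10 s by default) when no other querier is currently
 * pending, and kicks the own-query state machine per address family via
 * br_multicast_start_querier(). What the delay timer does on expiry lives
 * in br_multicast_query_delay_expired(), set up in br_multicast_ctx_init().
 */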
int br_multicast_set_igmp_version(struct net_bridge_mcast *brmctx,
				  unsigned long val)
{
	/* Currently we support only version 2 and 3 */
	switch (val) {
	case 2:
	case 3:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&brmctx->br->multicast_lock);
	brmctx->multicast_igmp_version = val;
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx,
				 unsigned long val)
{
	/* Currently we support version 1 and 2 */
	switch (val) {
	case 1:
	case 2:
		break;
	default:
		return -EINVAL;
	}

	spin_lock_bh(&brmctx->br->multicast_lock);
	brmctx->multicast_mld_version = val;
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}
#endif

void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
				  unsigned long val)
{
	unsigned long intvl_jiffies = clock_t_to_jiffies(val);

	if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) {
		br_info(brmctx->br,
			"trying to set multicast query interval below minimum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN),
			jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN));
		intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
	}

	brmctx->multicast_query_interval = intvl_jiffies;
}

void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
					  unsigned long val)
{
	unsigned long intvl_jiffies = clock_t_to_jiffies(val);

	if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) {
		br_info(brmctx->br,
			"trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN),
			jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN));
		intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
	}

	brmctx->multicast_startup_query_interval = intvl_jiffies;
}

/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev: The bridge port adjacent to which to retrieve addresses
 * @br_ip_list: The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				goto unlock;

			entry->addr = group->key.addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
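
/* Caller-side sketch for the export above (a hypothetical consumer in the
 * style of batman-adv): the list must be initialized beforehand and the
 * entries freed afterwards, as the kernel-doc notes.
 *
 *	LIST_HEAD(mc_list);
 *	struct br_ip_list *entry, *tmp;
 *	int n;
 *
 *	n = br_multicast_list_adjacent(port_dev, &mc_list);
 *	list_for_each_entry_safe(entry, tmp, &mc_list, list) {
 *		use_address(&entry->addr);	// hypothetical helper
 *		list_del(&entry->list);
 *		kfree(entry);
 *	}
 */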

/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists anywhere on the bridged link layer.
 * Otherwise returns false.
 */
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct ethhdr eth;
	bool ret = false;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	memset(&eth, 0, sizeof(eth));
	eth.h_proto = htons(proto);

	ret = br_multicast_querier_exists(&br->multicast_ctx, &eth, NULL);

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
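
/* Editorial sketch, not part of the original file: a caller (e.g. a mesh or
 * overlay protocol deciding whether multicast reports may safely be
 * suppressed) might probe per address family; "dev" is an assumed
 * bridge-port netdev:
 *
 *	bool igmp_querier = br_multicast_has_querier_anywhere(dev, ETH_P_IP);
 *	bool mld_querier = br_multicast_has_querier_anywhere(dev, ETH_P_IPV6);
 */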

/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast *brmctx;
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;
	int port_ifidx;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;
	brmctx = &br->multicast_ctx;

	switch (proto) {
	case ETH_P_IP:
		port_ifidx = brmctx->ip4_querier.port_ifidx;
		if (!timer_pending(&brmctx->ip4_other_query.timer) ||
		    port_ifidx == port->dev->ifindex)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		port_ifidx = brmctx->ip6_querier.port_ifidx;
		if (!timer_pending(&brmctx->ip6_other_query.timer) ||
		    port_ifidx == port->dev->ifindex)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
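
/* Editorial sketch, not part of the original file: unlike the "anywhere"
 * variant, this helper answers a narrower question -- whether traffic that
 * enters via "dev" would still reach a selected querier through one of the
 * bridge's *other* ports:
 *
 *	if (br_multicast_has_querier_adjacent(dev, ETH_P_IP))
 *		// an IGMP querier sits behind another bridge port
 */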

/**
 * br_multicast_has_router_adjacent - Checks for a router behind a bridge port
 * @dev: The bridge port adjacent to which to check for a multicast router
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a multicast router is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_router_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	port = br_port_get_check_rcu(dev);
	if (!port)
		goto unlock;

	brmctx = &port->br->multicast_ctx;
	switch (proto) {
	case ETH_P_IP:
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
					 ip4_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
					 ip6_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#endif
	default:
		/* when compiled without IPv6 support, be conservative and
		 * always assume presence of an IPv6 multicast router
		 */
		ret = true;
	}

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent);
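
/* Editorial sketch, not part of the original file: note the conservative
 * default case above -- on a kernel built without CONFIG_IPV6, an
 * ETH_P_IPV6 query reports true.  A caller gating multicast optimizations
 * must therefore treat "true" as "a router may be present":
 *
 *	if (br_multicast_has_router_adjacent(dev, ETH_P_IPV6))
 *		// keep forwarding IPv6 multicast towards the bridge
 */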

static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}

void br_multicast_count(struct net_bridge *br,
			const struct net_bridge_port *p,
			const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	/* if multicast_disabled is true then igmp type can't be set */
	if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, skb, type, dir);
}
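
/* Editorial sketch, not part of the original file: a typical call site in
 * the bridge forwarding path counts a packet using the IGMP/MLD type cached
 * in the skb control block during snooping (br_multicast_igmp_type() is the
 * accessor declared in br_private.h):
 *
 *	br_multicast_count(br, p, skb, br_multicast_igmp_type(skb),
 *			   BR_MCAST_DIR_TX);
 */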

int br_multicast_init_stats(struct net_bridge *br)
{
	br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!br->mcast_stats)
		return -ENOMEM;

	return 0;
}

void br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}

/* noinline for https://llvm.org/pr45802#c9 */
static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
{
	dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
	dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}

void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}
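
/* Editorial sketch, not part of the original file: retrieving totals.  Per
 * the branch above, a NULL port yields the bridge-global counters:
 *
 *	struct br_mcast_stats stats;
 *	u64 rx_reports;
 *
 *	br_multicast_get_stats(br, NULL, &stats);
 *	rx_reports = stats.igmp_v2reports[BR_MCAST_DIR_RX];
 */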

int br_mdb_hash_init(struct net_bridge *br)
{
	int err;

	err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
	if (err)
		return err;

	err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
	if (err) {
		rhashtable_destroy(&br->sg_port_tbl);
		return err;
	}

	return 0;
}

void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->sg_port_tbl);
	rhashtable_destroy(&br->mdb_hash_tbl);
}
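
/* Editorial sketch, not part of the original file: the two rhashtables are
 * created and torn down as a pair.  A hypothetical setup path would mirror
 * the unwind already done inside br_mdb_hash_init() on partial failure:
 *
 *	err = br_mdb_hash_init(br);
 *	if (err)
 *		return err;
 *	...
 *	br_mdb_hash_fini(br);
 */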