/* NOTE(review): removed non-source scrape artifact (file-size banner and
 * concatenated line-number digit runs from the original HTML viewer).
 */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Bridge multicast support.
 *
 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
 */
  7. #include <linux/err.h>
  8. #include <linux/export.h>
  9. #include <linux/if_ether.h>
  10. #include <linux/igmp.h>
  11. #include <linux/in.h>
  12. #include <linux/jhash.h>
  13. #include <linux/kernel.h>
  14. #include <linux/log2.h>
  15. #include <linux/netdevice.h>
  16. #include <linux/netfilter_bridge.h>
  17. #include <linux/random.h>
  18. #include <linux/rculist.h>
  19. #include <linux/skbuff.h>
  20. #include <linux/slab.h>
  21. #include <linux/timer.h>
  22. #include <linux/inetdevice.h>
  23. #include <linux/mroute.h>
  24. #include <net/ip.h>
  25. #include <net/switchdev.h>
  26. #if IS_ENABLED(CONFIG_IPV6)
  27. #include <linux/icmpv6.h>
  28. #include <net/ipv6.h>
  29. #include <net/mld.h>
  30. #include <net/ip6_checksum.h>
  31. #include <net/addrconf.h>
  32. #endif
  33. #include <trace/events/bridge.h>
  34. #include "br_private.h"
  35. #include "br_private_mcast_eht.h"
/* rhashtable parameters for the bridge multicast database (MDB): entries
 * are keyed by their full struct br_ip group address, so the key must be
 * fully zero-initialized (including padding) before lookups/inserts.
 */
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};
/* rhashtable parameters for the S,G per-port table: entries are keyed by
 * the (port, address) pair in struct net_bridge_port_group_sg_key.
 */
static const struct rhashtable_params br_sg_port_rht_params = {
	.head_offset = offsetof(struct net_bridge_port_group, rhnode),
	.key_offset = offsetof(struct net_bridge_port_group, key),
	.key_len = sizeof(struct net_bridge_port_group_sg_key),
	.automatic_shrinking = true,
};
/* Forward declarations for helpers defined later in this file. */
static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query);
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src);
static void br_multicast_port_group_rexmit(struct timer_list *t);
static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted);
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx);
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid, const unsigned char *src);
#endif
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked);
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg);
static void __br_multicast_stop(struct net_bridge_mcast *brmctx);
static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack);
  81. static struct net_bridge_port_group *
  82. br_sg_port_find(struct net_bridge *br,
  83. struct net_bridge_port_group_sg_key *sg_p)
  84. {
  85. lockdep_assert_held_once(&br->multicast_lock);
  86. return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
  87. br_sg_port_rht_params);
  88. }
/* MDB lookup for the fast path; caller must be in an RCU read-side
 * critical section.
 */
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}
/* MDB lookup for callers that hold br->multicast_lock; a short RCU
 * read-side section is taken just around the hash lookup itself.
 */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
					   struct br_ip *dst)
{
	struct net_bridge_mdb_entry *ent;

	lockdep_assert_held_once(&br->multicast_lock);

	rcu_read_lock();
	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
	rcu_read_unlock();

	return ent;
}
  104. static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
  105. __be32 dst, __u16 vid)
  106. {
  107. struct br_ip br_dst;
  108. memset(&br_dst, 0, sizeof(br_dst));
  109. br_dst.dst.ip4 = dst;
  110. br_dst.proto = htons(ETH_P_IP);
  111. br_dst.vid = vid;
  112. return br_mdb_ip_get(br, &br_dst);
  113. }
  114. #if IS_ENABLED(CONFIG_IPV6)
  115. static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
  116. const struct in6_addr *dst,
  117. __u16 vid)
  118. {
  119. struct br_ip br_dst;
  120. memset(&br_dst, 0, sizeof(br_dst));
  121. br_dst.dst.ip6 = *dst;
  122. br_dst.proto = htons(ETH_P_IPV6);
  123. br_dst.vid = vid;
  124. return br_mdb_ip_get(br, &br_dst);
  125. }
  126. #endif
/* Find the MDB entry used to forward @skb, or NULL to fall back to
 * flooding.  For IGMPv3 (resp. MLDv2) an exact S,G lookup is tried first
 * and, on miss, the source is cleared and the *,G entry is looked up.
 * Unknown ethertypes are matched by destination MAC address.
 * Must run in an RCU read-side section (uses br_mdb_ip_get_rcu()).
 */
struct net_bridge_mdb_entry *
br_mdb_entry_skb_get(struct net_bridge_mcast *brmctx, struct sk_buff *skb,
		     u16 vid)
{
	struct net_bridge *br = brmctx->br;
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return NULL;

	/* IGMP/MLD control traffic is handled separately, never forwarded
	 * via an MDB entry.
	 */
	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.dst.ip4 = ip_hdr(skb)->daddr;
		if (brmctx->multicast_igmp_version == 3) {
			struct net_bridge_mdb_entry *mdb;

			ip.src.ip4 = ip_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			/* no S,G match - fall back to the *,G lookup */
			ip.src.ip4 = 0;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.dst.ip6 = ipv6_hdr(skb)->daddr;
		if (brmctx->multicast_mld_version == 2) {
			struct net_bridge_mdb_entry *mdb;

			ip.src.ip6 = ipv6_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			/* no S,G match - fall back to the *,G lookup */
			memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
		}
		break;
#endif
	default:
		ip.proto = 0;
		ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
	}

	return br_mdb_ip_get_rcu(br, &ip);
}
/* IMPORTANT: this function must be used only when the contexts cannot be
 * passed down (e.g. timer) and must be used for read-only purposes because
 * the vlan snooping option can change, so it can return any context
 * (non-vlan or vlan). Its initial intended purpose is to read timer values
 * from the *current* context based on the option. At worst that could lead
 * to inconsistent timers when the contexts are changed, i.e. src timer
 * which needs to re-arm with a specific delay taken from the old context
 */
static struct net_bridge_mcast_port *
br_multicast_pg_to_port_ctx(const struct net_bridge_port_group *pg)
{
	/* default: the port's own (non-vlan) multicast context */
	struct net_bridge_mcast_port *pmctx = &pg->key.port->multicast_ctx;
	struct net_bridge_vlan *vlan;

	lockdep_assert_held_once(&pg->key.port->br->multicast_lock);

	/* if vlan snooping is disabled use the port's multicast context */
	if (!pg->key.addr.vid ||
	    !br_opt_get(pg->key.port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		goto out;

	/* locking is tricky here, due to different rules for multicast and
	 * vlans we need to take rcu to find the vlan and make sure it has
	 * the BR_VLFLAG_MCAST_ENABLED flag set, it can only change under
	 * multicast_lock which must be already held here, so the vlan's pmctx
	 * can safely be used on return
	 */
	rcu_read_lock();
	vlan = br_vlan_find(nbp_vlan_group_rcu(pg->key.port), pg->key.addr.vid);
	if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
		pmctx = &vlan->port_mcast_ctx;
	else
		pmctx = NULL;	/* vlan gone or its mcast ctx is disabled */
	rcu_read_unlock();
out:
	return pmctx;
}
  206. static struct net_bridge_mcast_port *
  207. br_multicast_port_vid_to_port_ctx(struct net_bridge_port *port, u16 vid)
  208. {
  209. struct net_bridge_mcast_port *pmctx = NULL;
  210. struct net_bridge_vlan *vlan;
  211. lockdep_assert_held_once(&port->br->multicast_lock);
  212. if (!br_opt_get(port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
  213. return NULL;
  214. /* Take RCU to access the vlan. */
  215. rcu_read_lock();
  216. vlan = br_vlan_find(nbp_vlan_group_rcu(port), vid);
  217. if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
  218. pmctx = &vlan->port_mcast_ctx;
  219. rcu_read_unlock();
  220. return pmctx;
  221. }
  222. /* when snooping we need to check if the contexts should be used
  223. * in the following order:
  224. * - if pmctx is non-NULL (port), check if it should be used
  225. * - if pmctx is NULL (bridge), check if brmctx should be used
  226. */
  227. static bool
  228. br_multicast_ctx_should_use(const struct net_bridge_mcast *brmctx,
  229. const struct net_bridge_mcast_port *pmctx)
  230. {
  231. if (!netif_running(brmctx->br->dev))
  232. return false;
  233. if (pmctx)
  234. return !br_multicast_port_ctx_state_disabled(pmctx);
  235. else
  236. return !br_multicast_ctx_vlan_disabled(brmctx);
  237. }
  238. static bool br_port_group_equal(struct net_bridge_port_group *p,
  239. struct net_bridge_port *port,
  240. const unsigned char *src)
  241. {
  242. if (p->key.port != port)
  243. return false;
  244. if (!(port->flags & BR_MULTICAST_TO_UNICAST))
  245. return true;
  246. return ether_addr_equal(src, p->eth_addr);
  247. }
/* Install an automatic S,G entry on @pg's port for a *,G EXCLUDE group so
 * traffic from @sg_ip's source keeps being replicated to that port.  The
 * entry is marked MDB_PG_FLAGS_STAR_EXCL so it can be identified and
 * removed later by __fwd_del_star_excl().  No-op if an S,G entry for this
 * port already exists or the add failed / produced a non-kernel entry.
 */
static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *src_pg;
	struct net_bridge_mcast *brmctx;

	memset(&sg_key, 0, sizeof(sg_key));
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	if (br_sg_port_find(brmctx->br, &sg_key))
		return;

	src_pg = __br_multicast_add_group(brmctx, pmctx,
					  sg_ip, pg->eth_addr,
					  MCAST_INCLUDE, false, false);
	if (IS_ERR_OR_NULL(src_pg) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
}
/* Remove an S,G entry that was auto-installed by __fwd_add_star_excl():
 * only entries flagged MDB_PG_FLAGS_STAR_EXCL with RTPROT_KERNEL origin
 * are deleted; user-added entries are left untouched.
 */
static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *src_pg;

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	src_pg = br_sg_port_find(br, &sg_key);
	if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	br_multicast_find_del_pg(br, src_pg);
}
/* When a port group transitions to (or is added as) EXCLUDE we need to add it
 * to all other ports' S,G entries which are not blocked by the current group
 * for proper replication, the assumption is that any S,G blocked entries
 * are already added so the S,G,port lookup should skip them.
 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
 * deleted we need to remove it from all ports' S,G entries where it was
 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
 */
void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
				     u8 filter_mode)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *pg_lst;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	/* only valid for *,G (any-source) port groups */
	if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
		return;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (!mp)
		return;
	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		return;

	/* build S,G addresses from the *,G address, varying only the src */
	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;

	/* walk every other port group of this *,G entry and sync @pg with
	 * each of its installed sources
	 */
	for (pg_lst = mlock_dereference(mp->ports, br);
	     pg_lst;
	     pg_lst = mlock_dereference(pg_lst->next, br)) {
		struct net_bridge_group_src *src_ent;

		if (pg_lst == pg)
			continue;
		hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			switch (filter_mode) {
			case MCAST_INCLUDE:
				__fwd_del_star_excl(pg, &sg_ip);
				break;
			case MCAST_EXCLUDE:
				__fwd_add_star_excl(pmctx, pg, &sg_ip);
				break;
			}
		}
	}
}
/* called when adding a new S,G with host_joined == false by default */
static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_mdb_entry *sg_mp;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;
	/* nothing to propagate if the host hasn't joined the *,G */
	if (!star_mp->host_joined)
		return;

	sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
	if (!sg_mp)
		return;
	sg_mp->host_joined = true;
}
/* set the host_joined state of all of *,G's S,G entries */
static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
{
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mdb_entry *sg_mp;
	struct net_bridge_port_group *pg;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	/* build each S,G address from the *,G address plus every installed
	 * source of every port group, and copy host_joined to that entry
	 */
	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = star_mp->addr;

	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_group_src *src_ent;

		hlist_for_each_entry(src_ent, &pg->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			sg_mp = br_mdb_ip_get(br, &sg_ip);
			if (!sg_mp)
				continue;
			sg_mp->host_joined = star_mp->host_joined;
		}
	}
}
/* Delete an S,G entry's kernel-installed EXCLUDE ports once no "real"
 * (non-STAR_EXCL, non-permanent) member is left on it.
 */
static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;

	/* *,G exclude ports are only added to S,G entries */
	if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
		return;

	/* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports
	 * we should ignore perm entries since they're managed by user-space
	 */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;
	     pp = &p->next)
		if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
				  MDB_PG_FLAGS_PERMANENT)))
			return;

	/* currently the host can only have joined the *,G which means
	 * we treat it as EXCLUDE {}, so for an S,G it's considered a
	 * STAR_EXCLUDE entry and we can safely leave it
	 */
	sgmp->host_joined = false;

	/* remove everything except user-space managed (permanent) entries;
	 * pp only advances when the current entry is kept
	 */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
		if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(sgmp, p, pp);
		else
			pp = &p->next;
	}
}
/* Install the new S,G entry @sg on every EXCLUDE-mode port of the matching
 * *,G entry @star_mp, so EXCLUDE receivers keep getting this source's
 * traffic. Entries added here are marked MDB_PG_FLAGS_STAR_EXCL so they
 * can be cleaned up automatically later.
 */
void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *pg;
	struct net_bridge_mcast *brmctx;

	if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
		return;
	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	/* propagate the *,G host join state to the new S,G first */
	br_multicast_sg_host_state(star_mp, sg);
	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = sg->key.addr;
	/* we need to add all exclude ports to the S,G */
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_port_group *src_pg;

		/* skip the S,G's own port and INCLUDE-mode ports */
		if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
			continue;

		sg_key.port = pg->key.port;
		/* already has an S,G entry on this port */
		if (br_sg_port_find(br, &sg_key))
			continue;

		pmctx = br_multicast_pg_to_port_ctx(pg);
		if (!pmctx)
			continue;
		brmctx = br_multicast_port_ctx_get_global(pmctx);

		src_pg = __br_multicast_add_group(brmctx, pmctx,
						  &sg->key.addr,
						  sg->eth_addr,
						  MCAST_INCLUDE, false, false);
		/* only tag entries the kernel created itself */
		if (IS_ERR_OR_NULL(src_pg) ||
		    src_pg->rt_protocol != RTPROT_KERNEL)
			continue;
		src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
	}
}
/* Install the S,G forwarding entry corresponding to group source @src
 * (group address from src->pg, source address from src->addr) and mark the
 * source as installed. Also hooks the new S,G into the *,G EXCLUDE ports.
 */
static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
{
	struct net_bridge_mdb_entry *star_mp;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *sg;
	struct net_bridge_mcast *brmctx;
	struct br_ip sg_ip;

	/* already installed - nothing to do */
	if (src->flags & BR_SGRP_F_INSTALLED)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	pmctx = br_multicast_pg_to_port_ctx(src->pg);
	if (!pmctx)
		return;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_ip = src->pg->key.addr;
	sg_ip.src = src->addr.src;

	/* blocked (last arg) when the source timer isn't running */
	sg = __br_multicast_add_group(brmctx, pmctx, &sg_ip,
				      src->pg->eth_addr, MCAST_INCLUDE, false,
				      !timer_pending(&src->timer));
	if (IS_ERR_OR_NULL(sg))
		return;
	src->flags |= BR_SGRP_F_INSTALLED;
	sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;

	/* if it was added by user-space as perm we can skip next steps */
	if (sg->rt_protocol != RTPROT_KERNEL &&
	    (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* the kernel is now responsible for removing this S,G */
	del_timer(&sg->timer);
	star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
	if (!star_mp)
		return;

	br_multicast_sg_add_exclude_ports(star_mp, sg);
}
/* Remove the S,G forwarding entry that was installed for group source @src
 * and clear its installed flag. @fastleave tags the port group so the
 * deletion notification carries the fast-leave reason.
 */
static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src,
					bool fastleave)
{
	struct net_bridge_port_group *p, *pg = src->pg;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;
	sg_ip.src = src->addr.src;

	mp = br_mdb_ip_get(src->br, &sg_ip);
	if (!mp)
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, src->br)) != NULL;
	     pp = &p->next) {
		if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
			continue;

		/* don't tear down a user-space permanent entry unless the
		 * source itself was user-added
		 */
		if (p->rt_protocol != RTPROT_KERNEL &&
		    (p->flags & MDB_PG_FLAGS_PERMANENT) &&
		    !(src->flags & BR_SGRP_F_USER_ADDED))
			break;

		if (fastleave)
			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_del_pg(mp, p, pp);
		break;
	}
	src->flags &= ~BR_SGRP_F_INSTALLED;
}
/* install S,G and based on src's timer enable or disable forwarding */
static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *sg;
	u8 old_flags;

	br_multicast_fwd_src_add(src);

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = src->pg->key.addr;
	sg_key.addr.src = src->addr.src;
	sg_key.port = src->pg->key.port;

	sg = br_sg_port_find(src->br, &sg_key);
	/* permanent entries are managed by user-space, don't touch them */
	if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* forward only while the source timer is running */
	old_flags = sg->flags;
	if (timer_pending(&src->timer))
		sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
	else
		sg->flags |= MDB_PG_FLAGS_BLOCKED;

	/* notify user-space only on an actual state change */
	if (old_flags != sg->flags) {
		struct net_bridge_mdb_entry *sg_mp;

		sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
		if (!sg_mp)
			return;
		br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
	}
}
/* Deferred (GC worker) destructor for an MDB entry: the entry must already
 * be unhashed and have no remaining port groups.
 */
static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_mdb_entry *mp;

	mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
	WARN_ON(!hlist_unhashed(&mp->mdb_node));
	WARN_ON(mp->ports);

	/* make sure the expiry timer can't fire again, then free via RCU */
	timer_shutdown_sync(&mp->timer);
	kfree_rcu(mp, rcu);
}
/* Unlink an MDB entry from the hash table and lists and queue it for
 * deferred destruction on the bridge's GC work queue.
 */
static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
{
	struct net_bridge *br = mp->br;

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_init_rcu(&mp->mdb_node);
	hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}
/* Group membership timer: drop the host join and, if no port groups remain,
 * delete the whole MDB entry.
 */
static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	/* bail if the entry is already gone, the bridge is down, or the
	 * timer was re-armed after this expiry fired
	 */
	if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
	    timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);

	if (mp->ports)
		goto out;
	br_multicast_del_mdb_entry(mp);
out:
	spin_unlock(&br->multicast_lock);
}
/* Deferred (GC worker) destructor for a group source entry. */
static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_src *src;

	src = container_of(gc, struct net_bridge_group_src, mcast_gc);
	WARN_ON(!hlist_unhashed(&src->node));

	/* stop the source timer for good before freeing via RCU */
	timer_shutdown_sync(&src->timer);
	kfree_rcu(src, rcu);
}
/* Unlink a group source from its port group and queue it for deferred
 * destruction. Does NOT remove the corresponding S,G forwarding entry -
 * callers that need that use br_multicast_del_group_src().
 */
void __br_multicast_del_group_src(struct net_bridge_group_src *src)
{
	struct net_bridge *br = src->pg->key.port->br;

	hlist_del_init_rcu(&src->node);
	src->pg->src_ents--;
	hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}
/* Remove a group source: first tear down its S,G forwarding entry, then
 * unlink and free the source itself.
 */
void br_multicast_del_group_src(struct net_bridge_group_src *src,
				bool fastleave)
{
	br_multicast_fwd_src_remove(src, fastleave);
	__br_multicast_del_group_src(src);
}
/* Bump the MDB entry count of one multicast port context, enforcing its
 * configured maximum. @what names the context ("Port"/"Port-VLAN") for the
 * extack error message. Returns 0 or -E2BIG when the limit is hit.
 */
static int
br_multicast_port_ngroups_inc_one(struct net_bridge_mcast_port *pmctx,
				  struct netlink_ext_ack *extack,
				  const char *what)
{
	u32 max = READ_ONCE(pmctx->mdb_max_entries);
	u32 n = READ_ONCE(pmctx->mdb_n_entries);

	/* max == 0 means unlimited */
	if (max && n >= max) {
		NL_SET_ERR_MSG_FMT_MOD(extack, "%s is already in %u groups, and mcast_max_groups=%u",
				       what, n, max);
		return -E2BIG;
	}

	WRITE_ONCE(pmctx->mdb_n_entries, n + 1);
	return 0;
}
/* Decrement the MDB entry count of one multicast port context. */
static void br_multicast_port_ngroups_dec_one(struct net_bridge_mcast_port *pmctx)
{
	u32 n = READ_ONCE(pmctx->mdb_n_entries);

	/* counter underflow would indicate unbalanced inc/dec */
	WARN_ON_ONCE(n == 0);
	WRITE_ONCE(pmctx->mdb_n_entries, n - 1);
}
/* Account a new group on @port: always on the port context, and also on the
 * per-VLAN context when @group has a VID with snooping enabled. Rolls back
 * the port-level increment if the VLAN-level one fails.
 */
static int br_multicast_port_ngroups_inc(struct net_bridge_port *port,
					 const struct br_ip *group,
					 struct netlink_ext_ack *extack)
{
	struct net_bridge_mcast_port *pmctx;
	int err;

	lockdep_assert_held_once(&port->br->multicast_lock);

	/* Always count on the port context. */
	err = br_multicast_port_ngroups_inc_one(&port->multicast_ctx, extack,
						"Port");
	if (err) {
		trace_br_mdb_full(port->dev, group);
		return err;
	}

	/* Only count on the VLAN context if VID is given, and if snooping on
	 * that VLAN is enabled.
	 */
	if (!group->vid)
		return 0;

	pmctx = br_multicast_port_vid_to_port_ctx(port, group->vid);
	if (!pmctx)
		return 0;

	err = br_multicast_port_ngroups_inc_one(pmctx, extack, "Port-VLAN");
	if (err) {
		trace_br_mdb_full(port->dev, group);
		goto dec_one_out;
	}

	return 0;

dec_one_out:
	br_multicast_port_ngroups_dec_one(&port->multicast_ctx);
	return err;
}
/* Un-account a group on @port: the per-VLAN context first (if @vid is set
 * and its context still exists), then always the port context. Mirrors
 * br_multicast_port_ngroups_inc().
 */
static void br_multicast_port_ngroups_dec(struct net_bridge_port *port, u16 vid)
{
	struct net_bridge_mcast_port *pmctx;

	lockdep_assert_held_once(&port->br->multicast_lock);

	if (vid) {
		pmctx = br_multicast_port_vid_to_port_ctx(port, vid);
		if (pmctx)
			br_multicast_port_ngroups_dec_one(pmctx);
	}
	br_multicast_port_ngroups_dec_one(&port->multicast_ctx);
}
/* Current number of MDB entries accounted to this port context. */
u32 br_multicast_ngroups_get(const struct net_bridge_mcast_port *pmctx)
{
	return READ_ONCE(pmctx->mdb_n_entries);
}
/* Set the MDB entry limit for this port context (0 = unlimited). */
void br_multicast_ngroups_set_max(struct net_bridge_mcast_port *pmctx, u32 max)
{
	WRITE_ONCE(pmctx->mdb_max_entries, max);
}
/* Configured MDB entry limit for this port context (0 = unlimited). */
u32 br_multicast_ngroups_get_max(const struct net_bridge_mcast_port *pmctx)
{
	return READ_ONCE(pmctx->mdb_max_entries);
}
/* Deferred (GC worker) destructor for a port group: must already be
 * unlinked and have an empty source list.
 */
static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_port_group *pg;

	pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
	WARN_ON(!hlist_unhashed(&pg->mglist));
	WARN_ON(!hlist_empty(&pg->src_list));

	/* stop both timers for good before the RCU-deferred free */
	timer_shutdown_sync(&pg->rexmit_timer);
	timer_shutdown_sync(&pg->timer);
	kfree_rcu(pg, rcu);
}
/* Remove port group @pg from MDB entry @mp (@pp points at the list slot
 * holding @pg): tear down sources, notify user-space, fix up related S,G /
 * *,G state, and queue @pg for deferred destruction. If the entry ends up
 * empty, its expiry timer is fired immediately to delete it.
 */
void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
			 struct net_bridge_port_group *pg,
			 struct net_bridge_port_group __rcu **pp)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;

	rcu_assign_pointer(*pp, pg->next);
	hlist_del_init(&pg->mglist);
	br_multicast_eht_clean_sets(pg);
	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		br_multicast_del_group_src(ent, false);
	br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
	if (!br_multicast_is_star_g(&mp->addr)) {
		/* S,G entry: drop from the S,G-port hash and clean up any
		 * remaining kernel-installed EXCLUDE ports
		 */
		rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
				       br_sg_port_rht_params);
		br_multicast_sg_del_exclude_ports(mp);
	} else {
		/* *,G entry: remove its auto-installed S,G mirrors */
		br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
	}
	br_multicast_port_ngroups_dec(pg->key.port, pg->key.addr.vid);
	hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);

	/* expire the now-empty MDB entry right away */
	if (!mp->ports && !mp->host_joined && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}
/* Locate @pg in its MDB entry's port list and delete it. The caller must
 * pass a port group that is still linked; a missing entry or list slot is
 * a bug and triggers WARN_ON.
 */
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		br_multicast_del_pg(mp, pg, pp);
		return;
	}

	WARN_ON(1);
}
  715. static void br_multicast_port_group_expired(struct timer_list *t)
  716. {
  717. struct net_bridge_port_group *pg = from_timer(pg, t, timer);
  718. struct net_bridge_group_src *src_ent;
  719. struct net_bridge *br = pg->key.port->br;
  720. struct hlist_node *tmp;
  721. bool changed;
  722. spin_lock(&br->multicast_lock);
  723. if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
  724. hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
  725. goto out;
  726. changed = !!(pg->filter_mode == MCAST_EXCLUDE);
  727. pg->filter_mode = MCAST_INCLUDE;
  728. hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
  729. if (!timer_pending(&src_ent->timer)) {
  730. br_multicast_del_group_src(src_ent, false);
  731. changed = true;
  732. }
  733. }
  734. if (hlist_empty(&pg->src_list)) {
  735. br_multicast_find_del_pg(br, pg);
  736. } else if (changed) {
  737. struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);
  738. if (changed && br_multicast_is_star_g(&pg->key.addr))
  739. br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
  740. if (WARN_ON(!mp))
  741. goto out;
  742. br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
  743. }
  744. out:
  745. spin_unlock(&br->multicast_lock);
  746. }
/* Run all queued deferred destructors: pop each GC entry off @head and
 * invoke its destroy callback. Called from the bridge's GC work item.
 */
static void br_multicast_gc(struct hlist_head *head)
{
	struct net_bridge_mcast_gc *gcent;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
		hlist_del_init(&gcent->gc_node);
		gcent->destroy(gcent);
	}
}
/* Tag an outgoing query skb with the context's VLAN, unless that VLAN
 * egresses untagged. The VLAN comes from the per-port context if it is a
 * VLAN context, otherwise from the global (bridge) context.
 */
static void __br_multicast_query_handle_vlan(struct net_bridge_mcast *brmctx,
					     struct net_bridge_mcast_port *pmctx,
					     struct sk_buff *skb)
{
	struct net_bridge_vlan *vlan = NULL;

	if (pmctx && br_multicast_port_ctx_is_vlan(pmctx))
		vlan = pmctx->vlan;
	else if (br_multicast_ctx_is_vlan(brmctx))
		vlan = brmctx->vlan;

	if (vlan && !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
		u16 vlan_proto;

		/* use the bridge's configured VLAN protocol for the tag */
		if (br_vlan_get_proto(brmctx->br->dev, &vlan_proto) != 0)
			return;
		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan->vid);
	}
}
/* Build an IGMPv2/v3 query skb (per brmctx->multicast_igmp_version).
 * For v3 with @with_srcs, only sources matching @over_lmqt (timer relative
 * to the last-member-query interval) with pending retransmissions are
 * listed; their rexmit counters are decremented and @need_rexmit is set if
 * any remain. Returns NULL when no matching sources exist or the packet
 * would exceed the port/bridge MTU.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs, bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (brmctx->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			lmqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			/* first pass: count sources to size the header */
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	/* + 4 for the IP Router Alert option */
	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IP);

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;	/* 24 bytes: 20-byte header + 4-byte RA option */
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(brmctx->br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(brmctx->br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	/* Router Alert option directly after the fixed header */
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	switch (brmctx->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		/* group-specific queries use the shorter LMQ interval */
		ih->code = (group ? brmctx->multicast_last_member_interval :
				    brmctx->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? brmctx->multicast_last_member_interval :
				      brmctx->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = brmctx->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		/* second pass: fill the source list counted above */
		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* both passes ran under the same lock - counts must match */
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#if IS_ENABLED(CONFIG_IPV6)
/* Build an MLDv1/v2 query skb (per brmctx->multicast_mld_version) -
 * IPv6 counterpart of br_ip4_multicast_alloc_query(). For v2 with
 * @with_srcs, only sources matching @over_llqt with pending
 * retransmissions are listed. Returns NULL when no matching sources
 * exist, the packet would exceed the MTU, or no usable IPv6 source
 * address can be selected (which also clears BROPT_HAS_IPV6_ADDR).
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (brmctx->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			llqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			/* first pass: count sources to size the header */
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	/* + 8 for the Hop-by-Hop extension header (Router Alert) */
	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);	/* version 6 */
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ip6h->daddr = *ip6_dst;
	if (ipv6_dev_get_saddr(dev_net(brmctx->br->dev), brmctx->br->dev,
			       &ip6h->daddr, 0, &ip6h->saddr)) {
		/* no IPv6 address available - remember that for querier
		 * logic and give up
		 */
		kfree_skb(skb);
		br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	/* group-specific queries use the shorter last-member interval */
	interval = ipv6_addr_any(group) ?
			brmctx->multicast_query_response_interval :
			brmctx->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (brmctx->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;
		if (!pg || !with_srcs)
			break;

		/* second pass: fill the source list counted above */
		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* both passes ran under the same lock - counts must match */
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
				IPPROTO_ICMPV6,
				csum_partial(csum_start, mld_hdr_size, 0));
	skb_put(skb, mld_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif
/* Protocol-dispatching query builder: picks the IGMP (IPv4) or MLD (IPv6)
 * allocator based on @group->proto. A NULL @ip_dst falls back to the
 * all-hosts (224.0.0.1) / all-nodes (ff02::1) destination.
 */
static struct sk_buff *br_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						struct net_bridge_mcast_port *pmctx,
						struct net_bridge_port_group *pg,
						struct br_ip *ip_dst,
						struct br_ip *group,
						bool with_srcs, bool over_lmqt,
						u8 sflag, u8 *igmp_type,
						bool *need_rexmit)
{
	__be32 ip4_dst;

	switch (group->proto) {
	case htons(ETH_P_IP):
		ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
		return br_ip4_multicast_alloc_query(brmctx, pmctx, pg,
						    ip4_dst, group->dst.ip4,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr ip6_dst;

		if (ip_dst)
			ip6_dst = ip_dst->dst.ip6;
		else
			/* ff02::1 - all-nodes link-local multicast */
			ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
				      htonl(1));
		return br_ip6_multicast_alloc_query(brmctx, pmctx, pg,
						    &ip6_dst, &group->dst.ip6,
						    with_srcs, over_lmqt,
						    sflag, igmp_type,
						    need_rexmit);
	}
#endif
	}
	return NULL;
}
/* Look up @group's MDB entry, creating it if it doesn't exist yet.
 * Returns the entry, or ERR_PTR(-E2BIG) when the hash limit is reached
 * (in which case snooping is disabled entirely), or ERR_PTR on other
 * allocation/insert failures.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (mp)
		return mp;

	if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
		/* table full - turn multicast snooping off */
		trace_br_mdb_full(br->dev, group);
		br_mc_disabled_update(br->dev, false, NULL);
		br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
		return ERR_PTR(-E2BIG);
	}

	/* GFP_ATOMIC - called under the multicast spinlock */
	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);
	if (err) {
		kfree(mp);
		mp = ERR_PTR(err);
	} else {
		hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
	}

	return mp;
}
/* Source timer: in INCLUDE mode an expired source is removed (and the port
 * group too when it was the last one); in EXCLUDE mode the source stays but
 * its S,G forwarding state is re-evaluated (which blocks it).
 */
static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	/* bail if already unlinked, bridge down, or timer re-armed */
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
	    timer_pending(&src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src, false);
		if (!hlist_empty(&pg->src_list))
			goto out;
		br_multicast_find_del_pg(br, pg);
	} else {
		br_multicast_fwd_src_handle(src);
	}

out:
	spin_unlock(&br->multicast_lock);
}
/* Find the source entry matching @ip's source address in @pg's source
 * list, or NULL if not present. Comparison is protocol-specific.
 */
struct net_bridge_group_src *
br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
{
	struct net_bridge_group_src *ent;

	switch (ip->proto) {
	case htons(ETH_P_IP):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (ip->src.ip4 == ent->addr.src.ip4)
				return ent;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		hlist_for_each_entry(ent, &pg->src_list, node)
			if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
				return ent;
		break;
#endif
	}

	return NULL;
}
/* Allocate a new source entry for @src_ip on port group @pg and link it
 * into the group's source list. Returns NULL when the per-group source
 * limit is reached, the address is invalid as a multicast source
 * (zeronet/any or itself multicast), or allocation fails.
 */
struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
	struct net_bridge_group_src *grp_src;

	if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
		return NULL;

	switch (src_ip->proto) {
	case htons(ETH_P_IP):
		if (ipv4_is_zeronet(src_ip->src.ip4) ||
		    ipv4_is_multicast(src_ip->src.ip4))
			return NULL;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (ipv6_addr_any(&src_ip->src.ip6) ||
		    ipv6_addr_is_multicast(&src_ip->src.ip6))
			return NULL;
		break;
#endif
	}

	/* GFP_ATOMIC - called under the multicast spinlock */
	grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
	if (unlikely(!grp_src))
		return NULL;

	grp_src->pg = pg;
	grp_src->br = pg->key.port->br;
	grp_src->addr = *src_ip;
	grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
	timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);

	hlist_add_head_rcu(&grp_src->node, &pg->src_list);
	pg->src_ents++;

	return grp_src;
}
/* Allocate and initialize a new port group for @group on @port and link it
 * in front of @next. S,G entries are also inserted into the S,G-port hash.
 * @src, if given, restricts the group to that Ethernet source address;
 * otherwise the broadcast address matches any source. Accounts the group
 * against the port's mcast_max_groups limits. Returns NULL on limit,
 * allocation, or hash-insert failure (with extack set).
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			const struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode,
			u8 rt_protocol,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_port_group *p;
	int err;

	err = br_multicast_port_ngroups_inc(port, group, extack);
	if (err)
		return NULL;

	/* GFP_ATOMIC - called under the multicast spinlock */
	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
		goto dec_out;
	}

	p->key.addr = *group;
	p->key.port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	p->rt_protocol = rt_protocol;
	p->eht_host_tree = RB_ROOT;
	p->eht_set_tree = RB_ROOT;
	p->mcast_gc.destroy = br_multicast_destroy_port_group;
	INIT_HLIST_HEAD(&p->src_list);

	/* only S,G entries live in the S,G-port hash table */
	if (!br_multicast_is_star_g(group) &&
	    rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
					  br_sg_port_rht_params)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't insert new port group");
		goto free_out;
	}

	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(&p->mglist, &port->mglist);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(p->eth_addr);

	return p;

free_out:
	kfree(p);
dec_out:
	br_multicast_port_ngroups_dec(port, group->vid);
	return NULL;
}
/* Tear down a port group entry created by br_multicast_new_port_group():
 * unlink it from the port's mglist and, for (S,G) entries, from the
 * sg_port hash table, free it and drop the per-port group accounting.
 * Plain kfree() (no RCU grace period) — presumably only used before the
 * entry is visible to RCU readers; confirm against callers.
 */
void br_multicast_del_port_group(struct net_bridge_port_group *p)
{
	struct net_bridge_port *port = p->key.port;
	__u16 vid = p->key.addr.vid;

	hlist_del_init(&p->mglist);
	if (!br_multicast_is_star_g(&p->key.addr))
		rhashtable_remove_fast(&port->br->sg_port_tbl, &p->rhnode,
				       br_sg_port_rht_params);
	kfree(p);
	br_multicast_port_ngroups_dec(port, vid);
}
/* Mark @mp as joined by the bridge (host) itself.  The state change,
 * star-G handling and (optional) netlink notification happen only on the
 * false -> true transition; the membership timer is refreshed on every
 * call except for L2 groups, which keep no timer.
 */
void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
			    struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined) {
		mp->host_joined = true;
		if (br_multicast_is_star_g(&mp->addr))
			br_multicast_star_g_host_state(mp);
		if (notify)
			br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
	}

	/* L2 multicast entries are permanent — no timer to refresh */
	if (br_group_is_l2(&mp->addr))
		return;

	mod_timer(&mp->timer, jiffies + brmctx->multicast_membership_interval);
}
/* Clear the bridge's own (host) membership on @mp.  A no-op unless the
 * host was joined; mirrors the join path's star-G handling and optional
 * netlink notification.
 */
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined)
		return;

	mp->host_joined = false;
	if (br_multicast_is_star_g(&mp->addr))
		br_multicast_star_g_host_state(mp);
	if (notify)
		br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
}
/* Core join handler: find or create the MDB entry for @group and, when a
 * port context is given, find or create the matching port group entry.
 * Without a port context this is a host join.  Returns the port group,
 * NULL (valid for host joins / unusable contexts) or an ERR_PTR.
 * Caller holds the bridge multicast_lock (see br_multicast_add_group()).
 */
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p = NULL;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_multicast_new_group(brmctx->br, group);
	if (IS_ERR(mp))
		return ERR_CAST(mp);

	/* no port context -> the bridge itself joins */
	if (!pmctx) {
		br_multicast_host_join(brmctx, mp, true);
		goto out;
	}

	/* walk the port list, which is kept sorted by descending port
	 * pointer value: stop at an existing matching entry or at the
	 * insertion point
	 */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, pmctx->port, src))
			goto found;
		if ((unsigned long)p->key.port < (unsigned long)pmctx->port)
			break;
	}

	p = br_multicast_new_port_group(pmctx->port, group, *pp, 0, src,
					filter_mode, RTPROT_KERNEL, NULL);
	if (unlikely(!p)) {
		p = ERR_PTR(-ENOMEM);
		goto out;
	}
	rcu_assign_pointer(*pp, p);
	if (blocked)
		p->flags |= MDB_PG_FLAGS_BLOCKED;
	br_mdb_notify(brmctx->br->dev, mp, p, RTM_NEWMDB);

found:
	/* IGMPv2/MLDv1 joins carry no source lists; refresh the timer */
	if (igmpv2_mldv1)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);

out:
	return p;
}
  1322. static int br_multicast_add_group(struct net_bridge_mcast *brmctx,
  1323. struct net_bridge_mcast_port *pmctx,
  1324. struct br_ip *group,
  1325. const unsigned char *src,
  1326. u8 filter_mode,
  1327. bool igmpv2_mldv1)
  1328. {
  1329. struct net_bridge_port_group *pg;
  1330. int err;
  1331. spin_lock(&brmctx->br->multicast_lock);
  1332. pg = __br_multicast_add_group(brmctx, pmctx, group, src, filter_mode,
  1333. igmpv2_mldv1, false);
  1334. /* NULL is considered valid for host joined groups */
  1335. err = PTR_ERR_OR_ZERO(pg);
  1336. spin_unlock(&brmctx->br->multicast_lock);
  1337. return err;
  1338. }
/* IGMP join entry point: build the IPv4 br_ip key for @group/@vid and add
 * the membership.  IGMPv2 reports carry no source lists, so they map to
 * EXCLUDE filter mode; IGMPv3 paths use INCLUDE here.
 */
static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      __be32 group,
				      __u16 vid,
				      const unsigned char *src,
				      bool igmpv2)
{
	struct br_ip br_group;
	u8 filter_mode;

	/* link-local (224.0.0.0/24) groups are never tracked */
	if (ipv4_is_local_multicast(group))
		return 0;

	/* memset zeroes the whole br_ip (union remainder and padding)
	 * before filling only the fields used for this protocol
	 */
	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip4 = group;
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;
	filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      filter_mode, igmpv2);
}
#if IS_ENABLED(CONFIG_IPV6)
/* MLD join entry point: build the IPv6 br_ip key for @group/@vid and add
 * the membership.  MLDv1 reports carry no source lists, so they map to
 * EXCLUDE filter mode; MLDv2 paths use INCLUDE here.
 */
static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      const struct in6_addr *group,
				      __u16 vid,
				      const unsigned char *src,
				      bool mldv1)
{
	struct br_ip br_group;
	u8 filter_mode;

	/* the link-local all-nodes group (ff02::1) is never tracked */
	if (ipv6_addr_is_ll_all_nodes(group))
		return 0;

	/* memset zeroes the whole br_ip (union remainder and padding)
	 * before filling only the fields used for this protocol
	 */
	memset(&br_group, 0, sizeof(br_group));
	br_group.dst.ip6 = *group;
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;
	filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      filter_mode, mldv1);
}
#endif
  1379. static bool br_multicast_rport_del(struct hlist_node *rlist)
  1380. {
  1381. if (hlist_unhashed(rlist))
  1382. return false;
  1383. hlist_del_init_rcu(rlist);
  1384. return true;
  1385. }
/* Remove the port from the IPv4 router-port list; true if it was linked. */
static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
	return br_multicast_rport_del(&pmctx->ip4_rlist);
}
/* Remove the port from the IPv6 router-port list; true if it was linked.
 * Always false when IPv6 support is compiled out.
 */
static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	return br_multicast_rport_del(&pmctx->ip6_rlist);
#else
	return false;
#endif
}
/* Common handler for a port's (IPv4/IPv6) router timer expiry: drop the
 * port from the router-port list unless the port is configured as a
 * disabled/permanent router or the timer was re-armed meanwhile.
 */
static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx,
					struct timer_list *t,
					struct hlist_node *rlist)
{
	struct net_bridge *br = pmctx->port->br;
	bool del;

	spin_lock(&br->multicast_lock);
	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(t))
		goto out;

	del = br_multicast_rport_del(rlist);
	br_multicast_rport_del_notify(pmctx, del);
out:
	spin_unlock(&br->multicast_lock);
}
/* Timer trampoline: IPv4 router-port timer -> common expiry handler. */
static void br_ip4_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist);
}
#if IS_ENABLED(CONFIG_IPV6)
/* Timer trampoline: IPv6 router-port timer -> common expiry handler. */
static void br_ip6_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist);
}
#endif
/* Propagate the bridge's "is a multicast router present" state to
 * switchdev drivers (deferred attr set, no blocking in this context).
 */
static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}
/* Bridge-level (local) router timer expiry: clear the switchdev mrouter
 * state unless router presence is pinned by configuration or another
 * (IPv4/IPv6) timer still indicates a router.  @timer is unused here —
 * presence is re-checked via the br_ip{4,6}_multicast_is_router() helpers.
 */
static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx,
					      struct timer_list *timer)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    brmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    br_ip4_multicast_is_router(brmctx) ||
	    br_ip6_multicast_is_router(brmctx))
		goto out;

	br_mc_router_state_change(brmctx->br, false);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}
/* Timer trampoline: bridge IPv4 router timer -> local expiry handler. */
static void br_ip4_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
#if IS_ENABLED(CONFIG_IPV6)
/* Timer trampoline: bridge IPv6 router timer -> local expiry handler. */
static void br_ip6_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
#endif
/* The other (foreign) querier's timer expired: take over by starting our
 * own query cycle, unless the device is down or multicast/vlan snooping
 * is disabled for this context.
 */
static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (!netif_running(brmctx->br->dev) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(brmctx, query);

out:
	spin_unlock(&brmctx->br->multicast_lock);
}
/* Timer trampoline: IPv4 other-querier timeout -> start own querier. */
static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip4_own_query);
}
#if IS_ENABLED(CONFIG_IPV6)
/* Timer trampoline: IPv6 other-querier timeout -> start own querier. */
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query);
}
#endif
/* Intentionally empty: this timer only delays querier startup, and what
 * matters to the rest of the code is whether it is still pending —
 * presumably checked via timer_pending() elsewhere; confirm at callers.
 */
static void br_multicast_query_delay_expired(struct timer_list *t)
{
}
/* Record our own source address (taken from the query packet @skb we just
 * built) as the active querier address for the matching protocol.
 */
static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
/* Build and emit one query.  With a port context the query is transmitted
 * out of that port through the netfilter LOCAL_OUT hook; without one the
 * bridge queries itself (packet looped back via netif_rx) and records
 * itself as querier.  When sending a source-specific query with the
 * suppress flag set (@sflag), a second pass is made for the sources whose
 * timers are under the LMQT (see the again_under_lmqt loop).
 */
static void __br_multicast_send_query(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

again_under_lmqt:
	skb = br_multicast_alloc_query(brmctx, pmctx, pg, ip_dst, group,
				       with_srcs, over_lmqt, sflag, &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (pmctx) {
		skb->dev = pmctx->port->dev;
		br_multicast_count(brmctx->br, pmctx->port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(pmctx->port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		/* second pass for the sources under the LMQT */
		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(brmctx, group, skb);
		br_multicast_count(brmctx->br, NULL, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}
/* Lockless, consistent snapshot of the querier state: retry the copy
 * until a full pass completes without a concurrent writer (seqcount).
 */
static void br_multicast_read_querier(const struct bridge_mcast_querier *querier,
				      struct bridge_mcast_querier *dest)
{
	unsigned int seq;

	memset(dest, 0, sizeof(*dest));
	do {
		seq = read_seqcount_begin(&querier->seq);
		dest->port_ifidx = querier->port_ifidx;
		memcpy(&dest->addr, &querier->addr, sizeof(struct br_ip));
	} while (read_seqcount_retry(&querier->seq, seq));
}
/* Writer side of the querier seqcount: publish a new querier ifindex and
 * address atomically with respect to br_multicast_read_querier().
 * @brmctx is unused here (kept for symmetry with the callers' signatures).
 */
static void br_multicast_update_querier(struct net_bridge_mcast *brmctx,
					struct bridge_mcast_querier *querier,
					int ifindex,
					struct br_ip *saddr)
{
	write_seqcount_begin(&querier->seq);
	querier->port_ifidx = ifindex;
	memcpy(&querier->addr, saddr, sizeof(*saddr));
	write_seqcount_end(&querier->seq);
}
/* Send a general query for the protocol selected by @own_query (IPv4 when
 * it matches the ip4 own-query of the port/bridge context, otherwise
 * IPv6), then re-arm the own-query timer at the startup or regular query
 * interval.  Nothing is sent while a foreign querier's timer is pending.
 */
static void br_multicast_send_query(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct bridge_mcast_querier *querier;
	struct br_ip br_group;
	unsigned long time;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    !brmctx->multicast_querier)
		return;

	/* general query: zero destination */
	memset(&br_group.dst, 0, sizeof(br_group.dst));

	if (pmctx ? (own_query == &pmctx->ip4_own_query) :
		    (own_query == &brmctx->ip4_own_query)) {
		querier = &brmctx->ip4_querier;
		other_query = &brmctx->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		querier = &brmctx->ip6_querier;
		other_query = &brmctx->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	/* without IPv6 support other_query can stay NULL here */
	if (!other_query || timer_pending(&other_query->timer))
		return;

	/* we're about to select ourselves as querier */
	if (!pmctx && querier->port_ifidx) {
		struct br_ip zeroip = {};

		br_multicast_update_querier(brmctx, querier, 0, &zeroip);
	}

	__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &br_group, false,
				  0, NULL);

	time = jiffies;
	time += own_query->startup_sent < brmctx->multicast_startup_query_count ?
		brmctx->multicast_startup_query_interval :
		brmctx->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}
/* Per-port own-query timer expiry: send the next query (counting the
 * startup queries) unless the port's STP/ctx state stops multicast.
 */
static void
br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	spin_lock(&br->multicast_lock);
	if (br_multicast_port_ctx_state_stopped(pmctx))
		goto out;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, pmctx, query);

out:
	spin_unlock(&br->multicast_lock);
}
/* Timer trampoline: port IPv4 own-query timer -> common handler. */
static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query);
}
#if IS_ENABLED(CONFIG_IPV6)
/* Timer trampoline: port IPv6 own-query timer -> common handler. */
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query);
}
#endif
/* Group-specific query retransmit timer for a port group: resend the
 * pending group query (counted by grp_query_rexmit_cnt) and/or the
 * group-and-source query, re-arming the timer while retransmits remain.
 * Bails out if we aren't (or shouldn't be acting as) the active querier.
 */
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	bool need_rexmit = false;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		goto out;

	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		goto out;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!brmctx->multicast_querier)
		goto out;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	/* a foreign querier is active — it retransmits, not us */
	if (!other_query || timer_pending(&other_query->timer))
		goto out;

	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 1, NULL);
	}
	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 0, &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(&pg->rexmit_timer, jiffies +
			  brmctx->multicast_last_member_interval);
out:
	spin_unlock(&br->multicast_lock);
}
/* Tell switchdev drivers whether multicast processing is disabled.
 * Note the inversion: @value is "multicast enabled", the switchdev
 * attribute is "multicast disabled".
 */
static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
		.flags = SWITCHDEV_F_DEFER,
		.u.mc_disabled = !value,
	};

	return switchdev_port_attr_set(dev, &attr, extack);
}
/* Initialize a per-port (or per-port-vlan, when @vlan is non-NULL)
 * multicast context: default router mode and the router/own-query timers
 * for each protocol.
 */
void br_multicast_port_ctx_init(struct net_bridge_port *port,
				struct net_bridge_vlan *vlan,
				struct net_bridge_mcast_port *pmctx)
{
	pmctx->port = port;
	pmctx->vlan = vlan;
	pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	timer_setup(&pmctx->ip4_mc_router_timer,
		    br_ip4_multicast_router_expired, 0);
	timer_setup(&pmctx->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&pmctx->ip6_mc_router_timer,
		    br_ip6_multicast_router_expired, 0);
	timer_setup(&pmctx->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
}
/* Tear down a port multicast context: synchronously stop the router
 * timers so no callback can still be running afterwards.
 */
void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&pmctx->ip6_mc_router_timer);
#endif
	del_timer_sync(&pmctx->ip4_mc_router_timer);
}
/* Per-port multicast setup when a port joins the bridge: init the context,
 * push the current mc-disabled state to switchdev (-EOPNOTSUPP tolerated)
 * and allocate per-cpu statistics.
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
	int err;

	port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
	br_multicast_port_ctx_init(port, NULL, &port->multicast_ctx);

	err = br_mc_disabled_update(port->dev,
				    br_opt_get(port->br,
					       BROPT_MULTICAST_ENABLED),
				    NULL);
	if (err && err != -EOPNOTSUPP)
		return err;

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}
/* Per-port multicast teardown when a port leaves the bridge: flush the
 * remaining port groups, wait for pending gc work, then release the
 * context and statistics.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
	spin_unlock_bh(&br->multicast_lock);
	flush_work(&br->mcast_gc_work);
	br_multicast_port_ctx_deinit(&port->multicast_ctx);
	free_percpu(port->mcast_stats);
}
/* Restart an own-query cycle from scratch: reset the startup counter and
 * fire the timer immediately.  The try_to_del/del dance avoids re-arming
 * a timer whose callback is currently running on another CPU.
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}
/* Enable multicast on a port (or port-vlan) context: kick the own-query
 * timers, re-add permanent router ports and, for vlan contexts, rebuild
 * the per-vlan group counter.  Caller holds the multicast_lock.
 */
static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !netif_running(br->dev))
		return;

	br_multicast_enable(&pmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&pmctx->ip6_own_query);
#endif
	if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) {
		br_ip4_multicast_add_router(brmctx, pmctx);
		br_ip6_multicast_add_router(brmctx, pmctx);
	}

	if (br_multicast_port_ctx_is_vlan(pmctx)) {
		struct net_bridge_port_group *pg;
		u32 n = 0;

		/* The mcast_n_groups counter might be wrong. First,
		 * BR_VLFLAG_MCAST_ENABLED is toggled before temporary entries
		 * are flushed, thus mcast_n_groups after the toggle does not
		 * reflect the true values. And second, permanent entries added
		 * while BR_VLFLAG_MCAST_ENABLED was disabled, are not reflected
		 * either. Thus we have to refresh the counter.
		 */
		hlist_for_each_entry(pg, &pmctx->port->mglist, mglist) {
			if (pg->key.addr.vid == pmctx->vlan->vid)
				n++;
		}
		WRITE_ONCE(pmctx->mdb_n_entries, n);
	}
}
/* Locked wrapper: enable a port context, skipping vlan contexts whose
 * vlan has multicast snooping disabled.
 */
static void br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge *br = pmctx->port->br;

	spin_lock_bh(&br->multicast_lock);
	if (br_multicast_port_ctx_is_vlan(pmctx) &&
	    !(pmctx->vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED)) {
		spin_unlock_bh(&br->multicast_lock);
		return;
	}

	__br_multicast_enable_port_ctx(pmctx);
	spin_unlock_bh(&br->multicast_lock);
}
/* Disable multicast on a port (or port-vlan) context: flush non-permanent
 * port groups (for vlan contexts only those of the vlan), drop the port
 * from the router lists and stop all timers.  Caller holds the
 * multicast_lock.
 */
static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge_port_group *pg;
	struct hlist_node *n;
	bool del = false;

	hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) &&
		    (!br_multicast_port_ctx_is_vlan(pmctx) ||
		     pg->key.addr.vid == pmctx->vlan->vid))
			br_multicast_find_del_pg(pmctx->port->br, pg);

	del |= br_ip4_multicast_rport_del(pmctx);
	del_timer(&pmctx->ip4_mc_router_timer);
	del_timer(&pmctx->ip4_own_query.timer);
	del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&pmctx->ip6_mc_router_timer);
	del_timer(&pmctx->ip6_own_query.timer);
#endif
	br_multicast_rport_del_notify(pmctx, del);
}
/* Locked wrapper: disable a port context, skipping vlan contexts whose
 * vlan has multicast snooping disabled (mirrors the enable path).
 */
static void br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge *br = pmctx->port->br;

	spin_lock_bh(&br->multicast_lock);
	if (br_multicast_port_ctx_is_vlan(pmctx) &&
	    !(pmctx->vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED)) {
		spin_unlock_bh(&br->multicast_lock);
		return;
	}

	__br_multicast_disable_port_ctx(pmctx);
	spin_unlock_bh(&br->multicast_lock);
}
/* Enable or disable multicast on a port.  With per-vlan snooping the
 * toggle is applied to each vlan's port context (enable only for vlans in
 * a forwarding-capable state); otherwise to the port's own context.
 */
static void br_multicast_toggle_port(struct net_bridge_port *port, bool on)
{
#if IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
	if (br_opt_get(port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		rcu_read_lock();
		vg = nbp_vlan_group_rcu(port);
		if (!vg) {
			rcu_read_unlock();
			return;
		}

		/* iterate each vlan, toggle vlan multicast context */
		list_for_each_entry_rcu(vlan, &vg->vlan_list, vlist) {
			struct net_bridge_mcast_port *pmctx =
						&vlan->port_mcast_ctx;
			u8 state = br_vlan_get_state(vlan);
			/* enable vlan multicast context when state is
			 * LEARNING or FORWARDING
			 */
			if (on && br_vlan_state_allowed(state, true))
				br_multicast_enable_port_ctx(pmctx);
			else
				br_multicast_disable_port_ctx(pmctx);
		}
		rcu_read_unlock();
		return;
	}
#endif
	/* toggle port multicast context when vlan snooping is disabled */
	if (on)
		br_multicast_enable_port_ctx(&port->multicast_ctx);
	else
		br_multicast_disable_port_ctx(&port->multicast_ctx);
}
/* Public entry point: enable multicast processing on @port. */
void br_multicast_enable_port(struct net_bridge_port *port)
{
	br_multicast_toggle_port(port, true);
}
/* Public entry point: disable multicast processing on @port. */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	br_multicast_toggle_port(port, false);
}
  1868. static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
  1869. {
  1870. struct net_bridge_group_src *ent;
  1871. struct hlist_node *tmp;
  1872. int deleted = 0;
  1873. hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
  1874. if (ent->flags & BR_SGRP_F_DELETE) {
  1875. br_multicast_del_group_src(ent, false);
  1876. deleted++;
  1877. }
  1878. return deleted;
  1879. }
/* Re-arm a source entry's timer and refresh its forwarding state (the
 * timer's pending/expired state feeds into S,G forwarding decisions).
 */
static void __grp_src_mod_timer(struct net_bridge_group_src *src,
				unsigned long expires)
{
	mod_timer(&src->timer, expires);
	br_multicast_fwd_src_handle(src);
}
/* For every source marked BR_SGRP_F_SEND: lower its timer to the LMQT and,
 * if we are the active querier, arm its retransmit counter; then send one
 * group-and-source query and schedule the retransmit timer.
 */
static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx,
					      struct net_bridge_mcast_port *pmctx,
					      struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	u32 lmqc = brmctx->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	lmqt = now + br_multicast_lmqt(brmctx);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			/* only lower the timer, never extend it */
			if (ent->timer.expires > lmqt) {
				if (brmctx->multicast_querier &&
				    other_query &&
				    !timer_pending(&other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				__grp_src_mod_timer(ent, lmqt);
			}
		}
	}

	/* only the active querier sends; a pending foreign querier wins */
	if (!brmctx->multicast_querier ||
	    !other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 1, NULL);

	lmi = now + brmctx->multicast_last_member_interval;
	if (!timer_pending(&pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(&pg->rexmit_timer, lmi);
}
/* Send a group-specific query for @pg (when we are the active querier),
 * arm the retransmit counter/timer, and for EXCLUDE mode lower the group
 * timer to the LMQT.
 */
static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	unsigned long now = jiffies, lmi;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (brmctx->multicast_querier &&
	    other_query && !timer_pending(&other_query->timer)) {
		lmi = now + brmctx->multicast_last_member_interval;
		/* one query is sent now, the rest via the rexmit timer */
		pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 0, NULL);
		if (!timer_pending(&pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(&pg->rexmit_timer, lmi);
	}

	/* in EXCLUDE mode only lower the group timer, never extend it */
	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(&pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx))))
		mod_timer(&pg->timer, now + br_multicast_lmqt(brmctx));
}
/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)     (B)=GMI
 * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)     (B)=GMI
 * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 *
 * Handle IS_IN/ALLOW records: create any missing source entries and set
 * all listed sources' timers to GMI.  Returns true when the port group
 * changed (new source added or EHT state modified).
 */
static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx,
				     struct net_bridge_port_group *pg, void *h_addr,
				     void *srcs, u32 nsrcs, size_t addr_size,
				     int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		/* sources are packed back-to-back, addr_size bytes each */
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}

		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	return changed;
}
/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Group Timer=GMI
 *
 * Handle an IS_EX record while in INCLUDE mode: keep only the sources
 * listed in the record (mark-and-sweep via BR_SGRP_F_DELETE), creating
 * missing ones with a stopped timer ((B-A)=0).
 */
static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	struct br_ip src_ip;
	u32 src_idx;

	/* mark everything for deletion, then unmark what the record lists */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent)
			ent->flags &= ~BR_SGRP_F_DELETE;
		else
			ent = br_multicast_new_group_src(pg, &src_ip);
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
}
/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Group Timer=GMI
 *
 * Handle an IS_EX record while already in EXCLUDE mode: keep only the
 * listed sources (mark-and-sweep), giving newly created ones a GMI timer.
 * Returns true when the source set or EHT state changed.
 */
static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	/* mark everything for deletion, then unmark what the record lists */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent,
						    now + br_multicast_gmi(brmctx));
				changed = true;
			}
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}
  2061. static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx,
  2062. struct net_bridge_port_group *pg, void *h_addr,
  2063. void *srcs, u32 nsrcs, size_t addr_size,
  2064. int grec_type)
  2065. {
  2066. bool changed = false;
  2067. switch (pg->filter_mode) {
  2068. case MCAST_INCLUDE:
  2069. __grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
  2070. grec_type);
  2071. br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
  2072. changed = true;
  2073. break;
  2074. case MCAST_EXCLUDE:
  2075. changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs,
  2076. addr_size, grec_type);
  2077. break;
  2078. }
  2079. pg->filter_mode = MCAST_EXCLUDE;
  2080. mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));
  2081. return changed;
  2082. }
  2083. /* State Msg type New state Actions
  2084. * INCLUDE (A) TO_IN (B) INCLUDE (A+B) (B)=GMI
  2085. * Send Q(G,A-B)
  2086. */
/* Handle a TO_INCLUDE record for a group in INCLUDE mode, per the state table
 * above: the new set is (A+B), reported sources get timer = GMI, and the
 * bridge queries the sources no longer reported, Q(G,A-B).
 *
 * Returns true if the source list changed and a notification is needed.
 */
static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* Mark all current sources to be queried; reported ones are unmarked
	 * below, leaving (A-B) marked for Q(G,A-B).
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_SEND;
			to_send--;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		/* (B) = GMI, for both pre-existing and newly added sources */
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
  2123. /* State Msg type New state Actions
  2124. * EXCLUDE (X,Y) TO_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
  2125. * Send Q(G,X-A)
  2126. * Send Q(G)
  2127. */
/* Handle a TO_INCLUDE record for a group in EXCLUDE mode, per the state table
 * above: new state (X+A,Y-A) with (A) = GMI, plus source-specific queries
 * Q(G,X-A) and a group query Q(G).
 *
 * Returns true if the source list changed and a notification is needed.
 */
static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* Only sources with a running timer are in X (forwarded); only those
	 * are candidates for Q(G,X-A). Timed-out sources belong to Y.
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		if (timer_pending(&ent->timer))
			ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			if (timer_pending(&ent->timer)) {
				/* in X and reported: don't query it */
				ent->flags &= ~BR_SGRP_F_SEND;
				to_send--;
			}
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		/* (A) = GMI */
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	/* Send Q(G) */
	__grp_send_query_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
/* Apply a TO_INCLUDE group record to @pg, dispatching on the current filter
 * mode. If explicit host tracking says no hosts remain interested, the port
 * group is deleted here.
 *
 * Returns true if the port group changed and should be notified; always
 * false when the group was deleted (the delete path notifies by itself and
 * @pg must not be touched afterwards).
 */
static bool br_multicast_toin(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	if (br_multicast_eht_should_del_pg(pg)) {
		pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}
  2195. /* State Msg type New state Actions
  2196. * INCLUDE (A) TO_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
  2197. * Delete (A-B)
  2198. * Send Q(G,A*B)
  2199. * Group Timer=GMI
  2200. */
/* Handle a TO_EXCLUDE record for a group in INCLUDE mode, per the state table
 * above: keep (A*B) and add (B-A) with zero timers, delete (A-B), and query
 * the intersection Q(G,A*B).
 */
static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	struct br_ip src_ip;

	/* Default every current source to delete-and-don't-query; reported
	 * sources flip to keep-and-query below.
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			/* in (A*B): keep it and include in Q(G,A*B) */
			ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
				     BR_SGRP_F_SEND;
			to_send++;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
		}
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
}
  2233. /* State Msg type New state Actions
  2234. * EXCLUDE (X,Y) TO_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
  2235. * Delete (X-A)
  2236. * Delete (Y-A)
  2237. * Send Q(G,A-Y)
  2238. * Group Timer=GMI
  2239. */
/* Handle a TO_EXCLUDE record for a group in EXCLUDE mode, per the state table
 * above: new state (A-Y, Y*A); delete (X-A) and (Y-A); new sources (A-X-Y)
 * inherit the group timer; sources with a running timer are queried, Q(G,A-Y).
 *
 * Returns true if the source list changed and a notification is needed.
 */
static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	/* Default every current source to delete-and-don't-query; sources
	 * present in the report are unmarked below.
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* (A-X-Y) = Group Timer */
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		/* only forwarded (timer-running) sources go into Q(G,A-Y) */
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
/* Apply a TO_EXCLUDE group record to @pg: dispatch on the current filter
 * mode, then unconditionally switch the group to EXCLUDE mode and refresh
 * the group timer to GMI.
 *
 * Returns true if the port group changed and should be notified.
 */
static bool br_multicast_toex(struct net_bridge_mcast *brmctx,
			      struct net_bridge_mcast_port *pmctx,
			      struct net_bridge_port_group *pg, void *h_addr,
			      void *srcs, u32 nsrcs, size_t addr_size,
			      int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		__grp_src_toex_incl(brmctx, pmctx, pg, h_addr, srcs, nsrcs,
				    addr_size, grec_type);
		br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
		/* mode flip always counts as a change */
		changed = true;
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_toex_excl(brmctx, pmctx, pg, h_addr, srcs,
					      nsrcs, addr_size, grec_type);
		break;
	}

	pg->filter_mode = MCAST_EXCLUDE;
	mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));

	return changed;
}
  2303. /* State Msg type New state Actions
  2304. * INCLUDE (A) BLOCK (B) INCLUDE (A) Send Q(G,A*B)
  2305. */
/* Handle a BLOCK record for a group in INCLUDE mode, per the state table
 * above: the state is unchanged; the bridge only queries the blocked sources
 * it currently has, Q(G,A*B).
 *
 * Returns true if explicit host tracking changed state.
 */
static bool __grp_src_block_incl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	/* Clear stale SEND marks, then mark only sources present in both the
	 * current set and the report (A*B).
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
  2334. /* State Msg type New state Actions
  2335. * EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
  2336. * Send Q(G,A-Y)
  2337. */
/* Handle a BLOCK record for a group in EXCLUDE mode, per the state table
 * above: add (A-X-Y) with timer = Group Timer, and query the blocked sources
 * that are still forwarded, Q(G,A-Y).
 *
 * Returns true if the source list changed and a notification is needed.
 */
static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	/* Clear stale SEND marks before selecting query candidates */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* (A-X-Y) = Group Timer */
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		/* only forwarded (timer-running) sources go into Q(G,A-Y) */
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
/* Apply a BLOCK_OLD_SOURCES group record to @pg, dispatching on the current
 * filter mode. Deletes the port group when an INCLUDE group ends up with an
 * empty source list, or when explicit host tracking says no hosts remain.
 *
 * Returns true if the port group changed and should be notified; always
 * false when the group was deleted (the delete path notifies by itself and
 * @pg must not be touched afterwards).
 */
static bool br_multicast_block(struct net_bridge_mcast *brmctx,
			       struct net_bridge_mcast_port *pmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_block_excl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	}

	if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
	    br_multicast_eht_should_del_pg(pg)) {
		/* fast-leave only applies to the explicit-tracking case */
		if (br_multicast_eht_should_del_pg(pg))
			pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}
  2401. static struct net_bridge_port_group *
  2402. br_multicast_find_port(struct net_bridge_mdb_entry *mp,
  2403. struct net_bridge_port *p,
  2404. const unsigned char *src)
  2405. {
  2406. struct net_bridge *br __maybe_unused = mp->br;
  2407. struct net_bridge_port_group *pg;
  2408. for (pg = mlock_dereference(mp->ports, br);
  2409. pg;
  2410. pg = mlock_dereference(pg->next, br))
  2411. if (br_port_group_equal(pg, p, src))
  2412. return pg;
  2413. return NULL;
  2414. }
/* Parse and process an IGMPv3 membership report: walk all group records,
 * validating that each record header and its source list are within the skb,
 * and apply the per-record state transitions to the matching port group.
 *
 * When the bridge is forced to IGMPv2 mode (or there's no port context), only
 * coarse join/leave handling is done and the v3 source processing is skipped.
 *
 * Returns 0 on success or -EINVAL on a malformed packet.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 struct sk_buff *skb,
					 u16 vid)
{
	bool igmpv2 = brmctx->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	__be32 group, *h_addr;
	bool changed = false;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		/* ensure the fixed record header is present before reading it */
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* ensure the advertised source list is present too */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record types are skipped, not an error */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			/* INCLUDE {} means the host wants nothing: treat as
			 * leave when running in v2 mode or without a port
			 */
			if (!pmctx || igmpv2) {
				br_ip4_multicast_leave_group(brmctx, pmctx,
							     group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(brmctx, pmctx, group,
							 vid, src, igmpv2);
			if (err)
				break;
		}

		if (!pmctx || igmpv2)
			continue;

		spin_lock(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip4_get(brmctx->br, group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec and host addr */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		h_addr = &ip_hdr(skb)->saddr;
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			/* IS_IN uses the same transition as ALLOW */
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}
#if IS_ENABLED(CONFIG_IPV6)
/* Parse and process an MLDv2 membership report: walk all group records,
 * validating record boundaries against the skb, and apply the per-record
 * state transitions to the matching port group. IPv6 counterpart of
 * br_ip4_multicast_igmp3_report().
 *
 * When the bridge is forced to MLDv1 mode (or there's no port context), only
 * coarse join/leave handling is done and the v2 source processing is skipped.
 *
 * Returns 0 on success or -EINVAL on a malformed packet.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = brmctx->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	struct mld2_report *mld2r;
	const unsigned char *src;
	struct in6_addr *h_addr;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*mld2r)))
		return -EINVAL;

	mld2r = (struct mld2_report *)icmp6_hdr(skb);
	num = ntohs(mld2r->mld2r_ngrec);
	len = skb_transport_offset(skb) + sizeof(*mld2r);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		/* read grec_nsrcs first (possibly from a fragment) to learn
		 * the full record length before pulling the whole record
		 */
		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		/* struct_size() guards against overflow of the source array */
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record types are skipped, not an error */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			/* INCLUDE {} means the host wants nothing: treat as
			 * leave when running in v1 mode or without a port
			 */
			if (!pmctx || mldv1) {
				br_ip6_multicast_leave_group(brmctx, pmctx,
							     &grec->grec_mca,
							     vid, src);
				continue;
			}
		} else {
			err = br_ip6_multicast_add_group(brmctx, pmctx,
							 &grec->grec_mca, vid,
							 src, mldv1);
			if (err)
				break;
		}

		if (!pmctx || mldv1)
			continue;

		spin_lock(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip6_get(brmctx->br, &grec->grec_mca, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		h_addr = &ipv6_hdr(skb)->saddr;
		switch (grec->grec_type) {
		case MLD2_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_INCLUDE:
			/* IS_IN uses the same transition as ALLOW */
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		case MLD2_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock(&brmctx->br->multicast_lock);
	}

	return err;
}
#endif
/* Decide whether the querier at @saddr should become the selected querier
 * for its protocol: lower addresses win, and a querier is also accepted when
 * neither the own-query nor the other-query timer is running (no current
 * querier). On acceptance the stored querier info is updated.
 *
 * Returns true if @saddr was selected as querier.
 */
static bool br_multicast_select_querier(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct br_ip *saddr)
{
	/* ifindex 0 means the bridge itself (no port) sourced the query */
	int port_ifidx = pmctx ? pmctx->port->dev->ifindex : 0;
	struct timer_list *own_timer, *other_timer;
	struct bridge_mcast_querier *querier;

	switch (saddr->proto) {
	case htons(ETH_P_IP):
		querier = &brmctx->ip4_querier;
		own_timer = &brmctx->ip4_own_query.timer;
		other_timer = &brmctx->ip4_other_query.timer;
		/* accept if we have no querier yet or the new one has a
		 * lower (or equal) address - lowest address wins election
		 */
		if (!querier->addr.src.ip4 ||
		    ntohl(saddr->src.ip4) <= ntohl(querier->addr.src.ip4))
			goto update;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		querier = &brmctx->ip6_querier;
		own_timer = &brmctx->ip6_own_query.timer;
		other_timer = &brmctx->ip6_other_query.timer;
		if (ipv6_addr_cmp(&saddr->src.ip6, &querier->addr.src.ip6) <= 0)
			goto update;
		break;
#endif
	default:
		return false;
	}

	/* higher address: only take over if there's no active querier */
	if (!timer_pending(own_timer) && !timer_pending(other_timer))
		goto update;

	return false;

update:
	br_multicast_update_querier(brmctx, querier, port_ifidx, saddr);

	return true;
}
  2685. static struct net_bridge_port *
  2686. __br_multicast_get_querier_port(struct net_bridge *br,
  2687. const struct bridge_mcast_querier *querier)
  2688. {
  2689. int port_ifidx = READ_ONCE(querier->port_ifidx);
  2690. struct net_bridge_port *p;
  2691. struct net_device *dev;
  2692. if (port_ifidx == 0)
  2693. return NULL;
  2694. dev = dev_get_by_index_rcu(dev_net(br->dev), port_ifidx);
  2695. if (!dev)
  2696. return NULL;
  2697. p = br_port_get_rtnl_rcu(dev);
  2698. if (!p || p->br != br)
  2699. return NULL;
  2700. return p;
  2701. }
/* Worst-case netlink attribute space needed by
 * br_multicast_dump_querier_state(), for message size estimation.
 */
size_t br_multicast_querier_state_size(void)
{
	return nla_total_size(0) +	/* nest attribute */
	       nla_total_size(sizeof(__be32)) + /* BRIDGE_QUERIER_IP_ADDRESS */
	       nla_total_size(sizeof(int)) +	/* BRIDGE_QUERIER_IP_PORT */
	       nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IP_OTHER_TIMER */
#if IS_ENABLED(CONFIG_IPV6)
	       nla_total_size(sizeof(struct in6_addr)) + /* BRIDGE_QUERIER_IPV6_ADDRESS */
	       nla_total_size(sizeof(int)) +		 /* BRIDGE_QUERIER_IPV6_PORT */
	       nla_total_size_64bit(sizeof(u64)) +	 /* BRIDGE_QUERIER_IPV6_OTHER_TIMER */
#endif
	       0;
}
/* protected by rtnl or rcu */
/* Dump the current IPv4 (and, if enabled, IPv6) querier state of @brmctx
 * into a nested netlink attribute @nest_attr on @skb. A protocol's state is
 * only dumped when we are configured as querier or another querier is active
 * (its other-query timer is running). If nothing was dumped the empty nest
 * is cancelled.
 *
 * Returns 0 on success or -EMSGSIZE if @skb ran out of room.
 */
int br_multicast_dump_querier_state(struct sk_buff *skb,
				    const struct net_bridge_mcast *brmctx,
				    int nest_attr)
{
	struct bridge_mcast_querier querier = {};
	struct net_bridge_port *p;
	struct nlattr *nest;

	if (!br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return 0;

	nest = nla_nest_start(skb, nest_attr);
	if (!nest)
		return -EMSGSIZE;

	/* rcu covers the querier port device lookup below */
	rcu_read_lock();
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip4_other_query.timer))
		goto out_v6;

	br_multicast_read_querier(&brmctx->ip4_querier, &querier);
	if (nla_put_in_addr(skb, BRIDGE_QUERIER_IP_ADDRESS,
			    querier.addr.src.ip4)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	/* timer and port attrs only make sense with an active other-querier */
	if (timer_pending(&brmctx->ip4_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IP_OTHER_TIMER,
			       br_timer_value(&brmctx->ip4_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IP_PORT, p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}

out_v6:
#if IS_ENABLED(CONFIG_IPV6)
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip6_other_query.timer))
		goto out;

	br_multicast_read_querier(&brmctx->ip6_querier, &querier);
	if (nla_put_in6_addr(skb, BRIDGE_QUERIER_IPV6_ADDRESS,
			     &querier.addr.src.ip6)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	if (timer_pending(&brmctx->ip6_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IPV6_OTHER_TIMER,
			       br_timer_value(&brmctx->ip6_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IPV6_PORT,
			       p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}
out:
#endif
	rcu_read_unlock();
	nla_nest_end(skb, nest);
	/* drop the nest entirely if neither protocol put anything in it */
	if (!nla_len(nest))
		nla_nest_cancel(skb, nest);

	return 0;

out_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
/* Refresh the other-querier-present timer after hearing a query from another
 * querier. The delay timer is only armed when the main timer wasn't already
 * running, i.e. when this is a newly detected querier.
 */
static void
br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		mod_timer(&query->delay_timer, jiffies + max_delay);

	mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
}
/* Notify switchdev drivers that port @p became (or stopped being) a
 * multicast router port, so hardware can adjust flooding accordingly.
 * The attribute is set deferred (SWITCHDEV_F_DEFER).
 */
static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_PORT_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}
/* Map a router-list node @rlist back to its bridge port. The containing
 * field differs per list: ip6_rlist for the IPv6 router list, ip4_rlist
 * otherwise.
 */
static struct net_bridge_port *
br_multicast_rport_from_node(struct net_bridge_mcast *brmctx,
			     struct hlist_head *mc_router_list,
			     struct hlist_node *rlist)
{
	struct net_bridge_mcast_port *pmctx;

#if IS_ENABLED(CONFIG_IPV6)
	if (mc_router_list == &brmctx->ip6_mc_router_list)
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip6_rlist);
	else
#endif
		pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
				    ip4_rlist);

	return pmctx->port;
}
  2816. static struct hlist_node *
  2817. br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx,
  2818. struct net_bridge_port *port,
  2819. struct hlist_head *mc_router_list)
  2820. {
  2821. struct hlist_node *slot = NULL;
  2822. struct net_bridge_port *p;
  2823. struct hlist_node *rlist;
  2824. hlist_for_each(rlist, mc_router_list) {
  2825. p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist);
  2826. if ((unsigned long)port >= (unsigned long)p)
  2827. break;
  2828. slot = rlist;
  2829. }
  2830. return slot;
  2831. }
/* Check whether @pmctx is absent from the router list of the *other*
 * protocol family than the one @rnode belongs to. Used to decide if a
 * port just became a router port for the first time. Without IPv6 there
 * is no other family, so this is trivially true.
 */
static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
					   struct hlist_node *rnode)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (rnode != &pmctx->ip6_rlist)
		return hlist_unhashed(&pmctx->ip6_rlist);
	else
		return hlist_unhashed(&pmctx->ip4_rlist);
#else
	return true;
#endif
}
  2844. /* Add port to router_list
  2845. * list is maintained ordered by pointer value
  2846. * and locked by br->multicast_lock and RCU
  2847. */
/* Insert @rlist for @pmctx into @mc_router_list at its sorted position
 * (list is ordered by port pointer value). No-op if already linked.
 * Caller holds br->multicast_lock; the list is also RCU-protected.
 */
static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct hlist_node *rlist,
				    struct hlist_head *mc_router_list)
{
	struct hlist_node *slot;

	if (!hlist_unhashed(rlist))
		return;

	slot = br_multicast_get_rport_slot(brmctx, pmctx->port, mc_router_list);

	if (slot)
		hlist_add_behind_rcu(rlist, slot);
	else
		hlist_add_head_rcu(rlist, mc_router_list);

	/* For backwards compatibility for now, only notify if we
	 * switched from no IPv4/IPv6 multicast router to a new
	 * IPv4 or IPv6 multicast router.
	 */
	if (br_multicast_no_router_otherpf(pmctx, rlist)) {
		br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_NEWMDB);
		br_port_mc_router_state_change(pmctx->port, true);
	}
}
  2870. /* Add port to router_list
  2871. * list is maintained ordered by pointer value
  2872. * and locked by br->multicast_lock and RCU
  2873. */
/* Add @pmctx to the IPv4 multicast router list (sorted, see
 * br_multicast_add_router()). Caller holds br->multicast_lock.
 */
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip4_rlist,
				&brmctx->ip4_mc_router_list);
}
  2880. /* Add port to router_list
  2881. * list is maintained ordered by pointer value
  2882. * and locked by br->multicast_lock and RCU
  2883. */
/* Add @pmctx to the IPv6 multicast router list (sorted, see
 * br_multicast_add_router()). No-op when IPv6 is disabled.
 * Caller holds br->multicast_lock.
 */
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip6_rlist,
				&brmctx->ip6_mc_router_list);
#endif
}
/* Mark the bridge (when @pmctx is NULL) or a bridge port as having a
 * multicast router present and (re)arm the corresponding expiry @timer.
 * Ports configured as DISABLED or PERM routers are left untouched;
 * the bridge itself is only marked in TEMP_QUERY mode.
 */
static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct timer_list *timer,
				     struct hlist_node *rlist,
				     struct hlist_head *mc_router_list)
{
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		return;

	if (!pmctx) {
		if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			/* notify only on the no-router -> router transition */
			if (!br_ip4_multicast_is_router(brmctx) &&
			    !br_ip6_multicast_is_router(brmctx))
				br_mc_router_state_change(brmctx->br, true);
			mod_timer(timer, now + brmctx->multicast_querier_interval);
		}
		return;
	}

	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
	mod_timer(timer, now + brmctx->multicast_querier_interval);
}
/* IPv4 wrapper: pick the bridge-level or per-port timer/rlist pair and
 * delegate to br_multicast_mark_router().
 */
static void br_ip4_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
	struct timer_list *timer = &brmctx->ip4_mc_router_timer;
	struct hlist_node *rlist = NULL;	/* no rlist for the bridge itself */

	if (pmctx) {
		timer = &pmctx->ip4_mc_router_timer;
		rlist = &pmctx->ip4_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip4_mc_router_list);
}
/* IPv6 wrapper: same as br_ip4_multicast_mark_router() but for the IPv6
 * router list/timers; a no-op when IPv6 is compiled out.
 */
static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct timer_list *timer = &brmctx->ip6_mc_router_timer;
	struct hlist_node *rlist = NULL;	/* no rlist for the bridge itself */

	if (pmctx) {
		timer = &pmctx->ip6_mc_router_timer;
		rlist = &pmctx->ip6_rlist;
	}

	br_multicast_mark_router(brmctx, pmctx, timer, rlist,
				 &brmctx->ip6_mc_router_list);
#endif
}
/* Handle a received IGMP general query from @saddr: run querier election,
 * and if the sender wins, refresh the other-querier timer and mark the
 * ingress port (or bridge) as hosting a multicast router.
 */
static void
br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip4_multicast_mark_router(brmctx, pmctx);
}
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 counterpart of br_ip4_multicast_query_received(): querier election
 * for a received MLD general query, then other-querier timer refresh and
 * router marking.
 */
static void
br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (!br_multicast_select_querier(brmctx, pmctx, saddr))
		return;

	br_multicast_update_query_timer(brmctx, query, max_delay);
	br_ip6_multicast_mark_router(brmctx, pmctx);
}
#endif
/* Process a received IGMP query (v1/v2/v3).
 *
 * General queries (group == 0) go to querier election via
 * br_ip4_multicast_query_received(); group-specific queries shorten the
 * membership timers of the matching mdb entry and its port groups so
 * non-responding members age out faster.
 */
static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
				   struct net_bridge_mcast_port *pmctx,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&brmctx->br->multicast_lock);

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		/* v1/v2-sized query: code is the max response time.  A zero
		 * code presumably indicates an IGMPv1 query (no response
		 * field) -- force a 10 s delay and treat it as general.
		 */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		/* Ignore queries carrying sources, and group-specific v3
		 * queries with the suppress (S) flag when running IGMPv3.
		 */
		if (ih3->nsrcs ||
		    (brmctx->multicast_igmp_version == 3 && group &&
		     ih3->suppress))
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		/* General query: feed querier election with the IP source. */
		saddr.proto = htons(ETH_P_IP);
		saddr.src.ip4 = iph->saddr;

		br_ip4_multicast_query_received(brmctx, pmctx,
						&brmctx->ip4_other_query,
						&saddr, max_delay);
		goto out;
	}

	mp = br_mdb_ip4_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;

	/* Lower the host-joined timer only when it would fire later than
	 * now + max_delay, or when it can be safely removed first.
	 */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		/* NOTE(review): && binds tighter than ?:, so the
		 * igmp-version/filter-mode test only guards the
		 * try_to_del_timer_sync() (non-pending) branch.
		 */
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
}
#if IS_ENABLED(CONFIG_IPV6)
/* Process a received MLD query (v1/v2); IPv6 counterpart of
 * br_ip4_multicast_query().
 *
 * Returns 0 on success or -EINVAL when the packet is too short to hold
 * the claimed MLD header.
 */
static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&brmctx->br->multicast_lock);

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	if (transport_len == sizeof(*mld)) {
		/* MLDv1-sized query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		/* Ignore group-specific queries with the suppress flag set
		 * when we run MLDv2.
		 */
		if (brmctx->multicast_mld_version == 2 &&
		    !ipv6_addr_any(&mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		/* General query: run querier election on the IPv6 source. */
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 = ipv6_hdr(skb)->saddr;

		br_ip6_multicast_query_received(brmctx, pmctx,
						&brmctx->ip6_other_query,
						&saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	mp = br_mdb_ip6_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;

	/* Shorten the host-joined timer when it would fire later than
	 * now + max_delay (or can be safely removed first).
	 */
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		/* NOTE(review): && binds tighter than ?: -- the
		 * mld-version/filter-mode test only guards the
		 * try_to_del_timer_sync() (non-pending) branch.
		 */
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
	return err;
}
#endif
/* Common leave handling for IGMP leave / MLD done messages.
 *
 * With fast-leave set on the port, the matching (non-permanent) port group
 * is deleted immediately.  Otherwise, when we are the active querier, send
 * a group-specific query and lower the relevant membership timers to
 * last_member_count * last_member_interval so the group expires unless a
 * member responds.  @pmctx == NULL means the leave concerns the bridge
 * itself (host-joined state).
 */
static void
br_multicast_leave_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&brmctx->br->multicast_lock);

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_mdb_ip_get(brmctx->br, group);
	if (!mp)
		goto out;

	if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		/* Fast leave: drop the port group right away (but never a
		 * permanent entry).
		 */
		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	/* Defer to the foreign querier while its timer is running. */
	if (timer_pending(&other_query->timer))
		goto out;

	if (brmctx->multicast_querier) {
		/* We are querier: probe the group, then shorten the port
		 * group's timer to the last-member window.
		 */
		__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + brmctx->multicast_last_member_count *
				 brmctx->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, brmctx->br);
		     p != NULL && pmctx != NULL;
		     p = mlock_dereference(p->next, brmctx->br)) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + brmctx->multicast_last_member_count *
		     brmctx->multicast_last_member_interval;

	if (!pmctx) {
		/* Bridge-level leave: lower the host-joined timer. */
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	/* Port-level leave: lower the matching port group's timer. */
	for (p = mlock_dereference(mp->ports, brmctx->br);
	     p != NULL;
	     p = mlock_dereference(p->next, brmctx->br)) {
		if (p->key.port != pmctx->port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&brmctx->br->multicast_lock);
}
  3197. static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
  3198. struct net_bridge_mcast_port *pmctx,
  3199. __be32 group,
  3200. __u16 vid,
  3201. const unsigned char *src)
  3202. {
  3203. struct br_ip br_group;
  3204. struct bridge_mcast_own_query *own_query;
  3205. if (ipv4_is_local_multicast(group))
  3206. return;
  3207. own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query;
  3208. memset(&br_group, 0, sizeof(br_group));
  3209. br_group.dst.ip4 = group;
  3210. br_group.proto = htons(ETH_P_IP);
  3211. br_group.vid = vid;
  3212. br_multicast_leave_group(brmctx, pmctx, &br_group,
  3213. &brmctx->ip4_other_query,
  3214. own_query, src);
  3215. }
  3216. #if IS_ENABLED(CONFIG_IPV6)
  3217. static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
  3218. struct net_bridge_mcast_port *pmctx,
  3219. const struct in6_addr *group,
  3220. __u16 vid,
  3221. const unsigned char *src)
  3222. {
  3223. struct br_ip br_group;
  3224. struct bridge_mcast_own_query *own_query;
  3225. if (ipv6_addr_is_ll_all_nodes(group))
  3226. return;
  3227. own_query = pmctx ? &pmctx->ip6_own_query : &brmctx->ip6_own_query;
  3228. memset(&br_group, 0, sizeof(br_group));
  3229. br_group.dst.ip6 = *group;
  3230. br_group.proto = htons(ETH_P_IPV6);
  3231. br_group.vid = vid;
  3232. br_multicast_leave_group(brmctx, pmctx, &br_group,
  3233. &brmctx->ip6_other_query,
  3234. own_query, src);
  3235. }
  3236. #endif
  3237. static void br_multicast_err_count(const struct net_bridge *br,
  3238. const struct net_bridge_port *p,
  3239. __be16 proto)
  3240. {
  3241. struct bridge_mcast_stats __percpu *stats;
  3242. struct bridge_mcast_stats *pstats;
  3243. if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
  3244. return;
  3245. if (p)
  3246. stats = p->mcast_stats;
  3247. else
  3248. stats = br->mcast_stats;
  3249. if (WARN_ON(!stats))
  3250. return;
  3251. pstats = this_cpu_ptr(stats);
  3252. u64_stats_update_begin(&pstats->syncp);
  3253. switch (proto) {
  3254. case htons(ETH_P_IP):
  3255. pstats->mstats.igmp_parse_errors++;
  3256. break;
  3257. #if IS_ENABLED(CONFIG_IPV6)
  3258. case htons(ETH_P_IPV6):
  3259. pstats->mstats.mld_parse_errors++;
  3260. break;
  3261. #endif
  3262. }
  3263. u64_stats_update_end(&pstats->syncp);
  3264. }
/* Inspect a PIM packet; a PIMv2 Hello marks the ingress port as hosting a
 * multicast router (PIM routers announce themselves via Hellos).
 */
static void br_multicast_pim(struct net_bridge_mcast *brmctx,
			     struct net_bridge_mcast_port *pmctx,
			     const struct sk_buff *skb)
{
	unsigned int offset = skb_transport_offset(skb);
	struct pimhdr *pimhdr, _pimhdr;

	pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
	if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
	    pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}
/* Handle an IPv4 Multicast Router Discovery advertisement: mark the
 * ingress port as a router port.  Returns -ENOMSG when the packet is not
 * an IGMP MRD advertisement, 0 otherwise.
 */
static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct sk_buff *skb)
{
	if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
	    igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
		return -ENOMSG;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip4_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);

	return 0;
}
/* Parse and dispatch an IPv4 multicast control packet (IGMP, PIM Hello,
 * MRD).  Packets that are not IGMP (-ENOMSG from ip_mc_check_igmp) are
 * still inspected for PIM/MRD router announcements and are flagged
 * mrouters_only when not link-local.  Returns 0 or a negative error.
 */
static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		/* Not IGMP: restrict non-link-local multicast to router
		 * ports, and pick up PIM hellos / MRD advertisements.
		 */
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(brmctx, pmctx, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* Reports only go to router ports, not to other members. */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(brmctx, pmctx, ih->group, vid,
						 src, true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(brmctx, pmctx, ih->group, vid, src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#if IS_ENABLED(CONFIG_IPV6)
/* Handle an IPv6 Multicast Router Discovery advertisement: mark the
 * ingress port as a router port.  Non-advertisement ICMPv6 is ignored.
 */
static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct sk_buff *skb)
{
	if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
		return;

	spin_lock(&brmctx->br->multicast_lock);
	br_ip6_multicast_mark_router(brmctx, pmctx);
	spin_unlock(&brmctx->br->multicast_lock);
}
/* Parse and dispatch an IPv6 multicast control packet (MLD, MRD); IPv6
 * counterpart of br_multicast_ipv4_rcv().  -ENOMSG/-ENODATA from
 * ipv6_mc_check_mld mean "not MLD" -- such packets may still carry an MRD
 * advertisement and get the mrouters_only flag when not all-nodes.
 */
static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG || err == -ENODATA) {
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;

		if (err == -ENODATA &&
		    ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
			br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb);

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		/* Reports only go to router ports, not to other members. */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(brmctx, pmctx, &mld->mld_mca,
						 vid, src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(brmctx, pmctx, &mld->mld_mca, vid,
					     src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
#endif
/* Snooping entry point for received multicast control traffic.
 *
 * When per-vlan snooping is enabled, @brmctx/@pmctx are redirected to the
 * vlan's multicast contexts (hence the double pointers -- callers keep
 * using the selected contexts afterwards).  Returns the protocol
 * handler's result, or 0 when snooping is disabled for this packet.
 */
int br_multicast_rcv(struct net_bridge_mcast **brmctx,
		     struct net_bridge_mcast_port **pmctx,
		     struct net_bridge_vlan *vlan,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get((*brmctx)->br, BROPT_MULTICAST_ENABLED))
		return 0;

	if (br_opt_get((*brmctx)->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) && vlan) {
		const struct net_bridge_vlan *masterv;

		/* the vlan has the master flag set only when transmitting
		 * through the bridge device
		 */
		if (br_vlan_is_master(vlan)) {
			masterv = vlan;
			*brmctx = &vlan->br_mcast_ctx;
			*pmctx = NULL;
		} else {
			masterv = vlan->brvlan;
			*brmctx = &vlan->brvlan->br_mcast_ctx;
			*pmctx = &vlan->port_mcast_ctx;
		}

		/* Skip vlans whose global mcast context is disabled. */
		if (!(masterv->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
			return 0;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(*brmctx, *pmctx, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(*brmctx, *pmctx, skb, vid);
		break;
#endif
	}

	return ret;
}
/* Own-query timer handler helper: send the next general query and count
 * it against the startup-query budget.  @querier is unused in the body
 * here; it is passed by the per-family timer handlers below.
 */
static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query,
				       struct bridge_mcast_querier *querier)
{
	spin_lock(&brmctx->br->multicast_lock);

	if (br_multicast_ctx_vlan_disabled(brmctx))
		goto out;

	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, NULL, query);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}
/* Timer callback for the IPv4 own-query timer. */
static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip4_own_query,
				   &brmctx->ip4_querier);
}
#if IS_ENABLED(CONFIG_IPV6)
/* Timer callback for the IPv6 own-query timer. */
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip6_own_query,
				   &brmctx->ip6_querier);
}
#endif
/* Deferred-free worker: detach the pending gc list under the multicast
 * lock, then free the entries outside of it via br_multicast_gc().
 */
static void br_multicast_gc_work(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     mcast_gc_work);
	HLIST_HEAD(deleted_head);

	spin_lock_bh(&br->multicast_lock);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_gc(&deleted_head);
}
/* Initialize a bridge multicast context (either the bridge's own context,
 * @vlan == NULL, or a per-vlan one) with protocol defaults and set up all
 * query/router timers.  Intervals are in jiffies, e.g. queries every
 * 125 s and a 255 s other-querier timeout.
 */
void br_multicast_ctx_init(struct net_bridge *br,
			   struct net_bridge_vlan *vlan,
			   struct net_bridge_mcast *brmctx)
{
	brmctx->br = br;
	brmctx->vlan = vlan;
	brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	brmctx->multicast_last_member_count = 2;
	brmctx->multicast_startup_query_count = 2;

	brmctx->multicast_last_member_interval = HZ;
	brmctx->multicast_query_response_interval = 10 * HZ;
	brmctx->multicast_startup_query_interval = 125 * HZ / 4;
	brmctx->multicast_query_interval = 125 * HZ;
	brmctx->multicast_querier_interval = 255 * HZ;
	brmctx->multicast_membership_interval = 260 * HZ;

	brmctx->ip4_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
	brmctx->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	brmctx->multicast_mld_version = 1;
	brmctx->ip6_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
#endif

	timer_setup(&brmctx->ip4_mc_router_timer,
		    br_ip4_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip4_other_query.delay_timer,
		    br_multicast_query_delay_expired, 0);
	timer_setup(&brmctx->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&brmctx->ip6_mc_router_timer,
		    br_ip6_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip6_other_query.delay_timer,
		    br_multicast_query_delay_expired, 0);
	timer_setup(&brmctx->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
}
/* Tear down a multicast context: synchronously stop all its timers. */
void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
{
	__br_multicast_stop(brmctx);
}
/* One-time multicast initialization for a new bridge device: defaults,
 * the bridge-level mcast context, lock and mdb/gc bookkeeping.
 */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br_multicast_ctx_init(br, NULL, &br->multicast_ctx);

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->mcast_gc_list);
	INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}
/* Join the IPv4 all-snoopers group (224.0.0.106) on the bridge device so
 * we receive multicast router discovery messages.
 */
static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (!in_dev)
		return;

	__ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}
#if IS_ENABLED(CONFIG_IPV6)
/* Join the IPv6 all-snoopers group (ff02::6a) on the bridge device. */
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
/* IPv6 disabled: joining the IPv6 all-snoopers group is a no-op. */
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif
/* Join both IPv4 and IPv6 all-snoopers groups on the bridge device. */
void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}
/* Leave the IPv4 all-snoopers group; WARNs if the in_device vanished
 * while we were still joined.
 */
static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}
#if IS_ENABLED(CONFIG_IPV6)
/* Leave the IPv6 all-snoopers group (ff02::6a) on the bridge device. */
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
/* IPv6 disabled: leaving the IPv6 all-snoopers group is a no-op. */
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif
/* Leave both IPv4 and IPv6 all-snoopers groups on the bridge device. */
void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}
/* Restart one own-query cycle: reset the startup counter and fire the
 * query timer immediately (only when snooping is enabled).
 */
static void __br_multicast_open_query(struct net_bridge *br,
				      struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return;

	mod_timer(&query->timer, jiffies);
}
/* Kick off own-query cycles for both address families of a context. */
static void __br_multicast_open(struct net_bridge_mcast *brmctx)
{
	__br_multicast_open_query(brmctx->br, &brmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open_query(brmctx->br, &brmctx->ip6_own_query);
#endif
}
/* Bridge-up hook: start querying on the active multicast contexts --
 * every enabled bridge-entry vlan context when vlan snooping is on,
 * otherwise the single bridge-level context.
 */
void br_multicast_open(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_open(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_open(&br->multicast_ctx);
	}
}
/* Synchronously stop every timer of a multicast context (router,
 * other-querier, querier-delay and own-query, for both families).
 */
static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
{
	del_timer_sync(&brmctx->ip4_mc_router_timer);
	del_timer_sync(&brmctx->ip4_other_query.timer);
	del_timer_sync(&brmctx->ip4_other_query.delay_timer);
	del_timer_sync(&brmctx->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&brmctx->ip6_mc_router_timer);
	del_timer_sync(&brmctx->ip6_other_query.timer);
	del_timer_sync(&brmctx->ip6_other_query.delay_timer);
	del_timer_sync(&brmctx->ip6_own_query.timer);
#endif
}
/* React to a port-vlan STP state change: re-enable the vlan's port
 * multicast context when the new state allows traffic.  Only relevant for
 * port vlans with vlan snooping active.
 */
void br_multicast_update_vlan_mcast_ctx(struct net_bridge_vlan *v, u8 state)
{
#if IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING)
	struct net_bridge *br;

	if (!br_vlan_should_use(v))
		return;

	if (br_vlan_is_master(v))
		return;

	br = v->port->br;

	if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		return;

	if (br_vlan_state_allowed(state, true))
		br_multicast_enable_port_ctx(&v->port_mcast_ctx);

	/* Multicast is not disabled for the vlan when it goes in
	 * blocking state because the timers will expire and stop by
	 * themselves without sending more queries.
	 */
#endif
}
/* Enable/disable multicast for a single vlan (bridge entry or port vlan),
 * flipping BR_VLFLAG_MCAST_ENABLED under the multicast lock and starting
 * or stopping the matching context.  No-op if already in state @on or if
 * the global vlan mcast state forbids enabling.
 */
void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge *br;

	/* it's okay to check for the flag without the multicast lock because it
	 * can only change under RTNL -> multicast_lock, we need the latter to
	 * sync with timers and packets
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED))
		return;

	if (br_vlan_is_master(vlan)) {
		br = vlan->br;

		if (!br_vlan_is_brentry(vlan) ||
		    (on &&
		     br_multicast_ctx_vlan_global_disabled(&vlan->br_mcast_ctx)))
			return;

		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		spin_unlock_bh(&br->multicast_lock);

		/* open/stop outside the lock -- __br_multicast_stop uses
		 * del_timer_sync
		 */
		if (on)
			__br_multicast_open(&vlan->br_mcast_ctx);
		else
			__br_multicast_stop(&vlan->br_mcast_ctx);
	} else {
		struct net_bridge_mcast *brmctx;

		brmctx = br_multicast_port_ctx_get_global(&vlan->port_mcast_ctx);
		if (on && br_multicast_ctx_vlan_global_disabled(brmctx))
			return;

		br = vlan->port->br;
		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		if (on)
			__br_multicast_enable_port_ctx(&vlan->port_mcast_ctx);
		else
			__br_multicast_disable_port_ctx(&vlan->port_mcast_ctx);
		spin_unlock_bh(&br->multicast_lock);
	}
}
/* Toggle multicast for a master vlan and each port's corresponding vlan
 * entry; the bridge's own entry is toggled last.
 */
static void br_multicast_toggle_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge_port *p;

	if (WARN_ON_ONCE(!br_vlan_is_master(vlan)))
		return;

	list_for_each_entry(p, &vlan->br->port_list, list) {
		struct net_bridge_vlan *vport;

		vport = br_vlan_find(nbp_vlan_group(p), vlan->vid);
		if (!vport)
			continue;
		br_multicast_toggle_one_vlan(vport, on);
	}

	if (br_vlan_is_brentry(vlan))
		br_multicast_toggle_one_vlan(vlan, on);
}
/* Switch the bridge between vlan-scoped and bridge-scoped multicast
 * snooping.  Requires vlan filtering to be on when enabling.  When vlan
 * snooping goes on, the non-vlan (bridge and port) contexts are stopped
 * and each vlan is toggled; the reverse when it goes off.
 * Returns 0 or -EINVAL.
 */
int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
				      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	struct net_bridge_port *p;

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) == on)
		return 0;

	if (on && !br_opt_get(br, BROPT_VLAN_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot enable multicast vlan snooping with vlan filtering disabled");
		return -EINVAL;
	}

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	br_opt_toggle(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED, on);

	/* disable/enable non-vlan mcast contexts based on vlan snooping */
	if (on)
		__br_multicast_stop(&br->multicast_ctx);
	else
		__br_multicast_open(&br->multicast_ctx);
	list_for_each_entry(p, &br->port_list, list) {
		if (on)
			br_multicast_disable_port_ctx(&p->multicast_ctx);
		else
			br_multicast_enable_port_ctx(&p->multicast_ctx);
	}

	list_for_each_entry(vlan, &vg->vlan_list, vlist)
		br_multicast_toggle_vlan(vlan, on);

	return 0;
}
/* Toggle the global multicast-enabled flag of a vlan and propagate the
 * change to all its per-port entries.  Returns true when the state
 * actually changed.
 */
bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on)
{
	ASSERT_RTNL();

	/* BR_VLFLAG_GLOBAL_MCAST_ENABLED relies on eventual consistency and
	 * requires only RTNL to change
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
		return false;

	vlan->priv_flags ^= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	br_multicast_toggle_vlan(vlan, on);

	return true;
}
/* Bridge-down hook: mirror image of br_multicast_open() -- stop the
 * timers of every active multicast context.
 */
void br_multicast_stop(struct net_bridge *br)
{
	ASSERT_RTNL();

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
		struct net_bridge_vlan_group *vg;
		struct net_bridge_vlan *vlan;

		vg = br_vlan_group(br);
		if (vg) {
			list_for_each_entry(vlan, &vg->vlan_list, vlist) {
				struct net_bridge_mcast *brmctx;

				brmctx = &vlan->br_mcast_ctx;
				if (br_vlan_is_brentry(vlan) &&
				    !br_multicast_ctx_vlan_disabled(brmctx))
					__br_multicast_stop(&vlan->br_mcast_ctx);
			}
		}
	} else {
		__br_multicast_stop(&br->multicast_ctx);
	}
}
/* Bridge device teardown: delete every mdb entry, drain the gc list
 * synchronously, stop the bridge context and wait for outstanding RCU
 * callbacks (rcu_barrier) before the device goes away.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	HLIST_HEAD(deleted_head);
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
		br_multicast_del_mdb_entry(mp);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_ctx_deinit(&br->multicast_ctx);
	br_multicast_gc(&deleted_head);
	cancel_work_sync(&br->mcast_gc_work);

	rcu_barrier();
}
/* Set the multicast router type of a bridge mcast context.
 * Returns 0 on success, -EINVAL for unsupported values.
 */
int br_multicast_set_router(struct net_bridge_mcast *brmctx, unsigned long val)
{
        int err = -EINVAL;

        spin_lock_bh(&brmctx->br->multicast_lock);

        switch (val) {
        case MDB_RTR_TYPE_DISABLED:
        case MDB_RTR_TYPE_PERM:
                /* PERM forces router state on, DISABLED forces it off; either
                 * way the learning timers are no longer needed
                 */
                br_mc_router_state_change(brmctx->br, val == MDB_RTR_TYPE_PERM);
                del_timer(&brmctx->ip4_mc_router_timer);
#if IS_ENABLED(CONFIG_IPV6)
                del_timer(&brmctx->ip6_mc_router_timer);
#endif
                brmctx->multicast_router = val;
                err = 0;
                break;
        case MDB_RTR_TYPE_TEMP_QUERY:
                /* only signal a state change when actually leaving another mode */
                if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
                        br_mc_router_state_change(brmctx->br, false);
                brmctx->multicast_router = val;
                err = 0;
                break;
        }

        spin_unlock_bh(&brmctx->br->multicast_lock);

        return err;
}
/* Notify about a port that stopped being a multicast router, but only once
 * it is off both the IPv4 and the IPv6 router list.
 */
static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted)
{
        if (!deleted)
                return;

        /* For backwards compatibility for now, only notify if there is
         * no multicast router anymore for both IPv4 and IPv6.
         */
        if (!hlist_unhashed(&pmctx->ip4_rlist))
                return;
#if IS_ENABLED(CONFIG_IPV6)
        if (!hlist_unhashed(&pmctx->ip6_rlist))
                return;
#endif

        br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_DELMDB);
        br_port_mc_router_state_change(pmctx->port, false);

        /* don't allow timer refresh */
        if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP)
                pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}
/* Set the multicast router type of a port mcast context.
 * Setting the current value again refreshes the TEMP timers; otherwise the
 * port is moved between the router lists accordingly.
 * Returns 0 on success, -EINVAL for unsupported values.
 */
int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx,
                                 unsigned long val)
{
        struct net_bridge_mcast *brmctx;
        unsigned long now = jiffies;
        int err = -EINVAL;
        bool del = false;

        brmctx = br_multicast_port_ctx_get_global(pmctx);
        spin_lock_bh(&brmctx->br->multicast_lock);
        if (pmctx->multicast_router == val) {
                /* Refresh the temp router port timer */
                if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) {
                        mod_timer(&pmctx->ip4_mc_router_timer,
                                  now + brmctx->multicast_querier_interval);
#if IS_ENABLED(CONFIG_IPV6)
                        mod_timer(&pmctx->ip6_mc_router_timer,
                                  now + brmctx->multicast_querier_interval);
#endif
                }
                err = 0;
                goto unlock;
        }
        switch (val) {
        case MDB_RTR_TYPE_DISABLED:
                /* remove from both router lists and stop the timers */
                pmctx->multicast_router = MDB_RTR_TYPE_DISABLED;
                del |= br_ip4_multicast_rport_del(pmctx);
                del_timer(&pmctx->ip4_mc_router_timer);
                del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
                del_timer(&pmctx->ip6_mc_router_timer);
#endif
                br_multicast_rport_del_notify(pmctx, del);
                break;
        case MDB_RTR_TYPE_TEMP_QUERY:
                /* back to query-learning mode: drop current list entries */
                pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
                del |= br_ip4_multicast_rport_del(pmctx);
                del |= br_ip6_multicast_rport_del(pmctx);
                br_multicast_rport_del_notify(pmctx, del);
                break;
        case MDB_RTR_TYPE_PERM:
                /* permanent router port: add to the lists, no expiry timers */
                pmctx->multicast_router = MDB_RTR_TYPE_PERM;
                del_timer(&pmctx->ip4_mc_router_timer);
                br_ip4_multicast_add_router(brmctx, pmctx);
#if IS_ENABLED(CONFIG_IPV6)
                del_timer(&pmctx->ip6_mc_router_timer);
#endif
                br_ip6_multicast_add_router(brmctx, pmctx);
                break;
        case MDB_RTR_TYPE_TEMP:
                /* temporary router port, kept alive by the mark/timer logic */
                pmctx->multicast_router = MDB_RTR_TYPE_TEMP;
                br_ip4_multicast_mark_router(brmctx, pmctx);
                br_ip6_multicast_mark_router(brmctx, pmctx);
                break;
        default:
                goto unlock;
        }
        err = 0;
unlock:
        spin_unlock_bh(&brmctx->br->multicast_lock);

        return err;
}
  3885. int br_multicast_set_vlan_router(struct net_bridge_vlan *v, u8 mcast_router)
  3886. {
  3887. int err;
  3888. if (br_vlan_is_master(v))
  3889. err = br_multicast_set_router(&v->br_mcast_ctx, mcast_router);
  3890. else
  3891. err = br_multicast_set_port_router(&v->port_mcast_ctx,
  3892. mcast_router);
  3893. return err;
  3894. }
/* Start own-query transmission for @query (the ip4 or ip6 own query of
 * @brmctx) and enable the corresponding own query on each active port
 * context. Bails out if the context doesn't match the current vlan
 * snooping mode.
 */
static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
                                       struct bridge_mcast_own_query *query)
{
        struct net_bridge_port *port;

        if (!br_multicast_ctx_matches_vlan_snooping(brmctx))
                return;

        __br_multicast_open_query(brmctx->br, query);

        rcu_read_lock();
        list_for_each_entry_rcu(port, &brmctx->br->port_list, list) {
                struct bridge_mcast_own_query *ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
                struct bridge_mcast_own_query *ip6_own_query;
#endif

                if (br_multicast_port_ctx_state_stopped(&port->multicast_ctx))
                        continue;

                if (br_multicast_ctx_is_vlan(brmctx)) {
                        struct net_bridge_vlan *vlan;

                        /* for vlan contexts use the matching per-port vlan
                         * context's queries, if the vlan exists on the port
                         */
                        vlan = br_vlan_find(nbp_vlan_group_rcu(port),
                                            brmctx->vlan->vid);
                        if (!vlan ||
                            br_multicast_port_ctx_state_stopped(&vlan->port_mcast_ctx))
                                continue;

                        ip4_own_query = &vlan->port_mcast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
                        ip6_own_query = &vlan->port_mcast_ctx.ip6_own_query;
#endif
                } else {
                        ip4_own_query = &port->multicast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
                        ip6_own_query = &port->multicast_ctx.ip6_own_query;
#endif
                }

                /* @query identifies which IP family is being started */
                if (query == &brmctx->ip4_own_query)
                        br_multicast_enable(ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
                else
                        br_multicast_enable(ip6_own_query);
#endif
        }
        rcu_read_unlock();
}
/* Enable/disable multicast snooping (BROPT_MULTICAST_ENABLED) on the bridge.
 * Returns 0 on success or when the value is unchanged; may return an error
 * from br_mc_disabled_update() (-EOPNOTSUPP is treated as success).
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val,
                        struct netlink_ext_ack *extack)
{
        struct net_bridge_port *port;
        bool change_snoopers = false;
        int err = 0;

        spin_lock_bh(&br->multicast_lock);
        if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
                goto unlock;

        err = br_mc_disabled_update(br->dev, val, extack);
        if (err == -EOPNOTSUPP)
                err = 0;
        if (err)
                goto unlock;

        br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
        if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
                change_snoopers = true;
                goto unlock;
        }

        if (!netif_running(br->dev))
                goto unlock;

        br_multicast_open(br);
        list_for_each_entry(port, &br->port_list, list)
                __br_multicast_enable_port_ctx(&port->multicast_ctx);

        change_snoopers = true;

unlock:
        spin_unlock_bh(&br->multicast_lock);

        /* br_multicast_join_snoopers has the potential to cause
         * an MLD Report/Leave to be delivered to br_multicast_rcv,
         * which would in turn call br_multicast_add_group, which would
         * attempt to acquire multicast_lock. This function should be
         * called after the lock has been released to avoid deadlocks on
         * multicast_lock.
         *
         * br_multicast_leave_snoopers does not have the problem since
         * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
         * returns without calling br_multicast_ipv4/6_rcv if it's not
         * enabled. Moved both functions out just for symmetry.
         */
        if (change_snoopers) {
                if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
                        br_multicast_join_snoopers(br);
                else
                        br_multicast_leave_snoopers(br);
        }

        return err;
}
  3983. bool br_multicast_enabled(const struct net_device *dev)
  3984. {
  3985. struct net_bridge *br = netdev_priv(dev);
  3986. return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
  3987. }
  3988. EXPORT_SYMBOL_GPL(br_multicast_enabled);
  3989. bool br_multicast_router(const struct net_device *dev)
  3990. {
  3991. struct net_bridge *br = netdev_priv(dev);
  3992. bool is_router;
  3993. spin_lock_bh(&br->multicast_lock);
  3994. is_router = br_multicast_is_router(&br->multicast_ctx, NULL);
  3995. spin_unlock_bh(&br->multicast_lock);
  3996. return is_router;
  3997. }
  3998. EXPORT_SYMBOL_GPL(br_multicast_router);
/* Enable/disable the querier of a bridge mcast context. When enabling, arm
 * the other-querier delay timers (unless an other-querier timer is already
 * pending) and start own queries for both IP families. Always returns 0.
 */
int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val)
{
        unsigned long max_delay;

        val = !!val;

        spin_lock_bh(&brmctx->br->multicast_lock);
        if (brmctx->multicast_querier == val)
                goto unlock;

        WRITE_ONCE(brmctx->multicast_querier, val);
        if (!val)
                goto unlock;

        max_delay = brmctx->multicast_query_response_interval;

        if (!timer_pending(&brmctx->ip4_other_query.timer))
                mod_timer(&brmctx->ip4_other_query.delay_timer,
                          jiffies + max_delay);

        br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
        if (!timer_pending(&brmctx->ip6_other_query.timer))
                mod_timer(&brmctx->ip6_other_query.delay_timer,
                          jiffies + max_delay);

        br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
#endif

unlock:
        spin_unlock_bh(&brmctx->br->multicast_lock);

        return 0;
}
  4024. int br_multicast_set_igmp_version(struct net_bridge_mcast *brmctx,
  4025. unsigned long val)
  4026. {
  4027. /* Currently we support only version 2 and 3 */
  4028. switch (val) {
  4029. case 2:
  4030. case 3:
  4031. break;
  4032. default:
  4033. return -EINVAL;
  4034. }
  4035. spin_lock_bh(&brmctx->br->multicast_lock);
  4036. brmctx->multicast_igmp_version = val;
  4037. spin_unlock_bh(&brmctx->br->multicast_lock);
  4038. return 0;
  4039. }
  4040. #if IS_ENABLED(CONFIG_IPV6)
  4041. int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx,
  4042. unsigned long val)
  4043. {
  4044. /* Currently we support version 1 and 2 */
  4045. switch (val) {
  4046. case 1:
  4047. case 2:
  4048. break;
  4049. default:
  4050. return -EINVAL;
  4051. }
  4052. spin_lock_bh(&brmctx->br->multicast_lock);
  4053. brmctx->multicast_mld_version = val;
  4054. spin_unlock_bh(&brmctx->br->multicast_lock);
  4055. return 0;
  4056. }
  4057. #endif
  4058. void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
  4059. unsigned long val)
  4060. {
  4061. unsigned long intvl_jiffies = clock_t_to_jiffies(val);
  4062. if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) {
  4063. br_info(brmctx->br,
  4064. "trying to set multicast query interval below minimum, setting to %lu (%ums)\n",
  4065. jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN),
  4066. jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN));
  4067. intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
  4068. }
  4069. if (intvl_jiffies > BR_MULTICAST_QUERY_INTVL_MAX) {
  4070. br_info(brmctx->br,
  4071. "trying to set multicast query interval above maximum, setting to %lu (%ums)\n",
  4072. jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MAX),
  4073. jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MAX));
  4074. intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MAX;
  4075. }
  4076. brmctx->multicast_query_interval = intvl_jiffies;
  4077. }
  4078. void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
  4079. unsigned long val)
  4080. {
  4081. unsigned long intvl_jiffies = clock_t_to_jiffies(val);
  4082. if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) {
  4083. br_info(brmctx->br,
  4084. "trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n",
  4085. jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN),
  4086. jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN));
  4087. intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
  4088. }
  4089. if (intvl_jiffies > BR_MULTICAST_STARTUP_QUERY_INTVL_MAX) {
  4090. br_info(brmctx->br,
  4091. "trying to set multicast startup query interval above maximum, setting to %lu (%ums)\n",
  4092. jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX),
  4093. jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX));
  4094. intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MAX;
  4095. }
  4096. brmctx->multicast_startup_query_interval = intvl_jiffies;
  4097. }
/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev: The bridge port adjacent to which to retrieve addresses
 * @br_ip_list: The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 * - on allocation failure the walk stops and the count collected so far is
 *   returned; entries already added remain on the list
 */
int br_multicast_list_adjacent(struct net_device *dev,
                               struct list_head *br_ip_list)
{
        struct net_bridge *br;
        struct net_bridge_port *port;
        struct net_bridge_port_group *group;
        struct br_ip_list *entry;
        int count = 0;

        rcu_read_lock();
        if (!br_ip_list || !netif_is_bridge_port(dev))
                goto unlock;

        port = br_port_get_rcu(dev);
        if (!port || !port->br)
                goto unlock;

        br = port->br;

        list_for_each_entry_rcu(port, &br->port_list, list) {
                /* skip the port we were asked about */
                if (!port->dev || port->dev == dev)
                        continue;

                hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
                        /* GFP_ATOMIC: we're inside an RCU read-side section */
                        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
                        if (!entry)
                                goto unlock;

                        entry->addr = group->key.addr;
                        list_add(&entry->list, br_ip_list);
                        count++;
                }
        }

unlock:
        rcu_read_unlock();
        return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists anywhere on the bridged link layer.
 * Otherwise returns false.
 */
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
        struct net_bridge *br;
        struct net_bridge_port *port;
        struct ethhdr eth;
        bool ret = false;

        rcu_read_lock();
        if (!netif_is_bridge_port(dev))
                goto unlock;

        port = br_port_get_rcu(dev);
        if (!port || !port->br)
                goto unlock;

        br = port->br;

        /* build a minimal header carrying only the protocol of interest */
        memset(&eth, 0, sizeof(eth));
        eth.h_proto = htons(proto);

        ret = br_multicast_querier_exists(&br->multicast_ctx, &eth, NULL);

unlock:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
        struct net_bridge_mcast *brmctx;
        struct net_bridge *br;
        struct net_bridge_port *port;
        bool ret = false;
        int port_ifidx;

        rcu_read_lock();
        if (!netif_is_bridge_port(dev))
                goto unlock;

        port = br_port_get_rcu(dev);
        if (!port || !port->br)
                goto unlock;

        br = port->br;
        brmctx = &br->multicast_ctx;

        switch (proto) {
        case ETH_P_IP:
                /* no querier seen (timer idle) or it sits behind @dev itself */
                port_ifidx = brmctx->ip4_querier.port_ifidx;
                if (!timer_pending(&brmctx->ip4_other_query.timer) ||
                    port_ifidx == port->dev->ifindex)
                        goto unlock;
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case ETH_P_IPV6:
                port_ifidx = brmctx->ip6_querier.port_ifidx;
                if (!timer_pending(&brmctx->ip6_other_query.timer) ||
                    port_ifidx == port->dev->ifindex)
                        goto unlock;
                break;
#endif
        default:
                goto unlock;
        }

        ret = true;
unlock:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
/**
 * br_multicast_has_router_adjacent - Checks for a router behind a bridge port
 * @dev: The bridge port adjacent to which to check for a multicast router
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a multicast router is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_router_adjacent(struct net_device *dev, int proto)
{
        struct net_bridge_mcast_port *pmctx;
        struct net_bridge_mcast *brmctx;
        struct net_bridge_port *port;
        bool ret = false;

        rcu_read_lock();
        port = br_port_get_check_rcu(dev);
        if (!port)
                goto unlock;

        brmctx = &port->br->multicast_ctx;
        switch (proto) {
        case ETH_P_IP:
                /* any router-list entry on a port other than @dev counts */
                hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
                                         ip4_rlist) {
                        if (pmctx->port == port)
                                continue;

                        ret = true;
                        goto unlock;
                }
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case ETH_P_IPV6:
                hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
                                         ip6_rlist) {
                        if (pmctx->port == port)
                                continue;

                        ret = true;
                        goto unlock;
                }
                break;
#endif
        default:
                /* when compiled without IPv6 support, be conservative and
                 * always assume presence of an IPv6 multicast router
                 */
                ret = true;
        }

unlock:
        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent);
/* Account one IGMP/MLD packet of @type in direction @dir (RX/TX) into the
 * per-cpu stats. For queries the transport payload length is used to tell
 * the protocol versions apart.
 */
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
                               const struct sk_buff *skb, u8 type, u8 dir)
{
        struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
        __be16 proto = skb->protocol;
        unsigned int t_len;

        u64_stats_update_begin(&pstats->syncp);
        switch (proto) {
        case htons(ETH_P_IP):
                /* transport payload length = total length - IP header */
                t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
                switch (type) {
                case IGMP_HOST_MEMBERSHIP_REPORT:
                        pstats->mstats.igmp_v1reports[dir]++;
                        break;
                case IGMPV2_HOST_MEMBERSHIP_REPORT:
                        pstats->mstats.igmp_v2reports[dir]++;
                        break;
                case IGMPV3_HOST_MEMBERSHIP_REPORT:
                        pstats->mstats.igmp_v3reports[dir]++;
                        break;
                case IGMP_HOST_MEMBERSHIP_QUERY:
                        /* a v3 query is longer than a plain igmphdr; v1 and
                         * v2 queries differ only in the code field
                         */
                        if (t_len != sizeof(struct igmphdr)) {
                                pstats->mstats.igmp_v3queries[dir]++;
                        } else {
                                unsigned int offset = skb_transport_offset(skb);
                                struct igmphdr *ih, _ihdr;

                                /* copes with non-linear skbs */
                                ih = skb_header_pointer(skb, offset,
                                                        sizeof(_ihdr), &_ihdr);
                                if (!ih)
                                        break;
                                if (!ih->code)
                                        pstats->mstats.igmp_v1queries[dir]++;
                                else
                                        pstats->mstats.igmp_v2queries[dir]++;
                        }
                        break;
                case IGMP_HOST_LEAVE_MESSAGE:
                        pstats->mstats.igmp_leaves[dir]++;
                        break;
                }
                break;
#if IS_ENABLED(CONFIG_IPV6)
        case htons(ETH_P_IPV6):
                /* payload length plus fixed header, minus any ext headers */
                t_len = ntohs(ipv6_hdr(skb)->payload_len) +
                        sizeof(struct ipv6hdr);
                t_len -= skb_network_header_len(skb);
                switch (type) {
                case ICMPV6_MGM_REPORT:
                        pstats->mstats.mld_v1reports[dir]++;
                        break;
                case ICMPV6_MLD2_REPORT:
                        pstats->mstats.mld_v2reports[dir]++;
                        break;
                case ICMPV6_MGM_QUERY:
                        /* MLDv2 queries are longer than struct mld_msg */
                        if (t_len != sizeof(struct mld_msg))
                                pstats->mstats.mld_v2queries[dir]++;
                        else
                                pstats->mstats.mld_v1queries[dir]++;
                        break;
                case ICMPV6_MGM_REDUCTION:
                        pstats->mstats.mld_leaves[dir]++;
                        break;
                }
                break;
#endif /* CONFIG_IPV6 */
        }
        u64_stats_update_end(&pstats->syncp);
}
  4345. void br_multicast_count(struct net_bridge *br,
  4346. const struct net_bridge_port *p,
  4347. const struct sk_buff *skb, u8 type, u8 dir)
  4348. {
  4349. struct bridge_mcast_stats __percpu *stats;
  4350. /* if multicast_disabled is true then igmp type can't be set */
  4351. if (!type || !br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
  4352. return;
  4353. if (p)
  4354. stats = p->mcast_stats;
  4355. else
  4356. stats = br->mcast_stats;
  4357. if (WARN_ON(!stats))
  4358. return;
  4359. br_mcast_stats_add(stats, skb, type, dir);
  4360. }
  4361. int br_multicast_init_stats(struct net_bridge *br)
  4362. {
  4363. br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
  4364. if (!br->mcast_stats)
  4365. return -ENOMEM;
  4366. return 0;
  4367. }
/* Release the per-cpu stats allocated by br_multicast_init_stats();
 * free_percpu(NULL) is a no-op, so this is safe even if init failed.
 */
void br_multicast_uninit_stats(struct net_bridge *br)
{
        free_percpu(br->mcast_stats);
}
/* noinline for https://llvm.org/pr45802#c9 */
/* Accumulate one RX/TX counter pair from @src into @dst. */
static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
{
        dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
        dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
}
/* Sum the per-cpu multicast statistics into @dest. Per-port stats are used
 * when @p is given, bridge-global stats otherwise. Each cpu's snapshot is
 * taken under the u64_stats seqcount to get a consistent copy.
 */
void br_multicast_get_stats(const struct net_bridge *br,
                            const struct net_bridge_port *p,
                            struct br_mcast_stats *dest)
{
        struct bridge_mcast_stats __percpu *stats;
        struct br_mcast_stats tdst;
        int i;

        memset(dest, 0, sizeof(*dest));
        if (p)
                stats = p->mcast_stats;
        else
                stats = br->mcast_stats;
        if (WARN_ON(!stats))
                return;

        memset(&tdst, 0, sizeof(tdst));

        for_each_possible_cpu(i) {
                struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
                struct br_mcast_stats temp;
                unsigned int start;

                /* retry until we read a consistent snapshot of this cpu */
                do {
                        start = u64_stats_fetch_begin(&cpu_stats->syncp);
                        memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
                } while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

                mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
                mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
                mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
                mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
                mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
                mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
                mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
                tdst.igmp_parse_errors += temp.igmp_parse_errors;

                mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
                mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
                mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
                mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
                mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
                tdst.mld_parse_errors += temp.mld_parse_errors;
        }
        memcpy(dest, &tdst, sizeof(*dest));
}
  4418. int br_mdb_hash_init(struct net_bridge *br)
  4419. {
  4420. int err;
  4421. err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
  4422. if (err)
  4423. return err;
  4424. err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
  4425. if (err) {
  4426. rhashtable_destroy(&br->sg_port_tbl);
  4427. return err;
  4428. }
  4429. return 0;
  4430. }
/* Destroy both mdb-related rhashtables; counterpart of br_mdb_hash_init(). */
void br_mdb_hash_fini(struct net_bridge *br)
{
        rhashtable_destroy(&br->sg_port_tbl);
        rhashtable_destroy(&br->mdb_hash_tbl);
}