mcast.c 76 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
7277827792780278127822783278427852786278727882789279027912792279327942795279627972798279928002801280228032804280528062807280828092810281128122813281428152816281728182819282028212822282328242825282628272828282928302831283228332834283528362837283828392840284128422843284428452846284728482849285028512852285328542855285628572858285928602861286228632864286528662867286828692870287128722873287428752876287728782879288028812882288328842885288628872888288928902891289228932894289528962897289828992900290129022903290429052906290729082909291029112912291329142915291629172918291929202921292229232924292529262927292829292930293129322933293429352936293729382939294029412942294329442945294629472948294929502951295229532954295529562957295829592960296129622963296429652966296729682969297029712972297329742975297629772978297929802981298229832984298529862987298829892990299129922993299429952996299729982999300030013002300330043005300630073008300930103011301230133014301530163017301830193020302130223023302430253026302730283029303030313032303330343035303630373038303930403041304230433044304530463047304830493050305130523053305430553056305730583059306030613062306330643065306630673068306930703071307230733074307530763077307830793080308130823083308430853086308730883089309030913092309330943095309630973098309931003101310231033104310531063107310831093110311131123113311431153116311731183119312031213122312331243125312631273128312931303131313231333134313531363137313831393140314131423143314431453146314731483149315031513152315331543155315631573158315931603161316231633164316531663167316831693170317131723173317431753176317731783179318031813182318331843185318631873188318931903191319231933194319531963197319831993200320132023203320432053206320732083209321032113212
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Multicast support for IPv6
  4. * Linux INET6 implementation
  5. *
  6. * Authors:
  7. * Pedro Roque <roque@di.fc.ul.pt>
  8. *
  9. * Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
  10. */
  11. /* Changes:
  12. *
  13. * yoshfuji : fix format of router-alert option
  14. * YOSHIFUJI Hideaki @USAGI:
  15. * Fixed source address for MLD message based on
  16. * <draft-ietf-magma-mld-source-05.txt>.
  17. * YOSHIFUJI Hideaki @USAGI:
  18. * - Ignore Queries for invalid addresses.
  19. * - MLD for link-local addresses.
  20. * David L Stevens <dlstevens@us.ibm.com>:
  21. * - MLDv2 support
  22. */
  23. #include <linux/module.h>
  24. #include <linux/errno.h>
  25. #include <linux/types.h>
  26. #include <linux/string.h>
  27. #include <linux/socket.h>
  28. #include <linux/sockios.h>
  29. #include <linux/jiffies.h>
  30. #include <linux/net.h>
  31. #include <linux/in.h>
  32. #include <linux/in6.h>
  33. #include <linux/netdevice.h>
  34. #include <linux/if_arp.h>
  35. #include <linux/route.h>
  36. #include <linux/init.h>
  37. #include <linux/proc_fs.h>
  38. #include <linux/seq_file.h>
  39. #include <linux/slab.h>
  40. #include <linux/pkt_sched.h>
  41. #include <net/mld.h>
  42. #include <linux/workqueue.h>
  43. #include <linux/netfilter.h>
  44. #include <linux/netfilter_ipv6.h>
  45. #include <net/net_namespace.h>
  46. #include <net/sock.h>
  47. #include <net/snmp.h>
  48. #include <net/ipv6.h>
  49. #include <net/protocol.h>
  50. #include <net/if_inet6.h>
  51. #include <net/ndisc.h>
  52. #include <net/addrconf.h>
  53. #include <net/ip6_route.h>
  54. #include <net/inet_common.h>
  55. #include <net/ip6_checksum.h>
/* Ensure that we have struct in6_addr aligned on 32bit word. */
/* BUILD_BUG_ON_ZERO() evaluates to 0 at compile time, or breaks the build
 * if the condition is true — so this array costs nothing at runtime (it is
 * marked __unused__) and exists purely to assert that the in6_addr fields
 * inside the MLDv2 wire structures sit on 4-byte boundaries.
 */
static int __mld2_query_bugs[] __attribute__((__unused__)) = {
	BUILD_BUG_ON_ZERO(offsetof(struct mld2_query, mld2q_srcs) % 4),
	BUILD_BUG_ON_ZERO(offsetof(struct mld2_report, mld2r_grec) % 4),
	BUILD_BUG_ON_ZERO(offsetof(struct mld2_grec, grec_mca) % 4)
};
/* Dedicated workqueue for deferred MLD work items. */
static struct workqueue_struct *mld_wq;

/* ff02::16 — the all-MLDv2-capable-routers link-local address. */
static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;

/* Forward declarations for helpers defined later in this file. */
static void igmp6_join_group(struct ifmcaddr6 *ma);
static void igmp6_leave_group(struct ifmcaddr6 *ma);
static void mld_mca_work(struct work_struct *work);

static void mld_ifc_event(struct inet6_dev *idev);
static bool mld_in_v1_mode(const struct inet6_dev *idev);
static int sf_setstate(struct ifmcaddr6 *pmc);
static void sf_markstate(struct ifmcaddr6 *pmc);
static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta);
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta);
static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
			    struct inet6_dev *idev);
static int __ipv6_dev_mc_inc(struct net_device *dev,
			     const struct in6_addr *addr, unsigned int mode);
/* RFC3810, 9.1. Robustness Variable (default retransmission count). */
#define MLD_QRV_DEFAULT		2
/* RFC3810, 9.2. Query Interval */
#define MLD_QI_DEFAULT		(125 * HZ)
/* RFC3810, 9.3. Query Response Interval */
#define MLD_QRI_DEFAULT		(10 * HZ)

/* RFC3810, 8.1 Query Version Distinctions */
#define MLD_V1_QUERY_LEN	24
#define MLD_V2_QUERY_LEN_MIN	28

/* Default cap on per-socket multicast source filter entries. */
#define IPV6_MLD_MAX_MSF	64

/* Runtime-tunable copies of the limits above (writable via sysctl). */
int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
int sysctl_mld_qrv __read_mostly = MLD_QRV_DEFAULT;
/*
 *	socket join on multicast group
 */

/* Dereference an RCU pointer, asserting that idev->mc_lock is held. */
#define mc_dereference(e, idev) \
	rcu_dereference_protected(e, lockdep_is_held(&(idev)->mc_lock))

/* Dereference an RCU pointer, asserting that the socket lock is held. */
#define sock_dereference(e, sk) \
	rcu_dereference_protected(e, lockdep_sock_is_held(sk))

/* Walk a socket's multicast membership list; caller holds the socket lock. */
#define for_each_pmc_socklock(np, sk, pmc)			\
	for (pmc = sock_dereference((np)->ipv6_mc_list, sk);	\
	     pmc;						\
	     pmc = sock_dereference(pmc->next, sk))

/* Walk a socket's multicast membership list under rcu_read_lock(). */
#define for_each_pmc_rcu(np, pmc)				\
	for (pmc = rcu_dereference((np)->ipv6_mc_list);		\
	     pmc;						\
	     pmc = rcu_dereference(pmc->next))

/* Walk a group's source filter list; caller holds idev->mc_lock. */
#define for_each_psf_mclock(mc, psf)				\
	for (psf = mc_dereference((mc)->mca_sources, mc->idev);	\
	     psf;						\
	     psf = mc_dereference(psf->sf_next, mc->idev))

/* Walk a group's source filter list under rcu_read_lock(). */
#define for_each_psf_rcu(mc, psf)				\
	for (psf = rcu_dereference((mc)->mca_sources);		\
	     psf;						\
	     psf = rcu_dereference(psf->sf_next))

/* Walk a group's tomb (deleted-source) list; caller holds idev->mc_lock. */
#define for_each_psf_tomb(mc, psf)				\
	for (psf = mc_dereference((mc)->mca_tomb, mc->idev);	\
	     psf;						\
	     psf = mc_dereference(psf->sf_next, mc->idev))

/* Walk a device's multicast group list; caller holds idev->mc_lock. */
#define for_each_mc_mclock(idev, mc)				\
	for (mc = mc_dereference((idev)->mc_list, idev);	\
	     mc;						\
	     mc = mc_dereference(mc->next, idev))

/* Walk a device's multicast group list under rcu_read_lock(). */
#define for_each_mc_rcu(idev, mc)				\
	for (mc = rcu_dereference((idev)->mc_list);		\
	     mc;						\
	     mc = rcu_dereference(mc->next))

/* Walk a device's tomb (deleted-group) list; caller holds idev->mc_lock. */
#define for_each_mc_tomb(idev, mc)				\
	for (mc = mc_dereference((idev)->mc_tomb, idev);	\
	     mc;						\
	     mc = mc_dereference(mc->next, idev))
  132. static int unsolicited_report_interval(struct inet6_dev *idev)
  133. {
  134. int iv;
  135. if (mld_in_v1_mode(idev))
  136. iv = READ_ONCE(idev->cnf.mldv1_unsolicited_report_interval);
  137. else
  138. iv = READ_ONCE(idev->cnf.mldv2_unsolicited_report_interval);
  139. return iv > 0 ? iv : 1;
  140. }
/* Join socket @sk to multicast group @addr in filter mode @mode.
 * @ifindex == 0 means "pick the interface via a route lookup on @addr".
 * Caller must hold RTNL (asserted below) and the socket lock (required by
 * for_each_pmc_socklock()). Returns 0 or a negative errno:
 * -EINVAL (not a multicast address), -EADDRINUSE (already joined),
 * -ENOMEM, -ENODEV, or the error from __ipv6_dev_mc_inc().
 */
static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
			       const struct in6_addr *addr, unsigned int mode)
{
	struct net_device *dev = NULL;
	struct ipv6_mc_socklist *mc_lst;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	int err;

	ASSERT_RTNL();

	if (!ipv6_addr_is_multicast(addr))
		return -EINVAL;

	/* Reject a duplicate join on the same (interface, group) pair. */
	for_each_pmc_socklock(np, sk, mc_lst) {
		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
		    ipv6_addr_equal(&mc_lst->addr, addr))
			return -EADDRINUSE;
	}

	mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);

	if (!mc_lst)
		return -ENOMEM;

	mc_lst->next = NULL;
	mc_lst->addr = *addr;

	if (ifindex == 0) {
		/* No interface given: route the group address to find one. */
		struct rt6_info *rt;

		rt = rt6_lookup(net, addr, NULL, 0, NULL, 0);
		if (rt) {
			dev = rt->dst.dev;
			ip6_rt_put(rt);
		}
	} else
		dev = __dev_get_by_index(net, ifindex);

	if (!dev) {
		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
		return -ENODEV;
	}

	mc_lst->ifindex = dev->ifindex;
	mc_lst->sfmode = mode;
	RCU_INIT_POINTER(mc_lst->sflist, NULL);

	/*
	 *	now add/increase the group membership on the device
	 */
	err = __ipv6_dev_mc_inc(dev, addr, mode);

	if (err) {
		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
		return err;
	}

	/* Publish at the list head; rcu_assign_pointer() orders the
	 * initialisation above before the pointer becomes visible to
	 * RCU readers.
	 */
	mc_lst->next = np->ipv6_mc_list;
	rcu_assign_pointer(np->ipv6_mc_list, mc_lst);

	return 0;
}
/* Classic (non-source-specific) join: RFC 3810 EXCLUDE mode with an empty
 * source list, i.e. accept traffic from all sources.
 */
int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
	return __ipv6_sock_mc_join(sk, ifindex, addr, MCAST_EXCLUDE);
}
EXPORT_SYMBOL(ipv6_sock_mc_join);
/* Source-specific join: same as ipv6_sock_mc_join() but with an explicit
 * initial filter @mode (MCAST_INCLUDE or MCAST_EXCLUDE).
 */
int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
			  const struct in6_addr *addr, unsigned int mode)
{
	return __ipv6_sock_mc_join(sk, ifindex, addr, mode);
}
/*
 *	socket leave on multicast group
 */
/* Remove socket @sk's membership of group @addr (on @ifindex, or on any
 * interface when @ifindex == 0). Caller must hold RTNL (asserted) and the
 * socket lock. Returns 0, -EINVAL, or -EADDRNOTAVAIL when no matching
 * membership exists.
 */
int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc_lst;
	struct ipv6_mc_socklist __rcu **lnk;
	struct net *net = sock_net(sk);

	ASSERT_RTNL();

	if (!ipv6_addr_is_multicast(addr))
		return -EINVAL;

	/* Walk via a pointer-to-link so the match can be unlinked in place. */
	for (lnk = &np->ipv6_mc_list;
	     (mc_lst = sock_dereference(*lnk, sk)) != NULL;
	      lnk = &mc_lst->next) {
		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
		    ipv6_addr_equal(&mc_lst->addr, addr)) {
			struct net_device *dev;

			*lnk = mc_lst->next;

			/* Drop the device-level reference too, if the
			 * interface still exists.
			 */
			dev = __dev_get_by_index(net, mc_lst->ifindex);
			if (dev) {
				struct inet6_dev *idev = __in6_dev_get(dev);

				ip6_mc_leave_src(sk, mc_lst, idev);
				if (idev)
					__ipv6_dev_mc_dec(idev, &mc_lst->addr);
			} else {
				ip6_mc_leave_src(sk, mc_lst, NULL);
			}

			/* Return the memory charge to the socket and free
			 * after a grace period — RCU readers may still be
			 * traversing the old list.
			 */
			atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
			kfree_rcu(mc_lst, rcu);
			return 0;
		}
	}

	return -EADDRNOTAVAIL;
}
EXPORT_SYMBOL(ipv6_sock_mc_drop);
  236. static struct inet6_dev *ip6_mc_find_dev_rtnl(struct net *net,
  237. const struct in6_addr *group,
  238. int ifindex)
  239. {
  240. struct net_device *dev = NULL;
  241. struct inet6_dev *idev = NULL;
  242. if (ifindex == 0) {
  243. struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, NULL, 0);
  244. if (rt) {
  245. dev = rt->dst.dev;
  246. ip6_rt_put(rt);
  247. }
  248. } else {
  249. dev = __dev_get_by_index(net, ifindex);
  250. }
  251. if (!dev)
  252. return NULL;
  253. idev = __in6_dev_get(dev);
  254. if (!idev)
  255. return NULL;
  256. if (idev->dead)
  257. return NULL;
  258. return idev;
  259. }
/* Drop every multicast membership held by @sk. Caller must hold RTNL
 * (asserted) and the socket lock. Same per-entry teardown as
 * ipv6_sock_mc_drop(), applied to the whole list.
 */
void __ipv6_sock_mc_close(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc_lst;
	struct net *net = sock_net(sk);

	ASSERT_RTNL();

	/* Pop entries from the head until the list is empty. */
	while ((mc_lst = sock_dereference(np->ipv6_mc_list, sk)) != NULL) {
		struct net_device *dev;

		np->ipv6_mc_list = mc_lst->next;

		dev = __dev_get_by_index(net, mc_lst->ifindex);
		if (dev) {
			struct inet6_dev *idev = __in6_dev_get(dev);

			ip6_mc_leave_src(sk, mc_lst, idev);
			if (idev)
				__ipv6_dev_mc_dec(idev, &mc_lst->addr);
		} else {
			/* Interface is gone; still release source state. */
			ip6_mc_leave_src(sk, mc_lst, NULL);
		}

		/* Uncharge the socket and defer the free past any RCU
		 * readers still walking the list.
		 */
		atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
		kfree_rcu(mc_lst, rcu);
	}
}
/* Locked wrapper around __ipv6_sock_mc_close(): takes RTNL then the socket
 * lock (in that order) before tearing down all memberships. The unlocked
 * rcu_access_pointer() peek lets sockets with no memberships skip the
 * locking entirely.
 */
void ipv6_sock_mc_close(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	if (!rcu_access_pointer(np->ipv6_mc_list))
		return;

	rtnl_lock();
	lock_sock(sk);
	__ipv6_sock_mc_close(sk);
	release_sock(sk);
	rtnl_unlock();
}
/* Add (@add != 0) or delete a single source @pgsr->gsr_source from the
 * socket's filter for group @pgsr->gsr_group, in filter mode @omode
 * (MCAST_INCLUDE/MCAST_EXCLUDE). Implements MCAST_JOIN_SOURCE_GROUP /
 * MCAST_LEAVE_SOURCE_GROUP and friends. Caller holds RTNL and the socket
 * lock; idev->mc_lock is taken here for the interface-filter updates.
 * Returns 0 or a negative errno.
 */
int ip6_mc_source(int add, int omode, struct sock *sk,
	struct group_source_req *pgsr)
{
	struct in6_addr *source, *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *psl;
	struct net *net = sock_net(sk);
	int i, j, rv;
	int leavegroup = 0;
	int err;

	source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
	group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;

	idev = ip6_mc_find_dev_rtnl(net, group, pgsr->gsr_interface);
	if (!idev)
		return -ENODEV;

	err = -EADDRNOTAVAIL;

	mutex_lock(&idev->mc_lock);
	/* Locate the socket's membership for this (interface, group). */
	for_each_pmc_socklock(inet6, sk, pmc) {
		if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
			continue;
		if (ipv6_addr_equal(&pmc->addr, group))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	/* if a source filter was set, must be the same mode as before */
	if (rcu_access_pointer(pmc->sflist)) {
		if (pmc->sfmode != omode) {
			err = -EINVAL;
			goto done;
		}
	} else if (pmc->sfmode != omode) {
		/* allow mode switches for empty-set filters */
		ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
		ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
		pmc->sfmode = omode;
	}

	psl = sock_dereference(pmc->sflist, sk);
	if (!add) {
		/* Delete path: find the source, then close the gap. */
		if (!psl)
			goto done;	/* err = -EADDRNOTAVAIL */
		rv = !0;
		for (i = 0; i < psl->sl_count; i++) {
			rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
			if (rv == 0)
				break;
		}
		if (rv)		/* source not found */
			goto done;	/* err = -EADDRNOTAVAIL */

		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
			leavegroup = 1;
			goto done;
		}

		/* update the interface filter */
		ip6_mc_del_src(idev, group, omode, 1, source, 1);

		/* Shift the remaining entries down over the deleted slot. */
		for (j = i+1; j < psl->sl_count; j++)
			psl->sl_addr[j-1] = psl->sl_addr[j];
		psl->sl_count--;
		err = 0;
		goto done;
	}
	/* else, add a new source to the filter */

	if (psl && psl->sl_count >= sysctl_mld_max_msf) {
		err = -ENOBUFS;
		goto done;
	}
	if (!psl || psl->sl_count == psl->sl_max) {
		/* Filter list is full (or absent): grow it by IP6_SFBLOCK
		 * slots, copy over the old entries, and publish the new
		 * list via RCU before freeing the old one.
		 */
		struct ip6_sf_socklist *newpsl;
		int count = IP6_SFBLOCK;

		if (psl)
			count += psl->sl_max;
		newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr, count),
				      GFP_KERNEL);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = count;
		newpsl->sl_count = count - IP6_SFBLOCK;
		if (psl) {
			for (i = 0; i < psl->sl_count; i++)
				newpsl->sl_addr[i] = psl->sl_addr[i];
			atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
				   &sk->sk_omem_alloc);
		}
		rcu_assign_pointer(pmc->sflist, newpsl);
		kfree_rcu(psl, rcu);
		psl = newpsl;
	}
	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
	/* Duplicate check; i also ends up at the insertion point. */
	for (i = 0; i < psl->sl_count; i++) {
		rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
		if (rv == 0) /* There is an error in the address. */
			goto done;
	}
	/* Shift entries up to make room and insert the new source. */
	for (j = psl->sl_count-1; j >= i; j--)
		psl->sl_addr[j+1] = psl->sl_addr[j];
	psl->sl_addr[i] = *source;
	psl->sl_count++;
	err = 0;
	/* update the interface list */
	ip6_mc_add_src(idev, group, omode, 1, source, 1);
done:
	mutex_unlock(&idev->mc_lock);
	/* Leaving the group re-takes locks, so do it outside mc_lock. */
	if (leavegroup)
		err = ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
	return err;
}
/* Replace the socket's entire source filter for the group in @gsf with the
 * @gsf->gf_numsrc addresses in @list (MCAST_MSFILTER setsockopt). Caller
 * holds RTNL and the socket lock. (INCLUDE, empty) is treated as a plain
 * leave. Returns 0 or a negative errno.
 */
int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
		    struct sockaddr_storage *list)
{
	const struct in6_addr *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *newpsl, *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;
	int i, err;

	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;
	if (gsf->gf_fmode != MCAST_INCLUDE &&
	    gsf->gf_fmode != MCAST_EXCLUDE)
		return -EINVAL;

	idev = ip6_mc_find_dev_rtnl(net, group, gsf->gf_interface);
	if (!idev)
		return -ENODEV;

	err = 0;

	/* special case - (INCLUDE, empty) == LEAVE_GROUP */
	if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
		leavegroup = 1;
		goto done;
	}

	/* Locate the socket's membership for this (interface, group). */
	for_each_pmc_socklock(inet6, sk, pmc) {
		if (pmc->ifindex != gsf->gf_interface)
			continue;
		if (ipv6_addr_equal(&pmc->addr, group))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	if (gsf->gf_numsrc) {
		/* Build the new filter list, then install it on the
		 * interface before touching the socket's old list.
		 */
		newpsl = sock_kmalloc(sk, struct_size(newpsl, sl_addr,
						      gsf->gf_numsrc),
				      GFP_KERNEL);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
		for (i = 0; i < newpsl->sl_count; ++i, ++list) {
			struct sockaddr_in6 *psin6;

			psin6 = (struct sockaddr_in6 *)list;
			newpsl->sl_addr[i] = psin6->sin6_addr;
		}
		mutex_lock(&idev->mc_lock);
		err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
				     newpsl->sl_count, newpsl->sl_addr, 0);
		if (err) {
			mutex_unlock(&idev->mc_lock);
			sock_kfree_s(sk, newpsl, struct_size(newpsl, sl_addr,
							     newpsl->sl_max));
			goto done;
		}
		mutex_unlock(&idev->mc_lock);
	} else {
		/* EXCLUDE with no sources: empty filter list. */
		newpsl = NULL;
		mutex_lock(&idev->mc_lock);
		ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
		mutex_unlock(&idev->mc_lock);
	}

	mutex_lock(&idev->mc_lock);
	psl = sock_dereference(pmc->sflist, sk);
	if (psl) {
		/* Retire the old filter from the interface and return its
		 * memory charge to the socket.
		 */
		ip6_mc_del_src(idev, group, pmc->sfmode,
			       psl->sl_count, psl->sl_addr, 0);
		atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
			   &sk->sk_omem_alloc);
	} else {
		ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
	}

	/* Publish the new list, then free the old one after a grace period. */
	rcu_assign_pointer(pmc->sflist, newpsl);
	mutex_unlock(&idev->mc_lock);
	kfree_rcu(psl, rcu);
	pmc->sfmode = gsf->gf_fmode;
	err = 0;
done:
	if (leavegroup)
		err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
	return err;
}
/* Read back the socket's source filter for the group in @gsf
 * (MCAST_MSFILTER getsockopt). Copies at most the caller-supplied
 * gf_numsrc entries to user memory at @optval + @ss_offset, and reports
 * the true source count back in gf_numsrc. Caller holds the socket lock.
 * Returns 0, -EINVAL, -EADDRNOTAVAIL, or -EFAULT.
 */
int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
		  sockptr_t optval, size_t ss_offset)
{
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	const struct in6_addr *group;
	struct ipv6_mc_socklist *pmc;
	struct ip6_sf_socklist *psl;
	unsigned int count;
	int i, copycount;

	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;

	/* changes to the ipv6_mc_list require the socket lock and
	 * rtnl lock. We have the socket lock, so reading the list is safe.
	 */

	for_each_pmc_socklock(inet6, sk, pmc) {
		if (pmc->ifindex != gsf->gf_interface)
			continue;
		if (ipv6_addr_equal(group, &pmc->addr))
			break;
	}
	if (!pmc)		/* must have a prior join */
		return -EADDRNOTAVAIL;

	gsf->gf_fmode = pmc->sfmode;
	psl = sock_dereference(pmc->sflist, sk);
	count = psl ? psl->sl_count : 0;

	/* Never copy more entries than the user's buffer can hold. */
	copycount = min(count, gsf->gf_numsrc);
	gsf->gf_numsrc = count;
	for (i = 0; i < copycount; i++) {
		struct sockaddr_in6 *psin6;
		struct sockaddr_storage ss;

		/* Build each entry in a zeroed on-stack sockaddr_storage so
		 * no kernel stack bytes leak to userspace.
		 */
		psin6 = (struct sockaddr_in6 *)&ss;
		memset(&ss, 0, sizeof(ss));
		psin6->sin6_family = AF_INET6;
		psin6->sin6_addr = psl->sl_addr[i];
		if (copy_to_sockptr_offset(optval, ss_offset, &ss, sizeof(ss)))
			return -EFAULT;
		ss_offset += sizeof(ss);
	}
	return 0;
}
/* Datapath check: should socket @sk accept a packet sent from @src_addr to
 * multicast group @mc_addr? Runs under rcu_read_lock() only (no socket
 * lock), so it is safe from packet-receive context. Returns true to accept.
 * A socket not joined to the group falls back to its MC6_ALL flag.
 */
bool inet6_mc_check(const struct sock *sk, const struct in6_addr *mc_addr,
		    const struct in6_addr *src_addr)
{
	const struct ipv6_pinfo *np = inet6_sk(sk);
	const struct ipv6_mc_socklist *mc;
	const struct ip6_sf_socklist *psl;
	bool rv = true;

	rcu_read_lock();
	for_each_pmc_rcu(np, mc) {
		if (ipv6_addr_equal(&mc->addr, mc_addr))
			break;
	}
	if (!mc) {
		rcu_read_unlock();
		return inet6_test_bit(MC6_ALL, sk);
	}
	psl = rcu_dereference(mc->sflist);
	if (!psl) {
		/* No source filter: EXCLUDE-mode (empty) accepts all,
		 * INCLUDE-mode (empty) accepts none.
		 */
		rv = mc->sfmode == MCAST_EXCLUDE;
	} else {
		int i;

		for (i = 0; i < psl->sl_count; i++) {
			if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
				break;
		}
		/* INCLUDE: accept only listed sources; EXCLUDE: reject
		 * listed sources.
		 */
		if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
			rv = false;
		if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
			rv = false;
	}
	rcu_read_unlock();

	return rv;
}
/* called with mc_lock */
/* A group was added on the interface: program the link-layer multicast
 * address into the device (once, tracked by MAF_LOADED) and announce the
 * membership — an MLDv1 report in v1 mode, otherwise an MLDv2 filter-change
 * record. Groups below link-local scope and MAF_NOREPORT groups are never
 * announced.
 */
static void igmp6_group_added(struct ifmcaddr6 *mc)
{
	struct net_device *dev = mc->idev->dev;
	char buf[MAX_ADDR_LEN];

	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
	    IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	if (!(mc->mca_flags&MAF_LOADED)) {
		mc->mca_flags |= MAF_LOADED;
		/* Map the IPv6 group to its hardware multicast address and
		 * register it with the device's filter.
		 */
		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
			dev_mc_add(dev, buf);
	}

	if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
		return;

	if (mld_in_v1_mode(mc->idev)) {
		igmp6_join_group(mc);
		return;
	}
	/* else v2 */

	/* Based on RFC3810 6.1, for newly added INCLUDE SSM, we
	 * should not send filter-mode change record as the mode
	 * should be from IN() to IN(A).
	 */
	if (mc->mca_sfmode == MCAST_EXCLUDE)
		mc->mca_crcount = mc->idev->mc_qrv;

	mld_ifc_event(mc->idev);
}
/* Tear down an interface multicast group: remove the link-layer filter
 * entry, send the MLD leave (unless the device is going away), and stop
 * any pending report work.
 * called with mc_lock
 */
static void igmp6_group_dropped(struct ifmcaddr6 *mc)
{
	struct net_device *dev = mc->idev->dev;
	char buf[MAX_ADDR_LEN];

	/* node/interface-local scoped groups were never reported */
	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
	    IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	if (mc->mca_flags&MAF_LOADED) {
		mc->mca_flags &= ~MAF_LOADED;
		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
			dev_mc_del(dev, buf);
	}

	if (mc->mca_flags & MAF_NOREPORT)
		return;

	if (!mc->idev->dead)
		igmp6_leave_group(mc);

	/* cancelling pending work drops the reference it held */
	if (cancel_delayed_work(&mc->mca_work))
		refcount_dec(&mc->mca_refcnt);
}
/*
 * deleted ifmcaddr6 manipulation
 * Record a just-deleted group on idev->mc_tomb so that the required
 * "state change" retransmissions can still be sent for it.
 * called with mc_lock
 */
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
	struct ifmcaddr6 *pmc;

	/* this is an "ifmcaddr6" for convenience; only the fields below
	 * are actually used. In particular, the refcnt and users are not
	 * used for management of the delete list. Using the same structure
	 * for deleted items allows change reports to use common code with
	 * non-deleted or query-response MCA's.
	 */
	pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
	if (!pmc)
		return;

	pmc->idev = im->idev;
	in6_dev_hold(idev);
	pmc->mca_addr = im->mca_addr;
	pmc->mca_crcount = idev->mc_qrv;
	pmc->mca_sfmode = im->mca_sfmode;
	if (pmc->mca_sfmode == MCAST_INCLUDE) {
		struct ip6_sf_list *psf;

		/* steal the source lists from the live entry; per-source
		 * retransmit counters are restarted below
		 */
		rcu_assign_pointer(pmc->mca_tomb,
				   mc_dereference(im->mca_tomb, idev));
		rcu_assign_pointer(pmc->mca_sources,
				   mc_dereference(im->mca_sources, idev));
		RCU_INIT_POINTER(im->mca_tomb, NULL);
		RCU_INIT_POINTER(im->mca_sources, NULL);

		for_each_psf_mclock(pmc, psf)
			psf->sf_crcount = pmc->mca_crcount;
	}

	rcu_assign_pointer(pmc->next, idev->mc_tomb);
	rcu_assign_pointer(idev->mc_tomb, pmc);
}
/* Remove @im's tomb record from @idev->mc_tomb (if one exists) and fold
 * its pending source state back into the live entry @im.
 * called with mc_lock
 */
static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
	struct ip6_sf_list *psf, *sources, *tomb;
	struct in6_addr *pmca = &im->mca_addr;
	struct ifmcaddr6 *pmc, *pmc_prev;

	pmc_prev = NULL;
	for_each_mc_tomb(idev, pmc) {
		if (ipv6_addr_equal(&pmc->mca_addr, pmca))
			break;
		pmc_prev = pmc;
	}
	/* unlink the matching tomb entry, if found */
	if (pmc) {
		if (pmc_prev)
			rcu_assign_pointer(pmc_prev->next, pmc->next);
		else
			rcu_assign_pointer(idev->mc_tomb, pmc->next);
	}

	if (pmc) {
		im->idev = pmc->idev;
		if (im->mca_sfmode == MCAST_INCLUDE) {
			/* swap the source lists so the old ones end up on
			 * the tomb entry, which is freed below
			 */
			tomb = rcu_replace_pointer(im->mca_tomb,
						   mc_dereference(pmc->mca_tomb, pmc->idev),
						   lockdep_is_held(&im->idev->mc_lock));
			rcu_assign_pointer(pmc->mca_tomb, tomb);

			sources = rcu_replace_pointer(im->mca_sources,
						      mc_dereference(pmc->mca_sources, pmc->idev),
						      lockdep_is_held(&im->idev->mc_lock));
			rcu_assign_pointer(pmc->mca_sources, sources);
			for_each_psf_mclock(im, psf)
				psf->sf_crcount = idev->mc_qrv;
		} else {
			im->mca_crcount = idev->mc_qrv;
		}
		in6_dev_put(pmc->idev);
		ip6_mc_clear_src(pmc);
		kfree_rcu(pmc, rcu);
	}
}
/* Free the whole tomb list of @idev and drop every per-group dead-source
 * (mca_tomb) list on the live entries.
 * called with mc_lock
 */
static void mld_clear_delrec(struct inet6_dev *idev)
{
	struct ifmcaddr6 *pmc, *nextpmc;

	pmc = mc_dereference(idev->mc_tomb, idev);
	RCU_INIT_POINTER(idev->mc_tomb, NULL);

	for (; pmc; pmc = nextpmc) {
		nextpmc = mc_dereference(pmc->next, idev);
		ip6_mc_clear_src(pmc);
		in6_dev_put(pmc->idev);
		kfree_rcu(pmc, rcu);
	}

	/* clear dead sources, too */
	for_each_mc_mclock(idev, pmc) {
		struct ip6_sf_list *psf, *psf_next;

		psf = mc_dereference(pmc->mca_tomb, idev);
		RCU_INIT_POINTER(pmc->mca_tomb, NULL);
		for (; psf; psf = psf_next) {
			psf_next = mc_dereference(psf->sf_next, idev);
			kfree_rcu(psf, rcu);
		}
	}
}
  712. static void mld_clear_query(struct inet6_dev *idev)
  713. {
  714. struct sk_buff *skb;
  715. spin_lock_bh(&idev->mc_query_lock);
  716. while ((skb = __skb_dequeue(&idev->mc_query_queue)))
  717. kfree_skb(skb);
  718. spin_unlock_bh(&idev->mc_query_lock);
  719. }
  720. static void mld_clear_report(struct inet6_dev *idev)
  721. {
  722. struct sk_buff *skb;
  723. spin_lock_bh(&idev->mc_report_lock);
  724. while ((skb = __skb_dequeue(&idev->mc_report_queue)))
  725. kfree_skb(skb);
  726. spin_unlock_bh(&idev->mc_report_lock);
  727. }
/* Take a reference on @mc (paired with ma_put()). */
static void mca_get(struct ifmcaddr6 *mc)
{
	refcount_inc(&mc->mca_refcnt);
}
/* Drop a reference on @mc; the final put releases the idev reference it
 * holds and frees the entry after an RCU grace period.
 */
static void ma_put(struct ifmcaddr6 *mc)
{
	if (refcount_dec_and_test(&mc->mca_refcnt)) {
		in6_dev_put(mc->idev);
		kfree_rcu(mc, rcu);
	}
}
/* Allocate and initialise a new interface multicast group entry for
 * @addr in filter mode @mode.  Returns NULL on allocation failure.
 * The returned entry starts with one reference and one user.
 * called with mc_lock
 */
static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
				   const struct in6_addr *addr,
				   unsigned int mode)
{
	struct ifmcaddr6 *mc;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	INIT_DELAYED_WORK(&mc->mca_work, mld_mca_work);

	mc->mca_addr = *addr;
	mc->idev = idev; /* reference taken by caller */
	mc->mca_users = 1;
	/* mca_stamp should be updated upon changes */
	mc->mca_cstamp = mc->mca_tstamp = jiffies;
	refcount_set(&mc->mca_refcnt, 1);

	mc->mca_sfmode = mode;
	mc->mca_sfcount[mode] = 1;

	/* all-nodes and sub-link-local scoped groups are not reported */
	if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
	    IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		mc->mca_flags |= MAF_NOREPORT;

	return mc;
}
/*
 * device multicast group inc (add if not found)
 * Bump the user count of group @addr on @dev, creating the entry (in
 * filter mode @mode) if it does not exist yet.  Requires RTNL.
 * Returns 0 on success or a negative errno.
 */
static int __ipv6_dev_mc_inc(struct net_device *dev,
			     const struct in6_addr *addr, unsigned int mode)
{
	struct ifmcaddr6 *mc;
	struct inet6_dev *idev;

	ASSERT_RTNL();

	/* we need to take a reference on idev */
	idev = in6_dev_get(dev);

	if (!idev)
		return -EINVAL;

	if (idev->dead) {
		in6_dev_put(idev);
		return -ENODEV;
	}

	mutex_lock(&idev->mc_lock);
	for_each_mc_mclock(idev, mc) {
		if (ipv6_addr_equal(&mc->mca_addr, addr)) {
			/* existing entry: just add a user */
			mc->mca_users++;
			ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0);
			mutex_unlock(&idev->mc_lock);
			in6_dev_put(idev);
			return 0;
		}
	}

	mc = mca_alloc(idev, addr, mode);
	if (!mc) {
		mutex_unlock(&idev->mc_lock);
		in6_dev_put(idev);
		return -ENOMEM;
	}

	rcu_assign_pointer(mc->next, idev->mc_list);
	rcu_assign_pointer(idev->mc_list, mc);

	/* hold a reference across the unlocked ma_put() below */
	mca_get(mc);

	mld_del_delrec(idev, mc);
	igmp6_group_added(mc);
	mutex_unlock(&idev->mc_lock);
	ma_put(mc);
	return 0;
}
/* Join @dev to multicast group @addr in EXCLUDE (any-source) mode,
 * creating the group entry if needed.  Caller must hold RTNL.
 */
int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
{
	return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE);
}
EXPORT_SYMBOL(ipv6_dev_mc_inc);
/*
 * device multicast group del
 * Drop one user of group @addr on @idev; the last user unlinks the
 * entry and tears the group down.  Requires RTNL.
 * Returns 0 on success, -ENOENT if the group is not present.
 */
int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
{
	struct ifmcaddr6 *ma, __rcu **map;

	ASSERT_RTNL();

	mutex_lock(&idev->mc_lock);
	for (map = &idev->mc_list;
	     (ma = mc_dereference(*map, idev));
	     map = &ma->next) {
		if (ipv6_addr_equal(&ma->mca_addr, addr)) {
			if (--ma->mca_users == 0) {
				/* last user: unlink and release */
				*map = ma->next;

				igmp6_group_dropped(ma);
				ip6_mc_clear_src(ma);
				mutex_unlock(&idev->mc_lock);

				ma_put(ma);
				return 0;
			}
			mutex_unlock(&idev->mc_lock);
			return 0;
		}
	}

	mutex_unlock(&idev->mc_lock);
	return -ENOENT;
}
  836. int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
  837. {
  838. struct inet6_dev *idev;
  839. int err;
  840. ASSERT_RTNL();
  841. idev = __in6_dev_get(dev);
  842. if (!idev)
  843. err = -ENODEV;
  844. else
  845. err = __ipv6_dev_mc_dec(idev, addr);
  846. return err;
  847. }
  848. EXPORT_SYMBOL(ipv6_dev_mc_dec);
/*
 * check if the interface/address pair is valid
 * Returns true when @dev has joined @group and, if @src_addr is a
 * specified source, that source passes the group's filter state.
 * Runs entirely under RCU.
 */
bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
			 const struct in6_addr *src_addr)
{
	struct inet6_dev *idev;
	struct ifmcaddr6 *mc;
	bool rv = false;

	rcu_read_lock();
	idev = __in6_dev_get(dev);
	if (idev) {
		for_each_mc_rcu(idev, mc) {
			if (ipv6_addr_equal(&mc->mca_addr, group))
				break;
		}
		if (mc) {
			if (src_addr && !ipv6_addr_any(src_addr)) {
				struct ip6_sf_list *psf;

				for_each_psf_rcu(mc, psf) {
					if (ipv6_addr_equal(&psf->sf_addr, src_addr))
						break;
				}
				if (psf)
					/* source entry found: accept if it is
					 * included, or not excluded by all
					 */
					rv = psf->sf_count[MCAST_INCLUDE] ||
						psf->sf_count[MCAST_EXCLUDE] !=
						mc->mca_sfcount[MCAST_EXCLUDE];
				else
					rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0;
			} else
				rv = true; /* don't filter unspecified source */
		}
	}
	rcu_read_unlock();
	return rv;
}
/* Schedule the general-query response work after a random delay within
 * the querier's maximum response delay; hold an idev reference for the
 * newly pending work.
 * called with mc_lock
 */
static void mld_gq_start_work(struct inet6_dev *idev)
{
	unsigned long tv = get_random_u32_below(idev->mc_maxdelay);

	idev->mc_gq_running = 1;
	if (!mod_delayed_work(mld_wq, &idev->mc_gq_work, tv + 2))
		in6_dev_hold(idev);
}
/* Cancel pending general-query work; a successful cancel drops the
 * reference the work was holding.
 * called with mc_lock
 */
static void mld_gq_stop_work(struct inet6_dev *idev)
{
	idev->mc_gq_running = 0;
	if (cancel_delayed_work(&idev->mc_gq_work))
		__in6_dev_put(idev);
}
/* Schedule interface-change (state-change report) work after a random
 * delay below @delay jiffies, taking an idev reference if newly queued.
 * called with mc_lock
 */
static void mld_ifc_start_work(struct inet6_dev *idev, unsigned long delay)
{
	unsigned long tv = get_random_u32_below(delay);

	if (!mod_delayed_work(mld_wq, &idev->mc_ifc_work, tv + 2))
		in6_dev_hold(idev);
}
/* Stop interface-change report retransmission and cancel its work;
 * a successful cancel drops the reference the work was holding.
 * called with mc_lock
 */
static void mld_ifc_stop_work(struct inet6_dev *idev)
{
	idev->mc_ifc_count = 0;
	if (cancel_delayed_work(&idev->mc_ifc_work))
		__in6_dev_put(idev);
}
/* Schedule DAD-related MLD work after a random delay below @delay
 * jiffies, taking an idev reference if newly queued.
 * called with mc_lock
 */
static void mld_dad_start_work(struct inet6_dev *idev, unsigned long delay)
{
	unsigned long tv = get_random_u32_below(delay);

	if (!mod_delayed_work(mld_wq, &idev->mc_dad_work, tv + 2))
		in6_dev_hold(idev);
}
/* Cancel pending DAD work; a successful cancel drops its reference. */
static void mld_dad_stop_work(struct inet6_dev *idev)
{
	if (cancel_delayed_work(&idev->mc_dad_work))
		__in6_dev_put(idev);
}
/* Cancel pending query-processing work under the query-queue lock;
 * a successful cancel drops the reference the work was holding.
 */
static void mld_query_stop_work(struct inet6_dev *idev)
{
	spin_lock_bh(&idev->mc_query_lock);
	if (cancel_delayed_work(&idev->mc_query_work))
		__in6_dev_put(idev);
	spin_unlock_bh(&idev->mc_query_lock);
}
/* Synchronously cancel report-processing work (waits if it is running);
 * a successful cancel drops the reference the work was holding.
 */
static void mld_report_stop_work(struct inet6_dev *idev)
{
	if (cancel_delayed_work_sync(&idev->mc_report_work))
		__in6_dev_put(idev);
}
/*
 * IGMP handling (alias multicast ICMPv6 messages)
 * Start (or shorten) the report timer for group @ma in response to a
 * query with maximum response delay @resptime.
 * called with mc_lock
 */
static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
{
	unsigned long delay = resptime;

	/* Do not start work for these addresses */
	if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
	    IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	if (cancel_delayed_work(&ma->mca_work)) {
		refcount_dec(&ma->mca_refcnt);
		/* keep the earlier expiry if one was already pending */
		delay = ma->mca_work.timer.expires - jiffies;
	}

	if (delay >= resptime)
		delay = get_random_u32_below(resptime);

	if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
		refcount_inc(&ma->mca_refcnt);
	ma->mca_flags |= MAF_TIMER_RUNNING;
}
/* mark EXCLUDE-mode sources
 * Check the @nsrcs queried sources in @srcs against @pmc's active
 * EXCLUDE-mode filters.  Returns false when every queried source is
 * excluded (nothing to report), true otherwise.
 * called with mc_lock
 */
static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
			     const struct in6_addr *srcs)
{
	struct ip6_sf_list *psf;
	int i, scount;

	scount = 0;
	for_each_psf_mclock(pmc, psf) {
		if (scount == nsrcs)
			break;
		for (i = 0; i < nsrcs; i++) {
			/* skip inactive filters */
			if (psf->sf_count[MCAST_INCLUDE] ||
			    pmc->mca_sfcount[MCAST_EXCLUDE] !=
			    psf->sf_count[MCAST_EXCLUDE])
				break;
			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
				scount++;
				break;
			}
		}
	}
	pmc->mca_flags &= ~MAF_GSQUERY;
	if (scount == nsrcs)	/* all sources excluded */
		return false;
	return true;
}
/* Mark which of the @nsrcs queried sources in @srcs should be answered
 * in the group-and-source-specific response for @pmc.  Returns true
 * when at least one queried source is relevant (response needed).
 * called with mc_lock
 */
static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
			    const struct in6_addr *srcs)
{
	struct ip6_sf_list *psf;
	int i, scount;

	if (pmc->mca_sfmode == MCAST_EXCLUDE)
		return mld_xmarksources(pmc, nsrcs, srcs);

	/* mark INCLUDE-mode sources */

	scount = 0;
	for_each_psf_mclock(pmc, psf) {
		if (scount == nsrcs)
			break;
		for (i = 0; i < nsrcs; i++) {
			if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
				/* flag source for the pending response */
				psf->sf_gsresp = 1;
				scount++;
				break;
			}
		}
	}
	if (!scount) {
		pmc->mca_flags &= ~MAF_GSQUERY;
		return false;
	}
	pmc->mca_flags |= MAF_GSQUERY;
	return true;
}
/* Return the administratively forced MLD version for @idev (0 when no
 * version is forced).  The "all" devconf setting takes precedence over
 * the per-device one.
 */
static int mld_force_mld_version(const struct inet6_dev *idev)
{
	const struct net *net = dev_net(idev->dev);
	int all_force;

	all_force = READ_ONCE(net->ipv6.devconf_all->force_mld_version);
	/* Normally, both are 0 here. If enforcement to a particular is
	 * being used, individual device enforcement will have a lower
	 * precedence over 'all' device (.../conf/all/force_mld_version).
	 */
	return all_force ?: READ_ONCE(idev->cnf.force_mld_version);
}
  1027. static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
  1028. {
  1029. return mld_force_mld_version(idev) == 2;
  1030. }
  1031. static bool mld_in_v1_mode_only(const struct inet6_dev *idev)
  1032. {
  1033. return mld_force_mld_version(idev) == 1;
  1034. }
/* Decide whether @idev currently operates in MLDv1 compatibility mode:
 * forced v2 wins, then forced v1, then the "v1 querier seen recently"
 * timeout (mc_v1_seen).
 */
static bool mld_in_v1_mode(const struct inet6_dev *idev)
{
	if (mld_in_v2_mode_only(idev))
		return false;
	if (mld_in_v1_mode_only(idev))
		return true;
	if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
		return true;

	return false;
}
/* Enter MLDv1 compatibility mode: record when to switch back to v2
 * based on the Older Version Querier Present Timeout.
 */
static void mld_set_v1_mode(struct inet6_dev *idev)
{
	/* RFC3810, relevant sections:
	 *  - 9.1. Robustness Variable
	 *  - 9.2. Query Interval
	 *  - 9.3. Query Response Interval
	 *  - 9.12. Older Version Querier Present Timeout
	 */
	unsigned long switchback;

	switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;

	idev->mc_v1_seen = jiffies + switchback;
}
/* Adopt the querier's robustness variable (QRV) from an MLDv2 query,
 * clamping it to a sane minimum.
 */
static void mld_update_qrv(struct inet6_dev *idev,
			   const struct mld2_query *mlh2)
{
	/* RFC3810, relevant sections:
	 *  - 5.1.8. QRV (Querier's Robustness Variable)
	 *  - 9.1. Robustness Variable
	 */

	/* The value of the Robustness Variable MUST NOT be zero,
	 * and SHOULD NOT be one. Catch this here if we ever run
	 * into such a case in future.
	 */
	const int min_qrv = min(MLD_QRV_DEFAULT, sysctl_mld_qrv);
	WARN_ON(idev->mc_qrv == 0);

	if (mlh2->mld2q_qrv > 0)
		idev->mc_qrv = mlh2->mld2q_qrv;

	if (unlikely(idev->mc_qrv < min_qrv)) {
		net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
				     idev->mc_qrv, min_qrv);
		idev->mc_qrv = min_qrv;
	}
}
/* Decode the QQIC field of an MLDv2 query (linear below 128, floating
 * point mantissa/exponent form above) and store the resulting query
 * interval on @idev in jiffies.
 */
static void mld_update_qi(struct inet6_dev *idev,
			  const struct mld2_query *mlh2)
{
	/* RFC3810, relevant sections:
	 *  - 5.1.9. QQIC (Querier's Query Interval Code)
	 *  - 9.2. Query Interval
	 *  - 9.12. Older Version Querier Present Timeout
	 *    (the [Query Interval] in the last Query received)
	 */
	unsigned long mc_qqi;

	if (mlh2->mld2q_qqic < 128) {
		mc_qqi = mlh2->mld2q_qqic;
	} else {
		unsigned long mc_man, mc_exp;

		/* QQIC = (mant | 0x10) << (exp + 3), per RFC 3810 5.1.9 */
		mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic);
		mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic);

		mc_qqi = (mc_man | 0x10) << (mc_exp + 3);
	}

	idev->mc_qi = mc_qqi * HZ;
}
/* Store the query response interval derived from the query's Maximum
 * Response Code on @idev, converted to jiffies.
 */
static void mld_update_qri(struct inet6_dev *idev,
			   const struct mld2_query *mlh2)
{
	/* RFC3810, relevant sections:
	 *  - 5.1.3. Maximum Response Code
	 *  - 9.3. Query Response Interval
	 */
	idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
}
/* Handle an MLDv1-format query: compute the maximum response delay and,
 * for true v1 queries, fall back into v1 compatibility mode.  Returns
 * -EINVAL when v2-only operation is forced, 0 otherwise.
 * @v1_query: true if the packet really was a 24-octet MLDv1 query (as
 *            opposed to processing a v2 query while in v1 mode).
 */
static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
			  unsigned long *max_delay, bool v1_query)
{
	unsigned long mldv1_md;

	/* Ignore v1 queries */
	if (mld_in_v2_mode_only(idev))
		return -EINVAL;

	mldv1_md = ntohs(mld->mld_maxdelay);

	/* When in MLDv1 fallback and a MLDv2 router start-up being
	 * unaware of current MLDv1 operation, the MRC == MRD mapping
	 * only works when the exponential algorithm is not being
	 * used (as MLDv1 is unaware of such things).
	 *
	 * According to the RFC author, the MLDv2 implementations
	 * he's aware of all use a MRC < 32768 on start up queries.
	 *
	 * Thus, should we *ever* encounter something else larger
	 * than that, just assume the maximum possible within our
	 * reach.
	 */
	if (!v1_query)
		mldv1_md = min(mldv1_md, MLDV1_MRD_MAX_COMPAT);

	*max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);

	/* MLDv1 router present: we need to go into v1 mode *only*
	 * when an MLDv1 query is received as per section 9.12. of
	 * RFC3810! And we know from RFC2710 section 3.7 that MLDv1
	 * queries MUST be of exactly 24 octets.
	 */
	if (v1_query)
		mld_set_v1_mode(idev);

	/* cancel MLDv2 report work */
	mld_gq_stop_work(idev);
	/* cancel the interface change work */
	mld_ifc_stop_work(idev);
	/* clear deleted report items */
	mld_clear_delrec(idev);

	return 0;
}
  1145. static void mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
  1146. unsigned long *max_delay)
  1147. {
  1148. *max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);
  1149. mld_update_qrv(idev, mld);
  1150. mld_update_qi(idev, mld);
  1151. mld_update_qri(idev, mld);
  1152. idev->mc_maxdelay = *max_delay;
  1153. return;
  1154. }
/* Queue a received MLD query skb for deferred processing on the MLD
 * workqueue; the skb is dropped if the device state is going away or
 * the backlog limit (MLD_MAX_SKBS) is reached.
 * called with rcu_read_lock()
 */
void igmp6_event_query(struct sk_buff *skb)
{
	struct inet6_dev *idev = __in6_dev_get(skb->dev);

	if (!idev || idev->dead)
		goto out;

	spin_lock_bh(&idev->mc_query_lock);
	if (skb_queue_len(&idev->mc_query_queue) < MLD_MAX_SKBS) {
		__skb_queue_tail(&idev->mc_query_queue, skb);
		if (!mod_delayed_work(mld_wq, &idev->mc_query_work, 0))
			in6_dev_hold(idev);
		/* ownership moved to the queue */
		skb = NULL;
	}
	spin_unlock_bh(&idev->mc_query_lock);
out:
	kfree_skb(skb);
}
/* Parse and act on one queued MLD query skb: validate it per RFC 3810
 * section 6.2, dispatch to v1/v2 processing, and start report timers on
 * the matching group(s).  Consumes the skb.  Runs with mc_lock held by
 * mld_query_work().
 */
static void __mld_query_work(struct sk_buff *skb)
{
	struct mld2_query *mlh2 = NULL;
	const struct in6_addr *group;
	unsigned long max_delay;
	struct inet6_dev *idev;
	struct ifmcaddr6 *ma;
	struct mld_msg *mld;
	int group_type;
	int mark = 0;
	int len, err;

	if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
		goto kfree_skb;

	/* compute payload length excluding extension headers */
	len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
	len -= skb_network_header_len(skb);

	/* RFC3810 6.2
	 * Upon reception of an MLD message that contains a Query, the node
	 * checks if the source address of the message is a valid link-local
	 * address, if the Hop Limit is set to 1, and if the Router Alert
	 * option is present in the Hop-By-Hop Options header of the IPv6
	 * packet.  If any of these checks fails, the packet is dropped.
	 */
	if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
	    ipv6_hdr(skb)->hop_limit != 1 ||
	    !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
	    IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
		goto kfree_skb;

	idev = in6_dev_get(skb->dev);
	if (!idev)
		goto kfree_skb;

	mld = (struct mld_msg *)icmp6_hdr(skb);
	group = &mld->mld_mca;
	group_type = ipv6_addr_type(group);

	if (group_type != IPV6_ADDR_ANY &&
	    !(group_type&IPV6_ADDR_MULTICAST))
		goto out;

	/* dispatch on query length: v1 (24 octets) vs v2 */
	if (len < MLD_V1_QUERY_LEN) {
		goto out;
	} else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) {
		err = mld_process_v1(idev, mld, &max_delay,
				     len == MLD_V1_QUERY_LEN);
		if (err < 0)
			goto out;
	} else if (len >= MLD_V2_QUERY_LEN_MIN) {
		int srcs_offset = sizeof(struct mld2_query) -
				  sizeof(struct icmp6hdr);

		if (!pskb_may_pull(skb, srcs_offset))
			goto out;

		mlh2 = (struct mld2_query *)skb_transport_header(skb);

		mld_process_v2(idev, mlh2, &max_delay);

		if (group_type == IPV6_ADDR_ANY) { /* general query */
			if (mlh2->mld2q_nsrcs)
				goto out; /* no sources allowed */

			mld_gq_start_work(idev);
			goto out;
		}
		/* mark sources to include, if group & source-specific */
		if (mlh2->mld2q_nsrcs != 0) {
			if (!pskb_may_pull(skb, srcs_offset +
			    ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
				goto out;

			/* re-fetch: pskb_may_pull may relocate header data */
			mlh2 = (struct mld2_query *)skb_transport_header(skb);
			mark = 1;
		}
	} else {
		goto out;
	}

	if (group_type == IPV6_ADDR_ANY) {
		/* general query: schedule a report for every group */
		for_each_mc_mclock(idev, ma) {
			igmp6_group_queried(ma, max_delay);
		}
	} else {
		for_each_mc_mclock(idev, ma) {
			if (!ipv6_addr_equal(group, &ma->mca_addr))
				continue;
			if (ma->mca_flags & MAF_TIMER_RUNNING) {
				/* gsquery <- gsquery && mark */
				if (!mark)
					ma->mca_flags &= ~MAF_GSQUERY;
			} else {
				/* gsquery <- mark */
				if (mark)
					ma->mca_flags |= MAF_GSQUERY;
				else
					ma->mca_flags &= ~MAF_GSQUERY;
			}
			if (!(ma->mca_flags & MAF_GSQUERY) ||
			    mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
				igmp6_group_queried(ma, max_delay);
			break;
		}
	}

out:
	in6_dev_put(idev);
kfree_skb:
	consume_skb(skb);
}
/* Workqueue handler draining the per-device query queue.  Moves up to
 * MLD_MAX_QUEUE skbs to a private list (to bound time under the
 * spinlock), processes them under mc_lock, and reschedules itself when
 * the queue was not drained; otherwise drops the work's idev reference.
 */
static void mld_query_work(struct work_struct *work)
{
	struct inet6_dev *idev = container_of(to_delayed_work(work),
					      struct inet6_dev,
					      mc_query_work);
	struct sk_buff_head q;
	struct sk_buff *skb;
	bool rework = false;
	int cnt = 0;

	skb_queue_head_init(&q);

	spin_lock_bh(&idev->mc_query_lock);
	while ((skb = __skb_dequeue(&idev->mc_query_queue))) {
		__skb_queue_tail(&q, skb);

		if (++cnt >= MLD_MAX_QUEUE) {
			rework = true;
			break;
		}
	}
	spin_unlock_bh(&idev->mc_query_lock);

	mutex_lock(&idev->mc_lock);
	while ((skb = __skb_dequeue(&q)))
		__mld_query_work(skb);
	mutex_unlock(&idev->mc_lock);

	/* requeue keeps the idev reference; otherwise release it */
	if (rework && queue_delayed_work(mld_wq, &idev->mc_query_work, 0))
		return;

	in6_dev_put(idev);
}
/* Queue a received MLD report skb for deferred processing on the MLD
 * workqueue; the skb is dropped if the device state is going away or
 * the backlog limit (MLD_MAX_SKBS) is reached.
 * called with rcu_read_lock()
 */
void igmp6_event_report(struct sk_buff *skb)
{
	struct inet6_dev *idev = __in6_dev_get(skb->dev);

	if (!idev || idev->dead)
		goto out;

	spin_lock_bh(&idev->mc_report_lock);
	if (skb_queue_len(&idev->mc_report_queue) < MLD_MAX_SKBS) {
		__skb_queue_tail(&idev->mc_report_queue, skb);
		if (!mod_delayed_work(mld_wq, &idev->mc_report_work, 0))
			in6_dev_hold(idev);
		/* ownership moved to the queue */
		skb = NULL;
	}
	spin_unlock_bh(&idev->mc_report_lock);
out:
	kfree_skb(skb);
}
/* Process one queued MLD report skb: when another host reported the
 * same group, cancel our own pending report for it (report
 * suppression).  Consumes the skb.  Runs with mc_lock held by
 * mld_report_work().
 */
static void __mld_report_work(struct sk_buff *skb)
{
	struct inet6_dev *idev;
	struct ifmcaddr6 *ma;
	struct mld_msg *mld;
	int addr_type;

	/* Our own report looped back. Ignore it. */
	if (skb->pkt_type == PACKET_LOOPBACK)
		goto kfree_skb;

	/* send our report if the MC router may not have heard this report */
	if (skb->pkt_type != PACKET_MULTICAST &&
	    skb->pkt_type != PACKET_BROADCAST)
		goto kfree_skb;

	if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
		goto kfree_skb;

	mld = (struct mld_msg *)icmp6_hdr(skb);

	/* Drop reports with not link local source */
	addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
	if (addr_type != IPV6_ADDR_ANY &&
	    !(addr_type&IPV6_ADDR_LINKLOCAL))
		goto kfree_skb;

	idev = in6_dev_get(skb->dev);
	if (!idev)
		goto kfree_skb;

	/*
	 *	Cancel the work for this group
	 */

	for_each_mc_mclock(idev, ma) {
		if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
			if (cancel_delayed_work(&ma->mca_work))
				refcount_dec(&ma->mca_refcnt);
			ma->mca_flags &= ~(MAF_LAST_REPORTER |
					   MAF_TIMER_RUNNING);
			break;
		}
	}

	in6_dev_put(idev);
kfree_skb:
	consume_skb(skb);
}
/* Workqueue handler draining the per-device report queue.  Mirrors
 * mld_query_work(): batch up to MLD_MAX_QUEUE skbs under the spinlock,
 * process them under mc_lock, requeue if more remain, else drop the
 * work's idev reference.
 */
static void mld_report_work(struct work_struct *work)
{
	struct inet6_dev *idev = container_of(to_delayed_work(work),
					      struct inet6_dev,
					      mc_report_work);
	struct sk_buff_head q;
	struct sk_buff *skb;
	bool rework = false;
	int cnt = 0;

	skb_queue_head_init(&q);
	spin_lock_bh(&idev->mc_report_lock);
	while ((skb = __skb_dequeue(&idev->mc_report_queue))) {
		__skb_queue_tail(&q, skb);

		if (++cnt >= MLD_MAX_QUEUE) {
			rework = true;
			break;
		}
	}
	spin_unlock_bh(&idev->mc_report_lock);

	mutex_lock(&idev->mc_lock);
	while ((skb = __skb_dequeue(&q)))
		__mld_report_work(skb);
	mutex_unlock(&idev->mc_lock);

	/* requeue keeps the idev reference; otherwise release it */
	if (rework && queue_delayed_work(mld_wq, &idev->mc_report_work, 0))
		return;

	in6_dev_put(idev);
}
/* Decide whether source @psf of group @pmc must be listed in an MLDv2
 * group record of type @type (RFC 3810 record-building rules).
 * @gdeleted: the group record is being built from the tomb list.
 * @sdeleted: the source comes from the group's dead-source list.
 */
static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
		  int gdeleted, int sdeleted)
{
	switch (type) {
	case MLD2_MODE_IS_INCLUDE:
	case MLD2_MODE_IS_EXCLUDE:
		if (gdeleted || sdeleted)
			return false;
		if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
			if (pmc->mca_sfmode == MCAST_INCLUDE)
				return true;
			/* don't include if this source is excluded
			 * in all filters
			 */
			if (psf->sf_count[MCAST_INCLUDE])
				return type == MLD2_MODE_IS_INCLUDE;
			return pmc->mca_sfcount[MCAST_EXCLUDE] ==
				psf->sf_count[MCAST_EXCLUDE];
		}
		return false;
	case MLD2_CHANGE_TO_INCLUDE:
		if (gdeleted || sdeleted)
			return false;
		return psf->sf_count[MCAST_INCLUDE] != 0;
	case MLD2_CHANGE_TO_EXCLUDE:
		if (gdeleted || sdeleted)
			return false;
		if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
		    psf->sf_count[MCAST_INCLUDE])
			return false;
		return pmc->mca_sfcount[MCAST_EXCLUDE] ==
			psf->sf_count[MCAST_EXCLUDE];
	case MLD2_ALLOW_NEW_SOURCES:
		if (gdeleted || !psf->sf_crcount)
			return false;
		return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
	case MLD2_BLOCK_OLD_SOURCES:
		if (pmc->mca_sfmode == MCAST_INCLUDE)
			return gdeleted || (psf->sf_crcount && sdeleted);
		return psf->sf_crcount && !gdeleted && !sdeleted;
	}
	return false;
}
  1424. static int
  1425. mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
  1426. {
  1427. struct ip6_sf_list *psf;
  1428. int scount = 0;
  1429. for_each_psf_mclock(pmc, psf) {
  1430. if (!is_in(pmc, psf, type, gdeleted, sdeleted))
  1431. continue;
  1432. scount++;
  1433. }
  1434. return scount;
  1435. }
/* Build the IPv6 header of an outgoing MLD packet on @skb: @saddr ->
 * @daddr, next header @proto, payload length @len, hop limit taken
 * from the MLD socket.
 */
static void ip6_mc_hdr(const struct sock *sk, struct sk_buff *skb,
		       struct net_device *dev, const struct in6_addr *saddr,
		       const struct in6_addr *daddr, int proto, int len)
{
	struct ipv6hdr *hdr;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	skb_reset_network_header(skb);
	skb_put(skb, sizeof(struct ipv6hdr));
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, 0, 0);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = READ_ONCE(inet6_sk(sk)->hop_limit);

	hdr->saddr = *saddr;
	hdr->daddr = *daddr;
}
/* Allocate and initialise a fresh MLDv2 report skb for @idev sized for
 * @mtu: IPv6 header (with hop-by-hop Router Alert option) plus an empty
 * mld2_report.  Returns NULL on allocation failure.
 */
static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
{
	/* hop-by-hop extension header carrying the MLD Router Alert */
	u8 ra[8] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT,
		     2, 0, 0, IPV6_TLV_PADN, 0 };
	struct net_device *dev = idev->dev;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	const struct in6_addr *saddr;
	struct in6_addr addr_buf;
	struct mld2_report *pmr;
	struct sk_buff *skb;
	unsigned int size;
	struct sock *sk;
	struct net *net;

	/* we assume size > sizeof(ra) here
	 * Also try to not allocate high-order pages for big MTU
	 */
	size = min_t(int, mtu, PAGE_SIZE / 2) + hlen + tlen;
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb->priority = TC_PRIO_CONTROL;
	skb_reserve(skb, hlen);
	skb_tailroom_reserve(skb, mtu, tlen);

	rcu_read_lock();

	net = dev_net_rcu(dev);
	sk = net->ipv6.igmp_sk;
	skb_set_owner_w(skb, sk);

	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
		/* <draft-ietf-magma-mld-source-05.txt>:
		 * use unspecified address as the source address
		 * when a valid link-local address is not available.
		 */
		saddr = &in6addr_any;
	} else
		saddr = &addr_buf;

	ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);

	rcu_read_unlock();

	skb_put_data(skb, ra, sizeof(ra));

	skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
	skb_put(skb, sizeof(*pmr));
	pmr = (struct mld2_report *)skb_transport_header(skb);
	pmr->mld2r_type = ICMPV6_MLD2_REPORT;
	pmr->mld2r_resv1 = 0;
	pmr->mld2r_cksum = 0;
	pmr->mld2r_resv2 = 0;
	pmr->mld2r_ngrec = 0;
	return skb;
}
/* Finalise and transmit a fully built MLDv2 report skb: fix up the
 * payload length and ICMPv6 checksum, route it, and push it through the
 * netfilter LOCAL_OUT hook.  Consumes the skb and updates SNMP stats.
 */
static void mld_sendpack(struct sk_buff *skb)
{
	struct ipv6hdr *pip6 = ipv6_hdr(skb);
	struct mld2_report *pmr =
			      (struct mld2_report *)skb_transport_header(skb);
	int payload_len, mldlen;
	struct inet6_dev *idev;
	struct net *net = dev_net(skb->dev);
	int err;
	struct flowi6 fl6;
	struct dst_entry *dst;

	rcu_read_lock();
	idev = __in6_dev_get(skb->dev);
	IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);

	payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
		sizeof(*pip6);
	mldlen = skb_tail_pointer(skb) - skb_transport_header(skb);
	pip6->payload_len = htons(payload_len);

	pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
					   IPPROTO_ICMPV6,
					   csum_partial(skb_transport_header(skb),
							mldlen, 0));

	icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			 skb->dev->ifindex);
	dst = icmp6_dst_alloc(skb->dev, &fl6);

	err = 0;
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	if (err)
		goto err_out;

	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
		      net, net->ipv6.igmp_sk, skb, NULL, skb->dev,
		      dst_output);
out:
	if (!err) {
		ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	} else {
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
	}

	rcu_read_unlock();
	return;

err_out:
	kfree_skb(skb);
	goto out;
}
  1552. static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
  1553. {
  1554. return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel);
  1555. }
/* Append a new group record header for @pmc to @skb, allocating a
 * fresh report packet first if @skb is NULL.  Increments the report's
 * record count and returns the (possibly new) skb; *ppgr is set to
 * the new record so the caller can fill in grec_nsrcs later.
 * Returns NULL only if a new packet could not be allocated.
 */
static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
	int type, struct mld2_grec **ppgr, unsigned int mtu)
{
	struct mld2_report *pmr;
	struct mld2_grec *pgr;

	if (!skb) {
		skb = mld_newpack(pmc->idev, mtu);
		if (!skb)
			return NULL;
	}
	pgr = skb_put(skb, sizeof(struct mld2_grec));
	pgr->grec_type = type;
	pgr->grec_auxwords = 0;
	pgr->grec_nsrcs = 0;
	pgr->grec_mca = pmc->mca_addr;	/* structure copy */
	pmr = (struct mld2_report *)skb_transport_header(skb);
	pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
	*ppgr = pgr;
	return skb;
}
/* Room left in the current report packet, 0 when there is none yet */
#define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0)

/* Append one MLDv2 group record of @type for @pmc to @skb, flushing
 * full packets with mld_sendpack() and allocating new ones as needed.
 * @gdeleted / @sdeleted select the tomb (deleted) group/source lists;
 * @crsend forces emission even for sources is_in() would skip.
 * As a side effect, per-source retransmit counts (sf_crcount) are
 * decremented for ALLOW/BLOCK records, and exhausted deleted sources
 * are unlinked and freed via RCU.  Returns the current (possibly new)
 * skb, or NULL on allocation failure.
 * called with mc_lock
 */
static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
	int type, int gdeleted, int sdeleted,
	int crsend)
{
	struct ip6_sf_list *psf, *psf_prev, *psf_next;
	int scount, stotal, first, isquery, truncate;
	struct ip6_sf_list __rcu **psf_list;
	struct inet6_dev *idev = pmc->idev;
	struct net_device *dev = idev->dev;
	struct mld2_grec *pgr = NULL;
	struct mld2_report *pmr;
	unsigned int mtu;

	if (pmc->mca_flags & MAF_NOREPORT)
		return skb;

	mtu = READ_ONCE(dev->mtu);
	if (mtu < IPV6_MIN_MTU)
		return skb;

	isquery = type == MLD2_MODE_IS_INCLUDE ||
		  type == MLD2_MODE_IS_EXCLUDE;
	truncate = type == MLD2_MODE_IS_EXCLUDE ||
		   type == MLD2_CHANGE_TO_EXCLUDE;

	stotal = scount = 0;

	psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;

	if (!rcu_access_pointer(*psf_list))
		goto empty_source;

	pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;

	/* EX and TO_EX get a fresh packet, if needed */
	if (truncate) {
		if (pmr && pmr->mld2r_ngrec &&
		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
			if (skb)
				mld_sendpack(skb);
			skb = mld_newpack(idev, mtu);
		}
	}
	first = 1;
	psf_prev = NULL;
	for (psf = mc_dereference(*psf_list, idev);
	     psf;
	     psf = psf_next) {
		struct in6_addr *psrc;

		psf_next = mc_dereference(psf->sf_next, idev);

		if (!is_in(pmc, psf, type, gdeleted, sdeleted) && !crsend) {
			psf_prev = psf;
			continue;
		}

		/* Based on RFC3810 6.1. Should not send source-list change
		 * records when there is a filter mode change.
		 */
		if (((gdeleted && pmc->mca_sfmode == MCAST_EXCLUDE) ||
		     (!gdeleted && pmc->mca_crcount)) &&
		    (type == MLD2_ALLOW_NEW_SOURCES ||
		     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount)
			goto decrease_sf_crcount;

		/* clear marks on query responses */
		if (isquery)
			psf->sf_gsresp = 0;

		/* not enough room for this source (plus a record header
		 * if it would be the first one): flush and start over
		 */
		if (AVAILABLE(skb) < sizeof(*psrc) +
		    first*sizeof(struct mld2_grec)) {
			if (truncate && !first)
				break;	 /* truncate these */
			if (pgr)
				pgr->grec_nsrcs = htons(scount);
			if (skb)
				mld_sendpack(skb);
			skb = mld_newpack(idev, mtu);
			first = 1;
			scount = 0;
		}
		if (first) {
			skb = add_grhead(skb, pmc, type, &pgr, mtu);
			first = 0;
		}
		if (!skb)
			return NULL;
		psrc = skb_put(skb, sizeof(*psrc));
		*psrc = psf->sf_addr;
		scount++; stotal++;
		if ((type == MLD2_ALLOW_NEW_SOURCES ||
		     type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
decrease_sf_crcount:
			psf->sf_crcount--;
			/* deleted sources whose retransmissions are done
			 * are unlinked here and freed after a grace period
			 */
			if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
				if (psf_prev)
					rcu_assign_pointer(psf_prev->sf_next,
						mc_dereference(psf->sf_next, idev));
				else
					rcu_assign_pointer(*psf_list,
						mc_dereference(psf->sf_next, idev));
				kfree_rcu(psf, rcu);
				continue;
			}
		}
		psf_prev = psf;
	}

empty_source:
	if (!stotal) {
		/* nothing was emitted; ALLOW/BLOCK records with no
		 * sources carry no information, so skip them entirely
		 */
		if (type == MLD2_ALLOW_NEW_SOURCES ||
		    type == MLD2_BLOCK_OLD_SOURCES)
			return skb;
		if (pmc->mca_crcount || isquery || crsend) {
			/* make sure we have room for group header */
			if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
				mld_sendpack(skb);
				skb = NULL; /* add_grhead will get a new one */
			}
			skb = add_grhead(skb, pmc, type, &pgr, mtu);
		}
	}
	if (pgr)
		pgr->grec_nsrcs = htons(scount);

	if (isquery)
		pmc->mca_flags &= ~MAF_GSQUERY;	/* clear query state */
	return skb;
}
  1693. /* called with mc_lock */
  1694. static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
  1695. {
  1696. struct sk_buff *skb = NULL;
  1697. int type;
  1698. if (!pmc) {
  1699. for_each_mc_mclock(idev, pmc) {
  1700. if (pmc->mca_flags & MAF_NOREPORT)
  1701. continue;
  1702. if (pmc->mca_sfcount[MCAST_EXCLUDE])
  1703. type = MLD2_MODE_IS_EXCLUDE;
  1704. else
  1705. type = MLD2_MODE_IS_INCLUDE;
  1706. skb = add_grec(skb, pmc, type, 0, 0, 0);
  1707. }
  1708. } else {
  1709. if (pmc->mca_sfcount[MCAST_EXCLUDE])
  1710. type = MLD2_MODE_IS_EXCLUDE;
  1711. else
  1712. type = MLD2_MODE_IS_INCLUDE;
  1713. skb = add_grec(skb, pmc, type, 0, 0, 0);
  1714. }
  1715. if (skb)
  1716. mld_sendpack(skb);
  1717. }
/*
 * remove zero-count source records from a source filter list
 * called with mc_lock
 */
static void mld_clear_zeros(struct ip6_sf_list __rcu **ppsf, struct inet6_dev *idev)
{
	struct ip6_sf_list *psf_prev, *psf_next, *psf;

	psf_prev = NULL;
	for (psf = mc_dereference(*ppsf, idev);
	     psf;
	     psf = psf_next) {
		psf_next = mc_dereference(psf->sf_next, idev);
		if (psf->sf_crcount == 0) {
			/* retransmissions done: unlink under mc_lock and
			 * free after an RCU grace period (lockless readers)
			 */
			if (psf_prev)
				rcu_assign_pointer(psf_prev->sf_next,
						   mc_dereference(psf->sf_next, idev));
			else
				rcu_assign_pointer(*ppsf,
						   mc_dereference(psf->sf_next, idev));
			kfree_rcu(psf, rcu);
		} else {
			psf_prev = psf;
		}
	}
}
  1743. /* called with mc_lock */
  1744. static void mld_send_cr(struct inet6_dev *idev)
  1745. {
  1746. struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
  1747. struct sk_buff *skb = NULL;
  1748. int type, dtype;
  1749. /* deleted MCA's */
  1750. pmc_prev = NULL;
  1751. for (pmc = mc_dereference(idev->mc_tomb, idev);
  1752. pmc;
  1753. pmc = pmc_next) {
  1754. pmc_next = mc_dereference(pmc->next, idev);
  1755. if (pmc->mca_sfmode == MCAST_INCLUDE) {
  1756. type = MLD2_BLOCK_OLD_SOURCES;
  1757. dtype = MLD2_BLOCK_OLD_SOURCES;
  1758. skb = add_grec(skb, pmc, type, 1, 0, 0);
  1759. skb = add_grec(skb, pmc, dtype, 1, 1, 0);
  1760. }
  1761. if (pmc->mca_crcount) {
  1762. if (pmc->mca_sfmode == MCAST_EXCLUDE) {
  1763. type = MLD2_CHANGE_TO_INCLUDE;
  1764. skb = add_grec(skb, pmc, type, 1, 0, 0);
  1765. }
  1766. pmc->mca_crcount--;
  1767. if (pmc->mca_crcount == 0) {
  1768. mld_clear_zeros(&pmc->mca_tomb, idev);
  1769. mld_clear_zeros(&pmc->mca_sources, idev);
  1770. }
  1771. }
  1772. if (pmc->mca_crcount == 0 &&
  1773. !rcu_access_pointer(pmc->mca_tomb) &&
  1774. !rcu_access_pointer(pmc->mca_sources)) {
  1775. if (pmc_prev)
  1776. rcu_assign_pointer(pmc_prev->next, pmc_next);
  1777. else
  1778. rcu_assign_pointer(idev->mc_tomb, pmc_next);
  1779. in6_dev_put(pmc->idev);
  1780. kfree_rcu(pmc, rcu);
  1781. } else
  1782. pmc_prev = pmc;
  1783. }
  1784. /* change recs */
  1785. for_each_mc_mclock(idev, pmc) {
  1786. if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
  1787. type = MLD2_BLOCK_OLD_SOURCES;
  1788. dtype = MLD2_ALLOW_NEW_SOURCES;
  1789. } else {
  1790. type = MLD2_ALLOW_NEW_SOURCES;
  1791. dtype = MLD2_BLOCK_OLD_SOURCES;
  1792. }
  1793. skb = add_grec(skb, pmc, type, 0, 0, 0);
  1794. skb = add_grec(skb, pmc, dtype, 0, 1, 0); /* deleted sources */
  1795. /* filter mode changes */
  1796. if (pmc->mca_crcount) {
  1797. if (pmc->mca_sfmode == MCAST_EXCLUDE)
  1798. type = MLD2_CHANGE_TO_EXCLUDE;
  1799. else
  1800. type = MLD2_CHANGE_TO_INCLUDE;
  1801. skb = add_grec(skb, pmc, type, 0, 0, 0);
  1802. pmc->mca_crcount--;
  1803. }
  1804. }
  1805. if (!skb)
  1806. return;
  1807. (void) mld_sendpack(skb);
  1808. }
/* Build and send an MLDv1 message (@type: report or done/reduction)
 * for group @addr on @dev.  Reduction messages are addressed to the
 * all-routers group; reports to the group itself.  The packet carries
 * a hop-by-hop router-alert option (ra[]) as MLD requires.
 */
static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
{
	const struct in6_addr *snd_addr, *saddr;
	int err, len, payload_len, full_len;
	struct in6_addr addr_buf;
	struct inet6_dev *idev;
	struct sk_buff *skb;
	struct mld_msg *hdr;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	u8 ra[8] = { IPPROTO_ICMPV6, 0,
		     IPV6_TLV_ROUTERALERT, 2, 0, 0,
		     IPV6_TLV_PADN, 0 };
	struct dst_entry *dst;
	struct flowi6 fl6;
	struct net *net;
	struct sock *sk;

	if (type == ICMPV6_MGM_REDUCTION)
		snd_addr = &in6addr_linklocal_allrouters;
	else
		snd_addr = addr;

	len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
	payload_len = len + sizeof(ra);
	full_len = sizeof(struct ipv6hdr) + payload_len;

	skb = alloc_skb(hlen + tlen + full_len, GFP_KERNEL);

	rcu_read_lock();

	net = dev_net_rcu(dev);
	idev = __in6_dev_get(dev);
	/* OUTREQUESTS is counted even if the allocation failed */
	IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTREQUESTS);
	if (!skb) {
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
		rcu_read_unlock();
		return;
	}
	sk = net->ipv6.igmp_sk;
	skb_set_owner_w(skb, sk);

	skb->priority = TC_PRIO_CONTROL;
	skb_reserve(skb, hlen);

	if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
		/* <draft-ietf-magma-mld-source-05.txt>:
		 * use unspecified address as the source address
		 * when a valid link-local address is not available.
		 */
		saddr = &in6addr_any;
	} else
		saddr = &addr_buf;

	ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);

	skb_put_data(skb, ra, sizeof(ra));

	hdr = skb_put_zero(skb, sizeof(struct mld_msg));
	hdr->mld_type = type;
	hdr->mld_mca = *addr;

	hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
					 IPPROTO_ICMPV6,
					 csum_partial(hdr, len, 0));

	icmpv6_flow_init(sk, &fl6, type,
			 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			 skb->dev->ifindex);
	dst = icmp6_dst_alloc(skb->dev, &fl6);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto err_out;
	}

	skb_dst_set(skb, dst);
	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
		      net, sk, skb, NULL, skb->dev,
		      dst_output);
out:
	if (!err) {
		ICMP6MSGOUT_INC_STATS(net, idev, type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	} else
		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);

	rcu_read_unlock();
	return;

err_out:
	kfree_skb(skb);
	goto out;
}
  1887. /* called with mc_lock */
  1888. static void mld_send_initial_cr(struct inet6_dev *idev)
  1889. {
  1890. struct sk_buff *skb;
  1891. struct ifmcaddr6 *pmc;
  1892. int type;
  1893. if (mld_in_v1_mode(idev))
  1894. return;
  1895. skb = NULL;
  1896. for_each_mc_mclock(idev, pmc) {
  1897. if (pmc->mca_sfcount[MCAST_EXCLUDE])
  1898. type = MLD2_CHANGE_TO_EXCLUDE;
  1899. else
  1900. type = MLD2_ALLOW_NEW_SOURCES;
  1901. skb = add_grec(skb, pmc, type, 0, 0, 1);
  1902. }
  1903. if (skb)
  1904. mld_sendpack(skb);
  1905. }
/* DAD finished on @idev: send the first unsolicited report burst and,
 * if more retransmissions remain (mc_dad_count, seeded from mc_qrv),
 * schedule the dad work item to send the rest.
 */
void ipv6_mc_dad_complete(struct inet6_dev *idev)
{
	mutex_lock(&idev->mc_lock);
	idev->mc_dad_count = idev->mc_qrv;
	if (idev->mc_dad_count) {
		mld_send_initial_cr(idev);
		idev->mc_dad_count--;
		if (idev->mc_dad_count)
			mld_dad_start_work(idev,
					   unsolicited_report_interval(idev));
	}
	mutex_unlock(&idev->mc_lock);
}
/* Deferred retransmission of the post-DAD unsolicited reports; re-arms
 * itself until mc_dad_count runs out.  Drops the idev reference taken
 * when the work was scheduled (presumably by mld_dad_start_work —
 * TODO confirm, the helper is outside this view).
 */
static void mld_dad_work(struct work_struct *work)
{
	struct inet6_dev *idev = container_of(to_delayed_work(work),
					      struct inet6_dev,
					      mc_dad_work);
	mutex_lock(&idev->mc_lock);
	mld_send_initial_cr(idev);
	if (idev->mc_dad_count) {
		idev->mc_dad_count--;
		if (idev->mc_dad_count)
			mld_dad_start_work(idev,
					   unsolicited_report_interval(idev));
	}
	mutex_unlock(&idev->mc_lock);
	in6_dev_put(idev);
}
/* Drop one @sfmode reference on source @psfsrc of group @pmc.  When
 * the last reference in both modes is gone the source is unlinked;
 * if it was active (sf_oldin) and MLDv2 is running it is moved to the
 * tomb list for BLOCK retransmissions (returns 1), otherwise freed.
 * Returns -ESRCH when the source is unknown or its count is already 0.
 * called with mc_lock
 */
static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
	const struct in6_addr *psfsrc)
{
	struct ip6_sf_list *psf, *psf_prev;
	int rv = 0;

	psf_prev = NULL;
	for_each_psf_mclock(pmc, psf) {
		if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
			break;
		psf_prev = psf;
	}
	if (!psf || psf->sf_count[sfmode] == 0) {
		/* source filter not found, or count wrong =>  bug */
		return -ESRCH;
	}
	psf->sf_count[sfmode]--;
	if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
		struct inet6_dev *idev = pmc->idev;

		/* no more filters for this source */
		if (psf_prev)
			rcu_assign_pointer(psf_prev->sf_next,
					   mc_dereference(psf->sf_next, idev));
		else
			rcu_assign_pointer(pmc->mca_sources,
					   mc_dereference(psf->sf_next, idev));

		if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
		    !mld_in_v1_mode(idev)) {
			/* schedule qrv BLOCK retransmissions via the tomb */
			psf->sf_crcount = idev->mc_qrv;
			rcu_assign_pointer(psf->sf_next,
					   mc_dereference(pmc->mca_tomb, idev));
			rcu_assign_pointer(pmc->mca_tomb, psf);
			rv = 1;
		} else {
			kfree_rcu(psf, rcu);
		}
	}
	return rv;
}
/* Remove @sfcount sources in @psfsrc[] (filter mode @sfmode) from the
 * group @pmca on @idev.  @delta distinguishes per-socket delta updates
 * from full-state removals (only the latter decrement mca_sfcount).
 * Triggers an interface change report when the group flips from
 * EXCLUDE to INCLUDE mode or any source state changed.
 * Returns 0, or the first error (-ENODEV/-ESRCH/-EINVAL/...).
 * called with mc_lock
 */
static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
	int sfmode, int sfcount, const struct in6_addr *psfsrc,
	int delta)
{
	struct ifmcaddr6 *pmc;
	int changerec = 0;
	int i, err;

	if (!idev)
		return -ENODEV;

	for_each_mc_mclock(idev, pmc) {
		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
			break;
	}
	if (!pmc)
		return -ESRCH;

	/* snapshot per-source "active" state before mutating */
	sf_markstate(pmc);
	if (!delta) {
		if (!pmc->mca_sfcount[sfmode])
			return -EINVAL;

		pmc->mca_sfcount[sfmode]--;
	}
	err = 0;
	for (i = 0; i < sfcount; i++) {
		int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);

		changerec |= rv > 0;
		if (!err && rv < 0)
			err = rv;
	}
	if (pmc->mca_sfmode == MCAST_EXCLUDE &&
	    pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
	    pmc->mca_sfcount[MCAST_INCLUDE]) {
		struct ip6_sf_list *psf;

		/* filter mode change */
		pmc->mca_sfmode = MCAST_INCLUDE;
		pmc->mca_crcount = idev->mc_qrv;
		idev->mc_ifc_count = pmc->mca_crcount;
		/* source-list change records are suppressed during a
		 * filter-mode change (RFC 3810 6.1)
		 */
		for_each_psf_mclock(pmc, psf)
			psf->sf_crcount = 0;
		mld_ifc_event(pmc->idev);
	} else if (sf_setstate(pmc) || changerec) {
		mld_ifc_event(pmc->idev);
	}
	return err;
}
  2019. /*
  2020. * Add multicast single-source filter to the interface list
  2021. * called with mc_lock
  2022. */
  2023. static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
  2024. const struct in6_addr *psfsrc)
  2025. {
  2026. struct ip6_sf_list *psf, *psf_prev;
  2027. psf_prev = NULL;
  2028. for_each_psf_mclock(pmc, psf) {
  2029. if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
  2030. break;
  2031. psf_prev = psf;
  2032. }
  2033. if (!psf) {
  2034. psf = kzalloc(sizeof(*psf), GFP_KERNEL);
  2035. if (!psf)
  2036. return -ENOBUFS;
  2037. psf->sf_addr = *psfsrc;
  2038. if (psf_prev) {
  2039. rcu_assign_pointer(psf_prev->sf_next, psf);
  2040. } else {
  2041. rcu_assign_pointer(pmc->mca_sources, psf);
  2042. }
  2043. }
  2044. psf->sf_count[sfmode]++;
  2045. return 0;
  2046. }
  2047. /* called with mc_lock */
  2048. static void sf_markstate(struct ifmcaddr6 *pmc)
  2049. {
  2050. struct ip6_sf_list *psf;
  2051. int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
  2052. for_each_psf_mclock(pmc, psf) {
  2053. if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
  2054. psf->sf_oldin = mca_xcount ==
  2055. psf->sf_count[MCAST_EXCLUDE] &&
  2056. !psf->sf_count[MCAST_INCLUDE];
  2057. } else {
  2058. psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
  2059. }
  2060. }
  2061. }
/* Compare each source's current "active" state against the snapshot
 * taken by sf_markstate() and schedule change-record retransmissions:
 * newly-active sources get sf_crcount = qrv (removing any stale tomb
 * copy), newly-inactive sources get a tomb entry with crcount = qrv.
 * Returns the number of sources whose state changed.
 * called with mc_lock
 */
static int sf_setstate(struct ifmcaddr6 *pmc)
{
	struct ip6_sf_list *psf, *dpsf;
	int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
	int qrv = pmc->idev->mc_qrv;
	int new_in, rv;

	rv = 0;
	for_each_psf_mclock(pmc, psf) {
		if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
			new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
		if (new_in) {
			if (!psf->sf_oldin) {
				struct ip6_sf_list *prev = NULL;

				/* source became active: drop any pending
				 * "delete" record for it from the tomb
				 */
				for_each_psf_tomb(pmc, dpsf) {
					if (ipv6_addr_equal(&dpsf->sf_addr,
							    &psf->sf_addr))
						break;
					prev = dpsf;
				}
				if (dpsf) {
					if (prev)
						rcu_assign_pointer(prev->sf_next,
							mc_dereference(dpsf->sf_next,
								       pmc->idev));
					else
						rcu_assign_pointer(pmc->mca_tomb,
							mc_dereference(dpsf->sf_next,
								       pmc->idev));
					kfree_rcu(dpsf, rcu);
				}
				psf->sf_crcount = qrv;
				rv++;
			}
		} else if (psf->sf_oldin) {
			psf->sf_crcount = 0;
			/*
			 * add or update "delete" records if an active filter
			 * is now inactive
			 */

			for_each_psf_tomb(pmc, dpsf)
				if (ipv6_addr_equal(&dpsf->sf_addr,
						    &psf->sf_addr))
					break;
			if (!dpsf) {
				dpsf = kmalloc(sizeof(*dpsf), GFP_KERNEL);
				if (!dpsf)
					continue;
				*dpsf = *psf;
				rcu_assign_pointer(dpsf->sf_next,
						   mc_dereference(pmc->mca_tomb,
								  pmc->idev));
				rcu_assign_pointer(pmc->mca_tomb, dpsf);
			}
			dpsf->sf_crcount = qrv;
			rv++;
		}
	}
	return rv;
}
/*
 * Add multicast source filter list to the interface list
 * called with mc_lock
 *
 * Adds @sfcount sources from @psfsrc[] under filter mode @sfmode to
 * group @pmca on @idev, rolling back on partial failure.  Detects a
 * resulting filter-mode flip (INCLUDE <-> EXCLUDE) and schedules the
 * corresponding change reports.  @delta distinguishes per-socket
 * delta updates (mca_sfcount untouched) from full additions.
 * Returns 0 or the first error encountered.
 */
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
	int sfmode, int sfcount, const struct in6_addr *psfsrc,
	int delta)
{
	struct ifmcaddr6 *pmc;
	int isexclude;
	int i, err;

	if (!idev)
		return -ENODEV;

	for_each_mc_mclock(idev, pmc) {
		if (ipv6_addr_equal(pmca, &pmc->mca_addr))
			break;
	}
	if (!pmc)
		return -ESRCH;

	/* snapshot per-source "active" state before mutating */
	sf_markstate(pmc);
	isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
	if (!delta)
		pmc->mca_sfcount[sfmode]++;
	err = 0;
	for (i = 0; i < sfcount; i++) {
		err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
		if (err)
			break;
	}
	if (err) {
		int j;

		/* undo everything added so far */
		if (!delta)
			pmc->mca_sfcount[sfmode]--;
		for (j = 0; j < i; j++)
			ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
	} else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
		struct ip6_sf_list *psf;

		/* filter mode change */
		if (pmc->mca_sfcount[MCAST_EXCLUDE])
			pmc->mca_sfmode = MCAST_EXCLUDE;
		else if (pmc->mca_sfcount[MCAST_INCLUDE])
			pmc->mca_sfmode = MCAST_INCLUDE;
		/* else no filters; keep old mode for reports */

		pmc->mca_crcount = idev->mc_qrv;
		idev->mc_ifc_count = pmc->mca_crcount;
		/* suppress source-list change records during a
		 * filter-mode change (RFC 3810 6.1)
		 */
		for_each_psf_mclock(pmc, psf)
			psf->sf_crcount = 0;
		mld_ifc_event(idev);
	} else if (sf_setstate(pmc)) {
		mld_ifc_event(idev);
	}
	return err;
}
/* Free all source filter state of @pmc (both tomb and live lists) and
 * reset the group to default any-source EXCLUDE mode.
 * called with mc_lock
 */
static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
{
	struct ip6_sf_list *psf, *nextpsf;

	for (psf = mc_dereference(pmc->mca_tomb, pmc->idev);
	     psf;
	     psf = nextpsf) {
		nextpsf = mc_dereference(psf->sf_next, pmc->idev);
		kfree_rcu(psf, rcu);
	}
	RCU_INIT_POINTER(pmc->mca_tomb, NULL);
	for (psf = mc_dereference(pmc->mca_sources, pmc->idev);
	     psf;
	     psf = nextpsf) {
		nextpsf = mc_dereference(psf->sf_next, pmc->idev);
		kfree_rcu(psf, rcu);
	}
	RCU_INIT_POINTER(pmc->mca_sources, NULL);
	/* default state: exclude nothing (one implicit EXCLUDE ref) */
	pmc->mca_sfmode = MCAST_EXCLUDE;
	pmc->mca_sfcount[MCAST_INCLUDE] = 0;
	pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
}
/* Join-time MLDv1 behaviour for @ma: send an immediate report and arm
 * the per-group work item with a random delay for the retransmission,
 * keeping the existing (possibly shorter) expiry if already pending.
 * called with mc_lock
 */
static void igmp6_join_group(struct ifmcaddr6 *ma)
{
	unsigned long delay;

	if (ma->mca_flags & MAF_NOREPORT)
		return;

	igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);

	delay = get_random_u32_below(unsolicited_report_interval(ma->idev));

	if (cancel_delayed_work(&ma->mca_work)) {
		/* work was pending: drop its reference and reuse its
		 * original expiry so we never push the report later
		 */
		refcount_dec(&ma->mca_refcnt);
		delay = ma->mca_work.timer.expires - jiffies;
	}

	if (!mod_delayed_work(mld_wq, &ma->mca_work, delay))
		refcount_inc(&ma->mca_refcnt);
	ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
}
/* Remove a socket's source filter contribution for membership @iml:
 * either the any-source empty-exclude case (no sflist) or the whole
 * per-socket source list, which is then detached, uncharged from the
 * socket and freed via RCU.  Takes idev->mc_lock around the update
 * when @idev is available.  Returns the ip6_mc_del_src() result.
 */
static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
	struct inet6_dev *idev)
{
	struct ip6_sf_socklist *psl;
	int err;

	psl = sock_dereference(iml->sflist, sk);

	if (idev)
		mutex_lock(&idev->mc_lock);

	if (!psl) {
		/* any-source empty exclude case */
		err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
	} else {
		err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
				     psl->sl_count, psl->sl_addr, 0);
		RCU_INIT_POINTER(iml->sflist, NULL);
		atomic_sub(struct_size(psl, sl_addr, psl->sl_max),
			   &sk->sk_omem_alloc);
		kfree_rcu(psl, rcu);
	}

	if (idev)
		mutex_unlock(&idev->mc_lock);

	return err;
}
  2238. /* called with mc_lock */
  2239. static void igmp6_leave_group(struct ifmcaddr6 *ma)
  2240. {
  2241. if (mld_in_v1_mode(ma->idev)) {
  2242. if (ma->mca_flags & MAF_LAST_REPORTER) {
  2243. igmp6_send(&ma->mca_addr, ma->idev->dev,
  2244. ICMPV6_MGM_REDUCTION);
  2245. }
  2246. } else {
  2247. mld_add_delrec(ma->idev, ma);
  2248. mld_ifc_event(ma->idev);
  2249. }
  2250. }
/* Deferred response to a general query: send the full current-state
 * report and clear the "general query pending" flag.  Drops the idev
 * reference taken when the work was scheduled.
 */
static void mld_gq_work(struct work_struct *work)
{
	struct inet6_dev *idev = container_of(to_delayed_work(work),
					      struct inet6_dev,
					      mc_gq_work);

	mutex_lock(&idev->mc_lock);
	mld_send_report(idev, NULL);
	idev->mc_gq_running = 0;
	mutex_unlock(&idev->mc_lock);

	in6_dev_put(idev);
}
/* Deferred interface-change report transmission; re-arms itself until
 * mc_ifc_count retransmissions are exhausted.  Drops the idev
 * reference taken when the work was scheduled.
 */
static void mld_ifc_work(struct work_struct *work)
{
	struct inet6_dev *idev = container_of(to_delayed_work(work),
					      struct inet6_dev,
					      mc_ifc_work);

	mutex_lock(&idev->mc_lock);
	mld_send_cr(idev);

	if (idev->mc_ifc_count) {
		idev->mc_ifc_count--;
		if (idev->mc_ifc_count)
			mld_ifc_start_work(idev,
					   unsolicited_report_interval(idev));
	}
	mutex_unlock(&idev->mc_lock);
	in6_dev_put(idev);
}
/* An interface state change happened: schedule qrv change-report
 * retransmissions (immediately, 1 jiffy).  No-op in MLDv1 mode,
 * which has no change records.
 * called with mc_lock
 */
static void mld_ifc_event(struct inet6_dev *idev)
{
	if (mld_in_v1_mode(idev))
		return;

	idev->mc_ifc_count = idev->mc_qrv;
	mld_ifc_start_work(idev, 1);
}
/* Per-group delayed report work: answer with an MLDv1 report or an
 * MLDv2 single-group report depending on the interface mode, mark us
 * as the last reporter, and clear the running-timer flag.  Drops the
 * group reference held by the work item.
 */
static void mld_mca_work(struct work_struct *work)
{
	struct ifmcaddr6 *ma = container_of(to_delayed_work(work),
					    struct ifmcaddr6, mca_work);

	mutex_lock(&ma->idev->mc_lock);
	if (mld_in_v1_mode(ma->idev))
		igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
	else
		mld_send_report(ma->idev, ma);
	ma->mca_flags |=  MAF_LAST_REPORTER;
	ma->mca_flags &= ~MAF_TIMER_RUNNING;
	mutex_unlock(&ma->idev->mc_lock);

	ma_put(ma);
}
  2300. /* Device changing type */
  2301. void ipv6_mc_unmap(struct inet6_dev *idev)
  2302. {
  2303. struct ifmcaddr6 *i;
  2304. /* Install multicast list, except for all-nodes (already installed) */
  2305. mutex_lock(&idev->mc_lock);
  2306. for_each_mc_mclock(idev, i)
  2307. igmp6_group_dropped(i);
  2308. mutex_unlock(&idev->mc_lock);
  2309. }
/* Device finished changing type: re-install multicast state by
 * running the same path as device-up.
 */
void ipv6_mc_remap(struct inet6_dev *idev)
{
	ipv6_mc_up(idev);
}
/* Device going down */
/* Withdraw all groups and cancel every outstanding MLD work item.
 * Ordering matters: groups are dropped first so that nothing
 * re-schedules the ifc work we are about to stop.
 */
void ipv6_mc_down(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	mutex_lock(&idev->mc_lock);
	/* Withdraw multicast list */
	for_each_mc_mclock(idev, i)
		igmp6_group_dropped(i);
	mutex_unlock(&idev->mc_lock);

	/* Should stop work after group drop. or we will
	 * start work again in mld_ifc_event()
	 */
	mld_query_stop_work(idev);
	mld_report_stop_work(idev);

	mutex_lock(&idev->mc_lock);
	mld_ifc_stop_work(idev);
	mld_gq_stop_work(idev);
	mutex_unlock(&idev->mc_lock);

	mld_dad_stop_work(idev);
}
/* Reset per-interface MLD parameters to their defaults.
 * NOTE(review): mc_maxdelay presumably derives from the values set
 * above via unsolicited_report_interval() — keep it last; confirm
 * against that helper (outside this view).
 */
static void ipv6_mc_reset(struct inet6_dev *idev)
{
	idev->mc_qrv = sysctl_mld_qrv;
	idev->mc_qi = MLD_QI_DEFAULT;
	idev->mc_qri = MLD_QRI_DEFAULT;
	idev->mc_v1_seen = 0;
	idev->mc_maxdelay = unsolicited_report_interval(idev);
}
/* Device going up */
/* Re-install every configured group: clear any stale deleted-group
 * state for it, then run the join path again.
 */
void ipv6_mc_up(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Install multicast list, except for all-nodes (already installed) */
	ipv6_mc_reset(idev);
	mutex_lock(&idev->mc_lock);
	for_each_mc_mclock(idev, i) {
		mld_del_delrec(idev, i);
		igmp6_group_added(i);
	}
	mutex_unlock(&idev->mc_lock);
}
/* IPv6 device initialization. */
/* Set up all per-device MLD state: work items, query/report queues
 * and their locks, the mc_lock mutex, and default protocol timers.
 */
void ipv6_mc_init_dev(struct inet6_dev *idev)
{
	idev->mc_gq_running = 0;
	INIT_DELAYED_WORK(&idev->mc_gq_work, mld_gq_work);
	RCU_INIT_POINTER(idev->mc_tomb, NULL);
	idev->mc_ifc_count = 0;
	INIT_DELAYED_WORK(&idev->mc_ifc_work, mld_ifc_work);
	INIT_DELAYED_WORK(&idev->mc_dad_work, mld_dad_work);
	INIT_DELAYED_WORK(&idev->mc_query_work, mld_query_work);
	INIT_DELAYED_WORK(&idev->mc_report_work, mld_report_work);
	skb_queue_head_init(&idev->mc_query_queue);
	skb_queue_head_init(&idev->mc_report_queue);
	spin_lock_init(&idev->mc_query_lock);
	spin_lock_init(&idev->mc_report_lock);
	mutex_init(&idev->mc_lock);
	ipv6_mc_reset(idev);
}
/*
 * Device is about to be destroyed: clean up.
 *
 * Stops all work, flushes deleted-group and pending query/report
 * state, drops the well-known all-nodes (and, for routers, the
 * all-routers) membership, then frees every remaining group.
 */
void ipv6_mc_destroy_dev(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Deactivate works */
	ipv6_mc_down(idev);
	mutex_lock(&idev->mc_lock);
	mld_clear_delrec(idev);
	mutex_unlock(&idev->mc_lock);
	mld_clear_query(idev);
	mld_clear_report(idev);

	/* Delete all-nodes address. */
	/* We cannot call ipv6_dev_mc_dec() directly, our caller in
	 * addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
	 * fail.
	 */
	__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);

	if (idev->cnf.forwarding)
		__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);

	mutex_lock(&idev->mc_lock);
	while ((i = mc_dereference(idev->mc_list, idev))) {
		rcu_assign_pointer(idev->mc_list, mc_dereference(i->next, idev));

		ip6_mc_clear_src(i);
		ma_put(i);
	}
	mutex_unlock(&idev->mc_lock);
}
  2402. static void ipv6_mc_rejoin_groups(struct inet6_dev *idev)
  2403. {
  2404. struct ifmcaddr6 *pmc;
  2405. ASSERT_RTNL();
  2406. mutex_lock(&idev->mc_lock);
  2407. if (mld_in_v1_mode(idev)) {
  2408. for_each_mc_mclock(idev, pmc)
  2409. igmp6_join_group(pmc);
  2410. } else {
  2411. mld_send_report(idev, NULL);
  2412. }
  2413. mutex_unlock(&idev->mc_lock);
  2414. }
  2415. static int ipv6_mc_netdev_event(struct notifier_block *this,
  2416. unsigned long event,
  2417. void *ptr)
  2418. {
  2419. struct net_device *dev = netdev_notifier_info_to_dev(ptr);
  2420. struct inet6_dev *idev = __in6_dev_get(dev);
  2421. switch (event) {
  2422. case NETDEV_RESEND_IGMP:
  2423. if (idev)
  2424. ipv6_mc_rejoin_groups(idev);
  2425. break;
  2426. default:
  2427. break;
  2428. }
  2429. return NOTIFY_DONE;
  2430. }
/* Registered to catch NETDEV_RESEND_IGMP and rejoin groups */
static struct notifier_block igmp6_netdev_notifier = {
	.notifier_call = ipv6_mc_netdev_event,
};
#ifdef CONFIG_PROC_FS
/* Iterator cursor for the /proc multicast-group seq_file: the netdev
 * currently being walked and its inet6_dev (NULL past the end).
 */
struct igmp6_mc_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct inet6_dev *idev;
};

#define igmp6_mc_seq_private(seq)	((struct igmp6_mc_iter_state *)(seq)->private)
/* Return the first multicast group in the namespace: scan all netdevs
 * under RCU and pick the head of the first non-empty mc_list,
 * recording the device/idev in the iterator state.
 */
static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
{
	struct ifmcaddr6 *im = NULL;
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
	struct net *net = seq_file_net(seq);

	state->idev = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct inet6_dev *idev;
		idev = __in6_dev_get(state->dev);
		if (!idev)
			continue;
		im = rcu_dereference(idev->mc_list);
		if (im) {
			state->idev = idev;
			break;
		}
	}
	return im;
}
/* Advance to the next group, hopping to the next device's mc_list
 * when the current one is exhausted.  Returns NULL at the end of the
 * namespace (state->idev is then cleared).
 */
static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)
{
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	im = rcu_dereference(im->next);
	while (!im) {
		state->dev = next_net_device_rcu(state->dev);
		if (!state->dev) {
			state->idev = NULL;
			break;
		}
		state->idev = __in6_dev_get(state->dev);
		if (!state->idev)
			continue;
		im = rcu_dereference(state->idev->mc_list);
	}
	return im;
}
  2477. static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
  2478. {
  2479. struct ifmcaddr6 *im = igmp6_mc_get_first(seq);
  2480. if (im)
  2481. while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
  2482. --pos;
  2483. return pos ? NULL : im;
  2484. }
/* seq_file start: take the RCU read lock for the whole dump and seek to
 * the requested position.  The matching unlock is in igmp6_mc_seq_stop().
 */
static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return igmp6_mc_get_idx(seq, *pos);
}
  2491. static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  2492. {
  2493. struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);
  2494. ++*pos;
  2495. return im;
  2496. }
  2497. static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
  2498. __releases(RCU)
  2499. {
  2500. struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
  2501. if (likely(state->idev))
  2502. state->idev = NULL;
  2503. state->dev = NULL;
  2504. rcu_read_unlock();
  2505. }
/* Emit one /proc/net/igmp6 line: ifindex, device name, group address,
 * refcount, flags, and (when the report timer is pending) the remaining
 * time in clock ticks, else 0.
 */
static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
{
	struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
	struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);

	seq_printf(seq,
		   "%-4d %-15s %pi6 %5d %08X %ld\n",
		   state->dev->ifindex, state->dev->name,
		   &im->mca_addr,
		   im->mca_users, im->mca_flags,
		   (im->mca_flags & MAF_TIMER_RUNNING) ?
		   /* time left until the delayed work's timer fires */
		   jiffies_to_clock_t(im->mca_work.timer.expires - jiffies) : 0);
	return 0;
}
/* seq_file operations backing /proc/net/igmp6. */
static const struct seq_operations igmp6_mc_seq_ops = {
	.start = igmp6_mc_seq_start,
	.next = igmp6_mc_seq_next,
	.stop = igmp6_mc_seq_stop,
	.show = igmp6_mc_seq_show,
};
/*
 * Iterator state for /proc/net/mcfilter6: walks every device, every
 * multicast address on it, and every source filter on that address.
 */
struct igmp6_mcf_iter_state {
	struct seq_net_private p;	/* netns bookkeeping for seq_file_net() */
	struct net_device *dev;		/* device currently being walked */
	struct inet6_dev *idev;		/* IPv6 state of that device, or NULL */
	struct ifmcaddr6 *im;		/* group whose sources are being walked */
};

#define igmp6_mcf_seq_private(seq)	((struct igmp6_mcf_iter_state *)(seq)->private)
/* Return the first source-filter entry in the namespace: the head of
 * mca_sources of the first group of the first device that has one.
 * Records the matching device/idev/group in the iterator state; returns
 * NULL when nothing matches.  Runs under rcu_read_lock().
 *
 * NOTE(review): only the *first* group of each device is probed here;
 * groups past the head with sources are picked up by
 * igmp6_mcf_get_next() during iteration.
 */
static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
{
	struct ip6_sf_list *psf = NULL;
	struct ifmcaddr6 *im = NULL;
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
	struct net *net = seq_file_net(seq);

	state->idev = NULL;
	state->im = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct inet6_dev *idev;

		idev = __in6_dev_get(state->dev);
		if (unlikely(idev == NULL))
			continue;	/* device has no IPv6 state */

		im = rcu_dereference(idev->mc_list);
		if (likely(im)) {
			psf = rcu_dereference(im->mca_sources);
			if (likely(psf)) {
				state->im = im;
				state->idev = idev;
				break;
			}
		}
	}
	return psf;
}
/* Advance to the next source-filter entry: next source on the current
 * group; else first source of the next group; else move to the next
 * device with IPv6 state.  Returns NULL (with state->idev cleared) when
 * the device list is exhausted.  Runs under rcu_read_lock().
 */
static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf)
{
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	psf = rcu_dereference(psf->sf_next);
	while (!psf) {
		/* current group's sources exhausted: advance the group */
		state->im = rcu_dereference(state->im->next);
		while (!state->im) {
			/* current device's groups exhausted: advance the device */
			state->dev = next_net_device_rcu(state->dev);
			if (!state->dev) {
				state->idev = NULL;
				goto out;
			}
			state->idev = __in6_dev_get(state->dev);
			if (!state->idev)
				continue;	/* skip devices without IPv6 state */
			state->im = rcu_dereference(state->idev->mc_list);
		}
		psf = rcu_dereference(state->im->mca_sources);
	}
out:
	return psf;
}
  2579. static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
  2580. {
  2581. struct ip6_sf_list *psf = igmp6_mcf_get_first(seq);
  2582. if (psf)
  2583. while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL)
  2584. --pos;
  2585. return pos ? NULL : psf;
  2586. }
/* seq_file start: take the RCU read lock for the whole dump.  Position
 * 0 yields SEQ_START_TOKEN so show() can print the header line; data
 * entries are therefore offset by one (*pos - 1).
 */
static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
  2593. static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  2594. {
  2595. struct ip6_sf_list *psf;
  2596. if (v == SEQ_START_TOKEN)
  2597. psf = igmp6_mcf_get_first(seq);
  2598. else
  2599. psf = igmp6_mcf_get_next(seq, v);
  2600. ++*pos;
  2601. return psf;
  2602. }
  2603. static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
  2604. __releases(RCU)
  2605. {
  2606. struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
  2607. if (likely(state->im))
  2608. state->im = NULL;
  2609. if (likely(state->idev))
  2610. state->idev = NULL;
  2611. state->dev = NULL;
  2612. rcu_read_unlock();
  2613. }
/* Emit one /proc/net/mcfilter6 line: column header for the start token,
 * otherwise ifindex, device name, group address, source address, and
 * the INCLUDE/EXCLUDE reference counts of the source filter.
 */
static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
{
	struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
	struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Idx Device Multicast Address Source Address INC EXC\n");
	} else {
		seq_printf(seq,
			   "%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
			   state->dev->ifindex, state->dev->name,
			   &state->im->mca_addr,
			   &psf->sf_addr,
			   psf->sf_count[MCAST_INCLUDE],
			   psf->sf_count[MCAST_EXCLUDE]);
	}
	return 0;
}
/* seq_file operations backing /proc/net/mcfilter6. */
static const struct seq_operations igmp6_mcf_seq_ops = {
	.start = igmp6_mcf_seq_start,
	.next = igmp6_mcf_seq_next,
	.stop = igmp6_mcf_seq_stop,
	.show = igmp6_mcf_seq_show,
};
  2637. static int __net_init igmp6_proc_init(struct net *net)
  2638. {
  2639. int err;
  2640. err = -ENOMEM;
  2641. if (!proc_create_net("igmp6", 0444, net->proc_net, &igmp6_mc_seq_ops,
  2642. sizeof(struct igmp6_mc_iter_state)))
  2643. goto out;
  2644. if (!proc_create_net("mcfilter6", 0444, net->proc_net,
  2645. &igmp6_mcf_seq_ops,
  2646. sizeof(struct igmp6_mcf_iter_state)))
  2647. goto out_proc_net_igmp6;
  2648. err = 0;
  2649. out:
  2650. return err;
  2651. out_proc_net_igmp6:
  2652. remove_proc_entry("igmp6", net->proc_net);
  2653. goto out;
  2654. }
/* Remove the per-namespace proc entries created by igmp6_proc_init(). */
static void __net_exit igmp6_proc_exit(struct net *net)
{
	remove_proc_entry("mcfilter6", net->proc_net);
	remove_proc_entry("igmp6", net->proc_net);
}
#else
/* !CONFIG_PROC_FS: no proc entries, both hooks become no-ops. */
static inline int igmp6_proc_init(struct net *net)
{
	return 0;
}
static inline void igmp6_proc_exit(struct net *net)
{
}
#endif
/* Per-namespace init: create the MLD control socket (hop limit 1, as
 * required for link-local MLD traffic), the autojoin control socket,
 * and the proc entries.  Unwinds in reverse order on failure via the
 * goto ladder.  Returns 0 or a negative errno.
 */
static int __net_init igmp6_net_init(struct net *net)
{
	int err;

	err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
				   SOCK_RAW, IPPROTO_ICMPV6, net);
	if (err < 0) {
		pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
		       err);
		goto out;
	}

	inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;
	net->ipv6.igmp_sk->sk_allocation = GFP_KERNEL;

	err = inet_ctl_sock_create(&net->ipv6.mc_autojoin_sk, PF_INET6,
				   SOCK_RAW, IPPROTO_ICMPV6, net);
	if (err < 0) {
		pr_err("Failed to initialize the IGMP6 autojoin socket (err %d)\n",
		       err);
		goto out_sock_create;
	}

	err = igmp6_proc_init(net);
	if (err)
		goto out_sock_create_autojoin;

	return 0;

out_sock_create_autojoin:
	inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
out_sock_create:
	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
out:
	return err;
}
/* Per-namespace teardown: destroy both control sockets, then remove the
 * proc entries created by igmp6_net_init().
 */
static void __net_exit igmp6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.igmp_sk);
	inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
	igmp6_proc_exit(net);
}
/* Per-network-namespace init/exit hooks, registered in igmp6_init(). */
static struct pernet_operations igmp6_net_ops = {
	.init = igmp6_net_init,
	.exit = igmp6_net_exit,
};
  2709. int __init igmp6_init(void)
  2710. {
  2711. int err;
  2712. err = register_pernet_subsys(&igmp6_net_ops);
  2713. if (err)
  2714. return err;
  2715. mld_wq = create_workqueue("mld");
  2716. if (!mld_wq) {
  2717. unregister_pernet_subsys(&igmp6_net_ops);
  2718. return -ENOMEM;
  2719. }
  2720. return err;
  2721. }
/* Late init: hook into netdevice notifications (NETDEV_RESEND_IGMP). */
int __init igmp6_late_init(void)
{
	return register_netdevice_notifier(&igmp6_netdev_notifier);
}
/* Module cleanup: unregister the pernet hooks, then destroy the MLD
 * workqueue.  NOTE(review): pernet exit runs first, presumably so no
 * namespace can queue further MLD work before the queue is destroyed.
 */
void igmp6_cleanup(void)
{
	unregister_pernet_subsys(&igmp6_net_ops);
	destroy_workqueue(mld_wq);
}
/* Late cleanup: detach from netdevice notifications. */
void igmp6_late_cleanup(void)
{
	unregister_netdevice_notifier(&igmp6_netdev_notifier);
}