ip6_input.c

/*
 *	IPv6 input
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *	Ian P. Morris		<I.P.Morris@soton.ac.uk>
 *
 *	Based in linux/net/ipv4/ip_input.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
/* Changes
 *
 *	Mitsuru KANDA @USAGI and
 *	YOSHIFUJI Hideaki @USAGI: Remove ipv6_parse_exthdrs().
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/icmpv6.h>
#include <linux/mroute6.h>
#include <linux/slab.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/xfrm.h>
#include <net/inet_ecn.h>
#include <net/dst_metadata.h>

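/* Attempt early demux (when enabled and the skb has neither a dst nor a
 * socket attached yet) and, if the skb still has no valid dst afterwards,
 * perform the IPv6 route lookup.
 */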
static void ip6_rcv_finish_core(struct net *net, struct sock *sk,
				struct sk_buff *skb)
{
	void (*edemux)(struct sk_buff *skb);

	if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
		const struct inet6_protocol *ipprot;

		ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
		if (ipprot && (edemux = READ_ONCE(ipprot->early_demux)))
			edemux(skb);
	}
	if (!skb_valid_dst(skb))
		ip6_route_input(skb);
}

int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	/* if ingress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip6_rcv(skb);
	if (!skb)
		return NET_RX_SUCCESS;
	ip6_rcv_finish_core(net, sk, skb);

	return dst_input(skb);
}

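/* List-receive path: ip6_list_rcv_finish() groups consecutive packets that
 * resolved to the same dst into a sublist, and ip6_sublist_rcv_finish()
 * hands each sublist on to dst_input().
 */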
static void ip6_sublist_rcv_finish(struct list_head *head)
{
	struct sk_buff *skb, *next;

	list_for_each_entry_safe(skb, next, head, list) {
		skb_list_del_init(skb);
		dst_input(skb);
	}
}

static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
				struct list_head *head)
{
	struct dst_entry *curr_dst = NULL;
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct dst_entry *dst;

		skb_list_del_init(skb);
		/* if ingress device is enslaved to an L3 master device pass the
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip6_rcv(skb);
		if (!skb)
			continue;
		ip6_rcv_finish_core(net, sk, skb);
		dst = skb_dst(skb);
		if (curr_dst != dst) {
			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip6_sublist_rcv_finish(&sublist);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dst = dst;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	ip6_sublist_rcv_finish(&sublist);
}

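/* Core receive validation shared by ipv6_rcv() and ipv6_list_rcv(): checks
 * the IPv6 header and address rules (RFC 4291), updates MIB counters, trims
 * the skb to the payload length and parses a leading Hop-by-Hop header.
 * Returns the skb to hand to the PRE_ROUTING hook, or NULL if it was dropped.
 */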
static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
				    struct net *net)
{
	const struct ipv6hdr *hdr;
	u32 pkt_len;
	struct inet6_dev *idev;

	if (skb->pkt_type == PACKET_OTHERHOST) {
		kfree_skb(skb);
		return NULL;
	}

	rcu_read_lock();

	idev = __in6_dev_get(skb->dev);

	__IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_IN, skb->len);

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
	    !idev || unlikely(idev->cnf.disable_ipv6)) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));

	/*
	 * Store the incoming device index: once the packet is queued,
	 * we can no longer refer to skb->dev.
	 *
	 * BTW, when we send a packet to one of our own local addresses on a
	 * non-loopback interface (e.g. ethX), it is delivered here via the
	 * loopback interface (lo); skb->dev = loopback_dev. It should,
	 * however, be treated as if it arrived via the sending interface
	 * (ethX), because of the nature of the scoping architecture.
	 * --yoshfuji
	 */
	IP6CB(skb)->iif = skb_valid_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;

	if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
		goto err;

	hdr = ipv6_hdr(skb);

	if (hdr->version != 6)
		goto err;

	__IP6_ADD_STATS(net, idev,
			IPSTATS_MIB_NOECTPKTS +
				(ipv6_get_dsfield(hdr) & INET_ECN_MASK),
			max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
	/*
	 * RFC4291 2.5.3
	 * The loopback address must not be used as the source address in IPv6
	 * packets that are sent outside of a single node. [..]
	 * A packet received on an interface with a destination address
	 * of loopback must be dropped.
	 */
	if ((ipv6_addr_loopback(&hdr->saddr) ||
	     ipv6_addr_loopback(&hdr->daddr)) &&
	    !(dev->flags & IFF_LOOPBACK))
		goto err;

	/* RFC4291 Errata ID: 3480
	 * Interface-Local scope spans only a single interface on a
	 * node and is useful only for loopback transmission of
	 * multicast.  Packets with interface-local scope received
	 * from another node must be discarded.
	 */
	if (!(skb->pkt_type == PACKET_LOOPBACK ||
	      dev->flags & IFF_LOOPBACK) &&
	    ipv6_addr_is_multicast(&hdr->daddr) &&
	    IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1)
		goto err;

	/* If enabled, drop unicast packets that were encapsulated in link-layer
	 * multicast or broadcast, to protect against the so-called "hole-196"
	 * attack in 802.11 wireless.
	 */
	if (!ipv6_addr_is_multicast(&hdr->daddr) &&
	    (skb->pkt_type == PACKET_BROADCAST ||
	     skb->pkt_type == PACKET_MULTICAST) &&
	    idev->cnf.drop_unicast_in_l2_multicast)
		goto err;

	/* RFC4291 2.7
	 * Nodes must not originate a packet to a multicast address whose scope
	 * field contains the reserved value 0; if such a packet is received, it
	 * must be silently dropped.
	 */
	if (ipv6_addr_is_multicast(&hdr->daddr) &&
	    IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 0)
		goto err;

	/*
	 * RFC4291 2.7
	 * Multicast addresses must not be used as source addresses in IPv6
	 * packets or appear in any Routing header.
	 */
	if (ipv6_addr_is_multicast(&hdr->saddr))
		goto err;

	skb->transport_header = skb->network_header + sizeof(*hdr);
	IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);

	pkt_len = ntohs(hdr->payload_len);

	/* pkt_len may be zero if Jumbo payload option is present */
	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
		if (pkt_len + sizeof(struct ipv6hdr) > skb->len) {
			__IP6_INC_STATS(net,
					idev, IPSTATS_MIB_INTRUNCATEDPKTS);
			goto drop;
		}
		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
			goto drop;
		}
		hdr = ipv6_hdr(skb);
	}

	if (hdr->nexthdr == NEXTHDR_HOP) {
		if (ipv6_parse_hopopts(skb) < 0) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
			rcu_read_unlock();
			return NULL;
		}
	}

	rcu_read_unlock();

	/* Must drop socket now because of tproxy. */
	skb_orphan(skb);

	return skb;
err:
	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return NULL;
}

int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct net *net = dev_net(skb->dev);

	skb = ip6_rcv_core(skb, dev, net);
	if (skb == NULL)
		return NET_RX_DROP;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
		       net, NULL, skb, dev, NULL,
		       ip6_rcv_finish);
}

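/* Run the PRE_ROUTING netfilter hook over a whole sublist, then finish
 * receive for the packets that survive it.
 */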
static void ip6_sublist_rcv(struct list_head *head, struct net_device *dev,
			    struct net *net)
{
	NF_HOOK_LIST(NFPROTO_IPV6, NF_INET_PRE_ROUTING, net, NULL,
		     head, dev, NULL, ip6_rcv_finish);
	ip6_list_rcv_finish(net, NULL, head);
}

/* Receive a list of IPv6 packets */
void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
		   struct net_device *orig_dev)
{
	struct net_device *curr_dev = NULL;
	struct net *curr_net = NULL;
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *dev = skb->dev;
		struct net *net = dev_net(dev);

		skb_list_del_init(skb);
		skb = ip6_rcv_core(skb, dev, net);
		if (skb == NULL)
			continue;

		if (curr_dev != dev || curr_net != net) {
			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip6_sublist_rcv(&sublist, curr_dev, curr_net);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dev = dev;
			curr_net = net;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	ip6_sublist_rcv(&sublist, curr_dev, curr_net);
}

/*
 *	Deliver the packet to the host
 */
static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	const struct inet6_protocol *ipprot;
	struct inet6_dev *idev;
	unsigned int nhoff;
	int nexthdr;
	bool raw;
	bool have_final = false;

	/*
	 *	Parse extension headers
	 */

	rcu_read_lock();
resubmit:
	idev = ip6_dst_idev(skb_dst(skb));
	if (!pskb_pull(skb, skb_transport_offset(skb)))
		goto discard;
	nhoff = IP6CB(skb)->nhoff;
	nexthdr = skb_network_header(skb)[nhoff];

resubmit_final:
	raw = raw6_local_deliver(skb, nexthdr);
	ipprot = rcu_dereference(inet6_protos[nexthdr]);
	if (ipprot) {
		int ret;

		if (have_final) {
			if (!(ipprot->flags & INET6_PROTO_FINAL)) {
				/* Once we've seen a final protocol don't
				 * allow encapsulation on any non-final
				 * ones. This allows foo in UDP encapsulation
				 * to work.
				 */
				goto discard;
			}
		} else if (ipprot->flags & INET6_PROTO_FINAL) {
			const struct ipv6hdr *hdr;

			/* Only do this once for first final protocol */
			have_final = true;

			/* Free reference early: we don't need it any more,
			   and it may hold ip_conntrack module loaded
			   indefinitely. */
			nf_reset(skb);

			skb_postpull_rcsum(skb, skb_network_header(skb),
					   skb_network_header_len(skb));
			hdr = ipv6_hdr(skb);
			if (ipv6_addr_is_multicast(&hdr->daddr) &&
			    !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr,
						 &hdr->saddr) &&
			    !ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb)))
				goto discard;
		}
		if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
		    !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto discard;

		ret = ipprot->handler(skb);
		if (ret > 0) {
			if (ipprot->flags & INET6_PROTO_FINAL) {
				/* Not an extension header, most likely UDP
				 * encapsulation. Use return value as nexthdr
				 * protocol not nhoff (which presumably is
				 * not set by handler).
				 */
				nexthdr = ret;
				goto resubmit_final;
			} else {
				goto resubmit;
			}
		} else if (ret == 0) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
		}
	} else {
		if (!raw) {
			if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				__IP6_INC_STATS(net, idev,
						IPSTATS_MIB_INUNKNOWNPROTOS);
				icmpv6_send(skb, ICMPV6_PARAMPROB,
					    ICMPV6_UNK_NEXTHDR, nhoff);
			}
			kfree_skb(skb);
		} else {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
			consume_skb(skb);
		}
	}
	rcu_read_unlock();
	return 0;

discard:
	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
	rcu_read_unlock();
	kfree_skb(skb);
	return 0;
}

int ip6_input(struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN,
		       dev_net(skb->dev), NULL, skb, skb->dev, NULL,
		       ip6_input_finish);
}
EXPORT_SYMBOL_GPL(ip6_input);

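/* Multicast input: deliver locally if this host is a member of the
 * destination group and, when multicast forwarding is enabled, hand a
 * clone (or the original skb) to ip6_mr_input() for routing.
 */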
int ip6_mc_input(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	bool deliver;

	__IP6_UPD_PO_STATS(dev_net(skb_dst(skb)->dev),
			 __in6_dev_get_safely(skb->dev), IPSTATS_MIB_INMCAST,
			 skb->len);

	hdr = ipv6_hdr(skb);
	deliver = ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL);

#ifdef CONFIG_IPV6_MROUTE
	/*
	 *      IPv6 multicast router mode is now supported ;)
	 */
	if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding &&
	    !(ipv6_addr_type(&hdr->daddr) &
	      (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) &&
	    likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
		/*
		 * Okay, we try to forward - split and duplicate
		 * packets.
		 */
		struct sk_buff *skb2;
		struct inet6_skb_parm *opt = IP6CB(skb);

		/* Check for MLD */
		if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
			/* Check if this is an MLD message */
			u8 nexthdr = hdr->nexthdr;
			__be16 frag_off;
			int offset;

			/* Check if the value of Router Alert
			 * is for MLD (0x0000).
			 */
			if (opt->ra == htons(IPV6_OPT_ROUTERALERT_MLD)) {
				deliver = false;

				if (!ipv6_ext_hdr(nexthdr)) {
					/* BUG */
					goto out;
				}
				offset = ipv6_skip_exthdr(skb, sizeof(*hdr),
							  &nexthdr, &frag_off);
				if (offset < 0)
					goto out;

				if (ipv6_is_mld(skb, nexthdr, offset))
					deliver = true;

				goto out;
			}
			/* unknown RA - process it normally */
		}

		if (deliver)
			skb2 = skb_clone(skb, GFP_ATOMIC);
		else {
			skb2 = skb;
			skb = NULL;
		}

		if (skb2) {
			ip6_mr_input(skb2);
		}
	}
out:
#endif
	if (likely(deliver))
		ip6_input(skb);
	else {
		/* discard */
		kfree_skb(skb);
	}

	return 0;
}