xfrm_interface.c

// SPDX-License-Identifier: GPL-2.0
/*
 *	XFRM virtual interface
 *
 *	Copyright (C) 2018 secunet Security Networks AG
 *
 *	Author:
 *	Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_link.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/etherdevice.h>

static int xfrmi_dev_init(struct net_device *dev);
static void xfrmi_dev_setup(struct net_device *dev);
static struct rtnl_link_ops xfrmi_link_ops __read_mostly;
static unsigned int xfrmi_net_id __read_mostly;

struct xfrmi_net {
	/* lists for storing interfaces in use */
	struct xfrm_if __rcu *xfrmi[1];
};

#define for_each_xfrmi_rcu(start, xi) \
	for (xi = rcu_dereference(start); xi; xi = rcu_dereference(xi->next))

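/* Find the xfrm interface whose if_id matches the given state. Called under
 * RCU on the receive path; only interfaces that are administratively up are
 * returned.
 */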
static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
{
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
	struct xfrm_if *xi;

	for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
		if (x->if_id == xi->p.if_id &&
		    (xi->dev->flags & IFF_UP))
			return xi;
	}

	return NULL;
}

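/* Map an inbound skb carrying a secpath back to its xfrm interface, using
 * the slave device index (sdif) when present and the receiving device's
 * ifindex otherwise.
 */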
static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb,
					    unsigned short family)
{
	struct xfrmi_net *xfrmn;
	struct xfrm_if *xi;
	int ifindex = 0;

	if (!secpath_exists(skb) || !skb->dev)
		return NULL;

	switch (family) {
	case AF_INET6:
		ifindex = inet6_sdif(skb);
		break;
	case AF_INET:
		ifindex = inet_sdif(skb);
		break;
	}

	if (!ifindex)
		ifindex = skb->dev->ifindex;

	xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);

	for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
		if (ifindex == xi->dev->ifindex &&
		    (xi->dev->flags & IFF_UP))
			return xi;
	}

	return NULL;
}

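/* Insert into / remove from the per-netns interface list. Callers hold the
 * RTNL lock; readers walk the list under RCU.
 */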
static void xfrmi_link(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
{
	struct xfrm_if __rcu **xip = &xfrmn->xfrmi[0];

	rcu_assign_pointer(xi->next, rtnl_dereference(*xip));
	rcu_assign_pointer(*xip, xi);
}

static void xfrmi_unlink(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
{
	struct xfrm_if __rcu **xip;
	struct xfrm_if *iter;

	for (xip = &xfrmn->xfrmi[0];
	     (iter = rtnl_dereference(*xip)) != NULL;
	     xip = &iter->next) {
		if (xi == iter) {
			rcu_assign_pointer(*xip, xi->next);
			break;
		}
	}
}

static void xfrmi_dev_free(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);

	gro_cells_destroy(&xi->gro_cells);
	free_percpu(dev->tstats);
}

static int xfrmi_create(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
	int err;

	dev->rtnl_link_ops = &xfrmi_link_ops;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	dev_hold(dev);
	xfrmi_link(xfrmn, xi);

	return 0;

out:
	return err;
}

static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p)
{
	struct xfrm_if __rcu **xip;
	struct xfrm_if *xi;
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);

	for (xip = &xfrmn->xfrmi[0];
	     (xi = rtnl_dereference(*xip)) != NULL;
	     xip = &xi->next)
		if (xi->p.if_id == p->if_id)
			return xi;

	return NULL;
}

static void xfrmi_dev_uninit(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct xfrmi_net *xfrmn = net_generic(xi->net, xfrmi_net_id);

	xfrmi_unlink(xfrmn, xi);
	dev_put(dev);
}

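/* Reset skb metadata before handing the packet to the other side of the
 * interface. When the packet crosses a netns boundary (xnet), also drop
 * state that must not leak between namespaces (secpath, socket, mark).
 */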
static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
{
	skb->tstamp = 0;
	skb->pkt_type = PACKET_HOST;
	skb->skb_iif = 0;
	skb->ignore_df = 0;
	skb_dst_drop(skb);
	nf_reset(skb);
	nf_reset_trace(skb);

	if (!xnet)
		return;

	ipvs_reset(skb);
	secpath_reset(skb);
	skb_orphan(skb);
	skb->mark = 0;
}

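/* Receive callback invoked by the xfrm input path after decapsulation:
 * redirect the packet to the matching xfrm interface, account it in the
 * per-cpu stats and recheck policy when the packet changed namespaces.
 */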
static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
{
	struct pcpu_sw_netstats *tstats;
	struct xfrm_mode *inner_mode;
	struct net_device *dev;
	struct xfrm_state *x;
	struct xfrm_if *xi;
	bool xnet;

	if (err && !skb->sp)
		return 0;

	x = xfrm_input_state(skb);

	xi = xfrmi_lookup(xs_net(x), x);
	if (!xi)
		return 1;

	dev = xi->dev;
	skb->dev = dev;

	if (err) {
		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;

		return 0;
	}

	xnet = !net_eq(xi->net, dev_net(skb->dev));

	if (xnet) {
		inner_mode = x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL) {
				XFRM_INC_STATS(dev_net(skb->dev),
					       LINUX_MIB_XFRMINSTATEMODEERROR);
				return -EINVAL;
			}
		}

		if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb,
				       inner_mode->afinfo->family))
			return -EPERM;
	}

	xfrmi_scrub_packet(skb, xnet);

	tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	return 0;
}

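/* Transmit helper: run the flow through xfrm_lookup_with_ifid() so the
 * policy/state bound to this interface's if_id is used, handle PMTU
 * signalling, then send the transformed packet out via the underlying
 * device.
 */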
static int
xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net_device_stats *stats = &xi->dev->stats;
	struct dst_entry *dst = skb_dst(skb);
	unsigned int length = skb->len;
	struct net_device *tdev;
	struct xfrm_state *x;
	int err = -1;
	int mtu;

	dst_hold(dst);
	dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, xi->p.if_id);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto tx_err_link_failure;
	}

	x = dst->xfrm;
	if (!x)
		goto tx_err_link_failure;

	if (x->if_id != xi->p.if_id)
		goto tx_err_link_failure;

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     dev->name);
		goto tx_err_dst_release;
	}

	mtu = dst_mtu(dst);
	if (skb->len > mtu) {
		skb_dst_update_pmtu_no_confirm(skb, mtu);

		if (skb->protocol == htons(ETH_P_IPV6)) {
			if (mtu < IPV6_MIN_MTU)
				mtu = IPV6_MIN_MTU;

			icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		} else {
			if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
				goto xmit;
			icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				      htonl(mtu));
		}

		dst_release(dst);
		return -EMSGSIZE;
	}

xmit:
	xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
	skb_dst_set(skb, dst);
	skb->dev = tdev;

	err = dst_output(xi->net, skb->sk, skb);
	if (net_xmit_eval(err) == 0) {
		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += length;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);
	} else {
		stats->tx_errors++;
		stats->tx_aborted_errors++;
	}

	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	return err;
}

static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net_device_stats *stats = &xi->dev->stats;
	struct dst_entry *dst = skb_dst(skb);
	struct flowi fl;
	int ret;

	memset(&fl, 0, sizeof(fl));

	switch (skb->protocol) {
	case htons(ETH_P_IPV6):
		xfrm_decode_session(skb, &fl, AF_INET6);
		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
		if (!dst) {
			fl.u.ip6.flowi6_oif = dev->ifindex;
			fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
			dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6);
			if (dst->error) {
				dst_release(dst);
				stats->tx_carrier_errors++;
				goto tx_err;
			}
			skb_dst_set(skb, dst);
		}
		break;
	case htons(ETH_P_IP):
		xfrm_decode_session(skb, &fl, AF_INET);
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		if (!dst) {
			struct rtable *rt;

			fl.u.ip4.flowi4_oif = dev->ifindex;
			fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
			rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4);
			if (IS_ERR(rt)) {
				stats->tx_carrier_errors++;
				goto tx_err;
			}
			skb_dst_set(skb, &rt->dst);
		}
		break;
	default:
		goto tx_err;
	}

	fl.flowi_oif = xi->p.link;

	ret = xfrmi_xmit2(skb, dev, &fl);
	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

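/* ICMP/ICMPv6 error handlers: when a PMTU or redirect message arrives for
 * an ESP/AH/IPComp packet whose state is bound to an xfrm interface,
 * propagate the update to the routing layer.
 */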
static int xfrmi4_err(struct sk_buff *skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct net *net = dev_net(skb->dev);
	int protocol = iph->protocol;
	struct ip_comp_hdr *ipch;
	struct ip_esp_hdr *esph;
	struct ip_auth_hdr *ah;
	struct xfrm_state *x;
	struct xfrm_if *xi;
	__be32 spi;

	switch (protocol) {
	case IPPROTO_ESP:
		esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
		spi = esph->spi;
		break;
	case IPPROTO_AH:
		ah = (struct ip_auth_hdr *)(skb->data + (iph->ihl << 2));
		spi = ah->spi;
		break;
	case IPPROTO_COMP:
		ipch = (struct ip_comp_hdr *)(skb->data + (iph->ihl << 2));
		spi = htonl(ntohs(ipch->cpi));
		break;
	default:
		return 0;
	}

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
		/* fall through */
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      spi, protocol, AF_INET);
	if (!x)
		return 0;

	xi = xfrmi_lookup(net, x);
	if (!xi) {
		xfrm_state_put(x);
		return -1;
	}

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, 0, protocol, 0);
	else
		ipv4_redirect(skb, net, 0, 0, protocol, 0);
	xfrm_state_put(x);

	return 0;
}

static int xfrmi6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		      u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct net *net = dev_net(skb->dev);
	int protocol = iph->nexthdr;
	struct ip_comp_hdr *ipch;
	struct ip_esp_hdr *esph;
	struct ip_auth_hdr *ah;
	struct xfrm_state *x;
	struct xfrm_if *xi;
	__be32 spi;

	switch (protocol) {
	case IPPROTO_ESP:
		esph = (struct ip_esp_hdr *)(skb->data + offset);
		spi = esph->spi;
		break;
	case IPPROTO_AH:
		ah = (struct ip_auth_hdr *)(skb->data + offset);
		spi = ah->spi;
		break;
	case IPPROTO_COMP:
		ipch = (struct ip_comp_hdr *)(skb->data + offset);
		spi = htonl(ntohs(ipch->cpi));
		break;
	default:
		return 0;
	}

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      spi, protocol, AF_INET6);
	if (!x)
		return 0;

	xi = xfrmi_lookup(net, x);
	if (!xi) {
		xfrm_state_put(x);
		return -1;
	}

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

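/* Update interface parameters from netlink. The lower link cannot be
 * changed after creation, only the if_id.
 */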
static int xfrmi_change(struct xfrm_if *xi, const struct xfrm_if_parms *p)
{
	if (xi->p.link != p->link)
		return -EINVAL;
	xi->p.if_id = p->if_id;
	return 0;
}

static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p)
{
	struct net *net = xi->net;
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
	int err;

	xfrmi_unlink(xfrmn, xi);
	synchronize_net();
	err = xfrmi_change(xi, p);
	xfrmi_link(xfrmn, xi);
	netdev_state_change(xi->dev);
	return err;
}

static void xfrmi_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *s)
{
	int cpu;

	if (!dev->tstats)
		return;

	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;
		int start;

		stats = per_cpu_ptr(dev->tstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;
}

static int xfrmi_get_iflink(const struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);

	return xi->p.link;
}

static const struct net_device_ops xfrmi_netdev_ops = {
	.ndo_init = xfrmi_dev_init,
	.ndo_uninit = xfrmi_dev_uninit,
	.ndo_start_xmit = xfrmi_xmit,
	.ndo_get_stats64 = xfrmi_get_stats64,
	.ndo_get_iflink = xfrmi_get_iflink,
};

static void xfrmi_dev_setup(struct net_device *dev)
{
	dev->netdev_ops = &xfrmi_netdev_ops;
	dev->type = ARPHRD_NONE;
	dev->mtu = ETH_DATA_LEN;
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = IP_MAX_MTU;
	dev->flags = IFF_NOARP;
	dev->needs_free_netdev = true;
	dev->priv_destructor = xfrmi_dev_free;
	netif_keep_dst(dev);

	eth_broadcast_addr(dev->broadcast);
}

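/* ndo_init: allocate per-cpu stats and GRO cells, and inherit headroom and
 * link-layer addresses from the underlying device when one is configured.
 */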
static int xfrmi_dev_init(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net_device *phydev = __dev_get_by_index(xi->net, xi->p.link);
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&xi->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}

	dev->features |= NETIF_F_LLTX;

	if (phydev) {
		dev->needed_headroom = phydev->needed_headroom;
		dev->needed_tailroom = phydev->needed_tailroom;

		if (is_zero_ether_addr(dev->dev_addr))
			eth_hw_addr_inherit(dev, phydev);

		if (is_zero_ether_addr(dev->broadcast))
			memcpy(dev->broadcast, phydev->broadcast,
			       dev->addr_len);
	} else {
		eth_hw_addr_random(dev);
		eth_broadcast_addr(dev->broadcast);
	}

	return 0;
}

static int xfrmi_validate(struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	return 0;
}

static void xfrmi_netlink_parms(struct nlattr *data[],
				struct xfrm_if_parms *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_XFRM_LINK])
		parms->link = nla_get_u32(data[IFLA_XFRM_LINK]);

	if (data[IFLA_XFRM_IF_ID])
		parms->if_id = nla_get_u32(data[IFLA_XFRM_IF_ID]);
}

static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct net *net = dev_net(dev);
	struct xfrm_if_parms p;
	struct xfrm_if *xi;
	int err;

	xfrmi_netlink_parms(data, &p);
	xi = xfrmi_locate(net, &p);
	if (xi)
		return -EEXIST;

	xi = netdev_priv(dev);
	xi->p = p;
	xi->net = net;
	xi->dev = dev;

	err = xfrmi_create(dev);
	return err;
}

static void xfrmi_dellink(struct net_device *dev, struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}

static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net *net = xi->net;
	struct xfrm_if_parms p;

	xfrmi_netlink_parms(data, &p);
	xi = xfrmi_locate(net, &p);
	if (!xi) {
		xi = netdev_priv(dev);
	} else {
		if (xi->dev != dev)
			return -EEXIST;
	}

	return xfrmi_update(xi, &p);
}

static size_t xfrmi_get_size(const struct net_device *dev)
{
	return
		/* IFLA_XFRM_LINK */
		nla_total_size(4) +
		/* IFLA_XFRM_IF_ID */
		nla_total_size(4) +
		0;
}

static int xfrmi_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct xfrm_if_parms *parm = &xi->p;

	if (nla_put_u32(skb, IFLA_XFRM_LINK, parm->link) ||
	    nla_put_u32(skb, IFLA_XFRM_IF_ID, parm->if_id))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

struct net *xfrmi_get_link_net(const struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);

	return xi->net;
}

static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = {
	[IFLA_XFRM_LINK] = { .type = NLA_U32 },
	[IFLA_XFRM_IF_ID] = { .type = NLA_U32 },
};

static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
	.kind = "xfrm",
	.maxtype = IFLA_XFRM_MAX,
	.policy = xfrmi_policy,
	.priv_size = sizeof(struct xfrm_if),
	.setup = xfrmi_dev_setup,
	.validate = xfrmi_validate,
	.newlink = xfrmi_newlink,
	.dellink = xfrmi_dellink,
	.changelink = xfrmi_changelink,
	.get_size = xfrmi_get_size,
	.fill_info = xfrmi_fill_info,
	.get_link_net = xfrmi_get_link_net,
};

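/* Per-netns teardown: unregister any interfaces still present when a
 * network namespace exits, under the RTNL lock.
 */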
static void __net_exit xfrmi_destroy_interfaces(struct xfrmi_net *xfrmn)
{
	struct xfrm_if *xi;
	LIST_HEAD(list);

	xi = rtnl_dereference(xfrmn->xfrmi[0]);
	if (!xi)
		return;

	unregister_netdevice_queue(xi->dev, &list);
	unregister_netdevice_many(&list);
}

static int __net_init xfrmi_init_net(struct net *net)
{
	return 0;
}

static void __net_exit xfrmi_exit_net(struct net *net)
{
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);

	rtnl_lock();
	xfrmi_destroy_interfaces(xfrmn);
	rtnl_unlock();
}

static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list)
{
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_exit_list, exit_list) {
		struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
		struct xfrm_if __rcu **xip;
		struct xfrm_if *xi;

		for (xip = &xfrmn->xfrmi[0];
		     (xi = rtnl_dereference(*xip)) != NULL;
		     xip = &xi->next)
			unregister_netdevice_queue(xi->dev, &list);
	}
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations xfrmi_net_ops = {
	.exit_batch = xfrmi_exit_batch_net,
	.init = xfrmi_init_net,
	.exit = xfrmi_exit_net,
	.id = &xfrmi_net_id,
	.size = sizeof(struct xfrmi_net),
};

static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
	.handler = xfrm6_rcv,
	.cb_handler = xfrmi_rcv_cb,
	.err_handler = xfrmi6_err,
	.priority = 10,
};

static struct xfrm6_protocol xfrmi_ah6_protocol __read_mostly = {
	.handler = xfrm6_rcv,
	.cb_handler = xfrmi_rcv_cb,
	.err_handler = xfrmi6_err,
	.priority = 10,
};

static struct xfrm6_protocol xfrmi_ipcomp6_protocol __read_mostly = {
	.handler = xfrm6_rcv,
	.cb_handler = xfrmi_rcv_cb,
	.err_handler = xfrmi6_err,
	.priority = 10,
};

static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
	.handler = xfrm4_rcv,
	.input_handler = xfrm_input,
	.cb_handler = xfrmi_rcv_cb,
	.err_handler = xfrmi4_err,
	.priority = 10,
};

static struct xfrm4_protocol xfrmi_ah4_protocol __read_mostly = {
	.handler = xfrm4_rcv,
	.input_handler = xfrm_input,
	.cb_handler = xfrmi_rcv_cb,
	.err_handler = xfrmi4_err,
	.priority = 10,
};

static struct xfrm4_protocol xfrmi_ipcomp4_protocol __read_mostly = {
	.handler = xfrm4_rcv,
	.input_handler = xfrm_input,
	.cb_handler = xfrmi_rcv_cb,
	.err_handler = xfrmi4_err,
	.priority = 10,
};

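/* Register the ESP, AH and IPComp handlers for both address families so
 * decapsulated packets and ICMP errors are routed through the callbacks
 * above; deregistration runs in reverse order.
 */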
static int __init xfrmi4_init(void)
{
	int err;

	err = xfrm4_protocol_register(&xfrmi_esp4_protocol, IPPROTO_ESP);
	if (err < 0)
		goto xfrm_proto_esp_failed;
	err = xfrm4_protocol_register(&xfrmi_ah4_protocol, IPPROTO_AH);
	if (err < 0)
		goto xfrm_proto_ah_failed;
	err = xfrm4_protocol_register(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
	if (err < 0)
		goto xfrm_proto_comp_failed;

	return 0;

xfrm_proto_comp_failed:
	xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
	xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:
	return err;
}

static void xfrmi4_fini(void)
{
	xfrm4_protocol_deregister(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
	xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
	xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
}

static int __init xfrmi6_init(void)
{
	int err;

	err = xfrm6_protocol_register(&xfrmi_esp6_protocol, IPPROTO_ESP);
	if (err < 0)
		goto xfrm_proto_esp_failed;
	err = xfrm6_protocol_register(&xfrmi_ah6_protocol, IPPROTO_AH);
	if (err < 0)
		goto xfrm_proto_ah_failed;
	err = xfrm6_protocol_register(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
	if (err < 0)
		goto xfrm_proto_comp_failed;

	return 0;

xfrm_proto_comp_failed:
	xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
	xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:
	return err;
}

static void xfrmi6_fini(void)
{
	xfrm6_protocol_deregister(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
	xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
	xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
}

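/* Module init: register the pernet device ops, the IPv4/IPv6 protocol
 * handlers and the rtnl link ops, then hook xfrmi_decode_session() into
 * the xfrm core; failure and module exit unwind in reverse order.
 */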
static const struct xfrm_if_cb xfrm_if_cb = {
	.decode_session = xfrmi_decode_session,
};

static int __init xfrmi_init(void)
{
	const char *msg;
	int err;

	pr_info("IPsec XFRM device driver\n");

	msg = "tunnel device";
	err = register_pernet_device(&xfrmi_net_ops);
	if (err < 0)
		goto pernet_dev_failed;

	msg = "xfrm4 protocols";
	err = xfrmi4_init();
	if (err < 0)
		goto xfrmi4_failed;

	msg = "xfrm6 protocols";
	err = xfrmi6_init();
	if (err < 0)
		goto xfrmi6_failed;

	msg = "netlink interface";
	err = rtnl_link_register(&xfrmi_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	xfrm_if_register_cb(&xfrm_if_cb);

	return err;

rtnl_link_failed:
	xfrmi6_fini();
xfrmi6_failed:
	xfrmi4_fini();
xfrmi4_failed:
	unregister_pernet_device(&xfrmi_net_ops);
pernet_dev_failed:
	pr_err("xfrmi init: failed to register %s\n", msg);
	return err;
}

static void __exit xfrmi_fini(void)
{
	xfrm_if_unregister_cb();
	rtnl_link_unregister(&xfrmi_link_ops);
	xfrmi4_fini();
	xfrmi6_fini();
	unregister_pernet_device(&xfrmi_net_ops);
}

module_init(xfrmi_init);
module_exit(xfrmi_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("xfrm");
MODULE_ALIAS_NETDEV("xfrm0");
MODULE_AUTHOR("Steffen Klassert");
MODULE_DESCRIPTION("XFRM virtual interface");