xfrm_interface_core.c
// SPDX-License-Identifier: GPL-2.0
/*
 * XFRM virtual interface
 *
 * Copyright (C) 2018 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_link.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <net/gso.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip_tunnels.h>
#include <net/addrconf.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/dst_metadata.h>
#include <net/netns/generic.h>
#include <linux/etherdevice.h>

static int xfrmi_dev_init(struct net_device *dev);
static void xfrmi_dev_setup(struct net_device *dev);
static struct rtnl_link_ops xfrmi_link_ops __read_mostly;
static unsigned int xfrmi_net_id __read_mostly;
static const struct net_device_ops xfrmi_netdev_ops;

#define XFRMI_HASH_BITS	8
#define XFRMI_HASH_SIZE	BIT(XFRMI_HASH_BITS)

struct xfrmi_net {
	/* lists for storing interfaces in use */
	struct xfrm_if __rcu *xfrmi[XFRMI_HASH_SIZE];
	struct xfrm_if __rcu *collect_md_xfrmi;
};

static const struct nla_policy xfrm_lwt_policy[LWT_XFRM_MAX + 1] = {
	[LWT_XFRM_IF_ID]	= NLA_POLICY_MIN(NLA_U32, 1),
	[LWT_XFRM_LINK]		= NLA_POLICY_MIN(NLA_U32, 1),
};

static void xfrmi_destroy_state(struct lwtunnel_state *lwt)
{
}

static int xfrmi_build_state(struct net *net, struct nlattr *nla,
			     unsigned int family, const void *cfg,
			     struct lwtunnel_state **ts,
			     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[LWT_XFRM_MAX + 1];
	struct lwtunnel_state *new_state;
	struct xfrm_md_info *info;
	int ret;

	ret = nla_parse_nested(tb, LWT_XFRM_MAX, nla, xfrm_lwt_policy, extack);
	if (ret < 0)
		return ret;

	if (!tb[LWT_XFRM_IF_ID]) {
		NL_SET_ERR_MSG(extack, "if_id must be set");
		return -EINVAL;
	}

	new_state = lwtunnel_state_alloc(sizeof(*info));
	if (!new_state) {
		NL_SET_ERR_MSG(extack, "failed to create encap info");
		return -ENOMEM;
	}

	new_state->type = LWTUNNEL_ENCAP_XFRM;

	info = lwt_xfrm_info(new_state);

	info->if_id = nla_get_u32(tb[LWT_XFRM_IF_ID]);

	if (tb[LWT_XFRM_LINK])
		info->link = nla_get_u32(tb[LWT_XFRM_LINK]);

	*ts = new_state;
	return 0;
}

static int xfrmi_fill_encap_info(struct sk_buff *skb,
				 struct lwtunnel_state *lwt)
{
	struct xfrm_md_info *info = lwt_xfrm_info(lwt);

	if (nla_put_u32(skb, LWT_XFRM_IF_ID, info->if_id) ||
	    (info->link && nla_put_u32(skb, LWT_XFRM_LINK, info->link)))
		return -EMSGSIZE;

	return 0;
}

static int xfrmi_encap_nlsize(struct lwtunnel_state *lwtstate)
{
	return nla_total_size(sizeof(u32)) + /* LWT_XFRM_IF_ID */
		nla_total_size(sizeof(u32)); /* LWT_XFRM_LINK */
}

static int xfrmi_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
	struct xfrm_md_info *a_info = lwt_xfrm_info(a);
	struct xfrm_md_info *b_info = lwt_xfrm_info(b);

	return memcmp(a_info, b_info, sizeof(*a_info));
}

static const struct lwtunnel_encap_ops xfrmi_encap_ops = {
	.build_state	= xfrmi_build_state,
	.destroy_state	= xfrmi_destroy_state,
	.fill_encap	= xfrmi_fill_encap_info,
	.get_encap_size	= xfrmi_encap_nlsize,
	.cmp_encap	= xfrmi_encap_cmp,
	.owner		= THIS_MODULE,
};
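
/*
 * These lwtunnel ops let routes carry an xfrm if_id (and optional link) as
 * encap metadata; xfrmi_xmit2() picks that up on collect_md devices via
 * skb_xfrm_md_info(). Illustrative iproute2 usage (example syntax and
 * names, shown as an assumption rather than verified here):
 *
 *   ip route add 203.0.113.0/24 encap xfrm if_id 42 dev ipsec0
 */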

#define for_each_xfrmi_rcu(start, xi) \
	for (xi = rcu_dereference(start); xi; xi = rcu_dereference(xi->next))

static u32 xfrmi_hash(u32 if_id)
{
	return hash_32(if_id, XFRMI_HASH_BITS);
}
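
/*
 * Find the xfrm interface whose if_id matches the state's if_id and whose
 * device is up; the collect_md device, if one exists, acts as a catch-all
 * for states that match no fixed interface. Runs under RCU.
 */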
static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
{
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
	struct xfrm_if *xi;

	for_each_xfrmi_rcu(xfrmn->xfrmi[xfrmi_hash(x->if_id)], xi) {
		if (x->if_id == xi->p.if_id &&
		    (xi->dev->flags & IFF_UP))
			return xi;
	}

	xi = rcu_dereference(xfrmn->collect_md_xfrmi);
	if (xi && (xi->dev->flags & IFF_UP))
		return xi;

	return NULL;
}
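
/*
 * Map an inbound, transformed skb back to the xfrm interface it belongs
 * to, so policy lookup runs with the right netns and if_id. The sdif
 * lookup keeps this working when the packet arrived through an L3 master
 * (VRF) device.
 */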
static bool xfrmi_decode_session(struct sk_buff *skb,
				 unsigned short family,
				 struct xfrm_if_decode_session_result *res)
{
	struct net_device *dev;
	struct xfrm_if *xi;
	int ifindex = 0;

	if (!secpath_exists(skb) || !skb->dev)
		return false;

	switch (family) {
	case AF_INET6:
		ifindex = inet6_sdif(skb);
		break;
	case AF_INET:
		ifindex = inet_sdif(skb);
		break;
	}

	if (ifindex) {
		struct net *net = xs_net(xfrm_input_state(skb));

		dev = dev_get_by_index_rcu(net, ifindex);
	} else {
		dev = skb->dev;
	}

	if (!dev || !(dev->flags & IFF_UP))
		return false;

	if (dev->netdev_ops != &xfrmi_netdev_ops)
		return false;

	xi = netdev_priv(dev);
	res->net = xi->net;

	if (xi->p.collect_md)
		res->if_id = xfrm_input_state(skb)->if_id;
	else
		res->if_id = xi->p.if_id;
	return true;
}
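
/*
 * Hash list insertion/removal. Writers run under RTNL; readers traverse
 * under RCU, hence the rcu_assign_pointer()/rtnl_dereference() pairing.
 */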
static void xfrmi_link(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
{
	struct xfrm_if __rcu **xip = &xfrmn->xfrmi[xfrmi_hash(xi->p.if_id)];

	rcu_assign_pointer(xi->next, rtnl_dereference(*xip));
	rcu_assign_pointer(*xip, xi);
}

static void xfrmi_unlink(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
{
	struct xfrm_if __rcu **xip;
	struct xfrm_if *iter;

	for (xip = &xfrmn->xfrmi[xfrmi_hash(xi->p.if_id)];
	     (iter = rtnl_dereference(*xip)) != NULL;
	     xip = &iter->next) {
		if (xi == iter) {
			rcu_assign_pointer(*xip, xi->next);
			break;
		}
	}
}

static void xfrmi_dev_free(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);

	gro_cells_destroy(&xi->gro_cells);
}

static int xfrmi_create(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
	int err;

	dev->rtnl_link_ops = &xfrmi_link_ops;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	if (xi->p.collect_md)
		rcu_assign_pointer(xfrmn->collect_md_xfrmi, xi);
	else
		xfrmi_link(xfrmn, xi);

	return 0;

out:
	return err;
}

static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p)
{
	struct xfrm_if __rcu **xip;
	struct xfrm_if *xi;
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);

	for (xip = &xfrmn->xfrmi[xfrmi_hash(p->if_id)];
	     (xi = rtnl_dereference(*xip)) != NULL;
	     xip = &xi->next)
		if (xi->p.if_id == p->if_id)
			return xi;

	return NULL;
}

static void xfrmi_dev_uninit(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct xfrmi_net *xfrmn = net_generic(xi->net, xfrmi_net_id);

	if (xi->p.collect_md)
		RCU_INIT_POINTER(xfrmn->collect_md_xfrmi, NULL);
	else
		xfrmi_unlink(xfrmn, xi);
}
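
/*
 * Reset receive-side packet state (timestamp, dst, conntrack). When the
 * packet crosses a netns boundary (xnet), additionally drop the secpath,
 * socket ownership and skb->mark so that nothing leaks between
 * namespaces.
 */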
static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
{
	skb_clear_tstamp(skb);
	skb->pkt_type = PACKET_HOST;
	skb->skb_iif = 0;
	skb->ignore_df = 0;
	skb_dst_drop(skb);
	nf_reset_ct(skb);
	nf_reset_trace(skb);

	if (!xnet)
		return;

	ipvs_reset(skb);
	secpath_reset(skb);
	skb_orphan(skb);
	skb->mark = 0;
}
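
/*
 * Common input path for ESP/AH/IPcomp destined to an xfrm interface:
 * enforce inbound policy if a secpath is already present, record the
 * address family for SPI parsing, then hand off to xfrm_input().
 */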
static int xfrmi_input(struct sk_buff *skb, int nexthdr, __be32 spi,
		       int encap_type, unsigned short family)
{
	struct sec_path *sp;

	sp = skb_sec_path(skb);
	if (sp && (sp->len || sp->olen) &&
	    !xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
		goto discard;

	XFRM_SPI_SKB_CB(skb)->family = family;
	if (family == AF_INET) {
		XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
		XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	} else {
		XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
		XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
	}

	return xfrm_input(skb, nexthdr, spi, encap_type);
discard:
	kfree_skb(skb);
	return 0;
}

static int xfrmi4_rcv(struct sk_buff *skb)
{
	return xfrmi_input(skb, ip_hdr(skb)->protocol, 0, 0, AF_INET);
}

static int xfrmi6_rcv(struct sk_buff *skb)
{
	return xfrmi_input(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
			   0, 0, AF_INET6);
}

static int xfrmi4_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET);
}

static int xfrmi6_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET6);
}
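
/*
 * Post-decapsulation callback: re-home the packet onto the matching xfrm
 * interface, run an inbound policy check when crossing namespaces, and,
 * for collect_md devices, attach a metadata dst carrying the state's
 * if_id plus the original ingress ifindex before accounting the packet.
 */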
static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
{
	const struct xfrm_mode *inner_mode;
	struct net_device *dev;
	struct xfrm_state *x;
	struct xfrm_if *xi;
	bool xnet;
	int link;

	if (err && !secpath_exists(skb))
		return 0;

	x = xfrm_input_state(skb);

	xi = xfrmi_lookup(xs_net(x), x);
	if (!xi)
		return 1;

	link = skb->dev->ifindex;
	dev = xi->dev;
	skb->dev = dev;

	if (err) {
		DEV_STATS_INC(dev, rx_errors);
		DEV_STATS_INC(dev, rx_dropped);

		return 0;
	}

	xnet = !net_eq(xi->net, dev_net(skb->dev));

	if (xnet) {
		inner_mode = &x->inner_mode;

		if (x->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
			if (inner_mode == NULL) {
				XFRM_INC_STATS(dev_net(skb->dev),
					       LINUX_MIB_XFRMINSTATEMODEERROR);
				return -EINVAL;
			}
		}

		if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb,
				       inner_mode->family))
			return -EPERM;
	}

	xfrmi_scrub_packet(skb, xnet);
	if (xi->p.collect_md) {
		struct metadata_dst *md_dst;

		md_dst = metadata_dst_alloc(0, METADATA_XFRM, GFP_ATOMIC);
		if (!md_dst)
			return -ENOMEM;

		md_dst->u.xfrm_info.if_id = x->if_id;
		md_dst->u.xfrm_info.link = link;
		skb_dst_set(skb, (struct dst_entry *)md_dst);
	}
	dev_sw_netstats_rx_add(dev, skb->len);

	return 0;
}
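
/*
 * Transmit worker: resolve an xfrm bundle for this if_id via
 * xfrm_lookup_with_ifid(), guard against local routing loops and
 * oversized packets (answering with ICMP FRAG_NEEDED/PKT_TOOBIG where
 * required), then send through dst_output() in the interface's netns.
 */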
static int
xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct dst_entry *dst = skb_dst(skb);
	unsigned int length = skb->len;
	struct net_device *tdev;
	struct xfrm_state *x;
	int err = -1;
	u32 if_id;
	int mtu;

	if (xi->p.collect_md) {
		struct xfrm_md_info *md_info = skb_xfrm_md_info(skb);

		if (unlikely(!md_info))
			return -EINVAL;

		if_id = md_info->if_id;
		fl->flowi_oif = md_info->link;
		if (md_info->dst_orig) {
			struct dst_entry *tmp_dst = dst;

			dst = md_info->dst_orig;
			skb_dst_set(skb, dst);
			md_info->dst_orig = NULL;
			dst_release(tmp_dst);
		}
	} else {
		if_id = xi->p.if_id;
	}

	dst_hold(dst);
	dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, if_id);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		dst = NULL;
		goto tx_err_link_failure;
	}

	x = dst->xfrm;
	if (!x)
		goto tx_err_link_failure;

	if (x->if_id != if_id)
		goto tx_err_link_failure;

	tdev = dst->dev;

	if (tdev == dev) {
		DEV_STATS_INC(dev, collisions);
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     dev->name);
		goto tx_err_dst_release;
	}

	mtu = dst_mtu(dst);
	if ((!skb_is_gso(skb) && skb->len > mtu) ||
	    (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))) {
		skb_dst_update_pmtu_no_confirm(skb, mtu);

		if (skb->protocol == htons(ETH_P_IPV6)) {
			if (mtu < IPV6_MIN_MTU)
				mtu = IPV6_MIN_MTU;

			if (skb->len > 1280)
				icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			else
				goto xmit;
		} else {
			if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
				goto xmit;
			icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				      htonl(mtu));
		}

		dst_release(dst);
		return -EMSGSIZE;
	}

xmit:
	xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
	skb_dst_set(skb, dst);
	skb->dev = tdev;

	err = dst_output(xi->net, skb->sk, skb);
	if (net_xmit_eval(err) == 0) {
		dev_sw_netstats_tx_add(dev, 1, length);
	} else {
		DEV_STATS_INC(dev, tx_errors);
		DEV_STATS_INC(dev, tx_aborted_errors);
	}

	return 0;
tx_err_link_failure:
	DEV_STATS_INC(dev, tx_carrier_errors);
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	return err;
}
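
/*
 * ndo_start_xmit handler: decode the flow from the packet and, if the skb
 * arrived without a dst, do a plain IPv4/IPv6 route lookup in the
 * device's netns before delegating to xfrmi_xmit2().
 */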
static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct dst_entry *dst = skb_dst(skb);
	struct flowi fl;
	int ret;

	memset(&fl, 0, sizeof(fl));

	switch (skb->protocol) {
	case htons(ETH_P_IPV6):
		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
		xfrm_decode_session(dev_net(dev), skb, &fl, AF_INET6);
		if (!dst) {
			fl.u.ip6.flowi6_oif = dev->ifindex;
			fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
			dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6);
			if (dst->error) {
				dst_release(dst);
				DEV_STATS_INC(dev, tx_carrier_errors);
				goto tx_err;
			}
			skb_dst_set(skb, dst);
		}
		break;
	case htons(ETH_P_IP):
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		xfrm_decode_session(dev_net(dev), skb, &fl, AF_INET);
		if (!dst) {
			struct rtable *rt;

			fl.u.ip4.flowi4_oif = dev->ifindex;
			fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
			rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4);
			if (IS_ERR(rt)) {
				DEV_STATS_INC(dev, tx_carrier_errors);
				goto tx_err;
			}
			skb_dst_set(skb, &rt->dst);
		}
		break;
	default:
		goto tx_err;
	}

	fl.flowi_oif = xi->p.link;

	ret = xfrmi_xmit2(skb, dev, &fl);
	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	DEV_STATS_INC(dev, tx_errors);
	DEV_STATS_INC(dev, tx_dropped);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
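
/*
 * ICMP error handlers: when a PMTU or redirect message refers to an
 * ESP/AH/IPcomp SA that terminates on one of our interfaces, propagate
 * the update into the routing cache; all other ICMP types are left to
 * the normal handlers.
 */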
static int xfrmi4_err(struct sk_buff *skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct net *net = dev_net(skb->dev);
	int protocol = iph->protocol;
	struct ip_comp_hdr *ipch;
	struct ip_esp_hdr *esph;
	struct ip_auth_hdr *ah;
	struct xfrm_state *x;
	struct xfrm_if *xi;
	__be32 spi;

	switch (protocol) {
	case IPPROTO_ESP:
		esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));
		spi = esph->spi;
		break;
	case IPPROTO_AH:
		ah = (struct ip_auth_hdr *)(skb->data + (iph->ihl << 2));
		spi = ah->spi;
		break;
	case IPPROTO_COMP:
		ipch = (struct ip_comp_hdr *)(skb->data + (iph->ihl << 2));
		spi = htonl(ntohs(ipch->cpi));
		break;
	default:
		return 0;
	}

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
		break;
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      spi, protocol, AF_INET);
	if (!x)
		return 0;

	xi = xfrmi_lookup(net, x);
	if (!xi) {
		xfrm_state_put(x);
		return -1;
	}

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, protocol);
	else
		ipv4_redirect(skb, net, 0, protocol);
	xfrm_state_put(x);

	return 0;
}

static int xfrmi6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		      u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct net *net = dev_net(skb->dev);
	int protocol = iph->nexthdr;
	struct ip_comp_hdr *ipch;
	struct ip_esp_hdr *esph;
	struct ip_auth_hdr *ah;
	struct xfrm_state *x;
	struct xfrm_if *xi;
	__be32 spi;

	switch (protocol) {
	case IPPROTO_ESP:
		esph = (struct ip_esp_hdr *)(skb->data + offset);
		spi = esph->spi;
		break;
	case IPPROTO_AH:
		ah = (struct ip_auth_hdr *)(skb->data + offset);
		spi = ah->spi;
		break;
	case IPPROTO_COMP:
		ipch = (struct ip_comp_hdr *)(skb->data + offset);
		spi = htonl(ntohs(ipch->cpi));
		break;
	default:
		return 0;
	}

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      spi, protocol, AF_INET6);
	if (!x)
		return 0;

	xi = xfrmi_lookup(net, x);
	if (!xi) {
		xfrm_state_put(x);
		return -1;
	}

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

static int xfrmi_change(struct xfrm_if *xi, const struct xfrm_if_parms *p)
{
	if (xi->p.link != p->link)
		return -EINVAL;

	xi->p.if_id = p->if_id;

	return 0;
}

static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p)
{
	struct net *net = xi->net;
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
	int err;

	xfrmi_unlink(xfrmn, xi);
	synchronize_net();
	err = xfrmi_change(xi, p);
	xfrmi_link(xfrmn, xi);
	netdev_state_change(xi->dev);
	return err;
}

static int xfrmi_get_iflink(const struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);

	return READ_ONCE(xi->p.link);
}

static const struct net_device_ops xfrmi_netdev_ops = {
	.ndo_init	 = xfrmi_dev_init,
	.ndo_uninit	 = xfrmi_dev_uninit,
	.ndo_start_xmit	 = xfrmi_xmit,
	.ndo_get_stats64 = dev_get_tstats64,
	.ndo_get_iflink	 = xfrmi_get_iflink,
};
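
/*
 * Device setup: an xfrm interface is a pure layer 3 virtual device
 * (ARPHRD_NONE, IFF_NOARP) that keeps its dst across transmit and
 * accounts traffic through per-CPU tstats.
 */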
static void xfrmi_dev_setup(struct net_device *dev)
{
	dev->netdev_ops		= &xfrmi_netdev_ops;
	dev->header_ops		= &ip_tunnel_header_ops;
	dev->type		= ARPHRD_NONE;
	dev->mtu		= ETH_DATA_LEN;
	dev->min_mtu		= ETH_MIN_MTU;
	dev->max_mtu		= IP_MAX_MTU;
	dev->flags		= IFF_NOARP;
	dev->needs_free_netdev	= true;
	dev->priv_destructor	= xfrmi_dev_free;
	dev->pcpu_stat_type	= NETDEV_PCPU_STAT_TSTATS;
	netif_keep_dst(dev);

	eth_broadcast_addr(dev->broadcast);
}

#define XFRMI_FEATURES (NETIF_F_SG |		\
			NETIF_F_FRAGLIST |	\
			NETIF_F_GSO_SOFTWARE |	\
			NETIF_F_HW_CSUM)

static int xfrmi_dev_init(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net_device *phydev = __dev_get_by_index(xi->net, xi->p.link);
	int err;

	err = gro_cells_init(&xi->gro_cells, dev);
	if (err)
		return err;

	dev->lltx = true;
	dev->features |= XFRMI_FEATURES;
	dev->hw_features |= XFRMI_FEATURES;

	if (phydev) {
		dev->needed_headroom = phydev->needed_headroom;
		dev->needed_tailroom = phydev->needed_tailroom;

		if (is_zero_ether_addr(dev->dev_addr))
			eth_hw_addr_inherit(dev, phydev);

		if (is_zero_ether_addr(dev->broadcast))
			memcpy(dev->broadcast, phydev->broadcast,
			       dev->addr_len);
	} else {
		eth_hw_addr_random(dev);
		eth_broadcast_addr(dev->broadcast);
	}

	return 0;
}

static int xfrmi_validate(struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	return 0;
}

static void xfrmi_netlink_parms(struct nlattr *data[],
				struct xfrm_if_parms *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_XFRM_LINK])
		parms->link = nla_get_u32(data[IFLA_XFRM_LINK]);

	if (data[IFLA_XFRM_IF_ID])
		parms->if_id = nla_get_u32(data[IFLA_XFRM_IF_ID]);

	if (data[IFLA_XFRM_COLLECT_METADATA])
		parms->collect_md = true;
}
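
/*
 * Link creation: either a fixed, non-zero if_id is supplied, or
 * collect_md is set, in which case a single catch-all device per netns
 * serves every if_id. Illustrative iproute2 invocations (example syntax
 * and names, assumed rather than verified here):
 *
 *   ip link add ipsec0 type xfrm dev eth0 if_id 42
 *   ip link add ipsec-md type xfrm external
 */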
static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct net *net = dev_net(dev);
	struct xfrm_if_parms p = {};
	struct xfrm_if *xi;
	int err;

	xfrmi_netlink_parms(data, &p);
	if (p.collect_md) {
		struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);

		if (p.link || p.if_id) {
			NL_SET_ERR_MSG(extack, "link and if_id must be zero");
			return -EINVAL;
		}

		if (rtnl_dereference(xfrmn->collect_md_xfrmi))
			return -EEXIST;

	} else {
		if (!p.if_id) {
			NL_SET_ERR_MSG(extack, "if_id must be non zero");
			return -EINVAL;
		}

		xi = xfrmi_locate(net, &p);
		if (xi)
			return -EEXIST;
	}

	xi = netdev_priv(dev);
	xi->p = p;
	xi->net = net;
	xi->dev = dev;

	err = xfrmi_create(dev);
	return err;
}

static void xfrmi_dellink(struct net_device *dev, struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}

static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net *net = xi->net;
	struct xfrm_if_parms p = {};

	xfrmi_netlink_parms(data, &p);
	if (!p.if_id) {
		NL_SET_ERR_MSG(extack, "if_id must be non zero");
		return -EINVAL;
	}

	if (p.collect_md || xi->p.collect_md) {
		NL_SET_ERR_MSG(extack, "collect_md can't be changed");
		return -EINVAL;
	}

	xi = xfrmi_locate(net, &p);
	if (!xi) {
		xi = netdev_priv(dev);
	} else {
		if (xi->dev != dev)
			return -EEXIST;
	}

	return xfrmi_update(xi, &p);
}

static size_t xfrmi_get_size(const struct net_device *dev)
{
	return
		/* IFLA_XFRM_LINK */
		nla_total_size(4) +
		/* IFLA_XFRM_IF_ID */
		nla_total_size(4) +
		/* IFLA_XFRM_COLLECT_METADATA */
		nla_total_size(0) +
		0;
}

static int xfrmi_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct xfrm_if_parms *parm = &xi->p;

	if (nla_put_u32(skb, IFLA_XFRM_LINK, parm->link) ||
	    nla_put_u32(skb, IFLA_XFRM_IF_ID, parm->if_id) ||
	    (xi->p.collect_md && nla_put_flag(skb, IFLA_XFRM_COLLECT_METADATA)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct net *xfrmi_get_link_net(const struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);

	return READ_ONCE(xi->net);
}

static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = {
	[IFLA_XFRM_UNSPEC]		= { .strict_start_type = IFLA_XFRM_COLLECT_METADATA },
	[IFLA_XFRM_LINK]		= { .type = NLA_U32 },
	[IFLA_XFRM_IF_ID]		= { .type = NLA_U32 },
	[IFLA_XFRM_COLLECT_METADATA]	= { .type = NLA_FLAG },
};

static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
	.kind		= "xfrm",
	.maxtype	= IFLA_XFRM_MAX,
	.policy		= xfrmi_policy,
	.priv_size	= sizeof(struct xfrm_if),
	.setup		= xfrmi_dev_setup,
	.validate	= xfrmi_validate,
	.newlink	= xfrmi_newlink,
	.dellink	= xfrmi_dellink,
	.changelink	= xfrmi_changelink,
	.get_size	= xfrmi_get_size,
	.fill_info	= xfrmi_fill_info,
	.get_link_net	= xfrmi_get_link_net,
};
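
/*
 * Per-netns teardown: under RTNL, queue every hashed device (plus the
 * collect_md catch-all, if any) for batched unregistration.
 */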
static void __net_exit xfrmi_exit_batch_rtnl(struct list_head *net_exit_list,
					     struct list_head *dev_to_kill)
{
	struct net *net;

	ASSERT_RTNL();
	list_for_each_entry(net, net_exit_list, exit_list) {
		struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
		struct xfrm_if __rcu **xip;
		struct xfrm_if *xi;
		int i;

		for (i = 0; i < XFRMI_HASH_SIZE; i++) {
			for (xip = &xfrmn->xfrmi[i];
			     (xi = rtnl_dereference(*xip)) != NULL;
			     xip = &xi->next)
				unregister_netdevice_queue(xi->dev, dev_to_kill);
		}
		xi = rtnl_dereference(xfrmn->collect_md_xfrmi);
		if (xi)
			unregister_netdevice_queue(xi->dev, dev_to_kill);
	}
}

static struct pernet_operations xfrmi_net_ops = {
	.exit_batch_rtnl = xfrmi_exit_batch_rtnl,
	.id		 = &xfrmi_net_id,
	.size		 = sizeof(struct xfrmi_net),
};

static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
	.handler	= xfrmi6_rcv,
	.input_handler	= xfrmi6_input,
	.cb_handler	= xfrmi_rcv_cb,
	.err_handler	= xfrmi6_err,
	.priority	= 10,
};

static struct xfrm6_protocol xfrmi_ah6_protocol __read_mostly = {
	.handler	= xfrm6_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= xfrmi_rcv_cb,
	.err_handler	= xfrmi6_err,
	.priority	= 10,
};

static struct xfrm6_protocol xfrmi_ipcomp6_protocol __read_mostly = {
	.handler	= xfrm6_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= xfrmi_rcv_cb,
	.err_handler	= xfrmi6_err,
	.priority	= 10,
};

#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL)
static int xfrmi6_rcv_tunnel(struct sk_buff *skb)
{
	const xfrm_address_t *saddr;
	__be32 spi;

	saddr = (const xfrm_address_t *)&ipv6_hdr(skb)->saddr;
	spi = xfrm6_tunnel_spi_lookup(dev_net(skb->dev), saddr);

	return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi, NULL);
}

static struct xfrm6_tunnel xfrmi_ipv6_handler __read_mostly = {
	.handler	= xfrmi6_rcv_tunnel,
	.cb_handler	= xfrmi_rcv_cb,
	.err_handler	= xfrmi6_err,
	.priority	= 2,
};

static struct xfrm6_tunnel xfrmi_ip6ip_handler __read_mostly = {
	.handler	= xfrmi6_rcv_tunnel,
	.cb_handler	= xfrmi_rcv_cb,
	.err_handler	= xfrmi6_err,
	.priority	= 2,
};
#endif

static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
	.handler	= xfrmi4_rcv,
	.input_handler	= xfrmi4_input,
	.cb_handler	= xfrmi_rcv_cb,
	.err_handler	= xfrmi4_err,
	.priority	= 10,
};

static struct xfrm4_protocol xfrmi_ah4_protocol __read_mostly = {
	.handler	= xfrm4_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= xfrmi_rcv_cb,
	.err_handler	= xfrmi4_err,
	.priority	= 10,
};

static struct xfrm4_protocol xfrmi_ipcomp4_protocol __read_mostly = {
	.handler	= xfrm4_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= xfrmi_rcv_cb,
	.err_handler	= xfrmi4_err,
	.priority	= 10,
};

#if IS_REACHABLE(CONFIG_INET_XFRM_TUNNEL)
static int xfrmi4_rcv_tunnel(struct sk_buff *skb)
{
	return xfrm4_rcv_spi(skb, IPPROTO_IPIP, ip_hdr(skb)->saddr);
}

static struct xfrm_tunnel xfrmi_ipip_handler __read_mostly = {
	.handler	= xfrmi4_rcv_tunnel,
	.cb_handler	= xfrmi_rcv_cb,
	.err_handler	= xfrmi4_err,
	.priority	= 3,
};

static struct xfrm_tunnel xfrmi_ipip6_handler __read_mostly = {
	.handler	= xfrmi4_rcv_tunnel,
	.cb_handler	= xfrmi_rcv_cb,
	.err_handler	= xfrmi4_err,
	.priority	= 2,
};
#endif

static int __init xfrmi4_init(void)
{
	int err;

	err = xfrm4_protocol_register(&xfrmi_esp4_protocol, IPPROTO_ESP);
	if (err < 0)
		goto xfrm_proto_esp_failed;
	err = xfrm4_protocol_register(&xfrmi_ah4_protocol, IPPROTO_AH);
	if (err < 0)
		goto xfrm_proto_ah_failed;
	err = xfrm4_protocol_register(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
	if (err < 0)
		goto xfrm_proto_comp_failed;
#if IS_REACHABLE(CONFIG_INET_XFRM_TUNNEL)
	err = xfrm4_tunnel_register(&xfrmi_ipip_handler, AF_INET);
	if (err < 0)
		goto xfrm_tunnel_ipip_failed;
	err = xfrm4_tunnel_register(&xfrmi_ipip6_handler, AF_INET6);
	if (err < 0)
		goto xfrm_tunnel_ipip6_failed;
#endif

	return 0;

#if IS_REACHABLE(CONFIG_INET_XFRM_TUNNEL)
xfrm_tunnel_ipip6_failed:
	xfrm4_tunnel_deregister(&xfrmi_ipip_handler, AF_INET);
xfrm_tunnel_ipip_failed:
	xfrm4_protocol_deregister(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
#endif
xfrm_proto_comp_failed:
	xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
	xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:
	return err;
}

static void xfrmi4_fini(void)
{
#if IS_REACHABLE(CONFIG_INET_XFRM_TUNNEL)
	xfrm4_tunnel_deregister(&xfrmi_ipip6_handler, AF_INET6);
	xfrm4_tunnel_deregister(&xfrmi_ipip_handler, AF_INET);
#endif
	xfrm4_protocol_deregister(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
	xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
	xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
}

static int __init xfrmi6_init(void)
{
	int err;

	err = xfrm6_protocol_register(&xfrmi_esp6_protocol, IPPROTO_ESP);
	if (err < 0)
		goto xfrm_proto_esp_failed;
	err = xfrm6_protocol_register(&xfrmi_ah6_protocol, IPPROTO_AH);
	if (err < 0)
		goto xfrm_proto_ah_failed;
	err = xfrm6_protocol_register(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
	if (err < 0)
		goto xfrm_proto_comp_failed;
#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL)
	err = xfrm6_tunnel_register(&xfrmi_ipv6_handler, AF_INET6);
	if (err < 0)
		goto xfrm_tunnel_ipv6_failed;
	err = xfrm6_tunnel_register(&xfrmi_ip6ip_handler, AF_INET);
	if (err < 0)
		goto xfrm_tunnel_ip6ip_failed;
#endif

	return 0;

#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL)
xfrm_tunnel_ip6ip_failed:
	xfrm6_tunnel_deregister(&xfrmi_ipv6_handler, AF_INET6);
xfrm_tunnel_ipv6_failed:
	xfrm6_protocol_deregister(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
#endif
xfrm_proto_comp_failed:
	xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
	xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:
	return err;
}

static void xfrmi6_fini(void)
{
#if IS_REACHABLE(CONFIG_INET6_XFRM_TUNNEL)
	xfrm6_tunnel_deregister(&xfrmi_ip6ip_handler, AF_INET);
	xfrm6_tunnel_deregister(&xfrmi_ipv6_handler, AF_INET6);
#endif
	xfrm6_protocol_deregister(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
	xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
	xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
}

static const struct xfrm_if_cb xfrm_if_cb = {
	.decode_session = xfrmi_decode_session,
};
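
/*
 * Module init: registration order matters here. Pernet state and the
 * per-family protocol handlers must be in place before the rtnl link
 * type and lwtunnel ops are exposed; failure unwinds in reverse order.
 */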
static int __init xfrmi_init(void)
{
	const char *msg;
	int err;

	pr_info("IPsec XFRM device driver\n");

	msg = "tunnel device";
	err = register_pernet_device(&xfrmi_net_ops);
	if (err < 0)
		goto pernet_dev_failed;

	msg = "xfrm4 protocols";
	err = xfrmi4_init();
	if (err < 0)
		goto xfrmi4_failed;

	msg = "xfrm6 protocols";
	err = xfrmi6_init();
	if (err < 0)
		goto xfrmi6_failed;

	msg = "netlink interface";
	err = rtnl_link_register(&xfrmi_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = register_xfrm_interface_bpf();
	if (err < 0)
		goto kfunc_failed;

	lwtunnel_encap_add_ops(&xfrmi_encap_ops, LWTUNNEL_ENCAP_XFRM);

	xfrm_if_register_cb(&xfrm_if_cb);

	return err;

kfunc_failed:
	rtnl_link_unregister(&xfrmi_link_ops);
rtnl_link_failed:
	xfrmi6_fini();
xfrmi6_failed:
	xfrmi4_fini();
xfrmi4_failed:
	unregister_pernet_device(&xfrmi_net_ops);
pernet_dev_failed:
	pr_err("xfrmi init: failed to register %s\n", msg);
	return err;
}

static void __exit xfrmi_fini(void)
{
	xfrm_if_unregister_cb();
	lwtunnel_encap_del_ops(&xfrmi_encap_ops, LWTUNNEL_ENCAP_XFRM);
	rtnl_link_unregister(&xfrmi_link_ops);
	xfrmi4_fini();
	xfrmi6_fini();
	unregister_pernet_device(&xfrmi_net_ops);
}

module_init(xfrmi_init);
module_exit(xfrmi_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("xfrm");
MODULE_ALIAS_NETDEV("xfrm0");
MODULE_AUTHOR("Steffen Klassert");
MODULE_DESCRIPTION("XFRM virtual interface");