xfrm_input.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * xfrm_input.c
  4. *
  5. * Changes:
  6. * YOSHIFUJI Hideaki @USAGI
  7. * Split up af-specific portion
  8. *
  9. */
  10. #include <linux/bottom_half.h>
  11. #include <linux/cache.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/slab.h>
  14. #include <linux/module.h>
  15. #include <linux/netdevice.h>
  16. #include <linux/percpu.h>
  17. #include <net/dst.h>
  18. #include <net/ip.h>
  19. #include <net/xfrm.h>
  20. #include <net/ip_tunnels.h>
  21. #include <net/ip6_tunnel.h>
  22. #include <net/dst_metadata.h>
  23. #include <net/hotdata.h>
  24. #include "xfrm_inout.h"
/* Per-CPU deferral context: packets queued here are later reinjected
 * into the stack from workqueue context (see xfrm_trans_reinject()).
 */
struct xfrm_trans_tasklet {
	struct work_struct work;
	spinlock_t queue_lock;		/* protects @queue */
	struct sk_buff_head queue;
};
/* State stashed in skb->cb while an skb sits on a per-CPU deferral
 * queue.  Size is checked against sizeof(skb->cb) with a BUILD_BUG_ON
 * in xfrm_trans_queue_net().
 */
struct xfrm_trans_cb {
	union {
		struct inet_skb_parm h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm h6;
#endif
	} header;
	/* continuation to run when the skb is dequeued */
	int (*finish)(struct net *net, struct sock *sk, struct sk_buff *skb);
	/* netns the skb belongs to; passed to @finish (sk is NULL there) */
	struct net *net;
};
#define XFRM_TRANS_SKB_CB(__skb) ((struct xfrm_trans_cb *)&((__skb)->cb[0]))

/* Serializes writers of xfrm_input_afinfo[]; readers use RCU. */
static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
/* Indexed by [is_ipip][family]; family is capped at AF_INET6. */
static struct xfrm_input_afinfo const __rcu *xfrm_input_afinfo[2][AF_INET6 + 1];

/* Dummy device + GRO cells used to reinject decapsulated packets. */
static struct gro_cells gro_cells;
static struct net_device xfrm_napi_dev;

static DEFINE_PER_CPU(struct xfrm_trans_tasklet, xfrm_trans_tasklet);
  46. int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo)
  47. {
  48. int err = 0;
  49. if (WARN_ON(afinfo->family > AF_INET6))
  50. return -EAFNOSUPPORT;
  51. spin_lock_bh(&xfrm_input_afinfo_lock);
  52. if (unlikely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family]))
  53. err = -EEXIST;
  54. else
  55. rcu_assign_pointer(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family], afinfo);
  56. spin_unlock_bh(&xfrm_input_afinfo_lock);
  57. return err;
  58. }
  59. EXPORT_SYMBOL(xfrm_input_register_afinfo);
  60. int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo)
  61. {
  62. int err = 0;
  63. spin_lock_bh(&xfrm_input_afinfo_lock);
  64. if (likely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family])) {
  65. if (unlikely(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family] != afinfo))
  66. err = -EINVAL;
  67. else
  68. RCU_INIT_POINTER(xfrm_input_afinfo[afinfo->is_ipip][afinfo->family], NULL);
  69. }
  70. spin_unlock_bh(&xfrm_input_afinfo_lock);
  71. synchronize_rcu();
  72. return err;
  73. }
  74. EXPORT_SYMBOL(xfrm_input_unregister_afinfo);
/* Look up the input afinfo for @family/@is_ipip.
 *
 * Locking contract: on success the RCU read lock is left HELD, and the
 * caller must rcu_read_unlock() once done with the returned pointer
 * (see xfrm_rcv_cb()).  On a NULL return the lock has already been
 * dropped here.
 */
static const struct xfrm_input_afinfo *xfrm_input_get_afinfo(u8 family, bool is_ipip)
{
	const struct xfrm_input_afinfo *afinfo;

	if (WARN_ON_ONCE(family > AF_INET6))
		return NULL;

	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_input_afinfo[is_ipip][family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();	/* drop only on failure; caller unlocks on success */
	return afinfo;
}
  86. static int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family, u8 protocol,
  87. int err)
  88. {
  89. bool is_ipip = (protocol == IPPROTO_IPIP || protocol == IPPROTO_IPV6);
  90. const struct xfrm_input_afinfo *afinfo;
  91. int ret;
  92. afinfo = xfrm_input_get_afinfo(family, is_ipip);
  93. if (!afinfo)
  94. return -EAFNOSUPPORT;
  95. ret = afinfo->callback(skb, protocol, err);
  96. rcu_read_unlock();
  97. return ret;
  98. }
  99. struct sec_path *secpath_set(struct sk_buff *skb)
  100. {
  101. struct sec_path *sp, *tmp = skb_ext_find(skb, SKB_EXT_SEC_PATH);
  102. sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
  103. if (!sp)
  104. return NULL;
  105. if (tmp) /* reused existing one (was COW'd if needed) */
  106. return sp;
  107. /* allocated new secpath */
  108. memset(sp->ovec, 0, sizeof(sp->ovec));
  109. sp->olen = 0;
  110. sp->len = 0;
  111. sp->verified_cnt = 0;
  112. return sp;
  113. }
  114. EXPORT_SYMBOL(secpath_set);
  115. /* Fetch spi and seq from ipsec header */
  116. int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq)
  117. {
  118. int offset, offset_seq;
  119. int hlen;
  120. switch (nexthdr) {
  121. case IPPROTO_AH:
  122. hlen = sizeof(struct ip_auth_hdr);
  123. offset = offsetof(struct ip_auth_hdr, spi);
  124. offset_seq = offsetof(struct ip_auth_hdr, seq_no);
  125. break;
  126. case IPPROTO_ESP:
  127. hlen = sizeof(struct ip_esp_hdr);
  128. offset = offsetof(struct ip_esp_hdr, spi);
  129. offset_seq = offsetof(struct ip_esp_hdr, seq_no);
  130. break;
  131. case IPPROTO_COMP:
  132. if (!pskb_may_pull(skb, sizeof(struct ip_comp_hdr)))
  133. return -EINVAL;
  134. *spi = htonl(ntohs(*(__be16 *)(skb_transport_header(skb) + 2)));
  135. *seq = 0;
  136. return 0;
  137. default:
  138. return 1;
  139. }
  140. if (!pskb_may_pull(skb, hlen))
  141. return -EINVAL;
  142. *spi = *(__be32 *)(skb_transport_header(skb) + offset);
  143. *seq = *(__be32 *)(skb_transport_header(skb) + offset_seq);
  144. return 0;
  145. }
  146. EXPORT_SYMBOL(xfrm_parse_spi);
/* Undo IPv4 BEET-mode encapsulation: strip the optional BEET pseudo
 * header and rebuild an inner IPv4 header from the state's selector
 * addresses.  Returns 0 on success, -EINVAL for a malformed pseudo
 * header.
 */
static int xfrm4_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph;
	int optlen = 0;
	int err = -EINVAL;

	skb->protocol = htons(ETH_P_IP);

	if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) {
		struct ip_beet_phdr *ph;
		int phlen;

		if (!pskb_may_pull(skb, sizeof(*ph)))
			goto out;

		ph = (struct ip_beet_phdr *)skb->data;

		phlen = sizeof(*ph) + ph->padlen;
		optlen = ph->hdrlen * 8 + (IPV4_BEET_PHMAXLEN - phlen);
		/* IP options must be 32-bit aligned and fit in the header */
		if (optlen < 0 || optlen & 3 || optlen > 250)
			goto out;

		XFRM_MODE_SKB_CB(skb)->protocol = ph->nexthdr;

		if (!pskb_may_pull(skb, phlen))
			goto out;
		/* drop the pseudo header; options (if any) stay in place */
		__skb_pull(skb, phlen);
	}

	skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	skb_mac_header_rebuild(skb);

	xfrm4_beet_make_header(skb);

	iph = ip_hdr(skb);

	iph->ihl += optlen / 4;
	iph->tot_len = htons(skb->len);
	/* BEET carries no inner addresses on the wire; restore them
	 * from the SA's selector.
	 */
	iph->daddr = x->sel.daddr.a4;
	iph->saddr = x->sel.saddr.a4;
	iph->check = 0;
	iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
	err = 0;
out:
	return err;
}
  183. static void ipip_ecn_decapsulate(struct sk_buff *skb)
  184. {
  185. struct iphdr *inner_iph = ipip_hdr(skb);
  186. if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
  187. IP_ECN_set_ce(inner_iph);
  188. }
  189. static int xfrm4_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
  190. {
  191. int err = -EINVAL;
  192. skb->protocol = htons(ETH_P_IP);
  193. if (!pskb_may_pull(skb, sizeof(struct iphdr)))
  194. goto out;
  195. err = skb_unclone(skb, GFP_ATOMIC);
  196. if (err)
  197. goto out;
  198. if (x->props.flags & XFRM_STATE_DECAP_DSCP)
  199. ipv4_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipip_hdr(skb));
  200. if (!(x->props.flags & XFRM_STATE_NOECN))
  201. ipip_ecn_decapsulate(skb);
  202. skb_reset_network_header(skb);
  203. skb_mac_header_rebuild(skb);
  204. if (skb->mac_len)
  205. eth_hdr(skb)->h_proto = skb->protocol;
  206. err = 0;
  207. out:
  208. return err;
  209. }
  210. static void ipip6_ecn_decapsulate(struct sk_buff *skb)
  211. {
  212. struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
  213. if (INET_ECN_is_ce(XFRM_MODE_SKB_CB(skb)->tos))
  214. IP6_ECN_set_ce(skb, inner_iph);
  215. }
  216. static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
  217. {
  218. int err = -EINVAL;
  219. skb->protocol = htons(ETH_P_IPV6);
  220. if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
  221. goto out;
  222. err = skb_unclone(skb, GFP_ATOMIC);
  223. if (err)
  224. goto out;
  225. if (x->props.flags & XFRM_STATE_DECAP_DSCP)
  226. ipv6_copy_dscp(XFRM_MODE_SKB_CB(skb)->tos, ipipv6_hdr(skb));
  227. if (!(x->props.flags & XFRM_STATE_NOECN))
  228. ipip6_ecn_decapsulate(skb);
  229. skb_reset_network_header(skb);
  230. skb_mac_header_rebuild(skb);
  231. if (skb->mac_len)
  232. eth_hdr(skb)->h_proto = skb->protocol;
  233. err = 0;
  234. out:
  235. return err;
  236. }
  237. static int xfrm6_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
  238. {
  239. struct ipv6hdr *ip6h;
  240. int size = sizeof(struct ipv6hdr);
  241. int err;
  242. skb->protocol = htons(ETH_P_IPV6);
  243. err = skb_cow_head(skb, size + skb->mac_len);
  244. if (err)
  245. goto out;
  246. __skb_push(skb, size);
  247. skb_reset_network_header(skb);
  248. skb_mac_header_rebuild(skb);
  249. xfrm6_beet_make_header(skb);
  250. ip6h = ipv6_hdr(skb);
  251. ip6h->payload_len = htons(skb->len - size);
  252. ip6h->daddr = x->sel.daddr.in6;
  253. ip6h->saddr = x->sel.saddr.in6;
  254. err = 0;
  255. out:
  256. return err;
  257. }
  258. /* Remove encapsulation header.
  259. *
  260. * The IP header will be moved over the top of the encapsulation
  261. * header.
  262. *
  263. * On entry, the transport header shall point to where the IP header
  264. * should be and the network header shall be set to where the IP
  265. * header currently is. skb->data shall point to the start of the
  266. * payload.
  267. */
  268. static int
  269. xfrm_inner_mode_encap_remove(struct xfrm_state *x,
  270. struct sk_buff *skb)
  271. {
  272. switch (x->props.mode) {
  273. case XFRM_MODE_BEET:
  274. switch (x->sel.family) {
  275. case AF_INET:
  276. return xfrm4_remove_beet_encap(x, skb);
  277. case AF_INET6:
  278. return xfrm6_remove_beet_encap(x, skb);
  279. }
  280. break;
  281. case XFRM_MODE_TUNNEL:
  282. switch (XFRM_MODE_SKB_CB(skb)->protocol) {
  283. case IPPROTO_IPIP:
  284. return xfrm4_remove_tunnel_encap(x, skb);
  285. case IPPROTO_IPV6:
  286. return xfrm6_remove_tunnel_encap(x, skb);
  287. break;
  288. }
  289. return -EINVAL;
  290. }
  291. WARN_ON_ONCE(1);
  292. return -EOPNOTSUPP;
  293. }
  294. static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
  295. {
  296. switch (x->props.family) {
  297. case AF_INET:
  298. xfrm4_extract_header(skb);
  299. break;
  300. case AF_INET6:
  301. xfrm6_extract_header(skb);
  302. break;
  303. default:
  304. WARN_ON_ONCE(1);
  305. return -EAFNOSUPPORT;
  306. }
  307. return xfrm_inner_mode_encap_remove(x, skb);
  308. }
  309. /* Remove encapsulation header.
  310. *
  311. * The IP header will be moved over the top of the encapsulation header.
  312. *
  313. * On entry, skb_transport_header() shall point to where the IP header
  314. * should be and skb_network_header() shall be set to where the IP header
  315. * currently is. skb->data shall point to the start of the payload.
  316. */
/* Transport-mode input for IPv4.
 *
 * On entry the transport header points at where the IP header should
 * end up and the network header at where it currently is; skb->data
 * points at the payload.  The IP header is slid down over the removed
 * IPsec header and tot_len is fixed up.  Always returns 0.
 */
static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	/* ihl = size of the IP header being moved */
	int ihl = skb->data - skb_transport_header(skb);

	if (skb->transport_header != skb->network_header) {
		memmove(skb_transport_header(skb),
			skb_network_header(skb), ihl);
		if (xo)
			/* remember the pre-move MAC header length for offload */
			xo->orig_mac_len =
				skb_mac_header_was_set(skb) ? skb_mac_header_len(skb) : 0;
		skb->network_header = skb->transport_header;
	}
	ip_hdr(skb)->tot_len = htons(skb->len + ihl);
	skb_reset_transport_header(skb);
	return 0;
}
/* Transport-mode input for IPv6: mirror of xfrm4_transport_input(),
 * fixing up payload_len instead of tot_len.  Returns 0, or
 * -EAFNOSUPPORT when the kernel is built without IPv6.
 */
static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct xfrm_offload *xo = xfrm_offload(skb);
	/* ihl = size of the IPv6 header (+ extensions) being moved */
	int ihl = skb->data - skb_transport_header(skb);

	if (skb->transport_header != skb->network_header) {
		memmove(skb_transport_header(skb),
			skb_network_header(skb), ihl);
		if (xo)
			/* remember the pre-move MAC header length for offload */
			xo->orig_mac_len =
				skb_mac_header_was_set(skb) ? skb_mac_header_len(skb) : 0;
		skb->network_header = skb->transport_header;
	}
	/* payload_len excludes the fixed IPv6 header itself */
	ipv6_hdr(skb)->payload_len = htons(skb->len + ihl -
					   sizeof(struct ipv6hdr));
	skb_reset_transport_header(skb);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}
  355. static int xfrm_inner_mode_input(struct xfrm_state *x,
  356. struct sk_buff *skb)
  357. {
  358. switch (x->props.mode) {
  359. case XFRM_MODE_BEET:
  360. case XFRM_MODE_TUNNEL:
  361. return xfrm_prepare_input(x, skb);
  362. case XFRM_MODE_TRANSPORT:
  363. if (x->props.family == AF_INET)
  364. return xfrm4_transport_input(x, skb);
  365. if (x->props.family == AF_INET6)
  366. return xfrm6_transport_input(x, skb);
  367. break;
  368. case XFRM_MODE_ROUTEOPTIMIZATION:
  369. WARN_ON_ONCE(1);
  370. break;
  371. default:
  372. WARN_ON_ONCE(1);
  373. break;
  374. }
  375. return -EOPNOTSUPP;
  376. }
/* Main IPsec receive path.
 *
 * @spi/@seq may arrive pre-parsed or as 0 (parsed here via
 * xfrm_parse_spi()); @nexthdr names the IPsec protocol and @encap_type
 * any UDP encapsulation.  encap_type == -1 marks asynchronous
 * resumption after crypto completed (see xfrm_input_resume()); GRO and
 * hardware-offloaded skbs also enter through the first branch, which
 * skips the state lookup because the state is already attached.
 *
 * The do/while loop walks nested transforms (transport-mode bundles),
 * appending one xfrm_state per iteration to the secpath; the "lock:"
 * and "resume:" labels are jump targets into the loop body for the
 * fast paths above.  Always returns 0 after consuming the skb, except
 * when async crypto is in flight (returns 0 with the skb owned by the
 * crypto layer) or on the non-decap exit, which propagates
 * transport_finish()'s result.
 */
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
{
	const struct xfrm_state_afinfo *afinfo;
	struct net *net = dev_net(skb->dev);
	int err;
	__be32 seq;
	__be32 seq_hi;
	struct xfrm_state *x = NULL;
	xfrm_address_t *daddr;
	u32 mark = skb->mark;
	unsigned int family = AF_UNSPEC;
	int decaps = 0;
	int async = 0;
	bool xfrm_gro = false;
	bool crypto_done = false;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	/* Fast path: async resume / GRO / offload already carry state */
	if (encap_type < 0 || (xo && (xo->flags & XFRM_GRO || encap_type == 0 ||
				      encap_type == UDP_ENCAP_ESPINUDP))) {
		x = xfrm_input_state(skb);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);

			/* drop the dev reference taken before async crypto */
			if (encap_type == -1)
				dev_put(skb->dev);
			goto drop;
		}

		family = x->props.family;

		/* An encap_type of -1 indicates async resumption. */
		if (encap_type == -1) {
			async = 1;
			seq = XFRM_SKB_CB(skb)->seq.input.low;
			goto resume;
		}
		/* GRO call */
		seq = XFRM_SPI_SKB_CB(skb)->seq;

		if (xo && (xo->flags & CRYPTO_DONE)) {
			crypto_done = true;
			family = XFRM_SPI_SKB_CB(skb)->family;

			if (!(xo->status & CRYPTO_SUCCESS)) {
				/* hardware crypto failed: audit ICV failures,
				 * then count and drop
				 */
				if (xo->status &
				    (CRYPTO_TRANSPORT_AH_AUTH_FAILED |
				     CRYPTO_TRANSPORT_ESP_AUTH_FAILED |
				     CRYPTO_TUNNEL_AH_AUTH_FAILED |
				     CRYPTO_TUNNEL_ESP_AUTH_FAILED)) {
					xfrm_audit_state_icvfail(x, skb,
								 x->type->proto);
					x->stats.integrity_failed++;
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				if (xo->status & CRYPTO_INVALID_PROTOCOL) {
					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
					goto drop;
				}

				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto drop;
			}

			if (xfrm_parse_spi(skb, nexthdr, &spi, &seq)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
				goto drop;
			}
		}

		goto lock;
	}

	family = XFRM_SPI_SKB_CB(skb)->family;

	/* if tunnel is present override skb->mark value with tunnel i_key */
	switch (family) {
	case AF_INET:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
		break;
	case AF_INET6:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
			mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
		break;
	}

	sp = secpath_set(skb);
	if (!sp) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
		goto drop;
	}

	seq = 0;
	if (!spi && xfrm_parse_spi(skb, nexthdr, &spi, &seq)) {
		secpath_reset(skb);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		goto drop;
	}

	daddr = (xfrm_address_t *)(skb_network_header(skb) +
				   XFRM_SPI_SKB_CB(skb)->daddroff);
	do {
		sp = skb_sec_path(skb);

		if (sp->len == XFRM_MAX_DEPTH) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
			goto drop;
		}

		x = xfrm_input_state_lookup(net, mark, daddr, spi, nexthdr, family);
		if (x == NULL) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			goto drop;
		}

		/* an output-direction SA must never match inbound traffic */
		if (unlikely(x->dir && x->dir != XFRM_SA_DIR_IN)) {
			secpath_reset(skb);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEDIRERROR);
			xfrm_audit_state_notfound(skb, family, spi, seq);
			xfrm_state_put(x);
			x = NULL;
			goto drop;
		}

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;

		skb_dst_force(skb);
		if (!skb_dst(skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
			goto drop;
		}

lock:
		spin_lock(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			if (x->km.state == XFRM_STATE_ACQ)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
			else
				XFRM_INC_STATS(net,
					       LINUX_MIB_XFRMINSTATEINVALID);
			goto drop_unlock;
		}

		/* the packet's encapsulation must match the SA's */
		if ((x->encap ? x->encap->encap_type : 0) != encap_type) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
			goto drop_unlock;
		}

		if (xfrm_replay_check(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		if (xfrm_state_check_expire(x)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEEXPIRED);
			goto drop_unlock;
		}

		spin_unlock(&x->lock);

		if (xfrm_tunnel_check(skb, x, family)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		seq_hi = htonl(xfrm_replay_seqhi(x, seq));

		XFRM_SKB_CB(skb)->seq.input.low = seq;
		XFRM_SKB_CB(skb)->seq.input.hi = seq_hi;

		/* keep the device alive across (possibly async) crypto;
		 * released at "resume"
		 */
		dev_hold(skb->dev);

		if (crypto_done)
			nexthdr = x->type_offload->input_tail(x, skb);
		else
			nexthdr = x->type->input(x, skb);

		/* async crypto in flight; processing re-enters at "resume" */
		if (nexthdr == -EINPROGRESS)
			return 0;
resume:
		dev_put(skb->dev);

		spin_lock(&x->lock);
		if (nexthdr < 0) {
			if (nexthdr == -EBADMSG) {
				xfrm_audit_state_icvfail(x, skb,
							 x->type->proto);
				x->stats.integrity_failed++;
			}
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR);
			goto drop_unlock;
		}

		/* only the first xfrm gets the encap type */
		encap_type = 0;

		if (xfrm_replay_recheck(x, skb, seq)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
			goto drop_unlock;
		}

		xfrm_replay_advance(x, seq);

		/* account lifetime usage under the state lock */
		x->curlft.bytes += skb->len;
		x->curlft.packets++;
		x->lastused = ktime_get_real_seconds();

		spin_unlock(&x->lock);

		XFRM_MODE_SKB_CB(skb)->protocol = nexthdr;

		if (xfrm_inner_mode_input(x, skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
			goto drop;
		}

		if (x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL) {
			decaps = 1;
			break;
		}

		/*
		 * We need the inner address.  However, we only get here for
		 * transport mode so the outer address is identical.
		 */
		daddr = &x->id.daddr;
		family = x->props.family;

		err = xfrm_parse_spi(skb, nexthdr, &spi, &seq);
		if (err < 0) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
			goto drop;
		}
		crypto_done = false;
	} while (!err);

	err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
	if (err)
		goto drop;

	nf_reset_ct(skb);

	if (decaps) {
		/* tunnel fully decapsulated: reinject via GRO cells */
		sp = skb_sec_path(skb);
		if (sp)
			sp->olen = 0;
		if (skb_valid_dst(skb))
			skb_dst_drop(skb);
		gro_cells_receive(&gro_cells, skb);
		return 0;
	} else {
		xo = xfrm_offload(skb);
		if (xo)
			xfrm_gro = xo->flags & XFRM_GRO;

		err = -EAFNOSUPPORT;
		rcu_read_lock();
		afinfo = xfrm_state_afinfo_get_rcu(x->props.family);
		if (likely(afinfo))
			err = afinfo->transport_finish(skb, xfrm_gro || async);
		rcu_read_unlock();
		if (xfrm_gro) {
			sp = skb_sec_path(skb);
			if (sp)
				sp->olen = 0;
			if (skb_valid_dst(skb))
				skb_dst_drop(skb);
			gro_cells_receive(&gro_cells, skb);
			return err;
		}

		return err;
	}

drop_unlock:
	spin_unlock(&x->lock);
drop:
	xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(xfrm_input);
/* Re-enter xfrm_input() after asynchronous crypto completed.
 * encap_type == -1 signals resumption; @nexthdr carries the crypto
 * result (or a negative error) from the async callback.
 */
int xfrm_input_resume(struct sk_buff *skb, int nexthdr)
{
	return xfrm_input(skb, nexthdr, 0, -1);
}
EXPORT_SYMBOL(xfrm_input_resume);
  627. static void xfrm_trans_reinject(struct work_struct *work)
  628. {
  629. struct xfrm_trans_tasklet *trans = container_of(work, struct xfrm_trans_tasklet, work);
  630. struct sk_buff_head queue;
  631. struct sk_buff *skb;
  632. __skb_queue_head_init(&queue);
  633. spin_lock_bh(&trans->queue_lock);
  634. skb_queue_splice_init(&trans->queue, &queue);
  635. spin_unlock_bh(&trans->queue_lock);
  636. local_bh_disable();
  637. while ((skb = __skb_dequeue(&queue)))
  638. XFRM_TRANS_SKB_CB(skb)->finish(XFRM_TRANS_SKB_CB(skb)->net,
  639. NULL, skb);
  640. local_bh_enable();
  641. }
  642. int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
  643. int (*finish)(struct net *, struct sock *,
  644. struct sk_buff *))
  645. {
  646. struct xfrm_trans_tasklet *trans;
  647. trans = this_cpu_ptr(&xfrm_trans_tasklet);
  648. if (skb_queue_len(&trans->queue) >= READ_ONCE(net_hotdata.max_backlog))
  649. return -ENOBUFS;
  650. BUILD_BUG_ON(sizeof(struct xfrm_trans_cb) > sizeof(skb->cb));
  651. XFRM_TRANS_SKB_CB(skb)->finish = finish;
  652. XFRM_TRANS_SKB_CB(skb)->net = net;
  653. spin_lock_bh(&trans->queue_lock);
  654. __skb_queue_tail(&trans->queue, skb);
  655. spin_unlock_bh(&trans->queue_lock);
  656. schedule_work(&trans->work);
  657. return 0;
  658. }
  659. EXPORT_SYMBOL(xfrm_trans_queue_net);
/* As xfrm_trans_queue_net(), with the netns derived from skb->dev. */
int xfrm_trans_queue(struct sk_buff *skb,
		     int (*finish)(struct net *, struct sock *,
				   struct sk_buff *))
{
	return xfrm_trans_queue_net(dev_net(skb->dev), skb, finish);
}
EXPORT_SYMBOL(xfrm_trans_queue);
  667. void __init xfrm_input_init(void)
  668. {
  669. int err;
  670. int i;
  671. init_dummy_netdev(&xfrm_napi_dev);
  672. err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
  673. if (err)
  674. gro_cells.cells = NULL;
  675. for_each_possible_cpu(i) {
  676. struct xfrm_trans_tasklet *trans;
  677. trans = &per_cpu(xfrm_trans_tasklet, i);
  678. spin_lock_init(&trans->queue_lock);
  679. __skb_queue_head_init(&trans->queue);
  680. INIT_WORK(&trans->work, xfrm_trans_reinject);
  681. }
  682. }