// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/ah.c.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/hash.h>
#include <crypto/utils.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/ah.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/xfrm.h>

#define IPV6HDR_BASELEN 8

struct tmp_ext {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
        struct in6_addr saddr;
#endif
        struct in6_addr daddr;
        char hdrs[];
};

struct ah_skb_cb {
        struct xfrm_skb_cb xfrm;
        void *tmp;
};

#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
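
/*
 * Scratch buffer shared by the input and output paths.  ah_alloc_tmp()
 * lays out a single allocation as:
 *
 *	[saved header bytes][ICV][struct ahash_request][scatterlist array]
 *
 * with the request and scatterlist aligned for the crypto layer; the
 * ah_tmp_*() and ah_req_sg() helpers below return pointers into it.
 */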
static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
                          unsigned int size)
{
        unsigned int len;

        len = size + crypto_ahash_digestsize(ahash);
        len = ALIGN(len, crypto_tfm_ctx_alignment());
        len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
        len = ALIGN(len, __alignof__(struct scatterlist));
        len += sizeof(struct scatterlist) * nfrags;

        return kmalloc(len, GFP_ATOMIC);
}

static inline struct tmp_ext *ah_tmp_ext(void *base)
{
        return base + IPV6HDR_BASELEN;
}

static inline u8 *ah_tmp_auth(u8 *tmp, unsigned int offset)
{
        return tmp + offset;
}

static inline u8 *ah_tmp_icv(void *tmp, unsigned int offset)
{
        return tmp + offset;
}

static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
                                               u8 *icv)
{
        struct ahash_request *req;

        req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
                                crypto_tfm_ctx_alignment());

        ahash_request_set_tfm(req, ahash);

        return req;
}

static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
                                            struct ahash_request *req)
{
        return (void *)ALIGN((unsigned long)(req + 1) +
                             crypto_ahash_reqsize(ahash),
                             __alignof__(struct scatterlist));
}
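
/*
 * Walk the TLV-encoded options of a Hop-by-Hop or Destination Options
 * header and zero every option whose "mutable" bit (0x20 in the option
 * type) is set, so the ICV is computed over a stable image of the
 * header.  Returns false if the options area is malformed.
 */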
static bool zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
{
        u8 *opt = (u8 *)opthdr;
        int len = ipv6_optlen(opthdr);
        int off = 0;
        int optlen = 0;

        off += 2;
        len -= 2;

        while (len > 0) {
                switch (opt[off]) {
                case IPV6_TLV_PAD1:
                        optlen = 1;
                        break;
                default:
                        if (len < 2)
                                goto bad;
                        optlen = opt[off+1]+2;
                        if (len < optlen)
                                goto bad;
                        if (opt[off] & 0x20)
                                memset(&opt[off+2], 0, opt[off+1]);
                        break;
                }

                off += optlen;
                len -= optlen;
        }

        if (len == 0)
                return true;

bad:
        return false;
}

#if IS_ENABLED(CONFIG_IPV6_MIP6)
/**
 * ipv6_rearrange_destopt - rearrange IPv6 destination options header
 * @iph: IPv6 header
 * @destopt: destination options header
 */
static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt)
{
        u8 *opt = (u8 *)destopt;
        int len = ipv6_optlen(destopt);
        int off = 0;
        int optlen = 0;

        off += 2;
        len -= 2;

        while (len > 0) {
                switch (opt[off]) {
                case IPV6_TLV_PAD1:
                        optlen = 1;
                        break;
                default:
                        if (len < 2)
                                goto bad;
                        optlen = opt[off+1]+2;
                        if (len < optlen)
                                goto bad;

                        /* Rearrange the source address in @iph and the
                         * addresses in the home address option for the
                         * final source.
                         * See 11.3.2 of RFC 3775 for details.
                         */
                        if (opt[off] == IPV6_TLV_HAO) {
                                struct ipv6_destopt_hao *hao;

                                hao = (struct ipv6_destopt_hao *)&opt[off];
                                if (hao->length != sizeof(hao->addr)) {
                                        net_warn_ratelimited("destopt hao: invalid header length: %u\n",
                                                             hao->length);
                                        goto bad;
                                }
                                swap(hao->addr, iph->saddr);
                        }
                        break;
                }

                off += optlen;
                len -= optlen;
        }
        /* Note: ok if len == 0 */

bad:
        return;
}
#else
static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt) {}
#endif

/**
 * ipv6_rearrange_rthdr - rearrange IPv6 routing header
 * @iph: IPv6 header
 * @rthdr: routing header
 *
 * Rearrange the destination address in @iph and the addresses in @rthdr
 * so that they appear in the order they will at the final destination.
 * See Appendix A2 of RFC 2402 for details.
 */
static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr)
{
        int segments, segments_left;
        struct in6_addr *addrs;
        struct in6_addr final_addr;

        segments_left = rthdr->segments_left;
        if (segments_left == 0)
                return;
        rthdr->segments_left = 0;

        /* The value of rthdr->hdrlen has been verified either by the system
         * call if it is locally generated, or by ipv6_rthdr_rcv() for incoming
         * packets.  So we can assume that it is even and that segments is
         * greater than or equal to segments_left.
         *
         * For the same reason we can assume that this option is of type 0.
         */
        segments = rthdr->hdrlen >> 1;

        addrs = ((struct rt0_hdr *)rthdr)->addr;
        final_addr = addrs[segments - 1];

        addrs += segments - segments_left;
        memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs));

        addrs[0] = iph->daddr;
        iph->daddr = final_addr;
}
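
/*
 * Walk the extension headers that precede AH (Hop-by-Hop, Destination
 * Options, Routing) and put them into the form they will have at the
 * final destination: mutable options are zeroed and the routing header
 * and destination address are rearranged, as required for ICV
 * computation.  The walk stops at the first header it does not handle.
 */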
static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
{
        union {
                struct ipv6hdr *iph;
                struct ipv6_opt_hdr *opth;
                struct ipv6_rt_hdr *rth;
                char *raw;
        } exthdr = { .iph = iph };
        char *end = exthdr.raw + len;
        int nexthdr = iph->nexthdr;

        exthdr.iph++;

        while (exthdr.raw < end) {
                switch (nexthdr) {
                case NEXTHDR_DEST:
                        if (dir == XFRM_POLICY_OUT)
                                ipv6_rearrange_destopt(iph, exthdr.opth);
                        fallthrough;
                case NEXTHDR_HOP:
                        if (!zero_out_mutable_opts(exthdr.opth)) {
                                net_dbg_ratelimited("overrun %sopts\n",
                                                    nexthdr == NEXTHDR_HOP ?
                                                    "hop" : "dest");
                                return -EINVAL;
                        }
                        break;

                case NEXTHDR_ROUTING:
                        ipv6_rearrange_rthdr(iph, exthdr.rth);
                        break;

                default:
                        return 0;
                }

                nexthdr = exthdr.opth->nexthdr;
                exthdr.raw += ipv6_optlen(exthdr.opth);
        }

        return 0;
}
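
/*
 * Completion callback for an asynchronous digest on the output path:
 * copy the computed ICV into the AH header, restore the mutable header
 * fields saved in the scratch buffer, free the buffer and resume xfrm
 * output processing.
 */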
static void ah6_output_done(void *data, int err)
{
        int extlen;
        u8 *iph_base;
        u8 *icv;
        struct sk_buff *skb = data;
        struct xfrm_state *x = skb_dst(skb)->xfrm;
        struct ah_data *ahp = x->data;
        struct ipv6hdr *top_iph = ipv6_hdr(skb);
        struct ip_auth_hdr *ah = ip_auth_hdr(skb);
        struct tmp_ext *iph_ext;

        extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
        if (extlen)
                extlen += sizeof(*iph_ext);

        iph_base = AH_SKB_CB(skb)->tmp;
        iph_ext = ah_tmp_ext(iph_base);
        icv = ah_tmp_icv(iph_ext, extlen);

        memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
        memcpy(top_iph, iph_base, IPV6HDR_BASELEN);

        if (extlen) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
                memcpy(&top_iph->saddr, iph_ext, extlen);
#else
                memcpy(&top_iph->daddr, iph_ext, extlen);
#endif
        }

        kfree(AH_SKB_CB(skb)->tmp);
        xfrm_output_resume(skb->sk, skb, err);
}
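
/*
 * Output transform: save the mutable parts of the IPv6 header (and any
 * extension headers) into a scratch buffer, zero or rearrange them in
 * place, build the AH header, and compute the ICV over the packet, with
 * the high-order sequence number bits appended when extended sequence
 * numbers (ESN) are in use.  On synchronous completion the saved fields
 * are restored here; otherwise ah6_output_done() finishes the job.
 */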
static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
{
        int err;
        int nfrags;
        int extlen;
        u8 *iph_base;
        u8 *icv;
        u8 nexthdr;
        struct sk_buff *trailer;
        struct crypto_ahash *ahash;
        struct ahash_request *req;
        struct scatterlist *sg;
        struct ipv6hdr *top_iph;
        struct ip_auth_hdr *ah;
        struct ah_data *ahp;
        struct tmp_ext *iph_ext;
        int seqhi_len = 0;
        __be32 *seqhi;
        int sglists = 0;
        struct scatterlist *seqhisg;

        ahp = x->data;
        ahash = ahp->ahash;

        err = skb_cow_data(skb, 0, &trailer);
        if (err < 0)
                goto out;
        nfrags = err;

        skb_push(skb, -skb_network_offset(skb));
        extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
        if (extlen)
                extlen += sizeof(*iph_ext);

        if (x->props.flags & XFRM_STATE_ESN) {
                sglists = 1;
                seqhi_len = sizeof(*seqhi);
        }
        err = -ENOMEM;
        iph_base = ah_alloc_tmp(ahash, nfrags + sglists, IPV6HDR_BASELEN +
                                extlen + seqhi_len);
        if (!iph_base)
                goto out;

        iph_ext = ah_tmp_ext(iph_base);
        seqhi = (__be32 *)((char *)iph_ext + extlen);
        icv = ah_tmp_icv(seqhi, seqhi_len);
        req = ah_tmp_req(ahash, icv);
        sg = ah_req_sg(ahash, req);
        seqhisg = sg + nfrags;

        ah = ip_auth_hdr(skb);
        memset(ah->auth_data, 0, ahp->icv_trunc_len);

        top_iph = ipv6_hdr(skb);
        top_iph->payload_len = htons(skb->len - sizeof(*top_iph));

        nexthdr = *skb_mac_header(skb);
        *skb_mac_header(skb) = IPPROTO_AH;

        /* When there are no extension headers, we only need to save the first
         * 8 bytes of the base IP header.
         */
        memcpy(iph_base, top_iph, IPV6HDR_BASELEN);

        if (extlen) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
                memcpy(iph_ext, &top_iph->saddr, extlen);
#else
                memcpy(iph_ext, &top_iph->daddr, extlen);
#endif
                err = ipv6_clear_mutable_options(top_iph,
                                                 extlen - sizeof(*iph_ext) +
                                                 sizeof(*top_iph),
                                                 XFRM_POLICY_OUT);
                if (err)
                        goto out_free;
        }

        ah->nexthdr = nexthdr;

        top_iph->priority = 0;
        top_iph->flow_lbl[0] = 0;
        top_iph->flow_lbl[1] = 0;
        top_iph->flow_lbl[2] = 0;
        top_iph->hop_limit = 0;

        ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;

        ah->reserved = 0;
        ah->spi = x->id.spi;
        ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

        sg_init_table(sg, nfrags + sglists);
        err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
        if (unlikely(err < 0))
                goto out_free;

        if (x->props.flags & XFRM_STATE_ESN) {
                /* Attach seqhi sg right after packet payload */
                *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
                sg_set_buf(seqhisg, seqhi, seqhi_len);
        }
        ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
        ahash_request_set_callback(req, 0, ah6_output_done, skb);

        AH_SKB_CB(skb)->tmp = iph_base;

        err = crypto_ahash_digest(req);
        if (err) {
                if (err == -EINPROGRESS)
                        goto out;

                if (err == -ENOSPC)
                        err = NET_XMIT_DROP;
                goto out_free;
        }

        memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
        memcpy(top_iph, iph_base, IPV6HDR_BASELEN);

        if (extlen) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
                memcpy(&top_iph->saddr, iph_ext, extlen);
#else
                memcpy(&top_iph->daddr, iph_ext, extlen);
#endif
        }

out_free:
        kfree(iph_base);
out:
        return err;
}
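
/*
 * Completion callback for an asynchronous digest on the input path:
 * compare the computed ICV against the value received in the AH header,
 * and on success strip AH by copying the saved IPv6 header back over it
 * before resuming xfrm input processing.
 */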
static void ah6_input_done(void *data, int err)
{
        u8 *auth_data;
        u8 *icv;
        u8 *work_iph;
        struct sk_buff *skb = data;
        struct xfrm_state *x = xfrm_input_state(skb);
        struct ah_data *ahp = x->data;
        struct ip_auth_hdr *ah = ip_auth_hdr(skb);
        int hdr_len = skb_network_header_len(skb);
        int ah_hlen = ipv6_authlen(ah);

        if (err)
                goto out;

        work_iph = AH_SKB_CB(skb)->tmp;
        auth_data = ah_tmp_auth(work_iph, hdr_len);
        icv = ah_tmp_icv(auth_data, ahp->icv_trunc_len);

        err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
        if (err)
                goto out;

        err = ah->nexthdr;

        skb->network_header += ah_hlen;
        memcpy(skb_network_header(skb), work_iph, hdr_len);
        __skb_pull(skb, ah_hlen + hdr_len);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                skb_reset_transport_header(skb);
        else
                skb_set_transport_header(skb, -hdr_len);
out:
        kfree(AH_SKB_CB(skb)->tmp);
        xfrm_input_resume(skb, err);
}

static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
{
        /*
         * Before AH processing:
         * [IPv6][Ext1][Ext2][AH][Dest][Payload]
         * |<-------------->| hdr_len
         *
         * To erase AH:
         * Keep a copy of the cleared headers.  After AH processing, move
         * skb->network_header forward by the AH header length and copy the
         * saved hdr_len bytes back over it.  If a destination options header
         * follows AH, it ends up right after [Ext2].
         *
         * |<>|[IPv6][Ext1][Ext2][Dest][Payload]
         * Afterwards there is a gap of the AH header's size in front of the
         * IPv6 header.
         */
        u8 *auth_data;
        u8 *icv;
        u8 *work_iph;
        struct sk_buff *trailer;
        struct crypto_ahash *ahash;
        struct ahash_request *req;
        struct scatterlist *sg;
        struct ip_auth_hdr *ah;
        struct ipv6hdr *ip6h;
        struct ah_data *ahp;
        u16 hdr_len;
        u16 ah_hlen;
        int nexthdr;
        int nfrags;
        int err = -ENOMEM;
        int seqhi_len = 0;
        __be32 *seqhi;
        int sglists = 0;
        struct scatterlist *seqhisg;

        if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
                goto out;

        /* We are going to _remove_ AH header to keep sockets happy,
         * so... Later this can change. */
        if (skb_unclone(skb, GFP_ATOMIC))
                goto out;

        skb->ip_summed = CHECKSUM_NONE;

        hdr_len = skb_network_header_len(skb);
        ah = (struct ip_auth_hdr *)skb->data;
        ahp = x->data;
        ahash = ahp->ahash;

        nexthdr = ah->nexthdr;
        ah_hlen = ipv6_authlen(ah);

        if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
            ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
                goto out;

        if (!pskb_may_pull(skb, ah_hlen))
                goto out;

        err = skb_cow_data(skb, 0, &trailer);
        if (err < 0)
                goto out;
        nfrags = err;

        ah = (struct ip_auth_hdr *)skb->data;
        ip6h = ipv6_hdr(skb);

        skb_push(skb, hdr_len);

        if (x->props.flags & XFRM_STATE_ESN) {
                sglists = 1;
                seqhi_len = sizeof(*seqhi);
        }

        work_iph = ah_alloc_tmp(ahash, nfrags + sglists, hdr_len +
                                ahp->icv_trunc_len + seqhi_len);
        if (!work_iph) {
                err = -ENOMEM;
                goto out;
        }

        auth_data = ah_tmp_auth((u8 *)work_iph, hdr_len);
        seqhi = (__be32 *)(auth_data + ahp->icv_trunc_len);
        icv = ah_tmp_icv(seqhi, seqhi_len);
        req = ah_tmp_req(ahash, icv);
        sg = ah_req_sg(ahash, req);
        seqhisg = sg + nfrags;

        memcpy(work_iph, ip6h, hdr_len);
        memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
        memset(ah->auth_data, 0, ahp->icv_trunc_len);

        err = ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN);
        if (err)
                goto out_free;

        ip6h->priority = 0;
        ip6h->flow_lbl[0] = 0;
        ip6h->flow_lbl[1] = 0;
        ip6h->flow_lbl[2] = 0;
        ip6h->hop_limit = 0;

        sg_init_table(sg, nfrags + sglists);
        err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
        if (unlikely(err < 0))
                goto out_free;

        if (x->props.flags & XFRM_STATE_ESN) {
                /* Attach seqhi sg right after packet payload */
                *seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
                sg_set_buf(seqhisg, seqhi, seqhi_len);
        }

        ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
        ahash_request_set_callback(req, 0, ah6_input_done, skb);

        AH_SKB_CB(skb)->tmp = work_iph;

        err = crypto_ahash_digest(req);
        if (err) {
                if (err == -EINPROGRESS)
                        goto out;

                goto out_free;
        }

        err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
        if (err)
                goto out_free;

        skb->network_header += ah_hlen;
        memcpy(skb_network_header(skb), work_iph, hdr_len);
        __skb_pull(skb, ah_hlen + hdr_len);

        if (x->props.mode == XFRM_MODE_TUNNEL)
                skb_reset_transport_header(skb);
        else
                skb_set_transport_header(skb, -hdr_len);

        err = nexthdr;

out_free:
        kfree(work_iph);
out:
        return err;
}
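
/*
 * ICMPv6 error handler: for a Packet Too Big or redirect that refers to
 * an AH packet we originated, look up the state by SPI and destination
 * address and update the cached route (PMTU or next hop) accordingly.
 */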
static int ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                   u8 type, u8 code, int offset, __be32 info)
{
        struct net *net = dev_net(skb->dev);
        struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
        struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+offset);
        struct xfrm_state *x;

        if (type != ICMPV6_PKT_TOOBIG &&
            type != NDISC_REDIRECT)
                return 0;

        x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6);
        if (!x)
                return 0;

        if (type == NDISC_REDIRECT)
                ip6_redirect(skb, net, skb->dev->ifindex, 0,
                             sock_net_uid(net, NULL));
        else
                ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
        xfrm_state_put(x);

        return 0;
}
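
/*
 * State initialisation: allocate the ahash transform named by the AUTH
 * algorithm, set its key, cross-check the ICV sizes against the xfrm
 * algorithm description, and compute the per-packet header length that
 * AH adds (plus an IPv6 header in tunnel mode).
 */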
static int ah6_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
        struct ah_data *ahp = NULL;
        struct xfrm_algo_desc *aalg_desc;
        struct crypto_ahash *ahash;

        if (!x->aalg) {
                NL_SET_ERR_MSG(extack, "AH requires a state with an AUTH algorithm");
                goto error;
        }

        if (x->encap) {
                NL_SET_ERR_MSG(extack, "AH is not compatible with encapsulation");
                goto error;
        }

        ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
        if (!ahp)
                return -ENOMEM;

        ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
        if (IS_ERR(ahash)) {
                NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
                goto error;
        }

        ahp->ahash = ahash;
        if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
                                (x->aalg->alg_key_len + 7) / 8)) {
                NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
                goto error;
        }

        /*
         * Lookup the algorithm description maintained by xfrm_algo,
         * verify crypto transform properties, and store information
         * we need for AH processing.  This lookup cannot fail here
         * after a successful crypto_alloc_ahash().
         */
        aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
        BUG_ON(!aalg_desc);

        if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
            crypto_ahash_digestsize(ahash)) {
                NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
                goto error;
        }

        ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
        ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;

        x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
                                          ahp->icv_trunc_len);
        switch (x->props.mode) {
        case XFRM_MODE_BEET:
        case XFRM_MODE_TRANSPORT:
                break;
        case XFRM_MODE_TUNNEL:
                x->props.header_len += sizeof(struct ipv6hdr);
                break;
        default:
                NL_SET_ERR_MSG(extack, "Invalid mode requested for AH, must be one of TRANSPORT, TUNNEL, BEET");
                goto error;
        }
        x->data = ahp;

        return 0;

error:
        if (ahp) {
                crypto_free_ahash(ahp->ahash);
                kfree(ahp);
        }
        return -EINVAL;
}

static void ah6_destroy(struct xfrm_state *x)
{
        struct ah_data *ahp = x->data;

        if (!ahp)
                return;

        crypto_free_ahash(ahp->ahash);
        kfree(ahp);
}

static int ah6_rcv_cb(struct sk_buff *skb, int err)
{
        return 0;
}
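
/*
 * Glue that plugs AH into the xfrm framework: ah6_type supplies the
 * per-state input/output/init/destroy operations for IPPROTO_AH, while
 * ah6_protocol registers the receive and ICMPv6 error handlers for the
 * AH protocol number on IPv6.
 */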
static const struct xfrm_type ah6_type = {
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_AH,
        .flags          = XFRM_TYPE_REPLAY_PROT,
        .init_state     = ah6_init_state,
        .destructor     = ah6_destroy,
        .input          = ah6_input,
        .output         = ah6_output,
};

static struct xfrm6_protocol ah6_protocol = {
        .handler        = xfrm6_rcv,
        .input_handler  = xfrm_input,
        .cb_handler     = ah6_rcv_cb,
        .err_handler    = ah6_err,
        .priority       = 0,
};

static int __init ah6_init(void)
{
        if (xfrm_register_type(&ah6_type, AF_INET6) < 0) {
                pr_info("%s: can't add xfrm type\n", __func__);
                return -EAGAIN;
        }

        if (xfrm6_protocol_register(&ah6_protocol, IPPROTO_AH) < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&ah6_type, AF_INET6);
                return -EAGAIN;
        }

        return 0;
}

static void __exit ah6_fini(void)
{
        if (xfrm6_protocol_deregister(&ah6_protocol, IPPROTO_AH) < 0)
                pr_info("%s: can't remove protocol\n", __func__);

        xfrm_unregister_type(&ah6_type, AF_INET6);
}

module_init(ah6_init);
module_exit(ah6_fini);

MODULE_DESCRIPTION("IPv6 AH transformation helpers");
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_AH);