// SPDX-License-Identifier: GPL-2.0
/*
 * Adiantum length-preserving encryption mode
 *
 * Copyright 2018 Google LLC
 */

/*
 * Adiantum is a tweakable, length-preserving encryption mode designed for fast
 * and secure disk encryption, especially on CPUs without dedicated crypto
 * instructions.  Adiantum encrypts each sector using the XChaCha12 stream
 * cipher, two passes of an ε-almost-∆-universal (ε-∆U) hash function based on
 * NH and Poly1305, and an invocation of the AES-256 block cipher on a single
 * 16-byte block.  See the paper for details:
 *
 *	Adiantum: length-preserving encryption for entry-level processors
 *	(https://eprint.iacr.org/2018/720.pdf)
 *
 * For flexibility, this implementation also allows other ciphers:
 *
 *	- Stream cipher: XChaCha12 or XChaCha20
 *	- Block cipher: any with a 128-bit block size and 256-bit key
 *
 * This implementation doesn't currently allow other ε-∆U hash functions, i.e.
 * HPolyC is not supported.  This is because Adiantum is ~20% faster than
 * HPolyC but still provably as secure, and also the ε-∆U hash function of
 * HBSH is formally defined to take two inputs (tweak, message) which makes it
 * difficult to wrap with the crypto_shash API.  Rather, some details need to
 * be handled here.  Nevertheless, if needed in the future, support for other
 * ε-∆U hash functions could be added here.
 */
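
/*
 * For orientation, a sketch of the encryption flow implemented below (this
 * matches the "first hash step" and "second hash step" comments in
 * adiantum_crypt() and adiantum_finish(); see the paper for the formal
 * definition).  The plaintext P is split into P_L || P_R with |P_R| = 128
 * bits:
 *
 *	P_M = P_R + H_{K_H}(T, P_L)	(first hash step; + in Z/(2^{128}Z))
 *	C_M = E_{K_E}(P_M)		(one block cipher invocation)
 *	C_L = P_L XOR XChaCha keystream	(keyed by K_S, nonce built from C_M)
 *	C_R = C_M - H_{K_H}(T, C_L)	(second hash step)
 *
 * The ciphertext is C_L || C_R; decryption runs the same steps in reverse.
 */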
#include <crypto/b128ops.h>
#include <crypto/chacha.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
#include <crypto/internal/skcipher.h>
#include <crypto/nhpoly1305.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
/*
 * Size of the right-hand part of the input data, in bytes; this is also the
 * block cipher's block size and the size of the hash function's output.
 */
#define BLOCKCIPHER_BLOCK_SIZE		16
/* Size of the block cipher key (K_E) in bytes */
#define BLOCKCIPHER_KEY_SIZE		32

/* Size of the hash key (K_H) in bytes */
#define HASH_KEY_SIZE		(POLY1305_BLOCK_SIZE + NHPOLY1305_KEY_SIZE)

/*
 * The specification allows variable-length tweaks, but Linux's crypto API
 * currently only allows algorithms to support a single length.  The "natural"
 * tweak length for Adiantum is 16, since that fits into one Poly1305 block
 * for the best performance.  But longer tweaks are useful for fscrypt, to
 * avoid needing to derive per-file keys.  So instead we use two blocks, or
 * 32 bytes.
 */
#define TWEAK_SIZE		32
struct adiantum_instance_ctx {
	struct crypto_skcipher_spawn streamcipher_spawn;
	struct crypto_cipher_spawn blockcipher_spawn;
	struct crypto_shash_spawn hash_spawn;
};

struct adiantum_tfm_ctx {
	struct crypto_skcipher *streamcipher;
	struct crypto_cipher *blockcipher;
	struct crypto_shash *hash;
	struct poly1305_core_key header_hash_key;
};
struct adiantum_request_ctx {

	/*
	 * Buffer for right-hand part of data, i.e.
	 *
	 *    P_R => P_M => C_M => C_R when encrypting, or
	 *    C_R => C_M => P_M => P_R when decrypting.
	 *
	 * Also used to build the IV for the stream cipher.
	 */
	union {
		u8 bytes[XCHACHA_IV_SIZE];
		__le32 words[XCHACHA_IV_SIZE / sizeof(__le32)];
		le128 bignum;	/* interpret as element of Z/(2^{128}Z) */
	} rbuf;

	bool enc; /* true if encrypting, false if decrypting */

	/*
	 * The result of the Poly1305 ε-∆U hash function applied to
	 * (bulk length, tweak)
	 */
	le128 header_hash;

	/* Sub-requests, must be last */
	union {
		struct shash_desc hash_desc;
		struct skcipher_request streamcipher_req;
	} u;
};
/*
 * Given the XChaCha stream cipher key K_S, derive the block cipher key K_E
 * and the hash key K_H as follows:
 *
 *     K_E || K_H || ... = XChaCha(key=K_S, nonce=1||0^191)
 *
 * Note that this denotes using bits from the XChaCha keystream, which here we
 * get indirectly by encrypting a buffer containing all 0's.
 */
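
/*
 * Layout of the derived keystream bytes, as consumed by the keyp walk in
 * adiantum_setkey() below:
 *
 *	bytes  0..31:	block cipher key K_E (BLOCKCIPHER_KEY_SIZE)
 *	bytes 32..47:	Poly1305 header hash key K_T (POLY1305_BLOCK_SIZE)
 *	bytes 48.. :	NHPoly1305 key (NHPOLY1305_KEY_SIZE bytes)
 */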
static int adiantum_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct {
		u8 iv[XCHACHA_IV_SIZE];
		u8 derived_keys[BLOCKCIPHER_KEY_SIZE + HASH_KEY_SIZE];
		struct scatterlist sg;
		struct crypto_wait wait;
		struct skcipher_request req; /* must be last */
	} *data;
	u8 *keyp;
	int err;

	/* Set the stream cipher key (K_S) */
	crypto_skcipher_clear_flags(tctx->streamcipher, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(tctx->streamcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(tctx->streamcipher, key, keylen);
	if (err)
		return err;

	/* Derive the subkeys */
	data = kzalloc(sizeof(*data) +
		       crypto_skcipher_reqsize(tctx->streamcipher), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->iv[0] = 1;
	sg_init_one(&data->sg, data->derived_keys, sizeof(data->derived_keys));
	crypto_init_wait(&data->wait);
	skcipher_request_set_tfm(&data->req, tctx->streamcipher);
	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &data->wait);
	skcipher_request_set_crypt(&data->req, &data->sg, &data->sg,
				   sizeof(data->derived_keys), data->iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req), &data->wait);
	if (err)
		goto out;
	keyp = data->derived_keys;

	/* Set the block cipher key (K_E) */
	crypto_cipher_clear_flags(tctx->blockcipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(tctx->blockcipher,
				crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(tctx->blockcipher, keyp,
				   BLOCKCIPHER_KEY_SIZE);
	if (err)
		goto out;
	keyp += BLOCKCIPHER_KEY_SIZE;

	/* Set the hash key (K_H) */
	poly1305_core_setkey(&tctx->header_hash_key, keyp);
	keyp += POLY1305_BLOCK_SIZE;

	crypto_shash_clear_flags(tctx->hash, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(tctx->hash, crypto_skcipher_get_flags(tfm) &
			       CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(tctx->hash, keyp, NHPOLY1305_KEY_SIZE);
	keyp += NHPOLY1305_KEY_SIZE;
	WARN_ON(keyp != &data->derived_keys[ARRAY_SIZE(data->derived_keys)]);

out:
	kfree_sensitive(data);
	return err;
}
/* Addition in Z/(2^{128}Z) */
static inline void le128_add(le128 *r, const le128 *v1, const le128 *v2)
{
	u64 x = le64_to_cpu(v1->b);
	u64 y = le64_to_cpu(v2->b);

	r->b = cpu_to_le64(x + y);
	r->a = cpu_to_le64(le64_to_cpu(v1->a) + le64_to_cpu(v2->a) +
			   (x + y < x));
}

/* Subtraction in Z/(2^{128}Z) */
static inline void le128_sub(le128 *r, const le128 *v1, const le128 *v2)
{
	u64 x = le64_to_cpu(v1->b);
	u64 y = le64_to_cpu(v2->b);

	r->b = cpu_to_le64(x - y);
	r->a = cpu_to_le64(le64_to_cpu(v1->a) - le64_to_cpu(v2->a) -
			   (x - y > x));
}
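
/*
 * A worked example of the carry/borrow logic above (illustrative only):
 * le128 stores the low 64 bits in ->b and the high 64 bits in ->a.  If
 * v1->b == 0xffffffffffffffff and v2->b == 1, then x + y wraps to 0, so
 * (x + y < x) is true and a carry of 1 is added into ->a.  le128_sub()
 * mirrors this: (x - y > x) detects underflow of the low half and borrows
 * 1 from ->a.
 */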
/*
 * Apply the Poly1305 ε-∆U hash function to (bulk length, tweak) and save the
 * result to rctx->header_hash.  This is the calculation
 *
 *	H_T ← Poly1305_{K_T}(bin_{128}(|L|) || T)
 *
 * from the procedure in section 6.4 of the Adiantum paper.  The resulting
 * value is reused in both the first and second hash steps.  Specifically,
 * it's added to the result of an independently keyed ε-∆U hash function (for
 * equal length inputs only) taken over the left-hand part (the "bulk") of
 * the message, to give the overall Adiantum hash of the (tweak, left-hand
 * part) pair.
 */
static void adiantum_hash_header(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	struct {
		__le64 message_bits;
		__le64 padding;
	} header = {
		.message_bits = cpu_to_le64((u64)bulk_len * 8)
	};
	struct poly1305_state state;

	poly1305_core_init(&state);

	BUILD_BUG_ON(sizeof(header) % POLY1305_BLOCK_SIZE != 0);
	poly1305_core_blocks(&state, &tctx->header_hash_key,
			     &header, sizeof(header) / POLY1305_BLOCK_SIZE, 1);

	BUILD_BUG_ON(TWEAK_SIZE % POLY1305_BLOCK_SIZE != 0);
	poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv,
			     TWEAK_SIZE / POLY1305_BLOCK_SIZE, 1);

	poly1305_core_emit(&state, NULL, &rctx->header_hash);
}
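
/*
 * For reference, the Poly1305 input assembled above is 48 bytes, i.e. three
 * 16-byte Poly1305 blocks: the 8-byte little-endian bit count of the bulk
 * (e.g. 4080 * 8 = 32640 for a 4096-byte sector), 8 bytes of zero padding
 * completing bin_{128}(|L|), then the 32-byte tweak T.
 */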
/* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */
static int adiantum_hash_message(struct skcipher_request *req,
				 struct scatterlist *sgl, unsigned int nents,
				 le128 *digest)
{
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	struct shash_desc *hash_desc = &rctx->u.hash_desc;
	struct sg_mapping_iter miter;
	unsigned int i, n;
	int err;

	err = crypto_shash_init(hash_desc);
	if (err)
		return err;

	sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	for (i = 0; i < bulk_len; i += n) {
		sg_miter_next(&miter);
		n = min_t(unsigned int, miter.length, bulk_len - i);
		err = crypto_shash_update(hash_desc, miter.addr, n);
		if (err)
			break;
	}
	sg_miter_stop(&miter);
	if (err)
		return err;

	return crypto_shash_final(hash_desc, (u8 *)digest);
}
/* Continue Adiantum encryption/decryption after the stream cipher step */
static int adiantum_finish(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	struct scatterlist *dst = req->dst;
	const unsigned int dst_nents = sg_nents(dst);
	le128 digest;
	int err;

	/* If decrypting, decrypt C_M with the block cipher to get P_M */
	if (!rctx->enc)
		crypto_cipher_decrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
					  rctx->rbuf.bytes);

	/*
	 * Second hash step
	 *	enc: C_R = C_M - H_{K_H}(T, C_L)
	 *	dec: P_R = P_M - H_{K_H}(T, P_L)
	 */
	rctx->u.hash_desc.tfm = tctx->hash;
	le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
	if (dst_nents == 1 && dst->offset + req->cryptlen <= PAGE_SIZE) {
		/* Fast path for single-page destination */
		struct page *page = sg_page(dst);
		void *virt = kmap_local_page(page) + dst->offset;

		err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
					  (u8 *)&digest);
		if (err) {
			kunmap_local(virt);
			return err;
		}
		le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
		memcpy(virt + bulk_len, &rctx->rbuf.bignum, sizeof(le128));
		flush_dcache_page(page);
		kunmap_local(virt);
	} else {
		/* Slow path that works for any destination scatterlist */
		err = adiantum_hash_message(req, dst, dst_nents, &digest);
		if (err)
			return err;
		le128_sub(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);
		scatterwalk_map_and_copy(&rctx->rbuf.bignum, dst,
					 bulk_len, sizeof(le128), 1);
	}
	return 0;
}
static void adiantum_streamcipher_done(void *data, int err)
{
	struct skcipher_request *req = data;

	if (!err)
		err = adiantum_finish(req);

	skcipher_request_complete(req, err);
}
static int adiantum_crypt(struct skcipher_request *req, bool enc)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct adiantum_request_ctx *rctx = skcipher_request_ctx(req);
	const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
	struct scatterlist *src = req->src;
	const unsigned int src_nents = sg_nents(src);
	unsigned int stream_len;
	le128 digest;
	int err;

	if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
		return -EINVAL;

	rctx->enc = enc;

	/*
	 * First hash step
	 *	enc: P_M = P_R + H_{K_H}(T, P_L)
	 *	dec: C_M = C_R + H_{K_H}(T, C_L)
	 */
	adiantum_hash_header(req);
	rctx->u.hash_desc.tfm = tctx->hash;
	if (src_nents == 1 && src->offset + req->cryptlen <= PAGE_SIZE) {
		/* Fast path for single-page source */
		void *virt = kmap_local_page(sg_page(src)) + src->offset;

		err = crypto_shash_digest(&rctx->u.hash_desc, virt, bulk_len,
					  (u8 *)&digest);
		memcpy(&rctx->rbuf.bignum, virt + bulk_len, sizeof(le128));
		kunmap_local(virt);
	} else {
		/* Slow path that works for any source scatterlist */
		err = adiantum_hash_message(req, src, src_nents, &digest);
		scatterwalk_map_and_copy(&rctx->rbuf.bignum, src,
					 bulk_len, sizeof(le128), 0);
	}
	if (err)
		return err;
	le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &rctx->header_hash);
	le128_add(&rctx->rbuf.bignum, &rctx->rbuf.bignum, &digest);

	/* If encrypting, encrypt P_M with the block cipher to get C_M */
	if (enc)
		crypto_cipher_encrypt_one(tctx->blockcipher, rctx->rbuf.bytes,
					  rctx->rbuf.bytes);

	/* Initialize the rest of the XChaCha IV (first part is C_M) */
	BUILD_BUG_ON(BLOCKCIPHER_BLOCK_SIZE != 16);
	BUILD_BUG_ON(XCHACHA_IV_SIZE != 32);	/* nonce || stream position */
	rctx->rbuf.words[4] = cpu_to_le32(1);
	rctx->rbuf.words[5] = 0;
	rctx->rbuf.words[6] = 0;
	rctx->rbuf.words[7] = 0;

	/*
	 * XChaCha needs to be done on all the data except the last 16 bytes;
	 * for disk encryption that usually means 4080 or 496 bytes.  But
	 * ChaCha implementations tend to be most efficient when passed a
	 * whole number of 64-byte ChaCha blocks, or sometimes even a multiple
	 * of 256 bytes.  And here it doesn't matter whether the last 16 bytes
	 * are written to, as the second hash step will overwrite them.  Thus,
	 * round the XChaCha length up to the next 64-byte boundary if
	 * possible.
	 */
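	/*
	 * E.g., for a 4096-byte sector, bulk_len = 4080 and
	 * round_up(4080, 64) = 4096 <= req->cryptlen, so all 4096 bytes are
	 * streamed.  For a hypothetical 30-byte message, round_up(14, 64) =
	 * 64 > 30, so stream_len stays at bulk_len = 14.
	 */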
	stream_len = bulk_len;
	if (round_up(stream_len, CHACHA_BLOCK_SIZE) <= req->cryptlen)
		stream_len = round_up(stream_len, CHACHA_BLOCK_SIZE);

	skcipher_request_set_tfm(&rctx->u.streamcipher_req, tctx->streamcipher);
	skcipher_request_set_crypt(&rctx->u.streamcipher_req, req->src,
				   req->dst, stream_len, &rctx->rbuf);
	skcipher_request_set_callback(&rctx->u.streamcipher_req,
				      req->base.flags,
				      adiantum_streamcipher_done, req);
	return crypto_skcipher_encrypt(&rctx->u.streamcipher_req) ?:
	       adiantum_finish(req);
}
static int adiantum_encrypt(struct skcipher_request *req)
{
	return adiantum_crypt(req, true);
}

static int adiantum_decrypt(struct skcipher_request *req)
{
	return adiantum_crypt(req, false);
}

static int adiantum_init_tfm(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);
	struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct crypto_skcipher *streamcipher;
	struct crypto_cipher *blockcipher;
	struct crypto_shash *hash;
	unsigned int subreq_size;
	int err;

	streamcipher = crypto_spawn_skcipher(&ictx->streamcipher_spawn);
	if (IS_ERR(streamcipher))
		return PTR_ERR(streamcipher);

	blockcipher = crypto_spawn_cipher(&ictx->blockcipher_spawn);
	if (IS_ERR(blockcipher)) {
		err = PTR_ERR(blockcipher);
		goto err_free_streamcipher;
	}

	hash = crypto_spawn_shash(&ictx->hash_spawn);
	if (IS_ERR(hash)) {
		err = PTR_ERR(hash);
		goto err_free_blockcipher;
	}

	tctx->streamcipher = streamcipher;
	tctx->blockcipher = blockcipher;
	tctx->hash = hash;

	BUILD_BUG_ON(offsetofend(struct adiantum_request_ctx, u) !=
		     sizeof(struct adiantum_request_ctx));
	subreq_size = max(sizeof_field(struct adiantum_request_ctx,
				       u.hash_desc) +
			  crypto_shash_descsize(hash),
			  sizeof_field(struct adiantum_request_ctx,
				       u.streamcipher_req) +
			  crypto_skcipher_reqsize(streamcipher));

	crypto_skcipher_set_reqsize(tfm,
				    offsetof(struct adiantum_request_ctx, u) +
				    subreq_size);
	return 0;

err_free_blockcipher:
	crypto_free_cipher(blockcipher);
err_free_streamcipher:
	crypto_free_skcipher(streamcipher);
	return err;
}

static void adiantum_exit_tfm(struct crypto_skcipher *tfm)
{
	struct adiantum_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(tctx->streamcipher);
	crypto_free_cipher(tctx->blockcipher);
	crypto_free_shash(tctx->hash);
}

static void adiantum_free_instance(struct skcipher_instance *inst)
{
	struct adiantum_instance_ctx *ictx = skcipher_instance_ctx(inst);

	crypto_drop_skcipher(&ictx->streamcipher_spawn);
	crypto_drop_cipher(&ictx->blockcipher_spawn);
	crypto_drop_shash(&ictx->hash_spawn);
	kfree(inst);
}

/*
 * Check for a supported set of inner algorithms.
 * See the comment at the beginning of this file.
 */
static bool adiantum_supported_algorithms(struct skcipher_alg_common *streamcipher_alg,
					  struct crypto_alg *blockcipher_alg,
					  struct shash_alg *hash_alg)
{
	if (strcmp(streamcipher_alg->base.cra_name, "xchacha12") != 0 &&
	    strcmp(streamcipher_alg->base.cra_name, "xchacha20") != 0)
		return false;

	if (blockcipher_alg->cra_cipher.cia_min_keysize > BLOCKCIPHER_KEY_SIZE ||
	    blockcipher_alg->cra_cipher.cia_max_keysize < BLOCKCIPHER_KEY_SIZE)
		return false;
	if (blockcipher_alg->cra_blocksize != BLOCKCIPHER_BLOCK_SIZE)
		return false;

	if (strcmp(hash_alg->base.cra_name, "nhpoly1305") != 0)
		return false;

	return true;
}
static int adiantum_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	const char *nhpoly1305_name;
	struct skcipher_instance *inst;
	struct adiantum_instance_ctx *ictx;
	struct skcipher_alg_common *streamcipher_alg;
	struct crypto_alg *blockcipher_alg;
	struct shash_alg *hash_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return err;

	inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;
	ictx = skcipher_instance_ctx(inst);

	/* Stream cipher, e.g. "xchacha12" */
	err = crypto_grab_skcipher(&ictx->streamcipher_spawn,
				   skcipher_crypto_instance(inst),
				   crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	streamcipher_alg = crypto_spawn_skcipher_alg_common(&ictx->streamcipher_spawn);

	/* Block cipher, e.g. "aes" */
	err = crypto_grab_cipher(&ictx->blockcipher_spawn,
				 skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[2]), 0, mask);
	if (err)
		goto err_free_inst;
	blockcipher_alg = crypto_spawn_cipher_alg(&ictx->blockcipher_spawn);

	/* NHPoly1305 ε-∆U hash function */
	nhpoly1305_name = crypto_attr_alg_name(tb[3]);
	if (nhpoly1305_name == ERR_PTR(-ENOENT))
		nhpoly1305_name = "nhpoly1305";
	err = crypto_grab_shash(&ictx->hash_spawn,
				skcipher_crypto_instance(inst),
				nhpoly1305_name, 0, mask);
	if (err)
		goto err_free_inst;
	hash_alg = crypto_spawn_shash_alg(&ictx->hash_spawn);

	/* Check the set of algorithms */
	if (!adiantum_supported_algorithms(streamcipher_alg, blockcipher_alg,
					   hash_alg)) {
		pr_warn("Unsupported Adiantum instantiation: (%s,%s,%s)\n",
			streamcipher_alg->base.cra_name,
			blockcipher_alg->cra_name, hash_alg->base.cra_name);
		err = -EINVAL;
		goto err_free_inst;
	}

	/* Instance fields */

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
		     "adiantum(%s,%s)", streamcipher_alg->base.cra_name,
		     blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_free_inst;
	if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "adiantum(%s,%s,%s)",
		     streamcipher_alg->base.cra_driver_name,
		     blockcipher_alg->cra_driver_name,
		     hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto err_free_inst;
	inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
	inst->alg.base.cra_ctxsize = sizeof(struct adiantum_tfm_ctx);
	inst->alg.base.cra_alignmask = streamcipher_alg->base.cra_alignmask;
	/*
	 * The block cipher is only invoked once per message, so for long
	 * messages (e.g. sectors for disk encryption) its performance doesn't
	 * matter as much as that of the stream cipher and hash function.
	 * Thus, weigh the block cipher's ->cra_priority less.
	 */
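	/*
	 * E.g., with illustrative priorities of 300 (stream cipher), 200
	 * (hash), and 100 (block cipher), the instance priority becomes
	 * (4 * 300 + 2 * 200 + 100) / 7 = 242.
	 */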
	inst->alg.base.cra_priority = (4 * streamcipher_alg->base.cra_priority +
				       2 * hash_alg->base.cra_priority +
				       blockcipher_alg->cra_priority) / 7;

	inst->alg.setkey = adiantum_setkey;
	inst->alg.encrypt = adiantum_encrypt;
	inst->alg.decrypt = adiantum_decrypt;
	inst->alg.init = adiantum_init_tfm;
	inst->alg.exit = adiantum_exit_tfm;
	inst->alg.min_keysize = streamcipher_alg->min_keysize;
	inst->alg.max_keysize = streamcipher_alg->max_keysize;
	inst->alg.ivsize = TWEAK_SIZE;

	inst->free = adiantum_free_instance;

	err = skcipher_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		adiantum_free_instance(inst);
	}
	return err;
}
/* adiantum(streamcipher_name, blockcipher_name [, nhpoly1305_name]) */
static struct crypto_template adiantum_tmpl = {
	.name = "adiantum",
	.create = adiantum_create,
	.module = THIS_MODULE,
};

static int __init adiantum_module_init(void)
{
	return crypto_register_template(&adiantum_tmpl);
}

static void __exit adiantum_module_exit(void)
{
	crypto_unregister_template(&adiantum_tmpl);
}

subsys_initcall(adiantum_module_init);
module_exit(adiantum_module_exit);

MODULE_DESCRIPTION("Adiantum length-preserving encryption mode");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
MODULE_ALIAS_CRYPTO("adiantum");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
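
/*
 * Usage note: a minimal sketch of allocating and keying an instance from
 * other kernel code via the standard skcipher API, assuming a caller-provided
 * 32-byte key in a hypothetical `raw_key` buffer:
 *
 *	struct crypto_skcipher *tfm;
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("adiantum(xchacha12,aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_skcipher_setkey(tfm, raw_key, 32);
 *
 * Requests then supply a TWEAK_SIZE (32-byte) tweak as the IV and a cryptlen
 * of at least BLOCKCIPHER_BLOCK_SIZE (16) bytes.
 */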