/* crypto/lskcipher.c */
  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Linear symmetric key cipher operations.
  4. *
  5. * Generic encrypt/decrypt wrapper for ciphers.
  6. *
  7. * Copyright (c) 2023 Herbert Xu <herbert@gondor.apana.org.au>
  8. */
  9. #include <linux/cryptouser.h>
  10. #include <linux/err.h>
  11. #include <linux/export.h>
  12. #include <linux/kernel.h>
  13. #include <linux/seq_file.h>
  14. #include <linux/slab.h>
  15. #include <linux/string.h>
  16. #include <net/netlink.h>
  17. #include "skcipher.h"
  18. static inline struct crypto_lskcipher *__crypto_lskcipher_cast(
  19. struct crypto_tfm *tfm)
  20. {
  21. return container_of(tfm, struct crypto_lskcipher, base);
  22. }
  23. static inline struct lskcipher_alg *__crypto_lskcipher_alg(
  24. struct crypto_alg *alg)
  25. {
  26. return container_of(alg, struct lskcipher_alg, co.base);
  27. }
  28. static int lskcipher_setkey_unaligned(struct crypto_lskcipher *tfm,
  29. const u8 *key, unsigned int keylen)
  30. {
  31. unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
  32. struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm);
  33. u8 *buffer, *alignbuffer;
  34. unsigned long absize;
  35. int ret;
  36. absize = keylen + alignmask;
  37. buffer = kmalloc(absize, GFP_ATOMIC);
  38. if (!buffer)
  39. return -ENOMEM;
  40. alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
  41. memcpy(alignbuffer, key, keylen);
  42. ret = cipher->setkey(tfm, alignbuffer, keylen);
  43. kfree_sensitive(buffer);
  44. return ret;
  45. }
  46. int crypto_lskcipher_setkey(struct crypto_lskcipher *tfm, const u8 *key,
  47. unsigned int keylen)
  48. {
  49. unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
  50. struct lskcipher_alg *cipher = crypto_lskcipher_alg(tfm);
  51. if (keylen < cipher->co.min_keysize || keylen > cipher->co.max_keysize)
  52. return -EINVAL;
  53. if ((unsigned long)key & alignmask)
  54. return lskcipher_setkey_unaligned(tfm, key, keylen);
  55. else
  56. return cipher->setkey(tfm, key, keylen);
  57. }
  58. EXPORT_SYMBOL_GPL(crypto_lskcipher_setkey);
  59. static int crypto_lskcipher_crypt_unaligned(
  60. struct crypto_lskcipher *tfm, const u8 *src, u8 *dst, unsigned len,
  61. u8 *iv, int (*crypt)(struct crypto_lskcipher *tfm, const u8 *src,
  62. u8 *dst, unsigned len, u8 *iv, u32 flags))
  63. {
  64. unsigned statesize = crypto_lskcipher_statesize(tfm);
  65. unsigned ivsize = crypto_lskcipher_ivsize(tfm);
  66. unsigned bs = crypto_lskcipher_blocksize(tfm);
  67. unsigned cs = crypto_lskcipher_chunksize(tfm);
  68. int err;
  69. u8 *tiv;
  70. u8 *p;
  71. BUILD_BUG_ON(MAX_CIPHER_BLOCKSIZE > PAGE_SIZE ||
  72. MAX_CIPHER_ALIGNMASK >= PAGE_SIZE);
  73. tiv = kmalloc(PAGE_SIZE, GFP_ATOMIC);
  74. if (!tiv)
  75. return -ENOMEM;
  76. memcpy(tiv, iv, ivsize + statesize);
  77. p = kmalloc(PAGE_SIZE, GFP_ATOMIC);
  78. err = -ENOMEM;
  79. if (!p)
  80. goto out;
  81. while (len >= bs) {
  82. unsigned chunk = min((unsigned)PAGE_SIZE, len);
  83. int err;
  84. if (chunk > cs)
  85. chunk &= ~(cs - 1);
  86. memcpy(p, src, chunk);
  87. err = crypt(tfm, p, p, chunk, tiv, CRYPTO_LSKCIPHER_FLAG_FINAL);
  88. if (err)
  89. goto out;
  90. memcpy(dst, p, chunk);
  91. src += chunk;
  92. dst += chunk;
  93. len -= chunk;
  94. }
  95. err = len ? -EINVAL : 0;
  96. out:
  97. memcpy(iv, tiv, ivsize + statesize);
  98. kfree_sensitive(p);
  99. kfree_sensitive(tiv);
  100. return err;
  101. }
  102. static int crypto_lskcipher_crypt(struct crypto_lskcipher *tfm, const u8 *src,
  103. u8 *dst, unsigned len, u8 *iv,
  104. int (*crypt)(struct crypto_lskcipher *tfm,
  105. const u8 *src, u8 *dst,
  106. unsigned len, u8 *iv,
  107. u32 flags))
  108. {
  109. unsigned long alignmask = crypto_lskcipher_alignmask(tfm);
  110. if (((unsigned long)src | (unsigned long)dst | (unsigned long)iv) &
  111. alignmask)
  112. return crypto_lskcipher_crypt_unaligned(tfm, src, dst, len, iv,
  113. crypt);
  114. return crypt(tfm, src, dst, len, iv, CRYPTO_LSKCIPHER_FLAG_FINAL);
  115. }
  116. int crypto_lskcipher_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
  117. u8 *dst, unsigned len, u8 *iv)
  118. {
  119. struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
  120. return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->encrypt);
  121. }
  122. EXPORT_SYMBOL_GPL(crypto_lskcipher_encrypt);
  123. int crypto_lskcipher_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
  124. u8 *dst, unsigned len, u8 *iv)
  125. {
  126. struct lskcipher_alg *alg = crypto_lskcipher_alg(tfm);
  127. return crypto_lskcipher_crypt(tfm, src, dst, len, iv, alg->decrypt);
  128. }
  129. EXPORT_SYMBOL_GPL(crypto_lskcipher_decrypt);
/*
 * Scatterlist glue for the skcipher wrapper around an lskcipher: walk
 * the request's scatterlists and feed each linear span to the flat
 * crypt callback, keeping the IV (and state, per the request context
 * layout) in the aligned request context and translating the request's
 * CONT/NOTFINAL flags into lskcipher CONT/FINAL flags.
 */
static int crypto_lskcipher_crypt_sg(struct skcipher_request *req,
				     int (*crypt)(struct crypto_lskcipher *tfm,
						  const u8 *src, u8 *dst,
						  unsigned len, u8 *ivs,
						  u32 flags))
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
	u8 *ivs = skcipher_request_ctx(req);
	struct crypto_lskcipher *tfm = *ctx;
	struct skcipher_walk walk;
	unsigned ivsize;
	u32 flags;
	int err;

	ivsize = crypto_lskcipher_ivsize(tfm);
	/* IV buffer lives at an aligned offset inside the request ctx. */
	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(skcipher) + 1);
	memcpy(ivs, req->iv, ivsize);

	flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	if (req->base.flags & CRYPTO_SKCIPHER_REQ_CONT)
		flags |= CRYPTO_LSKCIPHER_FLAG_CONT;

	if (!(req->base.flags & CRYPTO_SKCIPHER_REQ_NOTFINAL))
		flags |= CRYPTO_LSKCIPHER_FLAG_FINAL;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		/*
		 * Only the last span of the walk may carry FLAG_FINAL;
		 * strip it from every intermediate span.
		 */
		err = crypt(tfm, walk.src.virt.addr, walk.dst.virt.addr,
			    walk.nbytes, ivs,
			    flags & ~(walk.nbytes == walk.total ?
				      0 : CRYPTO_LSKCIPHER_FLAG_FINAL));
		err = skcipher_walk_done(&walk, err);
		/* Every span after the first continues the stream. */
		flags |= CRYPTO_LSKCIPHER_FLAG_CONT;
	}

	/* Hand the updated IV back to the caller. */
	memcpy(req->iv, ivs, ivsize);

	return err;
}
  164. int crypto_lskcipher_encrypt_sg(struct skcipher_request *req)
  165. {
  166. struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
  167. struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
  168. struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx);
  169. return crypto_lskcipher_crypt_sg(req, alg->encrypt);
  170. }
  171. int crypto_lskcipher_decrypt_sg(struct skcipher_request *req)
  172. {
  173. struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
  174. struct crypto_lskcipher **ctx = crypto_skcipher_ctx(skcipher);
  175. struct lskcipher_alg *alg = crypto_lskcipher_alg(*ctx);
  176. return crypto_lskcipher_crypt_sg(req, alg->decrypt);
  177. }
  178. static void crypto_lskcipher_exit_tfm(struct crypto_tfm *tfm)
  179. {
  180. struct crypto_lskcipher *skcipher = __crypto_lskcipher_cast(tfm);
  181. struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher);
  182. alg->exit(skcipher);
  183. }
  184. static int crypto_lskcipher_init_tfm(struct crypto_tfm *tfm)
  185. {
  186. struct crypto_lskcipher *skcipher = __crypto_lskcipher_cast(tfm);
  187. struct lskcipher_alg *alg = crypto_lskcipher_alg(skcipher);
  188. if (alg->exit)
  189. skcipher->base.exit = crypto_lskcipher_exit_tfm;
  190. if (alg->init)
  191. return alg->init(skcipher);
  192. return 0;
  193. }
  194. static void crypto_lskcipher_free_instance(struct crypto_instance *inst)
  195. {
  196. struct lskcipher_instance *skcipher =
  197. container_of(inst, struct lskcipher_instance, s.base);
  198. skcipher->free(skcipher);
  199. }
  200. static void __maybe_unused crypto_lskcipher_show(
  201. struct seq_file *m, struct crypto_alg *alg)
  202. {
  203. struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);
  204. seq_printf(m, "type : lskcipher\n");
  205. seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
  206. seq_printf(m, "min keysize : %u\n", skcipher->co.min_keysize);
  207. seq_printf(m, "max keysize : %u\n", skcipher->co.max_keysize);
  208. seq_printf(m, "ivsize : %u\n", skcipher->co.ivsize);
  209. seq_printf(m, "chunksize : %u\n", skcipher->co.chunksize);
  210. seq_printf(m, "statesize : %u\n", skcipher->co.statesize);
  211. }
  212. static int __maybe_unused crypto_lskcipher_report(
  213. struct sk_buff *skb, struct crypto_alg *alg)
  214. {
  215. struct lskcipher_alg *skcipher = __crypto_lskcipher_alg(alg);
  216. struct crypto_report_blkcipher rblkcipher;
  217. memset(&rblkcipher, 0, sizeof(rblkcipher));
  218. strscpy(rblkcipher.type, "lskcipher", sizeof(rblkcipher.type));
  219. strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));
  220. rblkcipher.blocksize = alg->cra_blocksize;
  221. rblkcipher.min_keysize = skcipher->co.min_keysize;
  222. rblkcipher.max_keysize = skcipher->co.max_keysize;
  223. rblkcipher.ivsize = skcipher->co.ivsize;
  224. return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
  225. sizeof(rblkcipher), &rblkcipher);
  226. }
/*
 * crypto_type glue that plugs lskcipher algorithms into the generic
 * crypto layer: tfm sizing/construction, instance freeing, procfs
 * display and netlink reporting.
 */
static const struct crypto_type crypto_lskcipher_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_lskcipher_init_tfm,
	.free = crypto_lskcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_lskcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_lskcipher_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_LSKCIPHER,
	.tfmsize = offsetof(struct crypto_lskcipher, base),
};
  242. static void crypto_lskcipher_exit_tfm_sg(struct crypto_tfm *tfm)
  243. {
  244. struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm);
  245. crypto_free_lskcipher(*ctx);
  246. }
  247. int crypto_init_lskcipher_ops_sg(struct crypto_tfm *tfm)
  248. {
  249. struct crypto_lskcipher **ctx = crypto_tfm_ctx(tfm);
  250. struct crypto_alg *calg = tfm->__crt_alg;
  251. struct crypto_lskcipher *skcipher;
  252. if (!crypto_mod_get(calg))
  253. return -EAGAIN;
  254. skcipher = crypto_create_tfm(calg, &crypto_lskcipher_type);
  255. if (IS_ERR(skcipher)) {
  256. crypto_mod_put(calg);
  257. return PTR_ERR(skcipher);
  258. }
  259. *ctx = skcipher;
  260. tfm->exit = crypto_lskcipher_exit_tfm_sg;
  261. return 0;
  262. }
  263. int crypto_grab_lskcipher(struct crypto_lskcipher_spawn *spawn,
  264. struct crypto_instance *inst,
  265. const char *name, u32 type, u32 mask)
  266. {
  267. spawn->base.frontend = &crypto_lskcipher_type;
  268. return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
  269. }
  270. EXPORT_SYMBOL_GPL(crypto_grab_lskcipher);
/**
 * crypto_alloc_lskcipher - allocate a linear symmetric key cipher handle
 * @alg_name: name of the requested algorithm
 * @type: algorithm type mask
 * @mask: bits to test @type against
 *
 * Return: a cipher handle on success; otherwise an error pointer
 * (callers in this file use IS_ERR()/PTR_ERR() on such handles).
 */
struct crypto_lskcipher *crypto_alloc_lskcipher(const char *alg_name,
						u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_lskcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_lskcipher);
  277. static int lskcipher_prepare_alg(struct lskcipher_alg *alg)
  278. {
  279. struct crypto_alg *base = &alg->co.base;
  280. int err;
  281. err = skcipher_prepare_alg_common(&alg->co);
  282. if (err)
  283. return err;
  284. if (alg->co.chunksize & (alg->co.chunksize - 1))
  285. return -EINVAL;
  286. base->cra_type = &crypto_lskcipher_type;
  287. base->cra_flags |= CRYPTO_ALG_TYPE_LSKCIPHER;
  288. return 0;
  289. }
  290. int crypto_register_lskcipher(struct lskcipher_alg *alg)
  291. {
  292. struct crypto_alg *base = &alg->co.base;
  293. int err;
  294. err = lskcipher_prepare_alg(alg);
  295. if (err)
  296. return err;
  297. return crypto_register_alg(base);
  298. }
  299. EXPORT_SYMBOL_GPL(crypto_register_lskcipher);
  300. void crypto_unregister_lskcipher(struct lskcipher_alg *alg)
  301. {
  302. crypto_unregister_alg(&alg->co.base);
  303. }
  304. EXPORT_SYMBOL_GPL(crypto_unregister_lskcipher);
  305. int crypto_register_lskciphers(struct lskcipher_alg *algs, int count)
  306. {
  307. int i, ret;
  308. for (i = 0; i < count; i++) {
  309. ret = crypto_register_lskcipher(&algs[i]);
  310. if (ret)
  311. goto err;
  312. }
  313. return 0;
  314. err:
  315. for (--i; i >= 0; --i)
  316. crypto_unregister_lskcipher(&algs[i]);
  317. return ret;
  318. }
  319. EXPORT_SYMBOL_GPL(crypto_register_lskciphers);
  320. void crypto_unregister_lskciphers(struct lskcipher_alg *algs, int count)
  321. {
  322. int i;
  323. for (i = count - 1; i >= 0; --i)
  324. crypto_unregister_lskcipher(&algs[i]);
  325. }
  326. EXPORT_SYMBOL_GPL(crypto_unregister_lskciphers);
  327. int lskcipher_register_instance(struct crypto_template *tmpl,
  328. struct lskcipher_instance *inst)
  329. {
  330. int err;
  331. if (WARN_ON(!inst->free))
  332. return -EINVAL;
  333. err = lskcipher_prepare_alg(&inst->alg);
  334. if (err)
  335. return err;
  336. return crypto_register_instance(tmpl, lskcipher_crypto_instance(inst));
  337. }
  338. EXPORT_SYMBOL_GPL(lskcipher_register_instance);
  339. static int lskcipher_setkey_simple(struct crypto_lskcipher *tfm, const u8 *key,
  340. unsigned int keylen)
  341. {
  342. struct crypto_lskcipher *cipher = lskcipher_cipher_simple(tfm);
  343. crypto_lskcipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
  344. crypto_lskcipher_set_flags(cipher, crypto_lskcipher_get_flags(tfm) &
  345. CRYPTO_TFM_REQ_MASK);
  346. return crypto_lskcipher_setkey(cipher, key, keylen);
  347. }
  348. static int lskcipher_init_tfm_simple(struct crypto_lskcipher *tfm)
  349. {
  350. struct lskcipher_instance *inst = lskcipher_alg_instance(tfm);
  351. struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
  352. struct crypto_lskcipher_spawn *spawn;
  353. struct crypto_lskcipher *cipher;
  354. spawn = lskcipher_instance_ctx(inst);
  355. cipher = crypto_spawn_lskcipher(spawn);
  356. if (IS_ERR(cipher))
  357. return PTR_ERR(cipher);
  358. *ctx = cipher;
  359. return 0;
  360. }
  361. static void lskcipher_exit_tfm_simple(struct crypto_lskcipher *tfm)
  362. {
  363. struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
  364. crypto_free_lskcipher(*ctx);
  365. }
  366. static void lskcipher_free_instance_simple(struct lskcipher_instance *inst)
  367. {
  368. crypto_drop_lskcipher(lskcipher_instance_ctx(inst));
  369. kfree(inst);
  370. }
  371. /**
  372. * lskcipher_alloc_instance_simple - allocate instance of simple block cipher
  373. *
  374. * Allocate an lskcipher_instance for a simple block cipher mode of operation,
  375. * e.g. cbc or ecb. The instance context will have just a single crypto_spawn,
  376. * that for the underlying cipher. The {min,max}_keysize, ivsize, blocksize,
  377. * alignmask, and priority are set from the underlying cipher but can be
  378. * overridden if needed. The tfm context defaults to
  379. * struct crypto_lskcipher *, and default ->setkey(), ->init(), and
  380. * ->exit() methods are installed.
  381. *
  382. * @tmpl: the template being instantiated
  383. * @tb: the template parameters
  384. *
  385. * Return: a pointer to the new instance, or an ERR_PTR(). The caller still
  386. * needs to register the instance.
  387. */
struct lskcipher_instance *lskcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct lskcipher_instance *inst;
	struct crypto_lskcipher_spawn *spawn;
	char ecb_name[CRYPTO_MAX_ALG_NAME];
	struct lskcipher_alg *cipher_alg;
	const char *cipher_name;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_LSKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	cipher_name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(cipher_name))
		return ERR_CAST(cipher_name);

	/* Instance and its single spawn are allocated together. */
	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);

	spawn = lskcipher_instance_ctx(inst);
	err = crypto_grab_lskcipher(spawn,
				    lskcipher_crypto_instance(inst),
				    cipher_name, 0, mask);

	/*
	 * If the bare cipher wasn't found and this template isn't "ecb"
	 * itself, retry with an ecb(...) wrapper around the name.
	 */
	ecb_name[0] = 0;
	if (err == -ENOENT && !!memcmp(tmpl->name, "ecb", 4)) {
		err = -ENAMETOOLONG;
		if (snprintf(ecb_name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
			     cipher_name) >= CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		err = crypto_grab_lskcipher(spawn,
					    lskcipher_crypto_instance(inst),
					    ecb_name, 0, mask);
	}

	if (err)
		goto err_free_inst;

	cipher_alg = crypto_lskcipher_spawn_alg(spawn);

	err = crypto_inst_setname(lskcipher_crypto_instance(inst), tmpl->name,
				  &cipher_alg->co.base);
	if (err)
		goto err_free_inst;

	if (ecb_name[0]) {
		int len;

		err = -EINVAL;
		/*
		 * Recover the bare cipher name from "ecb(<name>)": skip the
		 * 4-char "ecb(" prefix, then strip the trailing ')'.
		 */
		len = strscpy(ecb_name, &cipher_alg->co.base.cra_name[4],
			      sizeof(ecb_name));
		if (len < 2)
			goto err_free_inst;

		if (ecb_name[len - 1] != ')')
			goto err_free_inst;

		ecb_name[len - 1] = 0;

		err = -ENAMETOOLONG;
		if (snprintf(inst->alg.co.base.cra_name, CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, ecb_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;

		/* Driver name keeps the name the user actually asked for. */
		if (strcmp(ecb_name, cipher_name) &&
		    snprintf(inst->alg.co.base.cra_driver_name,
			     CRYPTO_MAX_ALG_NAME,
			     "%s(%s)", tmpl->name, cipher_name) >=
		    CRYPTO_MAX_ALG_NAME)
			goto err_free_inst;
	} else {
		/* Don't allow nesting. */
		err = -ELOOP;
		if ((cipher_alg->co.base.cra_flags & CRYPTO_ALG_INSTANCE))
			goto err_free_inst;
	}

	/* Simple templates only wrap ciphers without their own IV. */
	err = -EINVAL;
	if (cipher_alg->co.ivsize)
		goto err_free_inst;

	inst->free = lskcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.co.base.cra_blocksize = cipher_alg->co.base.cra_blocksize;
	inst->alg.co.base.cra_alignmask = cipher_alg->co.base.cra_alignmask;
	inst->alg.co.base.cra_priority = cipher_alg->co.base.cra_priority;
	inst->alg.co.min_keysize = cipher_alg->co.min_keysize;
	inst->alg.co.max_keysize = cipher_alg->co.max_keysize;
	inst->alg.co.ivsize = cipher_alg->co.base.cra_blocksize;
	inst->alg.co.statesize = cipher_alg->co.statesize;

	/* Use struct crypto_lskcipher * by default, can be overridden */
	inst->alg.co.base.cra_ctxsize = sizeof(struct crypto_lskcipher *);
	inst->alg.setkey = lskcipher_setkey_simple;
	inst->alg.init = lskcipher_init_tfm_simple;
	inst->alg.exit = lskcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	lskcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(lskcipher_alloc_instance_simple);
  477. EXPORT_SYMBOL_GPL(lskcipher_alloc_instance_simple);