rk3288_crypto_skcipher.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
 */

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>

#include "rk3288_crypto.h"

#define RK_CRYPTO_DEC		BIT(0)
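
/*
 * Decide whether a request must be handed to the software fallback.
 * The hardware path is only used when every source/destination
 * scatterlist entry is 32-bit aligned, a multiple of the block size
 * and of matching length; anything else (including zero-length
 * requests) bumps the corresponding statistics counter and falls back.
 */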
static int rk_cipher_need_fallback(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base);
	struct scatterlist *sgs, *sgd;
	unsigned int stodo, dtodo, len;
	unsigned int bs = crypto_skcipher_blocksize(tfm);

	if (!req->cryptlen)
		return true;

	len = req->cryptlen;
	sgs = req->src;
	sgd = req->dst;
	while (sgs && sgd) {
		if (!IS_ALIGNED(sgs->offset, sizeof(u32))) {
			algt->stat_fb_align++;
			return true;
		}
		if (!IS_ALIGNED(sgd->offset, sizeof(u32))) {
			algt->stat_fb_align++;
			return true;
		}
		stodo = min(len, sgs->length);
		if (stodo % bs) {
			algt->stat_fb_len++;
			return true;
		}
		dtodo = min(len, sgd->length);
		if (dtodo % bs) {
			algt->stat_fb_len++;
			return true;
		}
		if (stodo != dtodo) {
			algt->stat_fb_sgdiff++;
			return true;
		}
		len -= stodo;
		sgs = sg_next(sgs);
		sgd = sg_next(sgd);
	}
	return false;
}
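
/*
 * Run the request on the software fallback tfm allocated in
 * rk_cipher_tfm_init(), reusing the caller's buffers, IV and
 * completion callback.
 */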
static int rk_cipher_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base);
	int err;

	algt->stat_fb++;

	skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
	skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
				      areq->base.complete, areq->base.data);
	skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (rctx->mode & RK_CRYPTO_DEC)
		err = crypto_skcipher_decrypt(&rctx->fallback_req);
	else
		err = crypto_skcipher_encrypt(&rctx->fallback_req);
	return err;
}
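
/*
 * Common entry point for all encrypt/decrypt helpers: either fall back
 * to software or hand the request to one of the crypto engines.
 */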
static int rk_cipher_handle_req(struct skcipher_request *req)
{
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_crypto_info *rkc;
	struct crypto_engine *engine;

	if (rk_cipher_need_fallback(req))
		return rk_cipher_fallback(req);

	rkc = get_rk_crypto();

	engine = rkc->engine;
	rctx->dev = rkc;

	return crypto_transfer_skcipher_request_to_engine(engine, req);
}
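
/*
 * setkey helpers: validate the key length for AES and run the standard
 * DES/3DES key checks, keep a copy for programming the hardware key
 * registers, and mirror the key into the fallback tfm.
 */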
static int rk_aes_setkey(struct crypto_skcipher *cipher,
			 const u8 *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;
	ctx->keylen = keylen;
	memcpy(ctx->key, key, keylen);

	return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}

static int rk_des_setkey(struct crypto_skcipher *cipher,
			 const u8 *key, unsigned int keylen)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des_key(cipher, key);
	if (err)
		return err;

	ctx->keylen = keylen;
	memcpy(ctx->key, key, keylen);

	return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}

static int rk_tdes_setkey(struct crypto_skcipher *cipher,
			  const u8 *key, unsigned int keylen)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	int err;

	err = verify_skcipher_des3_key(cipher, key);
	if (err)
		return err;

	ctx->keylen = keylen;
	memcpy(ctx->key, key, keylen);

	return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen);
}
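
/*
 * Per-mode entry points: each records the control-register bits for
 * the requested algorithm, chaining mode and direction in the request
 * context, then defers to rk_cipher_handle_req().
 */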
static int rk_aes_ecb_encrypt(struct skcipher_request *req)
{
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

	rctx->mode = RK_CRYPTO_AES_ECB_MODE;
	return rk_cipher_handle_req(req);
}

static int rk_aes_ecb_decrypt(struct skcipher_request *req)
{
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

	rctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC;
	return rk_cipher_handle_req(req);
}

static int rk_aes_cbc_encrypt(struct skcipher_request *req)
{
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

	rctx->mode = RK_CRYPTO_AES_CBC_MODE;
	return rk_cipher_handle_req(req);
}

static int rk_aes_cbc_decrypt(struct skcipher_request *req)
{
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

	rctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC;
	return rk_cipher_handle_req(req);
}

static int rk_des_ecb_encrypt(struct skcipher_request *req)
{
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

	rctx->mode = 0;
	return rk_cipher_handle_req(req);
}

static int rk_des_ecb_decrypt(struct skcipher_request *req)
{
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

	rctx->mode = RK_CRYPTO_DEC;
	return rk_cipher_handle_req(req);
}

static int rk_des_cbc_encrypt(struct skcipher_request *req)
{
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

	rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC;
	return rk_cipher_handle_req(req);
}

static int rk_des_cbc_decrypt(struct skcipher_request *req)
{
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

	rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC;
	return rk_cipher_handle_req(req);
}

static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req)
{
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

	rctx->mode = RK_CRYPTO_TDES_SELECT;
	return rk_cipher_handle_req(req);
}

static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req)
{
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

	rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC;
	return rk_cipher_handle_req(req);
}

static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req)
{
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

	rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC;
	return rk_cipher_handle_req(req);
}

static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req)
{
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);

	rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC |
		     RK_CRYPTO_DEC;
	return rk_cipher_handle_req(req);
}
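
/*
 * Program the cipher unit for one request: select the TDES or AES
 * register set based on the block size, write the mode bits and key,
 * then enable the block-cipher DMA done/error interrupts. The IV is
 * written separately by rk_cipher_run() for each scatterlist entry.
 */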
static void rk_cipher_hw_init(struct rk_crypto_info *dev, struct skcipher_request *req)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(req);
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher);
	u32 block, conf_reg = 0;

	block = crypto_tfm_alg_blocksize(tfm);

	if (block == DES_BLOCK_SIZE) {
		rctx->mode |= RK_CRYPTO_TDES_FIFO_MODE |
			      RK_CRYPTO_TDES_BYTESWAP_KEY |
			      RK_CRYPTO_TDES_BYTESWAP_IV;
		CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, rctx->mode);
		memcpy_toio(dev->reg + RK_CRYPTO_TDES_KEY1_0, ctx->key, ctx->keylen);
		conf_reg = RK_CRYPTO_DESSEL;
	} else {
		rctx->mode |= RK_CRYPTO_AES_FIFO_MODE |
			      RK_CRYPTO_AES_KEY_CHANGE |
			      RK_CRYPTO_AES_BYTESWAP_KEY |
			      RK_CRYPTO_AES_BYTESWAP_IV;
		if (ctx->keylen == AES_KEYSIZE_192)
			rctx->mode |= RK_CRYPTO_AES_192BIT_key;
		else if (ctx->keylen == AES_KEYSIZE_256)
			rctx->mode |= RK_CRYPTO_AES_256BIT_key;
		CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, rctx->mode);
		memcpy_toio(dev->reg + RK_CRYPTO_AES_KEY_0, ctx->key, ctx->keylen);
	}
	conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO |
		    RK_CRYPTO_BYTESWAP_BRFIFO;
	CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg);
	CRYPTO_WRITE(dev, RK_CRYPTO_INTENA,
		     RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA);
}
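
/*
 * Program the source/destination DMA addresses and the transfer length
 * (in 32-bit words, as computed by the caller), then set BLOCK_START
 * to kick off the transfer.
 */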
static void crypto_dma_start(struct rk_crypto_info *dev,
			     struct scatterlist *sgs,
			     struct scatterlist *sgd, unsigned int todo)
{
	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, sg_dma_address(sgs));
	CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, todo);
	CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, sg_dma_address(sgd));
	CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START |
		     _SBF(RK_CRYPTO_BLOCK_START, 16));
}
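
/*
 * do_one_request handler executed by the crypto engine. It walks the
 * source/destination scatterlists one entry at a time, maps each for
 * DMA, (re)programs the hardware and IV, and waits up to two seconds
 * for the completion to be signalled. For CBC it carries the IV across
 * entries: the last ciphertext block of each segment (saved beforehand
 * for decryption) becomes the IV of the next one, and the final IV is
 * copied back into the request.
 */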
static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
{
	struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq);
	struct scatterlist *sgs, *sgd;
	int err = 0;
	int ivsize = crypto_skcipher_ivsize(tfm);
	int offset;
	u8 iv[AES_BLOCK_SIZE];
	u8 biv[AES_BLOCK_SIZE];
	u8 *ivtouse = areq->iv;
	unsigned int len = areq->cryptlen;
	unsigned int todo;
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base);
	struct rk_crypto_info *rkc = rctx->dev;

	err = pm_runtime_resume_and_get(rkc->dev);
	if (err)
		return err;

	algt->stat_req++;
	rkc->nreq++;

	ivsize = crypto_skcipher_ivsize(tfm);
	if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
		if (rctx->mode & RK_CRYPTO_DEC) {
			offset = areq->cryptlen - ivsize;
			scatterwalk_map_and_copy(rctx->backup_iv, areq->src,
						 offset, ivsize, 0);
		}
	}

	sgs = areq->src;
	sgd = areq->dst;

	while (sgs && sgd && len) {
		if (!sgs->length) {
			sgs = sg_next(sgs);
			sgd = sg_next(sgd);
			continue;
		}
		if (rctx->mode & RK_CRYPTO_DEC) {
			/* we backup last block of source to be used as IV at next step */
			offset = sgs->length - ivsize;
			scatterwalk_map_and_copy(biv, sgs, offset, ivsize, 0);
		}
		if (sgs == sgd) {
			err = dma_map_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL);
			if (err <= 0) {
				err = -EINVAL;
				goto theend_iv;
			}
		} else {
			err = dma_map_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE);
			if (err <= 0) {
				err = -EINVAL;
				goto theend_iv;
			}
			err = dma_map_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE);
			if (err <= 0) {
				err = -EINVAL;
				goto theend_sgs;
			}
		}
		err = 0;
		rk_cipher_hw_init(rkc, areq);
		if (ivsize) {
			if (ivsize == DES_BLOCK_SIZE)
				memcpy_toio(rkc->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize);
			else
				memcpy_toio(rkc->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize);
		}
		reinit_completion(&rkc->complete);
		rkc->status = 0;

		todo = min(sg_dma_len(sgs), len);
		len -= todo;
		crypto_dma_start(rkc, sgs, sgd, todo / 4);
		wait_for_completion_interruptible_timeout(&rkc->complete,
							  msecs_to_jiffies(2000));
		if (!rkc->status) {
			dev_err(rkc->dev, "DMA timeout\n");
			err = -EFAULT;
			goto theend;
		}
		if (sgs == sgd) {
			dma_unmap_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL);
		} else {
			dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE);
			dma_unmap_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE);
		}
		if (rctx->mode & RK_CRYPTO_DEC) {
			memcpy(iv, biv, ivsize);
			ivtouse = iv;
		} else {
			offset = sgd->length - ivsize;
			scatterwalk_map_and_copy(iv, sgd, offset, ivsize, 0);
			ivtouse = iv;
		}
		sgs = sg_next(sgs);
		sgd = sg_next(sgd);
	}

	if (areq->iv && ivsize > 0) {
		offset = areq->cryptlen - ivsize;
		if (rctx->mode & RK_CRYPTO_DEC) {
			memcpy(areq->iv, rctx->backup_iv, ivsize);
			memzero_explicit(rctx->backup_iv, ivsize);
		} else {
			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
						 ivsize, 0);
		}
	}

theend:
	pm_runtime_put_autosuspend(rkc->dev);

	local_bh_disable();
	crypto_finalize_skcipher_request(engine, areq, err);
	local_bh_enable();
	return 0;

theend_sgs:
	if (sgs == sgd) {
		dma_unmap_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE);
		dma_unmap_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE);
	}
theend_iv:
	return err;
}
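
/*
 * Allocate the software fallback tfm named after this algorithm and
 * size the request context so it can also hold the fallback's request.
 */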
static int rk_cipher_tfm_init(struct crypto_skcipher *tfm)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base);

	ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback_tfm)) {
		dev_err(algt->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(ctx->fallback_tfm));
		return PTR_ERR(ctx->fallback_tfm);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct rk_cipher_rctx) +
				    crypto_skcipher_reqsize(ctx->fallback_tfm));

	return 0;
}

static void rk_cipher_tfm_exit(struct crypto_skcipher *tfm)
{
	struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	memzero_explicit(ctx->key, ctx->keylen);
	crypto_free_skcipher(ctx->fallback_tfm);
}
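
/*
 * Algorithm templates exposed for registration by the platform driver:
 * ECB and CBC variants of AES, DES and 3DES-EDE, all executed through
 * rk_cipher_run() on the crypto engine.
 */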
struct rk_crypto_tmp rk_ecb_aes_alg = {
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.alg.skcipher.base = {
		.base.cra_name = "ecb(aes)",
		.base.cra_driver_name = "ecb-aes-rk",
		.base.cra_priority = 300,
		.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask = 0x0f,
		.base.cra_module = THIS_MODULE,

		.init = rk_cipher_tfm_init,
		.exit = rk_cipher_tfm_exit,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = rk_aes_setkey,
		.encrypt = rk_aes_ecb_encrypt,
		.decrypt = rk_aes_ecb_decrypt,
	},
	.alg.skcipher.op = {
		.do_one_request = rk_cipher_run,
	},
};

struct rk_crypto_tmp rk_cbc_aes_alg = {
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.alg.skcipher.base = {
		.base.cra_name = "cbc(aes)",
		.base.cra_driver_name = "cbc-aes-rk",
		.base.cra_priority = 300,
		.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask = 0x0f,
		.base.cra_module = THIS_MODULE,

		.init = rk_cipher_tfm_init,
		.exit = rk_cipher_tfm_exit,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = rk_aes_setkey,
		.encrypt = rk_aes_cbc_encrypt,
		.decrypt = rk_aes_cbc_decrypt,
	},
	.alg.skcipher.op = {
		.do_one_request = rk_cipher_run,
	},
};

struct rk_crypto_tmp rk_ecb_des_alg = {
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.alg.skcipher.base = {
		.base.cra_name = "ecb(des)",
		.base.cra_driver_name = "ecb-des-rk",
		.base.cra_priority = 300,
		.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize = DES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask = 0x07,
		.base.cra_module = THIS_MODULE,

		.init = rk_cipher_tfm_init,
		.exit = rk_cipher_tfm_exit,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.setkey = rk_des_setkey,
		.encrypt = rk_des_ecb_encrypt,
		.decrypt = rk_des_ecb_decrypt,
	},
	.alg.skcipher.op = {
		.do_one_request = rk_cipher_run,
	},
};

struct rk_crypto_tmp rk_cbc_des_alg = {
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.alg.skcipher.base = {
		.base.cra_name = "cbc(des)",
		.base.cra_driver_name = "cbc-des-rk",
		.base.cra_priority = 300,
		.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize = DES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask = 0x07,
		.base.cra_module = THIS_MODULE,

		.init = rk_cipher_tfm_init,
		.exit = rk_cipher_tfm_exit,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = DES_BLOCK_SIZE,
		.setkey = rk_des_setkey,
		.encrypt = rk_des_cbc_encrypt,
		.decrypt = rk_des_cbc_decrypt,
	},
	.alg.skcipher.op = {
		.do_one_request = rk_cipher_run,
	},
};

struct rk_crypto_tmp rk_ecb_des3_ede_alg = {
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.alg.skcipher.base = {
		.base.cra_name = "ecb(des3_ede)",
		.base.cra_driver_name = "ecb-des3-ede-rk",
		.base.cra_priority = 300,
		.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize = DES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask = 0x07,
		.base.cra_module = THIS_MODULE,

		.init = rk_cipher_tfm_init,
		.exit = rk_cipher_tfm_exit,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.setkey = rk_tdes_setkey,
		.encrypt = rk_des3_ede_ecb_encrypt,
		.decrypt = rk_des3_ede_ecb_decrypt,
	},
	.alg.skcipher.op = {
		.do_one_request = rk_cipher_run,
	},
};

struct rk_crypto_tmp rk_cbc_des3_ede_alg = {
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.alg.skcipher.base = {
		.base.cra_name = "cbc(des3_ede)",
		.base.cra_driver_name = "cbc-des3-ede-rk",
		.base.cra_priority = 300,
		.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize = DES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct rk_cipher_ctx),
		.base.cra_alignmask = 0x07,
		.base.cra_module = THIS_MODULE,

		.init = rk_cipher_tfm_init,
		.exit = rk_cipher_tfm_exit,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = DES_BLOCK_SIZE,
		.setkey = rk_tdes_setkey,
		.encrypt = rk_des3_ede_cbc_encrypt,
		.decrypt = rk_des3_ede_cbc_decrypt,
	},
	.alg.skcipher.op = {
		.do_one_request = rk_cipher_run,
	},
};