/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB modes, as well as for DES and 3DES in CBC and
 * ECB modes.
 *
 * You can find the datasheet in Documentation/arm/sunxi/README
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include "sun4i-ss.h"
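
/*
 * Optimized PIO path: used when all source and destination SG entries
 * have a size and offset that are a multiple of 4, so the data can be
 * pushed through the FIFOs as whole 32-bit words without any bounce
 * buffering.
 */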
static int sun4i_ss_opti_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	unsigned long pi = 0, po = 0; /* progress for in and out */
	bool miter_err;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offset for in and out */
	unsigned long flags;

	if (!areq->cryptlen)
		return 0;

	if (!areq->iv) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen / 4; i++)
		writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writesl(ss->base + SS_IV0 + i * 4, &v, 1);
		}
	}
	writel(mode, ss->base + SS_CTL);

	ileft = areq->cryptlen / 4;
	oleft = areq->cryptlen / 4;
	oi = 0;
	oo = 0;
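
	/*
	 * Pump the request through the FIFOs: feed words from the source
	 * SG into SS_RXFIFO and drain SS_TXFIFO into the destination SG
	 * until all output words have been read back.
	 */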
	do {
		if (ileft) {
			sg_miter_start(&mi, areq->src, sg_nents(areq->src),
				       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
			if (pi)
				sg_miter_skip(&mi, pi);
			miter_err = sg_miter_next(&mi);
			if (!miter_err || !mi.addr) {
				dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
				err = -EINVAL;
				goto release_ss;
			}
			todo = min(rx_cnt, ileft);
			todo = min_t(size_t, todo, (mi.length - oi) / 4);
			if (todo) {
				ileft -= todo;
				writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
				oi += todo * 4;
			}
			if (oi == mi.length) {
				pi += mi.length;
				oi = 0;
			}
			sg_miter_stop(&mi);
		}
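
		/* refresh the FIFO status to learn how much space/data is available */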
		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
			       SG_MITER_TO_SG | SG_MITER_ATOMIC);
		if (po)
			sg_miter_skip(&mo, po);
		miter_err = sg_miter_next(&mo);
		if (!miter_err || !mo.addr) {
			dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
			err = -EINVAL;
			goto release_ss;
		}
		todo = min(tx_cnt, oleft);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);
		if (todo) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			oo = 0;
			po += mo.length;
		}
		sg_miter_stop(&mo);
	} while (oleft);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}

/* Generic function that supports SGs with sizes not a multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned long pi = 0, po = 0; /* progress for in and out */
	bool miter_err;
	unsigned int oi, oo; /* offset for in and out */
	char buf[4 * SS_RX_MAX]; /* buffer for linearizing the SG src */
	char bufo[4 * SS_TX_MAX]; /* buffer for linearizing the SG dst */
	unsigned int ob = 0; /* offset in buf */
	unsigned int obo = 0; /* offset in bufo */
	unsigned int obl = 0; /* length of data in bufo */
	unsigned long flags;

	if (!areq->cryptlen)
		return 0;

	if (!areq->iv) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	/*
	 * If all the SGs have a size and an offset that are a multiple
	 * of 4, we can use the SS optimized function.
	 */
	while (in_sg && no_chunk == 1) {
		if ((in_sg->length | in_sg->offset) & 3u)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if ((out_sg->length | out_sg->offset) & 3u)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1)
		return sun4i_ss_opti_poll(areq);

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen / 4; i++)
		writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writesl(ss->base + SS_IV0 + i * 4, &v, 1);
		}
	}
	writel(mode, ss->base + SS_CTL);

	ileft = areq->cryptlen;
	oleft = areq->cryptlen;
	oi = 0;
	oo = 0;
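
	/*
	 * Main PIO loop of the generic path: aligned runs are moved
	 * directly between the SGs and the FIFOs, while leftover bytes
	 * are staged in the buf/bufo bounce buffers until whole 32-bit
	 * words can be transferred.
	 */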
	while (oleft) {
		if (ileft) {
			sg_miter_start(&mi, areq->src, sg_nents(areq->src),
				       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
			if (pi)
				sg_miter_skip(&mi, pi);
			miter_err = sg_miter_next(&mi);
			if (!miter_err || !mi.addr) {
				dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
				err = -EINVAL;
				goto release_ss;
			}
			/*
			 * todo is the number of consecutive 4-byte words
			 * that we can read from the current SG
			 */
			todo = min(rx_cnt, ileft / 4);
			todo = min_t(size_t, todo, (mi.length - oi) / 4);
			if (todo && !ob) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * Not enough consecutive bytes, so linearize
				 * them in buf; here todo is in bytes. Once a
				 * multiple of 4 bytes has accumulated, the
				 * whole buffer must be writable in one pass,
				 * which is why we min() against rx_cnt.
				 */
				todo = min(rx_cnt * 4 - ob, ileft);
				todo = min_t(size_t, todo, mi.length - oi);
				memcpy(buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (!(ob % 4)) {
					writesl(ss->base + SS_RXFIFO, buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				pi += mi.length;
				oi = 0;
			}
			sg_miter_stop(&mi);
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		if (!tx_cnt)
			continue;
		sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
			       SG_MITER_TO_SG | SG_MITER_ATOMIC);
		if (po)
			sg_miter_skip(&mo, po);
		miter_err = sg_miter_next(&mo);
		if (!miter_err || !mo.addr) {
			dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
			err = -EINVAL;
			goto release_ss;
		}
		/* todo is in 4-byte words */
		todo = min(tx_cnt, oleft / 4);
		todo = min_t(size_t, todo, (mo.length - oo) / 4);
		if (todo) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				po += mo.length;
				oo = 0;
			}
		} else {
			/*
			 * Read obl bytes into bufo; read as much as
			 * possible in order to empty the device FIFO.
			 */
			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * How many bytes can we copy?
				 * No more than the remaining SG size and
				 * no more than the remaining buffer;
				 * no need to test against oleft.
				 */
				todo = min_t(size_t,
					     mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					po += mo.length;
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully used here */
		}
		sg_miter_stop(&mo);
	}

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);
	return err;
}
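
/*
 * Each helper below only assembles the SS_CTL mode word for the request
 * (algorithm, chaining mode, direction and keysize) and then hands the
 * request to the common polling code.
 */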

/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun4i_ss_alg_template *algt;

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,
			    alg.crypto.base);
	op->ss = algt->ss;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct sun4i_cipher_req_ctx));

	return 0;
}

/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
		op->keymode = SS_AES_128BITS;
		break;
	case 192 / 8:
		op->keymode = SS_AES_192BITS;
		break;
	case 256 / 8:
		op->keymode = SS_AES_256BITS;
		break;
	default:
		dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);
	return 0;
}

/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	u32 flags;
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	if (unlikely(keylen != DES_KEY_SIZE)) {
		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	flags = crypto_skcipher_get_flags(tfm);
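	/*
	 * des_ekey() returns 0 for a weak key; reject such keys when the
	 * user requested weak-key checking via CRYPTO_TFM_REQ_WEAK_KEY.
	 */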
	ret = des_ekey(tmp, key);
	if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
		dev_dbg(ss->dev, "Weak key %u\n", keylen);
		return -EINVAL;
	}

	op->keylen = keylen;
	memcpy(op->key, key, keylen);
	return 0;
}

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);
	return 0;
}