sun8i-ss-hash.c 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * sun8i-ss-hash.c - hardware cryptographic offloader for
  4. * Allwinner A80/A83T SoC
  5. *
  6. * Copyright (C) 2015-2020 Corentin Labbe <clabbe@baylibre.com>
  7. *
 * This file adds support for MD5 and SHA1/SHA224/SHA256.
  9. *
  10. * You could find the datasheet in Documentation/arch/arm/sunxi.rst
  11. */
  12. #include <crypto/hmac.h>
  13. #include <crypto/internal/hash.h>
  14. #include <crypto/md5.h>
  15. #include <crypto/scatterwalk.h>
  16. #include <crypto/sha1.h>
  17. #include <crypto/sha2.h>
  18. #include <linux/bottom_half.h>
  19. #include <linux/dma-mapping.h>
  20. #include <linux/err.h>
  21. #include <linux/kernel.h>
  22. #include <linux/pm_runtime.h>
  23. #include <linux/scatterlist.h>
  24. #include <linux/slab.h>
  25. #include <linux/string.h>
  26. #include "sun8i-ss.h"
  27. static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key,
  28. unsigned int keylen)
  29. {
  30. struct crypto_shash *xtfm;
  31. int ret;
  32. xtfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_NEED_FALLBACK);
  33. if (IS_ERR(xtfm))
  34. return PTR_ERR(xtfm);
  35. ret = crypto_shash_tfm_digest(xtfm, key, keylen, tfmctx->key);
  36. if (ret)
  37. dev_err(tfmctx->ss->dev, "shash digest error ret=%d\n", ret);
  38. crypto_free_shash(xtfm);
  39. return ret;
  40. }
  41. int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
  42. unsigned int keylen)
  43. {
  44. struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(ahash);
  45. int digestsize, i;
  46. int bs = crypto_ahash_blocksize(ahash);
  47. int ret;
  48. digestsize = crypto_ahash_digestsize(ahash);
  49. if (keylen > bs) {
  50. ret = sun8i_ss_hashkey(tfmctx, key, keylen);
  51. if (ret)
  52. return ret;
  53. tfmctx->keylen = digestsize;
  54. } else {
  55. tfmctx->keylen = keylen;
  56. memcpy(tfmctx->key, key, keylen);
  57. }
  58. tfmctx->ipad = kzalloc(bs, GFP_KERNEL);
  59. if (!tfmctx->ipad)
  60. return -ENOMEM;
  61. tfmctx->opad = kzalloc(bs, GFP_KERNEL);
  62. if (!tfmctx->opad) {
  63. ret = -ENOMEM;
  64. goto err_opad;
  65. }
  66. memset(tfmctx->key + tfmctx->keylen, 0, bs - tfmctx->keylen);
  67. memcpy(tfmctx->ipad, tfmctx->key, tfmctx->keylen);
  68. memcpy(tfmctx->opad, tfmctx->key, tfmctx->keylen);
  69. for (i = 0; i < bs; i++) {
  70. tfmctx->ipad[i] ^= HMAC_IPAD_VALUE;
  71. tfmctx->opad[i] ^= HMAC_OPAD_VALUE;
  72. }
  73. ret = crypto_ahash_setkey(tfmctx->fallback_tfm, key, keylen);
  74. if (!ret)
  75. return 0;
  76. memzero_explicit(tfmctx->key, keylen);
  77. kfree_sensitive(tfmctx->opad);
  78. err_opad:
  79. kfree_sensitive(tfmctx->ipad);
  80. return ret;
  81. }
  82. int sun8i_ss_hash_init_tfm(struct crypto_ahash *tfm)
  83. {
  84. struct sun8i_ss_hash_tfm_ctx *op = crypto_ahash_ctx(tfm);
  85. struct ahash_alg *alg = crypto_ahash_alg(tfm);
  86. struct sun8i_ss_alg_template *algt;
  87. int err;
  88. algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
  89. op->ss = algt->ss;
  90. /* FALLBACK */
  91. op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0,
  92. CRYPTO_ALG_NEED_FALLBACK);
  93. if (IS_ERR(op->fallback_tfm)) {
  94. dev_err(algt->ss->dev, "Fallback driver could no be loaded\n");
  95. return PTR_ERR(op->fallback_tfm);
  96. }
  97. crypto_ahash_set_statesize(tfm,
  98. crypto_ahash_statesize(op->fallback_tfm));
  99. crypto_ahash_set_reqsize(tfm,
  100. sizeof(struct sun8i_ss_hash_reqctx) +
  101. crypto_ahash_reqsize(op->fallback_tfm));
  102. memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm),
  103. CRYPTO_MAX_ALG_NAME);
  104. err = pm_runtime_get_sync(op->ss->dev);
  105. if (err < 0)
  106. goto error_pm;
  107. return 0;
  108. error_pm:
  109. pm_runtime_put_noidle(op->ss->dev);
  110. crypto_free_ahash(op->fallback_tfm);
  111. return err;
  112. }
  113. void sun8i_ss_hash_exit_tfm(struct crypto_ahash *tfm)
  114. {
  115. struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
  116. kfree_sensitive(tfmctx->ipad);
  117. kfree_sensitive(tfmctx->opad);
  118. crypto_free_ahash(tfmctx->fallback_tfm);
  119. pm_runtime_put_sync_suspend(tfmctx->ss->dev);
  120. }
  121. int sun8i_ss_hash_init(struct ahash_request *areq)
  122. {
  123. struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
  124. struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
  125. struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
  126. memset(rctx, 0, sizeof(struct sun8i_ss_hash_reqctx));
  127. ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
  128. rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
  129. return crypto_ahash_init(&rctx->fallback_req);
  130. }
  131. int sun8i_ss_hash_export(struct ahash_request *areq, void *out)
  132. {
  133. struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
  134. struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
  135. struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
  136. ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
  137. rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
  138. return crypto_ahash_export(&rctx->fallback_req, out);
  139. }
  140. int sun8i_ss_hash_import(struct ahash_request *areq, const void *in)
  141. {
  142. struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
  143. struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
  144. struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
  145. ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
  146. rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
  147. return crypto_ahash_import(&rctx->fallback_req, in);
  148. }
  149. int sun8i_ss_hash_final(struct ahash_request *areq)
  150. {
  151. struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
  152. struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
  153. struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
  154. ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
  155. rctx->fallback_req.base.flags = areq->base.flags &
  156. CRYPTO_TFM_REQ_MAY_SLEEP;
  157. rctx->fallback_req.result = areq->result;
  158. if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
  159. struct ahash_alg *alg = crypto_ahash_alg(tfm);
  160. struct sun8i_ss_alg_template *algt __maybe_unused;
  161. algt = container_of(alg, struct sun8i_ss_alg_template,
  162. alg.hash.base);
  163. #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
  164. algt->stat_fb++;
  165. #endif
  166. }
  167. return crypto_ahash_final(&rctx->fallback_req);
  168. }
  169. int sun8i_ss_hash_update(struct ahash_request *areq)
  170. {
  171. struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
  172. struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
  173. struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
  174. ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
  175. rctx->fallback_req.base.flags = areq->base.flags &
  176. CRYPTO_TFM_REQ_MAY_SLEEP;
  177. rctx->fallback_req.nbytes = areq->nbytes;
  178. rctx->fallback_req.src = areq->src;
  179. return crypto_ahash_update(&rctx->fallback_req);
  180. }
  181. int sun8i_ss_hash_finup(struct ahash_request *areq)
  182. {
  183. struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
  184. struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
  185. struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
  186. ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
  187. rctx->fallback_req.base.flags = areq->base.flags &
  188. CRYPTO_TFM_REQ_MAY_SLEEP;
  189. rctx->fallback_req.nbytes = areq->nbytes;
  190. rctx->fallback_req.src = areq->src;
  191. rctx->fallback_req.result = areq->result;
  192. if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
  193. struct ahash_alg *alg = crypto_ahash_alg(tfm);
  194. struct sun8i_ss_alg_template *algt __maybe_unused;
  195. algt = container_of(alg, struct sun8i_ss_alg_template,
  196. alg.hash.base);
  197. #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
  198. algt->stat_fb++;
  199. #endif
  200. }
  201. return crypto_ahash_finup(&rctx->fallback_req);
  202. }
  203. static int sun8i_ss_hash_digest_fb(struct ahash_request *areq)
  204. {
  205. struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
  206. struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
  207. struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
  208. ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm);
  209. rctx->fallback_req.base.flags = areq->base.flags &
  210. CRYPTO_TFM_REQ_MAY_SLEEP;
  211. rctx->fallback_req.nbytes = areq->nbytes;
  212. rctx->fallback_req.src = areq->src;
  213. rctx->fallback_req.result = areq->result;
  214. if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) {
  215. struct ahash_alg *alg = crypto_ahash_alg(tfm);
  216. struct sun8i_ss_alg_template *algt __maybe_unused;
  217. algt = container_of(alg, struct sun8i_ss_alg_template,
  218. alg.hash.base);
  219. #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
  220. algt->stat_fb++;
  221. #endif
  222. }
  223. return crypto_ahash_digest(&rctx->fallback_req);
  224. }
/* sun8i_ss_run_hash_task - feed one prepared hash request to the hardware.
 * @ss:   the device
 * @rctx: request context carrying the t_src/t_dst DMA descriptors and method
 * @name: algorithm name, used only for log messages
 *
 * Each t_src[i]/t_dst[i] pair is programmed and started as a separate
 * operation; the loop stops at the first empty t_dst slot.
 * Returns 0 on success, -EFAULT if an operation did not complete in time.
 */
static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss,
				  struct sun8i_ss_hash_reqctx *rctx,
				  const char *name)
{
	int flow = rctx->flow;
	u32 v = SS_START;
	int i;

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	ss->flows[flow].stat_req++;
#endif

	/* choose between stream0/stream1 */
	if (flow)
		v |= SS_FLOW1;
	else
		v |= SS_FLOW0;

	v |= rctx->method;

	for (i = 0; i < MAX_SG; i++) {
		if (!rctx->t_dst[i].addr)
			break;
		/* mlock serializes access to the shared register file */
		mutex_lock(&ss->mlock);
		if (i > 0) {
			/* NOTE(review): for chunks after the first, the previous
			 * digest address is loaded through the KEY/IV address
			 * registers with BIT(17) set — presumably a "continue
			 * from previous state" mode; confirm against datasheet.
			 */
			v |= BIT(17);
			writel(rctx->t_dst[i - 1].addr, ss->base + SS_KEY_ADR_REG);
			writel(rctx->t_dst[i - 1].addr, ss->base + SS_IV_ADR_REG);
		}

		dev_dbg(ss->dev,
			"Processing SG %d on flow %d %s ctl=%x %d to %d method=%x src=%x dst=%x\n",
			i, flow, name, v,
			rctx->t_src[i].len, rctx->t_dst[i].len,
			rctx->method, rctx->t_src[i].addr, rctx->t_dst[i].addr);

		/* program source, destination, length, then enable IRQs */
		writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG);
		writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG);
		writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG);
		writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG);
		reinit_completion(&ss->flows[flow].complete);
		ss->flows[flow].status = 0;
		/* all setup writes must land before the start bit */
		wmb();

		writel(v, ss->base + SS_CTL_REG);
		mutex_unlock(&ss->mlock);
		/* status is set elsewhere (IRQ path) before completing;
		 * status == 0 after the wait therefore means timeout/interrupt
		 */
		wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,
							  msecs_to_jiffies(2000));
		if (ss->flows[flow].status == 0) {
			dev_err(ss->dev, "DMA timeout for %s\n", name);
			return -EFAULT;
		}
	}

	return 0;
}
  273. static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq)
  274. {
  275. struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
  276. struct ahash_alg *alg = crypto_ahash_alg(tfm);
  277. struct sun8i_ss_alg_template *algt;
  278. struct scatterlist *sg;
  279. algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
  280. if (areq->nbytes == 0) {
  281. algt->stat_fb_len++;
  282. return true;
  283. }
  284. if (areq->nbytes >= MAX_PAD_SIZE - 64) {
  285. algt->stat_fb_len++;
  286. return true;
  287. }
  288. /* we need to reserve one SG for the padding one */
  289. if (sg_nents(areq->src) > MAX_SG - 1) {
  290. algt->stat_fb_sgnum++;
  291. return true;
  292. }
  293. sg = areq->src;
  294. while (sg) {
  295. /* SS can operate hash only on full block size
  296. * since SS support only MD5,sha1,sha224 and sha256, blocksize
  297. * is always 64
  298. */
  299. /* Only the last block could be bounced to the pad buffer */
  300. if (sg->length % 64 && sg_next(sg)) {
  301. algt->stat_fb_sglen++;
  302. return true;
  303. }
  304. if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
  305. algt->stat_fb_align++;
  306. return true;
  307. }
  308. if (sg->length % 4) {
  309. algt->stat_fb_sglen++;
  310. return true;
  311. }
  312. sg = sg_next(sg);
  313. }
  314. return false;
  315. }
  316. int sun8i_ss_hash_digest(struct ahash_request *areq)
  317. {
  318. struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
  319. struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
  320. struct ahash_alg *alg = crypto_ahash_alg(tfm);
  321. struct sun8i_ss_alg_template *algt;
  322. struct sun8i_ss_dev *ss;
  323. struct crypto_engine *engine;
  324. int e;
  325. if (sun8i_ss_hash_need_fallback(areq))
  326. return sun8i_ss_hash_digest_fb(areq);
  327. algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
  328. ss = algt->ss;
  329. e = sun8i_ss_get_engine_number(ss);
  330. rctx->flow = e;
  331. engine = ss->flows[e].engine;
  332. return crypto_transfer_hash_request_to_engine(engine, areq);
  333. }
/* hash_pad - append Merkle-Damgard padding to the word buffer.
 * @buf:        destination buffer, already holding @padi 32-bit words
 * @bufsize:    size of @buf in bytes (overflow guard)
 * @padi:       word index where the padding starts
 * @byte_count: total number of message bytes, encoded in the length field
 * @le:         true = little-endian length field (MD5), false = big-endian (SHA)
 * @bs:         algorithm block size (64, or 128 for sha384/sha512)
 *
 * Returns the new total word count in @buf, or 0 if it would overflow.
 */
static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs)
{
	u64 fill, min_fill, j, k;
	__be64 *bebits;
	__le64 *lebits;

	j = padi;
	/* mandatory 0x80 marker byte; remaining bytes of the word are zero */
	buf[j++] = cpu_to_le32(0x80);

	/* fill = bytes remaining in the current block;
	 * min_fill = marker word + length field (8 or 16 bytes + 4)
	 */
	if (bs == 64) {
		fill = 64 - (byte_count % 64);
		min_fill = 2 * sizeof(u32) + sizeof(u32);
	} else {
		fill = 128 - (byte_count % 128);
		min_fill = 4 * sizeof(u32) + sizeof(u32);
	}
	/* not enough room for the length field: pad through an extra block */
	if (fill < min_fill)
		fill += bs;

	k = j;
	j += (fill - min_fill) / sizeof(u32);
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}
	/* zero-fill between the marker and the length field */
	for (; k < j; k++)
		buf[k] = 0;

	/* write the bit length of the message (byte_count << 3) */
	if (le) {
		/* MD5 */
		lebits = (__le64 *)&buf[j];
		*lebits = cpu_to_le64(byte_count << 3);
		j += 2;
	} else {
		if (bs == 64) {
			/* sha1 sha224 sha256 */
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count << 3);
			j += 2;
		} else {
			/* sha384 sha512: 128-bit length, high 64 bits first */
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count >> 61);
			j += 2;
			bebits = (__be64 *)&buf[j];
			*bebits = cpu_to_be64(byte_count << 3);
			j += 2;
		}
	}
	if (j * 4 > bufsize) {
		pr_err("%s OVERFLOW %llu\n", __func__, j);
		return 0;
	}

	return j;
}
/* sun8i_ss_hash_run - run an ahash request
 * Send the data of the request to the SS along with an extra SG with padding
 *
 * Called by the crypto engine. For plain hashes the request's scatterlist is
 * mapped and a trailing SG with the padding block is appended. For HMAC the
 * function runs twice (see the retry label): first with IPAD prepended to the
 * message, then with OPAD prepended to the inner digest. The DMA map/unmap
 * pairing across the fall-through error labels is summarized in the comment
 * block before the labels — keep it in sync with any change.
 */
int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
{
	struct ahash_request *areq = container_of(breq, struct ahash_request, base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
	struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq);
	struct ahash_alg *alg = crypto_ahash_alg(tfm);
	struct sun8i_ss_alg_template *algt;
	struct sun8i_ss_dev *ss;
	struct scatterlist *sg;
	int bs = crypto_ahash_blocksize(tfm);
	int nr_sgs, err, digestsize;
	unsigned int len;
	u64 byte_count;		/* bytes covered by the padding length field */
	void *pad, *result;
	int j, i, k, todo;	/* j: words in pad buf, i: next t_src/t_dst slot */
	dma_addr_t addr_res, addr_pad, addr_xpad;
	__le32 *bf;
	/* HMAC step:
	 * 0: normal hashing
	 * 1: IPAD
	 * 2: OPAD
	 */
	int hmac = 0;

	algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base);
	ss = algt->ss;

	digestsize = crypto_ahash_digestsize(tfm);
	/* sha224 uses the sha256 engine; only the final copy is truncated */
	if (digestsize == SHA224_DIGEST_SIZE)
		digestsize = SHA256_DIGEST_SIZE;

	/* per-flow bounce buffers for the result and the padding block */
	result = ss->flows[rctx->flow].result;
	pad = ss->flows[rctx->flow].pad;
	bf = (__le32 *)pad;

	for (i = 0; i < MAX_SG; i++) {
		rctx->t_dst[i].addr = 0;
		rctx->t_dst[i].len = 0;
	}

#ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
	algt->stat_req++;
#endif

	rctx->method = ss->variant->alg_hash[algt->ss_algo_id];

	nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
	if (nr_sgs <= 0 || nr_sgs > MAX_SG) {
		dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs);
		err = -EINVAL;
		goto theend;
	}

	addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE);
	if (dma_mapping_error(ss->dev, addr_res)) {
		dev_err(ss->dev, "DMA map dest\n");
		err = -EINVAL;
		goto err_dma_result;
	}

	/* build t_src/t_dst from the scatterlist; a trailing partial block is
	 * copied into the pad buffer (j counts its words) instead of getting
	 * its own slot
	 */
	j = 0;
	len = areq->nbytes;
	sg = areq->src;
	i = 0;
	while (len > 0 && sg) {
		if (sg_dma_len(sg) == 0) {
			sg = sg_next(sg);
			continue;
		}
		todo = min(len, sg_dma_len(sg));
		/* only the last SG could be with a size not modulo64 */
		if (todo % 64 == 0) {
			rctx->t_src[i].addr = sg_dma_address(sg);
			rctx->t_src[i].len = todo / 4;
			rctx->t_dst[i].addr = addr_res;
			rctx->t_dst[i].len = digestsize / 4;
			len -= todo;
		} else {
			scatterwalk_map_and_copy(bf, sg, 0, todo, 0);
			j += todo / 4;
			len -= todo;
		}
		sg = sg_next(sg);
		i++;
	}
	if (len > 0) {
		dev_err(ss->dev, "remaining len %d\n", len);
		err = -EINVAL;
		goto theend;
	}

	/* the bounced tail shares its slot with the padding SG added below */
	if (j > 0)
		i--;

retry:
	byte_count = areq->nbytes;
	if (tfmctx->keylen && hmac == 0) {
		/* HMAC inner pass: hash(ipad || message) */
		hmac = 1;
		/* shift all SG one slot up, to free slot 0 for IPAD */
		for (k = 6; k >= 0; k--) {
			rctx->t_src[k + 1].addr = rctx->t_src[k].addr;
			rctx->t_src[k + 1].len = rctx->t_src[k].len;
			rctx->t_dst[k + 1].addr = rctx->t_dst[k].addr;
			rctx->t_dst[k + 1].len = rctx->t_dst[k].len;
		}
		addr_xpad = dma_map_single(ss->dev, tfmctx->ipad, bs, DMA_TO_DEVICE);
		err = dma_mapping_error(ss->dev, addr_xpad);
		if (err) {
			dev_err(ss->dev, "Fail to create DMA mapping of ipad\n");
			goto err_dma_xpad;
		}
		rctx->t_src[0].addr = addr_xpad;
		rctx->t_src[0].len = bs / 4;
		rctx->t_dst[0].addr = addr_res;
		rctx->t_dst[0].len = digestsize / 4;
		i++;
		byte_count = areq->nbytes + bs;
	}
	if (tfmctx->keylen && hmac == 2) {
		/* HMAC outer pass: hash(opad || inner_digest); the inner
		 * digest is bounced into the pad buffer, slot 0 carries OPAD
		 */
		for (i = 0; i < MAX_SG; i++) {
			rctx->t_src[i].addr = 0;
			rctx->t_src[i].len = 0;
			rctx->t_dst[i].addr = 0;
			rctx->t_dst[i].len = 0;
		}

		addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE);
		if (dma_mapping_error(ss->dev, addr_res)) {
			dev_err(ss->dev, "Fail to create DMA mapping of result\n");
			err = -EINVAL;
			goto err_dma_result;
		}
		addr_xpad = dma_map_single(ss->dev, tfmctx->opad, bs, DMA_TO_DEVICE);
		err = dma_mapping_error(ss->dev, addr_xpad);
		if (err) {
			dev_err(ss->dev, "Fail to create DMA mapping of opad\n");
			goto err_dma_xpad;
		}
		rctx->t_src[0].addr = addr_xpad;
		rctx->t_src[0].len = bs / 4;

		memcpy(bf, result, digestsize);
		j = digestsize / 4;
		i = 1;
		byte_count = digestsize + bs;
		rctx->t_dst[0].addr = addr_res;
		rctx->t_dst[0].len = digestsize / 4;
	}

	/* append the algorithm's padding after the bounced bytes */
	switch (algt->ss_algo_id) {
	case SS_ID_HASH_MD5:
		j = hash_pad(bf, 4096, j, byte_count, true, bs);
		break;
	case SS_ID_HASH_SHA1:
	case SS_ID_HASH_SHA224:
	case SS_ID_HASH_SHA256:
		j = hash_pad(bf, 4096, j, byte_count, false, bs);
		break;
	}
	if (!j) {
		err = -EINVAL;
		goto theend;
	}

	addr_pad = dma_map_single(ss->dev, pad, j * 4, DMA_TO_DEVICE);
	if (dma_mapping_error(ss->dev, addr_pad)) {
		dev_err(ss->dev, "DMA error on padding SG\n");
		err = -EINVAL;
		goto err_dma_pad;
	}
	rctx->t_src[i].addr = addr_pad;
	rctx->t_src[i].len = j;
	rctx->t_dst[i].addr = addr_res;
	rctx->t_dst[i].len = digestsize / 4;

	err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm));

	/*
	 * mini helper for checking dma map/unmap
	 * flow start for hmac = 0 (and HMAC = 1)
	 * HMAC = 0
	 * MAP src
	 * MAP res
	 *
	 * retry:
	 * if hmac then hmac = 1
	 *	MAP xpad (ipad)
	 * if hmac == 2
	 *	MAP res
	 *	MAP xpad (opad)
	 * MAP pad
	 * ACTION!
	 * UNMAP pad
	 * if hmac
	 *	UNMAP xpad
	 * UNMAP res
	 * if hmac < 2
	 *	UNMAP SRC
	 *
	 * if hmac = 1 then hmac = 2 goto retry
	 */

	dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE);

	/* NOTE: the labels below fall through on purpose (see comment above) */
err_dma_pad:
	if (hmac > 0)
		dma_unmap_single(ss->dev, addr_xpad, bs, DMA_TO_DEVICE);
err_dma_xpad:
	dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
err_dma_result:
	if (hmac < 2)
		dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src),
			     DMA_TO_DEVICE);
	/* inner HMAC pass done: loop back for the outer pass */
	if (hmac == 1 && !err) {
		hmac = 2;
		goto retry;
	}
	/* copy the (possibly truncated, e.g. sha224) digest to the caller */
	if (!err)
		memcpy(areq->result, result, crypto_ahash_digestsize(tfm));
theend:
	local_bh_disable();
	crypto_finalize_hash_request(engine, breq, err);
	local_bh_enable();
	return 0;
}