zip_crypto.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653
  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2019 HiSilicon Limited. */
  3. #include <crypto/internal/acompress.h>
  4. #include <linux/bitfield.h>
  5. #include <linux/bitmap.h>
  6. #include <linux/dma-mapping.h>
  7. #include <linux/scatterlist.h>
  8. #include "zip.h"
  9. /* hisi_zip_sqe dw3 */
  10. #define HZIP_BD_STATUS_M GENMASK(7, 0)
  11. /* hisi_zip_sqe dw7 */
  12. #define HZIP_IN_SGE_DATA_OFFSET_M GENMASK(23, 0)
  13. #define HZIP_SQE_TYPE_M GENMASK(31, 28)
  14. /* hisi_zip_sqe dw8 */
  15. #define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0)
  16. /* hisi_zip_sqe dw9 */
  17. #define HZIP_REQ_TYPE_M GENMASK(7, 0)
  18. #define HZIP_ALG_TYPE_DEFLATE 0x01
  19. #define HZIP_BUF_TYPE_M GENMASK(11, 8)
  20. #define HZIP_SGL 0x1
  21. #define HZIP_ALG_PRIORITY 300
  22. #define HZIP_SGL_SGE_NR 10
  23. #define HZIP_ALG_DEFLATE GENMASK(5, 4)
/* Serializes algorithm (un)registration across multiple zip devices. */
static DEFINE_MUTEX(zip_algs_lock);
/* Number of zip devices currently registered; algs are registered only once. */
static unsigned int zip_available_devs;
/* Hardware algorithm direction written into the QP: compress or decompress. */
enum hisi_zip_alg_type {
	HZIP_ALG_TYPE_COMP = 0,
	HZIP_ALG_TYPE_DECOMP = 1,
};

/* Indices into hisi_zip_ctx->qp_ctx[]; HZIP_CTX_Q_NUM is the array length. */
enum {
	HZIP_QPC_COMP,
	HZIP_QPC_DECOMP,
	HZIP_CTX_Q_NUM
};

/* Map a crypto algorithm name to the hardware request type (sqe dw9 field). */
#define COMP_NAME_TO_TYPE(alg_name) \
	(!strcmp((alg_name), "deflate") ? HZIP_ALG_TYPE_DEFLATE : 0)
/**
 * struct hisi_zip_req - per-request bookkeeping for one in-flight acomp request
 * @req:    the crypto-layer request this slot is serving
 * @hw_src: hardware SGL mapped from @req->src
 * @hw_dst: hardware SGL mapped from @req->dst
 * @dma_src: DMA address of @hw_src
 * @dma_dst: DMA address of @hw_dst
 * @req_id: slot index in the request queue; also used as the sqe tag
 */
struct hisi_zip_req {
	struct acomp_req *req;
	struct hisi_acc_hw_sgl *hw_src;
	struct hisi_acc_hw_sgl *hw_dst;
	dma_addr_t dma_src;
	dma_addr_t dma_dst;
	u16 req_id;
};
/**
 * struct hisi_zip_req_q - fixed-size pool of request slots for one QP
 * @q:          array of @size request slots
 * @req_bitmap: allocation bitmap; a set bit marks a slot in use
 * @req_lock:   protects @req_bitmap (and serializes hisi_qp_send)
 * @size:       number of slots (the QP's sq_depth)
 */
struct hisi_zip_req_q {
	struct hisi_zip_req *q;
	unsigned long *req_bitmap;
	spinlock_t req_lock;
	u16 size;
};
/**
 * struct hisi_zip_qp_ctx - per-queue-pair state (one for comp, one for decomp)
 * @qp:       the hardware queue pair
 * @req_q:    request-slot pool for this QP
 * @sgl_pool: pool of hardware SGLs (two per request slot: src and dst)
 * @zip_dev:  owning zip device (used for DFX counters)
 * @ctx:      back-pointer to the transform context
 */
struct hisi_zip_qp_ctx {
	struct hisi_qp *qp;
	struct hisi_zip_req_q req_q;
	struct hisi_acc_sgl_pool *sgl_pool;
	struct hisi_zip *zip_dev;
	struct hisi_zip_ctx *ctx;
};
/**
 * struct hisi_zip_sqe_ops - hooks for building and parsing hardware SQEs
 * @sqe_type:      value written into the sqe type field (dw7)
 * @fill_addr:     write src/dst DMA addresses into the sqe
 * @fill_buf_size: write input length and available output into the sqe
 * @fill_buf_type: set the buffer-type field (dw9)
 * @fill_req_type: set the request-type (algorithm) field (dw9)
 * @fill_tag:      store the request id so the completion can find the slot
 * @fill_sqe_type: set the sqe-type field (dw7)
 * @get_tag:       read back the request id from a completed sqe
 * @get_status:    read the BD status from a completed sqe
 * @get_dstlen:    read the produced output length from a completed sqe
 */
struct hisi_zip_sqe_ops {
	u8 sqe_type;
	void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type);
	void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type);
	void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type);
	u32 (*get_tag)(struct hisi_zip_sqe *sqe);
	u32 (*get_status)(struct hisi_zip_sqe *sqe);
	u32 (*get_dstlen)(struct hisi_zip_sqe *sqe);
};
/**
 * struct hisi_zip_ctx - crypto transform context
 * @qp_ctx: one QP context per direction (HZIP_QPC_COMP / HZIP_QPC_DECOMP)
 * @ops:    sqe build/parse hooks in use
 */
struct hisi_zip_ctx {
	struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM];
	const struct hisi_zip_sqe_ops *ops;
};
  74. static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
  75. {
  76. int ret;
  77. u16 n;
  78. if (!val)
  79. return -EINVAL;
  80. ret = kstrtou16(val, 10, &n);
  81. if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX)
  82. return -EINVAL;
  83. return param_set_ushort(val, kp);
  84. }
/* Read-only (0444) module parameter with a range-validated setter. */
static const struct kernel_param_ops sgl_sge_nr_ops = {
	.set = sgl_sge_nr_set,
	.get = param_get_ushort,
};
/* Number of SGEs per hardware SGL; default HZIP_SGL_SGE_NR (10). */
static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)");
  92. static struct hisi_zip_req *hisi_zip_create_req(struct hisi_zip_qp_ctx *qp_ctx,
  93. struct acomp_req *req)
  94. {
  95. struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
  96. struct hisi_zip_req *q = req_q->q;
  97. struct hisi_zip_req *req_cache;
  98. int req_id;
  99. spin_lock(&req_q->req_lock);
  100. req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size);
  101. if (req_id >= req_q->size) {
  102. spin_unlock(&req_q->req_lock);
  103. dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
  104. return ERR_PTR(-EAGAIN);
  105. }
  106. set_bit(req_id, req_q->req_bitmap);
  107. spin_unlock(&req_q->req_lock);
  108. req_cache = q + req_id;
  109. req_cache->req_id = req_id;
  110. req_cache->req = req;
  111. return req_cache;
  112. }
/* Release a request slot back to the pool by clearing its bitmap bit. */
static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
				struct hisi_zip_req *req)
{
	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;

	spin_lock(&req_q->req_lock);
	clear_bit(req->req_id, req_q->req_bitmap);
	spin_unlock(&req_q->req_lock);
}
  121. static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
  122. {
  123. sqe->source_addr_l = lower_32_bits(req->dma_src);
  124. sqe->source_addr_h = upper_32_bits(req->dma_src);
  125. sqe->dest_addr_l = lower_32_bits(req->dma_dst);
  126. sqe->dest_addr_h = upper_32_bits(req->dma_dst);
  127. }
  128. static void hisi_zip_fill_buf_size(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
  129. {
  130. struct acomp_req *a_req = req->req;
  131. sqe->input_data_length = a_req->slen;
  132. sqe->dest_avail_out = a_req->dlen;
  133. }
  134. static void hisi_zip_fill_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
  135. {
  136. u32 val;
  137. val = sqe->dw9 & ~HZIP_BUF_TYPE_M;
  138. val |= FIELD_PREP(HZIP_BUF_TYPE_M, buf_type);
  139. sqe->dw9 = val;
  140. }
  141. static void hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type)
  142. {
  143. u32 val;
  144. val = sqe->dw9 & ~HZIP_REQ_TYPE_M;
  145. val |= FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
  146. sqe->dw9 = val;
  147. }
/* Stash the request id in dw26 so the completion callback can find the slot. */
static void hisi_zip_fill_tag(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
{
	sqe->dw26 = req->req_id;
}
  152. static void hisi_zip_fill_sqe_type(struct hisi_zip_sqe *sqe, u8 sqe_type)
  153. {
  154. u32 val;
  155. val = sqe->dw7 & ~HZIP_SQE_TYPE_M;
  156. val |= FIELD_PREP(HZIP_SQE_TYPE_M, sqe_type);
  157. sqe->dw7 = val;
  158. }
  159. static void hisi_zip_fill_sqe(struct hisi_zip_ctx *ctx, struct hisi_zip_sqe *sqe,
  160. u8 req_type, struct hisi_zip_req *req)
  161. {
  162. const struct hisi_zip_sqe_ops *ops = ctx->ops;
  163. memset(sqe, 0, sizeof(struct hisi_zip_sqe));
  164. ops->fill_addr(sqe, req);
  165. ops->fill_buf_size(sqe, req);
  166. ops->fill_buf_type(sqe, HZIP_SGL);
  167. ops->fill_req_type(sqe, req_type);
  168. ops->fill_tag(sqe, req);
  169. ops->fill_sqe_type(sqe, ops->sqe_type);
  170. }
  171. static int hisi_zip_do_work(struct hisi_zip_qp_ctx *qp_ctx,
  172. struct hisi_zip_req *req)
  173. {
  174. struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
  175. struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
  176. struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
  177. struct acomp_req *a_req = req->req;
  178. struct hisi_qp *qp = qp_ctx->qp;
  179. struct device *dev = &qp->qm->pdev->dev;
  180. struct hisi_zip_sqe zip_sqe;
  181. int ret;
  182. if (unlikely(!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen))
  183. return -EINVAL;
  184. req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
  185. req->req_id << 1, &req->dma_src);
  186. if (IS_ERR(req->hw_src)) {
  187. dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
  188. PTR_ERR(req->hw_src));
  189. return PTR_ERR(req->hw_src);
  190. }
  191. req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
  192. (req->req_id << 1) + 1,
  193. &req->dma_dst);
  194. if (IS_ERR(req->hw_dst)) {
  195. ret = PTR_ERR(req->hw_dst);
  196. dev_err(dev, "failed to map the dst buffer to hw slg (%d)!\n",
  197. ret);
  198. goto err_unmap_input;
  199. }
  200. hisi_zip_fill_sqe(qp_ctx->ctx, &zip_sqe, qp->req_type, req);
  201. /* send command to start a task */
  202. atomic64_inc(&dfx->send_cnt);
  203. spin_lock_bh(&req_q->req_lock);
  204. ret = hisi_qp_send(qp, &zip_sqe);
  205. spin_unlock_bh(&req_q->req_lock);
  206. if (unlikely(ret < 0)) {
  207. atomic64_inc(&dfx->send_busy_cnt);
  208. ret = -EAGAIN;
  209. dev_dbg_ratelimited(dev, "failed to send request!\n");
  210. goto err_unmap_output;
  211. }
  212. return -EINPROGRESS;
  213. err_unmap_output:
  214. hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
  215. err_unmap_input:
  216. hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
  217. return ret;
  218. }
/* Recover the request id previously stored in dw26 by hisi_zip_fill_tag(). */
static u32 hisi_zip_get_tag(struct hisi_zip_sqe *sqe)
{
	return sqe->dw26;
}
/* Extract the BD status (low byte of dw3); 0 means success. */
static u32 hisi_zip_get_status(struct hisi_zip_sqe *sqe)
{
	return sqe->dw3 & HZIP_BD_STATUS_M;
}
/* Number of output bytes the hardware produced for this BD. */
static u32 hisi_zip_get_dstlen(struct hisi_zip_sqe *sqe)
{
	return sqe->produced;
}
/*
 * QP completion callback, invoked when the hardware finishes a BD.
 * Looks up the originating request via the tag in the sqe, unmaps its
 * hardware SGLs, reports the produced output length, completes the acomp
 * request, and finally recycles the request slot.
 */
static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
{
	struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
	const struct hisi_zip_sqe_ops *ops = qp_ctx->ctx->ops;
	struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
	struct device *dev = &qp->qm->pdev->dev;
	struct hisi_zip_sqe *sqe = data;
	u32 tag = ops->get_tag(sqe);
	struct hisi_zip_req *req = req_q->q + tag;
	struct acomp_req *acomp_req = req->req;
	int err = 0;
	u32 status;

	atomic64_inc(&dfx->recv_cnt);
	status = ops->get_status(sqe);
	/* HZIP_NC_ERR is tolerated as success — presumably the hardware's
	 * "not compressible" status; defined in zip.h, confirm there. */
	if (unlikely(status != 0 && status != HZIP_NC_ERR)) {
		dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
			(qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
			sqe->produced);
		atomic64_inc(&dfx->err_bd_cnt);
		err = -EIO;
	}

	hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
	hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);

	/* Report the actual output length produced by the hardware. */
	acomp_req->dlen = ops->get_dstlen(sqe);

	if (acomp_req->base.complete)
		acomp_request_complete(acomp_req, err);

	/* Free the slot only after completion has been signalled. */
	hisi_zip_remove_req(qp_ctx, req);
}
  260. static int hisi_zip_acompress(struct acomp_req *acomp_req)
  261. {
  262. struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
  263. struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP];
  264. struct device *dev = &qp_ctx->qp->qm->pdev->dev;
  265. struct hisi_zip_req *req;
  266. int ret;
  267. req = hisi_zip_create_req(qp_ctx, acomp_req);
  268. if (IS_ERR(req))
  269. return PTR_ERR(req);
  270. ret = hisi_zip_do_work(qp_ctx, req);
  271. if (unlikely(ret != -EINPROGRESS)) {
  272. dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
  273. hisi_zip_remove_req(qp_ctx, req);
  274. }
  275. return ret;
  276. }
  277. static int hisi_zip_adecompress(struct acomp_req *acomp_req)
  278. {
  279. struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
  280. struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP];
  281. struct device *dev = &qp_ctx->qp->qm->pdev->dev;
  282. struct hisi_zip_req *req;
  283. int ret;
  284. req = hisi_zip_create_req(qp_ctx, acomp_req);
  285. if (IS_ERR(req))
  286. return PTR_ERR(req);
  287. ret = hisi_zip_do_work(qp_ctx, req);
  288. if (unlikely(ret != -EINPROGRESS)) {
  289. dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
  290. ret);
  291. hisi_zip_remove_req(qp_ctx, req);
  292. }
  293. return ret;
  294. }
  295. static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *qp_ctx,
  296. int alg_type, int req_type)
  297. {
  298. struct device *dev = &qp->qm->pdev->dev;
  299. int ret;
  300. qp->req_type = req_type;
  301. qp->alg_type = alg_type;
  302. qp->qp_ctx = qp_ctx;
  303. ret = hisi_qm_start_qp(qp, 0);
  304. if (ret < 0) {
  305. dev_err(dev, "failed to start qp (%d)!\n", ret);
  306. return ret;
  307. }
  308. qp_ctx->qp = qp;
  309. return 0;
  310. }
/* Stop the QP and return it to the QM's free pool. */
static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *qp_ctx)
{
	hisi_qm_stop_qp(qp_ctx->qp);
	hisi_qm_free_qps(&qp_ctx->qp, 1);
}
/* SQE build/parse hooks for this hardware generation (sqe type 0x3). */
static const struct hisi_zip_sqe_ops hisi_zip_ops = {
	.sqe_type		= 0x3,
	.fill_addr		= hisi_zip_fill_addr,
	.fill_buf_size		= hisi_zip_fill_buf_size,
	.fill_buf_type		= hisi_zip_fill_buf_type,
	.fill_req_type		= hisi_zip_fill_req_type,
	.fill_tag		= hisi_zip_fill_tag,
	.fill_sqe_type		= hisi_zip_fill_sqe_type,
	.get_tag		= hisi_zip_get_tag,
	.get_status		= hisi_zip_get_status,
	.get_dstlen		= hisi_zip_get_dstlen,
};
/*
 * Allocate and start one QP per direction for this transform context.
 * @req_type selects the hardware algorithm (e.g. deflate); @node is the
 * preferred NUMA node for QP allocation. On partial failure, QPs already
 * started are stopped and all allocated QPs are freed.
 */
static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node)
{
	struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
	struct hisi_zip_qp_ctx *qp_ctx;
	struct hisi_zip *hisi_zip;
	int ret, i, j;

	ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node);
	if (ret) {
		pr_err("failed to create zip qps (%d)!\n", ret);
		return -ENODEV;
	}

	/* All QPs come from the same QM, hence the same zip device. */
	hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm);

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		/* alg_type = 0 for compress, 1 for decompress in hw sqe */
		qp_ctx = &hisi_zip_ctx->qp_ctx[i];
		qp_ctx->ctx = hisi_zip_ctx;
		ret = hisi_zip_start_qp(qps[i], qp_ctx, i, req_type);
		if (ret) {
			/* Unwind: stop QPs already started, free them all. */
			for (j = i - 1; j >= 0; j--)
				hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp);

			hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM);
			return ret;
		}
		qp_ctx->zip_dev = hisi_zip;
	}

	hisi_zip_ctx->ops = &hisi_zip_ops;

	return 0;
}
  356. static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
  357. {
  358. int i;
  359. for (i = 0; i < HZIP_CTX_Q_NUM; i++)
  360. hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]);
  361. }
/*
 * Allocate the request-slot bitmap and slot array for both QPs, sized by
 * the QP's sq_depth. The error labels unwind exactly the allocations that
 * succeeded before the failure:
 *   - bitmap[1] failed  -> free q[0] + bitmap[0]      (err_free_comp_q)
 *   - q[0] failed       -> free bitmap[0]             (err_free_comp_bitmap)
 *   - q[1] failed       -> free bitmap[1] + q[0] + bitmap[0]
 */
static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
{
	u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
	struct hisi_zip_req_q *req_q;
	int i, ret;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		req_q = &ctx->qp_ctx[i].req_q;
		req_q->size = q_depth;

		req_q->req_bitmap = bitmap_zalloc(req_q->size, GFP_KERNEL);
		if (!req_q->req_bitmap) {
			ret = -ENOMEM;
			/* Nothing allocated yet on the first iteration. */
			if (i == 0)
				return ret;
			goto err_free_comp_q;
		}
		spin_lock_init(&req_q->req_lock);

		req_q->q = kcalloc(req_q->size, sizeof(struct hisi_zip_req),
				   GFP_KERNEL);
		if (!req_q->q) {
			ret = -ENOMEM;
			if (i == 0)
				goto err_free_comp_bitmap;
			else
				goto err_free_decomp_bitmap;
		}
	}

	return 0;

err_free_decomp_bitmap:
	bitmap_free(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap);
err_free_comp_q:
	kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q);
err_free_comp_bitmap:
	bitmap_free(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap);
	return ret;
}
  397. static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx)
  398. {
  399. int i;
  400. for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
  401. kfree(ctx->qp_ctx[i].req_q.q);
  402. bitmap_free(ctx->qp_ctx[i].req_q.req_bitmap);
  403. }
  404. }
/*
 * Create one hardware-SGL pool per QP. The pool holds q_depth * 2 SGLs
 * because each request slot maps two SGLs (src and dst — see
 * hisi_zip_do_work()). If the second pool fails, the first is freed.
 */
static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
{
	u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
	struct hisi_zip_qp_ctx *tmp;
	struct device *dev;
	int i;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		tmp = &ctx->qp_ctx[i];
		dev = &tmp->qp->qm->pdev->dev;
		tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, q_depth << 1,
							 sgl_sge_nr);
		if (IS_ERR(tmp->sgl_pool)) {
			if (i == 1)
				goto err_free_sgl_pool0;
			return -ENOMEM;
		}
	}

	return 0;

err_free_sgl_pool0:
	hisi_acc_free_sgl_pool(&ctx->qp_ctx[HZIP_QPC_COMP].qp->qm->pdev->dev,
			       ctx->qp_ctx[HZIP_QPC_COMP].sgl_pool);
	return -ENOMEM;
}
  428. static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx)
  429. {
  430. int i;
  431. for (i = 0; i < HZIP_CTX_Q_NUM; i++)
  432. hisi_acc_free_sgl_pool(&ctx->qp_ctx[i].qp->qm->pdev->dev,
  433. ctx->qp_ctx[i].sgl_pool);
  434. }
  435. static void hisi_zip_set_acomp_cb(struct hisi_zip_ctx *ctx,
  436. void (*fn)(struct hisi_qp *, void *))
  437. {
  438. int i;
  439. for (i = 0; i < HZIP_CTX_Q_NUM; i++)
  440. ctx->qp_ctx[i].qp->req_cb = fn;
  441. }
/*
 * Transform init: start the QPs, allocate the request pools and SGL pools,
 * then install the completion callback. Failures unwind in reverse order
 * via the goto chain.
 */
static int hisi_zip_acomp_init(struct crypto_acomp *tfm)
{
	const char *alg_name = crypto_tfm_alg_name(&tfm->base);
	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	struct device *dev;
	int ret;

	ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name), tfm->base.node);
	if (ret) {
		pr_err("failed to init ctx (%d)!\n", ret);
		return ret;
	}

	dev = &ctx->qp_ctx[0].qp->qm->pdev->dev;

	ret = hisi_zip_create_req_q(ctx);
	if (ret) {
		dev_err(dev, "failed to create request queue (%d)!\n", ret);
		goto err_ctx_exit;
	}

	ret = hisi_zip_create_sgl_pool(ctx);
	if (ret) {
		dev_err(dev, "failed to create sgl pool (%d)!\n", ret);
		goto err_release_req_q;
	}

	hisi_zip_set_acomp_cb(ctx, hisi_zip_acomp_cb);

	return 0;

err_release_req_q:
	hisi_zip_release_req_q(ctx);
err_ctx_exit:
	hisi_zip_ctx_exit(ctx);
	return ret;
}
/*
 * Transform exit: detach the completion callback first so no new
 * completions run against freed resources, then tear down in reverse
 * order of hisi_zip_acomp_init().
 */
static void hisi_zip_acomp_exit(struct crypto_acomp *tfm)
{
	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);

	hisi_zip_set_acomp_cb(ctx, NULL);
	hisi_zip_release_sgl_pool(ctx);
	hisi_zip_release_req_q(ctx);
	hisi_zip_ctx_exit(ctx);
}
/* Asynchronous "deflate" acomp algorithm backed by the zip hardware. */
static struct acomp_alg hisi_zip_acomp_deflate = {
	.init			= hisi_zip_acomp_init,
	.exit			= hisi_zip_acomp_exit,
	.compress		= hisi_zip_acompress,
	.decompress		= hisi_zip_adecompress,
	.base			= {
		.cra_name		= "deflate",
		.cra_driver_name	= "hisi-deflate-acomp",
		.cra_flags		= CRYPTO_ALG_ASYNC,
		.cra_module		= THIS_MODULE,
		.cra_priority		= HZIP_ALG_PRIORITY,
		.cra_ctxsize		= sizeof(struct hisi_zip_ctx),
	}
};
  494. static int hisi_zip_register_deflate(struct hisi_qm *qm)
  495. {
  496. int ret;
  497. if (!hisi_zip_alg_support(qm, HZIP_ALG_DEFLATE))
  498. return 0;
  499. ret = crypto_register_acomp(&hisi_zip_acomp_deflate);
  500. if (ret)
  501. dev_err(&qm->pdev->dev, "failed to register to deflate (%d)!\n", ret);
  502. return ret;
  503. }
  504. static void hisi_zip_unregister_deflate(struct hisi_qm *qm)
  505. {
  506. if (!hisi_zip_alg_support(qm, HZIP_ALG_DEFLATE))
  507. return;
  508. crypto_unregister_acomp(&hisi_zip_acomp_deflate);
  509. }
  510. int hisi_zip_register_to_crypto(struct hisi_qm *qm)
  511. {
  512. int ret = 0;
  513. mutex_lock(&zip_algs_lock);
  514. if (zip_available_devs++)
  515. goto unlock;
  516. ret = hisi_zip_register_deflate(qm);
  517. if (ret)
  518. zip_available_devs--;
  519. unlock:
  520. mutex_unlock(&zip_algs_lock);
  521. return ret;
  522. }
  523. void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
  524. {
  525. mutex_lock(&zip_algs_lock);
  526. if (--zip_available_devs)
  527. goto unlock;
  528. hisi_zip_unregister_deflate(qm);
  529. unlock:
  530. mutex_unlock(&zip_algs_lock);
  531. }