// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017 Hisilicon Limited. */
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>

#include "sec_drv.h"

#define SEC_MAX_CIPHER_KEY		64
#define SEC_REQ_LIMIT			SZ_32M

struct sec_c_alg_cfg {
	unsigned c_alg		: 3;
	unsigned c_mode		: 3;
	unsigned key_len	: 2;
	unsigned c_width	: 2;
};
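
/*
 * Per-algorithm BD configuration, indexed by enum sec_cipher_alg. Each
 * entry gives the cipher algorithm, block mode and key-length encoding
 * that sec_alg_skcipher_init_template() packs into the BD words.
 */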
static const struct sec_c_alg_cfg sec_c_alg_cfgs[] = {
	[SEC_C_DES_ECB_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_DES_CBC_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_3DES_ECB_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_ECB_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_3DES_CBC_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_CBC_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_AES_ECB_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_ECB_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_ECB_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CBC_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CBC_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CBC_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CTR_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CTR_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CTR_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_XTS_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_XTS_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_NULL] = {
	},
};

/*
 * Mutex used to ensure safe operation of the reference count of
 * alg providers
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
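
/*
 * Fill in a BD (buffer descriptor) template for @alg: pack the mode,
 * algorithm, key length and cipher width into the relevant BD words and
 * record the DMA address of the key buffer. Per-request fields (data
 * addresses, granule sizes, direction) are filled in later by
 * sec_alg_alloc_and_fill_el().
 */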
static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx,
					   struct sec_bd_info *req,
					   enum sec_cipher_alg alg)
{
	const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg];

	memset(req, 0, sizeof(*req));
	req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
	req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
	req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
	req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;

	req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
	req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
}

static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
					  const u8 *key,
					  unsigned int keylen,
					  enum sec_cipher_alg alg)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cipher_alg = alg;
	memcpy(ctx->key, key, keylen);
	sec_alg_skcipher_init_template(ctx, &ctx->req_template,
				       ctx->cipher_alg);
}
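
/*
 * Walk a chain of hardware scatter-gather lists, returning each block to
 * the DMA pool together with the DMA address it was allocated at.
 */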
static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
			    dma_addr_t psec_sgl, struct sec_dev_info *info)
{
	struct sec_hw_sgl *sgl_current, *sgl_next;
	dma_addr_t sgl_next_dma;

	sgl_current = hw_sgl;
	while (sgl_current) {
		sgl_next = sgl_current->next;
		sgl_next_dma = sgl_current->next_sgl;

		dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);

		sgl_current = sgl_next;
		psec_sgl = sgl_next_dma;
	}
}
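
/*
 * Build the hardware scatter-gather chain for a DMA-mapped scatterlist.
 * Entries are packed SEC_MAX_SGE_NUM to a block; a fresh block is taken
 * from the DMA pool each time the previous one fills and is linked in via
 * next/next_sgl. The head of the chain additionally records the entry
 * count for the chain as a whole.
 */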
static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
				     dma_addr_t *psec_sgl,
				     struct scatterlist *sgl,
				     int count,
				     struct sec_dev_info *info,
				     gfp_t gfp)
{
	struct sec_hw_sgl *sgl_current = NULL;
	struct sec_hw_sgl *sgl_next;
	dma_addr_t sgl_next_dma;
	struct scatterlist *sg;
	int ret, sge_index, i;

	if (!count)
		return -EINVAL;

	for_each_sg(sgl, sg, count, i) {
		sge_index = i % SEC_MAX_SGE_NUM;
		if (sge_index == 0) {
			sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
						   gfp, &sgl_next_dma);
			if (!sgl_next) {
				ret = -ENOMEM;
				goto err_free_hw_sgls;
			}

			if (!sgl_current) { /* First one */
				*psec_sgl = sgl_next_dma;
				*sec_sgl = sgl_next;
			} else { /* Chained */
				sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
				sgl_current->next_sgl = sgl_next_dma;
				sgl_current->next = sgl_next;
			}
			sgl_current = sgl_next;
		}
		sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
		sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
		sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
	}
	sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM;
	sgl_current->next_sgl = 0;
	(*sec_sgl)->entry_sum_in_chain = count;

	return 0;

err_free_hw_sgls:
	sec_free_hw_sgl(*sec_sgl, *psec_sgl, info);
	*psec_sgl = 0;

	return ret;
}
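
/*
 * Common setkey handler. The key is kept in a DMA-coherent buffer so the
 * engine can fetch it directly: on first use the buffer is allocated, on
 * rekeying the old key material is cleared in place before the new key is
 * copied in by sec_alg_skcipher_init_context().
 */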
static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen,
				   enum sec_cipher_alg alg)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	mutex_lock(&ctx->lock);
	if (ctx->key) {
		/* rekeying */
		memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
	} else {
		/* new key */
		ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY,
					       &ctx->pkey, GFP_KERNEL);
		if (!ctx->key) {
			mutex_unlock(&ctx->lock);
			return -ENOMEM;
		}
	}
	mutex_unlock(&ctx->lock);
	sec_alg_skcipher_init_context(tfm, key, keylen, alg);

	return 0;
}

static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_ECB_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_ECB_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_ECB_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CBC_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CBC_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CBC_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CTR_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CTR_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CTR_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;

	switch (keylen) {
	case AES_KEYSIZE_128 * 2:
		alg = SEC_C_AES_XTS_128;
		break;
	case AES_KEYSIZE_256 * 2:
		alg = SEC_C_AES_XTS_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	if (keylen != DES_KEY_SIZE)
		return -EINVAL;

	return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
}

static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	if (keylen != DES_KEY_SIZE)
		return -EINVAL;

	return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
}

static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	if (keylen != DES_KEY_SIZE * 3)
		return -EINVAL;

	return sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_ECB_192_3KEY);
}

static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	if (keylen != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	return sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_CBC_192_3KEY);
}
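
/*
 * Release one request element: both hardware SGL chains, the split
 * scatterlists and the element itself.
 */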
static void sec_alg_free_el(struct sec_request_el *el,
			    struct sec_dev_info *info)
{
	sec_free_hw_sgl(el->out, el->dma_out, info);
	sec_free_hw_sgl(el->in, el->dma_in, info);
	kfree(el->sgl_in);
	kfree(el->sgl_out);
	kfree(el);
}

/* queuelock must be held */
static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
{
	struct sec_request_el *el, *temp;
	int ret = 0;

	mutex_lock(&sec_req->lock);
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		/*
		 * Add to the hardware queue only under the following
		 * circumstances:
		 * 1) Software and hardware queue empty so no chain dependencies
		 * 2) No dependencies as this is a new IV (but check the
		 *    software queue is empty to maintain ordering)
		 * 3) No dependencies because the mode does no chaining.
		 *
		 * In other cases first insert onto the software queue, which
		 * is then emptied as requests complete.
		 */
		if (!queue->havesoftqueue ||
		    (kfifo_is_empty(&queue->softqueue) &&
		     sec_queue_empty(queue))) {
			ret = sec_queue_send(queue, &el->req, sec_req);
			if (ret == -EAGAIN) {
				/* Wait until we can send then try again */
				/* DEAD if here - should not happen */
				ret = -EBUSY;
				goto err_unlock;
			}
		} else {
			kfifo_put(&queue->softqueue, el);
		}
	}
err_unlock:
	mutex_unlock(&sec_req->lock);

	return ret;
}
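
/*
 * Completion handler for a single BD. It propagates the IV for the
 * chaining modes (for CBC by copying out the final data block as the next
 * IV, for CTR by incrementing the counter block), moves any deferred work
 * from the software queue or the backlog into the hardware queue, and,
 * once the last element of a request has completed, unmaps the request's
 * DMA resources and completes the original skcipher request.
 */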
static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
				      struct crypto_async_request *req_base)
{
	struct skcipher_request *skreq = container_of(req_base,
						      struct skcipher_request,
						      base);
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_request *backlog_req;
	struct sec_request_el *sec_req_el, *nextrequest;
	struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct device *dev = ctx->queue->dev_info->dev;
	int icv_or_skey_en, ret;
	bool done;

	sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
				      head);
	icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
		SEC_BD_W0_ICV_OR_SKEY_EN_S;
	if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
		dev_err(dev, "Got an invalid answer %lu %d\n",
			sec_resp->w1 & SEC_BD_W1_BD_INVALID,
			icv_or_skey_en);
		sec_req->err = -EINVAL;
		/*
		 * We need to muddle on to avoid getting stuck with elements
		 * on the queue. The error will be reported to the requester,
		 * so it should be able to handle it appropriately.
		 */
	}

	mutex_lock(&ctx->queue->queuelock);
	/* Put the IV in place for chained cases */
	switch (ctx->cipher_alg) {
	case SEC_C_AES_CBC_128:
	case SEC_C_AES_CBC_192:
	case SEC_C_AES_CBC_256:
		if (sec_req_el->req.w0 & SEC_BD_W0_DE)
			sg_pcopy_to_buffer(sec_req_el->sgl_out,
					   sg_nents(sec_req_el->sgl_out),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		else
			sg_pcopy_to_buffer(sec_req_el->sgl_in,
					   sg_nents(sec_req_el->sgl_in),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		/* No need to sync to the device as coherent DMA */
		break;
	case SEC_C_AES_CTR_128:
	case SEC_C_AES_CTR_192:
	case SEC_C_AES_CTR_256:
		crypto_inc(skreq->iv, 16);
		break;
	default:
		/* Do not update */
		break;
	}

	if (ctx->queue->havesoftqueue &&
	    !kfifo_is_empty(&ctx->queue->softqueue) &&
	    sec_queue_empty(ctx->queue)) {
		ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
		if (ret <= 0)
			dev_err(dev,
				"Error getting next element from kfifo %d\n",
				ret);
		else
			/* We know there is space so this cannot fail */
			sec_queue_send(ctx->queue, &nextrequest->req,
				       nextrequest->sec_req);
	} else if (!list_empty(&ctx->backlog)) {
		/* Need to verify there is room first */
		backlog_req = list_first_entry(&ctx->backlog,
					       typeof(*backlog_req),
					       backlog_head);
		if (sec_queue_can_enqueue(ctx->queue,
					  backlog_req->num_elements) ||
		    (ctx->queue->havesoftqueue &&
		     kfifo_avail(&ctx->queue->softqueue) >
		     backlog_req->num_elements)) {
			sec_send_request(backlog_req, ctx->queue);
			backlog_req->req_base->complete(backlog_req->req_base,
							-EINPROGRESS);
			list_del(&backlog_req->backlog_head);
		}
	}
	mutex_unlock(&ctx->queue->queuelock);

	mutex_lock(&sec_req->lock);
	list_del(&sec_req_el->head);
	mutex_unlock(&sec_req->lock);
	sec_alg_free_el(sec_req_el, ctx->queue->dev_info);

	/*
	 * Request is done.
	 * The dance is needed as the lock is freed in the completion.
	 */
	mutex_lock(&sec_req->lock);
	done = list_empty(&sec_req->elements);
	mutex_unlock(&sec_req->lock);
	if (done) {
		if (crypto_skcipher_ivsize(atfm)) {
			dma_unmap_single(dev, sec_req->dma_iv,
					 crypto_skcipher_ivsize(atfm),
					 DMA_TO_DEVICE);
		}
		dma_unmap_sg(dev, skreq->src, sec_req->len_in,
			     DMA_BIDIRECTIONAL);
		if (skreq->src != skreq->dst)
			dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
				     DMA_BIDIRECTIONAL);
		skreq->base.complete(&skreq->base, sec_req->err);
	}
}

void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
{
	struct sec_request *sec_req = shadow;

	sec_req->cb(resp, sec_req->req_base);
}
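
/*
 * Requests are split into blocks of at most SEC_REQ_LIMIT (32 MiB) bytes,
 * each of which becomes one BD. For example, a 70 MiB request splits into
 * three blocks of 32 MiB, 32 MiB and 6 MiB.
 */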
static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
					      int *steps, gfp_t gfp)
{
	size_t *sizes;
	int i;

	/* Split into suitable sized blocks */
	*steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
	sizes = kcalloc(*steps, sizeof(*sizes), gfp);
	if (!sizes)
		return -ENOMEM;

	for (i = 0; i < *steps - 1; i++)
		sizes[i] = SEC_REQ_LIMIT;
	sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
	*split_sizes = sizes;

	return 0;
}

static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
				int steps, struct scatterlist ***splits,
				int **splits_nents,
				int sgl_len_in,
				struct device *dev, gfp_t gfp)
{
	int ret, count;

	count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
	if (!count)
		return -EINVAL;

	*splits = kcalloc(steps, sizeof(struct scatterlist *), gfp);
	if (!*splits) {
		ret = -ENOMEM;
		goto err_unmap_sg;
	}
	*splits_nents = kcalloc(steps, sizeof(int), gfp);
	if (!*splits_nents) {
		ret = -ENOMEM;
		goto err_free_splits;
	}

	/* output the scatter list before and after this */
	ret = sg_split(sgl, count, 0, steps, split_sizes,
		       *splits, *splits_nents, gfp);
	if (ret) {
		ret = -ENOMEM;
		goto err_free_splits_nents;
	}

	return 0;

err_free_splits_nents:
	kfree(*splits_nents);
err_free_splits:
	kfree(*splits);
err_unmap_sg:
	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);

	return ret;
}

/*
 * Reverses the sec_map_and_split_sg call for messages not yet added to
 * the queues.
 */
static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps,
				struct scatterlist **splits, int *splits_nents,
				int sgl_len_in, struct device *dev)
{
	int i;

	for (i = 0; i < steps; i++)
		kfree(splits[i]);
	kfree(splits_nents);
	kfree(splits);

	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
}
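
/*
 * Allocate and fill one request element (BD plus hardware SGLs) for a
 * block of el_size bytes. The BD starts life as a copy of the tfm's
 * template; this function then sets the cipher direction, spreads the
 * granule size across the w0/w2 fields, wires up the source hardware SGL
 * address, and, when source and destination differ, sets the DE bit and
 * the destination hardware SGL address.
 */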
static struct sec_request_el
*sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt,
			   int el_size, bool different_dest,
			   struct scatterlist *sgl_in, int n_ents_in,
			   struct scatterlist *sgl_out, int n_ents_out,
			   struct sec_dev_info *info, gfp_t gfp)
{
	struct sec_request_el *el;
	struct sec_bd_info *req;
	int ret;

	el = kzalloc(sizeof(*el), gfp);
	if (!el)
		return ERR_PTR(-ENOMEM);
	el->el_length = el_size;
	req = &el->req;
	memcpy(req, template, sizeof(*req));

	req->w0 &= ~SEC_BD_W0_CIPHER_M;
	if (encrypt)
		req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
	else
		req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
	req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
		SEC_BD_W0_C_GRAN_SIZE_19_16_M;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
	req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
		SEC_BD_W0_C_GRAN_SIZE_21_20_M;

	/* Writing whole u32 so no need to take care of masking */
	req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
		((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) &
		 SEC_BD_W2_C_GRAN_SIZE_15_0_M);

	req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
	req->w1 |= SEC_BD_W1_ADDR_TYPE;

	el->sgl_in = sgl_in;

	ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
					n_ents_in, info, gfp);
	if (ret)
		goto err_free_el;

	req->data_addr_lo = lower_32_bits(el->dma_in);
	req->data_addr_hi = upper_32_bits(el->dma_in);

	if (different_dest) {
		el->sgl_out = sgl_out;
		ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
						el->sgl_out,
						n_ents_out, info, gfp);
		if (ret)
			goto err_free_hw_sgl_in;

		req->w0 |= SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);
	} else {
		req->w0 &= ~SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
	}

	return el;

err_free_hw_sgl_in:
	sec_free_hw_sgl(el->in, el->dma_in, info);
err_free_el:
	kfree(el);

	return ERR_PTR(ret);
}
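
/*
 * Main encrypt/decrypt path. The flow is: split the request into
 * SEC_REQ_LIMIT sized blocks, DMA-map and split the scatterlists to
 * match, map the IV, build one request element per block, then queue the
 * whole set atomically: to hardware, to the software queue, or, for
 * CRYPTO_TFM_REQ_MAY_BACKLOG requests when the queues are full, to the
 * backlog.
 */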
static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
				   bool encrypt)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_queue *queue = ctx->queue;
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_dev_info *info = queue->dev_info;
	int i, ret, steps;
	size_t *split_sizes;
	struct scatterlist **splits_in;
	struct scatterlist **splits_out = NULL;
	int *splits_in_nents;
	int *splits_out_nents = NULL;
	struct sec_request_el *el, *temp;
	bool split = skreq->src != skreq->dst;
	gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		GFP_KERNEL : GFP_ATOMIC;

	mutex_init(&sec_req->lock);
	sec_req->req_base = &skreq->base;
	sec_req->err = 0;
	/* SGL mapping out here to allow us to break it up as necessary */
	sec_req->len_in = sg_nents(skreq->src);

	ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
						 &steps, gfp);
	if (ret)
		return ret;
	sec_req->num_elements = steps;
	ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
				   &splits_in_nents, sec_req->len_in,
				   info->dev, gfp);
	if (ret)
		goto err_free_split_sizes;

	if (split) {
		sec_req->len_out = sg_nents(skreq->dst);
		ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
					   &splits_out, &splits_out_nents,
					   sec_req->len_out, info->dev, gfp);
		if (ret)
			goto err_unmap_in_sg;
	}
	/* Shared info stored in seq_req - applies to all BDs */
	sec_req->tfm_ctx = ctx;
	sec_req->cb = sec_skcipher_alg_callback;
	INIT_LIST_HEAD(&sec_req->elements);

	/*
	 * Future optimization.
	 * In the chaining case we can't use a dma pool bounce buffer,
	 * but in the case where we know there is no chaining we can.
	 */
	if (crypto_skcipher_ivsize(atfm)) {
		sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
						 crypto_skcipher_ivsize(atfm),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
			ret = -ENOMEM;
			goto err_unmap_out_sg;
		}
	}

	/* Set them all up then queue - cleaner error handling. */
	for (i = 0; i < steps; i++) {
		el = sec_alg_alloc_and_fill_el(&ctx->req_template,
					       encrypt ? 1 : 0,
					       split_sizes[i],
					       skreq->src != skreq->dst,
					       splits_in[i], splits_in_nents[i],
					       split ? splits_out[i] : NULL,
					       split ? splits_out_nents[i] : 0,
					       info, gfp);
		if (IS_ERR(el)) {
			ret = PTR_ERR(el);
			goto err_free_elements;
		}
		el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
		el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
		el->sec_req = sec_req;
		list_add_tail(&el->head, &sec_req->elements);
	}

	/*
	 * Only attempt to queue if the whole lot can fit in the queue -
	 * we can't successfully clean up after a partial queueing so this
	 * must succeed or fail atomically.
	 *
	 * Big hammer test of both software and hardware queues - could be
	 * more refined but this is unlikely to happen so no need.
	 */

	/* Grab a big lock for a long time to avoid concurrency issues */
	mutex_lock(&queue->queuelock);

	/*
	 * Can go on to queue if we have space in either:
	 * 1) The hardware queue and no software queue
	 * 2) The software queue
	 * AND there is nothing in the backlog. If there is backlog we
	 * have to only queue to the backlog queue and return busy.
	 */
	if ((!sec_queue_can_enqueue(queue, steps) &&
	     (!queue->havesoftqueue ||
	      kfifo_avail(&queue->softqueue) < steps)) ||
	    !list_empty(&ctx->backlog)) {
		ret = -EBUSY;
		if (skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
			mutex_unlock(&queue->queuelock);
			goto out;
		}

		mutex_unlock(&queue->queuelock);
		goto err_free_elements;
	}
	ret = sec_send_request(sec_req, queue);
	mutex_unlock(&queue->queuelock);
	if (ret)
		goto err_free_elements;

	ret = -EINPROGRESS;
out:
	/* Cleanup - all elements in pointer arrays have been copied */
	kfree(splits_in_nents);
	kfree(splits_in);
	kfree(splits_out_nents);
	kfree(splits_out);
	kfree(split_sizes);

	return ret;

err_free_elements:
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		list_del(&el->head);
		sec_alg_free_el(el, info);
	}
	if (crypto_skcipher_ivsize(atfm))
		dma_unmap_single(info->dev, sec_req->dma_iv,
				 crypto_skcipher_ivsize(atfm),
				 DMA_TO_DEVICE);
err_unmap_out_sg:
	if (split)
		sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
				    splits_out_nents, sec_req->len_out,
				    info->dev);
err_unmap_in_sg:
	sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
			    sec_req->len_in, info->dev);
err_free_split_sizes:
	kfree(split_sizes);

	return ret;
}

static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, true);
}

static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, false);
}

static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	mutex_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->backlog);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));

	ctx->queue = sec_queue_alloc_start_safe();
	if (IS_ERR(ctx->queue))
		return PTR_ERR(ctx->queue);

	mutex_init(&ctx->queue->queuelock);
	ctx->queue->havesoftqueue = false;

	return 0;
}

static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	if (ctx->key) {
		memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
		dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
				  ctx->pkey);
	}
	sec_queue_stop_release(ctx->queue);
}

static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = sec_alg_skcipher_init(tfm);
	if (ret)
		return ret;

	INIT_KFIFO(ctx->queue->softqueue);
	ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
	if (ret) {
		sec_alg_skcipher_exit(tfm);
		return ret;
	}
	ctx->queue->havesoftqueue = true;

	return 0;
}

static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	kfifo_free(&ctx->queue->softqueue);
	sec_alg_skcipher_exit(tfm);
}
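
/*
 * Modes that chain state between blocks via the IV (CBC, CTR) use
 * sec_alg_skcipher_init_with_queue() and sec_alg_skcipher_exit_with_queue()
 * so that a software queue preserves ordering between dependent elements;
 * ECB and XTS have no such dependency and use the plain init/exit
 * callbacks.
 */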
static struct skcipher_alg sec_algs[] = {
	{
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "hisi_sec_aes_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "hisi_sec_aes_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "hisi_sec_aes_ctr",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_ctr,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "hisi_sec_aes_xts",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_xts,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = 2 * AES_MIN_KEY_SIZE,
		.max_keysize = 2 * AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		/* Unable to find any test vectors so untested */
		.base = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "hisi_sec_des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "hisi_sec_des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = DES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_3des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_3des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = 0,
	}
};

int sec_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
	if (ret)
		--active_devs;
unlock:
	mutex_unlock(&algs_lock);

	return ret;
}

void sec_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
unlock:
	mutex_unlock(&algs_lock);
}
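
/*
 * Example usage (illustrative only, not part of this driver): a minimal
 * sketch of how a kernel consumer might exercise one of these algorithms
 * through the generic skcipher API. The crypto core selects the provider
 * by priority. Names such as buf, key and ivdata are placeholders, and
 * error handling is abbreviated.
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *	u8 key[AES_KEYSIZE_128], ivdata[AES_BLOCK_SIZE];
 *	u8 *buf;	// data buffer, a multiple of AES_BLOCK_SIZE
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *				      CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
 *	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, ivdata);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */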