/* cptvf_algs.c — Cavium CPT virtual-function crypto algorithm glue */
/*
 * Copyright (C) 2016 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <crypto/des.h>
#include <crypto/xts.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include "cptvf.h"
#include "cptvf_algs.h"
/* Registry of CPT VF devices that have probed; requests are dispatched
 * to one of these handles (indexed by CPU in cvm_enc_dec()).
 */
struct cpt_device_handle {
	void *cdev[MAX_DEVICES];	/* per-VF driver handles, filled in probe order */
	u32 dev_count;			/* number of cdev[] slots in use */
};

static struct cpt_device_handle dev_handle;
/* Hardware-completion callback: hand the result back to the crypto API.
 * NOTE(review): the status is inverted (!status) before being passed as the
 * completion error code — this presumes a nonzero hardware status means
 * success and 0 means failure; confirm against cptvf_do_request()'s
 * completion-code convention.
 */
static void cvm_callback(u32 status, void *arg)
{
	struct crypto_async_request *req = (struct crypto_async_request *)arg;

	req->complete(req, !status);
}
  31. static inline void update_input_iv(struct cpt_request_info *req_info,
  32. u8 *iv, u32 enc_iv_len,
  33. u32 *argcnt)
  34. {
  35. /* Setting the iv information */
  36. req_info->in[*argcnt].vptr = (void *)iv;
  37. req_info->in[*argcnt].size = enc_iv_len;
  38. req_info->req.dlen += enc_iv_len;
  39. ++(*argcnt);
  40. }
  41. static inline void update_output_iv(struct cpt_request_info *req_info,
  42. u8 *iv, u32 enc_iv_len,
  43. u32 *argcnt)
  44. {
  45. /* Setting the iv information */
  46. req_info->out[*argcnt].vptr = (void *)iv;
  47. req_info->out[*argcnt].size = enc_iv_len;
  48. req_info->rlen += enc_iv_len;
  49. ++(*argcnt);
  50. }
  51. static inline void update_input_data(struct cpt_request_info *req_info,
  52. struct scatterlist *inp_sg,
  53. u32 nbytes, u32 *argcnt)
  54. {
  55. req_info->req.dlen += nbytes;
  56. while (nbytes) {
  57. u32 len = min(nbytes, inp_sg->length);
  58. u8 *ptr = sg_virt(inp_sg);
  59. req_info->in[*argcnt].vptr = (void *)ptr;
  60. req_info->in[*argcnt].size = len;
  61. nbytes -= len;
  62. ++(*argcnt);
  63. ++inp_sg;
  64. }
  65. }
  66. static inline void update_output_data(struct cpt_request_info *req_info,
  67. struct scatterlist *outp_sg,
  68. u32 nbytes, u32 *argcnt)
  69. {
  70. req_info->rlen += nbytes;
  71. while (nbytes) {
  72. u32 len = min(nbytes, outp_sg->length);
  73. u8 *ptr = sg_virt(outp_sg);
  74. req_info->out[*argcnt].vptr = (void *)ptr;
  75. req_info->out[*argcnt].size = len;
  76. nbytes -= len;
  77. ++(*argcnt);
  78. ++outp_sg;
  79. }
  80. }
/* Build the flexi-crypto (FC) context header for a cipher request: fill the
 * request control/opcode fields, copy the key material into the fc_context,
 * and push the offset-control word plus the context as the first two input
 * gather entries.  Always returns 0.
 * NOTE(review): the enc_ctrl flags and the offset-control word are converted
 * with cpu_to_be64() in place — presumably the engine consumes them
 * big-endian; statement order here matters, do not reorder.
 */
static inline u32 create_ctx_hdr(struct ablkcipher_request *req, u32 enc,
				 u32 *argcnt)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cvm_enc_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
	struct fc_context *fctx = &rctx->fctx;
	u64 *offset_control = &rctx->control_word;
	u32 enc_iv_len = crypto_ablkcipher_ivsize(tfm);
	struct cpt_request_info *req_info = &rctx->cpt_req;
	u64 *ctrl_flags = NULL;

	req_info->ctrl.s.grp = 0;
	req_info->ctrl.s.dma_mode = DMA_GATHER_SCATTER;
	req_info->ctrl.s.se_req = SE_CORE_REQ;
	req_info->req.opcode.s.major = MAJOR_OP_FC |
				       DMA_MODE_FLAG(DMA_GATHER_SCATTER);
	/* Minor opcode selects direction: 2 = encrypt, 3 = decrypt. */
	if (enc)
		req_info->req.opcode.s.minor = 2;
	else
		req_info->req.opcode.s.minor = 3;

	req_info->req.param1 = req->nbytes; /* Encryption Data length */
	req_info->req.param2 = 0; /* Auth data length */

	fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type;
	fctx->enc.enc_ctrl.e.aes_key = ctx->key_type;
	fctx->enc.enc_ctrl.e.iv_source = FROM_DPTR;

	/* XTS carries two keys back to back (key1 at 0, key2 at KEY2_OFFSET),
	 * so twice key_len bytes must be copied into the context.
	 */
	if (ctx->cipher_type == AES_XTS)
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2);
	else
		memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len);
	ctrl_flags = (u64 *)&fctx->enc.enc_ctrl.flags;
	*ctrl_flags = cpu_to_be64(*ctrl_flags);

	/* Offset-control word: IV length in bits 16..31 (big-endian). */
	*offset_control = cpu_to_be64(((u64)(enc_iv_len) << 16));
	/* Storing Packet Data Information in offset
	 * Control Word First 8 bytes
	 */
	req_info->in[*argcnt].vptr = (u8 *)offset_control;
	req_info->in[*argcnt].size = CONTROL_WORD_LEN;
	req_info->req.dlen += CONTROL_WORD_LEN;
	++(*argcnt);

	req_info->in[*argcnt].vptr = (u8 *)fctx;
	req_info->in[*argcnt].size = sizeof(struct fc_context);
	req_info->req.dlen += sizeof(struct fc_context);
	++(*argcnt);

	return 0;
}
  126. static inline u32 create_input_list(struct ablkcipher_request *req, u32 enc,
  127. u32 enc_iv_len)
  128. {
  129. struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
  130. struct cpt_request_info *req_info = &rctx->cpt_req;
  131. u32 argcnt = 0;
  132. create_ctx_hdr(req, enc, &argcnt);
  133. update_input_iv(req_info, req->info, enc_iv_len, &argcnt);
  134. update_input_data(req_info, req->src, req->nbytes, &argcnt);
  135. req_info->incnt = argcnt;
  136. return 0;
  137. }
  138. static inline void store_cb_info(struct ablkcipher_request *req,
  139. struct cpt_request_info *req_info)
  140. {
  141. req_info->callback = (void *)cvm_callback;
  142. req_info->callback_arg = (void *)&req->base;
  143. }
  144. static inline void create_output_list(struct ablkcipher_request *req,
  145. u32 enc_iv_len)
  146. {
  147. struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
  148. struct cpt_request_info *req_info = &rctx->cpt_req;
  149. u32 argcnt = 0;
  150. /* OUTPUT Buffer Processing
  151. * AES encryption/decryption output would be
  152. * received in the following format
  153. *
  154. * ------IV--------|------ENCRYPTED/DECRYPTED DATA-----|
  155. * [ 16 Bytes/ [ Request Enc/Dec/ DATA Len AES CBC ]
  156. */
  157. /* Reading IV information */
  158. update_output_iv(req_info, req->info, enc_iv_len, &argcnt);
  159. update_output_data(req_info, req->dst, req->nbytes, &argcnt);
  160. req_info->outcnt = argcnt;
  161. }
  162. static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc)
  163. {
  164. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  165. struct cvm_req_ctx *rctx = ablkcipher_request_ctx(req);
  166. u32 enc_iv_len = crypto_ablkcipher_ivsize(tfm);
  167. struct fc_context *fctx = &rctx->fctx;
  168. struct cpt_request_info *req_info = &rctx->cpt_req;
  169. void *cdev = NULL;
  170. int status;
  171. memset(req_info, 0, sizeof(struct cpt_request_info));
  172. req_info->may_sleep = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) != 0;
  173. memset(fctx, 0, sizeof(struct fc_context));
  174. create_input_list(req, enc, enc_iv_len);
  175. create_output_list(req, enc_iv_len);
  176. store_cb_info(req, req_info);
  177. cdev = dev_handle.cdev[smp_processor_id()];
  178. status = cptvf_do_request(cdev, req_info);
  179. /* We perform an asynchronous send and once
  180. * the request is completed the driver would
  181. * intimate through registered call back functions
  182. */
  183. if (status)
  184. return status;
  185. else
  186. return -EINPROGRESS;
  187. }
/* ablkcipher .encrypt entry point. */
static int cvm_encrypt(struct ablkcipher_request *req)
{
	return cvm_enc_dec(req, true);
}
/* ablkcipher .decrypt entry point. */
static int cvm_decrypt(struct ablkcipher_request *req)
{
	return cvm_enc_dec(req, false);
}
  196. static int cvm_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
  197. u32 keylen)
  198. {
  199. struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
  200. struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
  201. int err;
  202. const u8 *key1 = key;
  203. const u8 *key2 = key + (keylen / 2);
  204. err = xts_check_key(tfm, key, keylen);
  205. if (err)
  206. return err;
  207. ctx->key_len = keylen;
  208. memcpy(ctx->enc_key, key1, keylen / 2);
  209. memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2);
  210. ctx->cipher_type = AES_XTS;
  211. switch (ctx->key_len) {
  212. case 32:
  213. ctx->key_type = AES_128_BIT;
  214. break;
  215. case 64:
  216. ctx->key_type = AES_256_BIT;
  217. break;
  218. default:
  219. return -EINVAL;
  220. }
  221. return 0;
  222. }
  223. static int cvm_validate_keylen(struct cvm_enc_ctx *ctx, u32 keylen)
  224. {
  225. if ((keylen == 16) || (keylen == 24) || (keylen == 32)) {
  226. ctx->key_len = keylen;
  227. switch (ctx->key_len) {
  228. case 16:
  229. ctx->key_type = AES_128_BIT;
  230. break;
  231. case 24:
  232. ctx->key_type = AES_192_BIT;
  233. break;
  234. case 32:
  235. ctx->key_type = AES_256_BIT;
  236. break;
  237. default:
  238. return -EINVAL;
  239. }
  240. if (ctx->cipher_type == DES3_CBC)
  241. ctx->key_type = 0;
  242. return 0;
  243. }
  244. return -EINVAL;
  245. }
  246. static int cvm_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
  247. u32 keylen, u8 cipher_type)
  248. {
  249. struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
  250. struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
  251. ctx->cipher_type = cipher_type;
  252. if (!cvm_validate_keylen(ctx, keylen)) {
  253. memcpy(ctx->enc_key, key, keylen);
  254. return 0;
  255. } else {
  256. crypto_ablkcipher_set_flags(cipher,
  257. CRYPTO_TFM_RES_BAD_KEY_LEN);
  258. return -EINVAL;
  259. }
  260. }
/* .setkey for cbc(aes). */
static int cvm_cbc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      u32 keylen)
{
	return cvm_setkey(cipher, key, keylen, AES_CBC);
}
/* .setkey for ecb(aes). */
static int cvm_ecb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      u32 keylen)
{
	return cvm_setkey(cipher, key, keylen, AES_ECB);
}
/* .setkey for cfb(aes). */
static int cvm_cfb_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      u32 keylen)
{
	return cvm_setkey(cipher, key, keylen, AES_CFB);
}
/* .setkey for cbc(des3_ede). */
static int cvm_cbc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			       u32 keylen)
{
	return cvm_setkey(cipher, key, keylen, DES3_CBC);
}
/* .setkey for ecb(des3_ede). */
static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			       u32 keylen)
{
	return cvm_setkey(cipher, key, keylen, DES3_ECB);
}
/* .cra_init: zero the per-tfm context and size the per-request context. */
static int cvm_enc_dec_init(struct crypto_tfm *tfm)
{
	struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
	tfm->crt_ablkcipher.reqsize = sizeof(struct cvm_req_ctx) +
				      sizeof(struct ablkcipher_request);
	/* Additional memory for ablkcipher_request is
	 * allocated since the cryptd daemon uses
	 * this memory for request_ctx information
	 */
	return 0;
}
/* Algorithms offloaded to the CPT engine.  All are async ablkciphers with
 * priority 4001 so they are preferred over software implementations.
 */
static struct crypto_alg algs[] = { {
	/* xts(aes): double-length key (key1 || key2). */
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cvm_enc_ctx),
	.cra_alignmask = 7,
	.cra_priority = 4001,
	.cra_name = "xts(aes)",
	.cra_driver_name = "cavium-xts-aes",
	.cra_type = &crypto_ablkcipher_type,
	.cra_u = {
		.ablkcipher = {
			.ivsize = AES_BLOCK_SIZE,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.setkey = cvm_xts_setkey,
			.encrypt = cvm_encrypt,
			.decrypt = cvm_decrypt,
		},
	},
	.cra_init = cvm_enc_dec_init,
	.cra_module = THIS_MODULE,
}, {
	/* cbc(aes) */
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cvm_enc_ctx),
	.cra_alignmask = 7,
	.cra_priority = 4001,
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cavium-cbc-aes",
	.cra_type = &crypto_ablkcipher_type,
	.cra_u = {
		.ablkcipher = {
			.ivsize = AES_BLOCK_SIZE,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = cvm_cbc_aes_setkey,
			.encrypt = cvm_encrypt,
			.decrypt = cvm_decrypt,
		},
	},
	.cra_init = cvm_enc_dec_init,
	.cra_module = THIS_MODULE,
}, {
	/* ecb(aes) */
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cvm_enc_ctx),
	.cra_alignmask = 7,
	.cra_priority = 4001,
	.cra_name = "ecb(aes)",
	.cra_driver_name = "cavium-ecb-aes",
	.cra_type = &crypto_ablkcipher_type,
	.cra_u = {
		.ablkcipher = {
			.ivsize = AES_BLOCK_SIZE,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = cvm_ecb_aes_setkey,
			.encrypt = cvm_encrypt,
			.decrypt = cvm_decrypt,
		},
	},
	.cra_init = cvm_enc_dec_init,
	.cra_module = THIS_MODULE,
}, {
	/* cfb(aes) */
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cvm_enc_ctx),
	.cra_alignmask = 7,
	.cra_priority = 4001,
	.cra_name = "cfb(aes)",
	.cra_driver_name = "cavium-cfb-aes",
	.cra_type = &crypto_ablkcipher_type,
	.cra_u = {
		.ablkcipher = {
			.ivsize = AES_BLOCK_SIZE,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = cvm_cfb_aes_setkey,
			.encrypt = cvm_encrypt,
			.decrypt = cvm_decrypt,
		},
	},
	.cra_init = cvm_enc_dec_init,
	.cra_module = THIS_MODULE,
}, {
	/* cbc(des3_ede): note the distinct 3DES context type. */
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cvm_des3_ctx),
	.cra_alignmask = 7,
	.cra_priority = 4001,
	.cra_name = "cbc(des3_ede)",
	.cra_driver_name = "cavium-cbc-des3_ede",
	.cra_type = &crypto_ablkcipher_type,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = cvm_cbc_des3_setkey,
			.encrypt = cvm_encrypt,
			.decrypt = cvm_decrypt,
		},
	},
	.cra_init = cvm_enc_dec_init,
	.cra_module = THIS_MODULE,
}, {
	/* ecb(des3_ede) */
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct cvm_des3_ctx),
	.cra_alignmask = 7,
	.cra_priority = 4001,
	.cra_name = "ecb(des3_ede)",
	.cra_driver_name = "cavium-ecb-des3_ede",
	.cra_type = &crypto_ablkcipher_type,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = cvm_ecb_des3_setkey,
			.encrypt = cvm_encrypt,
			.decrypt = cvm_decrypt,
		},
	},
	.cra_init = cvm_enc_dec_init,
	.cra_module = THIS_MODULE,
} };
  425. static inline int cav_register_algs(void)
  426. {
  427. int err = 0;
  428. err = crypto_register_algs(algs, ARRAY_SIZE(algs));
  429. if (err)
  430. return err;
  431. return 0;
  432. }
/* Unregister every entry of algs[] from the crypto API. */
static inline void cav_unregister_algs(void)
{
	crypto_unregister_algs(algs, ARRAY_SIZE(algs));
}
  437. int cvm_crypto_init(struct cpt_vf *cptvf)
  438. {
  439. struct pci_dev *pdev = cptvf->pdev;
  440. u32 dev_count;
  441. dev_count = dev_handle.dev_count;
  442. dev_handle.cdev[dev_count] = cptvf;
  443. dev_handle.dev_count++;
  444. if (dev_count == 3) {
  445. if (cav_register_algs()) {
  446. dev_err(&pdev->dev, "Error in registering crypto algorithms\n");
  447. return -EINVAL;
  448. }
  449. }
  450. return 0;
  451. }
/* Called from VF remove: drop one device reference and unregister the
 * algorithms when the last VF goes away.
 * NOTE(review): the pre-decrement underflows if this is ever called more
 * times than cvm_crypto_init() succeeded — assumes balanced probe/remove;
 * confirm against the caller.
 */
void cvm_crypto_exit(void)
{
	u32 dev_count;

	dev_count = --dev_handle.dev_count;
	if (!dev_count)
		cav_unregister_algs();
}