caampkc.c

// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019, 2023 NXP
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"
#include <crypto/internal/engine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F1_PDB)
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F2_PDB)
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F3_PDB)

#define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */
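
/*
 * Each job descriptor built below should be read as a header command plus
 * a single PROTOCOL OPERATION command (the two CAAM_CMD_SZ words) wrapped
 * around the corresponding Protocol Data Block, hence
 * DESC_*_LEN = 2 * CAAM_CMD_SZ + SIZEOF_*_PDB.
 */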

/* buffer filled with zeros, used for padding */
static u8 *zero_buffer;

/*
 * variable used to avoid double free of resources in case
 * algorithm registration was unsuccessful
 */
static bool init_done;

struct caam_akcipher_alg {
	struct akcipher_engine_alg akcipher;
	bool registered;
};
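
/*
 * The rsa_*_unmap() helpers below release exactly the DMA mappings created
 * by the matching set_rsa_*_pdb() routines further down; rsa_io_unmap()
 * covers what is common to all request types: the source and destination
 * scatterlists and, when present, the hardware S/G table.
 */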
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not by the crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
			    void *context)
{
	struct akcipher_request *req = context;
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	switch (key->priv_form) {
	case FORM1:
		rsa_priv_f1_unmap(dev, edesc, req);
		break;
	case FORM2:
		rsa_priv_f2_unmap(dev, edesc, req);
		break;
	case FORM3:
		rsa_priv_f3_unmap(dev, edesc, req);
	}

	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not by the crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

/**
 * caam_rsa_count_leading_zeros - Count the leading zeros that need to be
 *                                stripped from a given scatterlist
 *
 * @sgl   : scatterlist to count zeros from
 * @nbytes: maximum number of zero bytes to strip
 * @flags : operation flags
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		/* do not strip more than given bytes */
		while (len && !*buff && lzeros < nbytes) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		if (!sg_miter_next(&miter))
			break;

		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}
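
/*
 * Per-request state is kept in one allocation laid out as
 * [struct rsa_edesc | hw job descriptor (desclen bytes) | sec4 S/G table];
 * rsa_edesc_alloc() derives edesc->sec4_sg from that layout by pointer
 * arithmetic instead of allocating the table separately.
 */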
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int mapped_src_nents, mapped_dst_nents;
	unsigned int diff_size = 0;
	int lzeros;

	if (req->src_len > key->n_sz) {
		/*
		 * strip leading zeros and
		 * return the number of zeros to skip
		 */
		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
						      key->n_sz, sg_flags);
		if (lzeros < 0)
			return ERR_PTR(lzeros);

		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		/*
		 * input src is less than the n key modulus,
		 * so there will be zero padding
		 */
		diff_size = key->n_sz - req->src_len;
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}

	src_nents = sg_nents_for_len(req_ctx->fixup_src,
				     req_ctx->fixup_src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
				      DMA_TO_DEVICE);
	if (unlikely(!mapped_src_nents)) {
		dev_err(dev, "unable to map source\n");
		return ERR_PTR(-ENOMEM);
	}
	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
				      DMA_FROM_DEVICE);
	if (unlikely(!mapped_dst_nents)) {
		dev_err(dev, "unable to map destination\n");
		goto src_fail;
	}

	if (!diff_size && mapped_src_nents == 1)
		sec4_sg_len = 0; /* no need for an input hw s/g table */
	else
		sec4_sg_len = mapped_src_nents + !!diff_size;
	sec4_sg_index = sec4_sg_len;

	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, flags);
	if (!edesc)
		goto dst_fail;

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
	if (diff_size)
		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
				   0);

	if (sec4_sg_index)
		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
				   edesc->sec4_sg + !!diff_size, 0);

	if (mapped_dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, req->dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	req_ctx->edesc = edesc;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     edesc->sec4_sg_bytes, 1);

	return edesc;

sec4_sg_fail:
	kfree(edesc);
dst_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail:
	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
	return ERR_PTR(-ENOMEM);
}

static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct akcipher_request *req = container_of(areq,
						    struct akcipher_request,
						    base);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	u32 *desc = req_ctx->edesc->hw_desc;
	int ret;

	req_ctx->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);

	if (ret == -ENOSPC && engine->retry_support)
		return ret;

	if (ret != -EINPROGRESS) {
		rsa_pub_unmap(jrdev, req_ctx->edesc, req);
		rsa_io_unmap(jrdev, req_ctx->edesc, req);
		kfree(req_ctx->edesc);
	} else {
		ret = 0;
	}

	return ret;
}
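
/*
 * The set_rsa_*_pdb() helpers fill the Protocol Data Block consumed by the
 * RSA protocol operation: DMA addresses of the key material and of the f/g
 * input/output buffers, plus the sgf word, which packs the operand sizes
 * (e.g. key->e_sz shifted by RSA_PDB_E_SHIFT or'ed with key->n_sz) together
 * with the RSA_PDB_SGF_* flags telling CAAM whether f_dma/g_dma point at
 * plain buffers or at hardware S/G tables.
 */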
static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req_ctx->fixup_src_len;

	return 0;
}

static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int akcipher_enqueue_req(struct device *jrdev,
				void (*cbk)(struct device *jrdev, u32 *desc,
					    u32 err, void *context),
				struct akcipher_request *req)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc = req_ctx->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	req_ctx->akcipher_op_done = cbk;
	/*
	 * Only backlog requests are sent to the crypto engine, since the
	 * others can be handled by CAAM, if free, especially since the JR
	 * has up to 1024 entries (more than the 10 entries of the crypto
	 * engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
								 req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		switch (key->priv_form) {
		case FORM1:
			rsa_priv_f1_unmap(jrdev, edesc, req);
			break;
		case FORM2:
			rsa_priv_f2_unmap(jrdev, edesc, req);
			break;
		case FORM3:
			rsa_priv_f3_unmap(jrdev, edesc, req);
			break;
		default:
			rsa_pub_unmap(jrdev, edesc, req);
		}
		rsa_io_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}
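
/*
 * Both caam_rsa_enc() and caam_rsa_dec() below follow the same contract
 * for short output buffers: req->dst_len is updated to the required size
 * (key->n_sz) before returning -EOVERFLOW, so the caller knows how much
 * space to provide on retry.
 */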
static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	return akcipher_enqueue_req(jrdev, rsa_pub_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}
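
/*
 * Private key material (d, p, q, dp, dq, qinv and the tmp buffers) is
 * zeroized on free via kfree_sensitive(); e and n are public values, so a
 * plain kfree() is enough for them.
 */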
static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kfree_sensitive(key->d);
	kfree_sensitive(key->p);
	kfree_sensitive(key->q);
	kfree_sensitive(key->dp);
	kfree_sensitive(key->dq);
	kfree_sensitive(key->qinv);
	kfree_sensitive(key->tmp1);
	kfree_sensitive(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv could decode to less than the corresponding p, q length,
 * as the BER-encoding requires that the minimum number of bytes be used to
 * encode the integer. dP, dQ, qInv decoded values have to be zero-padded to
 * the appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_KERNEL);
}

static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length
	 * as decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static int caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				      struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;
	unsigned int aligned_size;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return -ENOMEM;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	aligned_size = ALIGN(raw_key->p_sz, dma_get_cache_alignment());
	rsa_key->tmp1 = kzalloc(aligned_size, GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	aligned_size = ALIGN(raw_key->q_sz, dma_get_cache_alignment());
	rsa_key->tmp2 = kzalloc(aligned_size, GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return 0;

free_dq:
	kfree_sensitive(rsa_key->dq);
free_dp:
	kfree_sensitive(rsa_key->dp);
free_tmp2:
	kfree_sensitive(rsa_key->tmp2);
free_tmp1:
	kfree_sensitive(rsa_key->tmp1);
free_q:
	kfree_sensitive(rsa_key->q);
free_p:
	kfree_sensitive(rsa_key->p);
	return -ENOMEM;
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length
	 * as decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	ret = caam_rsa_set_priv_key_form(ctx, &raw_key);
	if (ret)
		goto err;

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);

	return ctx->key.n_sz;
}

/* Per session pkc's driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);

	akcipher_set_reqsize(tfm, sizeof(struct caam_rsa_req_ctx));

	ctx->dev = caam_jr_alloc();
	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
					  CAAM_RSA_MAX_INPUT_SIZE - 1,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
		dev_err(ctx->dev, "unable to map padding\n");
		caam_jr_free(ctx->dev);
		return -ENOMEM;
	}

	return 0;
}

/* Per session pkc's driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;

	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
			 1, DMA_TO_DEVICE);
	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct caam_akcipher_alg caam_rsa = {
	.akcipher.base = {
		.encrypt = caam_rsa_enc,
		.decrypt = caam_rsa_dec,
		.set_pub_key = caam_rsa_set_pub_key,
		.set_priv_key = caam_rsa_set_priv_key,
		.max_size = caam_rsa_max_size,
		.init = caam_rsa_init_tfm,
		.exit = caam_rsa_exit_tfm,
		.base = {
			.cra_name = "rsa",
			.cra_driver_name = "rsa-caam",
			.cra_priority = 3000,
			.cra_module = THIS_MODULE,
			.cra_ctxsize = sizeof(struct caam_rsa_ctx) +
				       CRYPTO_DMA_PADDING,
		},
	},
	.akcipher.op = {
		.do_one_request = akcipher_do_one_req,
	},
};
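
/*
 * Given the high cra_priority above, this implementation is normally the
 * one selected for the generic "rsa" algorithm name. A minimal consumer
 * sketch, assuming the standard akcipher API from <crypto/akcipher.h>,
 * with error handling and scatterlist setup elided:
 *
 *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	struct akcipher_request *r = akcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_akcipher_set_pub_key(tfm, der_key, der_key_len);
 *	akcipher_request_set_crypt(r, src_sg, dst_sg, src_len, dst_len);
 *	crypto_akcipher_encrypt(r);        (ends up in caam_rsa_enc())
 *
 *	akcipher_request_free(r);
 *	crypto_free_akcipher(tfm);
 */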

/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 pk_inst, pkha;
	int err;

	init_done = false;

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10) {
		pk_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	} else {
		pkha = rd_reg32(&priv->jr[0]->vreg.pkha);
		pk_inst = pkha & CHA_VER_NUM_MASK;

		/*
		 * Newer CAAMs support partially disabled functionality. If
		 * this is the case, the number is non-zero, but this bit is
		 * set to indicate that no encryption or decryption is
		 * supported. Only signing and verifying are supported.
		 */
		if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
			pk_inst = 0;
	}

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return 0;

	/* allocate zero buffer, used for padding input */
	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_KERNEL);
	if (!zero_buffer)
		return -ENOMEM;

	err = crypto_engine_register_akcipher(&caam_rsa.akcipher);
	if (err) {
		kfree(zero_buffer);
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.akcipher.base.base.cra_driver_name);
	} else {
		init_done = true;
		caam_rsa.registered = true;
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
	}

	return err;
}

void caam_pkc_exit(void)
{
	if (!init_done)
		return;

	if (caam_rsa.registered)
		crypto_engine_unregister_akcipher(&caam_rsa.akcipher);

	kfree(zero_buffer);
}