hpre_crypto.c

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/akcipher.h>
#include <crypto/curve25519.h>
#include <crypto/dh.h>
#include <crypto/ecc_curve.h>
#include <crypto/ecdh.h>
#include <crypto/rng.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/rsa.h>
#include <crypto/kpp.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <linux/module.h>
#include <linux/time.h>
#include "hpre.h"

struct hpre_ctx;

#define HPRE_CRYPTO_ALG_PRI	1000
#define HPRE_ALIGN_SZ		64
#define HPRE_BITS_2_BYTES_SHIFT	3
#define HPRE_RSA_512BITS_KSZ	64
#define HPRE_RSA_1536BITS_KSZ	192
#define HPRE_CRT_PRMS		5
#define HPRE_CRT_Q		2
#define HPRE_CRT_P		3
#define HPRE_CRT_INV		4
#define HPRE_DH_G_FLAG		0x02
#define HPRE_TRY_SEND_TIMES	100
#define HPRE_INVLD_REQ_ID	(-1)

#define HPRE_SQE_ALG_BITS	5
#define HPRE_SQE_DONE_SHIFT	30
#define HPRE_DH_MAX_P_SZ	512

#define HPRE_DFX_SEC_TO_US	1000000
#define HPRE_DFX_US_TO_NS	1000

/* due to nist p521 */
#define HPRE_ECC_MAX_KSZ	66

/* size in bytes of the n prime */
#define HPRE_ECC_NIST_P192_N_SIZE	24
#define HPRE_ECC_NIST_P256_N_SIZE	32
#define HPRE_ECC_NIST_P384_N_SIZE	48

/* size in bytes */
#define HPRE_ECC_HW256_KSZ_B	32
#define HPRE_ECC_HW384_KSZ_B	48

/* capability register mask of driver */
#define HPRE_DRV_RSA_MASK_CAP		BIT(0)
#define HPRE_DRV_DH_MASK_CAP		BIT(1)
#define HPRE_DRV_ECDH_MASK_CAP		BIT(2)
#define HPRE_DRV_X25519_MASK_CAP	BIT(5)

static DEFINE_MUTEX(hpre_algs_lock);
static unsigned int hpre_available_devs;

typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);

struct hpre_rsa_ctx {
	/* low address: e--->n */
	char *pubkey;
	dma_addr_t dma_pubkey;

	/* low address: d--->n */
	char *prikey;
	dma_addr_t dma_prikey;

	/* low address: dq->dp->q->p->qinv */
	char *crt_prikey;
	dma_addr_t dma_crt_prikey;

	struct crypto_akcipher *soft_tfm;
};

struct hpre_dh_ctx {
	/*
	 * If base is g we compute the public key
	 * ya = g^xa mod p; [RFC2631 sec 2.1.1]
	 * else if base is the counterpart public key we
	 * compute the shared secret
	 * ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
	 * low address: d--->n, please refer to Hisilicon HPRE UM
	 */
	char *xa_p;
	dma_addr_t dma_xa_p;

	char *g; /* m */
	dma_addr_t dma_g;
};

struct hpre_ecdh_ctx {
	/* low address: p->a->k->b */
	unsigned char *p;
	dma_addr_t dma_p;

	/* low address: x->y */
	unsigned char *g;
	dma_addr_t dma_g;
};

struct hpre_curve25519_ctx {
	/* low address: p->a->k */
	unsigned char *p;
	dma_addr_t dma_p;

	/* gx coordinate */
	unsigned char *g;
	dma_addr_t dma_g;
};

struct hpre_ctx {
	struct hisi_qp *qp;
	struct device *dev;
	struct hpre_asym_request **req_list;
	struct hpre *hpre;
	spinlock_t req_lock;
	unsigned int key_sz;
	bool crt_g2_mode;
	struct idr req_idr;
	union {
		struct hpre_rsa_ctx rsa;
		struct hpre_dh_ctx dh;
		struct hpre_ecdh_ctx ecdh;
		struct hpre_curve25519_ctx curve25519;
	};
	/* for ecc algorithms */
	unsigned int curve_id;
};

struct hpre_asym_request {
	char *src;
	char *dst;
	struct hpre_sqe req;
	struct hpre_ctx *ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
		struct kpp_request *ecdh;
		struct kpp_request *curve25519;
	} areq;
	int err;
	int req_id;
	hpre_cb cb;
	struct timespec64 req_time;
};
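
/*
 * Request contexts handed out by the crypto API must satisfy both the
 * generic DMA alignment and the driver's own HPRE_ALIGN_SZ; these two
 * helpers compute the stricter of the two alignments and the padding
 * needed to reach it.
 */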
static inline unsigned int hpre_align_sz(void)
{
	return ((crypto_dma_align() - 1) | (HPRE_ALIGN_SZ - 1)) + 1;
}

static inline unsigned int hpre_align_pd(void)
{
	return (hpre_align_sz() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
}

static int hpre_alloc_req_id(struct hpre_ctx *ctx)
{
	unsigned long flags;
	int id;

	spin_lock_irqsave(&ctx->req_lock, flags);
	id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC);
	spin_unlock_irqrestore(&ctx->req_lock, flags);

	return id;
}

static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->req_lock, flags);
	idr_remove(&ctx->req_idr, req_id);
	spin_unlock_irqrestore(&ctx->req_lock, flags);
}

static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx;
	struct hpre_dfx *dfx;
	int id;

	ctx = hpre_req->ctx;
	id = hpre_alloc_req_id(ctx);
	if (unlikely(id < 0))
		return -EINVAL;

	ctx->req_list[id] = hpre_req;
	hpre_req->req_id = id;

	dfx = ctx->hpre->debug.dfx;
	if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
		ktime_get_ts64(&hpre_req->req_time);

	return id;
}

static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	int id = hpre_req->req_id;

	if (hpre_req->req_id >= 0) {
		hpre_req->req_id = HPRE_INVLD_REQ_ID;
		ctx->req_list[id] = NULL;
		hpre_free_req_id(ctx, id);
	}
}

static struct hisi_qp *hpre_get_qp_and_start(u8 type)
{
	struct hisi_qp *qp;
	int ret;

	qp = hpre_create_qp(type);
	if (!qp) {
		pr_err("Can not create hpre qp!\n");
		return ERR_PTR(-ENODEV);
	}

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0) {
		hisi_qm_free_qps(&qp, 1);
		pci_err(qp->qm->pdev, "Can not start qp!\n");
		return ERR_PTR(-EINVAL);
	}

	return qp;
}

static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
				  struct scatterlist *data, unsigned int len,
				  int is_src, dma_addr_t *tmp)
{
	struct device *dev = hpre_req->ctx->dev;
	enum dma_data_direction dma_dir;

	if (is_src) {
		hpre_req->src = NULL;
		dma_dir = DMA_TO_DEVICE;
	} else {
		hpre_req->dst = NULL;
		dma_dir = DMA_FROM_DEVICE;
	}
	*tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
	if (unlikely(dma_mapping_error(dev, *tmp))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	return 0;
}

static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
				struct scatterlist *data, unsigned int len,
				int is_src, dma_addr_t *tmp)
{
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	void *ptr;
	int shift;

	shift = ctx->key_sz - len;
	if (unlikely(shift < 0))
		return -EINVAL;

	ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_ATOMIC);
	if (unlikely(!ptr))
		return -ENOMEM;

	if (is_src) {
		scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
		hpre_req->src = ptr;
	} else {
		hpre_req->dst = ptr;
	}

	return 0;
}
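
/*
 * The hardware needs a key_sz-sized, physically contiguous operand: a
 * single-entry scatterlist of exactly key_sz bytes can be DMA-mapped in
 * place, anything else is staged through a zero-padded coherent bounce
 * buffer via hpre_prepare_dma_buf().
 */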
static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
			     struct scatterlist *data, unsigned int len,
			     int is_src, int is_dh)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	dma_addr_t tmp = 0;
	int ret;

	/* when the data is dh's source, we should format it */
	if ((sg_is_last(data) && len == ctx->key_sz) &&
	    ((is_dh && !is_src) || !is_dh))
		ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
	else
		ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);
	if (unlikely(ret))
		return ret;

	if (is_src)
		msg->in = cpu_to_le64(tmp);
	else
		msg->out = cpu_to_le64(tmp);

	return 0;
}

static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
				 struct hpre_asym_request *req,
				 struct scatterlist *dst,
				 struct scatterlist *src)
{
	struct device *dev = ctx->dev;
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t tmp;

	tmp = le64_to_cpu(sqe->in);
	if (unlikely(dma_mapping_error(dev, tmp)))
		return;

	if (src) {
		if (req->src)
			dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
		else
			dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
	}

	tmp = le64_to_cpu(sqe->out);
	if (unlikely(dma_mapping_error(dev, tmp)))
		return;

	if (req->dst) {
		if (dst)
			scatterwalk_map_and_copy(req->dst, dst, 0,
						 ctx->key_sz, 1);
		dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
	} else {
		dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
	}
}
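
/*
 * Post-process a completed SQE: look the request up by its tag, detach it
 * from the context, and translate the hardware's "done" and error fields
 * into 0 or -EINVAL.
 */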
static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
				void **kreq)
{
	struct hpre_asym_request *req;
	unsigned int err, done, alg;
	int id;

#define HPRE_NO_HW_ERR		0
#define HPRE_HW_TASK_DONE	3
#define HREE_HW_ERR_MASK	GENMASK(10, 0)
#define HREE_SQE_DONE_MASK	GENMASK(1, 0)
#define HREE_ALG_TYPE_MASK	GENMASK(4, 0)
	id = (int)le16_to_cpu(sqe->tag);
	req = ctx->req_list[id];
	hpre_rm_req_from_ctx(req);
	*kreq = req;
	err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
		HREE_HW_ERR_MASK;
	done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
		HREE_SQE_DONE_MASK;
	if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
		return 0;

	alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK;
	dev_err_ratelimited(ctx->dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
			    alg, done, err);

	return -EINVAL;
}

static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
{
	struct hpre *hpre;

	if (!ctx || !qp || qlen < 0)
		return -EINVAL;

	spin_lock_init(&ctx->req_lock);
	ctx->qp = qp;
	ctx->dev = &qp->qm->pdev->dev;

	hpre = container_of(ctx->qp->qm, struct hpre, qm);
	ctx->hpre = hpre;
	ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
	if (!ctx->req_list)
		return -ENOMEM;
	ctx->key_sz = 0;
	ctx->crt_g2_mode = false;
	idr_init(&ctx->req_idr);

	return 0;
}

static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
{
	if (is_clear_all) {
		idr_destroy(&ctx->req_idr);
		kfree(ctx->req_list);
		hisi_qm_free_qps(&ctx->qp, 1);
	}

	ctx->crt_g2_mode = false;
	ctx->key_sz = 0;
}

static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
			       u64 overtime_thrhld)
{
	struct timespec64 reply_time;
	u64 time_use_us;

	ktime_get_ts64(&reply_time);
	time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
		HPRE_DFX_SEC_TO_US +
		(reply_time.tv_nsec - req->req_time.tv_nsec) /
		HPRE_DFX_US_TO_NS;

	if (time_use_us <= overtime_thrhld)
		return false;

	return true;
}

static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.dh;
	areq->dst_len = ctx->key_sz;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);
	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req;
	struct akcipher_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	areq = req->areq.rsa;
	areq->dst_len = ctx->key_sz;
	hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	akcipher_request_complete(areq, ret);
	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
{
	struct hpre_ctx *ctx = qp->qp_ctx;
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_sqe *sqe = resp;
	struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];

	if (unlikely(!req)) {
		atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
		return;
	}

	req->cb(ctx, resp);
}

static void hpre_stop_qp_and_put(struct hisi_qp *qp)
{
	hisi_qm_stop_qp(qp);
	hisi_qm_free_qps(&qp, 1);
}

static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
{
	struct hisi_qp *qp;
	int ret;

	qp = hpre_get_qp_and_start(type);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	qp->qp_ctx = ctx;
	qp->req_cb = hpre_alg_cb;

	ret = hpre_ctx_set(ctx, qp, qp->sq_depth);
	if (ret)
		hpre_stop_qp_and_put(qp);

	return ret;
}
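
/*
 * Common SQE setup for RSA and DH: checks that the destination buffer can
 * hold a full key-sized result, seeds in/out with DMA_MAPPING_ERROR so
 * cleanup can tell what was actually mapped, and tags the SQE with a
 * per-context request id.
 */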
static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (is_rsa) {
		struct akcipher_request *akreq = req;

		if (akreq->dst_len < ctx->key_sz) {
			akreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = akcipher_request_ctx(akreq);
		h_req = PTR_ALIGN(tmp, hpre_align_sz());
		h_req->cb = hpre_rsa_cb;
		h_req->areq.rsa = akreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
	} else {
		struct kpp_request *kreq = req;

		if (kreq->dst_len < ctx->key_sz) {
			kreq->dst_len = ctx->key_sz;
			return -EOVERFLOW;
		}

		tmp = kpp_request_ctx(kreq);
		h_req = PTR_ALIGN(tmp, hpre_align_sz());
		h_req->cb = hpre_dh_cb;
		h_req->areq.dh = kreq;
		msg = &h_req->req;
		memset(msg, 0, sizeof(*msg));
		msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
	}

	msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);

	return 0;
}
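
/*
 * Submit an SQE to the queue pair, retrying up to HPRE_TRY_SEND_TIMES while
 * the queue reports -EBUSY; each attempt and each busy bounce is counted in
 * the DFX statistics.
 */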
static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	int ctr = 0;
	int ret;

	do {
		atomic64_inc(&dfx[HPRE_SEND_CNT].value);
		spin_lock_bh(&ctx->req_lock);
		ret = hisi_qp_send(ctx->qp, msg);
		spin_unlock_bh(&ctx->req_lock);
		if (ret != -EBUSY)
			break;
		atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
	} while (ctr++ < HPRE_TRY_SEND_TIMES);

	if (likely(!ret))
		return ret;

	if (ret != -EBUSY)
		atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);

	return ret;
}

static int hpre_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_msg_request_set(ctx, req, false);
	if (unlikely(ret))
		return ret;

	if (req->src) {
		ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
		if (unlikely(ret))
			goto clear_all;
	} else {
		msg->in = cpu_to_le64(ctx->dh.dma_g);
	}

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
	if (unlikely(ret))
		goto clear_all;

	if (ctx->crt_g2_mode && !req->src)
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
	else
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_is_dh_params_length_valid(unsigned int key_sz)
{
#define _HPRE_DH_GRP1		768
#define _HPRE_DH_GRP2		1024
#define _HPRE_DH_GRP5		1536
#define _HPRE_DH_GRP14		2048
#define _HPRE_DH_GRP15		3072
#define _HPRE_DH_GRP16		4096
	switch (key_sz) {
	case _HPRE_DH_GRP1:
	case _HPRE_DH_GRP2:
	case _HPRE_DH_GRP5:
	case _HPRE_DH_GRP14:
	case _HPRE_DH_GRP15:
	case _HPRE_DH_GRP16:
		return 0;
	default:
		return -EINVAL;
	}
}

static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
{
	struct device *dev = ctx->dev;
	unsigned int sz;

	if (params->p_size > HPRE_DH_MAX_P_SZ)
		return -EINVAL;

	if (hpre_is_dh_params_length_valid(params->p_size <<
					   HPRE_BITS_2_BYTES_SHIFT))
		return -EINVAL;

	sz = ctx->key_sz = params->p_size;
	ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
					  &ctx->dh.dma_xa_p, GFP_KERNEL);
	if (!ctx->dh.xa_p)
		return -ENOMEM;

	memcpy(ctx->dh.xa_p + sz, params->p, sz);

	/* If g equals 2 don't copy it */
	if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
		ctx->crt_g2_mode = true;
		return 0;
	}

	ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
	if (!ctx->dh.g) {
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
		return -ENOMEM;
	}

	memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);

	return 0;
}

static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	struct device *dev = ctx->dev;
	unsigned int sz = ctx->key_sz;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->dh.g) {
		dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
		ctx->dh.g = NULL;
	}

	if (ctx->dh.xa_p) {
		memzero_explicit(ctx->dh.xa_p, sz);
		dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
				  ctx->dh.dma_xa_p);
		ctx->dh.xa_p = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			      unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	/* Free old secret if any */
	hpre_dh_clear_ctx(ctx, false);

	ret = hpre_dh_set_params(ctx, &params);
	if (ret < 0)
		goto err_clear_ctx;

	memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
	       params.key_size);

	return 0;

err_clear_ctx:
	hpre_dh_clear_ctx(ctx, false);
	return ret;
}

static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->key_sz;
}

static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
}

static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_dh_clear_ctx(ctx, true);
}

static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
{
	/* check the remaining length before dereferencing */
	while (*len && !**ptr) {
		(*ptr)++;
		(*len)--;
	}
}

static bool hpre_rsa_key_size_is_support(unsigned int len)
{
	unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;

#define _RSA_1024BITS_KEY_WDTH		1024
#define _RSA_2048BITS_KEY_WDTH		2048
#define _RSA_3072BITS_KEY_WDTH		3072
#define _RSA_4096BITS_KEY_WDTH		4096

	switch (bits) {
	case _RSA_1024BITS_KEY_WDTH:
	case _RSA_2048BITS_KEY_WDTH:
	case _RSA_3072BITS_KEY_WDTH:
	case _RSA_4096BITS_KEY_WDTH:
		return true;
	default:
		return false;
	}
}

static int hpre_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_encrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.pubkey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
	msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	void *tmp = akcipher_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
		akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
		ret = crypto_akcipher_decrypt(req);
		akcipher_request_set_tfm(req, tfm);
		return ret;
	}

	if (unlikely(!ctx->rsa.prikey))
		return -EINVAL;

	ret = hpre_msg_request_set(ctx, req, true);
	if (unlikely(ret))
		return ret;

	if (ctx->crt_g2_mode) {
		msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_CRT);
	} else {
		msg->key = cpu_to_le64(ctx->rsa.dma_prikey);
		msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
				       HPRE_ALG_NC_NCRT);
	}

	ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
	if (unlikely(ret))
		goto clear_all;

	ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
	if (unlikely(ret))
		goto clear_all;

	/* success */
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
			  size_t vlen, bool private)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	ctx->key_sz = vlen;

	/* if invalid key size provided, we use software tfm */
	if (!hpre_rsa_key_size_is_support(ctx->key_sz))
		return 0;

	ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1,
					     &ctx->rsa.dma_pubkey,
					     GFP_KERNEL);
	if (!ctx->rsa.pubkey)
		return -ENOMEM;

	if (private) {
		ctx->rsa.prikey = dma_alloc_coherent(ctx->dev, vlen << 1,
						     &ctx->rsa.dma_prikey,
						     GFP_KERNEL);
		if (!ctx->rsa.prikey) {
			dma_free_coherent(ctx->dev, vlen << 1,
					  ctx->rsa.pubkey,
					  ctx->rsa.dma_pubkey);
			ctx->rsa.pubkey = NULL;
			return -ENOMEM;
		}
		memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
	}
	memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);

	/* Using hardware HPRE to do RSA */
	return 1;
}

static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}

static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
			  size_t vlen)
{
	const char *ptr = value;

	hpre_rsa_drop_leading_zeros(&ptr, &vlen);

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		return -EINVAL;

	memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);

	return 0;
}

static int hpre_crt_para_get(char *para, size_t para_sz,
			     const char *raw, size_t raw_sz)
{
	const char *ptr = raw;
	size_t len = raw_sz;

	hpre_rsa_drop_leading_zeros(&ptr, &len);
	if (!len || len > para_sz)
		return -EINVAL;

	memcpy(para + para_sz - len, ptr, len);

	return 0;
}

static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
{
	unsigned int hlf_ksz = ctx->key_sz >> 1;
	struct device *dev = ctx->dev;
	u64 offset;
	int ret;

	ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
						 &ctx->rsa.dma_crt_prikey,
						 GFP_KERNEL);
	if (!ctx->rsa.crt_prikey)
		return -ENOMEM;

	ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
				rsa_key->dq, rsa_key->dq_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->dp, rsa_key->dp_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_Q;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->q, rsa_key->q_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_P;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->p, rsa_key->p_sz);
	if (ret)
		goto free_key;

	offset = hlf_ksz * HPRE_CRT_INV;
	ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
				rsa_key->qinv, rsa_key->qinv_sz);
	if (ret)
		goto free_key;

	ctx->crt_g2_mode = true;

	return 0;

free_key:
	offset = hlf_ksz * HPRE_CRT_PRMS;
	memzero_explicit(ctx->rsa.crt_prikey, offset);
	dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
			  ctx->rsa.dma_crt_prikey);
	ctx->rsa.crt_prikey = NULL;
	ctx->crt_g2_mode = false;

	return ret;
}

/* If it is clear all, all the resources of the QP will be cleaned. */
static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
{
	unsigned int half_key_sz = ctx->key_sz >> 1;
	struct device *dev = ctx->dev;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (ctx->rsa.pubkey) {
		dma_free_coherent(dev, ctx->key_sz << 1,
				  ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
		ctx->rsa.pubkey = NULL;
	}

	if (ctx->rsa.crt_prikey) {
		memzero_explicit(ctx->rsa.crt_prikey,
				 half_key_sz * HPRE_CRT_PRMS);
		dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
				  ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
		ctx->rsa.crt_prikey = NULL;
	}

	if (ctx->rsa.prikey) {
		memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
				  ctx->rsa.dma_prikey);
		ctx->rsa.prikey = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

/*
 * Determine whether the key is in CRT format:
 * return true for CRT, false for N-CRT.
 */
static bool hpre_is_crt_key(struct rsa_key *key)
{
	u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
		  key->qinv_sz;

#define LEN_OF_NCRT_PARA	5

	/* an N-CRT key provides fewer than the five CRT parameters */
	return len > LEN_OF_NCRT_PARA;
}

static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
			   unsigned int keylen, bool private)
{
	struct rsa_key rsa_key;
	int ret;

	hpre_rsa_clear_ctx(ctx, false);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		return ret;

	ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
	if (ret <= 0)
		return ret;

	if (private) {
		ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;

		if (hpre_is_crt_key(&rsa_key)) {
			ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
			if (ret < 0)
				goto free;
		}
	}

	ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;

	if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
		ret = -EINVAL;
		goto free;
	}

	return 0;

free:
	hpre_rsa_clear_ctx(ctx, false);
	return ret;
}

static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, false);
}

static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			       unsigned int keylen)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
	if (ret)
		return ret;

	return hpre_rsa_setkey(ctx, key, keylen, true);
}

static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	/* For 512 and 1536 bits key size, use soft tfm instead */
	if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
	    ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
		return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);

	return ctx->key_sz;
}

static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
	int ret;

	ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
	if (IS_ERR(ctx->rsa.soft_tfm)) {
		pr_err("Can not alloc_akcipher!\n");
		return PTR_ERR(ctx->rsa.soft_tfm);
	}

	akcipher_set_reqsize(tfm, sizeof(struct hpre_asym_request) +
			     hpre_align_pd());

	ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
	if (ret)
		crypto_free_akcipher(ctx->rsa.soft_tfm);

	return ret;
}

static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);

	hpre_rsa_clear_ctx(ctx, true);
	crypto_free_akcipher(ctx->rsa.soft_tfm);
}
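
/* Reverse the byte order of a key in place (little-endian <-> big-endian). */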
static void hpre_key_to_big_end(u8 *data, int len)
{
	int i, j;

	for (i = 0; i < len / 2; i++) {
		j = len - i - 1;
		swap(data[j], data[i]);
	}
}

static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
			       bool is_ecdh)
{
	struct device *dev = ctx->dev;
	unsigned int sz = ctx->key_sz;
	unsigned int shift = sz << 1;

	if (is_clear_all)
		hisi_qm_stop_qp(ctx->qp);

	if (is_ecdh && ctx->ecdh.p) {
		/* ecdh: p->a->k->b */
		memzero_explicit(ctx->ecdh.p + shift, sz);
		dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
		ctx->ecdh.p = NULL;
	} else if (!is_ecdh && ctx->curve25519.p) {
		/* curve25519: p->a->k */
		memzero_explicit(ctx->curve25519.p + shift, sz);
		dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
				  ctx->curve25519.dma_p);
		ctx->curve25519.p = NULL;
	}

	hpre_ctx_clear(ctx, is_clear_all);
}

/*
 * HPRE supports curve widths of 192/224/256/384/521 bits, rounded up to a
 * hardware key size as follows:
 * bits <= 256: 256; 256 < bits <= 384: 384; 384 < bits <= 576: 576.
 * If the parameter bit width is smaller, the high-order bytes are
 * zero-filled in software, so TASK_LENGTH1 is 0x3/0x5/0x8.
 */
static unsigned int hpre_ecdh_supported_curve(unsigned short id)
{
	switch (id) {
	case ECC_CURVE_NIST_P192:
	case ECC_CURVE_NIST_P256:
		return HPRE_ECC_HW256_KSZ_B;
	case ECC_CURVE_NIST_P384:
		return HPRE_ECC_HW384_KSZ_B;
	default:
		break;
	}

	return 0;
}
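
/*
 * Copy a curve parameter stored as an array of little-endian u64 digits
 * into 'addr' (the last digit may be partial) and convert the whole
 * cur_sz-byte field to the big-endian layout the hardware expects.
 */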
static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)
{
	unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);
	u8 i = 0;

	while (i < ndigits - 1) {
		memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64));
		i++;
	}

	memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz);
	hpre_key_to_big_end((u8 *)addr, cur_sz);
}

static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,
				unsigned int cur_sz)
{
	unsigned int shifta = ctx->key_sz << 1;
	unsigned int shiftb = ctx->key_sz << 2;
	void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;
	void *a = ctx->ecdh.p + shifta - cur_sz;
	void *b = ctx->ecdh.p + shiftb - cur_sz;
	void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;
	void *y = ctx->ecdh.g + shifta - cur_sz;
	const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);
	char *n;

	if (unlikely(!curve))
		return -EINVAL;

	n = kzalloc(ctx->key_sz, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);
	fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);
	fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);
	fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);
	fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);
	fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);

	if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {
		kfree(n);
		return -EINVAL;
	}

	kfree(n);

	return 0;
}

static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
{
	switch (id) {
	case ECC_CURVE_NIST_P192:
		return HPRE_ECC_NIST_P192_N_SIZE;
	case ECC_CURVE_NIST_P256:
		return HPRE_ECC_NIST_P256_N_SIZE;
	case ECC_CURVE_NIST_P384:
		return HPRE_ECC_NIST_P384_N_SIZE;
	default:
		break;
	}

	return 0;
}

static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
{
	struct device *dev = ctx->dev;
	unsigned int sz, shift, curve_sz;
	int ret;

	ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);
	if (!ctx->key_sz)
		return -EINVAL;

	curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
	if (!curve_sz || params->key_size > curve_sz)
		return -EINVAL;

	sz = ctx->key_sz;

	if (!ctx->ecdh.p) {
		ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,
						 GFP_KERNEL);
		if (!ctx->ecdh.p)
			return -ENOMEM;
	}

	shift = sz << 2;
	ctx->ecdh.g = ctx->ecdh.p + shift;
	ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;

	ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);
	if (ret) {
		dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);
		dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
		ctx->ecdh.p = NULL;
		return ret;
	}

	return 0;
}

static bool hpre_key_is_zero(char *key, unsigned short key_sz)
{
	int i;

	for (i = 0; i < key_sz; i++)
		if (key[i])
			return false;

	return true;
}

static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params)
{
	struct device *dev = ctx->dev;
	int ret;

	ret = crypto_get_default_rng();
	if (ret) {
		dev_err(dev, "failed to get default rng, ret = %d!\n", ret);
		return ret;
	}

	ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key,
				   params->key_size);
	crypto_put_default_rng();
	if (ret)
		dev_err(dev, "failed to get rng, ret = %d!\n", ret);

	return ret;
}

static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
				unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	unsigned int sz, sz_shift, curve_sz;
	struct device *dev = ctx->dev;
	char key[HPRE_ECC_MAX_KSZ];
	struct ecdh params;
	int ret;

	if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
		dev_err(dev, "failed to decode ecdh key!\n");
		return -EINVAL;
	}

	/* Use stdrng to generate private key */
	if (!params.key || !params.key_size) {
		params.key = key;
		curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
		if (!curve_sz) {
			dev_err(dev, "Invalid curve size!\n");
			return -EINVAL;
		}
		params.key_size = curve_sz - 1;
		ret = ecdh_gen_privkey(ctx, &params);
		if (ret)
			return ret;
	}

	if (hpre_key_is_zero(params.key, params.key_size)) {
		dev_err(dev, "Invalid hpre key!\n");
		return -EINVAL;
	}

	hpre_ecc_clear_ctx(ctx, false, true);

	ret = hpre_ecdh_set_param(ctx, &params);
	if (ret < 0) {
		dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
		return ret;
	}

	sz = ctx->key_sz;
	sz_shift = (sz << 1) + sz - params.key_size;
	memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);

	return 0;
}

static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
				      struct hpre_asym_request *req,
				      struct scatterlist *dst,
				      struct scatterlist *src)
{
	struct device *dev = ctx->dev;
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t dma;

	dma = le64_to_cpu(sqe->in);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (src && req->src)
		dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);

	dma = le64_to_cpu(sqe->out);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (req->dst)
		dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
	if (dst)
		dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
}

static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
{
	unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req = NULL;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	char *p;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.ecdh;
	areq->dst_len = ctx->key_sz << 1;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	p = sg_virt(areq->dst);
	memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
	memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);

	hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);
	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}
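
/*
 * The ECDH result is a point (x, y), so the destination must hold two
 * key-size coordinates; the completion callback above then compacts the
 * hardware's zero-padded coordinates down to curve-size values.
 */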
static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
				     struct kpp_request *req)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (req->dst_len < ctx->key_sz << 1) {
		req->dst_len = ctx->key_sz << 1;
		return -EINVAL;
	}

	tmp = kpp_request_ctx(req);
	h_req = PTR_ALIGN(tmp, hpre_align_sz());
	h_req->cb = hpre_ecdh_cb;
	h_req->areq.ecdh = req;
	msg = &h_req->req;
	memset(msg, 0, sizeof(*msg));
	msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->key = cpu_to_le64(ctx->ecdh.dma_p);

	msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);
	return 0;
}

static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
				   struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	unsigned int tmpshift;
	dma_addr_t dma = 0;
	void *ptr;
	int shift;

	/* Src_data include gx and gy. */
	shift = ctx->key_sz - (len >> 1);
	if (unlikely(shift < 0))
		return -EINVAL;

	ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
	if (unlikely(!ptr))
		return -ENOMEM;

	tmpshift = ctx->key_sz << 1;
	scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
	memcpy(ptr + shift, ptr + tmpshift, len >> 1);
	memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);

	hpre_req->src = ptr;
	msg->in = cpu_to_le64(dma);

	return 0;
}

static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
				   struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	dma_addr_t dma;

	if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
		dev_err(dev, "data or data length is illegal!\n");
		return -EINVAL;
	}

	hpre_req->dst = NULL;
	dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	msg->out = cpu_to_le64(dma);

	return 0;
}

static int hpre_ecdh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_ecdh_msg_request_set(ctx, req);
	if (unlikely(ret)) {
		dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
		return ret;
	}

	if (req->src) {
		ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
		if (unlikely(ret)) {
			dev_err(dev, "failed to init src data, ret = %d!\n", ret);
			goto clear_all;
		}
	} else {
		msg->in = cpu_to_le64(ctx->ecdh.dma_g);
	}

	ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
	if (unlikely(ret)) {
		dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
		goto clear_all;
	}

	msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);

	return ret;
}

static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	/* max size is the pub_key_size, include x and y */
	return ctx->key_sz << 1;
}

static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->curve_id = ECC_CURVE_NIST_P192;

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->curve_id = ECC_CURVE_NIST_P256;

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	ctx->curve_id = ECC_CURVE_NIST_P384;

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_ecc_clear_ctx(ctx, true, true);
}

static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
				       unsigned int len)
{
	u8 secret[CURVE25519_KEY_SIZE] = { 0 };
	unsigned int sz = ctx->key_sz;
	const struct ecc_curve *curve;
	unsigned int shift = sz << 1;
	void *p;

	/*
	 * The key from 'buf' is little-endian, so preprocess it as described
	 * in RFC 7748 ("k[0] &= 248, k[31] &= 127, k[31] |= 64") and then
	 * convert it to big-endian. Only then does the result match the
	 * software curve25519 implementation in the crypto subsystem.
	 */
  1366. memcpy(secret, buf, len);
  1367. curve25519_clamp_secret(secret);
  1368. hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
  1369. p = ctx->curve25519.p + sz - len;
  1370. curve = ecc_get_curve25519();
  1371. /* fill curve parameters */
  1372. fill_curve_param(p, curve->p, len, curve->g.ndigits);
  1373. fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
  1374. memcpy(p + shift, secret, len);
  1375. fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
  1376. memzero_explicit(secret, CURVE25519_KEY_SIZE);
  1377. }
static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
				     unsigned int len)
{
	struct device *dev = ctx->dev;
	unsigned int sz = ctx->key_sz;
	unsigned int shift = sz << 1;

	/* one coherent buffer holding p->a->k->gx */
	if (!ctx->curve25519.p) {
		ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
						       &ctx->curve25519.dma_p,
						       GFP_KERNEL);
		if (!ctx->curve25519.p)
			return -ENOMEM;
	}

	ctx->curve25519.g = ctx->curve25519.p + shift + sz;
	ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;

	hpre_curve25519_fill_curve(ctx, buf, len);

	return 0;
}

static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
				      unsigned int len)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	int ret = -EINVAL;

	if (len != CURVE25519_KEY_SIZE ||
	    !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
		dev_err(dev, "key is null or key len is not 32 bytes!\n");
		return ret;
	}

	/* Free old secret if any */
	hpre_ecc_clear_ctx(ctx, false, false);

	ctx->key_sz = CURVE25519_KEY_SIZE;
	ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
	if (ret) {
		dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
		hpre_ecc_clear_ctx(ctx, false, false);
		return ret;
	}

	return 0;
}

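/*
 * Undo the DMA setup from src/dst init: the src buffer is a coherent
 * allocation, while the dst scatterlist was mapped with dma_map_single().
 * Addresses still equal to DMA_MAPPING_ERROR were never set up.
 */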
static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
					    struct hpre_asym_request *req,
					    struct scatterlist *dst,
					    struct scatterlist *src)
{
	struct device *dev = ctx->dev;
	struct hpre_sqe *sqe = &req->req;
	dma_addr_t dma;

	dma = le64_to_cpu(sqe->in);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (src && req->src)
		dma_free_coherent(dev, ctx->key_sz, req->src, dma);

	dma = le64_to_cpu(sqe->out);
	if (unlikely(dma_mapping_error(dev, dma)))
		return;

	if (req->dst)
		dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
	if (dst)
		dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
}

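/*
 * Completion callback: convert the big-endian hardware result back to
 * the little-endian form that RFC 7748 callers expect, then release the
 * DMA buffers and complete the kpp request.
 */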
static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
{
	struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
	struct hpre_asym_request *req = NULL;
	struct kpp_request *areq;
	u64 overtime_thrhld;
	int ret;

	ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
	areq = req->areq.curve25519;
	areq->dst_len = ctx->key_sz;

	overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
	if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
		atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);

	hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);

	hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
	kpp_request_complete(areq, ret);

	atomic64_inc(&dfx[HPRE_RECV_CNT].value);
}

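/*
 * Prepare the SQE for an x25519 job: point the hardware at the curve
 * parameter buffer, initialize in/out to DMA_MAPPING_ERROR so cleanup
 * can tell what was actually mapped, and tag the request with its slot
 * id in the context.
 */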
static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
					   struct kpp_request *req)
{
	struct hpre_asym_request *h_req;
	struct hpre_sqe *msg;
	int req_id;
	void *tmp;

	if (unlikely(req->dst_len < ctx->key_sz)) {
		req->dst_len = ctx->key_sz;
		return -EINVAL;
	}

	tmp = kpp_request_ctx(req);
	h_req = PTR_ALIGN(tmp, hpre_align_sz());
	h_req->cb = hpre_curve25519_cb;
	h_req->areq.curve25519 = req;
	msg = &h_req->req;
	memset(msg, 0, sizeof(*msg));
	msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
	msg->key = cpu_to_le64(ctx->curve25519.dma_p);

	msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
	msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
	h_req->ctx = ctx;

	req_id = hpre_add_req_to_ctx(h_req);
	if (req_id < 0)
		return -EBUSY;

	msg->tag = cpu_to_le16((u16)req_id);
	return 0;
}

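/*
 * Reduce a big-endian value in (p, 2^255 - 1] modulo p = 2^255 - 19.
 * In that range the result is at most 18, so only the last byte of the
 * result can be non-zero.
 */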
static void hpre_curve25519_src_modulo_p(u8 *ptr)
{
	int i;

	for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
		ptr[i] = 0;

	/* The result is ptr's last byte minus 0xed (the last byte of p) */
	ptr[i] -= 0xed;
}

static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
				    struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	u8 p[CURVE25519_KEY_SIZE] = { 0 };
	const struct ecc_curve *curve;
	dma_addr_t dma = 0;
	u8 *ptr;

	if (len != CURVE25519_KEY_SIZE) {
		dev_err(dev, "src_data len is not 32 bytes, len = %u!\n", len);
		return -EINVAL;
	}

	ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
	if (unlikely(!ptr))
		return -ENOMEM;

	scatterwalk_map_and_copy(ptr, data, 0, len, 0);

	if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
		dev_err(dev, "gx is null!\n");
		goto err;
	}

	/*
	 * src_data (gx) is in little-endian order; mask the MSB of the final
	 * byte as described in RFC 7748, then convert it to the big-endian
	 * form the hardware expects.
	 */
	ptr[31] &= 0x7f;
	hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);

	curve = ecc_get_curve25519();

	fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);

	/*
	 * If src_data lies in [2^255 - 19, 2^255 - 1] it is not reduced
	 * modulo p, so reduce it before handing it to the hardware.
	 */
	if (memcmp(ptr, p, ctx->key_sz) == 0) {
		dev_err(dev, "gx is p!\n");
		goto err;
	} else if (memcmp(ptr, p, ctx->key_sz) > 0) {
		hpre_curve25519_src_modulo_p(ptr);
	}

	hpre_req->src = ptr;
	msg->in = cpu_to_le64(dma);
	return 0;

err:
	dma_free_coherent(dev, ctx->key_sz, ptr, dma);
	return -EINVAL;
}

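/* Map the (single-entry) destination scatterlist for the hardware result. */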
static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
				    struct scatterlist *data, unsigned int len)
{
	struct hpre_sqe *msg = &hpre_req->req;
	struct hpre_ctx *ctx = hpre_req->ctx;
	struct device *dev = ctx->dev;
	dma_addr_t dma;

	if (!data || !sg_is_last(data) || len != ctx->key_sz) {
		dev_err(dev, "data or data length is illegal!\n");
		return -EINVAL;
	}

	hpre_req->dst = NULL;
	dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, dma))) {
		dev_err(dev, "dma map data err!\n");
		return -ENOMEM;
	}

	msg->out = cpu_to_le64(dma);
	return 0;
}

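/*
 * Main x25519 entry point for both generate_public_key and
 * compute_shared_secret: with no req->src the stored base point gx is
 * used as input, otherwise the caller's public key is mapped. Returns
 * -EINPROGRESS once the job has been queued to the hardware.
 */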
static int hpre_curve25519_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	void *tmp = kpp_request_ctx(req);
	struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
	struct hpre_sqe *msg = &hpre_req->req;
	int ret;

	ret = hpre_curve25519_msg_request_set(ctx, req);
	if (unlikely(ret)) {
		dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
		return ret;
	}

	if (req->src) {
		ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
		if (unlikely(ret)) {
			dev_err(dev, "failed to init src data, ret = %d!\n",
				ret);
			goto clear_all;
		}
	} else {
		msg->in = cpu_to_le64(ctx->curve25519.dma_g);
	}

	ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
	if (unlikely(ret)) {
		dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
		goto clear_all;
	}

	msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
	ret = hpre_send(ctx, msg);
	if (likely(!ret))
		return -EINPROGRESS;

clear_all:
	hpre_rm_req_from_ctx(hpre_req);
	hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
	return ret;
}

static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	return ctx->key_sz;
}

static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());

	return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
}

static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
{
	struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);

	hpre_ecc_clear_ctx(ctx, true, false);
}

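/*
 * HPRE implements the raw RSA primitive, so the sign/verify hooks reuse
 * the decrypt/encrypt paths.
 */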
static struct akcipher_alg rsa = {
	.sign = hpre_rsa_dec,
	.verify = hpre_rsa_enc,
	.encrypt = hpre_rsa_enc,
	.decrypt = hpre_rsa_dec,
	.set_pub_key = hpre_rsa_setpubkey,
	.set_priv_key = hpre_rsa_setprivkey,
	.max_size = hpre_rsa_max_size,
	.init = hpre_rsa_init_tfm,
	.exit = hpre_rsa_exit_tfm,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "rsa",
		.cra_driver_name = "hpre-rsa",
		.cra_module = THIS_MODULE,
	},
};

static struct kpp_alg dh = {
	.set_secret = hpre_dh_set_secret,
	.generate_public_key = hpre_dh_compute_value,
	.compute_shared_secret = hpre_dh_compute_value,
	.max_size = hpre_dh_max_size,
	.init = hpre_dh_init_tfm,
	.exit = hpre_dh_exit_tfm,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "dh",
		.cra_driver_name = "hpre-dh",
		.cra_module = THIS_MODULE,
	},
};

static struct kpp_alg ecdh_curves[] = {
	{
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p192_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p192",
			.cra_driver_name = "hpre-ecdh-nist-p192",
			.cra_module = THIS_MODULE,
		},
	}, {
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p256_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p256",
			.cra_driver_name = "hpre-ecdh-nist-p256",
			.cra_module = THIS_MODULE,
		},
	}, {
		.set_secret = hpre_ecdh_set_secret,
		.generate_public_key = hpre_ecdh_compute_value,
		.compute_shared_secret = hpre_ecdh_compute_value,
		.max_size = hpre_ecdh_max_size,
		.init = hpre_ecdh_nist_p384_init_tfm,
		.exit = hpre_ecdh_exit_tfm,
		.base = {
			.cra_ctxsize = sizeof(struct hpre_ctx),
			.cra_priority = HPRE_CRYPTO_ALG_PRI,
			.cra_name = "ecdh-nist-p384",
			.cra_driver_name = "hpre-ecdh-nist-p384",
			.cra_module = THIS_MODULE,
		},
	}
};

static struct kpp_alg curve25519_alg = {
	.set_secret = hpre_curve25519_set_secret,
	.generate_public_key = hpre_curve25519_compute_value,
	.compute_shared_secret = hpre_curve25519_compute_value,
	.max_size = hpre_curve25519_max_size,
	.init = hpre_curve25519_init_tfm,
	.exit = hpre_curve25519_exit_tfm,
	.base = {
		.cra_ctxsize = sizeof(struct hpre_ctx),
		.cra_priority = HPRE_CRYPTO_ALG_PRI,
		.cra_name = "curve25519",
		.cra_driver_name = "hpre-curve25519",
		.cra_module = THIS_MODULE,
	},
};

static int hpre_register_rsa(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
		return 0;

	rsa.base.cra_flags = 0;
	ret = crypto_register_akcipher(&rsa);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register rsa (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_rsa(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
		return;

	crypto_unregister_akcipher(&rsa);
}

static int hpre_register_dh(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
		return 0;

	ret = crypto_register_kpp(&dh);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register dh (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_dh(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
		return;

	crypto_unregister_kpp(&dh);
}

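/*
 * Register all supported ECDH curves; on failure unwind the curves that
 * were already registered.
 */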
static int hpre_register_ecdh(struct hisi_qm *qm)
{
	int ret, i;

	if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
		return 0;

	for (i = 0; i < ARRAY_SIZE(ecdh_curves); i++) {
		ret = crypto_register_kpp(&ecdh_curves[i]);
		if (ret) {
			dev_err(&qm->pdev->dev, "failed to register %s (%d)!\n",
				ecdh_curves[i].base.cra_name, ret);
			goto unreg_kpp;
		}
	}

	return 0;

unreg_kpp:
	for (--i; i >= 0; --i)
		crypto_unregister_kpp(&ecdh_curves[i]);

	return ret;
}

static void hpre_unregister_ecdh(struct hisi_qm *qm)
{
	int i;

	if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
		return;

	for (i = ARRAY_SIZE(ecdh_curves) - 1; i >= 0; --i)
		crypto_unregister_kpp(&ecdh_curves[i]);
}

static int hpre_register_x25519(struct hisi_qm *qm)
{
	int ret;

	if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
		return 0;

	ret = crypto_register_kpp(&curve25519_alg);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);

	return ret;
}

static void hpre_unregister_x25519(struct hisi_qm *qm)
{
	if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
		return;

	crypto_unregister_kpp(&curve25519_alg);
}

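/*
 * Algorithm registration is refcounted across HPRE devices: only the
 * first device actually registers the algorithms with the crypto API,
 * and only the last device to go away unregisters them.
 */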
int hpre_algs_register(struct hisi_qm *qm)
{
	int ret = 0;

	mutex_lock(&hpre_algs_lock);
	if (hpre_available_devs) {
		hpre_available_devs++;
		goto unlock;
	}

	ret = hpre_register_rsa(qm);
	if (ret)
		goto unlock;

	ret = hpre_register_dh(qm);
	if (ret)
		goto unreg_rsa;

	ret = hpre_register_ecdh(qm);
	if (ret)
		goto unreg_dh;

	ret = hpre_register_x25519(qm);
	if (ret)
		goto unreg_ecdh;

	hpre_available_devs++;
	mutex_unlock(&hpre_algs_lock);

	return ret;

unreg_ecdh:
	hpre_unregister_ecdh(qm);
unreg_dh:
	hpre_unregister_dh(qm);
unreg_rsa:
	hpre_unregister_rsa(qm);
unlock:
	mutex_unlock(&hpre_algs_lock);
	return ret;
}

void hpre_algs_unregister(struct hisi_qm *qm)
{
	mutex_lock(&hpre_algs_lock);
	if (--hpre_available_devs)
		goto unlock;

	hpre_unregister_x25519(qm);
	hpre_unregister_ecdh(qm);
	hpre_unregister_dh(qm);
	hpre_unregister_rsa(qm);

unlock:
	mutex_unlock(&hpre_algs_lock);
}