- // SPDX-License-Identifier: GPL-2.0
- /* Copyright (c) 2019 HiSilicon Limited. */
- #include <crypto/akcipher.h>
- #include <crypto/curve25519.h>
- #include <crypto/dh.h>
- #include <crypto/ecc_curve.h>
- #include <crypto/ecdh.h>
- #include <crypto/rng.h>
- #include <crypto/internal/akcipher.h>
- #include <crypto/internal/kpp.h>
- #include <crypto/internal/rsa.h>
- #include <crypto/kpp.h>
- #include <crypto/scatterwalk.h>
- #include <linux/dma-mapping.h>
- #include <linux/fips.h>
- #include <linux/module.h>
- #include <linux/time.h>
- #include "hpre.h"
- struct hpre_ctx;
- #define HPRE_CRYPTO_ALG_PRI 1000
- #define HPRE_ALIGN_SZ 64
- #define HPRE_BITS_2_BYTES_SHIFT 3
- #define HPRE_RSA_512BITS_KSZ 64
- #define HPRE_RSA_1536BITS_KSZ 192
- #define HPRE_CRT_PRMS 5
- #define HPRE_CRT_Q 2
- #define HPRE_CRT_P 3
- #define HPRE_CRT_INV 4
- #define HPRE_DH_G_FLAG 0x02
- #define HPRE_TRY_SEND_TIMES 100
- #define HPRE_INVLD_REQ_ID (-1)
- #define HPRE_SQE_ALG_BITS 5
- #define HPRE_SQE_DONE_SHIFT 30
- #define HPRE_DH_MAX_P_SZ 512
- #define HPRE_DFX_SEC_TO_US 1000000
- #define HPRE_DFX_US_TO_NS 1000
- /* due to nist p521 */
- #define HPRE_ECC_MAX_KSZ 66
- /* size in bytes of the n prime */
- #define HPRE_ECC_NIST_P192_N_SIZE 24
- #define HPRE_ECC_NIST_P256_N_SIZE 32
- #define HPRE_ECC_NIST_P384_N_SIZE 48
- /* size in bytes */
- #define HPRE_ECC_HW256_KSZ_B 32
- #define HPRE_ECC_HW384_KSZ_B 48
- /* capability register mask of driver */
- #define HPRE_DRV_RSA_MASK_CAP BIT(0)
- #define HPRE_DRV_DH_MASK_CAP BIT(1)
- #define HPRE_DRV_ECDH_MASK_CAP BIT(2)
- #define HPRE_DRV_X25519_MASK_CAP BIT(5)
- static DEFINE_MUTEX(hpre_algs_lock);
- static unsigned int hpre_available_devs;
- typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe);
- struct hpre_rsa_ctx {
- /* low address: e--->n */
- char *pubkey;
- dma_addr_t dma_pubkey;
- /* low address: d--->n */
- char *prikey;
- dma_addr_t dma_prikey;
- /* low address: dq->dp->q->p->qinv */
- char *crt_prikey;
- dma_addr_t dma_crt_prikey;
- struct crypto_akcipher *soft_tfm;
- };
- struct hpre_dh_ctx {
- /*
- * If base is g we compute the public key
- * ya = g^xa mod p; [RFC2631 sec 2.1.1]
- * else if base is the counterpart public key we
- * compute the shared secret
- * ZZ = yb^xa mod p; [RFC2631 sec 2.1.1]
- * low address: d--->n, please refer to Hisilicon HPRE UM
- */
- char *xa_p;
- dma_addr_t dma_xa_p;
- char *g; /* m */
- dma_addr_t dma_g;
- };
- struct hpre_ecdh_ctx {
- /* low address: p->a->k->b */
- unsigned char *p;
- dma_addr_t dma_p;
- /* low address: x->y */
- unsigned char *g;
- dma_addr_t dma_g;
- };
- struct hpre_curve25519_ctx {
- /* low address: p->a->k */
- unsigned char *p;
- dma_addr_t dma_p;
- /* gx coordinate */
- unsigned char *g;
- dma_addr_t dma_g;
- };
- struct hpre_ctx {
- struct hisi_qp *qp;
- struct device *dev;
- struct hpre_asym_request **req_list;
- struct hpre *hpre;
- spinlock_t req_lock;
- unsigned int key_sz;
- bool crt_g2_mode;
- struct idr req_idr;
- union {
- struct hpre_rsa_ctx rsa;
- struct hpre_dh_ctx dh;
- struct hpre_ecdh_ctx ecdh;
- struct hpre_curve25519_ctx curve25519;
- };
- /* for ecc algorithms */
- unsigned int curve_id;
- };
- struct hpre_asym_request {
- char *src;
- char *dst;
- struct hpre_sqe req;
- struct hpre_ctx *ctx;
- union {
- struct akcipher_request *rsa;
- struct kpp_request *dh;
- struct kpp_request *ecdh;
- struct kpp_request *curve25519;
- } areq;
- int err;
- int req_id;
- hpre_cb cb;
- struct timespec64 req_time;
- };
- static inline unsigned int hpre_align_sz(void)
- {
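- /*
-  * Both alignments are powers of two, so OR-ing their masks yields the
-  * mask of the stricter one; adding 1 converts it back to an alignment.
-  */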
- return ((crypto_dma_align() - 1) | (HPRE_ALIGN_SZ - 1)) + 1;
- }
- static inline unsigned int hpre_align_pd(void)
- {
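- /*
-  * Worst-case padding needed to PTR_ALIGN the request up to
-  * hpre_align_sz(), given the ctx is already aligned to
-  * crypto_tfm_ctx_alignment().
-  */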
- return (hpre_align_sz() - 1) & ~(crypto_tfm_ctx_alignment() - 1);
- }
- static int hpre_alloc_req_id(struct hpre_ctx *ctx)
- {
- unsigned long flags;
- int id;
- spin_lock_irqsave(&ctx->req_lock, flags);
- id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, GFP_ATOMIC);
- spin_unlock_irqrestore(&ctx->req_lock, flags);
- return id;
- }
- static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id)
- {
- unsigned long flags;
- spin_lock_irqsave(&ctx->req_lock, flags);
- idr_remove(&ctx->req_idr, req_id);
- spin_unlock_irqrestore(&ctx->req_lock, flags);
- }
- static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req)
- {
- struct hpre_ctx *ctx;
- struct hpre_dfx *dfx;
- int id;
- ctx = hpre_req->ctx;
- id = hpre_alloc_req_id(ctx);
- if (unlikely(id < 0))
- return -EINVAL;
- ctx->req_list[id] = hpre_req;
- hpre_req->req_id = id;
- dfx = ctx->hpre->debug.dfx;
- if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value))
- ktime_get_ts64(&hpre_req->req_time);
- return id;
- }
- static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req)
- {
- struct hpre_ctx *ctx = hpre_req->ctx;
- int id = hpre_req->req_id;
- if (hpre_req->req_id >= 0) {
- hpre_req->req_id = HPRE_INVLD_REQ_ID;
- ctx->req_list[id] = NULL;
- hpre_free_req_id(ctx, id);
- }
- }
- static struct hisi_qp *hpre_get_qp_and_start(u8 type)
- {
- struct hisi_qp *qp;
- int ret;
- qp = hpre_create_qp(type);
- if (!qp) {
- pr_err("Can not create hpre qp!\n");
- return ERR_PTR(-ENODEV);
- }
- ret = hisi_qm_start_qp(qp, 0);
- if (ret < 0) {
- hisi_qm_free_qps(&qp, 1);
- pci_err(qp->qm->pdev, "Can not start qp!\n");
- return ERR_PTR(-EINVAL);
- }
- return qp;
- }
- static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len,
- int is_src, dma_addr_t *tmp)
- {
- struct device *dev = hpre_req->ctx->dev;
- enum dma_data_direction dma_dir;
- if (is_src) {
- hpre_req->src = NULL;
- dma_dir = DMA_TO_DEVICE;
- } else {
- hpre_req->dst = NULL;
- dma_dir = DMA_FROM_DEVICE;
- }
- *tmp = dma_map_single(dev, sg_virt(data), len, dma_dir);
- if (unlikely(dma_mapping_error(dev, *tmp))) {
- dev_err(dev, "dma map data err!\n");
- return -ENOMEM;
- }
- return 0;
- }
- static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len,
- int is_src, dma_addr_t *tmp)
- {
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = ctx->dev;
- void *ptr;
- int shift;
- shift = ctx->key_sz - len;
- if (unlikely(shift < 0))
- return -EINVAL;
- ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_ATOMIC);
- if (unlikely(!ptr))
- return -ENOMEM;
- if (is_src) {
- scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0);
- hpre_req->src = ptr;
- } else {
- hpre_req->dst = ptr;
- }
- return 0;
- }
- static int hpre_hw_data_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len,
- int is_src, int is_dh)
- {
- struct hpre_sqe *msg = &hpre_req->req;
- struct hpre_ctx *ctx = hpre_req->ctx;
- dma_addr_t tmp = 0;
- int ret;
- /* DH source data must always be formatted; other data can be mapped directly if it is a single, full-key-size SG entry */
- if ((sg_is_last(data) && len == ctx->key_sz) &&
- ((is_dh && !is_src) || !is_dh))
- ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp);
- else
- ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp);
- if (unlikely(ret))
- return ret;
- if (is_src)
- msg->in = cpu_to_le64(tmp);
- else
- msg->out = cpu_to_le64(tmp);
- return 0;
- }
- static void hpre_hw_data_clr_all(struct hpre_ctx *ctx,
- struct hpre_asym_request *req,
- struct scatterlist *dst,
- struct scatterlist *src)
- {
- struct device *dev = ctx->dev;
- struct hpre_sqe *sqe = &req->req;
- dma_addr_t tmp;
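- /*
-  * Buffers recorded in req->src/req->dst were allocated by
-  * hpre_prepare_dma_buf() and must be freed; otherwise the SG entry was
-  * mapped directly and only needs to be unmapped. An address of
-  * DMA_MAPPING_ERROR marks a buffer that was never set up.
-  */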
- tmp = le64_to_cpu(sqe->in);
- if (unlikely(dma_mapping_error(dev, tmp)))
- return;
- if (src) {
- if (req->src)
- dma_free_coherent(dev, ctx->key_sz, req->src, tmp);
- else
- dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE);
- }
- tmp = le64_to_cpu(sqe->out);
- if (unlikely(dma_mapping_error(dev, tmp)))
- return;
- if (req->dst) {
- if (dst)
- scatterwalk_map_and_copy(req->dst, dst, 0,
- ctx->key_sz, 1);
- dma_free_coherent(dev, ctx->key_sz, req->dst, tmp);
- } else {
- dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE);
- }
- }
- static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe,
- void **kreq)
- {
- struct hpre_asym_request *req;
- unsigned int err, done, alg;
- int id;
- #define HPRE_NO_HW_ERR 0
- #define HPRE_HW_TASK_DONE 3
- #define HREE_HW_ERR_MASK GENMASK(10, 0)
- #define HREE_SQE_DONE_MASK GENMASK(1, 0)
- #define HREE_ALG_TYPE_MASK GENMASK(4, 0)
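- /* sqe->dw0 layout: bits [4:0] alg type, [15:5] hw error code, [31:30] done flag */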
- id = (int)le16_to_cpu(sqe->tag);
- req = ctx->req_list[id];
- hpre_rm_req_from_ctx(req);
- *kreq = req;
- err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) &
- HREE_HW_ERR_MASK;
- done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) &
- HREE_SQE_DONE_MASK;
- if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE))
- return 0;
- alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK;
- dev_err_ratelimited(ctx->dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n",
- alg, done, err);
- return -EINVAL;
- }
- static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen)
- {
- struct hpre *hpre;
- if (!ctx || !qp || qlen < 0)
- return -EINVAL;
- spin_lock_init(&ctx->req_lock);
- ctx->qp = qp;
- ctx->dev = &qp->qm->pdev->dev;
- hpre = container_of(ctx->qp->qm, struct hpre, qm);
- ctx->hpre = hpre;
- ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL);
- if (!ctx->req_list)
- return -ENOMEM;
- ctx->key_sz = 0;
- ctx->crt_g2_mode = false;
- idr_init(&ctx->req_idr);
- return 0;
- }
- static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all)
- {
- if (is_clear_all) {
- idr_destroy(&ctx->req_idr);
- kfree(ctx->req_list);
- hisi_qm_free_qps(&ctx->qp, 1);
- }
- ctx->crt_g2_mode = false;
- ctx->key_sz = 0;
- }
- static bool hpre_is_bd_timeout(struct hpre_asym_request *req,
- u64 overtime_thrhld)
- {
- struct timespec64 reply_time;
- u64 time_use_us;
- ktime_get_ts64(&reply_time);
- time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) *
- HPRE_DFX_SEC_TO_US +
- (reply_time.tv_nsec - req->req_time.tv_nsec) /
- HPRE_DFX_US_TO_NS;
- if (time_use_us <= overtime_thrhld)
- return false;
- return true;
- }
- static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp)
- {
- struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
- struct hpre_asym_request *req;
- struct kpp_request *areq;
- u64 overtime_thrhld;
- int ret;
- ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
- areq = req->areq.dh;
- areq->dst_len = ctx->key_sz;
- overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
- if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
- atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
- hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
- kpp_request_complete(areq, ret);
- atomic64_inc(&dfx[HPRE_RECV_CNT].value);
- }
- static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp)
- {
- struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
- struct hpre_asym_request *req;
- struct akcipher_request *areq;
- u64 overtime_thrhld;
- int ret;
- ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
- overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
- if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
- atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
- areq = req->areq.rsa;
- areq->dst_len = ctx->key_sz;
- hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src);
- akcipher_request_complete(areq, ret);
- atomic64_inc(&dfx[HPRE_RECV_CNT].value);
- }
- static void hpre_alg_cb(struct hisi_qp *qp, void *resp)
- {
- struct hpre_ctx *ctx = qp->qp_ctx;
- struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
- struct hpre_sqe *sqe = resp;
- struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)];
- if (unlikely(!req)) {
- atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value);
- return;
- }
- req->cb(ctx, resp);
- }
- static void hpre_stop_qp_and_put(struct hisi_qp *qp)
- {
- hisi_qm_stop_qp(qp);
- hisi_qm_free_qps(&qp, 1);
- }
- static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type)
- {
- struct hisi_qp *qp;
- int ret;
- qp = hpre_get_qp_and_start(type);
- if (IS_ERR(qp))
- return PTR_ERR(qp);
- qp->qp_ctx = ctx;
- qp->req_cb = hpre_alg_cb;
- ret = hpre_ctx_set(ctx, qp, qp->sq_depth);
- if (ret)
- hpre_stop_qp_and_put(qp);
- return ret;
- }
- static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa)
- {
- struct hpre_asym_request *h_req;
- struct hpre_sqe *msg;
- int req_id;
- void *tmp;
- if (is_rsa) {
- struct akcipher_request *akreq = req;
- if (akreq->dst_len < ctx->key_sz) {
- akreq->dst_len = ctx->key_sz;
- return -EOVERFLOW;
- }
- tmp = akcipher_request_ctx(akreq);
- h_req = PTR_ALIGN(tmp, hpre_align_sz());
- h_req->cb = hpre_rsa_cb;
- h_req->areq.rsa = akreq;
- msg = &h_req->req;
- memset(msg, 0, sizeof(*msg));
- } else {
- struct kpp_request *kreq = req;
- if (kreq->dst_len < ctx->key_sz) {
- kreq->dst_len = ctx->key_sz;
- return -EOVERFLOW;
- }
- tmp = kpp_request_ctx(kreq);
- h_req = PTR_ALIGN(tmp, hpre_align_sz());
- h_req->cb = hpre_dh_cb;
- h_req->areq.dh = kreq;
- msg = &h_req->req;
- memset(msg, 0, sizeof(*msg));
- msg->key = cpu_to_le64(ctx->dh.dma_xa_p);
- }
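- /*
-  * Poison the in/out addresses so hpre_hw_data_clr_all() can tell
-  * buffers that were never mapped from real DMA addresses.
-  */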
- msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT);
- msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
- h_req->ctx = ctx;
- req_id = hpre_add_req_to_ctx(h_req);
- if (req_id < 0)
- return -EBUSY;
- msg->tag = cpu_to_le16((u16)req_id);
- return 0;
- }
- static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg)
- {
- struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
- int ctr = 0;
- int ret;
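- /* Retry on -EBUSY up to HPRE_TRY_SEND_TIMES, counting busy events in the DFX stats */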
- do {
- atomic64_inc(&dfx[HPRE_SEND_CNT].value);
- spin_lock_bh(&ctx->req_lock);
- ret = hisi_qp_send(ctx->qp, msg);
- spin_unlock_bh(&ctx->req_lock);
- if (ret != -EBUSY)
- break;
- atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value);
- } while (ctr++ < HPRE_TRY_SEND_TIMES);
- if (likely(!ret))
- return ret;
- if (ret != -EBUSY)
- atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value);
- return ret;
- }
- static int hpre_dh_compute_value(struct kpp_request *req)
- {
- struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- void *tmp = kpp_request_ctx(req);
- struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
- struct hpre_sqe *msg = &hpre_req->req;
- int ret;
- ret = hpre_msg_request_set(ctx, req, false);
- if (unlikely(ret))
- return ret;
- if (req->src) {
- ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1);
- if (unlikely(ret))
- goto clear_all;
- } else {
- msg->in = cpu_to_le64(ctx->dh.dma_g);
- }
- ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1);
- if (unlikely(ret))
- goto clear_all;
- if (ctx->crt_g2_mode && !req->src)
- msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2);
- else
- msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH);
- /* success */
- ret = hpre_send(ctx, msg);
- if (likely(!ret))
- return -EINPROGRESS;
- clear_all:
- hpre_rm_req_from_ctx(hpre_req);
- hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
- return ret;
- }
- static int hpre_is_dh_params_length_valid(unsigned int key_sz)
- {
- #define _HPRE_DH_GRP1 768
- #define _HPRE_DH_GRP2 1024
- #define _HPRE_DH_GRP5 1536
- #define _HPRE_DH_GRP14 2048
- #define _HPRE_DH_GRP15 3072
- #define _HPRE_DH_GRP16 4096
- switch (key_sz) {
- case _HPRE_DH_GRP1:
- case _HPRE_DH_GRP2:
- case _HPRE_DH_GRP5:
- case _HPRE_DH_GRP14:
- case _HPRE_DH_GRP15:
- case _HPRE_DH_GRP16:
- return 0;
- default:
- return -EINVAL;
- }
- }
- static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params)
- {
- struct device *dev = ctx->dev;
- unsigned int sz;
- if (params->p_size > HPRE_DH_MAX_P_SZ)
- return -EINVAL;
- if (hpre_is_dh_params_length_valid(params->p_size <<
- HPRE_BITS_2_BYTES_SHIFT))
- return -EINVAL;
- sz = ctx->key_sz = params->p_size;
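- /* Low half of xa_p holds the private key xa (right-aligned), high half holds the prime p */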
- ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1,
- &ctx->dh.dma_xa_p, GFP_KERNEL);
- if (!ctx->dh.xa_p)
- return -ENOMEM;
- memcpy(ctx->dh.xa_p + sz, params->p, sz);
- /* If g equals 2 don't copy it */
- if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) {
- ctx->crt_g2_mode = true;
- return 0;
- }
- ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL);
- if (!ctx->dh.g) {
- dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
- ctx->dh.dma_xa_p);
- ctx->dh.xa_p = NULL;
- return -ENOMEM;
- }
- memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size);
- return 0;
- }
- static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
- {
- struct device *dev = ctx->dev;
- unsigned int sz = ctx->key_sz;
- if (is_clear_all)
- hisi_qm_stop_qp(ctx->qp);
- if (ctx->dh.g) {
- dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g);
- ctx->dh.g = NULL;
- }
- if (ctx->dh.xa_p) {
- memzero_explicit(ctx->dh.xa_p, sz);
- dma_free_coherent(dev, sz << 1, ctx->dh.xa_p,
- ctx->dh.dma_xa_p);
- ctx->dh.xa_p = NULL;
- }
- hpre_ctx_clear(ctx, is_clear_all);
- }
- static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
- unsigned int len)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct dh params;
- int ret;
- if (crypto_dh_decode_key(buf, len, &params) < 0)
- return -EINVAL;
- /* Free old secret if any */
- hpre_dh_clear_ctx(ctx, false);
- ret = hpre_dh_set_params(ctx, &params);
- if (ret < 0)
- goto err_clear_ctx;
- memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key,
- params.key_size);
- return 0;
- err_clear_ctx:
- hpre_dh_clear_ctx(ctx, false);
- return ret;
- }
- static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- return ctx->key_sz;
- }
- static int hpre_dh_init_tfm(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
- return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
- }
- static void hpre_dh_exit_tfm(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- hpre_dh_clear_ctx(ctx, true);
- }
- static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len)
- {
- while (!**ptr && *len) {
- (*ptr)++;
- (*len)--;
- }
- }
- static bool hpre_rsa_key_size_is_support(unsigned int len)
- {
- unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT;
- #define _RSA_1024BITS_KEY_WDTH 1024
- #define _RSA_2048BITS_KEY_WDTH 2048
- #define _RSA_3072BITS_KEY_WDTH 3072
- #define _RSA_4096BITS_KEY_WDTH 4096
- switch (bits) {
- case _RSA_1024BITS_KEY_WDTH:
- case _RSA_2048BITS_KEY_WDTH:
- case _RSA_3072BITS_KEY_WDTH:
- case _RSA_4096BITS_KEY_WDTH:
- return true;
- default:
- return false;
- }
- }
- static int hpre_rsa_enc(struct akcipher_request *req)
- {
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
- void *tmp = akcipher_request_ctx(req);
- struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
- struct hpre_sqe *msg = &hpre_req->req;
- int ret;
- /* For 512 and 1536 bits key size, use soft tfm instead */
- if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
- ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
- akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
- ret = crypto_akcipher_encrypt(req);
- akcipher_request_set_tfm(req, tfm);
- return ret;
- }
- if (unlikely(!ctx->rsa.pubkey))
- return -EINVAL;
- ret = hpre_msg_request_set(ctx, req, true);
- if (unlikely(ret))
- return ret;
- msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT);
- msg->key = cpu_to_le64(ctx->rsa.dma_pubkey);
- ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
- if (unlikely(ret))
- goto clear_all;
- ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
- if (unlikely(ret))
- goto clear_all;
- /* success */
- ret = hpre_send(ctx, msg);
- if (likely(!ret))
- return -EINPROGRESS;
- clear_all:
- hpre_rm_req_from_ctx(hpre_req);
- hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
- return ret;
- }
- static int hpre_rsa_dec(struct akcipher_request *req)
- {
- struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
- struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
- void *tmp = akcipher_request_ctx(req);
- struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
- struct hpre_sqe *msg = &hpre_req->req;
- int ret;
- /* For 512 and 1536 bits key size, use soft tfm instead */
- if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
- ctx->key_sz == HPRE_RSA_1536BITS_KSZ) {
- akcipher_request_set_tfm(req, ctx->rsa.soft_tfm);
- ret = crypto_akcipher_decrypt(req);
- akcipher_request_set_tfm(req, tfm);
- return ret;
- }
- if (unlikely(!ctx->rsa.prikey))
- return -EINVAL;
- ret = hpre_msg_request_set(ctx, req, true);
- if (unlikely(ret))
- return ret;
- if (ctx->crt_g2_mode) {
- msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey);
- msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
- HPRE_ALG_NC_CRT);
- } else {
- msg->key = cpu_to_le64(ctx->rsa.dma_prikey);
- msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) |
- HPRE_ALG_NC_NCRT);
- }
- ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0);
- if (unlikely(ret))
- goto clear_all;
- ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0);
- if (unlikely(ret))
- goto clear_all;
- /* success */
- ret = hpre_send(ctx, msg);
- if (likely(!ret))
- return -EINPROGRESS;
- clear_all:
- hpre_rm_req_from_ctx(hpre_req);
- hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
- return ret;
- }
- static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value,
- size_t vlen, bool private)
- {
- const char *ptr = value;
- hpre_rsa_drop_leading_zeros(&ptr, &vlen);
- ctx->key_sz = vlen;
- /* if invalid key size provided, we use software tfm */
- if (!hpre_rsa_key_size_is_support(ctx->key_sz))
- return 0;
- ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1,
- &ctx->rsa.dma_pubkey,
- GFP_KERNEL);
- if (!ctx->rsa.pubkey)
- return -ENOMEM;
- if (private) {
- ctx->rsa.prikey = dma_alloc_coherent(ctx->dev, vlen << 1,
- &ctx->rsa.dma_prikey,
- GFP_KERNEL);
- if (!ctx->rsa.prikey) {
- dma_free_coherent(ctx->dev, vlen << 1,
- ctx->rsa.pubkey,
- ctx->rsa.dma_pubkey);
- ctx->rsa.pubkey = NULL;
- return -ENOMEM;
- }
- memcpy(ctx->rsa.prikey + vlen, ptr, vlen);
- }
- memcpy(ctx->rsa.pubkey + vlen, ptr, vlen);
- /* Using hardware HPRE to do RSA */
- return 1;
- }
- static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value,
- size_t vlen)
- {
- const char *ptr = value;
- hpre_rsa_drop_leading_zeros(&ptr, &vlen);
- if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
- return -EINVAL;
- memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen);
- return 0;
- }
- static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value,
- size_t vlen)
- {
- const char *ptr = value;
- hpre_rsa_drop_leading_zeros(&ptr, &vlen);
- if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
- return -EINVAL;
- memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen);
- return 0;
- }
- static int hpre_crt_para_get(char *para, size_t para_sz,
- const char *raw, size_t raw_sz)
- {
- const char *ptr = raw;
- size_t len = raw_sz;
- hpre_rsa_drop_leading_zeros(&ptr, &len);
- if (!len || len > para_sz)
- return -EINVAL;
- memcpy(para + para_sz - len, ptr, len);
- return 0;
- }
- static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key)
- {
- unsigned int hlf_ksz = ctx->key_sz >> 1;
- struct device *dev = ctx->dev;
- u64 offset;
- int ret;
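- /* CRT private key layout, low address first: dq | dp | q | p | qinv, each hlf_ksz bytes */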
- ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS,
- &ctx->rsa.dma_crt_prikey,
- GFP_KERNEL);
- if (!ctx->rsa.crt_prikey)
- return -ENOMEM;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz,
- rsa_key->dq, rsa_key->dq_sz);
- if (ret)
- goto free_key;
- offset = hlf_ksz;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
- rsa_key->dp, rsa_key->dp_sz);
- if (ret)
- goto free_key;
- offset = hlf_ksz * HPRE_CRT_Q;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
- rsa_key->q, rsa_key->q_sz);
- if (ret)
- goto free_key;
- offset = hlf_ksz * HPRE_CRT_P;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
- rsa_key->p, rsa_key->p_sz);
- if (ret)
- goto free_key;
- offset = hlf_ksz * HPRE_CRT_INV;
- ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz,
- rsa_key->qinv, rsa_key->qinv_sz);
- if (ret)
- goto free_key;
- ctx->crt_g2_mode = true;
- return 0;
- free_key:
- offset = hlf_ksz * HPRE_CRT_PRMS;
- memzero_explicit(ctx->rsa.crt_prikey, offset);
- dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey,
- ctx->rsa.dma_crt_prikey);
- ctx->rsa.crt_prikey = NULL;
- ctx->crt_g2_mode = false;
- return ret;
- }
- /* If is_clear_all is set, all resources of the QP are released as well. */
- static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all)
- {
- unsigned int half_key_sz = ctx->key_sz >> 1;
- struct device *dev = ctx->dev;
- if (is_clear_all)
- hisi_qm_stop_qp(ctx->qp);
- if (ctx->rsa.pubkey) {
- dma_free_coherent(dev, ctx->key_sz << 1,
- ctx->rsa.pubkey, ctx->rsa.dma_pubkey);
- ctx->rsa.pubkey = NULL;
- }
- if (ctx->rsa.crt_prikey) {
- memzero_explicit(ctx->rsa.crt_prikey,
- half_key_sz * HPRE_CRT_PRMS);
- dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS,
- ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey);
- ctx->rsa.crt_prikey = NULL;
- }
- if (ctx->rsa.prikey) {
- memzero_explicit(ctx->rsa.prikey, ctx->key_sz);
- dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey,
- ctx->rsa.dma_prikey);
- ctx->rsa.prikey = NULL;
- }
- hpre_ctx_clear(ctx, is_clear_all);
- }
- /*
- * Judge whether the key is in CRT form:
- * return true for CRT, false for N-CRT.
- */
- static bool hpre_is_crt_key(struct rsa_key *key)
- {
- u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz +
- key->qinv_sz;
- #define LEN_OF_NCRT_PARA 5
- /* a CRT key has 5 parameters of at least one byte each; anything shorter is N-CRT */
- return len > LEN_OF_NCRT_PARA;
- }
- static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key,
- unsigned int keylen, bool private)
- {
- struct rsa_key rsa_key;
- int ret;
- hpre_rsa_clear_ctx(ctx, false);
- if (private)
- ret = rsa_parse_priv_key(&rsa_key, key, keylen);
- else
- ret = rsa_parse_pub_key(&rsa_key, key, keylen);
- if (ret < 0)
- return ret;
- ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private);
- if (ret <= 0)
- return ret;
- if (private) {
- ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
- if (ret < 0)
- goto free;
- if (hpre_is_crt_key(&rsa_key)) {
- ret = hpre_rsa_setkey_crt(ctx, &rsa_key);
- if (ret < 0)
- goto free;
- }
- }
- ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
- if (ret < 0)
- goto free;
- if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) {
- ret = -EINVAL;
- goto free;
- }
- return 0;
- free:
- hpre_rsa_clear_ctx(ctx, false);
- return ret;
- }
- static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
- unsigned int keylen)
- {
- struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
- int ret;
- ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen);
- if (ret)
- return ret;
- return hpre_rsa_setkey(ctx, key, keylen, false);
- }
- static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
- unsigned int keylen)
- {
- struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
- int ret;
- ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen);
- if (ret)
- return ret;
- return hpre_rsa_setkey(ctx, key, keylen, true);
- }
- static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm)
- {
- struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
- /* For 512 and 1536 bits key size, use soft tfm instead */
- if (ctx->key_sz == HPRE_RSA_512BITS_KSZ ||
- ctx->key_sz == HPRE_RSA_1536BITS_KSZ)
- return crypto_akcipher_maxsize(ctx->rsa.soft_tfm);
- return ctx->key_sz;
- }
- static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm)
- {
- struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
- int ret;
- ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0);
- if (IS_ERR(ctx->rsa.soft_tfm)) {
- pr_err("Can not alloc_akcipher!\n");
- return PTR_ERR(ctx->rsa.soft_tfm);
- }
- akcipher_set_reqsize(tfm, sizeof(struct hpre_asym_request) +
- hpre_align_pd());
- ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE);
- if (ret)
- crypto_free_akcipher(ctx->rsa.soft_tfm);
- return ret;
- }
- static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm)
- {
- struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm);
- hpre_rsa_clear_ctx(ctx, true);
- crypto_free_akcipher(ctx->rsa.soft_tfm);
- }
- static void hpre_key_to_big_end(u8 *data, int len)
- {
- int i, j;
- for (i = 0; i < len / 2; i++) {
- j = len - i - 1;
- swap(data[j], data[i]);
- }
- }
- static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all,
- bool is_ecdh)
- {
- struct device *dev = ctx->dev;
- unsigned int sz = ctx->key_sz;
- unsigned int shift = sz << 1;
- if (is_clear_all)
- hisi_qm_stop_qp(ctx->qp);
- if (is_ecdh && ctx->ecdh.p) {
- /* ecdh: p->a->k->b */
- memzero_explicit(ctx->ecdh.p + shift, sz);
- dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
- ctx->ecdh.p = NULL;
- } else if (!is_ecdh && ctx->curve25519.p) {
- /* curve25519: p->a->k */
- memzero_explicit(ctx->curve25519.p + shift, sz);
- dma_free_coherent(dev, sz << 2, ctx->curve25519.p,
- ctx->curve25519.dma_p);
- ctx->curve25519.p = NULL;
- }
- hpre_ctx_clear(ctx, is_clear_all);
- }
- /*
- * HPRE supports key widths of 192/224/256/384/521 bits, which are
- * rounded up to a hardware size: bits <= 256 -> 256;
- * 256 < bits <= 384 -> 384; 384 < bits <= 576 -> 576.
- * If the parameter bit width is smaller, the high-order bits are
- * zero-filled in software, so TASK_LENGTH1 is 0x3/0x5/0x8.
- */
- static unsigned int hpre_ecdh_supported_curve(unsigned short id)
- {
- switch (id) {
- case ECC_CURVE_NIST_P192:
- case ECC_CURVE_NIST_P256:
- return HPRE_ECC_HW256_KSZ_B;
- case ECC_CURVE_NIST_P384:
- return HPRE_ECC_HW384_KSZ_B;
- default:
- break;
- }
- return 0;
- }
- static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits)
- {
- unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64);
- u8 i = 0;
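- /*
-  * Curve parameters are little-endian u64 digit arrays: copy the whole
-  * digits, then the partial top digit, and byte-swap the result into
-  * the big-endian form the hardware expects.
-  */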
- while (i < ndigits - 1) {
- memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64));
- i++;
- }
- memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz);
- hpre_key_to_big_end((u8 *)addr, cur_sz);
- }
- static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params,
- unsigned int cur_sz)
- {
- unsigned int shifta = ctx->key_sz << 1;
- unsigned int shiftb = ctx->key_sz << 2;
- void *p = ctx->ecdh.p + ctx->key_sz - cur_sz;
- void *a = ctx->ecdh.p + shifta - cur_sz;
- void *b = ctx->ecdh.p + shiftb - cur_sz;
- void *x = ctx->ecdh.g + ctx->key_sz - cur_sz;
- void *y = ctx->ecdh.g + shifta - cur_sz;
- const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id);
- char *n;
- if (unlikely(!curve))
- return -EINVAL;
- n = kzalloc(ctx->key_sz, GFP_KERNEL);
- if (!n)
- return -ENOMEM;
- fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits);
- fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits);
- fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits);
- fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits);
- fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits);
- fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits);
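- /* The private key must be less than n; only a full-size key can violate this */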
- if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) {
- kfree(n);
- return -EINVAL;
- }
- kfree(n);
- return 0;
- }
- static unsigned int hpre_ecdh_get_curvesz(unsigned short id)
- {
- switch (id) {
- case ECC_CURVE_NIST_P192:
- return HPRE_ECC_NIST_P192_N_SIZE;
- case ECC_CURVE_NIST_P256:
- return HPRE_ECC_NIST_P256_N_SIZE;
- case ECC_CURVE_NIST_P384:
- return HPRE_ECC_NIST_P384_N_SIZE;
- default:
- break;
- }
- return 0;
- }
- static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params)
- {
- struct device *dev = ctx->dev;
- unsigned int sz, shift, curve_sz;
- int ret;
- ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id);
- if (!ctx->key_sz)
- return -EINVAL;
- curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
- if (!curve_sz || params->key_size > curve_sz)
- return -EINVAL;
- sz = ctx->key_sz;
- if (!ctx->ecdh.p) {
- ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p,
- GFP_KERNEL);
- if (!ctx->ecdh.p)
- return -ENOMEM;
- }
- shift = sz << 2;
- ctx->ecdh.g = ctx->ecdh.p + shift;
- ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift;
- ret = hpre_ecdh_fill_curve(ctx, params, curve_sz);
- if (ret) {
- dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret);
- dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p);
- ctx->ecdh.p = NULL;
- return ret;
- }
- return 0;
- }
- static bool hpre_key_is_zero(char *key, unsigned short key_sz)
- {
- int i;
- for (i = 0; i < key_sz; i++)
- if (key[i])
- return false;
- return true;
- }
- static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params)
- {
- struct device *dev = ctx->dev;
- int ret;
- ret = crypto_get_default_rng();
- if (ret) {
- dev_err(dev, "failed to get default rng, ret = %d!\n", ret);
- return ret;
- }
- ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key,
- params->key_size);
- crypto_put_default_rng();
- if (ret)
- dev_err(dev, "failed to get rng, ret = %d!\n", ret);
- return ret;
- }
- static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
- unsigned int len)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- unsigned int sz, sz_shift, curve_sz;
- struct device *dev = ctx->dev;
- char key[HPRE_ECC_MAX_KSZ];
- struct ecdh params;
- int ret;
- if (crypto_ecdh_decode_key(buf, len, &params) < 0) {
- dev_err(dev, "failed to decode ecdh key!\n");
- return -EINVAL;
- }
- /* Use stdrng to generate private key */
- if (!params.key || !params.key_size) {
- params.key = key;
- curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
- if (!curve_sz) {
- dev_err(dev, "Invalid curve size!\n");
- return -EINVAL;
- }
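- /* Generate one byte less than the curve size so the key is guaranteed to be less than n */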
- params.key_size = curve_sz - 1;
- ret = ecdh_gen_privkey(ctx, &params);
- if (ret)
- return ret;
- }
- if (hpre_key_is_zero(params.key, params.key_size)) {
- dev_err(dev, "Invalid hpre key!\n");
- return -EINVAL;
- }
- hpre_ecc_clear_ctx(ctx, false, true);
- ret = hpre_ecdh_set_param(ctx, &params);
- if (ret < 0) {
- dev_err(dev, "failed to set hpre param, ret = %d!\n", ret);
- return ret;
- }
- sz = ctx->key_sz;
- sz_shift = (sz << 1) + sz - params.key_size;
- memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size);
- return 0;
- }
- static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx,
- struct hpre_asym_request *req,
- struct scatterlist *dst,
- struct scatterlist *src)
- {
- struct device *dev = ctx->dev;
- struct hpre_sqe *sqe = &req->req;
- dma_addr_t dma;
- dma = le64_to_cpu(sqe->in);
- if (unlikely(dma_mapping_error(dev, dma)))
- return;
- if (src && req->src)
- dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma);
- dma = le64_to_cpu(sqe->out);
- if (unlikely(dma_mapping_error(dev, dma)))
- return;
- if (req->dst)
- dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma);
- if (dst)
- dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE);
- }
- static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp)
- {
- unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id);
- struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
- struct hpre_asym_request *req = NULL;
- struct kpp_request *areq;
- u64 overtime_thrhld;
- char *p;
- int ret;
- ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
- areq = req->areq.ecdh;
- areq->dst_len = ctx->key_sz << 1;
- overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
- if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
- atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
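- /* HW returns x and y right-aligned in key_sz slots; pack them into two curve_sz coordinates */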
- p = sg_virt(areq->dst);
- memmove(p, p + ctx->key_sz - curve_sz, curve_sz);
- memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz);
- hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src);
- kpp_request_complete(areq, ret);
- atomic64_inc(&dfx[HPRE_RECV_CNT].value);
- }
- static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx,
- struct kpp_request *req)
- {
- struct hpre_asym_request *h_req;
- struct hpre_sqe *msg;
- int req_id;
- void *tmp;
- if (req->dst_len < ctx->key_sz << 1) {
- req->dst_len = ctx->key_sz << 1;
- return -EINVAL;
- }
- tmp = kpp_request_ctx(req);
- h_req = PTR_ALIGN(tmp, hpre_align_sz());
- h_req->cb = hpre_ecdh_cb;
- h_req->areq.ecdh = req;
- msg = &h_req->req;
- memset(msg, 0, sizeof(*msg));
- msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->key = cpu_to_le64(ctx->ecdh.dma_p);
- msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
- msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
- h_req->ctx = ctx;
- req_id = hpre_add_req_to_ctx(h_req);
- if (req_id < 0)
- return -EBUSY;
- msg->tag = cpu_to_le16((u16)req_id);
- return 0;
- }
- static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len)
- {
- struct hpre_sqe *msg = &hpre_req->req;
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = ctx->dev;
- unsigned int tmpshift;
- dma_addr_t dma = 0;
- void *ptr;
- int shift;
- /* Src_data includes gx and gy. */
- shift = ctx->key_sz - (len >> 1);
- if (unlikely(shift < 0))
- return -EINVAL;
- ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL);
- if (unlikely(!ptr))
- return -ENOMEM;
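- /*
-  * Stage the packed gx||gy from the SG list in the upper half of the
-  * buffer, then right-align each coordinate in its own zero-padded
-  * key_sz slot.
-  */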
- tmpshift = ctx->key_sz << 1;
- scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0);
- memcpy(ptr + shift, ptr + tmpshift, len >> 1);
- memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1);
- hpre_req->src = ptr;
- msg->in = cpu_to_le64(dma);
- return 0;
- }
- static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len)
- {
- struct hpre_sqe *msg = &hpre_req->req;
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = ctx->dev;
- dma_addr_t dma;
- if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) {
- dev_err(dev, "data or data length is illegal!\n");
- return -EINVAL;
- }
- hpre_req->dst = NULL;
- dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, dma))) {
- dev_err(dev, "dma map data err!\n");
- return -ENOMEM;
- }
- msg->out = cpu_to_le64(dma);
- return 0;
- }
- static int hpre_ecdh_compute_value(struct kpp_request *req)
- {
- struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = ctx->dev;
- void *tmp = kpp_request_ctx(req);
- struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
- struct hpre_sqe *msg = &hpre_req->req;
- int ret;
- ret = hpre_ecdh_msg_request_set(ctx, req);
- if (unlikely(ret)) {
- dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret);
- return ret;
- }
- if (req->src) {
- ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len);
- if (unlikely(ret)) {
- dev_err(dev, "failed to init src data, ret = %d!\n", ret);
- goto clear_all;
- }
- } else {
- msg->in = cpu_to_le64(ctx->ecdh.dma_g);
- }
- ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len);
- if (unlikely(ret)) {
- dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
- goto clear_all;
- }
- msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL);
- ret = hpre_send(ctx, msg);
- if (likely(!ret))
- return -EINPROGRESS;
- clear_all:
- hpre_rm_req_from_ctx(hpre_req);
- hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
- return ret;
- }
- static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- /* max size is the pub_key_size, including x and y */
- return ctx->key_sz << 1;
- }
- static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- ctx->curve_id = ECC_CURVE_NIST_P192;
- kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
- return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
- }
- static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- ctx->curve_id = ECC_CURVE_NIST_P256;
- kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
- return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
- }
- static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- ctx->curve_id = ECC_CURVE_NIST_P384;
- kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
- return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
- }
- static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- hpre_ecc_clear_ctx(ctx, true, true);
- }
- static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf,
- unsigned int len)
- {
- u8 secret[CURVE25519_KEY_SIZE] = { 0 };
- unsigned int sz = ctx->key_sz;
- const struct ecc_curve *curve;
- unsigned int shift = sz << 1;
- void *p;
- /*
- * The key from 'buf' is in little-endian, we should preprocess it as
- * the description in rfc7748: "k[0] &= 248, k[31] &= 127, k[31] |= 64",
- * then convert it to big endian. Only then does the result match the
- * software curve25519 implementation in the crypto subsystem.
- */
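- /* Buffer layout, one key_sz slot each: p | a | k (clamped secret) | gx */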
- memcpy(secret, buf, len);
- curve25519_clamp_secret(secret);
- hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE);
- p = ctx->curve25519.p + sz - len;
- curve = ecc_get_curve25519();
- /* fill curve parameters */
- fill_curve_param(p, curve->p, len, curve->g.ndigits);
- fill_curve_param(p + sz, curve->a, len, curve->g.ndigits);
- memcpy(p + shift, secret, len);
- fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits);
- memzero_explicit(secret, CURVE25519_KEY_SIZE);
- }
- static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf,
- unsigned int len)
- {
- struct device *dev = ctx->dev;
- unsigned int sz = ctx->key_sz;
- unsigned int shift = sz << 1;
- /* p->a->k->gx */
- if (!ctx->curve25519.p) {
- ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2,
- &ctx->curve25519.dma_p,
- GFP_KERNEL);
- if (!ctx->curve25519.p)
- return -ENOMEM;
- }
- ctx->curve25519.g = ctx->curve25519.p + shift + sz;
- ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz;
- hpre_curve25519_fill_curve(ctx, buf, len);
- return 0;
- }
- static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
- unsigned int len)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = ctx->dev;
- int ret = -EINVAL;
- if (len != CURVE25519_KEY_SIZE ||
- !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) {
- dev_err(dev, "key is null or key len is not 32bytes!\n");
- return ret;
- }
- /* Free old secret if any */
- hpre_ecc_clear_ctx(ctx, false, false);
- ctx->key_sz = CURVE25519_KEY_SIZE;
- ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE);
- if (ret) {
- dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret);
- hpre_ecc_clear_ctx(ctx, false, false);
- return ret;
- }
- return 0;
- }
- static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx,
- struct hpre_asym_request *req,
- struct scatterlist *dst,
- struct scatterlist *src)
- {
- struct device *dev = ctx->dev;
- struct hpre_sqe *sqe = &req->req;
- dma_addr_t dma;
- dma = le64_to_cpu(sqe->in);
- if (unlikely(dma_mapping_error(dev, dma)))
- return;
- if (src && req->src)
- dma_free_coherent(dev, ctx->key_sz, req->src, dma);
- dma = le64_to_cpu(sqe->out);
- if (unlikely(dma_mapping_error(dev, dma)))
- return;
- if (req->dst)
- dma_free_coherent(dev, ctx->key_sz, req->dst, dma);
- if (dst)
- dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE);
- }
- static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp)
- {
- struct hpre_dfx *dfx = ctx->hpre->debug.dfx;
- struct hpre_asym_request *req = NULL;
- struct kpp_request *areq;
- u64 overtime_thrhld;
- int ret;
- ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req);
- areq = req->areq.curve25519;
- areq->dst_len = ctx->key_sz;
- overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value);
- if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld))
- atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value);
- hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE);
- hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src);
- kpp_request_complete(areq, ret);
- atomic64_inc(&dfx[HPRE_RECV_CNT].value);
- }
- static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx,
- struct kpp_request *req)
- {
- struct hpre_asym_request *h_req;
- struct hpre_sqe *msg;
- int req_id;
- void *tmp;
- if (unlikely(req->dst_len < ctx->key_sz)) {
- req->dst_len = ctx->key_sz;
- return -EINVAL;
- }
- tmp = kpp_request_ctx(req);
- h_req = PTR_ALIGN(tmp, hpre_align_sz());
- h_req->cb = hpre_curve25519_cb;
- h_req->areq.curve25519 = req;
- msg = &h_req->req;
- memset(msg, 0, sizeof(*msg));
- msg->in = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->out = cpu_to_le64(DMA_MAPPING_ERROR);
- msg->key = cpu_to_le64(ctx->curve25519.dma_p);
- msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT);
- msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1;
- h_req->ctx = ctx;
- req_id = hpre_add_req_to_ctx(h_req);
- if (req_id < 0)
- return -EBUSY;
- msg->tag = cpu_to_le16((u16)req_id);
- return 0;
- }
- static void hpre_curve25519_src_modulo_p(u8 *ptr)
- {
- int i;
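- /*
-  * ptr is big-endian and in [p, 2^255 - 1]; since p = 2^255 - 19 the
-  * residue ptr - p is at most 18 and fits in the last byte.
-  */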
- for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++)
- ptr[i] = 0;
- /* The residue is ptr's last byte minus 0xed (the last byte of p) */
- ptr[i] -= 0xed;
- }
- static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len)
- {
- struct hpre_sqe *msg = &hpre_req->req;
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = ctx->dev;
- u8 p[CURVE25519_KEY_SIZE] = { 0 };
- const struct ecc_curve *curve;
- dma_addr_t dma = 0;
- u8 *ptr;
- if (len != CURVE25519_KEY_SIZE) {
- dev_err(dev, "sourc_data len is not 32bytes, len = %u!\n", len);
- return -EINVAL;
- }
- ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL);
- if (unlikely(!ptr))
- return -ENOMEM;
- scatterwalk_map_and_copy(ptr, data, 0, len, 0);
- if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) {
- dev_err(dev, "gx is null!\n");
- goto err;
- }
- /*
- * Src_data (gx) is in little-endian order; the MSB of its final byte
- * must be masked as described in RFC 7748, and the value converted to
- * big-endian form before the hardware can use it.
- */
- ptr[31] &= 0x7f;
- hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE);
- curve = ecc_get_curve25519();
- fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits);
- /*
- * When src_data lies in [2^255 - 19, 2^255 - 1] it is not reduced
- * modulo p, so take its residue modulo p and use that instead.
- */
- if (memcmp(ptr, p, ctx->key_sz) == 0) {
- dev_err(dev, "gx is p!\n");
- goto err;
- } else if (memcmp(ptr, p, ctx->key_sz) > 0) {
- hpre_curve25519_src_modulo_p(ptr);
- }
- hpre_req->src = ptr;
- msg->in = cpu_to_le64(dma);
- return 0;
- err:
- dma_free_coherent(dev, ctx->key_sz, ptr, dma);
- return -EINVAL;
- }
- static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req,
- struct scatterlist *data, unsigned int len)
- {
- struct hpre_sqe *msg = &hpre_req->req;
- struct hpre_ctx *ctx = hpre_req->ctx;
- struct device *dev = ctx->dev;
- dma_addr_t dma;
- if (!data || !sg_is_last(data) || len != ctx->key_sz) {
- dev_err(dev, "data or data length is illegal!\n");
- return -EINVAL;
- }
- hpre_req->dst = NULL;
- dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, dma))) {
- dev_err(dev, "dma map data err!\n");
- return -ENOMEM;
- }
- msg->out = cpu_to_le64(dma);
- return 0;
- }
- static int hpre_curve25519_compute_value(struct kpp_request *req)
- {
- struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- struct device *dev = ctx->dev;
- void *tmp = kpp_request_ctx(req);
- struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz());
- struct hpre_sqe *msg = &hpre_req->req;
- int ret;
- ret = hpre_curve25519_msg_request_set(ctx, req);
- if (unlikely(ret)) {
- dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret);
- return ret;
- }
- if (req->src) {
- ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len);
- if (unlikely(ret)) {
- dev_err(dev, "failed to init src data, ret = %d!\n",
- ret);
- goto clear_all;
- }
- } else {
- msg->in = cpu_to_le64(ctx->curve25519.dma_g);
- }
- ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len);
- if (unlikely(ret)) {
- dev_err(dev, "failed to init dst data, ret = %d!\n", ret);
- goto clear_all;
- }
- msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL);
- ret = hpre_send(ctx, msg);
- if (likely(!ret))
- return -EINPROGRESS;
- clear_all:
- hpre_rm_req_from_ctx(hpre_req);
- hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src);
- return ret;
- }
- static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- return ctx->key_sz;
- }
- static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd());
- return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE);
- }
- static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm)
- {
- struct hpre_ctx *ctx = kpp_tfm_ctx(tfm);
- hpre_ecc_clear_ctx(ctx, true, false);
- }
- static struct akcipher_alg rsa = {
- .sign = hpre_rsa_dec,
- .verify = hpre_rsa_enc,
- .encrypt = hpre_rsa_enc,
- .decrypt = hpre_rsa_dec,
- .set_pub_key = hpre_rsa_setpubkey,
- .set_priv_key = hpre_rsa_setprivkey,
- .max_size = hpre_rsa_max_size,
- .init = hpre_rsa_init_tfm,
- .exit = hpre_rsa_exit_tfm,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "rsa",
- .cra_driver_name = "hpre-rsa",
- .cra_module = THIS_MODULE,
- },
- };
- static struct kpp_alg dh = {
- .set_secret = hpre_dh_set_secret,
- .generate_public_key = hpre_dh_compute_value,
- .compute_shared_secret = hpre_dh_compute_value,
- .max_size = hpre_dh_max_size,
- .init = hpre_dh_init_tfm,
- .exit = hpre_dh_exit_tfm,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "dh",
- .cra_driver_name = "hpre-dh",
- .cra_module = THIS_MODULE,
- },
- };
- static struct kpp_alg ecdh_curves[] = {
- {
- .set_secret = hpre_ecdh_set_secret,
- .generate_public_key = hpre_ecdh_compute_value,
- .compute_shared_secret = hpre_ecdh_compute_value,
- .max_size = hpre_ecdh_max_size,
- .init = hpre_ecdh_nist_p192_init_tfm,
- .exit = hpre_ecdh_exit_tfm,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "ecdh-nist-p192",
- .cra_driver_name = "hpre-ecdh-nist-p192",
- .cra_module = THIS_MODULE,
- },
- }, {
- .set_secret = hpre_ecdh_set_secret,
- .generate_public_key = hpre_ecdh_compute_value,
- .compute_shared_secret = hpre_ecdh_compute_value,
- .max_size = hpre_ecdh_max_size,
- .init = hpre_ecdh_nist_p256_init_tfm,
- .exit = hpre_ecdh_exit_tfm,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "ecdh-nist-p256",
- .cra_driver_name = "hpre-ecdh-nist-p256",
- .cra_module = THIS_MODULE,
- },
- }, {
- .set_secret = hpre_ecdh_set_secret,
- .generate_public_key = hpre_ecdh_compute_value,
- .compute_shared_secret = hpre_ecdh_compute_value,
- .max_size = hpre_ecdh_max_size,
- .init = hpre_ecdh_nist_p384_init_tfm,
- .exit = hpre_ecdh_exit_tfm,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "ecdh-nist-p384",
- .cra_driver_name = "hpre-ecdh-nist-p384",
- .cra_module = THIS_MODULE,
- },
- }
- };
- static struct kpp_alg curve25519_alg = {
- .set_secret = hpre_curve25519_set_secret,
- .generate_public_key = hpre_curve25519_compute_value,
- .compute_shared_secret = hpre_curve25519_compute_value,
- .max_size = hpre_curve25519_max_size,
- .init = hpre_curve25519_init_tfm,
- .exit = hpre_curve25519_exit_tfm,
- .base = {
- .cra_ctxsize = sizeof(struct hpre_ctx),
- .cra_priority = HPRE_CRYPTO_ALG_PRI,
- .cra_name = "curve25519",
- .cra_driver_name = "hpre-curve25519",
- .cra_module = THIS_MODULE,
- },
- };
- static int hpre_register_rsa(struct hisi_qm *qm)
- {
- int ret;
- if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
- return 0;
- rsa.base.cra_flags = 0;
- ret = crypto_register_akcipher(&rsa);
- if (ret)
- dev_err(&qm->pdev->dev, "failed to register rsa (%d)!\n", ret);
- return ret;
- }
- static void hpre_unregister_rsa(struct hisi_qm *qm)
- {
- if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP))
- return;
- crypto_unregister_akcipher(&rsa);
- }
- static int hpre_register_dh(struct hisi_qm *qm)
- {
- int ret;
- if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
- return 0;
- ret = crypto_register_kpp(&dh);
- if (ret)
- dev_err(&qm->pdev->dev, "failed to register dh (%d)!\n", ret);
- return ret;
- }
- static void hpre_unregister_dh(struct hisi_qm *qm)
- {
- if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP))
- return;
- crypto_unregister_kpp(&dh);
- }
- static int hpre_register_ecdh(struct hisi_qm *qm)
- {
- int ret, i;
- if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
- return 0;
- for (i = 0; i < ARRAY_SIZE(ecdh_curves); i++) {
- ret = crypto_register_kpp(&ecdh_curves[i]);
- if (ret) {
- dev_err(&qm->pdev->dev, "failed to register %s (%d)!\n",
- ecdh_curves[i].base.cra_name, ret);
- goto unreg_kpp;
- }
- }
- return 0;
- unreg_kpp:
- for (--i; i >= 0; --i)
- crypto_unregister_kpp(&ecdh_curves[i]);
- return ret;
- }
- static void hpre_unregister_ecdh(struct hisi_qm *qm)
- {
- int i;
- if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP))
- return;
- for (i = ARRAY_SIZE(ecdh_curves) - 1; i >= 0; --i)
- crypto_unregister_kpp(&ecdh_curves[i]);
- }
- static int hpre_register_x25519(struct hisi_qm *qm)
- {
- int ret;
- if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
- return 0;
- ret = crypto_register_kpp(&curve25519_alg);
- if (ret)
- dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret);
- return ret;
- }
- static void hpre_unregister_x25519(struct hisi_qm *qm)
- {
- if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP))
- return;
- crypto_unregister_kpp(&curve25519_alg);
- }
- int hpre_algs_register(struct hisi_qm *qm)
- {
- int ret = 0;
- mutex_lock(&hpre_algs_lock);
- if (hpre_available_devs) {
- hpre_available_devs++;
- goto unlock;
- }
- ret = hpre_register_rsa(qm);
- if (ret)
- goto unlock;
- ret = hpre_register_dh(qm);
- if (ret)
- goto unreg_rsa;
- ret = hpre_register_ecdh(qm);
- if (ret)
- goto unreg_dh;
- ret = hpre_register_x25519(qm);
- if (ret)
- goto unreg_ecdh;
- hpre_available_devs++;
- mutex_unlock(&hpre_algs_lock);
- return ret;
- unreg_ecdh:
- hpre_unregister_ecdh(qm);
- unreg_dh:
- hpre_unregister_dh(qm);
- unreg_rsa:
- hpre_unregister_rsa(qm);
- unlock:
- mutex_unlock(&hpre_algs_lock);
- return ret;
- }
- void hpre_algs_unregister(struct hisi_qm *qm)
- {
- mutex_lock(&hpre_algs_lock);
- if (--hpre_available_devs)
- goto unlock;
- hpre_unregister_x25519(qm);
- hpre_unregister_ecdh(qm);
- hpre_unregister_dh(qm);
- hpre_unregister_rsa(qm);
- unlock:
- mutex_unlock(&hpre_algs_lock);
- }
|