sec_crypto.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /* Copyright (c) 2019 HiSilicon Limited. */
  3. #include <crypto/aes.h>
  4. #include <crypto/aead.h>
  5. #include <crypto/algapi.h>
  6. #include <crypto/authenc.h>
  7. #include <crypto/des.h>
  8. #include <crypto/hash.h>
  9. #include <crypto/internal/aead.h>
  10. #include <crypto/internal/des.h>
  11. #include <crypto/sha1.h>
  12. #include <crypto/sha2.h>
  13. #include <crypto/skcipher.h>
  14. #include <crypto/xts.h>
  15. #include <linux/crypto.h>
  16. #include <linux/dma-mapping.h>
  17. #include <linux/idr.h>
  18. #include "sec.h"
  19. #include "sec_crypto.h"
  20. #define SEC_PRIORITY 4001
  21. #define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE)
  22. #define SEC_XTS_MID_KEY_SIZE (3 * AES_MIN_KEY_SIZE)
  23. #define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE)
  24. #define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE)
  25. #define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE)
  26. /* Macros for bit operations on the SEC sqe (bd) fields */
  27. #define SEC_DE_OFFSET 1
  28. #define SEC_CIPHER_OFFSET 4
  29. #define SEC_SCENE_OFFSET 3
  30. #define SEC_DST_SGL_OFFSET 2
  31. #define SEC_SRC_SGL_OFFSET 7
  32. #define SEC_CKEY_OFFSET 9
  33. #define SEC_CMODE_OFFSET 12
  34. #define SEC_AKEY_OFFSET 5
  35. #define SEC_AEAD_ALG_OFFSET 11
  36. #define SEC_AUTH_OFFSET 6
  37. #define SEC_DE_OFFSET_V3 9
  38. #define SEC_SCENE_OFFSET_V3 5
  39. #define SEC_CKEY_OFFSET_V3 13
  40. #define SEC_CTR_CNT_OFFSET 25
  41. #define SEC_CTR_CNT_ROLLOVER 2
  42. #define SEC_SRC_SGL_OFFSET_V3 11
  43. #define SEC_DST_SGL_OFFSET_V3 14
  44. #define SEC_CALG_OFFSET_V3 4
  45. #define SEC_AKEY_OFFSET_V3 9
  46. #define SEC_MAC_OFFSET_V3 4
  47. #define SEC_AUTH_ALG_OFFSET_V3 15
  48. #define SEC_CIPHER_AUTH_V3 0xbf
  49. #define SEC_AUTH_CIPHER_V3 0x40
  50. #define SEC_FLAG_OFFSET 7
  51. #define SEC_FLAG_MASK 0x0780
  52. #define SEC_TYPE_MASK 0x0F
  53. #define SEC_DONE_MASK 0x0001
  54. #define SEC_ICV_MASK 0x000E
  55. #define SEC_SQE_LEN_RATE_MASK 0x3
  56. #define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth))
  57. #define SEC_SGL_SGE_NR 128
  58. #define SEC_CIPHER_AUTH 0xfe
  59. #define SEC_AUTH_CIPHER 0x1
  60. #define SEC_MAX_MAC_LEN 64
  61. #define SEC_MAX_AAD_LEN 65535
  62. #define SEC_MAX_CCM_AAD_LEN 65279
  63. #define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth))
  64. #define SEC_PBUF_SZ 512
  65. #define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ
  66. #define SEC_PBUF_MAC_OFFSET (SEC_PBUF_SZ + SEC_IV_SIZE)
  67. #define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \
  68. SEC_MAX_MAC_LEN * 2)
  69. #define SEC_PBUF_NUM (PAGE_SIZE / SEC_PBUF_PKG)
  70. #define SEC_PBUF_PAGE_NUM(depth) ((depth) / SEC_PBUF_NUM)
  71. #define SEC_PBUF_LEFT_SZ(depth) (SEC_PBUF_PKG * ((depth) - \
  72. SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM))
  73. #define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \
  74. SEC_PBUF_LEFT_SZ(depth))
  75. #define SEC_SQE_LEN_RATE 4
  76. #define SEC_SQE_CFLAG 2
  77. #define SEC_SQE_AEAD_FLAG 3
  78. #define SEC_SQE_DONE 0x1
  79. #define SEC_ICV_ERR 0x2
  80. #define MIN_MAC_LEN 4
  81. #define MAC_LEN_MASK 0x1U
  82. #define MAX_INPUT_DATA_LEN 0xFFFE00
  83. #define BITS_MASK 0xFF
  84. #define BYTE_BITS 0x8
  85. #define SEC_XTS_NAME_SZ 0x3
  86. #define IV_CM_CAL_NUM 2
  87. #define IV_CL_MASK 0x7
  88. #define IV_CL_MIN 2
  89. #define IV_CL_MID 4
  90. #define IV_CL_MAX 8
  91. #define IV_FLAGS_OFFSET 0x6
  92. #define IV_CM_OFFSET 0x3
  93. #define IV_LAST_BYTE1 1
  94. #define IV_LAST_BYTE2 2
  95. #define IV_LAST_BYTE_MASK 0xFF
  96. #define IV_CTR_INIT 0x1
  97. #define IV_BYTE_OFFSET 0x8
  98. static DEFINE_MUTEX(sec_algs_lock);
  99. static unsigned int sec_available_devs;
  100. struct sec_skcipher {
  101. u64 alg_msk;
  102. struct skcipher_alg alg;
  103. };
  104. struct sec_aead {
  105. u64 alg_msk;
  106. struct aead_alg alg;
  107. };
  108. /* Pick an en/de-cipher queue cyclically to balance load across the TFM's queues */
  109. static inline u32 sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req)
  110. {
  111. if (req->c_req.encrypt)
  112. return (u32)atomic_inc_return(&ctx->enc_qcyclic) %
  113. ctx->hlf_q_num;
  114. return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num +
  115. ctx->hlf_q_num;
  116. }
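/*
 * Editor's illustration, not part of the original driver: with
 * hlf_q_num = 2 (i.e. four queues per ctx, an assumed value), encrypt
 * requests rotate over queues 0 and 1 while decrypt requests rotate
 * over queues 2 and 3, so the two directions never contend for the
 * same queue.
 */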
  117. static inline void sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req)
  118. {
  119. if (req->c_req.encrypt)
  120. atomic_dec(&ctx->enc_qcyclic);
  121. else
  122. atomic_dec(&ctx->dec_qcyclic);
  123. }
  124. static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx)
  125. {
  126. int req_id;
  127. spin_lock_bh(&qp_ctx->req_lock);
  128. req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC);
  129. spin_unlock_bh(&qp_ctx->req_lock);
  130. if (unlikely(req_id < 0)) {
  131. dev_err(req->ctx->dev, "alloc req id fail!\n");
  132. return req_id;
  133. }
  134. req->qp_ctx = qp_ctx;
  135. qp_ctx->req_list[req_id] = req;
  136. return req_id;
  137. }
  138. static void sec_free_req_id(struct sec_req *req)
  139. {
  140. struct sec_qp_ctx *qp_ctx = req->qp_ctx;
  141. int req_id = req->req_id;
  142. if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) {
  143. dev_err(req->ctx->dev, "free request id invalid!\n");
  144. return;
  145. }
  146. qp_ctx->req_list[req_id] = NULL;
  147. req->qp_ctx = NULL;
  148. spin_lock_bh(&qp_ctx->req_lock);
  149. idr_remove(&qp_ctx->req_idr, req_id);
  150. spin_unlock_bh(&qp_ctx->req_lock);
  151. }
  152. static u8 pre_parse_finished_bd(struct bd_status *status, void *resp)
  153. {
  154. struct sec_sqe *bd = resp;
  155. status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK;
  156. status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1;
  157. status->flag = (le16_to_cpu(bd->type2.done_flag) &
  158. SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
  159. status->tag = le16_to_cpu(bd->type2.tag);
  160. status->err_type = bd->type2.error_type;
  161. return bd->type_cipher_auth & SEC_TYPE_MASK;
  162. }
  163. static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp)
  164. {
  165. struct sec_sqe3 *bd3 = resp;
  166. status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK;
  167. status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1;
  168. status->flag = (le16_to_cpu(bd3->done_flag) &
  169. SEC_FLAG_MASK) >> SEC_FLAG_OFFSET;
  170. status->tag = le64_to_cpu(bd3->tag);
  171. status->err_type = bd3->error_type;
  172. return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK;
  173. }
  174. static int sec_cb_status_check(struct sec_req *req,
  175. struct bd_status *status)
  176. {
  177. struct sec_ctx *ctx = req->ctx;
  178. if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) {
  179. dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n",
  180. req->err_type, status->done);
  181. return -EIO;
  182. }
  183. if (unlikely(ctx->alg_type == SEC_SKCIPHER)) {
  184. if (unlikely(status->flag != SEC_SQE_CFLAG)) {
  185. dev_err_ratelimited(ctx->dev, "flag[%u]\n",
  186. status->flag);
  187. return -EIO;
  188. }
  189. } else if (unlikely(ctx->alg_type == SEC_AEAD)) {
  190. if (unlikely(status->flag != SEC_SQE_AEAD_FLAG ||
  191. status->icv == SEC_ICV_ERR)) {
  192. dev_err_ratelimited(ctx->dev,
  193. "flag[%u], icv[%u]\n",
  194. status->flag, status->icv);
  195. return -EBADMSG;
  196. }
  197. }
  198. return 0;
  199. }
  200. static void sec_req_cb(struct hisi_qp *qp, void *resp)
  201. {
  202. struct sec_qp_ctx *qp_ctx = qp->qp_ctx;
  203. struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx;
  204. u8 type_supported = qp_ctx->ctx->type_supported;
  205. struct bd_status status;
  206. struct sec_ctx *ctx;
  207. struct sec_req *req;
  208. int err;
  209. u8 type;
  210. if (type_supported == SEC_BD_TYPE2) {
  211. type = pre_parse_finished_bd(&status, resp);
  212. req = qp_ctx->req_list[status.tag];
  213. } else {
  214. type = pre_parse_finished_bd3(&status, resp);
  215. req = (void *)(uintptr_t)status.tag;
  216. }
  217. if (unlikely(type != type_supported)) {
  218. atomic64_inc(&dfx->err_bd_cnt);
  219. pr_err("err bd type [%u]\n", type);
  220. return;
  221. }
  222. if (unlikely(!req)) {
  223. atomic64_inc(&dfx->invalid_req_cnt);
  224. atomic_inc(&qp->qp_status.used);
  225. return;
  226. }
  227. req->err_type = status.err_type;
  228. ctx = req->ctx;
  229. err = sec_cb_status_check(req, &status);
  230. if (err)
  231. atomic64_inc(&dfx->done_flag_cnt);
  232. atomic64_inc(&dfx->recv_cnt);
  233. ctx->req_op->buf_unmap(ctx, req);
  234. ctx->req_op->callback(ctx, req, err);
  235. }
  236. static int sec_bd_send(struct sec_ctx *ctx, struct sec_req *req)
  237. {
  238. struct sec_qp_ctx *qp_ctx = req->qp_ctx;
  239. int ret;
  240. if (ctx->fake_req_limit <=
  241. atomic_read(&qp_ctx->qp->qp_status.used) &&
  242. !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG))
  243. return -EBUSY;
  244. spin_lock_bh(&qp_ctx->req_lock);
  245. ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe);
  246. if (ctx->fake_req_limit <=
  247. atomic_read(&qp_ctx->qp->qp_status.used) && !ret) {
  248. list_add_tail(&req->backlog_head, &qp_ctx->backlog);
  249. atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
  250. atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt);
  251. spin_unlock_bh(&qp_ctx->req_lock);
  252. return -EBUSY;
  253. }
  254. spin_unlock_bh(&qp_ctx->req_lock);
  255. if (unlikely(ret == -EBUSY))
  256. return -ENOBUFS;
  257. if (likely(!ret)) {
  258. ret = -EINPROGRESS;
  259. atomic64_inc(&ctx->sec->debug.dfx.send_cnt);
  260. }
  261. return ret;
  262. }
  263. /* Get DMA memory resources */
  264. static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res)
  265. {
  266. u16 q_depth = res->depth;
  267. int i;
  268. res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
  269. &res->c_ivin_dma, GFP_KERNEL);
  270. if (!res->c_ivin)
  271. return -ENOMEM;
  272. for (i = 1; i < q_depth; i++) {
  273. res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE;
  274. res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE;
  275. }
  276. return 0;
  277. }
  278. static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res)
  279. {
  280. if (res->c_ivin)
  281. dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
  282. res->c_ivin, res->c_ivin_dma);
  283. }
  284. static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res)
  285. {
  286. u16 q_depth = res->depth;
  287. int i;
  288. res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth),
  289. &res->a_ivin_dma, GFP_KERNEL);
  290. if (!res->a_ivin)
  291. return -ENOMEM;
  292. for (i = 1; i < q_depth; i++) {
  293. res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE;
  294. res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE;
  295. }
  296. return 0;
  297. }
  298. static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res)
  299. {
  300. if (res->a_ivin)
  301. dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth),
  302. res->a_ivin, res->a_ivin_dma);
  303. }
  304. static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res)
  305. {
  306. u16 q_depth = res->depth;
  307. int i;
  308. res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1,
  309. &res->out_mac_dma, GFP_KERNEL);
  310. if (!res->out_mac)
  311. return -ENOMEM;
  312. for (i = 1; i < q_depth; i++) {
  313. res[i].out_mac_dma = res->out_mac_dma +
  314. i * (SEC_MAX_MAC_LEN << 1);
  315. res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1);
  316. }
  317. return 0;
  318. }
  319. static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res)
  320. {
  321. if (res->out_mac)
  322. dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1,
  323. res->out_mac, res->out_mac_dma);
  324. }
  325. static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res)
  326. {
  327. if (res->pbuf)
  328. dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth),
  329. res->pbuf, res->pbuf_dma);
  330. }
  331. /*
  332. * To improve performance, when the IOMMU is in use, small packets
  333. * (< 512 bytes) are copied into a pre-mapped pbuffer to avoid
  334. * per-request IOMMU mapping overhead.
  335. */
  335. static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res)
  336. {
  337. u16 q_depth = res->depth;
  338. int size = SEC_PBUF_PAGE_NUM(q_depth);
  339. int pbuf_page_offset;
  340. int i, j, k;
  341. res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth),
  342. &res->pbuf_dma, GFP_KERNEL);
  343. if (!res->pbuf)
  344. return -ENOMEM;
  345. /*
  346. * Each SEC_PBUF_PKG holds the data pbuf, the IV and the
  347. * out_mac: <SEC_PBUF|SEC_IV|SEC_MAC>.
  348. * Every page holds SEC_PBUF_NUM such packages and the sec_qp_ctx
  349. * needs one package per queue-depth slot, so SEC_PBUF_PAGE_NUM
  350. * full pages plus the leftover bytes make up SEC_TOTAL_PBUF_SZ
  351. * (see the worked example after this function).
  352. */
  353. for (i = 0; i <= size; i++) {
  354. pbuf_page_offset = PAGE_SIZE * i;
  355. for (j = 0; j < SEC_PBUF_NUM; j++) {
  356. k = i * SEC_PBUF_NUM + j;
  357. if (k == q_depth)
  358. break;
  359. res[k].pbuf = res->pbuf +
  360. j * SEC_PBUF_PKG + pbuf_page_offset;
  361. res[k].pbuf_dma = res->pbuf_dma +
  362. j * SEC_PBUF_PKG + pbuf_page_offset;
  363. }
  364. }
  365. return 0;
  366. }
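/*
 * Editor's illustration, not part of the original driver: assuming
 * SEC_IV_SIZE is 24 bytes (as defined in sec.h) and 4 KiB pages, one
 * SEC_PBUF_PKG is 512 (SEC_PBUF_SZ) + 24 (IV) + 2 * 64 (MAC) = 664
 * bytes, so SEC_PBUF_NUM = 4096 / 664 = 6 packages fit in a page.
 * For an assumed queue depth of 256, SEC_PBUF_PAGE_NUM = 256 / 6 = 42
 * full pages, and SEC_PBUF_LEFT_SZ = 664 * (256 - 42 * 6) = 2656
 * bytes remain, giving SEC_TOTAL_PBUF_SZ = 42 * 4096 + 2656 bytes.
 */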
  367. static int sec_alg_resource_alloc(struct sec_ctx *ctx,
  368. struct sec_qp_ctx *qp_ctx)
  369. {
  370. struct sec_alg_res *res = qp_ctx->res;
  371. struct device *dev = ctx->dev;
  372. int ret;
  373. ret = sec_alloc_civ_resource(dev, res);
  374. if (ret)
  375. return ret;
  376. if (ctx->alg_type == SEC_AEAD) {
  377. ret = sec_alloc_aiv_resource(dev, res);
  378. if (ret)
  379. goto alloc_aiv_fail;
  380. ret = sec_alloc_mac_resource(dev, res);
  381. if (ret)
  382. goto alloc_mac_fail;
  383. }
  384. if (ctx->pbuf_supported) {
  385. ret = sec_alloc_pbuf_resource(dev, res);
  386. if (ret) {
  387. dev_err(dev, "fail to alloc pbuf dma resource!\n");
  388. goto alloc_pbuf_fail;
  389. }
  390. }
  391. return 0;
  392. alloc_pbuf_fail:
  393. if (ctx->alg_type == SEC_AEAD)
  394. sec_free_mac_resource(dev, qp_ctx->res);
  395. alloc_mac_fail:
  396. if (ctx->alg_type == SEC_AEAD)
  397. sec_free_aiv_resource(dev, res);
  398. alloc_aiv_fail:
  399. sec_free_civ_resource(dev, res);
  400. return ret;
  401. }
  402. static void sec_alg_resource_free(struct sec_ctx *ctx,
  403. struct sec_qp_ctx *qp_ctx)
  404. {
  405. struct device *dev = ctx->dev;
  406. sec_free_civ_resource(dev, qp_ctx->res);
  407. if (ctx->pbuf_supported)
  408. sec_free_pbuf_resource(dev, qp_ctx->res);
  409. if (ctx->alg_type == SEC_AEAD) {
  410. sec_free_mac_resource(dev, qp_ctx->res);
  411. sec_free_aiv_resource(dev, qp_ctx->res);
  412. }
  413. }
  414. static int sec_alloc_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
  415. {
  416. u16 q_depth = qp_ctx->qp->sq_depth;
  417. struct device *dev = ctx->dev;
  418. int ret = -ENOMEM;
  419. qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL);
  420. if (!qp_ctx->req_list)
  421. return ret;
  422. qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL);
  423. if (!qp_ctx->res)
  424. goto err_free_req_list;
  425. qp_ctx->res->depth = q_depth;
  426. qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
  427. if (IS_ERR(qp_ctx->c_in_pool)) {
  428. dev_err(dev, "fail to create sgl pool for input!\n");
  429. goto err_free_res;
  430. }
  431. qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR);
  432. if (IS_ERR(qp_ctx->c_out_pool)) {
  433. dev_err(dev, "fail to create sgl pool for output!\n");
  434. goto err_free_c_in_pool;
  435. }
  436. ret = sec_alg_resource_alloc(ctx, qp_ctx);
  437. if (ret)
  438. goto err_free_c_out_pool;
  439. return 0;
  440. err_free_c_out_pool:
  441. hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
  442. err_free_c_in_pool:
  443. hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
  444. err_free_res:
  445. kfree(qp_ctx->res);
  446. err_free_req_list:
  447. kfree(qp_ctx->req_list);
  448. return ret;
  449. }
  450. static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx)
  451. {
  452. struct device *dev = ctx->dev;
  453. sec_alg_resource_free(ctx, qp_ctx);
  454. hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool);
  455. hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool);
  456. kfree(qp_ctx->res);
  457. kfree(qp_ctx->req_list);
  458. }
  459. static int sec_create_qp_ctx(struct sec_ctx *ctx, int qp_ctx_id)
  460. {
  461. struct sec_qp_ctx *qp_ctx;
  462. struct hisi_qp *qp;
  463. int ret;
  464. qp_ctx = &ctx->qp_ctx[qp_ctx_id];
  465. qp = ctx->qps[qp_ctx_id];
  466. qp->req_type = 0;
  467. qp->qp_ctx = qp_ctx;
  468. qp_ctx->qp = qp;
  469. qp_ctx->ctx = ctx;
  470. qp->req_cb = sec_req_cb;
  471. spin_lock_init(&qp_ctx->req_lock);
  472. idr_init(&qp_ctx->req_idr);
  473. INIT_LIST_HEAD(&qp_ctx->backlog);
  474. ret = sec_alloc_qp_ctx_resource(ctx, qp_ctx);
  475. if (ret)
  476. goto err_destroy_idr;
  477. ret = hisi_qm_start_qp(qp, 0);
  478. if (ret < 0)
  479. goto err_resource_free;
  480. return 0;
  481. err_resource_free:
  482. sec_free_qp_ctx_resource(ctx, qp_ctx);
  483. err_destroy_idr:
  484. idr_destroy(&qp_ctx->req_idr);
  485. return ret;
  486. }
  487. static void sec_release_qp_ctx(struct sec_ctx *ctx,
  488. struct sec_qp_ctx *qp_ctx)
  489. {
  490. hisi_qm_stop_qp(qp_ctx->qp);
  491. sec_free_qp_ctx_resource(ctx, qp_ctx);
  492. idr_destroy(&qp_ctx->req_idr);
  493. }
  494. static int sec_ctx_base_init(struct sec_ctx *ctx)
  495. {
  496. struct sec_dev *sec;
  497. int i, ret;
  498. ctx->qps = sec_create_qps();
  499. if (!ctx->qps) {
  500. pr_err("Can not create sec qps!\n");
  501. return -ENODEV;
  502. }
  503. sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
  504. ctx->sec = sec;
  505. ctx->dev = &sec->qm.pdev->dev;
  506. ctx->hlf_q_num = sec->ctx_q_num >> 1;
  507. ctx->pbuf_supported = ctx->sec->iommu_used;
  508. /* Half of queue depth is taken as fake requests limit in the queue. */
  509. ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1;
  510. ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx),
  511. GFP_KERNEL);
  512. if (!ctx->qp_ctx) {
  513. ret = -ENOMEM;
  514. goto err_destroy_qps;
  515. }
  516. for (i = 0; i < sec->ctx_q_num; i++) {
  517. ret = sec_create_qp_ctx(ctx, i);
  518. if (ret)
  519. goto err_sec_release_qp_ctx;
  520. }
  521. return 0;
  522. err_sec_release_qp_ctx:
  523. for (i = i - 1; i >= 0; i--)
  524. sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
  525. kfree(ctx->qp_ctx);
  526. err_destroy_qps:
  527. sec_destroy_qps(ctx->qps, sec->ctx_q_num);
  528. return ret;
  529. }
  530. static void sec_ctx_base_uninit(struct sec_ctx *ctx)
  531. {
  532. int i;
  533. for (i = 0; i < ctx->sec->ctx_q_num; i++)
  534. sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);
  535. sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
  536. kfree(ctx->qp_ctx);
  537. }
  538. static int sec_cipher_init(struct sec_ctx *ctx)
  539. {
  540. struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
  541. c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
  542. &c_ctx->c_key_dma, GFP_KERNEL);
  543. if (!c_ctx->c_key)
  544. return -ENOMEM;
  545. return 0;
  546. }
  547. static void sec_cipher_uninit(struct sec_ctx *ctx)
  548. {
  549. struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
  550. memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
  551. dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
  552. c_ctx->c_key, c_ctx->c_key_dma);
  553. }
  554. static int sec_auth_init(struct sec_ctx *ctx)
  555. {
  556. struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
  557. a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
  558. &a_ctx->a_key_dma, GFP_KERNEL);
  559. if (!a_ctx->a_key)
  560. return -ENOMEM;
  561. return 0;
  562. }
  563. static void sec_auth_uninit(struct sec_ctx *ctx)
  564. {
  565. struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
  566. memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE);
  567. dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
  568. a_ctx->a_key, a_ctx->a_key_dma);
  569. }
  570. static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
  571. {
  572. const char *alg = crypto_tfm_alg_name(&tfm->base);
  573. struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
  574. struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
  575. c_ctx->fallback = false;
  576. /* Currently, only XTS mode needs a fallback tfm when using a 192-bit key */
  577. if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
  578. return 0;
  579. c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
  580. CRYPTO_ALG_NEED_FALLBACK);
  581. if (IS_ERR(c_ctx->fbtfm)) {
  582. pr_err("failed to alloc xts mode fallback tfm!\n");
  583. return PTR_ERR(c_ctx->fbtfm);
  584. }
  585. return 0;
  586. }
  587. static int sec_skcipher_init(struct crypto_skcipher *tfm)
  588. {
  589. struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
  590. int ret;
  591. ctx->alg_type = SEC_SKCIPHER;
  592. crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req));
  593. ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm);
  594. if (ctx->c_ctx.ivsize > SEC_IV_SIZE) {
  595. pr_err("get error skcipher iv size!\n");
  596. return -EINVAL;
  597. }
  598. ret = sec_ctx_base_init(ctx);
  599. if (ret)
  600. return ret;
  601. ret = sec_cipher_init(ctx);
  602. if (ret)
  603. goto err_cipher_init;
  604. ret = sec_skcipher_fbtfm_init(tfm);
  605. if (ret)
  606. goto err_fbtfm_init;
  607. return 0;
  608. err_fbtfm_init:
  609. sec_cipher_uninit(ctx);
  610. err_cipher_init:
  611. sec_ctx_base_uninit(ctx);
  612. return ret;
  613. }
  614. static void sec_skcipher_uninit(struct crypto_skcipher *tfm)
  615. {
  616. struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
  617. if (ctx->c_ctx.fbtfm)
  618. crypto_free_sync_skcipher(ctx->c_ctx.fbtfm);
  619. sec_cipher_uninit(ctx);
  620. sec_ctx_base_uninit(ctx);
  621. }
  622. static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key, const u32 keylen)
  623. {
  624. struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
  625. struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
  626. int ret;
  627. ret = verify_skcipher_des3_key(tfm, key);
  628. if (ret)
  629. return ret;
  630. switch (keylen) {
  631. case SEC_DES3_2KEY_SIZE:
  632. c_ctx->c_key_len = SEC_CKEY_3DES_2KEY;
  633. break;
  634. case SEC_DES3_3KEY_SIZE:
  635. c_ctx->c_key_len = SEC_CKEY_3DES_3KEY;
  636. break;
  637. default:
  638. return -EINVAL;
  639. }
  640. return 0;
  641. }
  642. static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx,
  643. const u32 keylen,
  644. const enum sec_cmode c_mode)
  645. {
  646. if (c_mode == SEC_CMODE_XTS) {
  647. switch (keylen) {
  648. case SEC_XTS_MIN_KEY_SIZE:
  649. c_ctx->c_key_len = SEC_CKEY_128BIT;
  650. break;
  651. case SEC_XTS_MID_KEY_SIZE:
  652. c_ctx->fallback = true;
  653. break;
  654. case SEC_XTS_MAX_KEY_SIZE:
  655. c_ctx->c_key_len = SEC_CKEY_256BIT;
  656. break;
  657. default:
  658. pr_err("hisi_sec2: xts mode key error!\n");
  659. return -EINVAL;
  660. }
  661. } else {
  662. if (c_ctx->c_alg == SEC_CALG_SM4 &&
  663. keylen != AES_KEYSIZE_128) {
  664. pr_err("hisi_sec2: sm4 key error!\n");
  665. return -EINVAL;
  666. } else {
  667. switch (keylen) {
  668. case AES_KEYSIZE_128:
  669. c_ctx->c_key_len = SEC_CKEY_128BIT;
  670. break;
  671. case AES_KEYSIZE_192:
  672. c_ctx->c_key_len = SEC_CKEY_192BIT;
  673. break;
  674. case AES_KEYSIZE_256:
  675. c_ctx->c_key_len = SEC_CKEY_256BIT;
  676. break;
  677. default:
  678. pr_err("hisi_sec2: aes key error!\n");
  679. return -EINVAL;
  680. }
  681. }
  682. }
  683. return 0;
  684. }
  685. static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
  686. const u32 keylen, const enum sec_calg c_alg,
  687. const enum sec_cmode c_mode)
  688. {
  689. struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
  690. struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
  691. struct device *dev = ctx->dev;
  692. int ret;
  693. if (c_mode == SEC_CMODE_XTS) {
  694. ret = xts_verify_key(tfm, key, keylen);
  695. if (ret) {
  696. dev_err(dev, "xts mode key err!\n");
  697. return ret;
  698. }
  699. }
  700. c_ctx->c_alg = c_alg;
  701. c_ctx->c_mode = c_mode;
  702. switch (c_alg) {
  703. case SEC_CALG_3DES:
  704. ret = sec_skcipher_3des_setkey(tfm, key, keylen);
  705. break;
  706. case SEC_CALG_AES:
  707. case SEC_CALG_SM4:
  708. ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
  709. break;
  710. default:
  711. dev_err(dev, "sec c_alg err!\n");
  712. return -EINVAL;
  713. }
  714. if (ret) {
  715. dev_err(dev, "set sec key err!\n");
  716. return ret;
  717. }
  718. memcpy(c_ctx->c_key, key, keylen);
  719. if (c_ctx->fallback && c_ctx->fbtfm) {
  720. ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
  721. if (ret) {
  722. dev_err(dev, "failed to set fallback skcipher key!\n");
  723. return ret;
  724. }
  725. }
  726. return 0;
  727. }
  728. #define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode) \
  729. static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\
  730. u32 keylen) \
  731. { \
  732. return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode); \
  733. }
  734. GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB)
  735. GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC)
  736. GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS)
  737. GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR)
  738. GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB)
  739. GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC)
  740. GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS)
  741. GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC)
  742. GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR)
  743. static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req,
  744. struct scatterlist *src)
  745. {
  746. struct sec_aead_req *a_req = &req->aead_req;
  747. struct aead_request *aead_req = a_req->aead_req;
  748. struct sec_cipher_req *c_req = &req->c_req;
  749. struct sec_qp_ctx *qp_ctx = req->qp_ctx;
  750. struct device *dev = ctx->dev;
  751. int copy_size, pbuf_length;
  752. int req_id = req->req_id;
  753. struct crypto_aead *tfm;
  754. size_t authsize;
  755. u8 *mac_offset;
  756. if (ctx->alg_type == SEC_AEAD)
  757. copy_size = aead_req->cryptlen + aead_req->assoclen;
  758. else
  759. copy_size = c_req->c_len;
  760. pbuf_length = sg_copy_to_buffer(src, sg_nents(src),
  761. qp_ctx->res[req_id].pbuf, copy_size);
  762. if (unlikely(pbuf_length != copy_size)) {
  763. dev_err(dev, "copy src data to pbuf error!\n");
  764. return -EINVAL;
  765. }
  766. if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
  767. tfm = crypto_aead_reqtfm(aead_req);
  768. authsize = crypto_aead_authsize(tfm);
  769. mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize;
  770. memcpy(a_req->out_mac, mac_offset, authsize);
  771. }
  772. req->in_dma = qp_ctx->res[req_id].pbuf_dma;
  773. c_req->c_out_dma = req->in_dma;
  774. return 0;
  775. }
  776. static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req,
  777. struct scatterlist *dst)
  778. {
  779. struct aead_request *aead_req = req->aead_req.aead_req;
  780. struct sec_cipher_req *c_req = &req->c_req;
  781. struct sec_qp_ctx *qp_ctx = req->qp_ctx;
  782. int copy_size, pbuf_length;
  783. int req_id = req->req_id;
  784. if (ctx->alg_type == SEC_AEAD)
  785. copy_size = c_req->c_len + aead_req->assoclen;
  786. else
  787. copy_size = c_req->c_len;
  788. pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst),
  789. qp_ctx->res[req_id].pbuf, copy_size);
  790. if (unlikely(pbuf_length != copy_size))
  791. dev_err(ctx->dev, "copy pbuf data to dst error!\n");
  792. }
  793. static int sec_aead_mac_init(struct sec_aead_req *req)
  794. {
  795. struct aead_request *aead_req = req->aead_req;
  796. struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
  797. size_t authsize = crypto_aead_authsize(tfm);
  798. struct scatterlist *sgl = aead_req->src;
  799. u8 *mac_out = req->out_mac;
  800. size_t copy_size;
  801. off_t skip_size;
  802. /* Copy input mac */
  803. skip_size = aead_req->assoclen + aead_req->cryptlen - authsize;
  804. copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size);
  805. if (unlikely(copy_size != authsize))
  806. return -EINVAL;
  807. return 0;
  808. }
  809. static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req,
  810. struct scatterlist *src, struct scatterlist *dst)
  811. {
  812. struct sec_cipher_req *c_req = &req->c_req;
  813. struct sec_aead_req *a_req = &req->aead_req;
  814. struct sec_qp_ctx *qp_ctx = req->qp_ctx;
  815. struct sec_alg_res *res = &qp_ctx->res[req->req_id];
  816. struct device *dev = ctx->dev;
  817. int ret;
  818. if (req->use_pbuf) {
  819. c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET;
  820. c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET;
  821. if (ctx->alg_type == SEC_AEAD) {
  822. a_req->a_ivin = res->a_ivin;
  823. a_req->a_ivin_dma = res->a_ivin_dma;
  824. a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET;
  825. a_req->out_mac_dma = res->pbuf_dma +
  826. SEC_PBUF_MAC_OFFSET;
  827. }
  828. ret = sec_cipher_pbuf_map(ctx, req, src);
  829. return ret;
  830. }
  831. c_req->c_ivin = res->c_ivin;
  832. c_req->c_ivin_dma = res->c_ivin_dma;
  833. if (ctx->alg_type == SEC_AEAD) {
  834. a_req->a_ivin = res->a_ivin;
  835. a_req->a_ivin_dma = res->a_ivin_dma;
  836. a_req->out_mac = res->out_mac;
  837. a_req->out_mac_dma = res->out_mac_dma;
  838. }
  839. req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src,
  840. qp_ctx->c_in_pool,
  841. req->req_id,
  842. &req->in_dma);
  843. if (IS_ERR(req->in)) {
  844. dev_err(dev, "fail to dma map input sgl buffers!\n");
  845. return PTR_ERR(req->in);
  846. }
  847. if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) {
  848. ret = sec_aead_mac_init(a_req);
  849. if (unlikely(ret)) {
  850. dev_err(dev, "fail to init mac data for ICV!\n");
  851. hisi_acc_sg_buf_unmap(dev, src, req->in);
  852. return ret;
  853. }
  854. }
  855. if (dst == src) {
  856. c_req->c_out = req->in;
  857. c_req->c_out_dma = req->in_dma;
  858. } else {
  859. c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst,
  860. qp_ctx->c_out_pool,
  861. req->req_id,
  862. &c_req->c_out_dma);
  863. if (IS_ERR(c_req->c_out)) {
  864. dev_err(dev, "fail to dma map output sgl buffers!\n");
  865. hisi_acc_sg_buf_unmap(dev, src, req->in);
  866. return PTR_ERR(c_req->c_out);
  867. }
  868. }
  869. return 0;
  870. }
  871. static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req,
  872. struct scatterlist *src, struct scatterlist *dst)
  873. {
  874. struct sec_cipher_req *c_req = &req->c_req;
  875. struct device *dev = ctx->dev;
  876. if (req->use_pbuf) {
  877. sec_cipher_pbuf_unmap(ctx, req, dst);
  878. } else {
  879. if (dst != src)
  880. hisi_acc_sg_buf_unmap(dev, src, req->in);
  881. hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out);
  882. }
  883. }
  884. static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
  885. {
  886. struct skcipher_request *sq = req->c_req.sk_req;
  887. return sec_cipher_map(ctx, req, sq->src, sq->dst);
  888. }
  889. static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
  890. {
  891. struct skcipher_request *sq = req->c_req.sk_req;
  892. sec_cipher_unmap(ctx, req, sq->src, sq->dst);
  893. }
  894. static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx,
  895. struct crypto_authenc_keys *keys)
  896. {
  897. switch (keys->enckeylen) {
  898. case AES_KEYSIZE_128:
  899. c_ctx->c_key_len = SEC_CKEY_128BIT;
  900. break;
  901. case AES_KEYSIZE_192:
  902. c_ctx->c_key_len = SEC_CKEY_192BIT;
  903. break;
  904. case AES_KEYSIZE_256:
  905. c_ctx->c_key_len = SEC_CKEY_256BIT;
  906. break;
  907. default:
  908. pr_err("hisi_sec2: aead aes key error!\n");
  909. return -EINVAL;
  910. }
  911. memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen);
  912. return 0;
  913. }
  914. static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx,
  915. struct crypto_authenc_keys *keys)
  916. {
  917. struct crypto_shash *hash_tfm = ctx->hash_tfm;
  918. int blocksize, digestsize, ret;
  919. if (!keys->authkeylen) {
  920. pr_err("hisi_sec2: aead auth key error!\n");
  921. return -EINVAL;
  922. }
  923. blocksize = crypto_shash_blocksize(hash_tfm);
  924. digestsize = crypto_shash_digestsize(hash_tfm);
  925. if (keys->authkeylen > blocksize) {
  926. ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey,
  927. keys->authkeylen, ctx->a_key);
  928. if (ret) {
  929. pr_err("hisi_sec2: aead auth digest error!\n");
  930. return -EINVAL;
  931. }
  932. ctx->a_key_len = digestsize;
  933. } else {
  934. memcpy(ctx->a_key, keys->authkey, keys->authkeylen);
  935. ctx->a_key_len = keys->authkeylen;
  936. }
  937. return 0;
  938. }
  939. static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize)
  940. {
  941. struct crypto_tfm *tfm = crypto_aead_tfm(aead);
  942. struct sec_ctx *ctx = crypto_tfm_ctx(tfm);
  943. struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
  944. return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize);
  945. }
  946. static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx,
  947. struct crypto_aead *tfm, const u8 *key,
  948. unsigned int keylen)
  949. {
  950. crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK);
  951. crypto_aead_set_flags(a_ctx->fallback_aead_tfm,
  952. crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK);
  953. return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen);
  954. }
  955. static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
  956. const u32 keylen, const enum sec_hash_alg a_alg,
  957. const enum sec_calg c_alg,
  958. const enum sec_cmode c_mode)
  959. {
  960. struct sec_ctx *ctx = crypto_aead_ctx(tfm);
  961. struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
  962. struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
  963. struct device *dev = ctx->dev;
  964. struct crypto_authenc_keys keys;
  965. int ret;
  966. ctx->a_ctx.a_alg = a_alg;
  967. ctx->c_ctx.c_alg = c_alg;
  968. c_ctx->c_mode = c_mode;
  969. if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) {
  970. ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode);
  971. if (ret) {
  972. dev_err(dev, "set sec aes ccm cipher key err!\n");
  973. return ret;
  974. }
  975. memcpy(c_ctx->c_key, key, keylen);
  976. return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
  977. }
  978. ret = crypto_authenc_extractkeys(&keys, key, keylen);
  979. if (ret)
  980. goto bad_key;
  981. ret = sec_aead_aes_set_key(c_ctx, &keys);
  982. if (ret) {
  983. dev_err(dev, "set sec cipher key err!\n");
  984. goto bad_key;
  985. }
  986. ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys);
  987. if (ret) {
  988. dev_err(dev, "set sec auth key err!\n");
  989. goto bad_key;
  990. }
  991. if (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK) {
  992. ret = -EINVAL;
  993. dev_err(dev, "AUTH key length error!\n");
  994. goto bad_key;
  995. }
  996. ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
  997. if (ret) {
  998. dev_err(dev, "set sec fallback key err!\n");
  999. goto bad_key;
  1000. }
  1001. return 0;
  1002. bad_key:
  1003. memzero_explicit(&keys, sizeof(struct crypto_authenc_keys));
  1004. return ret;
  1005. }
  1006. #define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, cmode) \
  1007. static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, u32 keylen) \
  1008. { \
  1009. return sec_aead_setkey(tfm, key, keylen, aalg, calg, cmode); \
  1010. }
  1011. GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_CMODE_CBC)
  1012. GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_CMODE_CBC)
  1013. GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_CMODE_CBC)
  1014. GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_CMODE_CCM)
  1015. GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_CMODE_GCM)
  1016. GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_CMODE_CCM)
  1017. GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_CMODE_GCM)
  1018. static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req)
  1019. {
  1020. struct aead_request *aq = req->aead_req.aead_req;
  1021. return sec_cipher_map(ctx, req, aq->src, aq->dst);
  1022. }
  1023. static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req)
  1024. {
  1025. struct aead_request *aq = req->aead_req.aead_req;
  1026. sec_cipher_unmap(ctx, req, aq->src, aq->dst);
  1027. }
  1028. static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req)
  1029. {
  1030. int ret;
  1031. ret = ctx->req_op->buf_map(ctx, req);
  1032. if (unlikely(ret))
  1033. return ret;
  1034. ctx->req_op->do_transfer(ctx, req);
  1035. ret = ctx->req_op->bd_fill(ctx, req);
  1036. if (unlikely(ret))
  1037. goto unmap_req_buf;
  1038. return ret;
  1039. unmap_req_buf:
  1040. ctx->req_op->buf_unmap(ctx, req);
  1041. return ret;
  1042. }
  1043. static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req)
  1044. {
  1045. ctx->req_op->buf_unmap(ctx, req);
  1046. }
  1047. static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req)
  1048. {
  1049. struct skcipher_request *sk_req = req->c_req.sk_req;
  1050. struct sec_cipher_req *c_req = &req->c_req;
  1051. memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize);
  1052. }
  1053. static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
  1054. {
  1055. struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
  1056. struct sec_cipher_req *c_req = &req->c_req;
  1057. struct sec_sqe *sec_sqe = &req->sec_sqe;
  1058. u8 scene, sa_type, da_type;
  1059. u8 bd_type, cipher;
  1060. u8 de = 0;
  1061. memset(sec_sqe, 0, sizeof(struct sec_sqe));
  1062. sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
  1063. sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
  1064. sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma);
  1065. sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma);
  1066. sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) <<
  1067. SEC_CMODE_OFFSET);
  1068. sec_sqe->type2.c_alg = c_ctx->c_alg;
  1069. sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
  1070. SEC_CKEY_OFFSET);
  1071. bd_type = SEC_BD_TYPE2;
  1072. if (c_req->encrypt)
  1073. cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET;
  1074. else
  1075. cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET;
  1076. sec_sqe->type_cipher_auth = bd_type | cipher;
  1077. /* Set destination and source address type */
  1078. if (req->use_pbuf) {
  1079. sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET;
  1080. da_type = SEC_PBUF << SEC_DST_SGL_OFFSET;
  1081. } else {
  1082. sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET;
  1083. da_type = SEC_SGL << SEC_DST_SGL_OFFSET;
  1084. }
  1085. sec_sqe->sdm_addr_type |= da_type;
  1086. scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET;
  1087. if (req->in_dma != c_req->c_out_dma)
  1088. de = 0x1 << SEC_DE_OFFSET;
  1089. sec_sqe->sds_sa_type = (de | scene | sa_type);
  1090. sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len);
  1091. sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id);
  1092. return 0;
  1093. }
  1094. static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
  1095. {
  1096. struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
  1097. struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
  1098. struct sec_cipher_req *c_req = &req->c_req;
  1099. u32 bd_param = 0;
  1100. u16 cipher;
  1101. memset(sec_sqe3, 0, sizeof(struct sec_sqe3));
  1102. sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma);
  1103. sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma);
  1104. sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma);
  1105. sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma);
  1106. sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) |
  1107. c_ctx->c_mode;
  1108. sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) <<
  1109. SEC_CKEY_OFFSET_V3);
  1110. if (c_req->encrypt)
  1111. cipher = SEC_CIPHER_ENC;
  1112. else
  1113. cipher = SEC_CIPHER_DEC;
  1114. sec_sqe3->c_icv_key |= cpu_to_le16(cipher);
  1115. /* Set the CTR counter rollover mode to 128-bit */
  1116. sec_sqe3->auth_mac_key = cpu_to_le32((u32)SEC_CTR_CNT_ROLLOVER <<
  1117. SEC_CTR_CNT_OFFSET);
  1118. if (req->use_pbuf) {
  1119. bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3;
  1120. bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3;
  1121. } else {
  1122. bd_param |= SEC_SGL << SEC_SRC_SGL_OFFSET_V3;
  1123. bd_param |= SEC_SGL << SEC_DST_SGL_OFFSET_V3;
  1124. }
  1125. bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3;
  1126. if (req->in_dma != c_req->c_out_dma)
  1127. bd_param |= 0x1 << SEC_DE_OFFSET_V3;
  1128. bd_param |= SEC_BD_TYPE3;
  1129. sec_sqe3->bd_param = cpu_to_le32(bd_param);
  1130. sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len);
  1131. sec_sqe3->tag = cpu_to_le64((unsigned long)req);
  1132. return 0;
  1133. }
  1134. /* increment the counter (128-bit big-endian integer) */
  1135. static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums)
  1136. {
  1137. do {
  1138. --bits;
  1139. nums += counter[bits];
  1140. counter[bits] = nums & BITS_MASK;
  1141. nums >>= BYTE_BITS;
  1142. } while (bits && nums);
  1143. }
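/*
 * Editor's illustration, not part of the original driver: the loop in
 * ctr_iv_inc() is a big-endian add with carry starting at the last
 * byte of the counter.  For example, a counter ending in ... 0x00 0xFF
 * incremented by nums = 2 becomes ... 0x01 0x01: the last byte wraps
 * (0xFF + 2 = 0x101 -> 0x01) and the carry of 1 is added to the
 * preceding byte, after which the loop stops.
 */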
  1144. static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type)
  1145. {
  1146. struct aead_request *aead_req = req->aead_req.aead_req;
  1147. struct skcipher_request *sk_req = req->c_req.sk_req;
  1148. u32 iv_size = req->ctx->c_ctx.ivsize;
  1149. struct scatterlist *sgl;
  1150. unsigned int cryptlen;
  1151. size_t sz;
  1152. u8 *iv;
  1153. if (req->c_req.encrypt)
  1154. sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst;
  1155. else
  1156. sgl = alg_type == SEC_SKCIPHER ? sk_req->src : aead_req->src;
  1157. if (alg_type == SEC_SKCIPHER) {
  1158. iv = sk_req->iv;
  1159. cryptlen = sk_req->cryptlen;
  1160. } else {
  1161. iv = aead_req->iv;
  1162. cryptlen = aead_req->cryptlen;
  1163. }
  1164. if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) {
  1165. sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size,
  1166. cryptlen - iv_size);
  1167. if (unlikely(sz != iv_size))
  1168. dev_err(req->ctx->dev, "copy output iv error!\n");
  1169. } else {
  1170. sz = cryptlen / iv_size;
  1171. if (cryptlen % iv_size)
  1172. sz += 1;
  1173. ctr_iv_inc(iv, iv_size, sz);
  1174. }
  1175. }
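/*
 * Editor's illustration, not part of the original driver: for CBC the
 * next IV is the last ciphertext block, copied out above; for CTR the
 * counter is advanced by the number of blocks processed.  For example,
 * cryptlen = 40 bytes with a 16-byte IV gives sz = 40 / 16 = 2 plus 1
 * for the partial block, so ctr_iv_inc() adds 3 to the counter.
 */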
  1176. static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx,
  1177. struct sec_qp_ctx *qp_ctx)
  1178. {
  1179. struct sec_req *backlog_req = NULL;
  1180. spin_lock_bh(&qp_ctx->req_lock);
  1181. if (ctx->fake_req_limit >=
  1182. atomic_read(&qp_ctx->qp->qp_status.used) &&
  1183. !list_empty(&qp_ctx->backlog)) {
  1184. backlog_req = list_first_entry(&qp_ctx->backlog,
  1185. typeof(*backlog_req), backlog_head);
  1186. list_del(&backlog_req->backlog_head);
  1187. }
  1188. spin_unlock_bh(&qp_ctx->req_lock);
  1189. return backlog_req;
  1190. }
  1191. static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
  1192. int err)
  1193. {
  1194. struct skcipher_request *sk_req = req->c_req.sk_req;
  1195. struct sec_qp_ctx *qp_ctx = req->qp_ctx;
  1196. struct skcipher_request *backlog_sk_req;
  1197. struct sec_req *backlog_req;
  1198. sec_free_req_id(req);
  1199. /* The next IV is output after encryption in CBC/CTR mode */
  1200. if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
  1201. ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt)
  1202. sec_update_iv(req, SEC_SKCIPHER);
  1203. while (1) {
  1204. backlog_req = sec_back_req_clear(ctx, qp_ctx);
  1205. if (!backlog_req)
  1206. break;
  1207. backlog_sk_req = backlog_req->c_req.sk_req;
  1208. skcipher_request_complete(backlog_sk_req, -EINPROGRESS);
  1209. atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
  1210. }
  1211. skcipher_request_complete(sk_req, err);
  1212. }
  1213. static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
  1214. {
  1215. struct aead_request *aead_req = req->aead_req.aead_req;
  1216. struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req);
  1217. size_t authsize = crypto_aead_authsize(tfm);
  1218. struct sec_aead_req *a_req = &req->aead_req;
  1219. struct sec_cipher_req *c_req = &req->c_req;
  1220. u32 data_size = aead_req->cryptlen;
  1221. u8 flage = 0;
  1222. u8 cm, cl;
  1223. /* the specification has been checked in aead_iv_demension_check() */
  1224. cl = c_req->c_ivin[0] + 1;
  1225. c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00;
  1226. memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl);
  1227. c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT;
  1228. /* the low 3 bits are L' */
  1229. flage |= c_req->c_ivin[0] & IV_CL_MASK;
  1230. /* M' occupies bits 3~5, the Adata flag is bit 6 */
  1231. cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM;
  1232. flage |= cm << IV_CM_OFFSET;
  1233. if (aead_req->assoclen)
  1234. flage |= 0x01 << IV_FLAGS_OFFSET;
  1235. memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize);
  1236. a_req->a_ivin[0] = flage;
  1237. /*
  1238. * The last 32 bits hold the counter's initial value, but the
  1239. * nonce occupies the first 16 of them; the trailing 16 bits
  1240. * are filled with the length of the cipher data.
  1241. */
  1242. if (!c_req->encrypt)
  1243. data_size = aead_req->cryptlen - authsize;
  1244. a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] =
  1245. data_size & IV_LAST_BYTE_MASK;
  1246. data_size >>= IV_BYTE_OFFSET;
  1247. a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] =
  1248. data_size & IV_LAST_BYTE_MASK;
  1249. }
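/*
 * Editor's illustration, not part of the original driver: the flag
 * byte built above matches the CCM (RFC 3610) B_0 flags layout.  For
 * an assumed iv[0] = 3 (L' = 3, i.e. a 4-byte length field and an
 * 11-byte nonce), a 16-byte MAC and a non-zero assoclen, it becomes
 *   (3 & IV_CL_MASK) | (((16 - 2) / 2) << IV_CM_OFFSET) | (1 << IV_FLAGS_OFFSET)
 *   = 0x03 | 0x38 | 0x40 = 0x7b,
 * and the last two bytes of the auth IV carry the plaintext length.
 */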
  1250. static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req)
  1251. {
  1252. struct aead_request *aead_req = req->aead_req.aead_req;
  1253. struct sec_aead_req *a_req = &req->aead_req;
  1254. struct sec_cipher_req *c_req = &req->c_req;
  1255. memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize);
  1256. if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) {
  1257. /*
  1258. * CCM 16Byte Cipher_IV: {1B_Flag, 13B_IV, 2B_counter},
  1259. * where the counter must be set to 0x01.
  1260. * CCM 16Byte Auth_IV: {1B_AFlag, 13B_IV, 2B_Ptext_length}
  1261. */
  1262. set_aead_auth_iv(ctx, req);
  1263. } else if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) {
  1264. /* GCM 12Byte Cipher_IV == Auth_IV */
  1265. memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE);
  1266. }
  1267. }

static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir,
				 struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aq = a_req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
	size_t authsize = crypto_aead_authsize(tfm);

	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
	sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)authsize);

	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
	sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr;
	sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
	sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET;

	if (dir)
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	else
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;

	sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen);
	sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0);
	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir,
				    struct sec_req *req, struct sec_sqe3 *sqe3)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct aead_request *aq = a_req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
	size_t authsize = crypto_aead_authsize(tfm);

	/* C_ICV_Len is MAC size, 0x4 ~ 0x10 */
	sqe3->c_icv_key |= cpu_to_le16((u16)authsize << SEC_MAC_OFFSET_V3);

	/* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */
	sqe3->a_key_addr = sqe3->c_key_addr;
	sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma);
	sqe3->auth_mac_key |= SEC_NO_AUTH;

	if (dir)
		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
	else
		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;

	sqe3->a_len_key = cpu_to_le32(aq->assoclen);
	sqe3->auth_src_offset = cpu_to_le16(0x0);
	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);
	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
}
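
/*
 * Fill the authentication half of a type-2 BD for the authenc() algorithms:
 * auth key address, MAC/key lengths and hash algorithm, the MAC type and
 * cipher-auth ordering chosen by direction, the total auth length
 * (assoclen + cryptlen) and the output MAC address.
 */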
static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir,
				struct sec_req *req, struct sec_sqe *sec_sqe)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
	size_t authsize = crypto_aead_authsize(tfm);

	sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sec_sqe->type2.mac_key_alg = cpu_to_le32(authsize / SEC_SQE_LEN_RATE);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)((ctx->a_key_len) /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET);

	sec_sqe->type2.mac_key_alg |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET);

	if (dir) {
		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET;
		sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH;
	} else {
		sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET;
		sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER;
	}

	sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen);

	sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe *sec_sqe = &req->sec_sqe;
	int ret;

	ret = sec_skcipher_bd_fill(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd fill is error!\n");
		return ret;
	}

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
		sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe);
	else
		sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe);

	return 0;
}

static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir,
				   struct sec_req *req, struct sec_sqe3 *sqe3)
{
	struct sec_aead_req *a_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct aead_request *aq = a_req->aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(aq);
	size_t authsize = crypto_aead_authsize(tfm);

	sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(authsize /
			SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->a_key_len /
			SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3);

	sqe3->auth_mac_key |=
			cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3);

	if (dir) {
		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1);
		sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3;
	} else {
		sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2);
		sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3;
	}

	sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen);

	sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen);

	sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma);
}

static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_auth_ctx *auth_ctx = &ctx->a_ctx;
	struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3;
	int ret;

	ret = sec_skcipher_bd_fill_v3(ctx, req);
	if (unlikely(ret)) {
		dev_err(ctx->dev, "skcipher bd3 fill is error!\n");
		return ret;
	}

	if (ctx->c_ctx.c_mode == SEC_CMODE_CCM ||
	    ctx->c_ctx.c_mode == SEC_CMODE_GCM)
		sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt,
					req, sec_sqe3);
	else
		sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt,
				       req, sec_sqe3);

	return 0;
}
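
/*
 * AEAD completion handler: refresh the CBC IV after a successful encryption,
 * copy the computed MAC to the end of the destination data, release the
 * request id, restart any backlogged requests on this queue pair, then
 * complete the original aead_request.
 */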
static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
{
	struct aead_request *a_req = req->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct sec_aead_req *aead_req = &req->aead_req;
	struct sec_cipher_req *c_req = &req->c_req;
	struct sec_qp_ctx *qp_ctx = req->qp_ctx;
	struct aead_request *backlog_aead_req;
	struct sec_req *backlog_req;
	size_t sz;

	if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt)
		sec_update_iv(req, SEC_AEAD);

	/* Copy output mac */
	if (!err && c_req->encrypt) {
		struct scatterlist *sgl = a_req->dst;

		sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac,
					  authsize, a_req->cryptlen + a_req->assoclen);
		if (unlikely(sz != authsize)) {
			dev_err(c->dev, "copy out mac err!\n");
			err = -EINVAL;
		}
	}

	sec_free_req_id(req);

	while (1) {
		backlog_req = sec_back_req_clear(c, qp_ctx);
		if (!backlog_req)
			break;

		backlog_aead_req = backlog_req->aead_req.aead_req;
		aead_request_complete(backlog_aead_req, -EINPROGRESS);
		atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
	}

	aead_request_complete(a_req, err);
}

static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
{
	sec_free_req_id(req);
	sec_free_queue_id(ctx, req);
}

static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_qp_ctx *qp_ctx;
	int queue_id;

	/* To load balance */
	queue_id = sec_alloc_queue_id(ctx, req);
	qp_ctx = &ctx->qp_ctx[queue_id];

	req->req_id = sec_alloc_req_id(req, qp_ctx);
	if (unlikely(req->req_id < 0)) {
		sec_free_queue_id(ctx, req);
		return req->req_id;
	}

	return 0;
}
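
/*
 * Common request path shared by the skcipher and AEAD front ends: allocate
 * a queue and request id, map/transfer the buffers, save the chaining IV
 * when decrypting in CBC/CTR mode, and post the BD to the hardware.  On
 * failure everything is unwound and, for CBC decryption, the caller's IV
 * is restored.
 */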
static int sec_process(struct sec_ctx *ctx, struct sec_req *req)
{
	struct sec_cipher_req *c_req = &req->c_req;
	int ret;

	ret = sec_request_init(ctx, req);
	if (unlikely(ret))
		return ret;

	ret = sec_request_transfer(ctx, req);
	if (unlikely(ret))
		goto err_uninit_req;

	/* Output IV before decryption for chaining modes (CBC/CTR) */
	if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC ||
	    ctx->c_ctx.c_mode == SEC_CMODE_CTR))
		sec_update_iv(req, ctx->alg_type);

	ret = ctx->req_op->bd_send(ctx, req);
	if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) ||
		(ret == -EBUSY && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)))) {
		dev_err_ratelimited(ctx->dev, "send sec request failed!\n");
		goto err_send_req;
	}

	return ret;

err_send_req:
	/* On failure, restore the user's IV */
	if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) {
		if (ctx->alg_type == SEC_SKCIPHER)
			memcpy(req->c_req.sk_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
		else
			memcpy(req->aead_req.aead_req->iv, c_req->c_ivin,
			       ctx->c_ctx.ivsize);
	}

	sec_request_untransfer(ctx, req);
err_uninit_req:
	sec_request_uninit(ctx, req);
	return ret;
}

static const struct sec_req_op sec_skcipher_req_ops = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_set_iv,
	.bd_fill	= sec_aead_bd_fill,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_skcipher_req_ops_v3 = {
	.buf_map	= sec_skcipher_sgl_map,
	.buf_unmap	= sec_skcipher_sgl_unmap,
	.do_transfer	= sec_skcipher_copy_iv,
	.bd_fill	= sec_skcipher_bd_fill_v3,
	.bd_send	= sec_bd_send,
	.callback	= sec_skcipher_callback,
	.process	= sec_process,
};

static const struct sec_req_op sec_aead_req_ops_v3 = {
	.buf_map	= sec_aead_sgl_map,
	.buf_unmap	= sec_aead_sgl_unmap,
	.do_transfer	= sec_aead_set_iv,
	.bd_fill	= sec_aead_bd_fill_v3,
	.bd_send	= sec_bd_send,
	.callback	= sec_aead_callback,
	.process	= sec_process,
};
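
/* tfm init: select type-2 BDs and ops for pre-V3 hardware, type-3 for V3+ */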
static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm)
{
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = sec_skcipher_init(tfm);
	if (ret)
		return ret;

	if (ctx->sec->qm.ver < QM_HW_V3) {
		ctx->type_supported = SEC_BD_TYPE2;
		ctx->req_op = &sec_skcipher_req_ops;
	} else {
		ctx->type_supported = SEC_BD_TYPE3;
		ctx->req_op = &sec_skcipher_req_ops_v3;
	}

	return ret;
}

static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
{
	sec_skcipher_uninit(tfm);
}
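
/*
 * Base AEAD tfm setup: validate the IV size, initialise the common context,
 * choose the BD type and request ops by hardware version, then set up the
 * auth and cipher sub-contexts (unwound in reverse order on error).
 */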
static int sec_aead_init(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	crypto_aead_set_reqsize(tfm, sizeof(struct sec_req));
	ctx->alg_type = SEC_AEAD;
	ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm);
	if (ctx->c_ctx.ivsize < SEC_AIV_SIZE ||
	    ctx->c_ctx.ivsize > SEC_IV_SIZE) {
		pr_err("get error aead iv size!\n");
		return -EINVAL;
	}

	ret = sec_ctx_base_init(ctx);
	if (ret)
		return ret;

	if (ctx->sec->qm.ver < QM_HW_V3) {
		ctx->type_supported = SEC_BD_TYPE2;
		ctx->req_op = &sec_aead_req_ops;
	} else {
		ctx->type_supported = SEC_BD_TYPE3;
		ctx->req_op = &sec_aead_req_ops_v3;
	}

	ret = sec_auth_init(ctx);
	if (ret)
		goto err_auth_init;

	ret = sec_cipher_init(ctx);
	if (ret)
		goto err_cipher_init;

	return ret;

err_cipher_init:
	sec_auth_uninit(ctx);
err_auth_init:
	sec_ctx_base_uninit(ctx);
	return ret;
}

static void sec_aead_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	sec_cipher_uninit(ctx);
	sec_auth_uninit(ctx);
	sec_ctx_base_uninit(ctx);
}
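
/*
 * authenc() tfm init: besides the base AEAD setup, allocate a shash tfm for
 * the named hash and an async fallback AEAD tfm for requests the hardware
 * cannot handle.
 */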
static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	const char *aead_name = alg->base.cra_name;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		pr_err("hisi_sec2: aead init error!\n");
		return ret;
	}

	a_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(a_ctx->hash_tfm)) {
		dev_err(ctx->dev, "aead alloc shash error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(a_ctx->hash_tfm);
	}

	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
						     CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);
	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
		crypto_free_shash(ctx->a_ctx.hash_tfm);
		sec_aead_exit(tfm);
		return PTR_ERR(a_ctx->fallback_aead_tfm);
	}

	return 0;
}

static void sec_aead_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
	crypto_free_shash(ctx->a_ctx.hash_tfm);
	sec_aead_exit(tfm);
}

static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	const char *aead_name = alg->base.cra_name;
	int ret;

	ret = sec_aead_init(tfm);
	if (ret) {
		dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n");
		return ret;
	}

	a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0,
						     CRYPTO_ALG_NEED_FALLBACK |
						     CRYPTO_ALG_ASYNC);
	if (IS_ERR(a_ctx->fallback_aead_tfm)) {
		dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n");
		sec_aead_exit(tfm);
		return PTR_ERR(a_ctx->fallback_aead_tfm);
	}

	return 0;
}

static void sec_aead_xcm_ctx_exit(struct crypto_aead *tfm)
{
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->a_ctx.fallback_aead_tfm);
	sec_aead_exit(tfm);
}

static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha1");
}

static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha256");
}

static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
{
	return sec_aead_ctx_init(tfm, "sha512");
}
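
/*
 * Mode-specific length checks: XTS needs at least one AES block, ECB/CBC
 * need block-aligned input, and CTR is only offloaded on V3 hardware.
 */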
static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
				       struct sec_req *sreq)
{
	u32 cryptlen = sreq->c_req.sk_req->cryptlen;
	struct device *dev = ctx->dev;
	u8 c_mode = ctx->c_ctx.c_mode;
	int ret = 0;

	switch (c_mode) {
	case SEC_CMODE_XTS:
		if (unlikely(cryptlen < AES_BLOCK_SIZE)) {
			dev_err(dev, "skcipher XTS mode input length error!\n");
			ret = -EINVAL;
		}
		break;
	case SEC_CMODE_ECB:
	case SEC_CMODE_CBC:
		if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher AES input length error!\n");
			ret = -EINVAL;
		}
		break;
	case SEC_CMODE_CTR:
		if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
			dev_err(dev, "skcipher HW version error!\n");
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct skcipher_request *sk_req = sreq->c_req.sk_req;
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!sk_req->src || !sk_req->dst ||
		     sk_req->cryptlen > MAX_INPUT_DATA_LEN)) {
		dev_err(dev, "skcipher input param error!\n");
		return -EINVAL;
	}

	sreq->c_req.c_len = sk_req->cryptlen;

	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	if (c_alg == SEC_CALG_3DES) {
		if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) {
			dev_err(dev, "skcipher 3des input length error!\n");
			return -EINVAL;
		}
		return 0;
	} else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) {
		return sec_skcipher_cryptlen_check(ctx, sreq);
	}

	dev_err(dev, "skcipher algorithm error!\n");

	return -EINVAL;
}
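
/*
 * Software fallback: run the request synchronously on the fallback skcipher
 * tfm when the hardware path cannot be used for this context.
 */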
static int sec_skcipher_soft_crypto(struct sec_ctx *ctx,
				    struct skcipher_request *sreq, bool encrypt)
{
	struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm);
	struct device *dev = ctx->dev;
	int ret;

	if (!c_ctx->fbtfm) {
		dev_err_ratelimited(dev, "the soft tfm isn't supported in the current system.\n");
		return -EINVAL;
	}

	skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm);

	/* the software fallback must run crypto in synchronous mode */
	skcipher_request_set_callback(subreq, sreq->base.flags,
				      NULL, NULL);
	skcipher_request_set_crypt(subreq, sreq->src, sreq->dst,
				   sreq->cryptlen, sreq->iv);
	if (encrypt)
		ret = crypto_skcipher_encrypt(subreq);
	else
		ret = crypto_skcipher_decrypt(subreq);

	skcipher_request_zero(subreq);

	return ret;
}

static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
	struct sec_req *req = skcipher_request_ctx(sk_req);
	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	if (!sk_req->cryptlen) {
		if (ctx->c_ctx.c_mode == SEC_CMODE_XTS)
			return -EINVAL;
		return 0;
	}

	req->flag = sk_req->base.flags;
	req->c_req.sk_req = sk_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;

	ret = sec_skcipher_param_check(ctx, req);
	if (unlikely(ret))
		return -EINVAL;

	if (unlikely(ctx->c_ctx.fallback))
		return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);

	return ctx->req_op->process(ctx, req);
}

static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, true);
}

static int sec_skcipher_decrypt(struct skcipher_request *sk_req)
{
	return sec_skcipher_crypto(sk_req, false);
}

#define SEC_SKCIPHER_ALG(sec_cra_name, sec_set_key, \
	sec_min_key_size, sec_max_key_size, blk_size, iv_size)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC |\
		 CRYPTO_ALG_NEED_FALLBACK,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = sec_skcipher_ctx_init,\
	.exit = sec_skcipher_ctx_exit,\
	.setkey = sec_set_key,\
	.decrypt = sec_skcipher_decrypt,\
	.encrypt = sec_skcipher_encrypt,\
	.min_keysize = sec_min_key_size,\
	.max_keysize = sec_max_key_size,\
	.ivsize = iv_size,\
}

static struct sec_skcipher sec_skciphers[] = {
	{
		.alg_msk = BIT(0),
		.alg = SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0),
	},
	{
		.alg_msk = BIT(1),
		.alg = SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(2),
		.alg = SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr, AES_MIN_KEY_SIZE,
					AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(3),
		.alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, SEC_XTS_MIN_KEY_SIZE,
					SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(12),
		.alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, AES_MIN_KEY_SIZE,
					AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(13),
		.alg = SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, AES_MIN_KEY_SIZE,
					AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(14),
		.alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, SEC_XTS_MIN_KEY_SIZE,
					SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(23),
		.alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE,
					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0),
	},
	{
		.alg_msk = BIT(24),
		.alg = SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, SEC_DES3_3KEY_SIZE,
					SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE,
					DES3_EDE_BLOCK_SIZE),
	},
};
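
/*
 * Check the CCM length field L' encoded in iv[0]: it must be in the
 * supported range and large enough to represent the request's cryptlen.
 */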
static int aead_iv_demension_check(struct aead_request *aead_req)
{
	u8 cl;

	cl = aead_req->iv[0] + 1;
	if (cl < IV_CL_MIN || cl > IV_CL_MAX)
		return -EINVAL;

	if (cl < IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl))
		return -EOVERFLOW;

	return 0;
}

static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t sz = crypto_aead_authsize(tfm);
	u8 c_mode = ctx->c_ctx.c_mode;
	struct device *dev = ctx->dev;
	int ret;

	/* Hardware does not handle cases where authsize is less than 4 bytes */
	if (unlikely(sz < MIN_MAC_LEN)) {
		sreq->aead_req.fallback = true;
		return -EINVAL;
	}

	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
		     req->assoclen > SEC_MAX_AAD_LEN)) {
		dev_err(dev, "aead input spec error!\n");
		return -EINVAL;
	}

	if (c_mode == SEC_CMODE_CCM) {
		if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) {
			dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n");
			return -EINVAL;
		}

		ret = aead_iv_demension_check(req);
		if (ret) {
			dev_err(dev, "aead input iv param error!\n");
			return ret;
		}
	}

	if (sreq->c_req.encrypt)
		sreq->c_req.c_len = req->cryptlen;
	else
		sreq->c_req.c_len = req->cryptlen - sz;

	if (c_mode == SEC_CMODE_CBC) {
		if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
			dev_err(dev, "aead crypto length error!\n");
			return -EINVAL;
		}
	}

	return 0;
}

static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
{
	struct aead_request *req = sreq->aead_req.aead_req;
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	size_t authsize = crypto_aead_authsize(tfm);
	struct device *dev = ctx->dev;
	u8 c_alg = ctx->c_ctx.c_alg;

	if (unlikely(!req->src || !req->dst)) {
		dev_err(dev, "aead input param error!\n");
		return -EINVAL;
	}

	if (ctx->sec->qm.ver == QM_HW_V2) {
		if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
		    req->cryptlen <= authsize))) {
			sreq->aead_req.fallback = true;
			return -EINVAL;
		}
	}

	/* Support AES or SM4 */
	if (unlikely(c_alg != SEC_CALG_AES && c_alg != SEC_CALG_SM4)) {
		dev_err(dev, "aead crypto alg error!\n");
		return -EINVAL;
	}

	if (unlikely(sec_aead_spec_check(ctx, sreq)))
		return -EINVAL;

	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
		SEC_PBUF_SZ)
		sreq->use_pbuf = true;
	else
		sreq->use_pbuf = false;

	return 0;
}
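
/*
 * AEAD software fallback: mirror the request onto the fallback tfm and run
 * it there, used when the parameter checks flag a request the hardware
 * cannot process (e.g. a MAC shorter than the hardware minimum).
 */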
static int sec_aead_soft_crypto(struct sec_ctx *ctx,
				struct aead_request *aead_req,
				bool encrypt)
{
	struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
	struct aead_request *subreq;
	int ret;

	subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL);
	if (!subreq)
		return -ENOMEM;

	aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm);
	aead_request_set_callback(subreq, aead_req->base.flags,
				  aead_req->base.complete, aead_req->base.data);
	aead_request_set_crypt(subreq, aead_req->src, aead_req->dst,
			       aead_req->cryptlen, aead_req->iv);
	aead_request_set_ad(subreq, aead_req->assoclen);

	if (encrypt)
		ret = crypto_aead_encrypt(subreq);
	else
		ret = crypto_aead_decrypt(subreq);

	aead_request_free(subreq);

	return ret;
}

static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
	struct sec_req *req = aead_request_ctx(a_req);
	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
	int ret;

	req->flag = a_req->base.flags;
	req->aead_req.aead_req = a_req;
	req->c_req.encrypt = encrypt;
	req->ctx = ctx;
	req->aead_req.fallback = false;

	ret = sec_aead_param_check(ctx, req);
	if (unlikely(ret)) {
		if (req->aead_req.fallback)
			return sec_aead_soft_crypto(ctx, a_req, encrypt);
		return -EINVAL;
	}

	return ctx->req_op->process(ctx, req);
}

static int sec_aead_encrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, true);
}

static int sec_aead_decrypt(struct aead_request *a_req)
{
	return sec_aead_crypto(a_req, false);
}

#define SEC_AEAD_ALG(sec_cra_name, sec_set_key, ctx_init,\
	ctx_exit, blk_size, iv_size, max_authsize)\
{\
	.base = {\
		.cra_name = sec_cra_name,\
		.cra_driver_name = "hisi_sec_"sec_cra_name,\
		.cra_priority = SEC_PRIORITY,\
		.cra_flags = CRYPTO_ALG_ASYNC |\
		 CRYPTO_ALG_NEED_FALLBACK,\
		.cra_blocksize = blk_size,\
		.cra_ctxsize = sizeof(struct sec_ctx),\
		.cra_module = THIS_MODULE,\
	},\
	.init = ctx_init,\
	.exit = ctx_exit,\
	.setkey = sec_set_key,\
	.setauthsize = sec_aead_setauthsize,\
	.decrypt = sec_aead_decrypt,\
	.encrypt = sec_aead_encrypt,\
	.ivsize = iv_size,\
	.maxauthsize = max_authsize,\
}

static struct sec_aead sec_aeads[] = {
	{
		.alg_msk = BIT(6),
		.alg = SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(7),
		.alg = SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(17),
		.alg = SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(18),
		.alg = SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init,
				    sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE,
				    AES_BLOCK_SIZE),
	},
	{
		.alg_msk = BIT(43),
		.alg = SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1,
				    sec_aead_sha1_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE, SHA1_DIGEST_SIZE),
	},
	{
		.alg_msk = BIT(44),
		.alg = SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256,
				    sec_aead_sha256_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE, SHA256_DIGEST_SIZE),
	},
	{
		.alg_msk = BIT(45),
		.alg = SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512,
				    sec_aead_sha512_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE,
				    AES_BLOCK_SIZE, SHA512_DIGEST_SIZE),
	},
};

static void sec_unregister_skcipher(u64 alg_mask, int end)
{
	int i;

	for (i = 0; i < end; i++)
		if (sec_skciphers[i].alg_msk & alg_mask)
			crypto_unregister_skcipher(&sec_skciphers[i].alg);
}

static int sec_register_skcipher(u64 alg_mask)
{
	int i, ret, count;

	count = ARRAY_SIZE(sec_skciphers);

	for (i = 0; i < count; i++) {
		if (!(sec_skciphers[i].alg_msk & alg_mask))
			continue;

		ret = crypto_register_skcipher(&sec_skciphers[i].alg);
		if (ret)
			goto err;
	}

	return 0;

err:
	sec_unregister_skcipher(alg_mask, i);

	return ret;
}

static void sec_unregister_aead(u64 alg_mask, int end)
{
	int i;

	for (i = 0; i < end; i++)
		if (sec_aeads[i].alg_msk & alg_mask)
			crypto_unregister_aead(&sec_aeads[i].alg);
}

static int sec_register_aead(u64 alg_mask)
{
	int i, ret, count;

	count = ARRAY_SIZE(sec_aeads);

	for (i = 0; i < count; i++) {
		if (!(sec_aeads[i].alg_msk & alg_mask))
			continue;

		ret = crypto_register_aead(&sec_aeads[i].alg);
		if (ret)
			goto err;
	}

	return 0;

err:
	sec_unregister_aead(alg_mask, i);

	return ret;
}
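
/*
 * Register the algorithms advertised by the device's capability bitmap with
 * the crypto API.  A reference count keeps them registered while at least
 * one SEC device is available; the last device to go unregisters everything.
 */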
int sec_register_to_crypto(struct hisi_qm *qm)
{
	u64 alg_mask;
	int ret = 0;

	alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
				      SEC_DRV_ALG_BITMAP_LOW_IDX);

	mutex_lock(&sec_algs_lock);
	if (sec_available_devs) {
		sec_available_devs++;
		goto unlock;
	}

	ret = sec_register_skcipher(alg_mask);
	if (ret)
		goto unlock;

	ret = sec_register_aead(alg_mask);
	if (ret)
		goto unreg_skcipher;

	sec_available_devs++;
	mutex_unlock(&sec_algs_lock);

	return 0;

unreg_skcipher:
	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));
unlock:
	mutex_unlock(&sec_algs_lock);
	return ret;
}

void sec_unregister_from_crypto(struct hisi_qm *qm)
{
	u64 alg_mask;

	alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH_IDX,
				      SEC_DRV_ALG_BITMAP_LOW_IDX);

	mutex_lock(&sec_algs_lock);
	if (--sec_available_devs)
		goto unlock;

	sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads));
	sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers));

unlock:
	mutex_unlock(&sec_algs_lock);
}