caamalg_qi.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale FSL CAAM support for crypto API over QI backend.
 * Based on caamalg.c
 *
 * Copyright 2013-2016 Freescale Semiconductor, Inc.
 * Copyright 2016-2019 NXP
 */

#include "compat.h"
#include "ctrl.h"
#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "error.h"
#include "sg_sw_qm.h"
#include "key_gen.h"
#include "qi.h"
#include "jr.h"
#include "caamalg_desc.h"
#include <crypto/xts.h>
#include <linux/unaligned.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/string.h>

/*
 * crypto alg
 */
#define CAAM_CRA_PRIORITY		2000
/* max key is sum of AES_MAX_KEY_SIZE, max split key size */
#define CAAM_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + \
					 SHA512_DIGEST_SIZE * 2)

#define DESC_MAX_USED_BYTES		(DESC_QI_AEAD_GIVENC_LEN + \
					 CAAM_MAX_KEY_SIZE)
#define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
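/*
 * DESC_MAX_USED_LEN is the same limit expressed in 32-bit descriptor words
 * (CAAM_CMD_SZ bytes each); it sizes the per-session shared descriptor
 * buffers in struct caam_ctx below.
 */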
struct caam_alg_entry {
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};

struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};

struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};

/*
 * per-session context
 */
struct caam_ctx {
	struct device *jrdev;
	u32 sh_desc_enc[DESC_MAX_USED_LEN];
	u32 sh_desc_dec[DESC_MAX_USED_LEN];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	struct device *qidev;
	spinlock_t lock;	/* Protects multiple init of driver context */
	struct caam_drv_ctx *drv_ctx[NUM_OP];
	bool xts_key_fallback;
	struct crypto_skcipher *fallback;
};

struct caam_skcipher_req_ctx {
	struct skcipher_request fallback_req;
};
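/*
 * The per-request context only carries the sub-request used for the software
 * fallback of xts(aes) (see skcipher_crypt()); the other algorithms never
 * touch it.
 */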
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/*
	 * RFC3686 specific:
	 *	CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}

	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;
	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;

	if (alg->caam.geniv)
		goto skip_enc;

	/* aead_encrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_ENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, is_rfc3686, nonce,
			       ctx1_iv_off, true, ctrlpriv->era);

skip_enc:
	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       ctrlpriv->era);

	if (!alg->caam.geniv)
		goto skip_givenc;

	/* aead_givencrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);

	cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata,
				  ivsize, ctx->authsize, is_rfc3686, nonce,
				  ctx1_iv_off, true, ctrlpriv->era);

skip_givenc:
	return 0;
}
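/*
 * Note on the inline queries above: desc_inline_query() reports, per key,
 * whether it still fits inline in the shared descriptor: bit 0 of inl_mask
 * covers the (split) authentication key, bit 1 the cipher key. A key that
 * does not fit is referenced through its DMA address instead.
 */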
static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}

static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	struct crypto_authenc_keys keys;
	int ret = 0;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	/*
	 * If DKP is supported, use it in the shared descriptor to generate
	 * the split key.
	 */
	if (ctrlpriv->era >= 6) {
		ctx->adata.keylen = keys.authkeylen;
		ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
						      OP_ALG_ALGSEL_MASK);

		if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
			goto badkey;

		memcpy(ctx->key, keys.authkey, keys.authkeylen);
		memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey,
		       keys.enckeylen);
		dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
					   ctx->adata.keylen_pad +
					   keys.enckeylen, ctx->dir);
		goto skip_split_key;
	}

	ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey,
			    keys.authkeylen, CAAM_MAX_KEY_SIZE -
			    keys.enckeylen);
	if (ret)
		goto badkey;

	/* postpend encryption key to auth split key */
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->adata.keylen_pad + keys.enckeylen,
				   ctx->dir);

	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

skip_split_key:
	ctx->cdata.keylen = keys.enckeylen;

	ret = aead_set_sh_desc(aead);
	if (ret)
		goto badkey;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			goto badkey;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			goto badkey;
		}
	}

	memzero_explicit(&keys, sizeof(keys));
	return ret;
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		return err;

	err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
	      aead_setkey(aead, key, keylen);

	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static int gcm_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
		ctx->cdata.key_inline = true;
		ctx->cdata.key_virt = ctx->key;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
			      ctx->authsize, true);

	return 0;
}

static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
	int err;

	err = crypto_gcm_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	gcm_set_sh_desc(authenc);

	return 0;
}
static int gcm_setkey(struct crypto_aead *aead,
		      const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	ret = aes_check_keylen(keylen);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen,
				   ctx->dir);
	ctx->cdata.keylen = keylen;

	ret = gcm_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}

static int rfc4106_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4106_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
	int err;

	err = crypto_rfc4106_check_authsize(authsize);
	if (err)
		return err;

	ctx->authsize = authsize;
	rfc4106_set_sh_desc(authenc);

	return 0;
}

static int rfc4106_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->cdata.keylen, ctx->dir);

	ret = rfc4106_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}
static int rfc4543_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
			ctx->cdata.keylen;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	ctx->cdata.key_virt = ctx->key;

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	/*
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
	if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
		ctx->cdata.key_inline = true;
	} else {
		ctx->cdata.key_inline = false;
		ctx->cdata.key_dma = ctx->key_dma;
	}

	cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				  ctx->authsize, true);

	return 0;
}

static int rfc4543_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);

	if (authsize != 16)
		return -EINVAL;

	ctx->authsize = authsize;
	rfc4543_set_sh_desc(authenc);

	return 0;
}

static int rfc4543_setkey(struct crypto_aead *aead,
			  const u8 *key, unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *jrdev = ctx->jrdev;
	int ret;

	ret = aes_check_keylen(keylen - 4);
	if (ret)
		return ret;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	memcpy(ctx->key, key, keylen);
	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
	dma_sync_single_for_device(jrdev->parent, ctx->key_dma,
				   ctx->cdata.keylen, ctx->dir);

	ret = rfc4543_set_sh_desc(aead);
	if (ret)
		return ret;

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return ret;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return ret;
		}
	}

	return 0;
}
static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			   unsigned int keylen, const u32 ctx1_iv_off)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct caam_skcipher_alg *alg =
		container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
			     skcipher);
	struct device *jrdev = ctx->jrdev;
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
	const bool is_rfc3686 = alg->caam.rfc3686;
	int ret = 0;

	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* skcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);
	cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
				   is_rfc3686, ctx1_iv_off);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return -EINVAL;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return -EINVAL;
		}
	}

	return ret;
}

static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	int err;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, 0);
}

static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	keylen -= CTR_RFC3686_NONCE_SIZE;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128bits (16bytes)
	 * CONTEXT1[255:128] = IV
	 */
	ctx1_iv_off = 16;

	err = aes_check_keylen(keylen);
	if (err)
		return err;

	return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
}

static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
				const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des3_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}

static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
			       const u8 *key, unsigned int keylen)
{
	return verify_skcipher_des_key(skcipher, key) ?:
	       skcipher_setkey(skcipher, key, keylen, 0);
}
static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
			       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *jrdev = ctx->jrdev;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent);
	int ret = 0;
	int err;

	err = xts_verify_key(skcipher, key, keylen);
	if (err) {
		dev_dbg(jrdev, "key size mismatch\n");
		return err;
	}

	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
		ctx->xts_key_fallback = true;

	if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) {
		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
		if (err)
			return err;
	}

	ctx->cdata.keylen = keylen;
	ctx->cdata.key_virt = key;
	ctx->cdata.key_inline = true;

	/* xts skcipher encrypt, decrypt shared descriptors */
	cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
	cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata);

	/* Now update the driver contexts with the new shared descriptor */
	if (ctx->drv_ctx[ENCRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT],
					  ctx->sh_desc_enc);
		if (ret) {
			dev_err(jrdev, "driver enc context update failed\n");
			return -EINVAL;
		}
	}

	if (ctx->drv_ctx[DECRYPT]) {
		ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT],
					  ctx->sh_desc_dec);
		if (ret) {
			dev_err(jrdev, "driver dec context update failed\n");
			return -EINVAL;
		}
	}

	return ret;
}
/*
 * aead_edesc - s/w-extended aead descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @assoclen: associated data length, in CAAM endianness
 * @assoclen_dma: bus physical mapped address of req->assoclen
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct aead_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	unsigned int assoclen;
	dma_addr_t assoclen_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[];
};

/*
 * skcipher_edesc - s/w-extended skcipher descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @iv_dma: dma address of iv for checking continuity and link table
 * @qm_sg_bytes: length of dma mapped h/w link table
 * @qm_sg_dma: bus physical mapped address of h/w link table
 * @drv_req: driver-specific request structure
 * @sgt: the h/w link table, followed by IV
 */
struct skcipher_edesc {
	int src_nents;
	int dst_nents;
	dma_addr_t iv_dma;
	int qm_sg_bytes;
	dma_addr_t qm_sg_dma;
	struct caam_drv_req drv_req;
	struct qm_sg_entry sgt[];
};
static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
					enum optype type)
{
	/*
	 * This function is called on the fast path with values of 'type'
	 * known at compile time. Invalid arguments are not expected and
	 * thus no checks are made.
	 */
	struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type];
	u32 *desc;

	if (unlikely(!drv_ctx)) {
		spin_lock(&ctx->lock);

		/* Read again to check if some other core init drv_ctx */
		drv_ctx = ctx->drv_ctx[type];
		if (!drv_ctx) {
			int cpu;

			if (type == ENCRYPT)
				desc = ctx->sh_desc_enc;
			else /* (type == DECRYPT) */
				desc = ctx->sh_desc_dec;

			cpu = smp_processor_id();
			drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
			if (!IS_ERR(drv_ctx))
				drv_ctx->op_type = type;

			ctx->drv_ctx[type] = drv_ctx;
		}

		spin_unlock(&ctx->lock);
	}

	return drv_ctx;
}
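/*
 * get_drv_ctx() above relies on double-checked locking: the common case
 * (driver context already created) takes no lock, and ctx->lock only
 * serializes first-time creation so that two CPUs cannot both call
 * caam_drv_ctx_init() for the same operation type.
 */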
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
	if (qm_sg_bytes)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}

static void aead_unmap(struct device *dev,
		       struct aead_edesc *edesc,
		       struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	int ivsize = crypto_aead_ivsize(aead);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
	dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}

static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
			   struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	int ivsize = crypto_skcipher_ivsize(skcipher);

	caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
		   edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
		   edesc->qm_sg_bytes);
}

static void aead_done(struct caam_drv_req *drv_req, u32 status)
{
	struct device *qidev;
	struct aead_edesc *edesc;
	struct aead_request *aead_req = drv_req->app_ctx;
	struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
	struct caam_ctx *caam_ctx = crypto_aead_ctx_dma(aead);
	int ecode = 0;

	qidev = caam_ctx->qidev;

	if (unlikely(status))
		ecode = caam_jr_strstatus(qidev, status);

	edesc = container_of(drv_req, typeof(*edesc), drv_req);
	aead_unmap(qidev, edesc, aead_req);

	aead_request_complete(aead_req, ecode);
	qi_cache_free(edesc);
}
/*
 * allocate and map the aead extended descriptor
 */
static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
					   bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	int src_len, dst_len = 0;
	struct aead_edesc *edesc;
	dma_addr_t qm_sg_dma, iv_dma = 0;
	int ivsize = 0;
	unsigned int authsize = ctx->authsize;
	int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes;
	int in_len, out_len;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;

	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
	if (IS_ERR(drv_ctx))
		return ERR_CAST(drv_ctx);

	/* allocate space for base edesc and hw desc commands, link tables */
	edesc = qi_cache_alloc(flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	if (likely(req->src == req->dst)) {
		src_len = req->assoclen + req->cryptlen +
			  (encrypt ? authsize : 0);

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		src_len = req->assoclen + req->cryptlen;
		dst_len = src_len + (encrypt ? authsize : (-authsize));

		src_nents = sg_nents_for_len(req->src, src_len);
		if (unlikely(src_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
				src_len);
			qi_cache_free(edesc);
			return ERR_PTR(src_nents);
		}

		dst_nents = sg_nents_for_len(req->dst, dst_len);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				dst_len);
			qi_cache_free(edesc);
			return ERR_PTR(dst_nents);
		}

		if (src_nents) {
			mapped_src_nents = dma_map_sg(qidev, req->src,
						      src_nents, DMA_TO_DEVICE);
			if (unlikely(!mapped_src_nents)) {
				dev_err(qidev, "unable to map source\n");
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_src_nents = 0;
		}

		if (dst_nents) {
			mapped_dst_nents = dma_map_sg(qidev, req->dst,
						      dst_nents,
						      DMA_FROM_DEVICE);
			if (unlikely(!mapped_dst_nents)) {
				dev_err(qidev, "unable to map destination\n");
				dma_unmap_sg(qidev, req->src, src_nents,
					     DMA_TO_DEVICE);
				qi_cache_free(edesc);
				return ERR_PTR(-ENOMEM);
			}
		} else {
			mapped_dst_nents = 0;
		}
	}

	if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
		ivsize = crypto_aead_ivsize(aead);

	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (src != dst && output S/G)
	 *      pad output S/G, if needed
	 * else if (src == dst && S/G)
	 *      overlapping S/Gs; pad one of them
	 * else if (input S/G) ...
	 *      pad input S/G, if needed
	 */
	qm_sg_ents = 1 + !!ivsize + mapped_src_nents;
	if (mapped_dst_nents > 1)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents);
	else if ((req->src == req->dst) && (mapped_src_nents > 1))
		qm_sg_ents = max(pad_sg_nents(qm_sg_ents),
				 1 + !!ivsize + pad_sg_nents(mapped_src_nents));
	else
		qm_sg_ents = pad_sg_nents(qm_sg_ents);

	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_ents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	if (ivsize) {
		u8 *iv = (u8 *)(sg_table + qm_sg_ents);

		/* Make sure IV is located in a DMAable area */
		memcpy(iv, req->iv, ivsize);

		iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
		if (dma_mapping_error(qidev, iv_dma)) {
			dev_err(qidev, "unable to map IV\n");
			caam_unmap(qidev, req->src, req->dst, src_nents,
				   dst_nents, 0, 0, DMA_NONE, 0, 0);
			qi_cache_free(edesc);
			return ERR_PTR(-ENOMEM);
		}
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = aead_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	edesc->assoclen = cpu_to_caam32(req->assoclen);
	edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4,
					     DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
		dev_err(qidev, "unable to map assoclen\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
	qm_sg_index++;
	if (ivsize) {
		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
		qm_sg_index++;
	}
	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
	qm_sg_index += mapped_src_nents;

	if (mapped_dst_nents > 1)
		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);

	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->qm_sg_dma = qm_sg_dma;
	edesc->qm_sg_bytes = qm_sg_bytes;

	out_len = req->assoclen + req->cryptlen +
		  (encrypt ? ctx->authsize : (-ctx->authsize));
	in_len = 4 + ivsize + req->assoclen + req->cryptlen;

	fd_sgt = &edesc->drv_req.fd_sgt[0];
	dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0);

	if (req->dst == req->src) {
		if (mapped_src_nents == 1)
			dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src),
					 out_len, 0);
		else
			dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma +
					     (1 + !!ivsize) * sizeof(*sg_table),
					     out_len, 0);
	} else if (mapped_dst_nents <= 1) {
		dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len,
				 0);
	} else {
		dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) *
				     qm_sg_index, out_len, 0);
	}

	return edesc;
}
static inline int aead_crypt(struct aead_request *req, bool encrypt)
{
	struct aead_edesc *edesc;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	int ret;

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Create and submit job descriptor */
	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		aead_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_crypt(req, true);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_crypt(req, false);
}

static int ipsec_gcm_encrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
									 true);
}

static int ipsec_gcm_decrypt(struct aead_request *req)
{
	return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req,
									 false);
}
static inline u8 *skcipher_edesc_iv(struct skcipher_edesc *edesc)
{
	return PTR_ALIGN((u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
			 dma_get_cache_alignment());
}
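/*
 * The IV lives in the same qi_cache allocation as the edesc, right after the
 * hardware S/G table and aligned to dma_get_cache_alignment(), so that it can
 * be DMA-mapped on its own without sharing a cache line with CPU-owned data.
 */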
static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
{
	struct skcipher_edesc *edesc;
	struct skcipher_request *req = drv_req->app_ctx;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *caam_ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *qidev = caam_ctx->qidev;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int ecode = 0;

	dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);

	edesc = container_of(drv_req, typeof(*edesc), drv_req);

	if (status)
		ecode = caam_jr_strstatus(qidev, status);

	print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
			     edesc->src_nents > 1 ? 100 : ivsize, 1);
	caam_dump_sg("dst @" __stringify(__LINE__)": ",
		     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
		     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);

	skcipher_unmap(qidev, edesc, req);

	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, skcipher_edesc_iv(edesc), ivsize);

	qi_cache_free(edesc);
	skcipher_request_complete(req, ecode);
}
static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
						   bool encrypt)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct device *qidev = ctx->qidev;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
	struct skcipher_edesc *edesc;
	dma_addr_t iv_dma;
	u8 *iv;
	int ivsize = crypto_skcipher_ivsize(skcipher);
	int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
	struct qm_sg_entry *sg_table, *fd_sgt;
	struct caam_drv_ctx *drv_ctx;
	unsigned int len;

	drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
	if (IS_ERR(drv_ctx))
		return ERR_CAST(drv_ctx);

	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (unlikely(src_nents < 0)) {
		dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
			req->cryptlen);
		return ERR_PTR(src_nents);
	}

	if (unlikely(req->src != req->dst)) {
		dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
		if (unlikely(dst_nents < 0)) {
			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
				req->cryptlen);
			return ERR_PTR(dst_nents);
		}

		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_TO_DEVICE);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}

		mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
					      DMA_FROM_DEVICE);
		if (unlikely(!mapped_dst_nents)) {
			dev_err(qidev, "unable to map destination\n");
			dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
					      DMA_BIDIRECTIONAL);
		if (unlikely(!mapped_src_nents)) {
			dev_err(qidev, "unable to map source\n");
			return ERR_PTR(-ENOMEM);
		}
	}

	qm_sg_ents = 1 + mapped_src_nents;
	dst_sg_idx = qm_sg_ents;

	/*
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 *
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	if (req->src != req->dst)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
	else
		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);

	qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);

	len = offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes;
	len = ALIGN(len, dma_get_cache_alignment());
	len += ivsize;

	if (unlikely(len > CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_ents, ivsize);
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	/* allocate space for base edesc, link tables and IV */
	edesc = qi_cache_alloc(flags);
	if (unlikely(!edesc)) {
		dev_err(qidev, "could not allocate extended descriptor\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->qm_sg_bytes = qm_sg_bytes;
	edesc->drv_req.app_ctx = req;
	edesc->drv_req.cbk = skcipher_done;
	edesc->drv_req.drv_ctx = drv_ctx;

	/* Make sure IV is located in a DMAable area */
	sg_table = &edesc->sgt[0];
	iv = skcipher_edesc_iv(edesc);
	memcpy(iv, req->iv, ivsize);

	iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(qidev, iv_dma)) {
		dev_err(qidev, "unable to map IV\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	edesc->iv_dma = iv_dma;

	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
	sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);

	if (req->src != req->dst)
		sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);

	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
			 ivsize, 0);

	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
		dev_err(qidev, "unable to map S/G table\n");
		caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
			   iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}

	fd_sgt = &edesc->drv_req.fd_sgt[0];

	dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
				  ivsize + req->cryptlen, 0);

	if (req->src == req->dst)
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
				     sizeof(*sg_table), req->cryptlen + ivsize,
				     0);
	else
		dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
				     sizeof(*sg_table), req->cryptlen + ivsize,
				     0);

	return edesc;
}
static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
}
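/*
 * xts_skcipher_ivsize() returns true when the upper 64 bits of the XTS IV
 * (the high half of the sector index) are non-zero. skcipher_crypt() below
 * uses this on CAAM eras <= 8 to route such requests to the software
 * fallback, presumably because those eras cannot handle the full IV range
 * in hardware.
 */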
static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
	struct skcipher_edesc *edesc;
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent);
	int ret;

	/*
	 * XTS is expected to return an error even for input length = 0
	 * Note that the case input length < block size will be caught during
	 * HW offloading and return an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;

	if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);

		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&rctx->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) :
				 crypto_skcipher_decrypt(&rctx->fallback_req);
	}

	if (unlikely(caam_congested))
		return -EAGAIN;

	/* allocate extended descriptor */
	edesc = skcipher_edesc_alloc(req, encrypt);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
	if (!ret) {
		ret = -EINPROGRESS;
	} else {
		skcipher_unmap(ctx->qidev, edesc, req);
		qi_cache_free(edesc);
	}

	return ret;
}
static int skcipher_encrypt(struct skcipher_request *req)
{
	return skcipher_crypt(req, true);
}

static int skcipher_decrypt(struct skcipher_request *req)
{
	return skcipher_crypt(req, false);
}
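/*
 * The setkey/encrypt/decrypt entry points above are only reached through the
 * generic crypto API once the algorithms below are registered. A rough,
 * illustrative sketch of a caller (error handling omitted):
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_skcipher_setkey(tfm, key, keylen);	// -> aes_skcipher_setkey()
 *	skcipher_request_set_crypt(req, src, dst, len, iv);
 *	crypto_skcipher_encrypt(req);			// -> skcipher_encrypt()
 *
 * Whether this driver's "-caam-qi" implementation is picked depends on
 * CAAM_CRA_PRIORITY relative to other registered implementations.
 */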
static struct caam_skcipher_alg driver_algs[] = {
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(aes)",
				.cra_driver_name = "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aes_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des3_ede)",
				.cra_driver_name = "cbc-3des-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "cbc(des)",
				.cra_driver_name = "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = des_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "ctr(aes)",
				.cra_driver_name = "ctr-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = ctr_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES |
					OP_ALG_AAI_CTR_MOD128,
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "rfc3686(ctr(aes))",
				.cra_driver_name = "rfc3686-ctr-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc3686_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE +
				       CTR_RFC3686_NONCE_SIZE,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.chunksize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES |
					   OP_ALG_AAI_CTR_MOD128,
			.rfc3686 = true,
		},
	},
	{
		.skcipher = {
			.base = {
				.cra_name = "xts(aes)",
				.cra_driver_name = "xts-aes-caam-qi",
				.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = xts_skcipher_setkey,
			.encrypt = skcipher_encrypt,
			.decrypt = skcipher_decrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
	},
};
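
/* AEAD algorithm templates: GCM variants and authenc() combinations */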
static struct caam_aead_alg driver_aeads[] = {
	{
		.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4106_setkey,
			.setauthsize = rfc4106_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "rfc4543(gcm(aes))",
				.cra_driver_name = "rfc4543-gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = rfc4543_setkey,
			.setauthsize = rfc4543_setauthsize,
			.encrypt = ipsec_gcm_encrypt,
			.decrypt = ipsec_gcm_decrypt,
			.ivsize = 8,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	/* Galois Counter Mode */
	{
		.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-caam-qi",
				.cra_blocksize = 1,
			},
			.setkey = gcm_setkey,
			.setauthsize = gcm_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
			.nodkp = true,
		},
	},
	/* single-pass ipsec_esp descriptor */
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(aes)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-aes-"
						   "caam-qi",
				.cra_blocksize = AES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des3_ede)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-"
						   "cbc-des3_ede-caam-qi",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			},
			.setkey = des3_aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(md5),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-hmac-md5-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_MD5 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha1),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha1-cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA1 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha224),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha224-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA224 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha256),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha256-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA256 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha384),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha384-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA384 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(des))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-des-caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
		},
	},
	{
		.aead = {
			.base = {
				.cra_name = "echainiv(authenc(hmac(sha512),"
					    "cbc(des)))",
				.cra_driver_name = "echainiv-authenc-"
						   "hmac-sha512-cbc-des-"
						   "caam-qi",
				.cra_blocksize = DES_BLOCK_SIZE,
			},
			.setkey = aead_setkey,
			.setauthsize = aead_setauthsize,
			.encrypt = aead_encrypt,
			.decrypt = aead_decrypt,
			.ivsize = DES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.caam = {
			.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
			.class2_alg_type = OP_ALG_ALGSEL_SHA512 |
					   OP_ALG_AAI_HMAC_PRECOMP,
			.geniv = true,
		},
	},
};
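
/*
 * Shared transform-context setup: bind the tfm to a job ring, DMA-map the
 * key buffer (bidirectionally on era >= 6 when split-key derivation may
 * rewrite it in place), and record the descriptor header templates.
 */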
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			    bool uses_dkp)
{
	struct caam_drv_private *priv;
	struct device *dev;

	/*
	 * distribute tfms across job rings to ensure in-order
	 * crypto request processing per tfm
	 */
	ctx->jrdev = caam_jr_alloc();
	if (IS_ERR(ctx->jrdev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->jrdev);
	}

	dev = ctx->jrdev->parent;
	priv = dev_get_drvdata(dev);
	if (priv->era >= 6 && uses_dkp)
		ctx->dir = DMA_BIDIRECTIONAL;
	else
		ctx->dir = DMA_TO_DEVICE;

	ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key),
				      ctx->dir);
	if (dma_mapping_error(dev, ctx->key_dma)) {
		dev_err(dev, "unable to map key\n");
		caam_jr_free(ctx->jrdev);
		return -ENOMEM;
	}

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->qidev = dev;

	spin_lock_init(&ctx->lock);
	ctx->drv_ctx[ENCRYPT] = NULL;
	ctx->drv_ctx[DECRYPT] = NULL;

	return 0;
}
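
/*
 * skcipher tfm init: for XTS, allocate a software fallback cipher of the
 * same name and reserve request-context space for it before doing the
 * common context setup.
 */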
static int caam_cra_init(struct crypto_skcipher *tfm)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct caam_skcipher_alg *caam_alg =
		container_of(alg, typeof(*caam_alg), skcipher);
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
	u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
	int ret = 0;

	if (alg_aai == OP_ALG_AAI_XTS) {
		const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
		struct crypto_skcipher *fallback;

		fallback = crypto_alloc_skcipher(tfm_name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(fallback)) {
			pr_err("Failed to allocate %s fallback: %ld\n",
			       tfm_name, PTR_ERR(fallback));
			return PTR_ERR(fallback);
		}

		ctx->fallback = fallback;
		crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
					    crypto_skcipher_reqsize(fallback));
	}

	ret = caam_init_common(ctx, &caam_alg->caam, false);
	if (ret && ctx->fallback)
		crypto_free_skcipher(ctx->fallback);

	return ret;
}
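
/* AEAD tfm init: DKP is used unless the template opts out via .nodkp */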
static int caam_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm);

	return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
}
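
/*
 * Common teardown: release the per-direction driver contexts, unmap the
 * key buffer and return the job ring.
 */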
static void caam_exit_common(struct caam_ctx *ctx)
{
	caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
	caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);

	dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key),
			 ctx->dir);

	caam_jr_free(ctx->jrdev);
}

static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	if (ctx->fallback)
		crypto_free_skcipher(ctx->fallback);
	caam_exit_common(ctx);
}

static void caam_aead_exit(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx_dma(tfm));
}
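
/* Unregister every algorithm that was successfully registered at init time */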
void caam_qi_algapi_exit(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;

		if (t_alg->registered)
			crypto_unregister_skcipher(&t_alg->skcipher);
	}
}
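
/*
 * Fill in the template-independent crypto_alg fields (module, priority,
 * context size, async/driver-only flags, init/exit hooks) before
 * registration.
 */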
static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
	struct skcipher_alg *alg = &t_alg->skcipher;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
				CRYPTO_ALG_KERN_DRIVER_ONLY);

	alg->init = caam_cra_init;
	alg->exit = caam_cra_exit;
}

static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
			      CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_aead_init;
	alg->exit = caam_aead_exit;
}
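
/*
 * Probe-time registration: runs only on DPAA 1.x QI hardware, reads the
 * CHA version/instantiation registers to detect which DES, AES and MD
 * blocks are present, and registers only the templates the device
 * supports.
 */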
int caam_qi_algapi_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	int i = 0, err = 0;
	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
	unsigned int md_limit = SHA512_DIGEST_SIZE;
	bool registered = false;

	/* Make sure this runs only on (DPAA 1.x) QI */
	if (!priv->qi_present || caam_dpaa2)
		return 0;

	/*
	 * Register crypto algorithms the device supports.
	 * First, detect presence and attributes of DES, AES, and MD blocks.
	 */
	if (priv->era < 10) {
		u32 cha_vid, cha_inst;

		cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls);
		aes_vid = cha_vid & CHA_ID_LS_AES_MASK;
		md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;

		cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls);
		des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >>
			   CHA_ID_LS_DES_SHIFT;
		aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
		md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
	} else {
		u32 aesa, mdha;

		aesa = rd_reg32(&priv->ctrl->vreg.aesa);
		mdha = rd_reg32(&priv->ctrl->vreg.mdha);

		aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;
		md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT;

		des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
		aes_inst = aesa & CHA_VER_NUM_MASK;
		md_inst = mdha & CHA_VER_NUM_MASK;
	}

	/* If MD is present, limit digest size based on LP256 */
	if (md_inst && md_vid == CHA_VER_VID_MD_LP256)
		md_limit = SHA256_DIGEST_SIZE;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			dev_warn(ctrldev, "%s alg registration failed\n",
				 t_alg->skcipher.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!des_inst &&
		    ((c1_alg_sel == OP_ALG_ALGSEL_3DES) ||
		     (c1_alg_sel == OP_ALG_ALGSEL_DES)))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
			continue;

		/*
		 * Check support for AES algorithms not available
		 * on LP devices.
		 */
		if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM)
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD or MD size is not supported by device.
		 */
		if (c2_alg_sel &&
		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
			continue;

		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			pr_warn("%s alg registration failed\n",
				t_alg->aead.base.cra_driver_name);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	if (registered)
		dev_info(ctrldev, "algorithms registered in /proc/crypto\n");

	return err;
}