chcr_algo.c

  1. /*
  2. * This file is part of the Chelsio T6 Crypto driver for Linux.
  3. *
  4. * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
  5. *
  6. * This software is available to you under a choice of one of two
  7. * licenses. You may choose to be licensed under the terms of the GNU
  8. * General Public License (GPL) Version 2, available from the file
  9. * COPYING in the main directory of this source tree, or the
  10. * OpenIB.org BSD license below:
  11. *
  12. * Redistribution and use in source and binary forms, with or
  13. * without modification, are permitted provided that the following
  14. * conditions are met:
  15. *
  16. * - Redistributions of source code must retain the above
  17. * copyright notice, this list of conditions and the following
  18. * disclaimer.
  19. *
  20. * - Redistributions in binary form must reproduce the above
  21. * copyright notice, this list of conditions and the following
  22. * disclaimer in the documentation and/or other materials
  23. * provided with the distribution.
  24. *
  25. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32. * SOFTWARE.
  33. *
  34. * Written and Maintained by:
  35. * Manoj Malviya (manojmalviya@chelsio.com)
  36. * Atul Gupta (atul.gupta@chelsio.com)
  37. * Jitendra Lulla (jlulla@chelsio.com)
  38. * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
  39. * Harsh Jain (harsh@chelsio.com)
  40. */
  41. #define pr_fmt(fmt) "chcr:" fmt
  42. #include <linux/kernel.h>
  43. #include <linux/module.h>
  44. #include <linux/crypto.h>
  45. #include <linux/cryptohash.h>
  46. #include <linux/skbuff.h>
  47. #include <linux/rtnetlink.h>
  48. #include <linux/highmem.h>
  49. #include <linux/scatterlist.h>
  50. #include <crypto/aes.h>
  51. #include <crypto/algapi.h>
  52. #include <crypto/hash.h>
  53. #include <crypto/gcm.h>
  54. #include <crypto/sha.h>
  55. #include <crypto/authenc.h>
  56. #include <crypto/ctr.h>
  57. #include <crypto/gf128mul.h>
  58. #include <crypto/internal/aead.h>
  59. #include <crypto/null.h>
  60. #include <crypto/internal/skcipher.h>
  61. #include <crypto/aead.h>
  62. #include <crypto/scatterwalk.h>
  63. #include <crypto/internal/hash.h>
  64. #include "t4fw_api.h"
  65. #include "t4_msg.h"
  66. #include "chcr_core.h"
  67. #include "chcr_algo.h"
  68. #include "chcr_crypto.h"
  69. #define IV AES_BLOCK_SIZE
  70. static unsigned int sgl_ent_len[] = {
  71. 0, 0, 16, 24, 40, 48, 64, 72, 88,
  72. 96, 112, 120, 136, 144, 160, 168, 184,
  73. 192, 208, 216, 232, 240, 256, 264, 280,
  74. 288, 304, 312, 328, 336, 352, 360, 376
  75. };
  76. static unsigned int dsgl_ent_len[] = {
  77. 0, 32, 32, 48, 48, 64, 64, 80, 80,
  78. 112, 112, 128, 128, 144, 144, 160, 160,
  79. 192, 192, 208, 208, 224, 224, 240, 240,
  80. 272, 272, 288, 288, 304, 304, 320, 320
  81. };
  82. static u32 round_constant[11] = {
  83. 0x01000000, 0x02000000, 0x04000000, 0x08000000,
  84. 0x10000000, 0x20000000, 0x40000000, 0x80000000,
  85. 0x1B000000, 0x36000000, 0x6C000000
  86. };
  87. static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
  88. unsigned char *input, int err);
  89. static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
  90. {
  91. return ctx->crypto_ctx->aeadctx;
  92. }
  93. static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
  94. {
  95. return ctx->crypto_ctx->ablkctx;
  96. }
  97. static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
  98. {
  99. return ctx->crypto_ctx->hmacctx;
  100. }
  101. static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
  102. {
  103. return gctx->ctx->gcm;
  104. }
  105. static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
  106. {
  107. return gctx->ctx->authenc;
  108. }
  109. static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
  110. {
  111. return ctx->dev->u_ctx;
  112. }
  113. static inline int is_ofld_imm(const struct sk_buff *skb)
  114. {
  115. return (skb->len <= SGE_MAX_WR_LEN);
  116. }
  117. static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
  118. {
  119. memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
  120. }
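/*
 * sg_nents_xlen() - number of hardware SGL entries needed to cover
 * @reqlen bytes of a DMA-mapped scatterlist when each entry holds at
 * most @entlen bytes and the first @skip bytes are ignored.
 */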
  121. static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
  122. unsigned int entlen,
  123. unsigned int skip)
  124. {
  125. int nents = 0;
  126. unsigned int less;
  127. unsigned int skip_len = 0;
  128. while (sg && skip) {
  129. if (sg_dma_len(sg) <= skip) {
  130. skip -= sg_dma_len(sg);
  131. skip_len = 0;
  132. sg = sg_next(sg);
  133. } else {
  134. skip_len = skip;
  135. skip = 0;
  136. }
  137. }
  138. while (sg && reqlen) {
  139. less = min(reqlen, sg_dma_len(sg) - skip_len);
  140. nents += DIV_ROUND_UP(less, entlen);
  141. reqlen -= less;
  142. skip_len = 0;
  143. sg = sg_next(sg);
  144. }
  145. return nents;
  146. }
  147. static inline int get_aead_subtype(struct crypto_aead *aead)
  148. {
  149. struct aead_alg *alg = crypto_aead_alg(aead);
  150. struct chcr_alg_template *chcr_crypto_alg =
  151. container_of(alg, struct chcr_alg_template, alg.aead);
  152. return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
  153. }
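/*
 * chcr_verify_tag() - software tag check for AEAD decryption: compare the
 * tag returned just after the CPL_FW6_PLD with the expected tag (taken
 * from the CPL payload for GCM/RFC4106, from the tail of req->src
 * otherwise) and set *err to -EBADMSG on mismatch.
 */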
  154. void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
  155. {
  156. u8 temp[SHA512_DIGEST_SIZE];
  157. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  158. int authsize = crypto_aead_authsize(tfm);
  159. struct cpl_fw6_pld *fw6_pld;
  160. int cmp = 0;
  161. fw6_pld = (struct cpl_fw6_pld *)input;
  162. if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
  163. (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
  164. cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
  165. } else {
  166. sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
  167. authsize, req->assoclen +
  168. req->cryptlen - authsize);
  169. cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
  170. }
  171. if (cmp)
  172. *err = -EBADMSG;
  173. else
  174. *err = 0;
  175. }
  176. static inline void chcr_handle_aead_resp(struct aead_request *req,
  177. unsigned char *input,
  178. int err)
  179. {
  180. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  181. chcr_aead_common_exit(req);
  182. if (reqctx->verify == VERIFY_SW) {
  183. chcr_verify_tag(req, input, &err);
  184. reqctx->verify = VERIFY_HW;
  185. }
  186. req->base.complete(&req->base, err);
  187. }
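/*
 * get_aes_decrypt_key() - expand the AES key schedule in software and
 * store the last Nk round-key words, in reverse order, in @dec_key.
 * This is the "reverse round key" (rrkey) programmed into the key
 * context for AES-CBC decryption.
 */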
  188. static void get_aes_decrypt_key(unsigned char *dec_key,
  189. const unsigned char *key,
  190. unsigned int keylength)
  191. {
  192. u32 temp;
  193. u32 w_ring[MAX_NK];
  194. int i, j, k;
  195. u8 nr, nk;
  196. switch (keylength) {
  197. case AES_KEYLENGTH_128BIT:
  198. nk = KEYLENGTH_4BYTES;
  199. nr = NUMBER_OF_ROUNDS_10;
  200. break;
  201. case AES_KEYLENGTH_192BIT:
  202. nk = KEYLENGTH_6BYTES;
  203. nr = NUMBER_OF_ROUNDS_12;
  204. break;
  205. case AES_KEYLENGTH_256BIT:
  206. nk = KEYLENGTH_8BYTES;
  207. nr = NUMBER_OF_ROUNDS_14;
  208. break;
  209. default:
  210. return;
  211. }
  212. for (i = 0; i < nk; i++)
  213. w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);
  214. i = 0;
  215. temp = w_ring[nk - 1];
  216. while (i + nk < (nr + 1) * 4) {
  217. if (!(i % nk)) {
  218. /* RotWord(temp) */
  219. temp = (temp << 8) | (temp >> 24);
  220. temp = aes_ks_subword(temp);
  221. temp ^= round_constant[i / nk];
  222. } else if (nk == 8 && (i % 4 == 0)) {
  223. temp = aes_ks_subword(temp);
  224. }
  225. w_ring[i % nk] ^= temp;
  226. temp = w_ring[i % nk];
  227. i++;
  228. }
  229. i--;
  230. for (k = 0, j = i % nk; k < nk; k++) {
  231. *((u32 *)dec_key + k) = htonl(w_ring[j]);
  232. j--;
  233. if (j < 0)
  234. j += nk;
  235. }
  236. }
  237. static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
  238. {
  239. struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
  240. switch (ds) {
  241. case SHA1_DIGEST_SIZE:
  242. base_hash = crypto_alloc_shash("sha1", 0, 0);
  243. break;
  244. case SHA224_DIGEST_SIZE:
  245. base_hash = crypto_alloc_shash("sha224", 0, 0);
  246. break;
  247. case SHA256_DIGEST_SIZE:
  248. base_hash = crypto_alloc_shash("sha256", 0, 0);
  249. break;
  250. case SHA384_DIGEST_SIZE:
  251. base_hash = crypto_alloc_shash("sha384", 0, 0);
  252. break;
  253. case SHA512_DIGEST_SIZE:
  254. base_hash = crypto_alloc_shash("sha512", 0, 0);
  255. break;
  256. }
  257. return base_hash;
  258. }
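/*
 * chcr_compute_partial_hash() - run one block of HMAC ipad/opad through
 * the software shash and export the intermediate state into
 * @result_hash, which is later loaded into the hardware key context.
 */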
  259. static int chcr_compute_partial_hash(struct shash_desc *desc,
  260. char *iopad, char *result_hash,
  261. int digest_size)
  262. {
  263. struct sha1_state sha1_st;
  264. struct sha256_state sha256_st;
  265. struct sha512_state sha512_st;
  266. int error;
  267. if (digest_size == SHA1_DIGEST_SIZE) {
  268. error = crypto_shash_init(desc) ?:
  269. crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
  270. crypto_shash_export(desc, (void *)&sha1_st);
  271. memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
  272. } else if (digest_size == SHA224_DIGEST_SIZE) {
  273. error = crypto_shash_init(desc) ?:
  274. crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
  275. crypto_shash_export(desc, (void *)&sha256_st);
  276. memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
  277. } else if (digest_size == SHA256_DIGEST_SIZE) {
  278. error = crypto_shash_init(desc) ?:
  279. crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
  280. crypto_shash_export(desc, (void *)&sha256_st);
  281. memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
  282. } else if (digest_size == SHA384_DIGEST_SIZE) {
  283. error = crypto_shash_init(desc) ?:
  284. crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
  285. crypto_shash_export(desc, (void *)&sha512_st);
  286. memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
  287. } else if (digest_size == SHA512_DIGEST_SIZE) {
  288. error = crypto_shash_init(desc) ?:
  289. crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
  290. crypto_shash_export(desc, (void *)&sha512_st);
  291. memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
  292. } else {
  293. error = -EINVAL;
  294. pr_err("Unknown digest size %d\n", digest_size);
  295. }
  296. return error;
  297. }
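/*
 * chcr_change_order() - convert the exported hash state words to
 * big-endian byte order.
 */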
  298. static void chcr_change_order(char *buf, int ds)
  299. {
  300. int i;
  301. if (ds == SHA512_DIGEST_SIZE) {
  302. for (i = 0; i < (ds / sizeof(u64)); i++)
  303. *((__be64 *)buf + i) =
  304. cpu_to_be64(*((u64 *)buf + i));
  305. } else {
  306. for (i = 0; i < (ds / sizeof(u32)); i++)
  307. *((__be32 *)buf + i) =
  308. cpu_to_be32(*((u32 *)buf + i));
  309. }
  310. }
  311. static inline int is_hmac(struct crypto_tfm *tfm)
  312. {
  313. struct crypto_alg *alg = tfm->__crt_alg;
  314. struct chcr_alg_template *chcr_crypto_alg =
  315. container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
  316. alg.hash);
  317. if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
  318. return 1;
  319. return 0;
  320. }
  321. static inline void dsgl_walk_init(struct dsgl_walk *walk,
  322. struct cpl_rx_phys_dsgl *dsgl)
  323. {
  324. walk->dsgl = dsgl;
  325. walk->nents = 0;
  326. walk->to = (struct phys_sge_pairs *)(dsgl + 1);
  327. }
  328. static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
  329. int pci_chan_id)
  330. {
  331. struct cpl_rx_phys_dsgl *phys_cpl;
  332. phys_cpl = walk->dsgl;
  333. phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
  334. | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
  335. phys_cpl->pcirlxorder_to_noofsgentr =
  336. htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
  337. CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
  338. CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
  339. CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
  340. CPL_RX_PHYS_DSGL_DCAID_V(0) |
  341. CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
  342. phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
  343. phys_cpl->rss_hdr_int.qid = htons(qid);
  344. phys_cpl->rss_hdr_int.hash_val = 0;
  345. phys_cpl->rss_hdr_int.channel = pci_chan_id;
  346. }
  347. static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
  348. size_t size,
  349. dma_addr_t *addr)
  350. {
  351. int j;
  352. if (!size)
  353. return;
  354. j = walk->nents;
  355. walk->to->len[j % 8] = htons(size);
  356. walk->to->addr[j % 8] = cpu_to_be64(*addr);
  357. j++;
  358. if ((j % 8) == 0)
  359. walk->to++;
  360. walk->nents = j;
  361. }
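/*
 * dsgl_walk_add_sg() - append @slen bytes of a DMA-mapped scatterlist to
 * the destination PHYS_DSGL, skipping the first @skip bytes and splitting
 * segments larger than CHCR_DST_SG_SIZE into multiple entries.
 */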
  362. static void dsgl_walk_add_sg(struct dsgl_walk *walk,
  363. struct scatterlist *sg,
  364. unsigned int slen,
  365. unsigned int skip)
  366. {
  367. int skip_len = 0;
  368. unsigned int left_size = slen, len = 0;
  369. unsigned int j = walk->nents;
  370. int offset, ent_len;
  371. if (!slen)
  372. return;
  373. while (sg && skip) {
  374. if (sg_dma_len(sg) <= skip) {
  375. skip -= sg_dma_len(sg);
  376. skip_len = 0;
  377. sg = sg_next(sg);
  378. } else {
  379. skip_len = skip;
  380. skip = 0;
  381. }
  382. }
  383. while (left_size && sg) {
  384. len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
  385. offset = 0;
  386. while (len) {
  387. ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
  388. walk->to->len[j % 8] = htons(ent_len);
  389. walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
  390. offset + skip_len);
  391. offset += ent_len;
  392. len -= ent_len;
  393. j++;
  394. if ((j % 8) == 0)
  395. walk->to++;
  396. }
  397. walk->last_sg = sg;
  398. walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
  399. skip_len) + skip_len;
  400. left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
  401. skip_len = 0;
  402. sg = sg_next(sg);
  403. }
  404. walk->nents = j;
  405. }
  406. static inline void ulptx_walk_init(struct ulptx_walk *walk,
  407. struct ulptx_sgl *ulp)
  408. {
  409. walk->sgl = ulp;
  410. walk->nents = 0;
  411. walk->pair_idx = 0;
  412. walk->pair = ulp->sge;
  413. walk->last_sg = NULL;
  414. walk->last_sg_len = 0;
  415. }
  416. static inline void ulptx_walk_end(struct ulptx_walk *walk)
  417. {
  418. walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
  419. ULPTX_NSGE_V(walk->nents));
  420. }
  421. static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
  422. size_t size,
  423. dma_addr_t *addr)
  424. {
  425. if (!size)
  426. return;
  427. if (walk->nents == 0) {
  428. walk->sgl->len0 = cpu_to_be32(size);
  429. walk->sgl->addr0 = cpu_to_be64(*addr);
  430. } else {
  431. walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
  432. walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
  433. walk->pair_idx = !walk->pair_idx;
  434. if (!walk->pair_idx)
  435. walk->pair++;
  436. }
  437. walk->nents++;
  438. }
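/*
 * ulptx_walk_add_sg() - append @len bytes of a DMA-mapped scatterlist to
 * the ULPTX source SGL: the first entry uses len0/addr0, later entries
 * fill the addr/len pairs, and each entry is capped at CHCR_SRC_SG_SIZE.
 */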
  439. static void ulptx_walk_add_sg(struct ulptx_walk *walk,
  440. struct scatterlist *sg,
  441. unsigned int len,
  442. unsigned int skip)
  443. {
  444. int small;
  445. int skip_len = 0;
  446. unsigned int sgmin;
  447. if (!len)
  448. return;
  449. while (sg && skip) {
  450. if (sg_dma_len(sg) <= skip) {
  451. skip -= sg_dma_len(sg);
  452. skip_len = 0;
  453. sg = sg_next(sg);
  454. } else {
  455. skip_len = skip;
  456. skip = 0;
  457. }
  458. }
  459. WARN(!sg, "SG should not be null here\n");
  460. if (sg && (walk->nents == 0)) {
  461. small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
  462. sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
  463. walk->sgl->len0 = cpu_to_be32(sgmin);
  464. walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
  465. walk->nents++;
  466. len -= sgmin;
  467. walk->last_sg = sg;
  468. walk->last_sg_len = sgmin + skip_len;
  469. skip_len += sgmin;
  470. if (sg_dma_len(sg) == skip_len) {
  471. sg = sg_next(sg);
  472. skip_len = 0;
  473. }
  474. }
  475. while (sg && len) {
  476. small = min(sg_dma_len(sg) - skip_len, len);
  477. sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
  478. walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
  479. walk->pair->addr[walk->pair_idx] =
  480. cpu_to_be64(sg_dma_address(sg) + skip_len);
  481. walk->pair_idx = !walk->pair_idx;
  482. walk->nents++;
  483. if (!walk->pair_idx)
  484. walk->pair++;
  485. len -= sgmin;
  486. skip_len += sgmin;
  487. walk->last_sg = sg;
  488. walk->last_sg_len = skip_len;
  489. if (sg_dma_len(sg) == skip_len) {
  490. sg = sg_next(sg);
  491. skip_len = 0;
  492. }
  493. }
  494. }
  495. static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
  496. {
  497. struct crypto_alg *alg = tfm->__crt_alg;
  498. struct chcr_alg_template *chcr_crypto_alg =
  499. container_of(alg, struct chcr_alg_template, alg.crypto);
  500. return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
  501. }
  502. static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
  503. {
  504. struct adapter *adap = netdev2adap(dev);
  505. struct sge_uld_txq_info *txq_info =
  506. adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
  507. struct sge_uld_txq *txq;
  508. int ret = 0;
  509. local_bh_disable();
  510. txq = &txq_info->uldtxq[idx];
  511. spin_lock(&txq->sendq.lock);
  512. if (txq->full)
  513. ret = -1;
  514. spin_unlock(&txq->sendq.lock);
  515. local_bh_enable();
  516. return ret;
  517. }
  518. static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
  519. struct _key_ctx *key_ctx)
  520. {
  521. if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
  522. memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
  523. } else {
  524. memcpy(key_ctx->key,
  525. ablkctx->key + (ablkctx->enckey_len >> 1),
  526. ablkctx->enckey_len >> 1);
  527. memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
  528. ablkctx->rrkey, ablkctx->enckey_len >> 1);
  529. }
  530. return 0;
  531. }
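/*
 * chcr_hash_ent_in_wr() - number of source payload bytes that still fit
 * in @space once @minsg SGL entries are accounted for; the first
 * @srcskip bytes of @src are not part of the payload.
 */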
  532. static int chcr_hash_ent_in_wr(struct scatterlist *src,
  533. unsigned int minsg,
  534. unsigned int space,
  535. unsigned int srcskip)
  536. {
  537. int srclen = 0;
  538. int srcsg = minsg;
  539. int soffset = 0, sless;
  540. if (sg_dma_len(src) == srcskip) {
  541. src = sg_next(src);
  542. srcskip = 0;
  543. }
  544. while (src && space > (sgl_ent_len[srcsg + 1])) {
  545. sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
  546. CHCR_SRC_SG_SIZE);
  547. srclen += sless;
  548. soffset += sless;
  549. srcsg++;
  550. if (sg_dma_len(src) == (soffset + srcskip)) {
  551. src = sg_next(src);
  552. soffset = 0;
  553. srcskip = 0;
  554. }
  555. }
  556. return srclen;
  557. }
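/*
 * chcr_sg_ent_in_wr() - walk @src and @dst in step and return how many
 * payload bytes (the smaller of the two counts) can be described within
 * @space, using sgl_ent_len[]/dsgl_ent_len[] to account for the per-entry
 * overhead of the source and destination SGLs.
 */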
  558. static int chcr_sg_ent_in_wr(struct scatterlist *src,
  559. struct scatterlist *dst,
  560. unsigned int minsg,
  561. unsigned int space,
  562. unsigned int srcskip,
  563. unsigned int dstskip)
  564. {
  565. int srclen = 0, dstlen = 0;
  566. int srcsg = minsg, dstsg = minsg;
  567. int offset = 0, soffset = 0, less, sless = 0;
  568. if (sg_dma_len(src) == srcskip) {
  569. src = sg_next(src);
  570. srcskip = 0;
  571. }
  572. if (sg_dma_len(dst) == dstskip) {
  573. dst = sg_next(dst);
  574. dstskip = 0;
  575. }
  576. while (src && dst &&
  577. space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
  578. sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
  579. CHCR_SRC_SG_SIZE);
  580. srclen += sless;
  581. srcsg++;
  582. offset = 0;
  583. while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
  584. space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
  585. if (srclen <= dstlen)
  586. break;
  587. less = min_t(unsigned int, sg_dma_len(dst) - offset -
  588. dstskip, CHCR_DST_SG_SIZE);
  589. dstlen += less;
  590. offset += less;
  591. if ((offset + dstskip) == sg_dma_len(dst)) {
  592. dst = sg_next(dst);
  593. offset = 0;
  594. }
  595. dstsg++;
  596. dstskip = 0;
  597. }
  598. soffset += sless;
  599. if ((soffset + srcskip) == sg_dma_len(src)) {
  600. src = sg_next(src);
  601. srcskip = 0;
  602. soffset = 0;
  603. }
  604. }
  605. return min(srclen, dstlen);
  606. }
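/*
 * chcr_cipher_fallback() - run the request synchronously through the
 * software skcipher when the hardware path cannot make progress.
 */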
  607. static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
  608. u32 flags,
  609. struct scatterlist *src,
  610. struct scatterlist *dst,
  611. unsigned int nbytes,
  612. u8 *iv,
  613. unsigned short op_type)
  614. {
  615. int err;
  616. SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
  617. skcipher_request_set_tfm(subreq, cipher);
  618. skcipher_request_set_callback(subreq, flags, NULL, NULL);
  619. skcipher_request_set_crypt(subreq, src, dst,
  620. nbytes, iv);
  621. err = op_type ? crypto_skcipher_decrypt(subreq) :
  622. crypto_skcipher_encrypt(subreq);
  623. skcipher_request_zero(subreq);
  624. return err;
  625. }
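/*
 * create_wreq() - fill in the common FW_CRYPTO_LOOKASIDE work-request and
 * ULPTX headers: lengths, completion cookie and the rx/tx queue ids on
 * which this request is sent and its response received.
 */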
  626. static inline void create_wreq(struct chcr_context *ctx,
  627. struct chcr_wr *chcr_req,
  628. struct crypto_async_request *req,
  629. unsigned int imm,
  630. int hash_sz,
  631. unsigned int len16,
  632. unsigned int sc_len,
  633. unsigned int lcb)
  634. {
  635. struct uld_ctx *u_ctx = ULD_CTX(ctx);
  636. int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];
  637. chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
  638. chcr_req->wreq.pld_size_hash_size =
  639. htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
  640. chcr_req->wreq.len16_pkd =
  641. htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
  642. chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
  643. chcr_req->wreq.rx_chid_to_rx_q_id =
  644. FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
  645. !!lcb, ctx->tx_qidx);
  646. chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
  647. qid);
  648. chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
  649. ((sizeof(chcr_req->wreq)) >> 4)));
  650. chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
  651. chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
  652. sizeof(chcr_req->key_ctx) + sc_len);
  653. }
   654. /**
   655. * create_cipher_wr - form the WR for cipher operations
   656. * @wrparam: parameters of this work request: the cipher request,
   657. * the number of bytes to be processed by this WR, and the
   658. * ingress qid on which the response to this WR should be
   659. * received.
   660. */
  661. static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
  662. {
  663. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
  664. struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
  665. struct sk_buff *skb = NULL;
  666. struct chcr_wr *chcr_req;
  667. struct cpl_rx_phys_dsgl *phys_cpl;
  668. struct ulptx_sgl *ulptx;
  669. struct chcr_blkcipher_req_ctx *reqctx =
  670. ablkcipher_request_ctx(wrparam->req);
  671. unsigned int temp = 0, transhdr_len, dst_size;
  672. int error;
  673. int nents;
  674. unsigned int kctx_len;
  675. gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
  676. GFP_KERNEL : GFP_ATOMIC;
  677. struct adapter *adap = padap(c_ctx(tfm)->dev);
  678. nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
  679. reqctx->dst_ofst);
  680. dst_size = get_space_for_phys_dsgl(nents);
  681. kctx_len = roundup(ablkctx->enckey_len, 16);
  682. transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
  683. nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
  684. CHCR_SRC_SG_SIZE, reqctx->src_ofst);
  685. temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
  686. (sgl_len(nents) * 8);
  687. transhdr_len += temp;
  688. transhdr_len = roundup(transhdr_len, 16);
  689. skb = alloc_skb(SGE_MAX_WR_LEN, flags);
  690. if (!skb) {
  691. error = -ENOMEM;
  692. goto err;
  693. }
  694. chcr_req = __skb_put_zero(skb, transhdr_len);
  695. chcr_req->sec_cpl.op_ivinsrtofst =
  696. FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);
  697. chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
  698. chcr_req->sec_cpl.aadstart_cipherstop_hi =
  699. FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);
  700. chcr_req->sec_cpl.cipherstop_lo_authinsert =
  701. FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
  702. chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
  703. ablkctx->ciph_mode,
  704. 0, 0, IV >> 1);
  705. chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
  706. 0, 1, dst_size);
  707. chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
  708. if ((reqctx->op == CHCR_DECRYPT_OP) &&
  709. (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
  710. CRYPTO_ALG_SUB_TYPE_CTR)) &&
  711. (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
  712. CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
  713. generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
  714. } else {
  715. if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
  716. (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
  717. memcpy(chcr_req->key_ctx.key, ablkctx->key,
  718. ablkctx->enckey_len);
  719. } else {
  720. memcpy(chcr_req->key_ctx.key, ablkctx->key +
  721. (ablkctx->enckey_len >> 1),
  722. ablkctx->enckey_len >> 1);
  723. memcpy(chcr_req->key_ctx.key +
  724. (ablkctx->enckey_len >> 1),
  725. ablkctx->key,
  726. ablkctx->enckey_len >> 1);
  727. }
  728. }
  729. phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
  730. ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
  731. chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
  732. chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);
  733. atomic_inc(&adap->chcr_stats.cipher_rqst);
  734. temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
  735. + (reqctx->imm ? (wrparam->bytes) : 0);
  736. create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
  737. transhdr_len, temp,
  738. ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
  739. reqctx->skb = skb;
  740. if (reqctx->op && (ablkctx->ciph_mode ==
  741. CHCR_SCMD_CIPHER_MODE_AES_CBC))
  742. sg_pcopy_to_buffer(wrparam->req->src,
  743. sg_nents(wrparam->req->src), wrparam->req->info, 16,
  744. reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);
  745. return skb;
  746. err:
  747. return ERR_PTR(error);
  748. }
  749. static inline int chcr_keyctx_ck_size(unsigned int keylen)
  750. {
  751. int ck_size = 0;
  752. if (keylen == AES_KEYSIZE_128)
  753. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
  754. else if (keylen == AES_KEYSIZE_192)
  755. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
  756. else if (keylen == AES_KEYSIZE_256)
  757. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
  758. else
  759. ck_size = 0;
  760. return ck_size;
  761. }
  762. static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
  763. const u8 *key,
  764. unsigned int keylen)
  765. {
  766. struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
  767. struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
  768. int err = 0;
  769. crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
  770. crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
  771. CRYPTO_TFM_REQ_MASK);
  772. err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
  773. tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
  774. tfm->crt_flags |=
  775. crypto_skcipher_get_flags(ablkctx->sw_cipher) &
  776. CRYPTO_TFM_RES_MASK;
  777. return err;
  778. }
  779. static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
  780. const u8 *key,
  781. unsigned int keylen)
  782. {
  783. struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
  784. unsigned int ck_size, context_size;
  785. u16 alignment = 0;
  786. int err;
  787. err = chcr_cipher_fallback_setkey(cipher, key, keylen);
  788. if (err)
  789. goto badkey_err;
  790. ck_size = chcr_keyctx_ck_size(keylen);
  791. alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
  792. memcpy(ablkctx->key, key, keylen);
  793. ablkctx->enckey_len = keylen;
  794. get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
  795. context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
  796. keylen + alignment) >> 4;
  797. ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
  798. 0, 0, context_size);
  799. ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
  800. return 0;
  801. badkey_err:
  802. crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  803. ablkctx->enckey_len = 0;
  804. return err;
  805. }
  806. static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
  807. const u8 *key,
  808. unsigned int keylen)
  809. {
  810. struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
  811. unsigned int ck_size, context_size;
  812. u16 alignment = 0;
  813. int err;
  814. err = chcr_cipher_fallback_setkey(cipher, key, keylen);
  815. if (err)
  816. goto badkey_err;
  817. ck_size = chcr_keyctx_ck_size(keylen);
  818. alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
  819. memcpy(ablkctx->key, key, keylen);
  820. ablkctx->enckey_len = keylen;
  821. context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
  822. keylen + alignment) >> 4;
  823. ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
  824. 0, 0, context_size);
  825. ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
  826. return 0;
  827. badkey_err:
  828. crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  829. ablkctx->enckey_len = 0;
  830. return err;
  831. }
  832. static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
  833. const u8 *key,
  834. unsigned int keylen)
  835. {
  836. struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
  837. unsigned int ck_size, context_size;
  838. u16 alignment = 0;
  839. int err;
  840. if (keylen < CTR_RFC3686_NONCE_SIZE)
  841. return -EINVAL;
  842. memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
  843. CTR_RFC3686_NONCE_SIZE);
  844. keylen -= CTR_RFC3686_NONCE_SIZE;
  845. err = chcr_cipher_fallback_setkey(cipher, key, keylen);
  846. if (err)
  847. goto badkey_err;
  848. ck_size = chcr_keyctx_ck_size(keylen);
  849. alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
  850. memcpy(ablkctx->key, key, keylen);
  851. ablkctx->enckey_len = keylen;
  852. context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
  853. keylen + alignment) >> 4;
  854. ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
  855. 0, 0, context_size);
  856. ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
  857. return 0;
  858. badkey_err:
  859. crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  860. ablkctx->enckey_len = 0;
  861. return err;
  862. }
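/*
 * ctr_add_iv() - copy @srciv to @dstiv and add @add to its big-endian
 * counter, propagating the carry across the 32-bit words.
 */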
  863. static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
  864. {
  865. unsigned int size = AES_BLOCK_SIZE;
  866. __be32 *b = (__be32 *)(dstiv + size);
  867. u32 c, prev;
  868. memcpy(dstiv, srciv, AES_BLOCK_SIZE);
  869. for (; size >= 4; size -= 4) {
  870. prev = be32_to_cpu(*--b);
  871. c = prev + add;
  872. *b = cpu_to_be32(c);
  873. if (prev < c)
  874. break;
  875. add = 1;
  876. }
  877. }
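/*
 * adjust_ctr_overflow() - clamp @bytes so that the 32-bit big-endian
 * counter at the end of the CTR IV does not wrap within this request.
 */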
  878. static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
  879. {
  880. __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
  881. u64 c;
  882. u32 temp = be32_to_cpu(*--b);
  883. temp = ~temp;
   884. c = (u64)temp + 1; /* number of blocks that can be processed without overflow */
  885. if ((bytes / AES_BLOCK_SIZE) > c)
  886. bytes = c * AES_BLOCK_SIZE;
  887. return bytes;
  888. }
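/*
 * chcr_update_tweak() - recompute the XTS tweak for the next chunk:
 * encrypt the original IV with the second half of the key, multiply by x
 * in GF(2^128) once per block handled by the previous work request, and
 * (unless this is the final IV) decrypt the result back before reuse.
 */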
  889. static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
  890. u32 isfinal)
  891. {
  892. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  893. struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
  894. struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
  895. struct crypto_cipher *cipher;
  896. int ret, i;
  897. u8 *key;
  898. unsigned int keylen;
  899. int round = reqctx->last_req_len / AES_BLOCK_SIZE;
  900. int round8 = round / 8;
  901. cipher = ablkctx->aes_generic;
  902. memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);
  903. keylen = ablkctx->enckey_len / 2;
  904. key = ablkctx->key + keylen;
  905. ret = crypto_cipher_setkey(cipher, key, keylen);
  906. if (ret)
  907. goto out;
  908. crypto_cipher_encrypt_one(cipher, iv, iv);
  909. for (i = 0; i < round8; i++)
  910. gf128mul_x8_ble((le128 *)iv, (le128 *)iv);
  911. for (i = 0; i < (round % 8); i++)
  912. gf128mul_x_ble((le128 *)iv, (le128 *)iv);
  913. if (!isfinal)
  914. crypto_cipher_decrypt_one(cipher, iv, iv);
  915. out:
  916. return ret;
  917. }
  918. static int chcr_update_cipher_iv(struct ablkcipher_request *req,
  919. struct cpl_fw6_pld *fw6_pld, u8 *iv)
  920. {
  921. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  922. struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
  923. int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
  924. int ret = 0;
  925. if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
  926. ctr_add_iv(iv, req->info, (reqctx->processed /
  927. AES_BLOCK_SIZE));
  928. else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
  929. *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
  930. CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
  931. AES_BLOCK_SIZE) + 1);
  932. else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
  933. ret = chcr_update_tweak(req, iv, 0);
  934. else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
  935. if (reqctx->op)
  936. /*Updated before sending last WR*/
  937. memcpy(iv, req->info, AES_BLOCK_SIZE);
  938. else
  939. memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
  940. }
  941. return ret;
  942. }
   943. /* We need a separate function for the final IV because in RFC3686 the
   944. * initial counter starts from 1, and the IV buffer is only 8 bytes, which
   945. * remains constant across subsequent update requests.
   946. */
  947. static int chcr_final_cipher_iv(struct ablkcipher_request *req,
  948. struct cpl_fw6_pld *fw6_pld, u8 *iv)
  949. {
  950. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  951. struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
  952. int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
  953. int ret = 0;
  954. if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
  955. ctr_add_iv(iv, req->info, (reqctx->processed /
  956. AES_BLOCK_SIZE));
  957. else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
  958. ret = chcr_update_tweak(req, iv, 1);
  959. else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
  960. /*Already updated for Decrypt*/
  961. if (!reqctx->op)
  962. memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
  963. }
  964. return ret;
  965. }
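/*
 * chcr_handle_cipher_resp() - completion handler for a cipher work
 * request: either finish the request (computing the final IV), or update
 * the IV and issue another WR for the remaining bytes, falling back to
 * the software cipher when nothing more fits in a WR.
 */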
  966. static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
  967. unsigned char *input, int err)
  968. {
  969. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
  970. struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
  971. struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
  972. struct sk_buff *skb;
  973. struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
  974. struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
  975. struct cipher_wr_param wrparam;
  976. int bytes;
  977. if (err)
  978. goto unmap;
  979. if (req->nbytes == reqctx->processed) {
  980. chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
  981. req);
  982. err = chcr_final_cipher_iv(req, fw6_pld, req->info);
  983. goto complete;
  984. }
  985. if (!reqctx->imm) {
  986. bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
  987. CIP_SPACE_LEFT(ablkctx->enckey_len),
  988. reqctx->src_ofst, reqctx->dst_ofst);
  989. if ((bytes + reqctx->processed) >= req->nbytes)
  990. bytes = req->nbytes - reqctx->processed;
  991. else
  992. bytes = rounddown(bytes, 16);
  993. } else {
   994. /* CTR mode counter overflow */
  995. bytes = req->nbytes - reqctx->processed;
  996. }
  997. err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
  998. if (err)
  999. goto unmap;
  1000. if (unlikely(bytes == 0)) {
  1001. chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
  1002. req);
  1003. err = chcr_cipher_fallback(ablkctx->sw_cipher,
  1004. req->base.flags,
  1005. req->src,
  1006. req->dst,
  1007. req->nbytes,
  1008. req->info,
  1009. reqctx->op);
  1010. goto complete;
  1011. }
  1012. if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
  1013. CRYPTO_ALG_SUB_TYPE_CTR)
  1014. bytes = adjust_ctr_overflow(reqctx->iv, bytes);
  1015. wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
  1016. wrparam.req = req;
  1017. wrparam.bytes = bytes;
  1018. skb = create_cipher_wr(&wrparam);
  1019. if (IS_ERR(skb)) {
  1020. pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
  1021. err = PTR_ERR(skb);
  1022. goto unmap;
  1023. }
  1024. skb->dev = u_ctx->lldi.ports[0];
  1025. set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
  1026. chcr_send_wr(skb);
  1027. reqctx->last_req_len = bytes;
  1028. reqctx->processed += bytes;
  1029. return 0;
  1030. unmap:
  1031. chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
  1032. complete:
  1033. req->base.complete(&req->base, err);
  1034. return err;
  1035. }
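/*
 * process_cipher() - validate and DMA-map the request, decide whether the
 * payload can be sent as immediate data, set up the IV (for RFC3686 the
 * counter block is nonce[4] | IV[8] | counter[4] with the counter
 * starting at 1) and build the first cipher work request.
 */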
static int process_cipher(struct ablkcipher_request *req,
			  unsigned short qid,
			  struct sk_buff **skb,
			  unsigned short op_type)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
	struct cipher_wr_param wrparam;
	int bytes, err = -EINVAL;

	reqctx->processed = 0;
	if (!req->info)
		goto error;
	if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
	    (req->nbytes == 0) ||
	    (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
		pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
		       ablkctx->enckey_len, req->nbytes, ivsize);
		goto error;
	}

	/* bail out if the DMA mapping failed */
	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
	if (err)
		goto error;
	if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
					     AES_MIN_KEY_SIZE +
					     sizeof(struct cpl_rx_phys_dsgl) +
					     /* Min dsgl size */
					     32))) {
		/* Can be sent as Imm */
		unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

		dnents = sg_nents_xlen(req->dst, req->nbytes,
				       CHCR_DST_SG_SIZE, 0);
		phys_dsgl = get_space_for_phys_dsgl(dnents);
		kctx_len = roundup(ablkctx->enckey_len, 16);
		transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
		reqctx->imm = (transhdr_len + IV + req->nbytes) <=
			SGE_MAX_WR_LEN;
		bytes = IV + req->nbytes;
	} else {
		reqctx->imm = 0;
	}

	if (!reqctx->imm) {
		bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
					  CIP_SPACE_LEFT(ablkctx->enckey_len),
					  0, 0);
		if ((bytes + reqctx->processed) >= req->nbytes)
			bytes = req->nbytes - reqctx->processed;
		else
			bytes = rounddown(bytes, 16);
	} else {
		bytes = req->nbytes;
	}
	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR) {
		bytes = adjust_ctr_overflow(req->info, bytes);
	}
	if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
	    CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
		memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
		memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
		       CTR_RFC3686_IV_SIZE);

		/* initialize counter portion of counter block */
		*(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
	} else {
		memcpy(reqctx->iv, req->info, IV);
	}
	if (unlikely(bytes == 0)) {
		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
				      req);
		err = chcr_cipher_fallback(ablkctx->sw_cipher,
					   req->base.flags,
					   req->src,
					   req->dst,
					   req->nbytes,
					   reqctx->iv,
					   op_type);
		goto error;
	}
	reqctx->op = op_type;
	reqctx->srcsg = req->src;
	reqctx->dstsg = req->dst;
	reqctx->src_ofst = 0;
	reqctx->dst_ofst = 0;
	wrparam.qid = qid;
	wrparam.req = req;
	wrparam.bytes = bytes;
	*skb = create_cipher_wr(&wrparam);
	if (IS_ERR(*skb)) {
		err = PTR_ERR(*skb);
		goto unmap;
	}
	reqctx->processed = bytes;
	reqctx->last_req_len = bytes;

	return 0;
unmap:
	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
	return err;
}
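
/*
 * Cipher entry points: return -ENOSPC when the crypto queue is full and
 * backlogging is not allowed, otherwise build the work request via
 * process_cipher() and post it to the SGE TX queue.
 */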
static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct sk_buff *skb = NULL;
	int err, isfull = 0;
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    c_ctx(tfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
			     &skb, CHCR_ENCRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
}

static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
	struct sk_buff *skb = NULL;
	int err, isfull = 0;

	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    c_ctx(tfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
			     &skb, CHCR_DECRYPT_OP);
	if (err || !skb)
		return err;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
}
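
/*
 * chcr_device_init - bind the transform context to a chcr device and pick
 * the RX/TX queue indices (spread per channel, selected by CPU id).
 */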
static int chcr_device_init(struct chcr_context *ctx)
{
	struct uld_ctx *u_ctx = NULL;
	struct adapter *adap;
	unsigned int id;
	int txq_perchan, txq_idx, ntxq;
	int err = 0, rxq_perchan, rxq_idx;

	id = smp_processor_id();
	if (!ctx->dev) {
		u_ctx = assign_chcr_device();
		if (!u_ctx) {
			pr_err("chcr device assignment fails\n");
			goto out;
		}
		ctx->dev = u_ctx->dev;
		adap = padap(ctx->dev);
		ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
				    adap->vres.ncrypto_fc);
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		txq_perchan = ntxq / u_ctx->lldi.nchan;
		spin_lock(&ctx->dev->lock_chcr_dev);
		ctx->tx_chan_id = ctx->dev->tx_channel_id;
		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
		ctx->dev->rx_channel_id = 0;
		spin_unlock(&ctx->dev->lock_chcr_dev);
		rxq_idx = ctx->tx_chan_id * rxq_perchan;
		rxq_idx += id % rxq_perchan;
		txq_idx = ctx->tx_chan_id * txq_perchan;
		txq_idx += id % txq_perchan;
		ctx->rx_qidx = rxq_idx;
		ctx->tx_qidx = txq_idx;
		/* Channel Id used by SGE to forward packet to Host.
		 * Same value should be used in cpl_fw6_pld RSS_CH field
		 * by FW. Driver programs PCI channel ID to be used in fw
		 * at the time of queue allocation with value "pi->tx_chan"
		 */
		ctx->pci_chan_id = txq_idx / txq_perchan;
	}
out:
	return err;
}
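
/*
 * chcr_cra_init - allocate the software fallback cipher (and the generic
 * AES cipher used to compute the XTS tweak) before binding to a chcr device.
 */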
static int chcr_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}

	if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
		/* To update tweak */
		ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
		if (IS_ERR(ablkctx->aes_generic)) {
			pr_err("failed to allocate aes cipher for tweak\n");
			return PTR_ERR(ablkctx->aes_generic);
		}
	} else
		ablkctx->aes_generic = NULL;

	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static int chcr_rfc3686_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	/* RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
	 * cannot be used as fallback in chcr_handle_cipher_response
	 */
	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ablkctx->sw_cipher)) {
		pr_err("failed to allocate fallback for %s\n", alg->cra_name);
		return PTR_ERR(ablkctx->sw_cipher);
	}
	tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
	return chcr_device_init(crypto_tfm_ctx(tfm));
}

static void chcr_cra_exit(struct crypto_tfm *tfm)
{
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

	crypto_free_skcipher(ablkctx->sw_cipher);
	if (ablkctx->aes_generic)
		crypto_free_cipher(ablkctx->aes_generic);
}

static int get_alg_config(struct algo_param *params,
			  unsigned int auth_size)
{
	switch (auth_size) {
	case SHA1_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
		params->result_size = SHA1_DIGEST_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
		params->result_size = SHA256_DIGEST_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
		params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
		params->result_size = SHA512_DIGEST_SIZE;
		break;
	default:
		pr_err("chcr : ERROR, unsupported digest size\n");
		return -EINVAL;
	}
	return 0;
}

static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
	crypto_free_shash(base_hash);
}

/**
 *	create_hash_wr - Create hash work request
 *	@req: hash request
 *	@param: hash work request parameters
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
				      struct hash_wr_param *param)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
	struct sk_buff *skb = NULL;
	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
	struct chcr_wr *chcr_req;
	struct ulptx_sgl *ulptx;
	unsigned int nents = 0, transhdr_len;
	unsigned int temp = 0;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(h_ctx(tfm)->dev);
	int error = 0;

	transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
	req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
				param->sg_len) <= SGE_MAX_WR_LEN;
	nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
			      CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
	nents += param->bfr_len ? 1 : 0;
	transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
				param->sg_len, 16) : (sgl_len(nents) * 8);
	transhdr_len = roundup(transhdr_len, 16);

	skb = alloc_skb(transhdr_len, flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);
	chcr_req = __skb_put_zero(skb, transhdr_len);

	chcr_req->sec_cpl.op_ivinsrtofst =
		FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
	chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

	chcr_req->sec_cpl.aadstart_cipherstop_hi =
		FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
	chcr_req->sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
					 param->opad_needed, 0);

	chcr_req->sec_cpl.ivgen_hdrlen =
		FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

	memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
	       param->alg_prm.result_size);

	if (param->opad_needed)
		memcpy(chcr_req->key_ctx.key +
		       ((param->alg_prm.result_size <= 32) ? 32 :
			CHCR_HASH_MAX_DIGEST_SIZE),
		       hmacctx->opad, param->alg_prm.result_size);

	chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
					param->alg_prm.mk_size, 0,
					param->opad_needed,
					((param->kctx_len +
					  sizeof(chcr_req->key_ctx)) >> 4));
	chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
	ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
				     DUMMY_BYTES);
	if (param->bfr_len != 0) {
		req_ctx->hctx_wr.dma_addr =
			dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
				       param->bfr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
				      req_ctx->hctx_wr.dma_addr)) {
			error = -ENOMEM;
			goto err;
		}
		req_ctx->hctx_wr.dma_len = param->bfr_len;
	} else {
		req_ctx->hctx_wr.dma_addr = 0;
	}
	chcr_add_hash_src_ent(req, ulptx, param);
	/* Request up to max wr size */
	temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
				(param->sg_len + param->bfr_len) : 0);
	atomic_inc(&adap->chcr_stats.digest_rqst);
	create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
		    param->hash_size, transhdr_len,
		    temp, 0);
	req_ctx->hctx_wr.skb = skb;
	return skb;
err:
	kfree_skb(skb);
	return ERR_PTR(error);
}
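
/*
 * chcr_ahash_update - buffer the data if it is smaller than one block,
 * otherwise hash as many full blocks as fit in a single work request and
 * carry the remainder over in the request context.
 */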
static int chcr_ahash_update(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct uld_ctx *u_ctx = NULL;
	struct sk_buff *skb;
	u8 remainder = 0, bs;
	unsigned int nbytes = req->nbytes;
	struct hash_wr_param params;
	int error, isfull = 0;

	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
	u_ctx = ULD_CTX(h_ctx(rtfm));
	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
					    h_ctx(rtfm)->tx_qidx))) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	if (nbytes + req_ctx->reqlen >= bs) {
		remainder = (nbytes + req_ctx->reqlen) % bs;
		nbytes = nbytes + req_ctx->reqlen - remainder;
	} else {
		sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
				   + req_ctx->reqlen, nbytes, 0);
		req_ctx->reqlen += nbytes;
		return 0;
	}
	chcr_init_hctx_per_wr(req_ctx);
	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
	if (error)
		return -ENOMEM;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
				     HASH_SPACE_LEFT(params.kctx_len), 0);
	if (params.sg_len > req->nbytes)
		params.sg_len = req->nbytes;
	params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
			req_ctx->reqlen;
	params.opad_needed = 0;
	params.more = 1;
	params.last = 0;
	params.bfr_len = req_ctx->reqlen;
	params.scmd1 = 0;
	req_ctx->hctx_wr.srcsg = req->src;

	params.hash_size = params.alg_prm.result_size;
	req_ctx->data_len += params.sg_len + params.bfr_len;
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb)) {
		error = PTR_ERR(skb);
		goto unmap;
	}

	req_ctx->hctx_wr.processed += params.sg_len;
	if (remainder) {
		/* Swap buffers */
		swap(req_ctx->reqbfr, req_ctx->skbfr);
		sg_pcopy_to_buffer(req->src, sg_nents(req->src),
				   req_ctx->reqbfr, remainder, req->nbytes -
				   remainder);
	}
	req_ctx->reqlen = remainder;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);

	return isfull ? -EBUSY : -EINPROGRESS;
unmap:
	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
	return error;
}

static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
	memset(bfr_ptr, 0, bs);
	*bfr_ptr = 0x80;
	if (bs == 64)
		*(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
	else
		*(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}
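
/*
 * chcr_ahash_final - flush whatever is left in the request buffer as the
 * last block; an empty buffer is handled by sending a pre-built final
 * padding block instead.
 */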
static int chcr_ahash_final(struct ahash_request *req)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
	struct hash_wr_param params;
	struct sk_buff *skb;
	struct uld_ctx *u_ctx = NULL;
	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

	chcr_init_hctx_per_wr(req_ctx);
	u_ctx = ULD_CTX(h_ctx(rtfm));
	if (is_hmac(crypto_ahash_tfm(rtfm)))
		params.opad_needed = 1;
	else
		params.opad_needed = 0;
	params.sg_len = 0;
	req_ctx->hctx_wr.isfinal = 1;
	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
	params.kctx_len = roundup(params.alg_prm.result_size, 16);
	if (is_hmac(crypto_ahash_tfm(rtfm))) {
		params.opad_needed = 1;
		params.kctx_len *= 2;
	} else {
		params.opad_needed = 0;
	}

	req_ctx->hctx_wr.result = 1;
	params.bfr_len = req_ctx->reqlen;
	req_ctx->data_len += params.bfr_len + params.sg_len;
	req_ctx->hctx_wr.srcsg = req->src;
	if (req_ctx->reqlen == 0) {
		create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
		params.last = 0;
		params.more = 1;
		params.scmd1 = 0;
		params.bfr_len = bs;
	} else {
		params.scmd1 = req_ctx->data_len;
		params.last = 1;
		params.more = 0;
	}
	params.hash_size = crypto_ahash_digestsize(rtfm);
	skb = create_hash_wr(req, &params);
	if (IS_ERR(skb))
		return PTR_ERR(skb);
	req_ctx->reqlen = 0;
	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
	chcr_send_wr(skb);
	return -EINPROGRESS;
}
  1519. static int chcr_ahash_finup(struct ahash_request *req)
  1520. {
  1521. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  1522. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
  1523. struct uld_ctx *u_ctx = NULL;
  1524. struct sk_buff *skb;
  1525. struct hash_wr_param params;
  1526. u8 bs;
  1527. int error, isfull = 0;
  1528. bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1529. u_ctx = ULD_CTX(h_ctx(rtfm));
  1530. if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
  1531. h_ctx(rtfm)->tx_qidx))) {
  1532. isfull = 1;
  1533. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  1534. return -ENOSPC;
  1535. }
  1536. chcr_init_hctx_per_wr(req_ctx);
  1537. error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
  1538. if (error)
  1539. return -ENOMEM;
  1540. get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
  1541. params.kctx_len = roundup(params.alg_prm.result_size, 16);
  1542. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1543. params.kctx_len *= 2;
  1544. params.opad_needed = 1;
  1545. } else {
  1546. params.opad_needed = 0;
  1547. }
  1548. params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
  1549. HASH_SPACE_LEFT(params.kctx_len), 0);
  1550. if (params.sg_len < req->nbytes) {
  1551. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1552. params.kctx_len /= 2;
  1553. params.opad_needed = 0;
  1554. }
  1555. params.last = 0;
  1556. params.more = 1;
  1557. params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
  1558. - req_ctx->reqlen;
  1559. params.hash_size = params.alg_prm.result_size;
  1560. params.scmd1 = 0;
  1561. } else {
  1562. params.last = 1;
  1563. params.more = 0;
  1564. params.sg_len = req->nbytes;
  1565. params.hash_size = crypto_ahash_digestsize(rtfm);
  1566. params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
  1567. params.sg_len;
  1568. }
  1569. params.bfr_len = req_ctx->reqlen;
  1570. req_ctx->data_len += params.bfr_len + params.sg_len;
  1571. req_ctx->hctx_wr.result = 1;
  1572. req_ctx->hctx_wr.srcsg = req->src;
  1573. if ((req_ctx->reqlen + req->nbytes) == 0) {
  1574. create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
  1575. params.last = 0;
  1576. params.more = 1;
  1577. params.scmd1 = 0;
  1578. params.bfr_len = bs;
  1579. }
  1580. skb = create_hash_wr(req, &params);
  1581. if (IS_ERR(skb)) {
  1582. error = PTR_ERR(skb);
  1583. goto unmap;
  1584. }
  1585. req_ctx->reqlen = 0;
  1586. req_ctx->hctx_wr.processed += params.sg_len;
  1587. skb->dev = u_ctx->lldi.ports[0];
  1588. set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
  1589. chcr_send_wr(skb);
  1590. return isfull ? -EBUSY : -EINPROGRESS;
  1591. unmap:
  1592. chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
  1593. return error;
  1594. }
  1595. static int chcr_ahash_digest(struct ahash_request *req)
  1596. {
  1597. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  1598. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
  1599. struct uld_ctx *u_ctx = NULL;
  1600. struct sk_buff *skb;
  1601. struct hash_wr_param params;
  1602. u8 bs;
  1603. int error, isfull = 0;
  1604. rtfm->init(req);
  1605. bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1606. u_ctx = ULD_CTX(h_ctx(rtfm));
  1607. if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
  1608. h_ctx(rtfm)->tx_qidx))) {
  1609. isfull = 1;
  1610. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  1611. return -ENOSPC;
  1612. }
  1613. chcr_init_hctx_per_wr(req_ctx);
  1614. error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
  1615. if (error)
  1616. return -ENOMEM;
  1617. get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
  1618. params.kctx_len = roundup(params.alg_prm.result_size, 16);
  1619. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1620. params.kctx_len *= 2;
  1621. params.opad_needed = 1;
  1622. } else {
  1623. params.opad_needed = 0;
  1624. }
  1625. params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
  1626. HASH_SPACE_LEFT(params.kctx_len), 0);
  1627. if (params.sg_len < req->nbytes) {
  1628. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1629. params.kctx_len /= 2;
  1630. params.opad_needed = 0;
  1631. }
  1632. params.last = 0;
  1633. params.more = 1;
  1634. params.scmd1 = 0;
  1635. params.sg_len = rounddown(params.sg_len, bs);
  1636. params.hash_size = params.alg_prm.result_size;
  1637. } else {
  1638. params.sg_len = req->nbytes;
  1639. params.hash_size = crypto_ahash_digestsize(rtfm);
  1640. params.last = 1;
  1641. params.more = 0;
  1642. params.scmd1 = req->nbytes + req_ctx->data_len;
  1643. }
  1644. params.bfr_len = 0;
  1645. req_ctx->hctx_wr.result = 1;
  1646. req_ctx->hctx_wr.srcsg = req->src;
  1647. req_ctx->data_len += params.bfr_len + params.sg_len;
  1648. if (req->nbytes == 0) {
  1649. create_last_hash_block(req_ctx->reqbfr, bs, 0);
  1650. params.more = 1;
  1651. params.bfr_len = bs;
  1652. }
  1653. skb = create_hash_wr(req, &params);
  1654. if (IS_ERR(skb)) {
  1655. error = PTR_ERR(skb);
  1656. goto unmap;
  1657. }
  1658. req_ctx->hctx_wr.processed += params.sg_len;
  1659. skb->dev = u_ctx->lldi.ports[0];
  1660. set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
  1661. chcr_send_wr(skb);
  1662. return isfull ? -EBUSY : -EINPROGRESS;
  1663. unmap:
  1664. chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
  1665. return error;
  1666. }
  1667. static int chcr_ahash_continue(struct ahash_request *req)
  1668. {
  1669. struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
  1670. struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
  1671. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
  1672. struct uld_ctx *u_ctx = NULL;
  1673. struct sk_buff *skb;
  1674. struct hash_wr_param params;
  1675. u8 bs;
  1676. int error;
  1677. bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1678. u_ctx = ULD_CTX(h_ctx(rtfm));
  1679. get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
  1680. params.kctx_len = roundup(params.alg_prm.result_size, 16);
  1681. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1682. params.kctx_len *= 2;
  1683. params.opad_needed = 1;
  1684. } else {
  1685. params.opad_needed = 0;
  1686. }
  1687. params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
  1688. HASH_SPACE_LEFT(params.kctx_len),
  1689. hctx_wr->src_ofst);
  1690. if ((params.sg_len + hctx_wr->processed) > req->nbytes)
  1691. params.sg_len = req->nbytes - hctx_wr->processed;
  1692. if (!hctx_wr->result ||
  1693. ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
  1694. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1695. params.kctx_len /= 2;
  1696. params.opad_needed = 0;
  1697. }
  1698. params.last = 0;
  1699. params.more = 1;
  1700. params.sg_len = rounddown(params.sg_len, bs);
  1701. params.hash_size = params.alg_prm.result_size;
  1702. params.scmd1 = 0;
  1703. } else {
  1704. params.last = 1;
  1705. params.more = 0;
  1706. params.hash_size = crypto_ahash_digestsize(rtfm);
  1707. params.scmd1 = reqctx->data_len + params.sg_len;
  1708. }
  1709. params.bfr_len = 0;
  1710. reqctx->data_len += params.sg_len;
  1711. skb = create_hash_wr(req, &params);
  1712. if (IS_ERR(skb)) {
  1713. error = PTR_ERR(skb);
  1714. goto err;
  1715. }
  1716. hctx_wr->processed += params.sg_len;
  1717. skb->dev = u_ctx->lldi.ports[0];
  1718. set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
  1719. chcr_send_wr(skb);
  1720. return 0;
  1721. err:
  1722. return error;
  1723. }
  1724. static inline void chcr_handle_ahash_resp(struct ahash_request *req,
  1725. unsigned char *input,
  1726. int err)
  1727. {
  1728. struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
  1729. struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
  1730. int digestsize, updated_digestsize;
  1731. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  1732. struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
  1733. if (input == NULL)
  1734. goto out;
  1735. digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
  1736. updated_digestsize = digestsize;
  1737. if (digestsize == SHA224_DIGEST_SIZE)
  1738. updated_digestsize = SHA256_DIGEST_SIZE;
  1739. else if (digestsize == SHA384_DIGEST_SIZE)
  1740. updated_digestsize = SHA512_DIGEST_SIZE;
  1741. if (hctx_wr->dma_addr) {
  1742. dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
  1743. hctx_wr->dma_len, DMA_TO_DEVICE);
  1744. hctx_wr->dma_addr = 0;
  1745. }
  1746. if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
  1747. req->nbytes)) {
  1748. if (hctx_wr->result == 1) {
  1749. hctx_wr->result = 0;
  1750. memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
  1751. digestsize);
  1752. } else {
  1753. memcpy(reqctx->partial_hash,
  1754. input + sizeof(struct cpl_fw6_pld),
  1755. updated_digestsize);
  1756. }
  1757. goto unmap;
  1758. }
  1759. memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
  1760. updated_digestsize);
  1761. err = chcr_ahash_continue(req);
  1762. if (err)
  1763. goto unmap;
  1764. return;
  1765. unmap:
  1766. if (hctx_wr->is_sg_map)
  1767. chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
  1768. out:
  1769. req->base.complete(&req->base, err);
  1770. }
/*
 *	chcr_handle_resp - Unmap the DMA buffers associated with the request
 *	@req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
		     int err)
{
	struct crypto_tfm *tfm = req->tfm;
	struct chcr_context *ctx = crypto_tfm_ctx(tfm);
	struct adapter *adap = padap(ctx->dev);

	switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		chcr_handle_aead_resp(aead_request_cast(req), input, err);
		break;

	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
					      input, err);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
	}
	atomic_inc(&adap->chcr_stats.complete);
	return err;
}

static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = out;

	state->reqlen = req_ctx->reqlen;
	state->data_len = req_ctx->data_len;
	memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
	memcpy(state->partial_hash, req_ctx->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	chcr_init_hctx_per_wr(state);
	return 0;
}

static int chcr_ahash_import(struct ahash_request *areq, const void *in)
{
	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;

	req_ctx->reqlen = state->reqlen;
	req_ctx->data_len = state->data_len;
	req_ctx->reqbfr = req_ctx->bfr1;
	req_ctx->skbfr = req_ctx->bfr2;
	memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
	memcpy(req_ctx->partial_hash, state->partial_hash,
	       CHCR_HASH_MAX_DIGEST_SIZE);
	chcr_init_hctx_per_wr(req_ctx);
	return 0;
}
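
/*
 * chcr_ahash_setkey - derive the HMAC ipad/opad partial hashes from the key
 * and store them in the transform context for use by later requests.
 */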
static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int i, err = 0, updated_digestsize;

	SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);

	/* use the key to calculate the ipad and opad. ipad will be sent with
	 * the first request's data. opad will be sent with the final hash
	 * result. ipad is kept in hmacctx->ipad and opad in hmacctx->opad.
	 */
	shash->tfm = hmacctx->base_hash;
	shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen,
					  hmacctx->ipad);
		if (err)
			goto out;
		keylen = digestsize;
	} else {
		memcpy(hmacctx->ipad, key, keylen);
	}
	memset(hmacctx->ipad + keylen, 0, bs - keylen);
	memcpy(hmacctx->opad, hmacctx->ipad, bs);

	for (i = 0; i < bs / sizeof(int); i++) {
		*((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
		*((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
	}

	updated_digestsize = digestsize;
	if (digestsize == SHA224_DIGEST_SIZE)
		updated_digestsize = SHA256_DIGEST_SIZE;
	else if (digestsize == SHA384_DIGEST_SIZE)
		updated_digestsize = SHA512_DIGEST_SIZE;
	err = chcr_compute_partial_hash(shash, hmacctx->ipad,
					hmacctx->ipad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->ipad, updated_digestsize);

	err = chcr_compute_partial_hash(shash, hmacctx->opad,
					hmacctx->opad, digestsize);
	if (err)
		goto out;
	chcr_change_order(hmacctx->opad, updated_digestsize);
out:
	return err;
}
  1868. static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
  1869. unsigned int key_len)
  1870. {
  1871. struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
  1872. unsigned short context_size = 0;
  1873. int err;
  1874. err = chcr_cipher_fallback_setkey(cipher, key, key_len);
  1875. if (err)
  1876. goto badkey_err;
  1877. memcpy(ablkctx->key, key, key_len);
  1878. ablkctx->enckey_len = key_len;
  1879. get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
  1880. context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
  1881. ablkctx->key_ctx_hdr =
  1882. FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
  1883. CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
  1884. CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
  1885. CHCR_KEYCTX_NO_KEY, 1,
  1886. 0, context_size);
  1887. ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
  1888. return 0;
  1889. badkey_err:
  1890. crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
  1891. ablkctx->enckey_len = 0;
  1892. return err;
  1893. }
  1894. static int chcr_sha_init(struct ahash_request *areq)
  1895. {
  1896. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
  1897. struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
  1898. int digestsize = crypto_ahash_digestsize(tfm);
  1899. req_ctx->data_len = 0;
  1900. req_ctx->reqlen = 0;
  1901. req_ctx->reqbfr = req_ctx->bfr1;
  1902. req_ctx->skbfr = req_ctx->bfr2;
  1903. copy_hash_init_values(req_ctx->partial_hash, digestsize);
  1904. return 0;
  1905. }
  1906. static int chcr_sha_cra_init(struct crypto_tfm *tfm)
  1907. {
  1908. crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
  1909. sizeof(struct chcr_ahash_req_ctx));
  1910. return chcr_device_init(crypto_tfm_ctx(tfm));
  1911. }
  1912. static int chcr_hmac_init(struct ahash_request *areq)
  1913. {
  1914. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
  1915. struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
  1916. struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
  1917. unsigned int digestsize = crypto_ahash_digestsize(rtfm);
  1918. unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
  1919. chcr_sha_init(areq);
  1920. req_ctx->data_len = bs;
  1921. if (is_hmac(crypto_ahash_tfm(rtfm))) {
  1922. if (digestsize == SHA224_DIGEST_SIZE)
  1923. memcpy(req_ctx->partial_hash, hmacctx->ipad,
  1924. SHA256_DIGEST_SIZE);
  1925. else if (digestsize == SHA384_DIGEST_SIZE)
  1926. memcpy(req_ctx->partial_hash, hmacctx->ipad,
  1927. SHA512_DIGEST_SIZE);
  1928. else
  1929. memcpy(req_ctx->partial_hash, hmacctx->ipad,
  1930. digestsize);
  1931. }
  1932. return 0;
  1933. }
  1934. static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
  1935. {
  1936. struct chcr_context *ctx = crypto_tfm_ctx(tfm);
  1937. struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
  1938. unsigned int digestsize =
  1939. crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
  1940. crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
  1941. sizeof(struct chcr_ahash_req_ctx));
  1942. hmacctx->base_hash = chcr_alloc_shash(digestsize);
  1943. if (IS_ERR(hmacctx->base_hash))
  1944. return PTR_ERR(hmacctx->base_hash);
  1945. return chcr_device_init(crypto_tfm_ctx(tfm));
  1946. }
  1947. static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
  1948. {
  1949. struct chcr_context *ctx = crypto_tfm_ctx(tfm);
  1950. struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
  1951. if (hmacctx->base_hash) {
  1952. chcr_free_shash(hmacctx->base_hash);
  1953. hmacctx->base_hash = NULL;
  1954. }
  1955. }
  1956. inline void chcr_aead_common_exit(struct aead_request *req)
  1957. {
  1958. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  1959. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1960. struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
  1961. chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
  1962. }
  1963. static int chcr_aead_common_init(struct aead_request *req)
  1964. {
  1965. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  1966. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  1967. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  1968. unsigned int authsize = crypto_aead_authsize(tfm);
  1969. int error = -EINVAL;
  1970. /* validate key size */
  1971. if (aeadctx->enckey_len == 0)
  1972. goto err;
  1973. if (reqctx->op && req->cryptlen < authsize)
  1974. goto err;
  1975. if (reqctx->b0_len)
  1976. reqctx->scratch_pad = reqctx->iv + IV;
  1977. else
  1978. reqctx->scratch_pad = NULL;
  1979. error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
  1980. reqctx->op);
  1981. if (error) {
  1982. error = -ENOMEM;
  1983. goto err;
  1984. }
  1985. reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
  1986. CHCR_SRC_SG_SIZE, 0);
  1987. reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
  1988. CHCR_SRC_SG_SIZE, req->assoclen);
  1989. return 0;
  1990. err:
  1991. return error;
  1992. }
  1993. static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
  1994. int aadmax, int wrlen,
  1995. unsigned short op_type)
  1996. {
  1997. unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
  1998. if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
  1999. dst_nents > MAX_DSGL_ENT ||
  2000. (req->assoclen > aadmax) ||
  2001. (wrlen > SGE_MAX_WR_LEN))
  2002. return 1;
  2003. return 0;
  2004. }
  2005. static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
  2006. {
  2007. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2008. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2009. struct aead_request *subreq = aead_request_ctx(req);
  2010. aead_request_set_tfm(subreq, aeadctx->sw_cipher);
  2011. aead_request_set_callback(subreq, req->base.flags,
  2012. req->base.complete, req->base.data);
  2013. aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
  2014. req->iv);
  2015. aead_request_set_ad(subreq, req->assoclen);
  2016. return op_type ? crypto_aead_decrypt(subreq) :
  2017. crypto_aead_encrypt(subreq);
  2018. }
  2019. static struct sk_buff *create_authenc_wr(struct aead_request *req,
  2020. unsigned short qid,
  2021. int size)
  2022. {
  2023. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2024. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2025. struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
  2026. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2027. struct sk_buff *skb = NULL;
  2028. struct chcr_wr *chcr_req;
  2029. struct cpl_rx_phys_dsgl *phys_cpl;
  2030. struct ulptx_sgl *ulptx;
  2031. unsigned int transhdr_len;
  2032. unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
  2033. unsigned int kctx_len = 0, dnents;
  2034. unsigned int assoclen = req->assoclen;
  2035. unsigned int authsize = crypto_aead_authsize(tfm);
  2036. int error = -EINVAL;
  2037. int null = 0;
  2038. gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
  2039. GFP_ATOMIC;
  2040. struct adapter *adap = padap(a_ctx(tfm)->dev);
  2041. if (req->cryptlen == 0)
  2042. return NULL;
  2043. reqctx->b0_len = 0;
  2044. error = chcr_aead_common_init(req);
  2045. if (error)
  2046. return ERR_PTR(error);
  2047. if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
  2048. subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
  2049. null = 1;
  2050. assoclen = 0;
  2051. reqctx->aad_nents = 0;
  2052. }
  2053. dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
  2054. dnents += sg_nents_xlen(req->dst, req->cryptlen +
  2055. (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE,
  2056. req->assoclen);
  2057. dnents += MIN_AUTH_SG; // For IV
  2058. dst_size = get_space_for_phys_dsgl(dnents);
  2059. kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
  2060. - sizeof(chcr_req->key_ctx);
  2061. transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
  2062. reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
  2063. SGE_MAX_WR_LEN;
  2064. temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
  2065. : (sgl_len(reqctx->src_nents + reqctx->aad_nents
  2066. + MIN_GCM_SG) * 8);
  2067. transhdr_len += temp;
  2068. transhdr_len = roundup(transhdr_len, 16);
  2069. if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
  2070. transhdr_len, reqctx->op)) {
  2071. atomic_inc(&adap->chcr_stats.fallback);
  2072. chcr_aead_common_exit(req);
  2073. return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
  2074. }
  2075. skb = alloc_skb(SGE_MAX_WR_LEN, flags);
  2076. if (!skb) {
  2077. error = -ENOMEM;
  2078. goto err;
  2079. }
  2080. chcr_req = __skb_put_zero(skb, transhdr_len);
  2081. temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
  2082. /*
  2083. * Input order is AAD,IV and Payload. where IV should be included as
  2084. * the part of authdata. All other fields should be filled according
  2085. * to the hardware spec
  2086. */
  2087. chcr_req->sec_cpl.op_ivinsrtofst =
  2088. FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
  2089. assoclen + 1);
  2090. chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
  2091. chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
  2092. assoclen ? 1 : 0, assoclen,
  2093. assoclen + IV + 1,
  2094. (temp & 0x1F0) >> 4);
  2095. chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
  2096. temp & 0xF,
  2097. null ? 0 : assoclen + IV + 1,
  2098. temp, temp);
  2099. if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
  2100. subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
  2101. temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
  2102. else
  2103. temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
  2104. chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
  2105. (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
  2106. temp,
  2107. actx->auth_mode, aeadctx->hmac_ctrl,
  2108. IV >> 1);
  2109. chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
  2110. 0, 0, dst_size);
  2111. chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
  2112. if (reqctx->op == CHCR_ENCRYPT_OP ||
  2113. subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
  2114. subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
  2115. memcpy(chcr_req->key_ctx.key, aeadctx->key,
  2116. aeadctx->enckey_len);
  2117. else
  2118. memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
  2119. aeadctx->enckey_len);
  2120. memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
  2121. actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
  2122. if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
  2123. subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
  2124. memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
  2125. memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
  2126. CTR_RFC3686_IV_SIZE);
  2127. *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
  2128. CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
  2129. } else {
  2130. memcpy(reqctx->iv, req->iv, IV);
  2131. }
  2132. phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
  2133. ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
  2134. chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
  2135. chcr_add_aead_src_ent(req, ulptx, assoclen);
  2136. atomic_inc(&adap->chcr_stats.cipher_rqst);
  2137. temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
  2138. kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
  2139. create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
  2140. transhdr_len, temp, 0);
  2141. reqctx->skb = skb;
  2142. return skb;
  2143. err:
  2144. chcr_aead_common_exit(req);
  2145. return ERR_PTR(error);
  2146. }
  2147. int chcr_aead_dma_map(struct device *dev,
  2148. struct aead_request *req,
  2149. unsigned short op_type)
  2150. {
  2151. int error;
  2152. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2153. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2154. unsigned int authsize = crypto_aead_authsize(tfm);
  2155. int dst_size;
  2156. dst_size = req->assoclen + req->cryptlen + (op_type ?
  2157. -authsize : authsize);
  2158. if (!req->cryptlen || !dst_size)
  2159. return 0;
  2160. reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
  2161. DMA_BIDIRECTIONAL);
  2162. if (dma_mapping_error(dev, reqctx->iv_dma))
  2163. return -ENOMEM;
  2164. if (reqctx->b0_len)
  2165. reqctx->b0_dma = reqctx->iv_dma + IV;
  2166. else
  2167. reqctx->b0_dma = 0;
  2168. if (req->src == req->dst) {
  2169. error = dma_map_sg(dev, req->src,
  2170. sg_nents_for_len(req->src, dst_size),
  2171. DMA_BIDIRECTIONAL);
  2172. if (!error)
  2173. goto err;
  2174. } else {
  2175. error = dma_map_sg(dev, req->src, sg_nents(req->src),
  2176. DMA_TO_DEVICE);
  2177. if (!error)
  2178. goto err;
  2179. error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
  2180. DMA_FROM_DEVICE);
  2181. if (!error) {
  2182. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2183. DMA_TO_DEVICE);
  2184. goto err;
  2185. }
  2186. }
  2187. return 0;
  2188. err:
  2189. dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
  2190. return -ENOMEM;
  2191. }
  2192. void chcr_aead_dma_unmap(struct device *dev,
  2193. struct aead_request *req,
  2194. unsigned short op_type)
  2195. {
  2196. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2197. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2198. unsigned int authsize = crypto_aead_authsize(tfm);
  2199. int dst_size;
  2200. dst_size = req->assoclen + req->cryptlen + (op_type ?
  2201. -authsize : authsize);
  2202. if (!req->cryptlen || !dst_size)
  2203. return;
  2204. dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
  2205. DMA_BIDIRECTIONAL);
  2206. if (req->src == req->dst) {
  2207. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2208. DMA_BIDIRECTIONAL);
  2209. } else {
  2210. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2211. DMA_TO_DEVICE);
  2212. dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
  2213. DMA_FROM_DEVICE);
  2214. }
  2215. }
  2216. void chcr_add_aead_src_ent(struct aead_request *req,
  2217. struct ulptx_sgl *ulptx,
  2218. unsigned int assoclen)
  2219. {
  2220. struct ulptx_walk ulp_walk;
  2221. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2222. if (reqctx->imm) {
  2223. u8 *buf = (u8 *)ulptx;
  2224. if (reqctx->b0_len) {
  2225. memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
  2226. buf += reqctx->b0_len;
  2227. }
  2228. sg_pcopy_to_buffer(req->src, sg_nents(req->src),
  2229. buf, assoclen, 0);
  2230. buf += assoclen;
  2231. memcpy(buf, reqctx->iv, IV);
  2232. buf += IV;
  2233. sg_pcopy_to_buffer(req->src, sg_nents(req->src),
  2234. buf, req->cryptlen, req->assoclen);
  2235. } else {
  2236. ulptx_walk_init(&ulp_walk, ulptx);
  2237. if (reqctx->b0_len)
  2238. ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
  2239. &reqctx->b0_dma);
  2240. ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
  2241. ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
  2242. ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
  2243. req->assoclen);
  2244. ulptx_walk_end(&ulp_walk);
  2245. }
  2246. }
  2247. void chcr_add_aead_dst_ent(struct aead_request *req,
  2248. struct cpl_rx_phys_dsgl *phys_cpl,
  2249. unsigned int assoclen,
  2250. unsigned short qid)
  2251. {
  2252. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2253. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2254. struct dsgl_walk dsgl_walk;
  2255. unsigned int authsize = crypto_aead_authsize(tfm);
  2256. struct chcr_context *ctx = a_ctx(tfm);
  2257. u32 temp;
  2258. dsgl_walk_init(&dsgl_walk, phys_cpl);
  2259. if (reqctx->b0_len)
  2260. dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
  2261. dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
  2262. dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
  2263. temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
  2264. dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
  2265. dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
  2266. }
  2267. void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
  2268. void *ulptx,
  2269. struct cipher_wr_param *wrparam)
  2270. {
  2271. struct ulptx_walk ulp_walk;
  2272. struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
  2273. u8 *buf = ulptx;
  2274. memcpy(buf, reqctx->iv, IV);
  2275. buf += IV;
  2276. if (reqctx->imm) {
  2277. sg_pcopy_to_buffer(req->src, sg_nents(req->src),
  2278. buf, wrparam->bytes, reqctx->processed);
  2279. } else {
  2280. ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
  2281. ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
  2282. reqctx->src_ofst);
  2283. reqctx->srcsg = ulp_walk.last_sg;
  2284. reqctx->src_ofst = ulp_walk.last_sg_len;
  2285. ulptx_walk_end(&ulp_walk);
  2286. }
  2287. }
  2288. void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
  2289. struct cpl_rx_phys_dsgl *phys_cpl,
  2290. struct cipher_wr_param *wrparam,
  2291. unsigned short qid)
  2292. {
  2293. struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
  2294. struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
  2295. struct chcr_context *ctx = c_ctx(tfm);
  2296. struct dsgl_walk dsgl_walk;
  2297. dsgl_walk_init(&dsgl_walk, phys_cpl);
  2298. dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
  2299. reqctx->dst_ofst);
  2300. reqctx->dstsg = dsgl_walk.last_sg;
  2301. reqctx->dst_ofst = dsgl_walk.last_sg_len;
  2302. dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
  2303. }
  2304. void chcr_add_hash_src_ent(struct ahash_request *req,
  2305. struct ulptx_sgl *ulptx,
  2306. struct hash_wr_param *param)
  2307. {
  2308. struct ulptx_walk ulp_walk;
  2309. struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
  2310. if (reqctx->hctx_wr.imm) {
  2311. u8 *buf = (u8 *)ulptx;
  2312. if (param->bfr_len) {
  2313. memcpy(buf, reqctx->reqbfr, param->bfr_len);
  2314. buf += param->bfr_len;
  2315. }
  2316. sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
  2317. sg_nents(reqctx->hctx_wr.srcsg), buf,
  2318. param->sg_len, 0);
  2319. } else {
  2320. ulptx_walk_init(&ulp_walk, ulptx);
  2321. if (param->bfr_len)
  2322. ulptx_walk_add_page(&ulp_walk, param->bfr_len,
  2323. &reqctx->hctx_wr.dma_addr);
  2324. ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
  2325. param->sg_len, reqctx->hctx_wr.src_ofst);
  2326. reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
  2327. reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
  2328. ulptx_walk_end(&ulp_walk);
  2329. }
  2330. }
  2331. int chcr_hash_dma_map(struct device *dev,
  2332. struct ahash_request *req)
  2333. {
  2334. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  2335. int error = 0;
  2336. if (!req->nbytes)
  2337. return 0;
  2338. error = dma_map_sg(dev, req->src, sg_nents(req->src),
  2339. DMA_TO_DEVICE);
  2340. if (!error)
  2341. return -ENOMEM;
  2342. req_ctx->hctx_wr.is_sg_map = 1;
  2343. return 0;
  2344. }
  2345. void chcr_hash_dma_unmap(struct device *dev,
  2346. struct ahash_request *req)
  2347. {
  2348. struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
  2349. if (!req->nbytes)
  2350. return;
  2351. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2352. DMA_TO_DEVICE);
  2353. req_ctx->hctx_wr.is_sg_map = 0;
  2354. }
  2355. int chcr_cipher_dma_map(struct device *dev,
  2356. struct ablkcipher_request *req)
  2357. {
  2358. int error;
  2359. if (req->src == req->dst) {
  2360. error = dma_map_sg(dev, req->src, sg_nents(req->src),
  2361. DMA_BIDIRECTIONAL);
  2362. if (!error)
  2363. goto err;
  2364. } else {
  2365. error = dma_map_sg(dev, req->src, sg_nents(req->src),
  2366. DMA_TO_DEVICE);
  2367. if (!error)
  2368. goto err;
  2369. error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
  2370. DMA_FROM_DEVICE);
  2371. if (!error) {
  2372. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2373. DMA_TO_DEVICE);
  2374. goto err;
  2375. }
  2376. }
  2377. return 0;
  2378. err:
  2379. return -ENOMEM;
  2380. }
  2381. void chcr_cipher_dma_unmap(struct device *dev,
  2382. struct ablkcipher_request *req)
  2383. {
  2384. if (req->src == req->dst) {
  2385. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2386. DMA_BIDIRECTIONAL);
  2387. } else {
  2388. dma_unmap_sg(dev, req->src, sg_nents(req->src),
  2389. DMA_TO_DEVICE);
  2390. dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
  2391. DMA_FROM_DEVICE);
  2392. }
  2393. }
  2394. static int set_msg_len(u8 *block, unsigned int msglen, int csize)
  2395. {
  2396. __be32 data;
  2397. memset(block, 0, csize);
  2398. block += csize;
  2399. if (csize >= 4)
  2400. csize = 4;
  2401. else if (msglen > (unsigned int)(1 << (8 * csize)))
  2402. return -EOVERFLOW;
  2403. data = cpu_to_be32(msglen);
  2404. memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
  2405. return 0;
  2406. }
  2407. static void generate_b0(struct aead_request *req,
  2408. struct chcr_aead_ctx *aeadctx,
  2409. unsigned short op_type)
  2410. {
  2411. unsigned int l, lp, m;
  2412. int rc;
  2413. struct crypto_aead *aead = crypto_aead_reqtfm(req);
  2414. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2415. u8 *b0 = reqctx->scratch_pad;
  2416. m = crypto_aead_authsize(aead);
  2417. memcpy(b0, reqctx->iv, 16);
  2418. lp = b0[0];
  2419. l = lp + 1;
  2420. /* set m, bits 3-5 */
  2421. *b0 |= (8 * ((m - 2) / 2));
  2422. /* set adata, bit 6, if associated data is used */
  2423. if (req->assoclen)
  2424. *b0 |= 64;
  2425. rc = set_msg_len(b0 + 16 - l,
  2426. (op_type == CHCR_DECRYPT_OP) ?
  2427. req->cryptlen - m : req->cryptlen, l);
  2428. }
  2429. static inline int crypto_ccm_check_iv(const u8 *iv)
  2430. {
  2431. /* 2 <= L <= 8, so 1 <= L' <= 7. */
  2432. if (iv[0] < 1 || iv[0] > 7)
  2433. return -EINVAL;
  2434. return 0;
  2435. }
  2436. static int ccm_format_packet(struct aead_request *req,
  2437. struct chcr_aead_ctx *aeadctx,
  2438. unsigned int sub_type,
  2439. unsigned short op_type,
  2440. unsigned int assoclen)
  2441. {
  2442. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2443. int rc = 0;
  2444. if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
  2445. reqctx->iv[0] = 3;
  2446. memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
  2447. memcpy(reqctx->iv + 4, req->iv, 8);
  2448. memset(reqctx->iv + 12, 0, 4);
  2449. } else {
  2450. memcpy(reqctx->iv, req->iv, 16);
  2451. }
  2452. if (assoclen)
  2453. *((unsigned short *)(reqctx->scratch_pad + 16)) =
  2454. htons(assoclen);
  2455. generate_b0(req, aeadctx, op_type);
  2456. /* zero the ctr value */
  2457. memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
  2458. return rc;
  2459. }
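
/*
 * fill_sec_cpl_for_aead - program the CPL_TX_SEC_PDU header for CCM/RFC4309
 * requests; the offsets account for the B0 block and the optional AAD
 * length field that precede the payload.
 */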
static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
				  unsigned int dst_size,
				  struct aead_request *req,
				  unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
	unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
	unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
	unsigned int ccm_xtra;
	unsigned int tag_offset = 0, auth_offset = 0;
	unsigned int assoclen;

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen = req->assoclen - 8;
	else
		assoclen = req->assoclen;
	ccm_xtra = CCM_B0_SIZE +
		((assoclen) ? CCM_AAD_FIELD_SIZE : 0);

	auth_offset = req->cryptlen ?
		(assoclen + IV + 1 + ccm_xtra) : 0;
	if (op_type == CHCR_DECRYPT_OP) {
		if (crypto_aead_authsize(tfm) != req->cryptlen)
			tag_offset = crypto_aead_authsize(tfm);
		else
			auth_offset = 0;
	}

	sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
					 2, assoclen + 1 + ccm_xtra);
	sec_cpl->pldlen =
		htonl(assoclen + IV + req->cryptlen + ccm_xtra);
	/* For CCM there will be b0 always. So AAD start will be 1 always */
	sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					1, assoclen + ccm_xtra, assoclen
					+ IV + 1 + ccm_xtra, 0);

	sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
					auth_offset, tag_offset,
					(op_type == CHCR_ENCRYPT_OP) ? 0 :
					crypto_aead_authsize(tfm));
	sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
					(op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
					cipher_mode, mac_mode,
					aeadctx->hmac_ctrl, IV >> 1);

	sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
					0, dst_size);
}
  2506. static int aead_ccm_validate_input(unsigned short op_type,
  2507. struct aead_request *req,
  2508. struct chcr_aead_ctx *aeadctx,
  2509. unsigned int sub_type)
  2510. {
  2511. if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
  2512. if (crypto_ccm_check_iv(req->iv)) {
  2513. pr_err("CCM: IV check fails\n");
  2514. return -EINVAL;
  2515. }
  2516. } else {
  2517. if (req->assoclen != 16 && req->assoclen != 20) {
  2518. pr_err("RFC4309: Invalid AAD length %d\n",
  2519. req->assoclen);
  2520. return -EINVAL;
  2521. }
  2522. }
  2523. return 0;
  2524. }
  2525. static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
  2526. unsigned short qid,
  2527. int size)
  2528. {
  2529. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2530. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2531. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2532. struct sk_buff *skb = NULL;
  2533. struct chcr_wr *chcr_req;
  2534. struct cpl_rx_phys_dsgl *phys_cpl;
  2535. struct ulptx_sgl *ulptx;
  2536. unsigned int transhdr_len;
  2537. unsigned int dst_size = 0, kctx_len, dnents, temp;
  2538. unsigned int sub_type, assoclen = req->assoclen;
  2539. unsigned int authsize = crypto_aead_authsize(tfm);
  2540. int error = -EINVAL;
  2541. gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
  2542. GFP_ATOMIC;
  2543. struct adapter *adap = padap(a_ctx(tfm)->dev);
  2544. sub_type = get_aead_subtype(tfm);
  2545. if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
  2546. assoclen -= 8;
  2547. reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
  2548. error = chcr_aead_common_init(req);
  2549. if (error)
  2550. return ERR_PTR(error);
  2551. error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
  2552. if (error)
  2553. goto err;
  2554. dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
  2555. dnents += sg_nents_xlen(req->dst, req->cryptlen
  2556. + (reqctx->op ? -authsize : authsize),
  2557. CHCR_DST_SG_SIZE, req->assoclen);
  2558. dnents += MIN_CCM_SG; // For IV and B0
  2559. dst_size = get_space_for_phys_dsgl(dnents);
  2560. kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
  2561. transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
  2562. reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
  2563. reqctx->b0_len) <= SGE_MAX_WR_LEN;
  2564. temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
  2565. reqctx->b0_len, 16) :
  2566. (sgl_len(reqctx->src_nents + reqctx->aad_nents +
  2567. MIN_CCM_SG) * 8);
  2568. transhdr_len += temp;
  2569. transhdr_len = roundup(transhdr_len, 16);
  2570. if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
  2571. reqctx->b0_len, transhdr_len, reqctx->op)) {
  2572. atomic_inc(&adap->chcr_stats.fallback);
  2573. chcr_aead_common_exit(req);
  2574. return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
  2575. }
  2576. skb = alloc_skb(SGE_MAX_WR_LEN, flags);
  2577. if (!skb) {
  2578. error = -ENOMEM;
  2579. goto err;
  2580. }
  2581. chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
  2582. fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
  2583. chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
  2584. memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
  2585. memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
  2586. aeadctx->key, aeadctx->enckey_len);
  2587. phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
  2588. ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
  2589. error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen);
  2590. if (error)
  2591. goto dstmap_fail;
  2592. chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
  2593. chcr_add_aead_src_ent(req, ulptx, assoclen);
  2594. atomic_inc(&adap->chcr_stats.aead_rqst);
  2595. temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
  2596. kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
  2597. reqctx->b0_len) : 0);
  2598. create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
  2599. transhdr_len, temp, 0);
  2600. reqctx->skb = skb;
  2601. return skb;
  2602. dstmap_fail:
  2603. kfree_skb(skb);
  2604. err:
  2605. chcr_aead_common_exit(req);
  2606. return ERR_PTR(error);
  2607. }
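/*
 * Build the work request skb for an AES-GCM (or RFC4106) AEAD request.
 * The 16-byte IV is assembled as SALT | IV | 0x00000001 for RFC4106 and
 * as IV | 0x00000001 for plain GCM, and the GHASH subkey H is placed in
 * the key context right after the AES key.
 */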
  2608. static struct sk_buff *create_gcm_wr(struct aead_request *req,
  2609. unsigned short qid,
  2610. int size)
  2611. {
  2612. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  2613. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2614. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  2615. struct sk_buff *skb = NULL;
  2616. struct chcr_wr *chcr_req;
  2617. struct cpl_rx_phys_dsgl *phys_cpl;
  2618. struct ulptx_sgl *ulptx;
  2619. unsigned int transhdr_len, dnents = 0;
  2620. unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
  2621. unsigned int authsize = crypto_aead_authsize(tfm);
  2622. int error = -EINVAL;
  2623. gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
  2624. GFP_ATOMIC;
  2625. struct adapter *adap = padap(a_ctx(tfm)->dev);
  2626. if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
  2627. assoclen = req->assoclen - 8;
  2628. reqctx->b0_len = 0;
  2629. error = chcr_aead_common_init(req);
  2630. if (error)
  2631. return ERR_PTR(error);
  2632. dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
  2633. dnents += sg_nents_xlen(req->dst, req->cryptlen +
  2634. (reqctx->op ? -authsize : authsize),
  2635. CHCR_DST_SG_SIZE, req->assoclen);
  2636. dnents += MIN_GCM_SG; // For IV
  2637. dst_size = get_space_for_phys_dsgl(dnents);
  2638. kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
  2639. transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
  2640. reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
  2641. SGE_MAX_WR_LEN;
  2642. temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
  2643. (sgl_len(reqctx->src_nents +
  2644. reqctx->aad_nents + MIN_GCM_SG) * 8);
  2645. transhdr_len += temp;
  2646. transhdr_len = roundup(transhdr_len, 16);
  2647. if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
  2648. transhdr_len, reqctx->op)) {
  2649. atomic_inc(&adap->chcr_stats.fallback);
  2650. chcr_aead_common_exit(req);
  2651. return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
  2652. }
  2653. skb = alloc_skb(SGE_MAX_WR_LEN, flags);
  2654. if (!skb) {
  2655. error = -ENOMEM;
  2656. goto err;
  2657. }
  2658. chcr_req = __skb_put_zero(skb, transhdr_len);
2659. /* Offset of tag from end */
  2660. temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
  2661. chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
  2662. a_ctx(tfm)->dev->rx_channel_id, 2,
  2663. (assoclen + 1));
  2664. chcr_req->sec_cpl.pldlen =
  2665. htonl(assoclen + IV + req->cryptlen);
  2666. chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
  2667. assoclen ? 1 : 0, assoclen,
  2668. assoclen + IV + 1, 0);
  2669. chcr_req->sec_cpl.cipherstop_lo_authinsert =
  2670. FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
  2671. temp, temp);
  2672. chcr_req->sec_cpl.seqno_numivs =
  2673. FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
  2674. CHCR_ENCRYPT_OP) ? 1 : 0,
  2675. CHCR_SCMD_CIPHER_MODE_AES_GCM,
  2676. CHCR_SCMD_AUTH_MODE_GHASH,
  2677. aeadctx->hmac_ctrl, IV >> 1);
  2678. chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
  2679. 0, 0, dst_size);
  2680. chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
  2681. memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
  2682. memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
  2683. GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
  2684. /* prepare a 16 byte iv */
  2685. /* S A L T | IV | 0x00000001 */
  2686. if (get_aead_subtype(tfm) ==
  2687. CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
  2688. memcpy(reqctx->iv, aeadctx->salt, 4);
  2689. memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
  2690. } else {
  2691. memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
  2692. }
  2693. *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
  2694. phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
  2695. ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
  2696. chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
  2697. chcr_add_aead_src_ent(req, ulptx, assoclen);
  2698. atomic_inc(&adap->chcr_stats.aead_rqst);
  2699. temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
  2700. kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
  2701. create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
  2702. transhdr_len, temp, reqctx->verify);
  2703. reqctx->skb = skb;
  2704. return skb;
  2705. err:
  2706. chcr_aead_common_exit(req);
  2707. return ERR_PTR(error);
  2708. }
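/*
 * Allocate the software fallback AEAD for this transform and size the
 * request context so it can hold either the driver's own context or a
 * request for the fallback cipher, whichever is larger.
 */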
  2709. static int chcr_aead_cra_init(struct crypto_aead *tfm)
  2710. {
  2711. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2712. struct aead_alg *alg = crypto_aead_alg(tfm);
  2713. aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
  2714. CRYPTO_ALG_NEED_FALLBACK |
  2715. CRYPTO_ALG_ASYNC);
  2716. if (IS_ERR(aeadctx->sw_cipher))
  2717. return PTR_ERR(aeadctx->sw_cipher);
  2718. crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
  2719. sizeof(struct aead_request) +
  2720. crypto_aead_reqsize(aeadctx->sw_cipher)));
  2721. return chcr_device_init(a_ctx(tfm));
  2722. }
  2723. static void chcr_aead_cra_exit(struct crypto_aead *tfm)
  2724. {
  2725. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2726. crypto_free_aead(aeadctx->sw_cipher);
  2727. }
  2728. static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
  2729. unsigned int authsize)
  2730. {
  2731. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2732. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
  2733. aeadctx->mayverify = VERIFY_HW;
  2734. return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
  2735. }
  2736. static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
  2737. unsigned int authsize)
  2738. {
  2739. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2740. u32 maxauth = crypto_aead_maxauthsize(tfm);
2741. /* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2 does
2742. * not hold for SHA1, so the authsize == 12 check must come before the
2743. * authsize == (maxauth >> 1) check.
2744. */
  2745. if (authsize == ICV_4) {
  2746. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
  2747. aeadctx->mayverify = VERIFY_HW;
  2748. } else if (authsize == ICV_6) {
  2749. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
  2750. aeadctx->mayverify = VERIFY_HW;
  2751. } else if (authsize == ICV_10) {
  2752. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
  2753. aeadctx->mayverify = VERIFY_HW;
  2754. } else if (authsize == ICV_12) {
  2755. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
  2756. aeadctx->mayverify = VERIFY_HW;
  2757. } else if (authsize == ICV_14) {
  2758. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
  2759. aeadctx->mayverify = VERIFY_HW;
  2760. } else if (authsize == (maxauth >> 1)) {
  2761. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
  2762. aeadctx->mayverify = VERIFY_HW;
  2763. } else if (authsize == maxauth) {
  2764. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
  2765. aeadctx->mayverify = VERIFY_HW;
  2766. } else {
  2767. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
  2768. aeadctx->mayverify = VERIFY_SW;
  2769. }
  2770. return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
  2771. }
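/*
 * The setauthsize handlers map the requested ICV length to the hardware
 * HMAC_CTRL truncation code; lengths the hardware cannot produce
 * directly (e.g. 13 and 15 bytes for GCM) are verified in software.
 */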
  2772. static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
  2773. {
  2774. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2775. switch (authsize) {
  2776. case ICV_4:
  2777. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
  2778. aeadctx->mayverify = VERIFY_HW;
  2779. break;
  2780. case ICV_8:
  2781. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
  2782. aeadctx->mayverify = VERIFY_HW;
  2783. break;
  2784. case ICV_12:
  2785. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
  2786. aeadctx->mayverify = VERIFY_HW;
  2787. break;
  2788. case ICV_14:
  2789. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
  2790. aeadctx->mayverify = VERIFY_HW;
  2791. break;
  2792. case ICV_16:
  2793. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
  2794. aeadctx->mayverify = VERIFY_HW;
  2795. break;
  2796. case ICV_13:
  2797. case ICV_15:
  2798. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
  2799. aeadctx->mayverify = VERIFY_SW;
  2800. break;
  2801. default:
  2802. return -EINVAL;
  2803. }
  2804. return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
  2805. }
  2806. static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
  2807. unsigned int authsize)
  2808. {
  2809. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2810. switch (authsize) {
  2811. case ICV_8:
  2812. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
  2813. aeadctx->mayverify = VERIFY_HW;
  2814. break;
  2815. case ICV_12:
  2816. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
  2817. aeadctx->mayverify = VERIFY_HW;
  2818. break;
  2819. case ICV_16:
  2820. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
  2821. aeadctx->mayverify = VERIFY_HW;
  2822. break;
  2823. default:
  2824. return -EINVAL;
  2825. }
  2826. return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
  2827. }
  2828. static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
  2829. unsigned int authsize)
  2830. {
  2831. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  2832. switch (authsize) {
  2833. case ICV_4:
  2834. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
  2835. aeadctx->mayverify = VERIFY_HW;
  2836. break;
  2837. case ICV_6:
  2838. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
  2839. aeadctx->mayverify = VERIFY_HW;
  2840. break;
  2841. case ICV_8:
  2842. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
  2843. aeadctx->mayverify = VERIFY_HW;
  2844. break;
  2845. case ICV_10:
  2846. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
  2847. aeadctx->mayverify = VERIFY_HW;
  2848. break;
  2849. case ICV_12:
  2850. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
  2851. aeadctx->mayverify = VERIFY_HW;
  2852. break;
  2853. case ICV_14:
  2854. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
  2855. aeadctx->mayverify = VERIFY_HW;
  2856. break;
  2857. case ICV_16:
  2858. aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
  2859. aeadctx->mayverify = VERIFY_HW;
  2860. break;
  2861. default:
  2862. return -EINVAL;
  2863. }
  2864. return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
  2865. }
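/*
 * Program the AES key for CCM: only 128/192/256-bit keys are accepted,
 * and the key context header encodes both the cipher and MAC key sizes.
 */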
  2866. static int chcr_ccm_common_setkey(struct crypto_aead *aead,
  2867. const u8 *key,
  2868. unsigned int keylen)
  2869. {
  2870. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
  2871. unsigned char ck_size, mk_size;
  2872. int key_ctx_size = 0;
  2873. key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
  2874. if (keylen == AES_KEYSIZE_128) {
  2875. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
  2876. mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
  2877. } else if (keylen == AES_KEYSIZE_192) {
  2878. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
  2879. mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
  2880. } else if (keylen == AES_KEYSIZE_256) {
  2881. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
  2882. mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
  2883. } else {
  2884. crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
  2885. aeadctx->enckey_len = 0;
  2886. return -EINVAL;
  2887. }
  2888. aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
  2889. key_ctx_size >> 4);
  2890. memcpy(aeadctx->key, key, keylen);
  2891. aeadctx->enckey_len = keylen;
  2892. return 0;
  2893. }
  2894. static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
  2895. const u8 *key,
  2896. unsigned int keylen)
  2897. {
  2898. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
  2899. int error;
  2900. crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
  2901. crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
  2902. CRYPTO_TFM_REQ_MASK);
  2903. error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
  2904. crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
  2905. crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
  2906. CRYPTO_TFM_RES_MASK);
  2907. if (error)
  2908. return error;
  2909. return chcr_ccm_common_setkey(aead, key, keylen);
  2910. }
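/*
 * RFC4309 keys carry a 3-byte nonce (salt) after the AES key; strip it
 * off, save it, and program the remaining key as for plain CCM.
 */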
  2911. static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
  2912. unsigned int keylen)
  2913. {
  2914. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
  2915. int error;
  2916. if (keylen < 3) {
  2917. crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
  2918. aeadctx->enckey_len = 0;
  2919. return -EINVAL;
  2920. }
  2921. crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
  2922. crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
  2923. CRYPTO_TFM_REQ_MASK);
  2924. error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
  2925. crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
  2926. crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
  2927. CRYPTO_TFM_RES_MASK);
  2928. if (error)
  2929. return error;
  2930. keylen -= 3;
  2931. memcpy(aeadctx->salt, key + keylen, 3);
  2932. return chcr_ccm_common_setkey(aead, key, keylen);
  2933. }
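/*
 * Program a GCM key: set the same key on the software fallback, strip
 * the 4-byte RFC4106 salt if present, and derive the GHASH subkey
 * H = AES_K(0 repeated 16 times) with a software AES cipher so it can
 * be copied into the key context.
 */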
  2934. static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
  2935. unsigned int keylen)
  2936. {
  2937. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
  2938. struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
  2939. struct crypto_cipher *cipher;
  2940. unsigned int ck_size;
  2941. int ret = 0, key_ctx_size = 0;
  2942. aeadctx->enckey_len = 0;
  2943. crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
  2944. crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
  2945. & CRYPTO_TFM_REQ_MASK);
  2946. ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
  2947. crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
  2948. crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
  2949. CRYPTO_TFM_RES_MASK);
  2950. if (ret)
  2951. goto out;
  2952. if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
  2953. keylen > 3) {
  2954. keylen -= 4; /* nonce/salt is present in the last 4 bytes */
  2955. memcpy(aeadctx->salt, key + keylen, 4);
  2956. }
  2957. if (keylen == AES_KEYSIZE_128) {
  2958. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
  2959. } else if (keylen == AES_KEYSIZE_192) {
  2960. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
  2961. } else if (keylen == AES_KEYSIZE_256) {
  2962. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
  2963. } else {
  2964. crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
  2965. pr_err("GCM: Invalid key length %d\n", keylen);
  2966. ret = -EINVAL;
  2967. goto out;
  2968. }
  2969. memcpy(aeadctx->key, key, keylen);
  2970. aeadctx->enckey_len = keylen;
  2971. key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
  2972. AEAD_H_SIZE;
  2973. aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
  2974. CHCR_KEYCTX_MAC_KEY_SIZE_128,
  2975. 0, 0,
  2976. key_ctx_size >> 4);
  2977. /* Calculate the H = CIPH(K, 0 repeated 16 times).
2978. * It is copied into the key context right after the AES key.
  2979. */
  2980. cipher = crypto_alloc_cipher("aes-generic", 0, 0);
  2981. if (IS_ERR(cipher)) {
  2982. aeadctx->enckey_len = 0;
  2983. ret = -ENOMEM;
  2984. goto out;
  2985. }
  2986. ret = crypto_cipher_setkey(cipher, key, keylen);
  2987. if (ret) {
  2988. aeadctx->enckey_len = 0;
  2989. goto out1;
  2990. }
  2991. memset(gctx->ghash_h, 0, AEAD_H_SIZE);
  2992. crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
  2993. out1:
  2994. crypto_free_cipher(cipher);
  2995. out:
  2996. return ret;
  2997. }
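/*
 * Program an authenc(hmac(shaX),cbc/ctr(aes)) key: split it into the
 * cipher and authentication parts, save the cipher key (and the RFC3686
 * nonce for the CTR variants), and precompute the partial ipad/opad
 * hashes that stand in for the raw auth key in the key context.
 */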
  2998. static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
  2999. unsigned int keylen)
  3000. {
  3001. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
  3002. struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3003. /* the key blob contains both the authentication and the cipher key */
  3004. struct crypto_authenc_keys keys;
  3005. unsigned int bs, subtype;
  3006. unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
  3007. int err = 0, i, key_ctx_len = 0;
  3008. unsigned char ck_size = 0;
  3009. unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
  3010. struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
  3011. struct algo_param param;
  3012. int align;
  3013. u8 *o_ptr = NULL;
  3014. crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
  3015. crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
  3016. & CRYPTO_TFM_REQ_MASK);
  3017. err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
  3018. crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
  3019. crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
  3020. & CRYPTO_TFM_RES_MASK);
  3021. if (err)
  3022. goto out;
  3023. if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
  3024. crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
  3025. goto out;
  3026. }
  3027. if (get_alg_config(&param, max_authsize)) {
  3028. pr_err("chcr : Unsupported digest size\n");
  3029. goto out;
  3030. }
  3031. subtype = get_aead_subtype(authenc);
  3032. if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
  3033. subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
  3034. if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
  3035. goto out;
  3036. memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
  3037. - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
  3038. keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
  3039. }
  3040. if (keys.enckeylen == AES_KEYSIZE_128) {
  3041. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
  3042. } else if (keys.enckeylen == AES_KEYSIZE_192) {
  3043. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
  3044. } else if (keys.enckeylen == AES_KEYSIZE_256) {
  3045. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
  3046. } else {
  3047. pr_err("chcr : Unsupported cipher key\n");
  3048. goto out;
  3049. }
3050. /* Copy only the encryption key. The authkey is used here to generate
3051. * h(ipad) and h(opad), so it is not needed again afterwards.
3052. * authkeylen is the size of the hash digest.
3053. */
  3054. memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
  3055. aeadctx->enckey_len = keys.enckeylen;
  3056. if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
  3057. subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
  3058. get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
  3059. aeadctx->enckey_len << 3);
  3060. }
  3061. base_hash = chcr_alloc_shash(max_authsize);
  3062. if (IS_ERR(base_hash)) {
  3063. pr_err("chcr : Base driver cannot be loaded\n");
  3064. aeadctx->enckey_len = 0;
  3065. memzero_explicit(&keys, sizeof(keys));
  3066. return -EINVAL;
  3067. }
  3068. {
  3069. SHASH_DESC_ON_STACK(shash, base_hash);
  3070. shash->tfm = base_hash;
  3071. shash->flags = crypto_shash_get_flags(base_hash);
  3072. bs = crypto_shash_blocksize(base_hash);
  3073. align = KEYCTX_ALIGN_PAD(max_authsize);
  3074. o_ptr = actx->h_iopad + param.result_size + align;
  3075. if (keys.authkeylen > bs) {
  3076. err = crypto_shash_digest(shash, keys.authkey,
  3077. keys.authkeylen,
  3078. o_ptr);
  3079. if (err) {
  3080. pr_err("chcr : Base driver cannot be loaded\n");
  3081. goto out;
  3082. }
  3083. keys.authkeylen = max_authsize;
  3084. } else
  3085. memcpy(o_ptr, keys.authkey, keys.authkeylen);
3086. /* Compute the ipad-digest */
  3087. memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
  3088. memcpy(pad, o_ptr, keys.authkeylen);
  3089. for (i = 0; i < bs >> 2; i++)
  3090. *((unsigned int *)pad + i) ^= IPAD_DATA;
  3091. if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
  3092. max_authsize))
  3093. goto out;
  3094. /* Compute the opad-digest */
  3095. memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
  3096. memcpy(pad, o_ptr, keys.authkeylen);
  3097. for (i = 0; i < bs >> 2; i++)
  3098. *((unsigned int *)pad + i) ^= OPAD_DATA;
  3099. if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
  3100. goto out;
  3101. /* convert the ipad and opad digest to network order */
  3102. chcr_change_order(actx->h_iopad, param.result_size);
  3103. chcr_change_order(o_ptr, param.result_size);
  3104. key_ctx_len = sizeof(struct _key_ctx) +
  3105. roundup(keys.enckeylen, 16) +
  3106. (param.result_size + align) * 2;
  3107. aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
  3108. 0, 1, key_ctx_len >> 4);
  3109. actx->auth_mode = param.auth_mode;
  3110. chcr_free_shash(base_hash);
  3111. memzero_explicit(&keys, sizeof(keys));
  3112. return 0;
  3113. }
  3114. out:
  3115. aeadctx->enckey_len = 0;
  3116. memzero_explicit(&keys, sizeof(keys));
  3117. if (!IS_ERR(base_hash))
  3118. chcr_free_shash(base_hash);
  3119. return -EINVAL;
  3120. }
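/*
 * Setkey for the authenc(digest_null,...) variants: only the cipher key
 * is programmed and the auth mode is set to NOP.
 */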
  3121. static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
  3122. const u8 *key, unsigned int keylen)
  3123. {
  3124. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
  3125. struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
  3126. struct crypto_authenc_keys keys;
  3127. int err;
3128. /* the key blob contains both the authentication and the cipher key */
  3129. unsigned int subtype;
  3130. int key_ctx_len = 0;
  3131. unsigned char ck_size = 0;
  3132. crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
  3133. crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
  3134. & CRYPTO_TFM_REQ_MASK);
  3135. err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
  3136. crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
  3137. crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
  3138. & CRYPTO_TFM_RES_MASK);
  3139. if (err)
  3140. goto out;
  3141. if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
  3142. crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
  3143. goto out;
  3144. }
  3145. subtype = get_aead_subtype(authenc);
  3146. if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
  3147. subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
  3148. if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
  3149. goto out;
  3150. memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
  3151. - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
  3152. keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
  3153. }
  3154. if (keys.enckeylen == AES_KEYSIZE_128) {
  3155. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
  3156. } else if (keys.enckeylen == AES_KEYSIZE_192) {
  3157. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
  3158. } else if (keys.enckeylen == AES_KEYSIZE_256) {
  3159. ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
  3160. } else {
  3161. pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
  3162. goto out;
  3163. }
  3164. memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
  3165. aeadctx->enckey_len = keys.enckeylen;
  3166. if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
  3167. subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
  3168. get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
  3169. aeadctx->enckey_len << 3);
  3170. }
  3171. key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
  3172. aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
  3173. 0, key_ctx_len >> 4);
  3174. actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
  3175. memzero_explicit(&keys, sizeof(keys));
  3176. return 0;
  3177. out:
  3178. aeadctx->enckey_len = 0;
  3179. memzero_explicit(&keys, sizeof(keys));
  3180. return -EINVAL;
  3181. }
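/*
 * Common submission path for AEAD requests: check that a device is
 * bound, honour the queue-full/backlog semantics, build the work
 * request with the supplied callback and send it on the selected
 * transmit queue.
 */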
  3182. static int chcr_aead_op(struct aead_request *req,
  3183. int size,
  3184. create_wr_t create_wr_fn)
  3185. {
  3186. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  3187. struct uld_ctx *u_ctx;
  3188. struct sk_buff *skb;
  3189. int isfull = 0;
  3190. if (!a_ctx(tfm)->dev) {
  3191. pr_err("chcr : %s : No crypto device.\n", __func__);
  3192. return -ENXIO;
  3193. }
  3194. u_ctx = ULD_CTX(a_ctx(tfm));
  3195. if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
  3196. a_ctx(tfm)->tx_qidx)) {
  3197. isfull = 1;
  3198. if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
  3199. return -ENOSPC;
  3200. }
  3201. /* Form a WR from req */
  3202. skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
  3203. if (IS_ERR(skb) || !skb)
  3204. return PTR_ERR(skb);
  3205. skb->dev = u_ctx->lldi.ports[0];
  3206. set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
  3207. chcr_send_wr(skb);
  3208. return isfull ? -EBUSY : -EINPROGRESS;
  3209. }
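/*
 * Entry points from the crypto API: pick the work-request builder that
 * matches the algorithm sub-type and submit the request. Decrypt also
 * chooses between hardware and software tag verification according to
 * how the auth size was configured.
 */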
  3210. static int chcr_aead_encrypt(struct aead_request *req)
  3211. {
  3212. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  3213. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  3214. reqctx->verify = VERIFY_HW;
  3215. reqctx->op = CHCR_ENCRYPT_OP;
  3216. switch (get_aead_subtype(tfm)) {
  3217. case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
  3218. case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
  3219. case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
  3220. case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
  3221. return chcr_aead_op(req, 0, create_authenc_wr);
  3222. case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
  3223. case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
  3224. return chcr_aead_op(req, 0, create_aead_ccm_wr);
  3225. default:
  3226. return chcr_aead_op(req, 0, create_gcm_wr);
  3227. }
  3228. }
  3229. static int chcr_aead_decrypt(struct aead_request *req)
  3230. {
  3231. struct crypto_aead *tfm = crypto_aead_reqtfm(req);
  3232. struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
  3233. struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
  3234. int size;
  3235. if (aeadctx->mayverify == VERIFY_SW) {
  3236. size = crypto_aead_maxauthsize(tfm);
  3237. reqctx->verify = VERIFY_SW;
  3238. } else {
  3239. size = 0;
  3240. reqctx->verify = VERIFY_HW;
  3241. }
  3242. reqctx->op = CHCR_DECRYPT_OP;
  3243. switch (get_aead_subtype(tfm)) {
  3244. case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
  3245. case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
  3246. case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
  3247. case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
  3248. return chcr_aead_op(req, size, create_authenc_wr);
  3249. case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
  3250. case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
  3251. return chcr_aead_op(req, size, create_aead_ccm_wr);
  3252. default:
  3253. return chcr_aead_op(req, size, create_gcm_wr);
  3254. }
  3255. }
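/*
 * Table of every cipher, hash, HMAC and AEAD algorithm the driver
 * exposes; chcr_register_alg() walks it and fills in the common
 * callbacks before registering each entry.
 */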
  3256. static struct chcr_alg_template driver_algs[] = {
  3257. /* AES-CBC */
  3258. {
  3259. .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
  3260. .is_registered = 0,
  3261. .alg.crypto = {
  3262. .cra_name = "cbc(aes)",
  3263. .cra_driver_name = "cbc-aes-chcr",
  3264. .cra_blocksize = AES_BLOCK_SIZE,
  3265. .cra_init = chcr_cra_init,
  3266. .cra_exit = chcr_cra_exit,
  3267. .cra_u.ablkcipher = {
  3268. .min_keysize = AES_MIN_KEY_SIZE,
  3269. .max_keysize = AES_MAX_KEY_SIZE,
  3270. .ivsize = AES_BLOCK_SIZE,
  3271. .setkey = chcr_aes_cbc_setkey,
  3272. .encrypt = chcr_aes_encrypt,
  3273. .decrypt = chcr_aes_decrypt,
  3274. }
  3275. }
  3276. },
  3277. {
  3278. .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
  3279. .is_registered = 0,
  3280. .alg.crypto = {
  3281. .cra_name = "xts(aes)",
  3282. .cra_driver_name = "xts-aes-chcr",
  3283. .cra_blocksize = AES_BLOCK_SIZE,
  3284. .cra_init = chcr_cra_init,
  3285. .cra_exit = NULL,
  3286. .cra_u .ablkcipher = {
  3287. .min_keysize = 2 * AES_MIN_KEY_SIZE,
  3288. .max_keysize = 2 * AES_MAX_KEY_SIZE,
  3289. .ivsize = AES_BLOCK_SIZE,
  3290. .setkey = chcr_aes_xts_setkey,
  3291. .encrypt = chcr_aes_encrypt,
  3292. .decrypt = chcr_aes_decrypt,
  3293. }
  3294. }
  3295. },
  3296. {
  3297. .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
  3298. .is_registered = 0,
  3299. .alg.crypto = {
  3300. .cra_name = "ctr(aes)",
  3301. .cra_driver_name = "ctr-aes-chcr",
  3302. .cra_blocksize = 1,
  3303. .cra_init = chcr_cra_init,
  3304. .cra_exit = chcr_cra_exit,
  3305. .cra_u.ablkcipher = {
  3306. .min_keysize = AES_MIN_KEY_SIZE,
  3307. .max_keysize = AES_MAX_KEY_SIZE,
  3308. .ivsize = AES_BLOCK_SIZE,
  3309. .setkey = chcr_aes_ctr_setkey,
  3310. .encrypt = chcr_aes_encrypt,
  3311. .decrypt = chcr_aes_decrypt,
  3312. }
  3313. }
  3314. },
  3315. {
  3316. .type = CRYPTO_ALG_TYPE_ABLKCIPHER |
  3317. CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
  3318. .is_registered = 0,
  3319. .alg.crypto = {
  3320. .cra_name = "rfc3686(ctr(aes))",
  3321. .cra_driver_name = "rfc3686-ctr-aes-chcr",
  3322. .cra_blocksize = 1,
  3323. .cra_init = chcr_rfc3686_init,
  3324. .cra_exit = chcr_cra_exit,
  3325. .cra_u.ablkcipher = {
  3326. .min_keysize = AES_MIN_KEY_SIZE +
  3327. CTR_RFC3686_NONCE_SIZE,
  3328. .max_keysize = AES_MAX_KEY_SIZE +
  3329. CTR_RFC3686_NONCE_SIZE,
  3330. .ivsize = CTR_RFC3686_IV_SIZE,
  3331. .setkey = chcr_aes_rfc3686_setkey,
  3332. .encrypt = chcr_aes_encrypt,
  3333. .decrypt = chcr_aes_decrypt,
  3334. .geniv = "seqiv",
  3335. }
  3336. }
  3337. },
  3338. /* SHA */
  3339. {
  3340. .type = CRYPTO_ALG_TYPE_AHASH,
  3341. .is_registered = 0,
  3342. .alg.hash = {
  3343. .halg.digestsize = SHA1_DIGEST_SIZE,
  3344. .halg.base = {
  3345. .cra_name = "sha1",
  3346. .cra_driver_name = "sha1-chcr",
  3347. .cra_blocksize = SHA1_BLOCK_SIZE,
  3348. }
  3349. }
  3350. },
  3351. {
  3352. .type = CRYPTO_ALG_TYPE_AHASH,
  3353. .is_registered = 0,
  3354. .alg.hash = {
  3355. .halg.digestsize = SHA256_DIGEST_SIZE,
  3356. .halg.base = {
  3357. .cra_name = "sha256",
  3358. .cra_driver_name = "sha256-chcr",
  3359. .cra_blocksize = SHA256_BLOCK_SIZE,
  3360. }
  3361. }
  3362. },
  3363. {
  3364. .type = CRYPTO_ALG_TYPE_AHASH,
  3365. .is_registered = 0,
  3366. .alg.hash = {
  3367. .halg.digestsize = SHA224_DIGEST_SIZE,
  3368. .halg.base = {
  3369. .cra_name = "sha224",
  3370. .cra_driver_name = "sha224-chcr",
  3371. .cra_blocksize = SHA224_BLOCK_SIZE,
  3372. }
  3373. }
  3374. },
  3375. {
  3376. .type = CRYPTO_ALG_TYPE_AHASH,
  3377. .is_registered = 0,
  3378. .alg.hash = {
  3379. .halg.digestsize = SHA384_DIGEST_SIZE,
  3380. .halg.base = {
  3381. .cra_name = "sha384",
  3382. .cra_driver_name = "sha384-chcr",
  3383. .cra_blocksize = SHA384_BLOCK_SIZE,
  3384. }
  3385. }
  3386. },
  3387. {
  3388. .type = CRYPTO_ALG_TYPE_AHASH,
  3389. .is_registered = 0,
  3390. .alg.hash = {
  3391. .halg.digestsize = SHA512_DIGEST_SIZE,
  3392. .halg.base = {
  3393. .cra_name = "sha512",
  3394. .cra_driver_name = "sha512-chcr",
  3395. .cra_blocksize = SHA512_BLOCK_SIZE,
  3396. }
  3397. }
  3398. },
  3399. /* HMAC */
  3400. {
  3401. .type = CRYPTO_ALG_TYPE_HMAC,
  3402. .is_registered = 0,
  3403. .alg.hash = {
  3404. .halg.digestsize = SHA1_DIGEST_SIZE,
  3405. .halg.base = {
  3406. .cra_name = "hmac(sha1)",
  3407. .cra_driver_name = "hmac-sha1-chcr",
  3408. .cra_blocksize = SHA1_BLOCK_SIZE,
  3409. }
  3410. }
  3411. },
  3412. {
  3413. .type = CRYPTO_ALG_TYPE_HMAC,
  3414. .is_registered = 0,
  3415. .alg.hash = {
  3416. .halg.digestsize = SHA224_DIGEST_SIZE,
  3417. .halg.base = {
  3418. .cra_name = "hmac(sha224)",
  3419. .cra_driver_name = "hmac-sha224-chcr",
  3420. .cra_blocksize = SHA224_BLOCK_SIZE,
  3421. }
  3422. }
  3423. },
  3424. {
  3425. .type = CRYPTO_ALG_TYPE_HMAC,
  3426. .is_registered = 0,
  3427. .alg.hash = {
  3428. .halg.digestsize = SHA256_DIGEST_SIZE,
  3429. .halg.base = {
  3430. .cra_name = "hmac(sha256)",
  3431. .cra_driver_name = "hmac-sha256-chcr",
  3432. .cra_blocksize = SHA256_BLOCK_SIZE,
  3433. }
  3434. }
  3435. },
  3436. {
  3437. .type = CRYPTO_ALG_TYPE_HMAC,
  3438. .is_registered = 0,
  3439. .alg.hash = {
  3440. .halg.digestsize = SHA384_DIGEST_SIZE,
  3441. .halg.base = {
  3442. .cra_name = "hmac(sha384)",
  3443. .cra_driver_name = "hmac-sha384-chcr",
  3444. .cra_blocksize = SHA384_BLOCK_SIZE,
  3445. }
  3446. }
  3447. },
  3448. {
  3449. .type = CRYPTO_ALG_TYPE_HMAC,
  3450. .is_registered = 0,
  3451. .alg.hash = {
  3452. .halg.digestsize = SHA512_DIGEST_SIZE,
  3453. .halg.base = {
  3454. .cra_name = "hmac(sha512)",
  3455. .cra_driver_name = "hmac-sha512-chcr",
  3456. .cra_blocksize = SHA512_BLOCK_SIZE,
  3457. }
  3458. }
  3459. },
  3460. /* Add AEAD Algorithms */
  3461. {
  3462. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
  3463. .is_registered = 0,
  3464. .alg.aead = {
  3465. .base = {
  3466. .cra_name = "gcm(aes)",
  3467. .cra_driver_name = "gcm-aes-chcr",
  3468. .cra_blocksize = 1,
  3469. .cra_priority = CHCR_AEAD_PRIORITY,
  3470. .cra_ctxsize = sizeof(struct chcr_context) +
  3471. sizeof(struct chcr_aead_ctx) +
  3472. sizeof(struct chcr_gcm_ctx),
  3473. },
  3474. .ivsize = GCM_AES_IV_SIZE,
  3475. .maxauthsize = GHASH_DIGEST_SIZE,
  3476. .setkey = chcr_gcm_setkey,
  3477. .setauthsize = chcr_gcm_setauthsize,
  3478. }
  3479. },
  3480. {
  3481. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
  3482. .is_registered = 0,
  3483. .alg.aead = {
  3484. .base = {
  3485. .cra_name = "rfc4106(gcm(aes))",
  3486. .cra_driver_name = "rfc4106-gcm-aes-chcr",
  3487. .cra_blocksize = 1,
  3488. .cra_priority = CHCR_AEAD_PRIORITY + 1,
  3489. .cra_ctxsize = sizeof(struct chcr_context) +
  3490. sizeof(struct chcr_aead_ctx) +
  3491. sizeof(struct chcr_gcm_ctx),
  3492. },
  3493. .ivsize = GCM_RFC4106_IV_SIZE,
  3494. .maxauthsize = GHASH_DIGEST_SIZE,
  3495. .setkey = chcr_gcm_setkey,
  3496. .setauthsize = chcr_4106_4309_setauthsize,
  3497. }
  3498. },
  3499. {
  3500. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
  3501. .is_registered = 0,
  3502. .alg.aead = {
  3503. .base = {
  3504. .cra_name = "ccm(aes)",
  3505. .cra_driver_name = "ccm-aes-chcr",
  3506. .cra_blocksize = 1,
  3507. .cra_priority = CHCR_AEAD_PRIORITY,
  3508. .cra_ctxsize = sizeof(struct chcr_context) +
  3509. sizeof(struct chcr_aead_ctx),
  3510. },
  3511. .ivsize = AES_BLOCK_SIZE,
  3512. .maxauthsize = GHASH_DIGEST_SIZE,
  3513. .setkey = chcr_aead_ccm_setkey,
  3514. .setauthsize = chcr_ccm_setauthsize,
  3515. }
  3516. },
  3517. {
  3518. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
  3519. .is_registered = 0,
  3520. .alg.aead = {
  3521. .base = {
  3522. .cra_name = "rfc4309(ccm(aes))",
  3523. .cra_driver_name = "rfc4309-ccm-aes-chcr",
  3524. .cra_blocksize = 1,
  3525. .cra_priority = CHCR_AEAD_PRIORITY + 1,
  3526. .cra_ctxsize = sizeof(struct chcr_context) +
  3527. sizeof(struct chcr_aead_ctx),
  3528. },
  3529. .ivsize = 8,
  3530. .maxauthsize = GHASH_DIGEST_SIZE,
  3531. .setkey = chcr_aead_rfc4309_setkey,
  3532. .setauthsize = chcr_4106_4309_setauthsize,
  3533. }
  3534. },
  3535. {
  3536. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
  3537. .is_registered = 0,
  3538. .alg.aead = {
  3539. .base = {
  3540. .cra_name = "authenc(hmac(sha1),cbc(aes))",
  3541. .cra_driver_name =
  3542. "authenc-hmac-sha1-cbc-aes-chcr",
  3543. .cra_blocksize = AES_BLOCK_SIZE,
  3544. .cra_priority = CHCR_AEAD_PRIORITY,
  3545. .cra_ctxsize = sizeof(struct chcr_context) +
  3546. sizeof(struct chcr_aead_ctx) +
  3547. sizeof(struct chcr_authenc_ctx),
  3548. },
  3549. .ivsize = AES_BLOCK_SIZE,
  3550. .maxauthsize = SHA1_DIGEST_SIZE,
  3551. .setkey = chcr_authenc_setkey,
  3552. .setauthsize = chcr_authenc_setauthsize,
  3553. }
  3554. },
  3555. {
  3556. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
  3557. .is_registered = 0,
  3558. .alg.aead = {
  3559. .base = {
  3560. .cra_name = "authenc(hmac(sha256),cbc(aes))",
  3561. .cra_driver_name =
  3562. "authenc-hmac-sha256-cbc-aes-chcr",
  3563. .cra_blocksize = AES_BLOCK_SIZE,
  3564. .cra_priority = CHCR_AEAD_PRIORITY,
  3565. .cra_ctxsize = sizeof(struct chcr_context) +
  3566. sizeof(struct chcr_aead_ctx) +
  3567. sizeof(struct chcr_authenc_ctx),
  3568. },
  3569. .ivsize = AES_BLOCK_SIZE,
  3570. .maxauthsize = SHA256_DIGEST_SIZE,
  3571. .setkey = chcr_authenc_setkey,
  3572. .setauthsize = chcr_authenc_setauthsize,
  3573. }
  3574. },
  3575. {
  3576. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
  3577. .is_registered = 0,
  3578. .alg.aead = {
  3579. .base = {
  3580. .cra_name = "authenc(hmac(sha224),cbc(aes))",
  3581. .cra_driver_name =
  3582. "authenc-hmac-sha224-cbc-aes-chcr",
  3583. .cra_blocksize = AES_BLOCK_SIZE,
  3584. .cra_priority = CHCR_AEAD_PRIORITY,
  3585. .cra_ctxsize = sizeof(struct chcr_context) +
  3586. sizeof(struct chcr_aead_ctx) +
  3587. sizeof(struct chcr_authenc_ctx),
  3588. },
  3589. .ivsize = AES_BLOCK_SIZE,
  3590. .maxauthsize = SHA224_DIGEST_SIZE,
  3591. .setkey = chcr_authenc_setkey,
  3592. .setauthsize = chcr_authenc_setauthsize,
  3593. }
  3594. },
  3595. {
  3596. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
  3597. .is_registered = 0,
  3598. .alg.aead = {
  3599. .base = {
  3600. .cra_name = "authenc(hmac(sha384),cbc(aes))",
  3601. .cra_driver_name =
  3602. "authenc-hmac-sha384-cbc-aes-chcr",
  3603. .cra_blocksize = AES_BLOCK_SIZE,
  3604. .cra_priority = CHCR_AEAD_PRIORITY,
  3605. .cra_ctxsize = sizeof(struct chcr_context) +
  3606. sizeof(struct chcr_aead_ctx) +
  3607. sizeof(struct chcr_authenc_ctx),
  3608. },
  3609. .ivsize = AES_BLOCK_SIZE,
  3610. .maxauthsize = SHA384_DIGEST_SIZE,
  3611. .setkey = chcr_authenc_setkey,
  3612. .setauthsize = chcr_authenc_setauthsize,
  3613. }
  3614. },
  3615. {
  3616. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
  3617. .is_registered = 0,
  3618. .alg.aead = {
  3619. .base = {
  3620. .cra_name = "authenc(hmac(sha512),cbc(aes))",
  3621. .cra_driver_name =
  3622. "authenc-hmac-sha512-cbc-aes-chcr",
  3623. .cra_blocksize = AES_BLOCK_SIZE,
  3624. .cra_priority = CHCR_AEAD_PRIORITY,
  3625. .cra_ctxsize = sizeof(struct chcr_context) +
  3626. sizeof(struct chcr_aead_ctx) +
  3627. sizeof(struct chcr_authenc_ctx),
  3628. },
  3629. .ivsize = AES_BLOCK_SIZE,
  3630. .maxauthsize = SHA512_DIGEST_SIZE,
  3631. .setkey = chcr_authenc_setkey,
  3632. .setauthsize = chcr_authenc_setauthsize,
  3633. }
  3634. },
  3635. {
  3636. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
  3637. .is_registered = 0,
  3638. .alg.aead = {
  3639. .base = {
  3640. .cra_name = "authenc(digest_null,cbc(aes))",
  3641. .cra_driver_name =
  3642. "authenc-digest_null-cbc-aes-chcr",
  3643. .cra_blocksize = AES_BLOCK_SIZE,
  3644. .cra_priority = CHCR_AEAD_PRIORITY,
  3645. .cra_ctxsize = sizeof(struct chcr_context) +
  3646. sizeof(struct chcr_aead_ctx) +
  3647. sizeof(struct chcr_authenc_ctx),
  3648. },
  3649. .ivsize = AES_BLOCK_SIZE,
  3650. .maxauthsize = 0,
  3651. .setkey = chcr_aead_digest_null_setkey,
  3652. .setauthsize = chcr_authenc_null_setauthsize,
  3653. }
  3654. },
  3655. {
  3656. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
  3657. .is_registered = 0,
  3658. .alg.aead = {
  3659. .base = {
  3660. .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
  3661. .cra_driver_name =
  3662. "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
  3663. .cra_blocksize = 1,
  3664. .cra_priority = CHCR_AEAD_PRIORITY,
  3665. .cra_ctxsize = sizeof(struct chcr_context) +
  3666. sizeof(struct chcr_aead_ctx) +
  3667. sizeof(struct chcr_authenc_ctx),
  3668. },
  3669. .ivsize = CTR_RFC3686_IV_SIZE,
  3670. .maxauthsize = SHA1_DIGEST_SIZE,
  3671. .setkey = chcr_authenc_setkey,
  3672. .setauthsize = chcr_authenc_setauthsize,
  3673. }
  3674. },
  3675. {
  3676. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
  3677. .is_registered = 0,
  3678. .alg.aead = {
  3679. .base = {
  3680. .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
  3681. .cra_driver_name =
  3682. "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
  3683. .cra_blocksize = 1,
  3684. .cra_priority = CHCR_AEAD_PRIORITY,
  3685. .cra_ctxsize = sizeof(struct chcr_context) +
  3686. sizeof(struct chcr_aead_ctx) +
  3687. sizeof(struct chcr_authenc_ctx),
  3688. },
  3689. .ivsize = CTR_RFC3686_IV_SIZE,
  3690. .maxauthsize = SHA256_DIGEST_SIZE,
  3691. .setkey = chcr_authenc_setkey,
  3692. .setauthsize = chcr_authenc_setauthsize,
  3693. }
  3694. },
  3695. {
  3696. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
  3697. .is_registered = 0,
  3698. .alg.aead = {
  3699. .base = {
  3700. .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
  3701. .cra_driver_name =
  3702. "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
  3703. .cra_blocksize = 1,
  3704. .cra_priority = CHCR_AEAD_PRIORITY,
  3705. .cra_ctxsize = sizeof(struct chcr_context) +
  3706. sizeof(struct chcr_aead_ctx) +
  3707. sizeof(struct chcr_authenc_ctx),
  3708. },
  3709. .ivsize = CTR_RFC3686_IV_SIZE,
  3710. .maxauthsize = SHA224_DIGEST_SIZE,
  3711. .setkey = chcr_authenc_setkey,
  3712. .setauthsize = chcr_authenc_setauthsize,
  3713. }
  3714. },
  3715. {
  3716. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
  3717. .is_registered = 0,
  3718. .alg.aead = {
  3719. .base = {
  3720. .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
  3721. .cra_driver_name =
  3722. "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
  3723. .cra_blocksize = 1,
  3724. .cra_priority = CHCR_AEAD_PRIORITY,
  3725. .cra_ctxsize = sizeof(struct chcr_context) +
  3726. sizeof(struct chcr_aead_ctx) +
  3727. sizeof(struct chcr_authenc_ctx),
  3728. },
  3729. .ivsize = CTR_RFC3686_IV_SIZE,
  3730. .maxauthsize = SHA384_DIGEST_SIZE,
  3731. .setkey = chcr_authenc_setkey,
  3732. .setauthsize = chcr_authenc_setauthsize,
  3733. }
  3734. },
  3735. {
  3736. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
  3737. .is_registered = 0,
  3738. .alg.aead = {
  3739. .base = {
  3740. .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
  3741. .cra_driver_name =
  3742. "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
  3743. .cra_blocksize = 1,
  3744. .cra_priority = CHCR_AEAD_PRIORITY,
  3745. .cra_ctxsize = sizeof(struct chcr_context) +
  3746. sizeof(struct chcr_aead_ctx) +
  3747. sizeof(struct chcr_authenc_ctx),
  3748. },
  3749. .ivsize = CTR_RFC3686_IV_SIZE,
  3750. .maxauthsize = SHA512_DIGEST_SIZE,
  3751. .setkey = chcr_authenc_setkey,
  3752. .setauthsize = chcr_authenc_setauthsize,
  3753. }
  3754. },
  3755. {
  3756. .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
  3757. .is_registered = 0,
  3758. .alg.aead = {
  3759. .base = {
  3760. .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
  3761. .cra_driver_name =
  3762. "authenc-digest_null-rfc3686-ctr-aes-chcr",
  3763. .cra_blocksize = 1,
  3764. .cra_priority = CHCR_AEAD_PRIORITY,
  3765. .cra_ctxsize = sizeof(struct chcr_context) +
  3766. sizeof(struct chcr_aead_ctx) +
  3767. sizeof(struct chcr_authenc_ctx),
  3768. },
  3769. .ivsize = CTR_RFC3686_IV_SIZE,
  3770. .maxauthsize = 0,
  3771. .setkey = chcr_aead_digest_null_setkey,
  3772. .setauthsize = chcr_authenc_null_setauthsize,
  3773. }
  3774. },
  3775. };
  3776. /*
3777. * chcr_unregister_alg - Deregister the crypto algorithms from the
3778. * kernel crypto framework.
  3779. */
  3780. static int chcr_unregister_alg(void)
  3781. {
  3782. int i;
  3783. for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
  3784. switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
  3785. case CRYPTO_ALG_TYPE_ABLKCIPHER:
  3786. if (driver_algs[i].is_registered)
  3787. crypto_unregister_alg(
  3788. &driver_algs[i].alg.crypto);
  3789. break;
  3790. case CRYPTO_ALG_TYPE_AEAD:
  3791. if (driver_algs[i].is_registered)
  3792. crypto_unregister_aead(
  3793. &driver_algs[i].alg.aead);
  3794. break;
  3795. case CRYPTO_ALG_TYPE_AHASH:
  3796. if (driver_algs[i].is_registered)
  3797. crypto_unregister_ahash(
  3798. &driver_algs[i].alg.hash);
  3799. break;
  3800. }
  3801. driver_algs[i].is_registered = 0;
  3802. }
  3803. return 0;
  3804. }
  3805. #define SZ_AHASH_CTX sizeof(struct chcr_context)
  3806. #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
  3807. #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
  3808. /*
3809. * chcr_register_alg - Register the crypto algorithms with the kernel crypto framework.
  3810. */
  3811. static int chcr_register_alg(void)
  3812. {
  3813. struct crypto_alg ai;
  3814. struct ahash_alg *a_hash;
  3815. int err = 0, i;
  3816. char *name = NULL;
  3817. for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
  3818. if (driver_algs[i].is_registered)
  3819. continue;
  3820. switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
  3821. case CRYPTO_ALG_TYPE_ABLKCIPHER:
  3822. driver_algs[i].alg.crypto.cra_priority =
  3823. CHCR_CRA_PRIORITY;
  3824. driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
  3825. driver_algs[i].alg.crypto.cra_flags =
  3826. CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
  3827. CRYPTO_ALG_NEED_FALLBACK;
  3828. driver_algs[i].alg.crypto.cra_ctxsize =
  3829. sizeof(struct chcr_context) +
  3830. sizeof(struct ablk_ctx);
  3831. driver_algs[i].alg.crypto.cra_alignmask = 0;
  3832. driver_algs[i].alg.crypto.cra_type =
  3833. &crypto_ablkcipher_type;
  3834. err = crypto_register_alg(&driver_algs[i].alg.crypto);
  3835. name = driver_algs[i].alg.crypto.cra_driver_name;
  3836. break;
  3837. case CRYPTO_ALG_TYPE_AEAD:
  3838. driver_algs[i].alg.aead.base.cra_flags =
  3839. CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
  3840. driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
  3841. driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
  3842. driver_algs[i].alg.aead.init = chcr_aead_cra_init;
  3843. driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
  3844. driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
  3845. err = crypto_register_aead(&driver_algs[i].alg.aead);
  3846. name = driver_algs[i].alg.aead.base.cra_driver_name;
  3847. break;
  3848. case CRYPTO_ALG_TYPE_AHASH:
  3849. a_hash = &driver_algs[i].alg.hash;
  3850. a_hash->update = chcr_ahash_update;
  3851. a_hash->final = chcr_ahash_final;
  3852. a_hash->finup = chcr_ahash_finup;
  3853. a_hash->digest = chcr_ahash_digest;
  3854. a_hash->export = chcr_ahash_export;
  3855. a_hash->import = chcr_ahash_import;
  3856. a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
  3857. a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
  3858. a_hash->halg.base.cra_module = THIS_MODULE;
  3859. a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
  3860. a_hash->halg.base.cra_alignmask = 0;
  3861. a_hash->halg.base.cra_exit = NULL;
  3862. if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
  3863. a_hash->halg.base.cra_init = chcr_hmac_cra_init;
  3864. a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
  3865. a_hash->init = chcr_hmac_init;
  3866. a_hash->setkey = chcr_ahash_setkey;
  3867. a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
  3868. } else {
  3869. a_hash->init = chcr_sha_init;
  3870. a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
  3871. a_hash->halg.base.cra_init = chcr_sha_cra_init;
  3872. }
  3873. err = crypto_register_ahash(&driver_algs[i].alg.hash);
  3874. ai = driver_algs[i].alg.hash.halg.base;
  3875. name = ai.cra_driver_name;
  3876. break;
  3877. }
  3878. if (err) {
  3879. pr_err("chcr : %s : Algorithm registration failed\n",
  3880. name);
  3881. goto register_err;
  3882. } else {
  3883. driver_algs[i].is_registered = 1;
  3884. }
  3885. }
  3886. return 0;
  3887. register_err:
  3888. chcr_unregister_alg();
  3889. return err;
  3890. }
  3891. /*
  3892. * start_crypto - Register the crypto algorithms.
3893. * This should be called once when the first device comes up. After this,
3894. * the kernel will start calling the driver APIs for crypto operations.
  3895. */
  3896. int start_crypto(void)
  3897. {
  3898. return chcr_register_alg();
  3899. }
  3900. /*
3901. * stop_crypto - Deregister all the crypto algorithms from the kernel.
3902. * This should be called once when the last device goes down. After this,
3903. * the kernel will not call the driver APIs for crypto operations.
  3904. */
  3905. int stop_crypto(void)
  3906. {
  3907. chcr_unregister_alg();
  3908. return 0;
  3909. }