
/*
 * Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
 *
 * Copyright (C) 2014-2017 Axis Communications AB
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/crypto.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/xts.h>

/* Max length of a line in all cache levels for Artpec SoCs. */
#define ARTPEC_CACHE_LINE_MAX 32

#define PDMA_OUT_CFG 0x0000
#define PDMA_OUT_BUF_CFG 0x0004
#define PDMA_OUT_CMD 0x0008
#define PDMA_OUT_DESCRQ_PUSH 0x0010
#define PDMA_OUT_DESCRQ_STAT 0x0014

#define A6_PDMA_IN_CFG 0x0028
#define A6_PDMA_IN_BUF_CFG 0x002c
#define A6_PDMA_IN_CMD 0x0030
#define A6_PDMA_IN_STATQ_PUSH 0x0038
#define A6_PDMA_IN_DESCRQ_PUSH 0x0044
#define A6_PDMA_IN_DESCRQ_STAT 0x0048
#define A6_PDMA_INTR_MASK 0x0068
#define A6_PDMA_ACK_INTR 0x006c
#define A6_PDMA_MASKED_INTR 0x0074

#define A7_PDMA_IN_CFG 0x002c
#define A7_PDMA_IN_BUF_CFG 0x0030
#define A7_PDMA_IN_CMD 0x0034
#define A7_PDMA_IN_STATQ_PUSH 0x003c
#define A7_PDMA_IN_DESCRQ_PUSH 0x0048
#define A7_PDMA_IN_DESCRQ_STAT 0x004C
#define A7_PDMA_INTR_MASK 0x006c
#define A7_PDMA_ACK_INTR 0x0070
#define A7_PDMA_MASKED_INTR 0x0078

#define PDMA_OUT_CFG_EN BIT(0)

#define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0)
#define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5)

#define PDMA_OUT_CMD_START BIT(0)
#define A6_PDMA_OUT_CMD_STOP BIT(3)
#define A7_PDMA_OUT_CMD_STOP BIT(2)

#define PDMA_OUT_DESCRQ_PUSH_LEN GENMASK(5, 0)
#define PDMA_OUT_DESCRQ_PUSH_ADDR GENMASK(31, 6)

#define PDMA_OUT_DESCRQ_STAT_LEVEL GENMASK(3, 0)
#define PDMA_OUT_DESCRQ_STAT_SIZE GENMASK(7, 4)

#define PDMA_IN_CFG_EN BIT(0)

#define PDMA_IN_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0)
#define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5)
#define PDMA_IN_BUF_CFG_STAT_BUF_SIZE GENMASK(14, 10)

#define PDMA_IN_CMD_START BIT(0)
#define A6_PDMA_IN_CMD_FLUSH_STAT BIT(2)
#define A6_PDMA_IN_CMD_STOP BIT(3)
#define A7_PDMA_IN_CMD_FLUSH_STAT BIT(1)
#define A7_PDMA_IN_CMD_STOP BIT(2)

#define PDMA_IN_STATQ_PUSH_LEN GENMASK(5, 0)
#define PDMA_IN_STATQ_PUSH_ADDR GENMASK(31, 6)

#define PDMA_IN_DESCRQ_PUSH_LEN GENMASK(5, 0)
#define PDMA_IN_DESCRQ_PUSH_ADDR GENMASK(31, 6)

#define PDMA_IN_DESCRQ_STAT_LEVEL GENMASK(3, 0)
#define PDMA_IN_DESCRQ_STAT_SIZE GENMASK(7, 4)

#define A6_PDMA_INTR_MASK_IN_DATA BIT(2)
#define A6_PDMA_INTR_MASK_IN_EOP BIT(3)
#define A6_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(4)

#define A7_PDMA_INTR_MASK_IN_DATA BIT(3)
#define A7_PDMA_INTR_MASK_IN_EOP BIT(4)
#define A7_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(5)

#define A6_CRY_MD_OPER GENMASK(19, 16)
#define A6_CRY_MD_HASH_SEL_CTX GENMASK(21, 20)
#define A6_CRY_MD_HASH_HMAC_FIN BIT(23)
#define A6_CRY_MD_CIPHER_LEN GENMASK(21, 20)
#define A6_CRY_MD_CIPHER_DECR BIT(22)
#define A6_CRY_MD_CIPHER_TWEAK BIT(23)
#define A6_CRY_MD_CIPHER_DSEQ BIT(24)

#define A7_CRY_MD_OPER GENMASK(11, 8)
#define A7_CRY_MD_HASH_SEL_CTX GENMASK(13, 12)
#define A7_CRY_MD_HASH_HMAC_FIN BIT(15)
#define A7_CRY_MD_CIPHER_LEN GENMASK(13, 12)
#define A7_CRY_MD_CIPHER_DECR BIT(14)
#define A7_CRY_MD_CIPHER_TWEAK BIT(15)
#define A7_CRY_MD_CIPHER_DSEQ BIT(16)

/* DMA metadata constants */
#define regk_crypto_aes_cbc 0x00000002
#define regk_crypto_aes_ctr 0x00000003
#define regk_crypto_aes_ecb 0x00000001
#define regk_crypto_aes_gcm 0x00000004
#define regk_crypto_aes_xts 0x00000005
#define regk_crypto_cache 0x00000002
#define a6_regk_crypto_dlkey 0x0000000a
#define a7_regk_crypto_dlkey 0x0000000e
#define regk_crypto_ext 0x00000001
#define regk_crypto_hmac_sha1 0x00000007
#define regk_crypto_hmac_sha256 0x00000009
#define regk_crypto_hmac_sha384 0x0000000b
#define regk_crypto_hmac_sha512 0x0000000d
#define regk_crypto_init 0x00000000
#define regk_crypto_key_128 0x00000000
#define regk_crypto_key_192 0x00000001
#define regk_crypto_key_256 0x00000002
#define regk_crypto_null 0x00000000
#define regk_crypto_sha1 0x00000006
#define regk_crypto_sha256 0x00000008
#define regk_crypto_sha384 0x0000000a
#define regk_crypto_sha512 0x0000000c

/* DMA descriptor structures */
struct pdma_descr_ctrl {
	unsigned char short_descr : 1;
	unsigned char pad1 : 1;
	unsigned char eop : 1;
	unsigned char intr : 1;
	unsigned char short_len : 3;
	unsigned char pad2 : 1;
} __packed;

struct pdma_data_descr {
	unsigned int len : 24;
	unsigned int buf : 32;
} __packed;

struct pdma_short_descr {
	unsigned char data[7];
} __packed;

struct pdma_descr {
	struct pdma_descr_ctrl ctrl;
	union {
		struct pdma_data_descr data;
		struct pdma_short_descr shrt;
	};
};

struct pdma_stat_descr {
	unsigned char pad1 : 1;
	unsigned char pad2 : 1;
	unsigned char eop : 1;
	unsigned char pad3 : 5;
	unsigned int len : 24;
};

/* Each descriptor array can hold max 64 entries */
#define PDMA_DESCR_COUNT 64

#define MODULE_NAME "Artpec-6 CA"

/* Hash modes (including HMAC variants) */
#define ARTPEC6_CRYPTO_HASH_SHA1 1
#define ARTPEC6_CRYPTO_HASH_SHA256 2
#define ARTPEC6_CRYPTO_HASH_SHA384 3
#define ARTPEC6_CRYPTO_HASH_SHA512 4

/* Crypto modes */
#define ARTPEC6_CRYPTO_CIPHER_AES_ECB 1
#define ARTPEC6_CRYPTO_CIPHER_AES_CBC 2
#define ARTPEC6_CRYPTO_CIPHER_AES_CTR 3
#define ARTPEC6_CRYPTO_CIPHER_AES_XTS 5

/* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
 * It operates on a descriptor array with up to 64 descriptor entries.
 * The arrays must be 64 byte aligned in memory.
 *
 * The ciphering unit has no registers and is completely controlled by
 * a 4-byte metadata that is inserted at the beginning of each dma packet.
 *
 * A dma packet is a sequence of descriptors terminated by setting the .eop
 * field in the final descriptor of the packet.
 *
 * Multiple packets are used for providing context data, key data and
 * the plain/ciphertext.
 *
 *   PDMA Descriptors (Array)
 *   +------+------+------+~~+-------+------+----
 *   |  0   |  1   |  2   |~~| 11 EOP|  12  |  ....
 *   +--+---+--+---+----+-+~~+-------+----+-+----
 *      |      |        |       |         |
 *      |      |        |       |         |
 *    __|__  +-------++-------++-------+ +----+
 *   | MD  | |Payload||Payload||Payload| | MD |
 *   +-----+ +-------++-------++-------+ +----+
 */
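
/*
 * Illustrative sketch (not part of the driver): on ARTPEC-6, the 4-byte
 * metadata word for an AES-CBC operation with a 128-bit key could be
 * assembled from the bitfield definitions above roughly as
 *
 *	u32 md = FIELD_PREP(A6_CRY_MD_OPER, regk_crypto_aes_cbc) |
 *		 FIELD_PREP(A6_CRY_MD_CIPHER_LEN, regk_crypto_key_128);
 *
 * and pushed to the engine as the first (short) out descriptor of a packet,
 * followed by the key and plain/ciphertext packets described above.
 */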
struct artpec6_crypto_bounce_buffer {
	struct list_head list;
	size_t length;
	struct scatterlist *sg;
	size_t offset;
	/* buf is aligned to ARTPEC_CACHE_LINE_MAX and
	 * holds up to ARTPEC_CACHE_LINE_MAX bytes data.
	 */
	void *buf;
};

struct artpec6_crypto_dma_map {
	dma_addr_t dma_addr;
	size_t size;
	enum dma_data_direction dir;
};

struct artpec6_crypto_dma_descriptors {
	struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
	struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
	u32 stat[PDMA_DESCR_COUNT] __aligned(64);
	struct list_head bounce_buffers;
	/* Enough maps for all out/in buffers, and all three descr. arrays */
	struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
	dma_addr_t out_dma_addr;
	dma_addr_t in_dma_addr;
	dma_addr_t stat_dma_addr;
	size_t out_cnt;
	size_t in_cnt;
	size_t map_count;
};

enum artpec6_crypto_variant {
	ARTPEC6_CRYPTO,
	ARTPEC7_CRYPTO,
};

struct artpec6_crypto {
	void __iomem *base;
	spinlock_t queue_lock;
	struct list_head queue; /* waiting for pdma fifo space */
	struct list_head pending; /* submitted to pdma fifo */
	struct tasklet_struct task;
	struct kmem_cache *dma_cache;
	int pending_count;
	struct timer_list timer;
	enum artpec6_crypto_variant variant;
	void *pad_buffer; /* cache-aligned block padding buffer */
	void *zero_buffer;
};

enum artpec6_crypto_hash_flags {
	HASH_FLAG_INIT_CTX = 2,
	HASH_FLAG_UPDATE = 4,
	HASH_FLAG_FINALIZE = 8,
	HASH_FLAG_HMAC = 16,
	HASH_FLAG_UPDATE_KEY = 32,
};

struct artpec6_crypto_req_common {
	struct list_head list;
	struct list_head complete_in_progress;
	struct artpec6_crypto_dma_descriptors *dma;
	struct crypto_async_request *req;
	void (*complete)(struct crypto_async_request *req);
	gfp_t gfp_flags;
};

struct artpec6_hash_request_context {
	char partial_buffer[SHA512_BLOCK_SIZE];
	char partial_buffer_out[SHA512_BLOCK_SIZE];
	char key_buffer[SHA512_BLOCK_SIZE];
	char pad_buffer[SHA512_BLOCK_SIZE + 32];
	unsigned char digeststate[SHA512_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	u32 key_md;
	u32 hash_md;
	enum artpec6_crypto_hash_flags hash_flags;
	struct artpec6_crypto_req_common common;
};

struct artpec6_hash_export_state {
	char partial_buffer[SHA512_BLOCK_SIZE];
	unsigned char digeststate[SHA512_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	int oper;
	unsigned int hash_flags;
};

struct artpec6_hashalg_context {
	char hmac_key[SHA512_BLOCK_SIZE];
	size_t hmac_key_length;
	struct crypto_shash *child_hash;
};

struct artpec6_crypto_request_context {
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
};

struct artpec6_cryptotfm_context {
	unsigned char aes_key[2*AES_MAX_KEY_SIZE];
	size_t key_length;
	u32 key_md;
	int crypto_type;
	struct crypto_skcipher *fallback;
};

struct artpec6_crypto_aead_hw_ctx {
	__be64 aad_length_bits;
	__be64 text_length_bits;
	__u8 J0[AES_BLOCK_SIZE];
};

struct artpec6_crypto_aead_req_ctx {
	struct artpec6_crypto_aead_hw_ctx hw_ctx;
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
	__u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
};

/* The crypto framework makes it hard to avoid this global. */
static struct device *artpec6_crypto_dev;

#ifdef CONFIG_FAULT_INJECTION
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
#endif

enum {
	ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
	ARTPEC6_CRYPTO_PREPARE_HASH_START,
};

static int artpec6_crypto_prepare_aead(struct aead_request *areq);
static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
static int artpec6_crypto_prepare_hash(struct ahash_request *areq);

static void
artpec6_crypto_complete_crypto(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_aead(struct crypto_async_request *req);
static void
artpec6_crypto_complete_hash(struct crypto_async_request *req);

static int
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);

static void
artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);

struct artpec6_crypto_walk {
	struct scatterlist *sg;
	size_t offset;
};

static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
				     struct scatterlist *sg)
{
	awalk->sg = sg;
	awalk->offset = 0;
}

static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
					  size_t nbytes)
{
	while (nbytes && awalk->sg) {
		size_t piece;

		WARN_ON(awalk->offset > awalk->sg->length);

		piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
		nbytes -= piece;
		awalk->offset += piece;
		if (awalk->offset == awalk->sg->length) {
			awalk->sg = sg_next(awalk->sg);
			awalk->offset = 0;
		}
	}

	return nbytes;
}

static size_t
artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
{
	WARN_ON(awalk->sg->length == awalk->offset);

	return awalk->sg->length - awalk->offset;
}

static dma_addr_t
artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
{
	return sg_phys(awalk->sg) + awalk->offset;
}
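
/*
 * Illustrative usage sketch (not part of the driver): consuming a
 * scatterlist chunk by chunk with the walk helpers above might look like
 *
 *	struct artpec6_crypto_walk walk;
 *
 *	artpec6_crypto_walk_init(&walk, sg);
 *	while (walk.sg && count) {
 *		size_t chunk = min(count, artpec6_crypto_walk_chunklen(&walk));
 *
 *		// ... consume chunk bytes at artpec6_crypto_walk_chunk_phys(&walk) ...
 *		count -= chunk;
 *		artpec6_crypto_walk_advance(&walk, chunk);
 *	}
 *
 * which is the pattern used by artpec6_crypto_setup_sg_descrs_in() and
 * artpec6_crypto_setup_sg_descrs_out() below.
 */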
static void
artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
			 b, b->length, b->offset, b->buf);
		sg_pcopy_from_buffer(b->sg,
				     1,
				     b->buf,
				     b->length,
				     b->offset);

		list_del(&b->list);
		kfree(b);
	}
}

static inline bool artpec6_crypto_busy(void)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int fifo_count = ac->pending_count;

	return fifo_count > 6;
}

static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int ret = -EBUSY;

	spin_lock_bh(&ac->queue_lock);

	if (!artpec6_crypto_busy()) {
		list_add_tail(&req->list, &ac->pending);
		artpec6_crypto_start_dma(req);
		ret = -EINPROGRESS;
	} else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
		list_add_tail(&req->list, &ac->queue);
	} else {
		artpec6_crypto_common_destroy(req);
	}

	spin_unlock_bh(&ac->queue_lock);

	return ret;
}

static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	u32 ind, statd, outd;

	/* Make descriptor content visible to the DMA before starting it. */
	wmb();

	ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) |
	      FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6);

	statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) |
		FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6);

	outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) |
	       FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6);

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD);
	} else {
		writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD);
	}

	writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH);
	writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD);

	ac->pending_count++;
}

static void
artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;

	dma->out_cnt = 0;
	dma->in_cnt = 0;
	dma->map_count = 0;
	INIT_LIST_HEAD(&dma->bounce_buffers);
}

static bool fault_inject_dma_descr(void)
{
#ifdef CONFIG_FAULT_INJECTION
	return should_fail(&artpec6_crypto_fail_dma_array_full, 1);
#else
	return false;
#endif
}

/** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
 *                                        physical address
 *
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @eop:  True if this is the last buffer in the packet
 *
 * @return 0 on success or -ENOSPC if there are no more descriptors available
 */
static int
artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common,
				    dma_addr_t addr, size_t len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	}

	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 0;
	d->ctrl.eop = eop;
	d->data.len = len;
	d->data.buf = addr;
	return 0;
}

/** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
 *
 * @dst: The virtual address of the data
 * @len: The length of the data, must be between 1 and 7 bytes
 * @eop: True if this is the last buffer in the packet
 *
 * @return 0 on success
 *	-ENOSPC if no more descriptors are available
 *	-EINVAL if the data length exceeds 7 bytes
 */
static int
artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common,
				     void *dst, unsigned int len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	} else if (len > 7 || len < 1) {
		return -EINVAL;
	}
	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 1;
	d->ctrl.short_len = len;
	d->ctrl.eop = eop;
	memcpy(d->shrt.data, dst, len);
	return 0;
}

static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common,
				       struct page *page, size_t offset,
				       size_t size,
				       enum dma_data_direction dir,
				       dma_addr_t *dma_addr_out)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	struct artpec6_crypto_dma_map *map;
	dma_addr_t dma_addr;

	*dma_addr_out = 0;

	if (dma->map_count >= ARRAY_SIZE(dma->maps))
		return -ENOMEM;

	dma_addr = dma_map_page(dev, page, offset, size, dir);
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	map = &dma->maps[dma->map_count++];
	map->size = size;
	map->dma_addr = dma_addr;
	map->dir = dir;

	*dma_addr_out = dma_addr;

	return 0;
}

static int
artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common,
			      void *ptr, size_t size,
			      enum dma_data_direction dir,
			      dma_addr_t *dma_addr_out)
{
	struct page *page = virt_to_page(ptr);
	size_t offset = (uintptr_t)ptr & ~PAGE_MASK;

	return artpec6_crypto_dma_map_page(common, page, offset, size, dir,
					   dma_addr_out);
}

static int
artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, dma->in,
				sizeof(dma->in[0]) * dma->in_cnt,
				DMA_TO_DEVICE, &dma->in_dma_addr);
	if (ret)
		return ret;

	ret = artpec6_crypto_dma_map_single(common, dma->out,
				sizeof(dma->out[0]) * dma->out_cnt,
				DMA_TO_DEVICE, &dma->out_dma_addr);
	if (ret)
		return ret;

	/* We only read one stat descriptor */
	dma->stat[dma->in_cnt - 1] = 0;

	/*
	 * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
	 * to be written.
	 */
	return artpec6_crypto_dma_map_single(common,
				dma->stat + dma->in_cnt - 1,
				sizeof(dma->stat[0]),
				DMA_BIDIRECTIONAL,
				&dma->stat_dma_addr);
}

static void
artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	int i;

	for (i = 0; i < dma->map_count; i++) {
		struct artpec6_crypto_dma_map *map = &dma->maps[i];

		dma_unmap_page(dev, map->dma_addr, map->size, map->dir);
	}

	dma->map_count = 0;
}

/** artpec6_crypto_setup_out_descr - Setup an out descriptor
 *
 * @dst: The virtual address of the data
 * @len: The length of the data
 * @eop: True if this is the last buffer in the packet
 * @use_short: If this is true and the data length is 7 bytes or less then
 *	a short descriptor will be used
 *
 * @return 0 on success
 *	Any errors from artpec6_crypto_setup_out_descr_short() or
 *	setup_out_descr_phys()
 */
static int
artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common,
			       void *dst, unsigned int len, bool eop,
			       bool use_short)
{
	if (use_short && len < 7) {
		return artpec6_crypto_setup_out_descr_short(common, dst, len,
							    eop);
	} else {
		int ret;
		dma_addr_t dma_addr;

		ret = artpec6_crypto_dma_map_single(common, dst, len,
						    DMA_TO_DEVICE,
						    &dma_addr);
		if (ret)
			return ret;

		return artpec6_crypto_setup_out_descr_phys(common, dma_addr,
							   len, eop);
	}
}

/** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
 *                                       physical address
 *
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @intr: True if an interrupt should be fired after HW processing of this
 *	  descriptor
 *
 */
static int
artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common,
				   dma_addr_t addr, unsigned int len, bool intr)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->in_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free IN DMA descriptors available!\n");
		return -ENOSPC;
	}
	d = &dma->in[dma->in_cnt++];
	memset(d, 0, sizeof(*d));
	d->ctrl.intr = intr;
	d->data.len = len;
	d->data.buf = addr;
	return 0;
}
/** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
 *
 * @buffer: The virtual address of the data buffer
 * @len:    The length of the data buffer
 * @last:   If this is the last data buffer in the request (i.e. an interrupt
 *	    is needed)
 *
 * Short descriptors are not used for the in channel
 */
static int
artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common,
			      void *buffer, unsigned int len, bool last)
{
	dma_addr_t dma_addr;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, buffer, len,
					    DMA_FROM_DEVICE, &dma_addr);
	if (ret)
		return ret;

	return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last);
}
static struct artpec6_crypto_bounce_buffer *
artpec6_crypto_alloc_bounce(gfp_t flags)
{
	void *base;
	size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) +
			    2 * ARTPEC_CACHE_LINE_MAX;
	struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags);

	if (!bbuf)
		return NULL;

	base = bbuf + 1;
	bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX);
	return bbuf;
}

static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk, size_t size)
{
	struct artpec6_crypto_bounce_buffer *bbuf;
	int ret;

	bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags);
	if (!bbuf)
		return -ENOMEM;

	bbuf->length = size;
	bbuf->sg = walk->sg;
	bbuf->offset = walk->offset;

	ret = artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false);
	if (ret) {
		kfree(bbuf);
		return ret;
	}

	pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset);
	list_add_tail(&bbuf->list, &common->dma->bounce_buffers);
	return 0;
}

static int
artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk,
				  size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		/* When destination buffers are not aligned to the cache line
		 * size we need bounce buffers. The DMA-API requires that the
		 * entire line is owned by the DMA buffer and this holds also
		 * for the case when coherent DMA is used.
		 */
		if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) {
			chunk = min_t(dma_addr_t, chunk,
				      ALIGN(addr, ARTPEC_CACHE_LINE_MAX) -
				      addr);

			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else if (chunk < ARTPEC_CACHE_LINE_MAX) {
			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else {
			dma_addr_t dma_addr;

			chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1);

			pr_debug("CHUNK %pad:%zu\n", &addr, chunk);

			ret = artpec6_crypto_dma_map_page(common,
							  sg_page(walk->sg),
							  walk->sg->offset +
							  walk->offset,
							  chunk,
							  DMA_FROM_DEVICE,
							  &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_in_descr_phys(common,
								 dma_addr,
								 chunk, false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}

static int
artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common,
				   struct artpec6_crypto_walk *walk,
				   size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk);

		if (addr & 3) {
			char buf[3];

			chunk = min_t(size_t, chunk, (4-(addr&3)));

			sg_pcopy_to_buffer(walk->sg, 1, buf, chunk,
					   walk->offset);

			ret = artpec6_crypto_setup_out_descr_short(common, buf,
								   chunk,
								   false);
		} else {
			dma_addr_t dma_addr;

			ret = artpec6_crypto_dma_map_page(common,
							  sg_page(walk->sg),
							  walk->sg->offset +
							  walk->offset,
							  chunk,
							  DMA_TO_DEVICE,
							  &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_out_descr_phys(common,
								  dma_addr,
								  chunk, false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}
/** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
 *
 * If the out descriptor list is non-empty, then the eop flag on the
 * last used out descriptor will be set.
 *
 * @return 0 on success
 *	-EINVAL if the out descriptor is empty or has overflown
 */
static int
artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: OUT descriptor list is %s\n",
		       MODULE_NAME, dma->out_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->out[dma->out_cnt-1];
	d->ctrl.eop = 1;

	return 0;
}

/** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
 *                                       in descriptor
 *
 * See artpec6_crypto_terminate_out_descrs() for return values
 */
static int
artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: IN descriptor list is %s\n",
		       MODULE_NAME, dma->in_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->in[dma->in_cnt-1];
	d->ctrl.intr = 1;
	return 0;
}
/** create_hash_pad - Create a Secure Hash conformant pad
 *
 * @dst:      The destination buffer to write the pad. Must be at least 64 bytes
 * @dgstlen:  The total length of the hash digest in bytes
 * @bitcount: The total length of the digest in bits
 *
 * @return The total number of padding bytes written to @dst
 */
static size_t
create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount)
{
	unsigned int mod, target, diff, pad_bytes, size_bytes;
	__be64 bits = __cpu_to_be64(bitcount);

	switch (oper) {
	case regk_crypto_sha1:
	case regk_crypto_sha256:
	case regk_crypto_hmac_sha1:
	case regk_crypto_hmac_sha256:
		target = 448 / 8;
		mod = 512 / 8;
		size_bytes = 8;
		break;
	default:
		target = 896 / 8;
		mod = 1024 / 8;
		size_bytes = 16;
		break;
	}

	target -= 1;
	diff = dgstlen & (mod - 1);
	pad_bytes = diff > target ? target + mod - diff : target - diff;

	memset(dst + 1, 0, pad_bytes);
	dst[0] = 0x80;

	if (size_bytes == 16) {
		memset(dst + 1 + pad_bytes, 0, 8);
		memcpy(dst + 1 + pad_bytes + 8, &bits, 8);
	} else {
		memcpy(dst + 1 + pad_bytes, &bits, 8);
	}

	return pad_bytes + size_bytes + 1;
}
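
/*
 * Worked example (illustrative only): hashing 3 bytes with SHA-256 gives
 * oper == regk_crypto_sha256, so mod = 64, target = 56 - 1 = 55 and
 * diff = 3. The pad is then 0x80 followed by pad_bytes = 55 - 3 = 52 zero
 * bytes and the 8-byte big-endian bit count, i.e. 61 pad bytes in total,
 * which brings the message up to one full 64-byte block.
 */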
static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common,
				      struct crypto_async_request *parent,
				      void (*complete)(struct crypto_async_request *req),
				      struct scatterlist *dstsg, unsigned int nbytes)
{
	gfp_t flags;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		 GFP_KERNEL : GFP_ATOMIC;

	common->gfp_flags = flags;
	common->dma = kmem_cache_alloc(ac->dma_cache, flags);
	if (!common->dma)
		return -ENOMEM;

	common->req = parent;
	common->complete = complete;
	return 0;
}

static void
artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma)
{
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		kfree(b);
	}
}

static int
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	artpec6_crypto_dma_unmap_all(common);
	artpec6_crypto_bounce_destroy(common->dma);
	kmem_cache_free(ac->dma_cache, common->dma);
	common->dma = NULL;
	return 0;
}

/*
 * Ciphering functions.
 */
static int artpec6_crypto_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);
	int ret;

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 0;
		break;
	default:
		break;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_encrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common,
					 &req->base,
					 complete,
					 req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_decrypt(struct skcipher_request *req)
{
	int ret;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 1;
		break;
	default:
		break;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_decrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
					 complete,
					 req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int
artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	size_t iv_len = crypto_skcipher_ivsize(cipher);
	unsigned int counter = be32_to_cpup((__be32 *)
					    (req->iv + iv_len - 4));
	unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
			     AES_BLOCK_SIZE;
	/*
	 * The hardware uses only the last 32-bits as the counter while the
	 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
	 * the whole IV is a counter. So fallback if the counter is going to
	 * overflow.
	 */
	if (counter + nblks < counter) {
		int ret;

		pr_debug("counter %x will overflow (nblks %u), falling back\n",
			 counter, counter + nblks);

		ret = crypto_skcipher_setkey(ctx->fallback, ctx->aes_key,
					     ctx->key_length);
		if (ret)
			return ret;

		{
			SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

			skcipher_request_set_tfm(subreq, ctx->fallback);
			skcipher_request_set_callback(subreq, req->base.flags,
						      NULL, NULL);
			skcipher_request_set_crypt(subreq, req->src, req->dst,
						   req->cryptlen, req->iv);
			ret = encrypt ? crypto_skcipher_encrypt(subreq)
				      : crypto_skcipher_decrypt(subreq);
			skcipher_request_zero(subreq);
		}
		return ret;
	}

	return encrypt ? artpec6_crypto_encrypt(req)
		       : artpec6_crypto_decrypt(req);
}
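
/*
 * Illustrative example (not from the driver): with an IV whose last 32 bits
 * are 0xffffffff and a 32-byte request (nblks = 2), counter + nblks wraps
 * around, so the overflow check above routes the request to the software
 * fallback instead of the hardware.
 */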
static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, true);
}

static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, false);
}

/*
 * AEAD functions
 */
static int artpec6_crypto_aead_init(struct crypto_aead *tfm)
{
	struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm);

	memset(tfm_ctx, 0, sizeof(*tfm_ctx));

	crypto_aead_set_reqsize(tfm,
				sizeof(struct artpec6_crypto_aead_req_ctx));

	return 0;
}

static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
				       unsigned int len)
{
	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);

	if (len != 16 && len != 24 && len != 32) {
		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->key_length = len;

	memcpy(ctx->aes_key, key, len);
	return 0;
}

static int artpec6_crypto_aead_encrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = false;
	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
					 artpec6_crypto_complete_aead,
					 NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_aead_decrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = true;
	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	ret = artpec6_crypto_common_init(&req_ctx->common,
					 &req->base,
					 artpec6_crypto_complete_aead,
					 NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
{
	struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
	size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	size_t contextsize = digestsize == SHA384_DIGEST_SIZE ?
		SHA512_DIGEST_SIZE : digestsize;
	size_t blocksize = crypto_tfm_alg_blocksize(
		crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
	struct artpec6_crypto_req_common *common = &req_ctx->common;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	u32 sel_ctx;
	bool ext_ctx = false;
	bool run_hw = false;
	int error = 0;

	artpec6_crypto_init_dma_operation(common);
	/* Upload HMAC key, must be the first packet */
	if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
		if (variant == ARTPEC6_CRYPTO) {
			req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
						     a6_regk_crypto_dlkey);
		} else {
			req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
						     a7_regk_crypto_dlkey);
		}

		/* Copy and pad up the key */
		memcpy(req_ctx->key_buffer, ctx->hmac_key,
		       ctx->hmac_key_length);
		memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
		       blocksize - ctx->hmac_key_length);

		error = artpec6_crypto_setup_out_descr(common,
					(void *)&req_ctx->key_md,
					sizeof(req_ctx->key_md), false, false);
		if (error)
			return error;

		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->key_buffer, blocksize,
					true, false);
		if (error)
			return error;
	}

	if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) {
		/* Restore context */
		sel_ctx = regk_crypto_ext;
		ext_ctx = true;
	} else {
		sel_ctx = regk_crypto_init;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN;
	} else {
		req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN;
	}

	/* Set up metadata descriptors */
	error = artpec6_crypto_setup_out_descr(common,
				(void *)&req_ctx->hash_md,
				sizeof(req_ctx->hash_md), false, false);
	if (error)
		return error;

	error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (error)
		return error;

	if (ext_ctx) {
		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->digeststate,
					contextsize, false, false);

		if (error)
			return error;
	}

	if (req_ctx->hash_flags & HASH_FLAG_UPDATE) {
		size_t done_bytes = 0;
		size_t total_bytes = areq->nbytes + req_ctx->partial_bytes;
		size_t ready_bytes = round_down(total_bytes, blocksize);
		struct artpec6_crypto_walk walk;

		run_hw = ready_bytes > 0;
		if (req_ctx->partial_bytes && ready_bytes) {
			/* We have a partial buffer and will send at least
			 * some bytes to the HW. Empty this partial buffer
			 * before tackling the SG lists
			 */
			memcpy(req_ctx->partial_buffer_out,
			       req_ctx->partial_buffer,
			       req_ctx->partial_bytes);

			error = artpec6_crypto_setup_out_descr(common,
						req_ctx->partial_buffer_out,
						req_ctx->partial_bytes,
						false, true);
			if (error)
				return error;

			/* Reset partial buffer */
			done_bytes += req_ctx->partial_bytes;
			req_ctx->partial_bytes = 0;
		}
  1196. artpec6_crypto_walk_init(&walk, areq->src);
  1197. error = artpec6_crypto_setup_sg_descrs_out(common, &walk,
  1198. ready_bytes -
  1199. done_bytes);
  1200. if (error)
  1201. return error;
  1202. if (walk.sg) {
  1203. size_t sg_skip = ready_bytes - done_bytes;
  1204. size_t sg_rem = areq->nbytes - sg_skip;
  1205. sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
  1206. req_ctx->partial_buffer +
  1207. req_ctx->partial_bytes,
  1208. sg_rem, sg_skip);
  1209. req_ctx->partial_bytes += sg_rem;
  1210. }
  1211. req_ctx->digcnt += ready_bytes;
  1212. req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE);
  1213. }
  1214. /* Finalize */
  1215. if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
  1216. bool needtrim = contextsize != digestsize;
  1217. size_t hash_pad_len;
  1218. u64 digest_bits;
  1219. u32 oper;
  1220. if (variant == ARTPEC6_CRYPTO)
  1221. oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md);
  1222. else
  1223. oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md);
  1224. /* Write out the partial buffer if present */
  1225. if (req_ctx->partial_bytes) {
  1226. memcpy(req_ctx->partial_buffer_out,
  1227. req_ctx->partial_buffer,
  1228. req_ctx->partial_bytes);
  1229. error = artpec6_crypto_setup_out_descr(common,
  1230. req_ctx->partial_buffer_out,
  1231. req_ctx->partial_bytes,
  1232. false, true);
  1233. if (error)
  1234. return error;
  1235. req_ctx->digcnt += req_ctx->partial_bytes;
  1236. req_ctx->partial_bytes = 0;
  1237. }
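/* For HMAC the HW has already processed one key block (the ipad
 * block), so the length encoded in the final padding must include
 * that extra blocksize.
 */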
  1238. if (req_ctx->hash_flags & HASH_FLAG_HMAC)
  1239. digest_bits = 8 * (req_ctx->digcnt + blocksize);
  1240. else
  1241. digest_bits = 8 * req_ctx->digcnt;
  1242. /* Add the hash pad */
  1243. hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer,
  1244. req_ctx->digcnt, digest_bits);
  1245. error = artpec6_crypto_setup_out_descr(common,
  1246. req_ctx->pad_buffer,
  1247. hash_pad_len, false,
  1248. true);
  1249. req_ctx->digcnt = 0;
  1250. if (error)
  1251. return error;
  1252. /* Descriptor for the final result */
  1253. error = artpec6_crypto_setup_in_descr(common, areq->result,
  1254. digestsize,
  1255. !needtrim);
  1256. if (error)
  1257. return error;
  1258. if (needtrim) {
  1259. /* Discard the extra context bytes for SHA-384 */
  1260. error = artpec6_crypto_setup_in_descr(common,
  1261. req_ctx->partial_buffer,
1262. contextsize - digestsize, true);
  1263. if (error)
  1264. return error;
  1265. }
  1266. } else { /* This is not the final operation for this request */
  1267. if (!run_hw)
  1268. return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;
  1269. /* Save the result to the context */
  1270. error = artpec6_crypto_setup_in_descr(common,
  1271. req_ctx->digeststate,
  1272. contextsize, false);
  1273. if (error)
  1274. return error;
  1275. /* fall through */
  1276. }
  1277. req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE |
  1278. HASH_FLAG_FINALIZE);
  1279. error = artpec6_crypto_terminate_in_descrs(common);
  1280. if (error)
  1281. return error;
  1282. error = artpec6_crypto_terminate_out_descrs(common);
  1283. if (error)
  1284. return error;
  1285. error = artpec6_crypto_dma_map_descs(common);
  1286. if (error)
  1287. return error;
  1288. return ARTPEC6_CRYPTO_PREPARE_HASH_START;
  1289. }
  1290. static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
  1291. {
  1292. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1293. tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
  1294. ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;
  1295. return 0;
  1296. }
  1297. static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
  1298. {
  1299. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1300. ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
  1301. 0,
  1302. CRYPTO_ALG_ASYNC |
  1303. CRYPTO_ALG_NEED_FALLBACK);
  1304. if (IS_ERR(ctx->fallback))
  1305. return PTR_ERR(ctx->fallback);
  1306. tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
  1307. ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;
  1308. return 0;
  1309. }
  1310. static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
  1311. {
  1312. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1313. tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
  1314. ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;
  1315. return 0;
  1316. }
  1317. static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
  1318. {
  1319. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1320. tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
  1321. ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;
  1322. return 0;
  1323. }
  1324. static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm)
  1325. {
  1326. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1327. memset(ctx, 0, sizeof(*ctx));
  1328. }
  1329. static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
  1330. {
  1331. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
  1332. crypto_free_skcipher(ctx->fallback);
  1333. artpec6_crypto_aes_exit(tfm);
  1334. }
  1335. static int
  1336. artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
  1337. unsigned int keylen)
  1338. {
  1339. struct artpec6_cryptotfm_context *ctx =
  1340. crypto_skcipher_ctx(cipher);
  1341. switch (keylen) {
  1342. case 16:
  1343. case 24:
  1344. case 32:
  1345. break;
  1346. default:
  1347. crypto_skcipher_set_flags(cipher,
  1348. CRYPTO_TFM_RES_BAD_KEY_LEN);
  1349. return -EINVAL;
  1350. }
  1351. memcpy(ctx->aes_key, key, keylen);
  1352. ctx->key_length = keylen;
  1353. return 0;
  1354. }
  1355. static int
  1356. artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
  1357. unsigned int keylen)
  1358. {
  1359. struct artpec6_cryptotfm_context *ctx =
  1360. crypto_skcipher_ctx(cipher);
  1361. int ret;
  1362. ret = xts_check_key(&cipher->base, key, keylen);
  1363. if (ret)
  1364. return ret;
  1365. switch (keylen) {
  1366. case 32:
  1367. case 48:
  1368. case 64:
  1369. break;
  1370. default:
  1371. crypto_skcipher_set_flags(cipher,
  1372. CRYPTO_TFM_RES_BAD_KEY_LEN);
  1373. return -EINVAL;
  1374. }
  1375. memcpy(ctx->aes_key, key, keylen);
  1376. ctx->key_length = keylen;
  1377. return 0;
  1378. }
1379. /** artpec6_crypto_prepare_crypto - Prepare an async block cipher crypto request
1380. *
1381. * @areq: The asynchronous skcipher request to process
  1382. *
  1383. * @return 0 if the dma job was successfully prepared
  1384. * <0 on error
  1385. *
  1386. * This function sets up the PDMA descriptors for a block cipher request.
  1387. *
  1388. * The required padding is added for AES-CTR using a statically defined
  1389. * buffer.
  1390. *
  1391. * The PDMA descriptor list will be as follows:
  1392. *
  1393. * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
  1394. * IN: <CIPHER_MD><data_0>...[data_n]<intr>
  1395. *
  1396. */
  1397. static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
  1398. {
  1399. int ret;
  1400. struct artpec6_crypto_walk walk;
  1401. struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
  1402. struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
  1403. struct artpec6_crypto_request_context *req_ctx = NULL;
  1404. size_t iv_len = crypto_skcipher_ivsize(cipher);
  1405. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  1406. enum artpec6_crypto_variant variant = ac->variant;
  1407. struct artpec6_crypto_req_common *common;
  1408. bool cipher_decr = false;
  1409. size_t cipher_klen;
  1410. u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */
  1411. u32 oper;
  1412. req_ctx = skcipher_request_ctx(areq);
  1413. common = &req_ctx->common;
  1414. artpec6_crypto_init_dma_operation(common);
  1415. if (variant == ARTPEC6_CRYPTO)
  1416. ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey);
  1417. else
  1418. ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey);
  1419. ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
  1420. sizeof(ctx->key_md), false, false);
  1421. if (ret)
  1422. return ret;
  1423. ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
  1424. ctx->key_length, true, false);
  1425. if (ret)
  1426. return ret;
  1427. req_ctx->cipher_md = 0;
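/* For XTS the stored key is the concatenation of the data key and
 * the tweak key, so the effective AES key length is half the total.
 */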
  1428. if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS)
  1429. cipher_klen = ctx->key_length/2;
  1430. else
  1431. cipher_klen = ctx->key_length;
  1432. /* Metadata */
  1433. switch (cipher_klen) {
  1434. case 16:
  1435. cipher_len = regk_crypto_key_128;
  1436. break;
  1437. case 24:
  1438. cipher_len = regk_crypto_key_192;
  1439. break;
  1440. case 32:
  1441. cipher_len = regk_crypto_key_256;
  1442. break;
  1443. default:
  1444. pr_err("%s: Invalid key length %d!\n",
  1445. MODULE_NAME, ctx->key_length);
  1446. return -EINVAL;
  1447. }
  1448. switch (ctx->crypto_type) {
  1449. case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
  1450. oper = regk_crypto_aes_ecb;
  1451. cipher_decr = req_ctx->decrypt;
  1452. break;
  1453. case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
  1454. oper = regk_crypto_aes_cbc;
  1455. cipher_decr = req_ctx->decrypt;
  1456. break;
  1457. case ARTPEC6_CRYPTO_CIPHER_AES_CTR:
  1458. oper = regk_crypto_aes_ctr;
  1459. cipher_decr = false;
  1460. break;
  1461. case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
  1462. oper = regk_crypto_aes_xts;
  1463. cipher_decr = req_ctx->decrypt;
  1464. if (variant == ARTPEC6_CRYPTO)
  1465. req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ;
  1466. else
  1467. req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ;
  1468. break;
  1469. default:
  1470. pr_err("%s: Invalid cipher mode %d!\n",
  1471. MODULE_NAME, ctx->crypto_type);
  1472. return -EINVAL;
  1473. }
  1474. if (variant == ARTPEC6_CRYPTO) {
  1475. req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper);
  1476. req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
  1477. cipher_len);
  1478. if (cipher_decr)
  1479. req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
  1480. } else {
  1481. req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper);
  1482. req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
  1483. cipher_len);
  1484. if (cipher_decr)
  1485. req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
  1486. }
  1487. ret = artpec6_crypto_setup_out_descr(common,
  1488. &req_ctx->cipher_md,
  1489. sizeof(req_ctx->cipher_md),
  1490. false, false);
  1491. if (ret)
  1492. return ret;
  1493. ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
  1494. if (ret)
  1495. return ret;
  1496. if (iv_len) {
  1497. ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len,
  1498. false, false);
  1499. if (ret)
  1500. return ret;
  1501. }
  1502. /* Data out */
  1503. artpec6_crypto_walk_init(&walk, areq->src);
  1504. ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen);
  1505. if (ret)
  1506. return ret;
  1507. /* Data in */
  1508. artpec6_crypto_walk_init(&walk, areq->dst);
  1509. ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen);
  1510. if (ret)
  1511. return ret;
1512. /* CTR and XTS modes require the input padded to a full AES block by the HW. */
  1513. if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR ||
  1514. ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) {
  1515. size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
  1516. areq->cryptlen;
  1517. if (pad) {
  1518. ret = artpec6_crypto_setup_out_descr(common,
  1519. ac->pad_buffer,
  1520. pad, false, false);
  1521. if (ret)
  1522. return ret;
  1523. ret = artpec6_crypto_setup_in_descr(common,
  1524. ac->pad_buffer, pad,
  1525. false);
  1526. if (ret)
  1527. return ret;
  1528. }
  1529. }
  1530. ret = artpec6_crypto_terminate_out_descrs(common);
  1531. if (ret)
  1532. return ret;
  1533. ret = artpec6_crypto_terminate_in_descrs(common);
  1534. if (ret)
  1535. return ret;
  1536. return artpec6_crypto_dma_map_descs(common);
  1537. }
  1538. static int artpec6_crypto_prepare_aead(struct aead_request *areq)
  1539. {
  1540. size_t count;
  1541. int ret;
  1542. size_t input_length;
  1543. struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm);
  1544. struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
  1545. struct crypto_aead *cipher = crypto_aead_reqtfm(areq);
  1546. struct artpec6_crypto_req_common *common = &req_ctx->common;
  1547. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  1548. enum artpec6_crypto_variant variant = ac->variant;
  1549. u32 md_cipher_len;
  1550. artpec6_crypto_init_dma_operation(common);
  1551. /* Key */
  1552. if (variant == ARTPEC6_CRYPTO) {
  1553. ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
  1554. a6_regk_crypto_dlkey);
  1555. } else {
  1556. ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
  1557. a7_regk_crypto_dlkey);
  1558. }
  1559. ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
  1560. sizeof(ctx->key_md), false, false);
  1561. if (ret)
  1562. return ret;
  1563. ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
  1564. ctx->key_length, true, false);
  1565. if (ret)
  1566. return ret;
  1567. req_ctx->cipher_md = 0;
  1568. switch (ctx->key_length) {
  1569. case 16:
  1570. md_cipher_len = regk_crypto_key_128;
  1571. break;
  1572. case 24:
  1573. md_cipher_len = regk_crypto_key_192;
  1574. break;
  1575. case 32:
  1576. md_cipher_len = regk_crypto_key_256;
  1577. break;
  1578. default:
  1579. return -EINVAL;
  1580. }
  1581. if (variant == ARTPEC6_CRYPTO) {
  1582. req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER,
  1583. regk_crypto_aes_gcm);
  1584. req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
  1585. md_cipher_len);
  1586. if (req_ctx->decrypt)
  1587. req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
  1588. } else {
  1589. req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER,
  1590. regk_crypto_aes_gcm);
  1591. req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
  1592. md_cipher_len);
  1593. if (req_ctx->decrypt)
  1594. req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
  1595. }
  1596. ret = artpec6_crypto_setup_out_descr(common,
  1597. (void *) &req_ctx->cipher_md,
  1598. sizeof(req_ctx->cipher_md), false,
  1599. false);
  1600. if (ret)
  1601. return ret;
  1602. ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
  1603. if (ret)
  1604. return ret;
  1605. /* For the decryption, cryptlen includes the tag. */
  1606. input_length = areq->cryptlen;
  1607. if (req_ctx->decrypt)
  1608. input_length -= AES_BLOCK_SIZE;
  1609. /* Prepare the context buffer */
  1610. req_ctx->hw_ctx.aad_length_bits =
  1611. __cpu_to_be64(8*areq->assoclen);
  1612. req_ctx->hw_ctx.text_length_bits =
  1613. __cpu_to_be64(8*input_length);
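/* J0 for a 96-bit GCM IV is IV || 0^31 || 1 (NIST SP 800-38D). */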
  1614. memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
  1615. // The HW omits the initial increment of the counter field.
  1616. memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
  1617. ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
  1618. sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
  1619. if (ret)
  1620. return ret;
  1621. {
  1622. struct artpec6_crypto_walk walk;
  1623. artpec6_crypto_walk_init(&walk, areq->src);
  1624. /* Associated data */
  1625. count = areq->assoclen;
  1626. ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
  1627. if (ret)
  1628. return ret;
  1629. if (!IS_ALIGNED(areq->assoclen, 16)) {
  1630. size_t assoc_pad = 16 - (areq->assoclen % 16);
  1631. /* The HW mandates zero padding here */
  1632. ret = artpec6_crypto_setup_out_descr(common,
  1633. ac->zero_buffer,
  1634. assoc_pad, false,
  1635. false);
  1636. if (ret)
  1637. return ret;
  1638. }
  1639. /* Data to crypto */
  1640. count = input_length;
  1641. ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
  1642. if (ret)
  1643. return ret;
  1644. if (!IS_ALIGNED(input_length, 16)) {
  1645. size_t crypto_pad = 16 - (input_length % 16);
  1646. /* The HW mandates zero padding here */
  1647. ret = artpec6_crypto_setup_out_descr(common,
  1648. ac->zero_buffer,
  1649. crypto_pad,
  1650. false,
  1651. false);
  1652. if (ret)
  1653. return ret;
  1654. }
  1655. }
  1656. /* Data from crypto */
  1657. {
  1658. struct artpec6_crypto_walk walk;
  1659. size_t output_len = areq->cryptlen;
  1660. if (req_ctx->decrypt)
  1661. output_len -= AES_BLOCK_SIZE;
  1662. artpec6_crypto_walk_init(&walk, areq->dst);
  1663. /* skip associated data in the output */
  1664. count = artpec6_crypto_walk_advance(&walk, areq->assoclen);
  1665. if (count)
  1666. return -EINVAL;
  1667. count = output_len;
  1668. ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count);
  1669. if (ret)
  1670. return ret;
  1671. /* Put padding between the cryptotext and the auth tag */
  1672. if (!IS_ALIGNED(output_len, 16)) {
  1673. size_t crypto_pad = 16 - (output_len % 16);
  1674. ret = artpec6_crypto_setup_in_descr(common,
  1675. ac->pad_buffer,
  1676. crypto_pad, false);
  1677. if (ret)
  1678. return ret;
  1679. }
  1680. /* The authentication tag shall follow immediately after
  1681. * the output ciphertext. For decryption it is put in a context
  1682. * buffer for later compare against the input tag.
  1683. */
  1684. count = AES_BLOCK_SIZE;
  1685. if (req_ctx->decrypt) {
  1686. ret = artpec6_crypto_setup_in_descr(common,
  1687. req_ctx->decryption_tag, count, false);
  1688. if (ret)
  1689. return ret;
  1690. } else {
  1691. ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
  1692. count);
  1693. if (ret)
  1694. return ret;
  1695. }
  1696. }
  1697. ret = artpec6_crypto_terminate_in_descrs(common);
  1698. if (ret)
  1699. return ret;
  1700. ret = artpec6_crypto_terminate_out_descrs(common);
  1701. if (ret)
  1702. return ret;
  1703. return artpec6_crypto_dma_map_descs(common);
  1704. }
  1705. static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
  1706. struct list_head *completions)
  1707. {
  1708. struct artpec6_crypto_req_common *req;
  1709. while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) {
  1710. req = list_first_entry(&ac->queue,
  1711. struct artpec6_crypto_req_common,
  1712. list);
  1713. list_move_tail(&req->list, &ac->pending);
  1714. artpec6_crypto_start_dma(req);
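/* The submitter is notified with -EINPROGRESS by the caller once
 * the queue lock has been released.
 */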
  1715. list_add_tail(&req->complete_in_progress, completions);
  1716. }
  1717. /*
  1718. * In some cases, the hardware can raise an in_eop_flush interrupt
1719. * before actually updating the status, so we have a timer which will
1720. * recheck the status on timeout. Since the cases are expected to be
1721. * very rare, we use a relatively large timeout value. There should be
1722. * no noticeable negative effect if we time out spuriously.
  1723. */
  1724. if (ac->pending_count)
  1725. mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100));
  1726. else
  1727. del_timer(&ac->timer);
  1728. }
  1729. static void artpec6_crypto_timeout(struct timer_list *t)
  1730. {
  1731. struct artpec6_crypto *ac = from_timer(ac, t, timer);
  1732. dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");
  1733. tasklet_schedule(&ac->task);
  1734. }
  1735. static void artpec6_crypto_task(unsigned long data)
  1736. {
  1737. struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
  1738. struct artpec6_crypto_req_common *req;
  1739. struct artpec6_crypto_req_common *n;
  1740. struct list_head complete_done;
  1741. struct list_head complete_in_progress;
  1742. INIT_LIST_HEAD(&complete_done);
  1743. INIT_LIST_HEAD(&complete_in_progress);
  1744. if (list_empty(&ac->pending)) {
  1745. pr_debug("Spurious IRQ\n");
  1746. return;
  1747. }
  1748. spin_lock_bh(&ac->queue_lock);
  1749. list_for_each_entry_safe(req, n, &ac->pending, list) {
  1750. struct artpec6_crypto_dma_descriptors *dma = req->dma;
  1751. u32 stat;
  1752. dma_sync_single_for_cpu(artpec6_crypto_dev, dma->stat_dma_addr,
  1753. sizeof(dma->stat[0]),
  1754. DMA_BIDIRECTIONAL);
  1755. stat = req->dma->stat[req->dma->in_cnt-1];
  1756. /* A non-zero final status descriptor indicates
  1757. * this job has finished.
  1758. */
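/* Stop at the first request that is still pending; entries later in
 * the list were submitted after it and are checked on the next pass.
 */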
  1759. pr_debug("Request %p status is %X\n", req, stat);
  1760. if (!stat)
  1761. break;
  1762. /* Allow testing of timeout handling with fault injection */
  1763. #ifdef CONFIG_FAULT_INJECTION
  1764. if (should_fail(&artpec6_crypto_fail_status_read, 1))
  1765. continue;
  1766. #endif
  1767. pr_debug("Completing request %p\n", req);
  1768. list_move_tail(&req->list, &complete_done);
  1769. artpec6_crypto_dma_unmap_all(req);
  1770. artpec6_crypto_copy_bounce_buffers(req);
  1771. ac->pending_count--;
  1772. artpec6_crypto_common_destroy(req);
  1773. }
  1774. artpec6_crypto_process_queue(ac, &complete_in_progress);
  1775. spin_unlock_bh(&ac->queue_lock);
  1776. /* Perform the completion callbacks without holding the queue lock
  1777. * to allow new request submissions from the callbacks.
  1778. */
  1779. list_for_each_entry_safe(req, n, &complete_done, list) {
  1780. req->complete(req->req);
  1781. }
  1782. list_for_each_entry_safe(req, n, &complete_in_progress,
  1783. complete_in_progress) {
  1784. req->req->complete(req->req, -EINPROGRESS);
  1785. }
  1786. }
  1787. static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
  1788. {
  1789. req->complete(req, 0);
  1790. }
  1791. static void
  1792. artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
  1793. {
  1794. struct skcipher_request *cipher_req = container_of(req,
  1795. struct skcipher_request, base);
  1796. scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
  1797. cipher_req->cryptlen - AES_BLOCK_SIZE,
  1798. AES_BLOCK_SIZE, 0);
  1799. req->complete(req, 0);
  1800. }
  1801. static void
  1802. artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
  1803. {
  1804. struct skcipher_request *cipher_req = container_of(req,
  1805. struct skcipher_request, base);
  1806. scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
  1807. cipher_req->cryptlen - AES_BLOCK_SIZE,
  1808. AES_BLOCK_SIZE, 0);
  1809. req->complete(req, 0);
  1810. }
  1811. static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
  1812. {
  1813. int result = 0;
  1814. /* Verify GCM hashtag. */
  1815. struct aead_request *areq = container_of(req,
  1816. struct aead_request, base);
  1817. struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
  1818. if (req_ctx->decrypt) {
  1819. u8 input_tag[AES_BLOCK_SIZE];
  1820. sg_pcopy_to_buffer(areq->src,
  1821. sg_nents(areq->src),
  1822. input_tag,
  1823. AES_BLOCK_SIZE,
  1824. areq->assoclen + areq->cryptlen -
  1825. AES_BLOCK_SIZE);
  1826. if (memcmp(req_ctx->decryption_tag,
  1827. input_tag,
  1828. AES_BLOCK_SIZE)) {
  1829. pr_debug("***EBADMSG:\n");
  1830. print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
  1831. input_tag, AES_BLOCK_SIZE, true);
  1832. print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
  1833. req_ctx->decryption_tag,
  1834. AES_BLOCK_SIZE, true);
  1835. result = -EBADMSG;
  1836. }
  1837. }
  1838. req->complete(req, result);
  1839. }
  1840. static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
  1841. {
  1842. req->complete(req, 0);
  1843. }
  1844. /*------------------- Hash functions -----------------------------------------*/
  1845. static int
  1846. artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
  1847. const u8 *key, unsigned int keylen)
  1848. {
  1849. struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base);
  1850. size_t blocksize;
  1851. int ret;
  1852. if (!keylen) {
  1853. pr_err("Invalid length (%d) of HMAC key\n",
  1854. keylen);
  1855. return -EINVAL;
  1856. }
  1857. memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
  1858. blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
  1859. if (keylen > blocksize) {
  1860. SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);
  1861. hdesc->tfm = tfm_ctx->child_hash;
  1862. hdesc->flags = crypto_ahash_get_flags(tfm) &
  1863. CRYPTO_TFM_REQ_MAY_SLEEP;
  1864. tfm_ctx->hmac_key_length = blocksize;
  1865. ret = crypto_shash_digest(hdesc, key, keylen,
  1866. tfm_ctx->hmac_key);
  1867. if (ret)
  1868. return ret;
  1869. } else {
  1870. memcpy(tfm_ctx->hmac_key, key, keylen);
  1871. tfm_ctx->hmac_key_length = keylen;
  1872. }
  1873. return 0;
  1874. }
  1875. static int
  1876. artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
  1877. {
  1878. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  1879. enum artpec6_crypto_variant variant = ac->variant;
  1880. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1881. u32 oper;
  1882. memset(req_ctx, 0, sizeof(*req_ctx));
  1883. req_ctx->hash_flags = HASH_FLAG_INIT_CTX;
  1884. if (hmac)
  1885. req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY);
  1886. switch (type) {
  1887. case ARTPEC6_CRYPTO_HASH_SHA1:
  1888. oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1;
  1889. break;
  1890. case ARTPEC6_CRYPTO_HASH_SHA256:
  1891. oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
  1892. break;
  1893. case ARTPEC6_CRYPTO_HASH_SHA384:
  1894. oper = hmac ? regk_crypto_hmac_sha384 : regk_crypto_sha384;
  1895. break;
  1896. case ARTPEC6_CRYPTO_HASH_SHA512:
  1897. oper = hmac ? regk_crypto_hmac_sha512 : regk_crypto_sha512;
  1898. break;
  1899. default:
  1900. pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
  1901. return -EINVAL;
  1902. }
  1903. if (variant == ARTPEC6_CRYPTO)
  1904. req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper);
  1905. else
  1906. req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper);
  1907. return 0;
  1908. }
  1909. static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
  1910. {
  1911. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1912. int ret;
  1913. if (!req_ctx->common.dma) {
  1914. ret = artpec6_crypto_common_init(&req_ctx->common,
  1915. &req->base,
  1916. artpec6_crypto_complete_hash,
  1917. NULL, 0);
  1918. if (ret)
  1919. return ret;
  1920. }
  1921. ret = artpec6_crypto_prepare_hash(req);
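/* HASH_NO_START means all input fitted in the partial buffer, so
 * no DMA job is submitted and the descriptors are freed again.
 */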
  1922. switch (ret) {
  1923. case ARTPEC6_CRYPTO_PREPARE_HASH_START:
  1924. ret = artpec6_crypto_submit(&req_ctx->common);
  1925. break;
  1926. case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
  1927. ret = 0;
  1928. /* Fallthrough */
  1929. default:
  1930. artpec6_crypto_common_destroy(&req_ctx->common);
  1931. break;
  1932. }
  1933. return ret;
  1934. }
  1935. static int artpec6_crypto_hash_final(struct ahash_request *req)
  1936. {
  1937. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1938. req_ctx->hash_flags |= HASH_FLAG_FINALIZE;
  1939. return artpec6_crypto_prepare_submit_hash(req);
  1940. }
  1941. static int artpec6_crypto_hash_update(struct ahash_request *req)
  1942. {
  1943. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1944. req_ctx->hash_flags |= HASH_FLAG_UPDATE;
  1945. return artpec6_crypto_prepare_submit_hash(req);
  1946. }
  1947. static int artpec6_crypto_sha1_init(struct ahash_request *req)
  1948. {
  1949. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
  1950. }
  1951. static int artpec6_crypto_sha1_digest(struct ahash_request *req)
  1952. {
  1953. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1954. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
  1955. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  1956. return artpec6_crypto_prepare_submit_hash(req);
  1957. }
  1958. static int artpec6_crypto_sha256_init(struct ahash_request *req)
  1959. {
  1960. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
  1961. }
  1962. static int artpec6_crypto_sha256_digest(struct ahash_request *req)
  1963. {
  1964. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1965. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
  1966. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  1967. return artpec6_crypto_prepare_submit_hash(req);
  1968. }
  1969. static int __maybe_unused artpec6_crypto_sha384_init(struct ahash_request *req)
  1970. {
  1971. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
  1972. }
  1973. static int __maybe_unused
  1974. artpec6_crypto_sha384_digest(struct ahash_request *req)
  1975. {
  1976. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1977. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 0);
  1978. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  1979. return artpec6_crypto_prepare_submit_hash(req);
  1980. }
  1981. static int artpec6_crypto_sha512_init(struct ahash_request *req)
  1982. {
  1983. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
  1984. }
  1985. static int artpec6_crypto_sha512_digest(struct ahash_request *req)
  1986. {
  1987. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  1988. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 0);
  1989. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  1990. return artpec6_crypto_prepare_submit_hash(req);
  1991. }
  1992. static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
  1993. {
  1994. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
  1995. }
  1996. static int __maybe_unused
  1997. artpec6_crypto_hmac_sha384_init(struct ahash_request *req)
  1998. {
  1999. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
  2000. }
  2001. static int artpec6_crypto_hmac_sha512_init(struct ahash_request *req)
  2002. {
  2003. return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
  2004. }
  2005. static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
  2006. {
  2007. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  2008. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
  2009. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  2010. return artpec6_crypto_prepare_submit_hash(req);
  2011. }
  2012. static int __maybe_unused
  2013. artpec6_crypto_hmac_sha384_digest(struct ahash_request *req)
  2014. {
  2015. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  2016. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA384, 1);
  2017. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  2018. return artpec6_crypto_prepare_submit_hash(req);
  2019. }
  2020. static int artpec6_crypto_hmac_sha512_digest(struct ahash_request *req)
  2021. {
  2022. struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
  2023. artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA512, 1);
  2024. req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;
  2025. return artpec6_crypto_prepare_submit_hash(req);
  2026. }
  2027. static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
  2028. const char *base_hash_name)
  2029. {
  2030. struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
  2031. crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
  2032. sizeof(struct artpec6_hash_request_context));
  2033. memset(tfm_ctx, 0, sizeof(*tfm_ctx));
  2034. if (base_hash_name) {
  2035. struct crypto_shash *child;
  2036. child = crypto_alloc_shash(base_hash_name, 0,
  2037. CRYPTO_ALG_NEED_FALLBACK);
  2038. if (IS_ERR(child))
  2039. return PTR_ERR(child);
  2040. tfm_ctx->child_hash = child;
  2041. }
  2042. return 0;
  2043. }
  2044. static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
  2045. {
  2046. return artpec6_crypto_ahash_init_common(tfm, NULL);
  2047. }
  2048. static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
  2049. {
  2050. return artpec6_crypto_ahash_init_common(tfm, "sha256");
  2051. }
  2052. static int __maybe_unused
  2053. artpec6_crypto_ahash_init_hmac_sha384(struct crypto_tfm *tfm)
  2054. {
  2055. return artpec6_crypto_ahash_init_common(tfm, "sha384");
  2056. }
  2057. static int artpec6_crypto_ahash_init_hmac_sha512(struct crypto_tfm *tfm)
  2058. {
  2059. return artpec6_crypto_ahash_init_common(tfm, "sha512");
  2060. }
  2061. static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
  2062. {
  2063. struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
  2064. if (tfm_ctx->child_hash)
  2065. crypto_free_shash(tfm_ctx->child_hash);
  2066. memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
  2067. tfm_ctx->hmac_key_length = 0;
  2068. }
  2069. static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
  2070. {
  2071. const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
  2072. struct artpec6_hash_export_state *state = out;
  2073. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  2074. enum artpec6_crypto_variant variant = ac->variant;
  2075. BUILD_BUG_ON(sizeof(state->partial_buffer) !=
  2076. sizeof(ctx->partial_buffer));
  2077. BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));
  2078. state->digcnt = ctx->digcnt;
  2079. state->partial_bytes = ctx->partial_bytes;
  2080. state->hash_flags = ctx->hash_flags;
  2081. if (variant == ARTPEC6_CRYPTO)
  2082. state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
  2083. else
  2084. state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);
  2085. memcpy(state->partial_buffer, ctx->partial_buffer,
  2086. sizeof(state->partial_buffer));
  2087. memcpy(state->digeststate, ctx->digeststate,
  2088. sizeof(state->digeststate));
  2089. return 0;
  2090. }
  2091. static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
  2092. {
  2093. struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
  2094. const struct artpec6_hash_export_state *state = in;
  2095. struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
  2096. enum artpec6_crypto_variant variant = ac->variant;
  2097. memset(ctx, 0, sizeof(*ctx));
  2098. ctx->digcnt = state->digcnt;
  2099. ctx->partial_bytes = state->partial_bytes;
  2100. ctx->hash_flags = state->hash_flags;
  2101. if (variant == ARTPEC6_CRYPTO)
  2102. ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
  2103. else
  2104. ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);
  2105. memcpy(ctx->partial_buffer, state->partial_buffer,
  2106. sizeof(state->partial_buffer));
  2107. memcpy(ctx->digeststate, state->digeststate,
  2108. sizeof(state->digeststate));
  2109. return 0;
  2110. }
  2111. static int init_crypto_hw(struct artpec6_crypto *ac)
  2112. {
  2113. enum artpec6_crypto_variant variant = ac->variant;
  2114. void __iomem *base = ac->base;
  2115. u32 out_descr_buf_size;
  2116. u32 out_data_buf_size;
  2117. u32 in_data_buf_size;
  2118. u32 in_descr_buf_size;
  2119. u32 in_stat_buf_size;
  2120. u32 in, out;
  2121. /*
  2122. * The PDMA unit contains 1984 bytes of internal memory for the OUT
  2123. * channels and 1024 bytes for the IN channel. This is an elastic
  2124. * memory used to internally store the descriptors and data. The values
2125. * are specified in 64 byte increments. Trustzone buffers are not
  2126. * used at this stage.
  2127. */
  2128. out_data_buf_size = 16; /* 1024 bytes for data */
  2129. out_descr_buf_size = 15; /* 960 bytes for descriptors */
  2130. in_data_buf_size = 8; /* 512 bytes for data */
  2131. in_descr_buf_size = 4; /* 256 bytes for descriptors */
  2132. in_stat_buf_size = 4; /* 256 bytes for stat descrs */
  2133. BUILD_BUG_ON_MSG((out_data_buf_size
  2134. + out_descr_buf_size) * 64 > 1984,
  2135. "Invalid OUT configuration");
  2136. BUILD_BUG_ON_MSG((in_data_buf_size
  2137. + in_descr_buf_size
  2138. + in_stat_buf_size) * 64 > 1024,
  2139. "Invalid IN configuration");
  2140. in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
  2141. FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
  2142. FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);
  2143. out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
  2144. FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);
  2145. writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
  2146. writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);
  2147. if (variant == ARTPEC6_CRYPTO) {
  2148. writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
  2149. writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
  2150. writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
  2151. A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
  2152. base + A6_PDMA_INTR_MASK);
  2153. } else {
  2154. writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
  2155. writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
  2156. writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
  2157. A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
  2158. base + A7_PDMA_INTR_MASK);
  2159. }
  2160. return 0;
  2161. }
  2162. static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
  2163. {
  2164. enum artpec6_crypto_variant variant = ac->variant;
  2165. void __iomem *base = ac->base;
  2166. if (variant == ARTPEC6_CRYPTO) {
  2167. writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
  2168. writel_relaxed(0, base + A6_PDMA_IN_CFG);
  2169. writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
  2170. } else {
  2171. writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
  2172. writel_relaxed(0, base + A7_PDMA_IN_CFG);
  2173. writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
  2174. }
  2175. writel_relaxed(0, base + PDMA_OUT_CFG);
  2176. }
  2177. static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
  2178. {
  2179. struct artpec6_crypto *ac = dev_id;
  2180. enum artpec6_crypto_variant variant = ac->variant;
  2181. void __iomem *base = ac->base;
  2182. u32 mask_in_data, mask_in_eop_flush;
  2183. u32 in_cmd_flush_stat, in_cmd_reg;
  2184. u32 ack_intr_reg;
  2185. u32 ack = 0;
  2186. u32 intr;
  2187. if (variant == ARTPEC6_CRYPTO) {
  2188. intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
  2189. mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
  2190. mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
  2191. in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
  2192. in_cmd_reg = A6_PDMA_IN_CMD;
  2193. ack_intr_reg = A6_PDMA_ACK_INTR;
  2194. } else {
  2195. intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
  2196. mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
  2197. mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
  2198. in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
  2199. in_cmd_reg = A7_PDMA_IN_CMD;
  2200. ack_intr_reg = A7_PDMA_ACK_INTR;
  2201. }
  2202. /* We get two interrupt notifications from each job.
  2203. * The in_data means all data was sent to memory and then
  2204. * we request a status flush command to write the per-job
  2205. * status to its status vector. This ensures that the
2206. * tasklet can detect exactly how many of the submitted jobs
2207. * have finished.
  2208. */
  2209. if (intr & mask_in_data)
  2210. ack |= mask_in_data;
  2211. if (intr & mask_in_eop_flush)
  2212. ack |= mask_in_eop_flush;
  2213. else
  2214. writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);
  2215. writel_relaxed(ack, base + ack_intr_reg);
  2216. if (intr & mask_in_eop_flush)
  2217. tasklet_schedule(&ac->task);
  2218. return IRQ_HANDLED;
  2219. }
  2220. /*------------------- Algorithm definitions ----------------------------------*/
  2221. /* Hashes */
  2222. static struct ahash_alg hash_algos[] = {
  2223. /* SHA-1 */
  2224. {
  2225. .init = artpec6_crypto_sha1_init,
  2226. .update = artpec6_crypto_hash_update,
  2227. .final = artpec6_crypto_hash_final,
  2228. .digest = artpec6_crypto_sha1_digest,
  2229. .import = artpec6_crypto_hash_import,
  2230. .export = artpec6_crypto_hash_export,
  2231. .halg.digestsize = SHA1_DIGEST_SIZE,
  2232. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2233. .halg.base = {
  2234. .cra_name = "sha1",
  2235. .cra_driver_name = "artpec-sha1",
  2236. .cra_priority = 300,
  2237. .cra_flags = CRYPTO_ALG_ASYNC,
  2238. .cra_blocksize = SHA1_BLOCK_SIZE,
  2239. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2240. .cra_alignmask = 3,
  2241. .cra_module = THIS_MODULE,
  2242. .cra_init = artpec6_crypto_ahash_init,
  2243. .cra_exit = artpec6_crypto_ahash_exit,
  2244. }
  2245. },
  2246. /* SHA-256 */
  2247. {
  2248. .init = artpec6_crypto_sha256_init,
  2249. .update = artpec6_crypto_hash_update,
  2250. .final = artpec6_crypto_hash_final,
  2251. .digest = artpec6_crypto_sha256_digest,
  2252. .import = artpec6_crypto_hash_import,
  2253. .export = artpec6_crypto_hash_export,
  2254. .halg.digestsize = SHA256_DIGEST_SIZE,
  2255. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2256. .halg.base = {
  2257. .cra_name = "sha256",
  2258. .cra_driver_name = "artpec-sha256",
  2259. .cra_priority = 300,
  2260. .cra_flags = CRYPTO_ALG_ASYNC,
  2261. .cra_blocksize = SHA256_BLOCK_SIZE,
  2262. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2263. .cra_alignmask = 3,
  2264. .cra_module = THIS_MODULE,
  2265. .cra_init = artpec6_crypto_ahash_init,
  2266. .cra_exit = artpec6_crypto_ahash_exit,
  2267. }
  2268. },
  2269. /* HMAC SHA-256 */
  2270. {
  2271. .init = artpec6_crypto_hmac_sha256_init,
  2272. .update = artpec6_crypto_hash_update,
  2273. .final = artpec6_crypto_hash_final,
  2274. .digest = artpec6_crypto_hmac_sha256_digest,
  2275. .import = artpec6_crypto_hash_import,
  2276. .export = artpec6_crypto_hash_export,
  2277. .setkey = artpec6_crypto_hash_set_key,
  2278. .halg.digestsize = SHA256_DIGEST_SIZE,
  2279. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2280. .halg.base = {
  2281. .cra_name = "hmac(sha256)",
  2282. .cra_driver_name = "artpec-hmac-sha256",
  2283. .cra_priority = 300,
  2284. .cra_flags = CRYPTO_ALG_ASYNC,
  2285. .cra_blocksize = SHA256_BLOCK_SIZE,
  2286. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2287. .cra_alignmask = 3,
  2288. .cra_module = THIS_MODULE,
  2289. .cra_init = artpec6_crypto_ahash_init_hmac_sha256,
  2290. .cra_exit = artpec6_crypto_ahash_exit,
  2291. }
  2292. },
  2293. };
  2294. static struct ahash_alg artpec7_hash_algos[] = {
  2295. /* SHA-384 */
  2296. {
  2297. .init = artpec6_crypto_sha384_init,
  2298. .update = artpec6_crypto_hash_update,
  2299. .final = artpec6_crypto_hash_final,
  2300. .digest = artpec6_crypto_sha384_digest,
  2301. .import = artpec6_crypto_hash_import,
  2302. .export = artpec6_crypto_hash_export,
  2303. .halg.digestsize = SHA384_DIGEST_SIZE,
  2304. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2305. .halg.base = {
  2306. .cra_name = "sha384",
  2307. .cra_driver_name = "artpec-sha384",
  2308. .cra_priority = 300,
  2309. .cra_flags = CRYPTO_ALG_ASYNC,
  2310. .cra_blocksize = SHA384_BLOCK_SIZE,
  2311. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2312. .cra_alignmask = 3,
  2313. .cra_module = THIS_MODULE,
  2314. .cra_init = artpec6_crypto_ahash_init,
  2315. .cra_exit = artpec6_crypto_ahash_exit,
  2316. }
  2317. },
  2318. /* HMAC SHA-384 */
  2319. {
  2320. .init = artpec6_crypto_hmac_sha384_init,
  2321. .update = artpec6_crypto_hash_update,
  2322. .final = artpec6_crypto_hash_final,
  2323. .digest = artpec6_crypto_hmac_sha384_digest,
  2324. .import = artpec6_crypto_hash_import,
  2325. .export = artpec6_crypto_hash_export,
  2326. .setkey = artpec6_crypto_hash_set_key,
  2327. .halg.digestsize = SHA384_DIGEST_SIZE,
  2328. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2329. .halg.base = {
  2330. .cra_name = "hmac(sha384)",
  2331. .cra_driver_name = "artpec-hmac-sha384",
  2332. .cra_priority = 300,
  2333. .cra_flags = CRYPTO_ALG_ASYNC,
  2334. .cra_blocksize = SHA384_BLOCK_SIZE,
  2335. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2336. .cra_alignmask = 3,
  2337. .cra_module = THIS_MODULE,
  2338. .cra_init = artpec6_crypto_ahash_init_hmac_sha384,
  2339. .cra_exit = artpec6_crypto_ahash_exit,
  2340. }
  2341. },
  2342. /* SHA-512 */
  2343. {
  2344. .init = artpec6_crypto_sha512_init,
  2345. .update = artpec6_crypto_hash_update,
  2346. .final = artpec6_crypto_hash_final,
  2347. .digest = artpec6_crypto_sha512_digest,
  2348. .import = artpec6_crypto_hash_import,
  2349. .export = artpec6_crypto_hash_export,
  2350. .halg.digestsize = SHA512_DIGEST_SIZE,
  2351. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2352. .halg.base = {
  2353. .cra_name = "sha512",
  2354. .cra_driver_name = "artpec-sha512",
  2355. .cra_priority = 300,
  2356. .cra_flags = CRYPTO_ALG_ASYNC,
  2357. .cra_blocksize = SHA512_BLOCK_SIZE,
  2358. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2359. .cra_alignmask = 3,
  2360. .cra_module = THIS_MODULE,
  2361. .cra_init = artpec6_crypto_ahash_init,
  2362. .cra_exit = artpec6_crypto_ahash_exit,
  2363. }
  2364. },
  2365. /* HMAC SHA-512 */
  2366. {
  2367. .init = artpec6_crypto_hmac_sha512_init,
  2368. .update = artpec6_crypto_hash_update,
  2369. .final = artpec6_crypto_hash_final,
  2370. .digest = artpec6_crypto_hmac_sha512_digest,
  2371. .import = artpec6_crypto_hash_import,
  2372. .export = artpec6_crypto_hash_export,
  2373. .setkey = artpec6_crypto_hash_set_key,
  2374. .halg.digestsize = SHA512_DIGEST_SIZE,
  2375. .halg.statesize = sizeof(struct artpec6_hash_export_state),
  2376. .halg.base = {
  2377. .cra_name = "hmac(sha512)",
  2378. .cra_driver_name = "artpec-hmac-sha512",
  2379. .cra_priority = 300,
  2380. .cra_flags = CRYPTO_ALG_ASYNC,
  2381. .cra_blocksize = SHA512_BLOCK_SIZE,
  2382. .cra_ctxsize = sizeof(struct artpec6_hashalg_context),
  2383. .cra_alignmask = 3,
  2384. .cra_module = THIS_MODULE,
  2385. .cra_init = artpec6_crypto_ahash_init_hmac_sha512,
  2386. .cra_exit = artpec6_crypto_ahash_exit,
  2387. }
  2388. },
  2389. };
  2390. /* Crypto */
  2391. static struct skcipher_alg crypto_algos[] = {
  2392. /* AES - ECB */
  2393. {
  2394. .base = {
  2395. .cra_name = "ecb(aes)",
  2396. .cra_driver_name = "artpec6-ecb-aes",
  2397. .cra_priority = 300,
  2398. .cra_flags = CRYPTO_ALG_ASYNC,
  2399. .cra_blocksize = AES_BLOCK_SIZE,
  2400. .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
  2401. .cra_alignmask = 3,
  2402. .cra_module = THIS_MODULE,
  2403. },
  2404. .min_keysize = AES_MIN_KEY_SIZE,
  2405. .max_keysize = AES_MAX_KEY_SIZE,
  2406. .setkey = artpec6_crypto_cipher_set_key,
  2407. .encrypt = artpec6_crypto_encrypt,
  2408. .decrypt = artpec6_crypto_decrypt,
  2409. .init = artpec6_crypto_aes_ecb_init,
  2410. .exit = artpec6_crypto_aes_exit,
  2411. },
  2412. /* AES - CTR */
  2413. {
  2414. .base = {
  2415. .cra_name = "ctr(aes)",
  2416. .cra_driver_name = "artpec6-ctr-aes",
  2417. .cra_priority = 300,
  2418. .cra_flags = CRYPTO_ALG_ASYNC |
  2419. CRYPTO_ALG_NEED_FALLBACK,
  2420. .cra_blocksize = 1,
  2421. .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
  2422. .cra_alignmask = 3,
  2423. .cra_module = THIS_MODULE,
  2424. },
  2425. .min_keysize = AES_MIN_KEY_SIZE,
  2426. .max_keysize = AES_MAX_KEY_SIZE,
  2427. .ivsize = AES_BLOCK_SIZE,
  2428. .setkey = artpec6_crypto_cipher_set_key,
  2429. .encrypt = artpec6_crypto_ctr_encrypt,
  2430. .decrypt = artpec6_crypto_ctr_decrypt,
  2431. .init = artpec6_crypto_aes_ctr_init,
  2432. .exit = artpec6_crypto_aes_ctr_exit,
  2433. },
  2434. /* AES - CBC */
  2435. {
  2436. .base = {
  2437. .cra_name = "cbc(aes)",
  2438. .cra_driver_name = "artpec6-cbc-aes",
  2439. .cra_priority = 300,
  2440. .cra_flags = CRYPTO_ALG_ASYNC,
  2441. .cra_blocksize = AES_BLOCK_SIZE,
  2442. .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
  2443. .cra_alignmask = 3,
  2444. .cra_module = THIS_MODULE,
  2445. },
  2446. .min_keysize = AES_MIN_KEY_SIZE,
  2447. .max_keysize = AES_MAX_KEY_SIZE,
  2448. .ivsize = AES_BLOCK_SIZE,
  2449. .setkey = artpec6_crypto_cipher_set_key,
  2450. .encrypt = artpec6_crypto_encrypt,
  2451. .decrypt = artpec6_crypto_decrypt,
  2452. .init = artpec6_crypto_aes_cbc_init,
  2453. .exit = artpec6_crypto_aes_exit
  2454. },
  2455. /* AES - XTS */
  2456. {
  2457. .base = {
  2458. .cra_name = "xts(aes)",
  2459. .cra_driver_name = "artpec6-xts-aes",
  2460. .cra_priority = 300,
  2461. .cra_flags = CRYPTO_ALG_ASYNC,
  2462. .cra_blocksize = 1,
  2463. .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
  2464. .cra_alignmask = 3,
  2465. .cra_module = THIS_MODULE,
  2466. },
  2467. .min_keysize = 2*AES_MIN_KEY_SIZE,
  2468. .max_keysize = 2*AES_MAX_KEY_SIZE,
  2469. .ivsize = 16,
  2470. .setkey = artpec6_crypto_xts_set_key,
  2471. .encrypt = artpec6_crypto_encrypt,
  2472. .decrypt = artpec6_crypto_decrypt,
  2473. .init = artpec6_crypto_aes_xts_init,
  2474. .exit = artpec6_crypto_aes_exit,
  2475. },
  2476. };
  2477. static struct aead_alg aead_algos[] = {
  2478. {
  2479. .init = artpec6_crypto_aead_init,
  2480. .setkey = artpec6_crypto_aead_set_key,
  2481. .encrypt = artpec6_crypto_aead_encrypt,
  2482. .decrypt = artpec6_crypto_aead_decrypt,
  2483. .ivsize = GCM_AES_IV_SIZE,
  2484. .maxauthsize = AES_BLOCK_SIZE,
  2485. .base = {
  2486. .cra_name = "gcm(aes)",
  2487. .cra_driver_name = "artpec-gcm-aes",
  2488. .cra_priority = 300,
  2489. .cra_flags = CRYPTO_ALG_ASYNC |
  2490. CRYPTO_ALG_KERN_DRIVER_ONLY,
  2491. .cra_blocksize = 1,
  2492. .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
  2493. .cra_alignmask = 3,
  2494. .cra_module = THIS_MODULE,
  2495. },
  2496. }
  2497. };
  2498. #ifdef CONFIG_DEBUG_FS
  2499. struct dbgfs_u32 {
  2500. char *name;
  2501. mode_t mode;
  2502. u32 *flag;
  2503. char *desc;
  2504. };
  2505. static struct dentry *dbgfs_root;
  2506. static void artpec6_crypto_init_debugfs(void)
  2507. {
  2508. dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);
  2509. if (!dbgfs_root || IS_ERR(dbgfs_root)) {
  2510. dbgfs_root = NULL;
  2511. pr_err("%s: Could not initialise debugfs!\n", MODULE_NAME);
  2512. return;
  2513. }
  2514. #ifdef CONFIG_FAULT_INJECTION
  2515. fault_create_debugfs_attr("fail_status_read", dbgfs_root,
  2516. &artpec6_crypto_fail_status_read);
  2517. fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
  2518. &artpec6_crypto_fail_dma_array_full);
  2519. #endif
  2520. }
  2521. static void artpec6_crypto_free_debugfs(void)
  2522. {
  2523. if (!dbgfs_root)
  2524. return;
  2525. debugfs_remove_recursive(dbgfs_root);
  2526. dbgfs_root = NULL;
  2527. }
  2528. #endif
  2529. static const struct of_device_id artpec6_crypto_of_match[] = {
  2530. { .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
  2531. { .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
  2532. {}
  2533. };
  2534. MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
  2535. static int artpec6_crypto_probe(struct platform_device *pdev)
  2536. {
  2537. const struct of_device_id *match;
  2538. enum artpec6_crypto_variant variant;
  2539. struct artpec6_crypto *ac;
  2540. struct device *dev = &pdev->dev;
  2541. void __iomem *base;
  2542. struct resource *res;
  2543. int irq;
  2544. int err;
  2545. if (artpec6_crypto_dev)
  2546. return -ENODEV;
  2547. match = of_match_node(artpec6_crypto_of_match, dev->of_node);
  2548. if (!match)
  2549. return -EINVAL;
  2550. variant = (enum artpec6_crypto_variant)match->data;
  2551. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  2552. base = devm_ioremap_resource(&pdev->dev, res);
  2553. if (IS_ERR(base))
  2554. return PTR_ERR(base);
  2555. irq = platform_get_irq(pdev, 0);
  2556. if (irq < 0)
  2557. return -ENODEV;
  2558. ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
  2559. GFP_KERNEL);
  2560. if (!ac)
  2561. return -ENOMEM;
  2562. platform_set_drvdata(pdev, ac);
  2563. ac->variant = variant;
  2564. spin_lock_init(&ac->queue_lock);
  2565. INIT_LIST_HEAD(&ac->queue);
  2566. INIT_LIST_HEAD(&ac->pending);
  2567. timer_setup(&ac->timer, artpec6_crypto_timeout, 0);
  2568. ac->base = base;
  2569. ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
  2570. sizeof(struct artpec6_crypto_dma_descriptors),
  2571. 64,
  2572. 0,
  2573. NULL);
  2574. if (!ac->dma_cache)
  2575. return -ENOMEM;
  2576. #ifdef CONFIG_DEBUG_FS
  2577. artpec6_crypto_init_debugfs();
  2578. #endif
  2579. tasklet_init(&ac->task, artpec6_crypto_task,
  2580. (unsigned long)ac);
  2581. ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
  2582. GFP_KERNEL);
  2583. if (!ac->pad_buffer)
  2584. return -ENOMEM;
  2585. ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);
  2586. ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
  2587. GFP_KERNEL);
  2588. if (!ac->zero_buffer)
  2589. return -ENOMEM;
  2590. ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);
  2591. err = init_crypto_hw(ac);
  2592. if (err)
  2593. goto free_cache;
  2594. err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
  2595. "artpec6-crypto", ac);
  2596. if (err)
  2597. goto disable_hw;
  2598. artpec6_crypto_dev = &pdev->dev;
  2599. err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
  2600. if (err) {
  2601. dev_err(dev, "Failed to register ahashes\n");
  2602. goto disable_hw;
  2603. }
  2604. if (variant != ARTPEC6_CRYPTO) {
  2605. err = crypto_register_ahashes(artpec7_hash_algos,
  2606. ARRAY_SIZE(artpec7_hash_algos));
  2607. if (err) {
  2608. dev_err(dev, "Failed to register ahashes\n");
  2609. goto unregister_ahashes;
  2610. }
  2611. }
  2612. err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
  2613. if (err) {
  2614. dev_err(dev, "Failed to register ciphers\n");
  2615. goto unregister_a7_ahashes;
  2616. }
  2617. err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
  2618. if (err) {
  2619. dev_err(dev, "Failed to register aeads\n");
  2620. goto unregister_algs;
  2621. }
  2622. return 0;
  2623. unregister_algs:
  2624. crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
  2625. unregister_a7_ahashes:
  2626. if (variant != ARTPEC6_CRYPTO)
  2627. crypto_unregister_ahashes(artpec7_hash_algos,
  2628. ARRAY_SIZE(artpec7_hash_algos));
  2629. unregister_ahashes:
  2630. crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
  2631. disable_hw:
  2632. artpec6_crypto_disable_hw(ac);
  2633. free_cache:
  2634. kmem_cache_destroy(ac->dma_cache);
  2635. return err;
  2636. }
  2637. static int artpec6_crypto_remove(struct platform_device *pdev)
  2638. {
  2639. struct artpec6_crypto *ac = platform_get_drvdata(pdev);
  2640. int irq = platform_get_irq(pdev, 0);
  2641. crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
  2642. if (ac->variant != ARTPEC6_CRYPTO)
  2643. crypto_unregister_ahashes(artpec7_hash_algos,
  2644. ARRAY_SIZE(artpec7_hash_algos));
  2645. crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
  2646. crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));
  2647. tasklet_disable(&ac->task);
  2648. devm_free_irq(&pdev->dev, irq, ac);
  2649. tasklet_kill(&ac->task);
  2650. del_timer_sync(&ac->timer);
  2651. artpec6_crypto_disable_hw(ac);
  2652. kmem_cache_destroy(ac->dma_cache);
  2653. #ifdef CONFIG_DEBUG_FS
  2654. artpec6_crypto_free_debugfs();
  2655. #endif
  2656. return 0;
  2657. }
  2658. static struct platform_driver artpec6_crypto_driver = {
  2659. .probe = artpec6_crypto_probe,
  2660. .remove = artpec6_crypto_remove,
  2661. .driver = {
  2662. .name = "artpec6-crypto",
  2663. .owner = THIS_MODULE,
  2664. .of_match_table = artpec6_crypto_of_match,
  2665. },
  2666. };
  2667. module_platform_driver(artpec6_crypto_driver);
  2668. MODULE_AUTHOR("Axis Communications AB");
  2669. MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
  2670. MODULE_LICENSE("GPL");