sa2ul.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * K3 SA2UL crypto accelerator driver
  4. *
  5. * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
  6. *
  7. * Authors: Keerthy
  8. * Vitaly Andrianov
  9. * Tero Kristo
  10. */
  11. #include <linux/bitfield.h>
  12. #include <linux/clk.h>
  13. #include <linux/dma-mapping.h>
  14. #include <linux/dmaengine.h>
  15. #include <linux/dmapool.h>
  16. #include <linux/kernel.h>
  17. #include <linux/module.h>
  18. #include <linux/of.h>
  19. #include <linux/of_platform.h>
  20. #include <linux/platform_device.h>
  21. #include <linux/pm_runtime.h>
  22. #include <crypto/aes.h>
  23. #include <crypto/authenc.h>
  24. #include <crypto/des.h>
  25. #include <crypto/internal/aead.h>
  26. #include <crypto/internal/hash.h>
  27. #include <crypto/internal/skcipher.h>
  28. #include <crypto/scatterwalk.h>
  29. #include <crypto/sha1.h>
  30. #include <crypto/sha2.h>
  31. #include "sa2ul.h"
  32. /* Byte offset for key in encryption security context */
  33. #define SC_ENC_KEY_OFFSET (1 + 27 + 4)
  34. /* Byte offset for Aux-1 in encryption security context */
  35. #define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32)
  36. #define SA_CMDL_UPD_ENC 0x0001
  37. #define SA_CMDL_UPD_AUTH 0x0002
  38. #define SA_CMDL_UPD_ENC_IV 0x0004
  39. #define SA_CMDL_UPD_AUTH_IV 0x0008
  40. #define SA_CMDL_UPD_AUX_KEY 0x0010
  41. #define SA_AUTH_SUBKEY_LEN 16
  42. #define SA_CMDL_PAYLOAD_LENGTH_MASK 0xFFFF
  43. #define SA_CMDL_SOP_BYPASS_LEN_MASK 0xFF000000
  44. #define MODE_CONTROL_BYTES 27
  45. #define SA_HASH_PROCESSING 0
  46. #define SA_CRYPTO_PROCESSING 0
  47. #define SA_UPLOAD_HASH_TO_TLR BIT(6)
  48. #define SA_SW0_FLAGS_MASK 0xF0000
  49. #define SA_SW0_CMDL_INFO_MASK 0x1F00000
  50. #define SA_SW0_CMDL_PRESENT BIT(4)
  51. #define SA_SW0_ENG_ID_MASK 0x3E000000
  52. #define SA_SW0_DEST_INFO_PRESENT BIT(30)
  53. #define SA_SW2_EGRESS_LENGTH 0xFF000000
  54. #define SA_BASIC_HASH 0x10
  55. #define SHA256_DIGEST_WORDS 8
  56. /* Make 32-bit word from 4 bytes */
  57. #define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \
  58. ((b2) << 8) | (b3))
  59. /* size of SCCTL structure in bytes */
  60. #define SA_SCCTL_SZ 16
  61. /* Max Authentication tag size */
  62. #define SA_MAX_AUTH_TAG_SZ 64
  63. enum sa_algo_id {
  64. SA_ALG_CBC_AES = 0,
  65. SA_ALG_EBC_AES,
  66. SA_ALG_CBC_DES3,
  67. SA_ALG_ECB_DES3,
  68. SA_ALG_SHA1,
  69. SA_ALG_SHA256,
  70. SA_ALG_SHA512,
  71. SA_ALG_AUTHENC_SHA1_AES,
  72. SA_ALG_AUTHENC_SHA256_AES,
  73. };
  74. struct sa_match_data {
  75. u8 priv;
  76. u8 priv_id;
  77. u32 supported_algos;
  78. };
  79. static struct device *sa_k3_dev;
  80. /**
  81. * struct sa_cmdl_cfg - Command label configuration descriptor
  82. * @aalg: authentication algorithm ID
  83. * @enc_eng_id: Encryption Engine ID supported by the SA hardware
  84. * @auth_eng_id: Authentication Engine ID
  85. * @iv_size: Initialization Vector size
  86. * @akey: Authentication key
  87. * @akey_len: Authentication key length
  88. * @enc: True, if this is an encode request
  89. */
  90. struct sa_cmdl_cfg {
  91. int aalg;
  92. u8 enc_eng_id;
  93. u8 auth_eng_id;
  94. u8 iv_size;
  95. const u8 *akey;
  96. u16 akey_len;
  97. bool enc;
  98. };
  99. /**
  100. * struct algo_data - Crypto algorithm specific data
  101. * @enc_eng: Encryption engine info structure
  102. * @auth_eng: Authentication engine info structure
  103. * @auth_ctrl: Authentication control word
  104. * @hash_size: Size of digest
  105. * @iv_idx: iv index in psdata
  106. * @iv_out_size: iv out size
  107. * @ealg_id: Encryption Algorithm ID
  108. * @aalg_id: Authentication algorithm ID
  109. * @mci_enc: Mode Control Instruction for Encryption algorithm
  110. * @mci_dec: Mode Control Instruction for Decryption
  111. * @inv_key: Whether the encryption algorithm demands key inversion
  112. * @ctx: Pointer to the algorithm context
  113. * @keyed_mac: Whether the authentication algorithm has key
  114. * @prep_iopad: Function pointer to generate intermediate ipad/opad
  115. */
  116. struct algo_data {
  117. struct sa_eng_info enc_eng;
  118. struct sa_eng_info auth_eng;
  119. u8 auth_ctrl;
  120. u8 hash_size;
  121. u8 iv_idx;
  122. u8 iv_out_size;
  123. u8 ealg_id;
  124. u8 aalg_id;
  125. u8 *mci_enc;
  126. u8 *mci_dec;
  127. bool inv_key;
  128. struct sa_tfm_ctx *ctx;
  129. bool keyed_mac;
  130. void (*prep_iopad)(struct algo_data *algo, const u8 *key,
  131. u16 key_sz, __be32 *ipad, __be32 *opad);
  132. };
  133. /**
  134. * struct sa_alg_tmpl: A generic template encompassing crypto/aead algorithms
  135. * @type: Type of the crypto algorithm.
  136. * @alg: Union of crypto algorithm definitions.
  137. * @registered: Flag indicating if the crypto algorithm is already registered
  138. */
  139. struct sa_alg_tmpl {
  140. u32 type; /* CRYPTO_ALG_TYPE from <linux/crypto.h> */
  141. union {
  142. struct skcipher_alg skcipher;
  143. struct ahash_alg ahash;
  144. struct aead_alg aead;
  145. } alg;
  146. bool registered;
  147. };
  148. /**
  149. * struct sa_mapped_sg: scatterlist information for tx and rx
  150. * @mapped: Set to true if the @sgt is mapped
  151. * @dir: mapping direction used for @sgt
  152. * @split_sg: Set if the sg is split and needs to be freed up
  153. * @static_sg: Static scatterlist entry for overriding data
  154. * @sgt: scatterlist table for DMA API use
  155. */
  156. struct sa_mapped_sg {
  157. bool mapped;
  158. enum dma_data_direction dir;
  159. struct scatterlist static_sg;
  160. struct scatterlist *split_sg;
  161. struct sg_table sgt;
  162. };
  163. /**
   164. * struct sa_rx_data: RX packet miscellaneous data placeholder
  165. * @req: crypto request data pointer
  166. * @ddev: pointer to the DMA device
  167. * @tx_in: dma_async_tx_descriptor pointer for rx channel
  168. * @mapped_sg: Information on tx (0) and rx (1) scatterlist DMA mapping
  169. * @enc: Flag indicating either encryption or decryption
  170. * @enc_iv_size: Initialisation vector size
  171. * @iv_idx: Initialisation vector index
  172. */
  173. struct sa_rx_data {
  174. void *req;
  175. struct device *ddev;
  176. struct dma_async_tx_descriptor *tx_in;
  177. struct sa_mapped_sg mapped_sg[2];
  178. u8 enc;
  179. u8 enc_iv_size;
  180. u8 iv_idx;
  181. };
  182. /**
  183. * struct sa_req: SA request definition
  184. * @dev: device for the request
   185. * @size: total data to be xmitted via DMA
  186. * @enc_offset: offset of cipher data
  187. * @enc_size: data to be passed to cipher engine
  188. * @enc_iv: cipher IV
  189. * @auth_offset: offset of the authentication data
  190. * @auth_size: size of the authentication data
  191. * @auth_iv: authentication IV
  192. * @type: algorithm type for the request
  193. * @cmdl: command label pointer
  194. * @base: pointer to the base request
  195. * @ctx: pointer to the algorithm context data
  196. * @enc: true if this is an encode request
  197. * @src: source data
  198. * @dst: destination data
  199. * @callback: DMA callback for the request
  200. * @mdata_size: metadata size passed to DMA
  201. */
  202. struct sa_req {
  203. struct device *dev;
  204. u16 size;
  205. u8 enc_offset;
  206. u16 enc_size;
  207. u8 *enc_iv;
  208. u8 auth_offset;
  209. u16 auth_size;
  210. u8 *auth_iv;
  211. u32 type;
  212. u32 *cmdl;
  213. struct crypto_async_request *base;
  214. struct sa_tfm_ctx *ctx;
  215. bool enc;
  216. struct scatterlist *src;
  217. struct scatterlist *dst;
  218. dma_async_tx_callback callback;
  219. u16 mdata_size;
  220. };
  221. /*
  222. * Mode Control Instructions for various Key lengths 128, 192, 256
  223. * For CBC (Cipher Block Chaining) mode for encryption
  224. */
  225. static u8 mci_cbc_enc_array[3][MODE_CONTROL_BYTES] = {
  226. { 0x61, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
  227. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  228. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  229. { 0x61, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
  230. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  231. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  232. { 0x61, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
  233. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  234. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  235. };
  236. /*
  237. * Mode Control Instructions for various Key lengths 128, 192, 256
  238. * For CBC (Cipher Block Chaining) mode for decryption
  239. */
  240. static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = {
  241. { 0x71, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
  242. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  243. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  244. { 0x71, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
  245. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  246. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  247. { 0x71, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
  248. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  249. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  250. };
  251. /*
  252. * Mode Control Instructions for various Key lengths 128, 192, 256
  253. * For CBC (Cipher Block Chaining) mode for encryption
  254. */
  255. static u8 mci_cbc_enc_no_iv_array[3][MODE_CONTROL_BYTES] = {
  256. { 0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
  257. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  258. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  259. { 0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
  260. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  261. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  262. { 0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
  263. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  264. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  265. };
  266. /*
  267. * Mode Control Instructions for various Key lengths 128, 192, 256
  268. * For CBC (Cipher Block Chaining) mode for decryption
  269. */
  270. static u8 mci_cbc_dec_no_iv_array[3][MODE_CONTROL_BYTES] = {
  271. { 0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
  272. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  273. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  274. { 0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
  275. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  276. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  277. { 0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
  278. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  279. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  280. };
  281. /*
  282. * Mode Control Instructions for various Key lengths 128, 192, 256
  283. * For ECB (Electronic Code Book) mode for encryption
  284. */
  285. static u8 mci_ecb_enc_array[3][27] = {
  286. { 0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
  287. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  288. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  289. { 0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
  290. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  291. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  292. { 0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
  293. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  294. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  295. };
  296. /*
  297. * Mode Control Instructions for various Key lengths 128, 192, 256
  298. * For ECB (Electronic Code Book) mode for decryption
  299. */
  300. static u8 mci_ecb_dec_array[3][27] = {
  301. { 0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
  302. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  303. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  304. { 0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
  305. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  306. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  307. { 0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
  308. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  309. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
  310. };
  311. /*
  312. * Mode Control Instructions for DES algorithm
  313. * For CBC (Cipher Block Chaining) mode and ECB mode
  314. * encryption and for decryption respectively
  315. */
  316. static u8 mci_cbc_3des_enc_array[MODE_CONTROL_BYTES] = {
  317. 0x60, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00, 0x00, 0x00,
  318. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  319. 0x00, 0x00, 0x00,
  320. };
  321. static u8 mci_cbc_3des_dec_array[MODE_CONTROL_BYTES] = {
  322. 0x70, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0, 0x00, 0x00,
  323. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  324. 0x00, 0x00, 0x00,
  325. };
  326. static u8 mci_ecb_3des_enc_array[MODE_CONTROL_BYTES] = {
  327. 0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
  328. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  329. 0x00, 0x00, 0x00,
  330. };
  331. static u8 mci_ecb_3des_dec_array[MODE_CONTROL_BYTES] = {
  332. 0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
  333. 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
  334. 0x00, 0x00, 0x00,
  335. };
  336. /*
  337. * Perform 16 byte or 128 bit swizzling
   338. * The SA2UL expects the security context to
   339. * be in little-endian byte order and the bus width is 128 bits or 16 bytes,
   340. * hence swap 16 bytes at a time from higher to lower address
  341. */
  342. static void sa_swiz_128(u8 *in, u16 len)
  343. {
  344. u8 data[16];
  345. int i, j;
  346. for (i = 0; i < len; i += 16) {
  347. memcpy(data, &in[i], 16);
  348. for (j = 0; j < 16; j++)
  349. in[i + j] = data[15 - j];
  350. }
  351. }
   352. /* Prepare the ipad and opad from the key as per HMAC algorithm step 1 */
  353. static void prepare_kipad(u8 *k_ipad, const u8 *key, u16 key_sz)
  354. {
  355. int i;
  356. for (i = 0; i < key_sz; i++)
  357. k_ipad[i] = key[i] ^ 0x36;
  358. /* Instead of XOR with 0 */
  359. for (; i < SHA1_BLOCK_SIZE; i++)
  360. k_ipad[i] = 0x36;
  361. }
  362. static void prepare_kopad(u8 *k_opad, const u8 *key, u16 key_sz)
  363. {
  364. int i;
  365. for (i = 0; i < key_sz; i++)
  366. k_opad[i] = key[i] ^ 0x5c;
  367. /* Instead of XOR with 0 */
  368. for (; i < SHA1_BLOCK_SIZE; i++)
  369. k_opad[i] = 0x5c;
  370. }
  371. static void sa_export_shash(void *state, struct shash_desc *hash,
  372. int digest_size, __be32 *out)
  373. {
  374. struct sha1_state *sha1;
  375. struct sha256_state *sha256;
  376. u32 *result;
  377. switch (digest_size) {
  378. case SHA1_DIGEST_SIZE:
  379. sha1 = state;
  380. result = sha1->state;
  381. break;
  382. case SHA256_DIGEST_SIZE:
  383. sha256 = state;
  384. result = sha256->state;
  385. break;
  386. default:
  387. dev_err(sa_k3_dev, "%s: bad digest_size=%d\n", __func__,
  388. digest_size);
  389. return;
  390. }
  391. crypto_shash_export(hash, state);
  392. cpu_to_be32_array(out, result, digest_size / 4);
  393. }
  394. static void sa_prepare_iopads(struct algo_data *data, const u8 *key,
  395. u16 key_sz, __be32 *ipad, __be32 *opad)
  396. {
  397. SHASH_DESC_ON_STACK(shash, data->ctx->shash);
  398. int block_size = crypto_shash_blocksize(data->ctx->shash);
  399. int digest_size = crypto_shash_digestsize(data->ctx->shash);
  400. union {
  401. struct sha1_state sha1;
  402. struct sha256_state sha256;
  403. u8 k_pad[SHA1_BLOCK_SIZE];
  404. } sha;
  405. shash->tfm = data->ctx->shash;
  406. prepare_kipad(sha.k_pad, key, key_sz);
  407. crypto_shash_init(shash);
  408. crypto_shash_update(shash, sha.k_pad, block_size);
  409. sa_export_shash(&sha, shash, digest_size, ipad);
  410. prepare_kopad(sha.k_pad, key, key_sz);
  411. crypto_shash_init(shash);
  412. crypto_shash_update(shash, sha.k_pad, block_size);
  413. sa_export_shash(&sha, shash, digest_size, opad);
  414. memzero_explicit(&sha, sizeof(sha));
  415. }
  416. /* Derive the inverse key used in AES-CBC decryption operation */
  417. static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz)
  418. {
  419. struct crypto_aes_ctx ctx;
  420. int key_pos;
  421. if (aes_expandkey(&ctx, key, key_sz)) {
  422. dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
  423. return -EINVAL;
  424. }
   425. /* Workaround to get the right inverse for AES_KEYSIZE_192-sized keys */
  426. if (key_sz == AES_KEYSIZE_192) {
  427. ctx.key_enc[52] = ctx.key_enc[51] ^ ctx.key_enc[46];
  428. ctx.key_enc[53] = ctx.key_enc[52] ^ ctx.key_enc[47];
  429. }
   430. /* Based on crypto_aes_expand_key logic */
  431. switch (key_sz) {
  432. case AES_KEYSIZE_128:
  433. case AES_KEYSIZE_192:
  434. key_pos = key_sz + 24;
  435. break;
  436. case AES_KEYSIZE_256:
  437. key_pos = key_sz + 24 - 4;
  438. break;
  439. default:
  440. dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
  441. return -EINVAL;
  442. }
  443. memcpy(inv_key, &ctx.key_enc[key_pos], key_sz);
  444. return 0;
  445. }
  446. /* Set Security context for the encryption engine */
  447. static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz,
  448. u8 enc, u8 *sc_buf)
  449. {
  450. const u8 *mci = NULL;
  451. /* Set Encryption mode selector to crypto processing */
  452. sc_buf[0] = SA_CRYPTO_PROCESSING;
  453. if (enc)
  454. mci = ad->mci_enc;
  455. else
  456. mci = ad->mci_dec;
  457. /* Set the mode control instructions in security context */
  458. if (mci)
  459. memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES);
  460. /* For AES-CBC decryption get the inverse key */
  461. if (ad->inv_key && !enc) {
  462. if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz))
  463. return -EINVAL;
  464. /* For all other cases: key is used */
  465. } else {
  466. memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz);
  467. }
  468. return 0;
  469. }
  470. /* Set Security context for the authentication engine */
  471. static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz,
  472. u8 *sc_buf)
  473. {
  474. __be32 *ipad = (void *)(sc_buf + 32);
  475. __be32 *opad = (void *)(sc_buf + 64);
  476. /* Set Authentication mode selector to hash processing */
  477. sc_buf[0] = SA_HASH_PROCESSING;
  478. /* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */
  479. sc_buf[1] = SA_UPLOAD_HASH_TO_TLR;
  480. sc_buf[1] |= ad->auth_ctrl;
  481. /* Copy the keys or ipad/opad */
  482. if (ad->keyed_mac)
  483. ad->prep_iopad(ad, key, key_sz, ipad, opad);
  484. else {
  485. /* basic hash */
  486. sc_buf[1] |= SA_BASIC_HASH;
  487. }
  488. }
  489. static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16)
  490. {
  491. int j;
  492. for (j = 0; j < ((size16) ? 4 : 2); j++) {
  493. *out = cpu_to_be32(*((u32 *)iv));
  494. iv += 4;
  495. out++;
  496. }
  497. }
  498. /* Format general command label */
  499. static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
  500. struct sa_cmdl_upd_info *upd_info)
  501. {
  502. u8 enc_offset = 0, auth_offset = 0, total = 0;
  503. u8 enc_next_eng = SA_ENG_ID_OUTPORT2;
  504. u8 auth_next_eng = SA_ENG_ID_OUTPORT2;
  505. u32 *word_ptr = (u32 *)cmdl;
  506. int i;
  507. /* Clear the command label */
  508. memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));
   509. /* Initialize the command update structure */
  510. memzero_explicit(upd_info, sizeof(*upd_info));
  511. if (cfg->enc_eng_id && cfg->auth_eng_id) {
  512. if (cfg->enc) {
  513. auth_offset = SA_CMDL_HEADER_SIZE_BYTES;
  514. enc_next_eng = cfg->auth_eng_id;
  515. if (cfg->iv_size)
  516. auth_offset += cfg->iv_size;
  517. } else {
  518. enc_offset = SA_CMDL_HEADER_SIZE_BYTES;
  519. auth_next_eng = cfg->enc_eng_id;
  520. }
  521. }
  522. if (cfg->enc_eng_id) {
  523. upd_info->flags |= SA_CMDL_UPD_ENC;
  524. upd_info->enc_size.index = enc_offset >> 2;
  525. upd_info->enc_offset.index = upd_info->enc_size.index + 1;
  526. /* Encryption command label */
  527. cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng;
  528. /* Encryption modes requiring IV */
  529. if (cfg->iv_size) {
  530. upd_info->flags |= SA_CMDL_UPD_ENC_IV;
  531. upd_info->enc_iv.index =
  532. (enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
  533. upd_info->enc_iv.size = cfg->iv_size;
  534. cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
  535. SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
  536. cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
  537. (SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));
  538. total += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
  539. } else {
  540. cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
  541. SA_CMDL_HEADER_SIZE_BYTES;
  542. total += SA_CMDL_HEADER_SIZE_BYTES;
  543. }
  544. }
  545. if (cfg->auth_eng_id) {
  546. upd_info->flags |= SA_CMDL_UPD_AUTH;
  547. upd_info->auth_size.index = auth_offset >> 2;
  548. upd_info->auth_offset.index = upd_info->auth_size.index + 1;
  549. cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng;
  550. cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] =
  551. SA_CMDL_HEADER_SIZE_BYTES;
  552. total += SA_CMDL_HEADER_SIZE_BYTES;
  553. }
  554. total = roundup(total, 8);
  555. for (i = 0; i < total / 4; i++)
  556. word_ptr[i] = swab32(word_ptr[i]);
  557. return total;
  558. }
  559. /* Update Command label */
  560. static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
  561. struct sa_cmdl_upd_info *upd_info)
  562. {
  563. int i = 0, j;
  564. if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) {
  565. cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
  566. cmdl[upd_info->enc_size.index] |= req->enc_size;
  567. cmdl[upd_info->enc_offset.index] &=
  568. ~SA_CMDL_SOP_BYPASS_LEN_MASK;
  569. cmdl[upd_info->enc_offset.index] |=
  570. FIELD_PREP(SA_CMDL_SOP_BYPASS_LEN_MASK,
  571. req->enc_offset);
  572. if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
  573. __be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index];
  574. u32 *enc_iv = (u32 *)req->enc_iv;
  575. for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) {
  576. data[j] = cpu_to_be32(*enc_iv);
  577. enc_iv++;
  578. }
  579. }
  580. }
  581. if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) {
  582. cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
  583. cmdl[upd_info->auth_size.index] |= req->auth_size;
  584. cmdl[upd_info->auth_offset.index] &=
  585. ~SA_CMDL_SOP_BYPASS_LEN_MASK;
  586. cmdl[upd_info->auth_offset.index] |=
  587. FIELD_PREP(SA_CMDL_SOP_BYPASS_LEN_MASK,
  588. req->auth_offset);
  589. if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
  590. sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index],
  591. req->auth_iv,
  592. (upd_info->auth_iv.size > 8));
  593. }
  594. if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) {
  595. int offset = (req->auth_size & 0xF) ? 4 : 0;
  596. memcpy(&cmdl[upd_info->aux_key_info.index],
  597. &upd_info->aux_key[offset], 16);
  598. }
  599. }
  600. }
  601. /* Format SWINFO words to be sent to SA */
  602. static
  603. void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
  604. u8 cmdl_present, u8 cmdl_offset, u8 flags,
  605. u8 hash_size, u32 *swinfo)
  606. {
  607. swinfo[0] = sc_id;
  608. swinfo[0] |= FIELD_PREP(SA_SW0_FLAGS_MASK, flags);
  609. if (likely(cmdl_present))
  610. swinfo[0] |= FIELD_PREP(SA_SW0_CMDL_INFO_MASK,
  611. cmdl_offset | SA_SW0_CMDL_PRESENT);
  612. swinfo[0] |= FIELD_PREP(SA_SW0_ENG_ID_MASK, eng_id);
  613. swinfo[0] |= SA_SW0_DEST_INFO_PRESENT;
  614. swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL);
  615. swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32);
  616. swinfo[2] |= FIELD_PREP(SA_SW2_EGRESS_LENGTH, hash_size);
  617. }
  618. /* Dump the security context */
  619. static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
  620. {
  621. #ifdef DEBUG
  622. dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr);
  623. print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
  624. 16, 1, buf, SA_CTX_MAX_SZ, false);
  625. #endif
  626. }
  627. static
  628. int sa_init_sc(struct sa_ctx_info *ctx, const struct sa_match_data *match_data,
  629. const u8 *enc_key, u16 enc_key_sz,
  630. const u8 *auth_key, u16 auth_key_sz,
  631. struct algo_data *ad, u8 enc, u32 *swinfo)
  632. {
  633. int enc_sc_offset = 0;
  634. int auth_sc_offset = 0;
  635. u8 *sc_buf = ctx->sc;
  636. u16 sc_id = ctx->sc_id;
  637. u8 first_engine = 0;
  638. memzero_explicit(sc_buf, SA_CTX_MAX_SZ);
  639. if (ad->auth_eng.eng_id) {
  640. if (enc)
  641. first_engine = ad->enc_eng.eng_id;
  642. else
  643. first_engine = ad->auth_eng.eng_id;
  644. enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
  645. auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size;
  646. sc_buf[1] = SA_SCCTL_FE_AUTH_ENC;
  647. if (!ad->hash_size)
  648. return -EINVAL;
  649. ad->hash_size = roundup(ad->hash_size, 8);
  650. } else if (ad->enc_eng.eng_id && !ad->auth_eng.eng_id) {
  651. enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
  652. first_engine = ad->enc_eng.eng_id;
  653. sc_buf[1] = SA_SCCTL_FE_ENC;
  654. ad->hash_size = ad->iv_out_size;
  655. }
  656. /* SCCTL Owner info: 0=host, 1=CP_ACE */
  657. sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
  658. memcpy(&sc_buf[2], &sc_id, 2);
  659. sc_buf[4] = 0x0;
  660. sc_buf[5] = match_data->priv_id;
  661. sc_buf[6] = match_data->priv;
  662. sc_buf[7] = 0x0;
  663. /* Prepare context for encryption engine */
  664. if (ad->enc_eng.sc_size) {
  665. if (sa_set_sc_enc(ad, enc_key, enc_key_sz, enc,
  666. &sc_buf[enc_sc_offset]))
  667. return -EINVAL;
  668. }
  669. /* Prepare context for authentication engine */
  670. if (ad->auth_eng.sc_size)
  671. sa_set_sc_auth(ad, auth_key, auth_key_sz,
  672. &sc_buf[auth_sc_offset]);
  673. /* Set the ownership of context to CP_ACE */
  674. sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;
  675. /* swizzle the security context */
  676. sa_swiz_128(sc_buf, SA_CTX_MAX_SZ);
  677. sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
  678. SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo);
  679. sa_dump_sc(sc_buf, ctx->sc_phys);
  680. return 0;
  681. }
  682. /* Free the per direction context memory */
  683. static void sa_free_ctx_info(struct sa_ctx_info *ctx,
  684. struct sa_crypto_data *data)
  685. {
  686. unsigned long bn;
  687. bn = ctx->sc_id - data->sc_id_start;
  688. spin_lock(&data->scid_lock);
  689. __clear_bit(bn, data->ctx_bm);
  690. data->sc_id--;
  691. spin_unlock(&data->scid_lock);
  692. if (ctx->sc) {
  693. dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys);
  694. ctx->sc = NULL;
  695. }
  696. }
  697. static int sa_init_ctx_info(struct sa_ctx_info *ctx,
  698. struct sa_crypto_data *data)
  699. {
  700. unsigned long bn;
  701. int err;
  702. spin_lock(&data->scid_lock);
  703. bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX);
  704. __set_bit(bn, data->ctx_bm);
  705. data->sc_id++;
  706. spin_unlock(&data->scid_lock);
  707. ctx->sc_id = (u16)(data->sc_id_start + bn);
  708. ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys);
  709. if (!ctx->sc) {
  710. dev_err(&data->pdev->dev, "Failed to allocate SC memory\n");
  711. err = -ENOMEM;
  712. goto scid_rollback;
  713. }
  714. return 0;
  715. scid_rollback:
  716. spin_lock(&data->scid_lock);
  717. __clear_bit(bn, data->ctx_bm);
  718. data->sc_id--;
  719. spin_unlock(&data->scid_lock);
  720. return err;
  721. }
  722. static void sa_cipher_cra_exit(struct crypto_skcipher *tfm)
  723. {
  724. struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
  725. struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
  726. dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
  727. __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
  728. ctx->dec.sc_id, &ctx->dec.sc_phys);
  729. sa_free_ctx_info(&ctx->enc, data);
  730. sa_free_ctx_info(&ctx->dec, data);
  731. crypto_free_skcipher(ctx->fallback.skcipher);
  732. }
  733. static int sa_cipher_cra_init(struct crypto_skcipher *tfm)
  734. {
  735. struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
  736. struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
  737. const char *name = crypto_tfm_alg_name(&tfm->base);
  738. struct crypto_skcipher *child;
  739. int ret;
  740. memzero_explicit(ctx, sizeof(*ctx));
  741. ctx->dev_data = data;
  742. ret = sa_init_ctx_info(&ctx->enc, data);
  743. if (ret)
  744. return ret;
  745. ret = sa_init_ctx_info(&ctx->dec, data);
  746. if (ret) {
  747. sa_free_ctx_info(&ctx->enc, data);
  748. return ret;
  749. }
  750. child = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
  751. if (IS_ERR(child)) {
  752. dev_err(sa_k3_dev, "Error allocating fallback algo %s\n", name);
  753. return PTR_ERR(child);
  754. }
  755. ctx->fallback.skcipher = child;
  756. crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
  757. sizeof(struct skcipher_request));
  758. dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
  759. __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
  760. ctx->dec.sc_id, &ctx->dec.sc_phys);
  761. return 0;
  762. }
  763. static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
  764. unsigned int keylen, struct algo_data *ad)
  765. {
  766. struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
  767. struct crypto_skcipher *child = ctx->fallback.skcipher;
  768. int cmdl_len;
  769. struct sa_cmdl_cfg cfg;
  770. int ret;
  771. if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
  772. keylen != AES_KEYSIZE_256)
  773. return -EINVAL;
  774. ad->enc_eng.eng_id = SA_ENG_ID_EM1;
  775. ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
  776. memzero_explicit(&cfg, sizeof(cfg));
  777. cfg.enc_eng_id = ad->enc_eng.eng_id;
  778. cfg.iv_size = crypto_skcipher_ivsize(tfm);
  779. crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
  780. crypto_skcipher_set_flags(child, tfm->base.crt_flags &
  781. CRYPTO_TFM_REQ_MASK);
  782. ret = crypto_skcipher_setkey(child, key, keylen);
  783. if (ret)
  784. return ret;
  785. /* Setup Encryption Security Context & Command label template */
  786. if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, key, keylen, NULL, 0,
  787. ad, 1, &ctx->enc.epib[1]))
  788. goto badkey;
  789. cmdl_len = sa_format_cmdl_gen(&cfg,
  790. (u8 *)ctx->enc.cmdl,
  791. &ctx->enc.cmdl_upd_info);
  792. if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
  793. goto badkey;
  794. ctx->enc.cmdl_size = cmdl_len;
  795. /* Setup Decryption Security Context & Command label template */
  796. if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, key, keylen, NULL, 0,
  797. ad, 0, &ctx->dec.epib[1]))
  798. goto badkey;
  799. cfg.enc_eng_id = ad->enc_eng.eng_id;
  800. cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
  801. &ctx->dec.cmdl_upd_info);
  802. if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
  803. goto badkey;
  804. ctx->dec.cmdl_size = cmdl_len;
  805. ctx->iv_idx = ad->iv_idx;
  806. return 0;
  807. badkey:
  808. dev_err(sa_k3_dev, "%s: badkey\n", __func__);
  809. return -EINVAL;
  810. }
  811. static int sa_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
  812. unsigned int keylen)
  813. {
  814. struct algo_data ad = { 0 };
  815. /* Convert the key size (16/24/32) to the key size index (0/1/2) */
  816. int key_idx = (keylen >> 3) - 2;
  817. if (key_idx >= 3)
  818. return -EINVAL;
  819. ad.mci_enc = mci_cbc_enc_array[key_idx];
  820. ad.mci_dec = mci_cbc_dec_array[key_idx];
  821. ad.inv_key = true;
  822. ad.ealg_id = SA_EALG_ID_AES_CBC;
  823. ad.iv_idx = 4;
  824. ad.iv_out_size = 16;
  825. return sa_cipher_setkey(tfm, key, keylen, &ad);
  826. }
  827. static int sa_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
  828. unsigned int keylen)
  829. {
  830. struct algo_data ad = { 0 };
  831. /* Convert the key size (16/24/32) to the key size index (0/1/2) */
  832. int key_idx = (keylen >> 3) - 2;
  833. if (key_idx >= 3)
  834. return -EINVAL;
  835. ad.mci_enc = mci_ecb_enc_array[key_idx];
  836. ad.mci_dec = mci_ecb_dec_array[key_idx];
  837. ad.inv_key = true;
  838. ad.ealg_id = SA_EALG_ID_AES_ECB;
  839. return sa_cipher_setkey(tfm, key, keylen, &ad);
  840. }
  841. static int sa_3des_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
  842. unsigned int keylen)
  843. {
  844. struct algo_data ad = { 0 };
  845. ad.mci_enc = mci_cbc_3des_enc_array;
  846. ad.mci_dec = mci_cbc_3des_dec_array;
  847. ad.ealg_id = SA_EALG_ID_3DES_CBC;
  848. ad.iv_idx = 6;
  849. ad.iv_out_size = 8;
  850. return sa_cipher_setkey(tfm, key, keylen, &ad);
  851. }
  852. static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
  853. unsigned int keylen)
  854. {
  855. struct algo_data ad = { 0 };
  856. ad.mci_enc = mci_ecb_3des_enc_array;
  857. ad.mci_dec = mci_ecb_3des_dec_array;
  858. return sa_cipher_setkey(tfm, key, keylen, &ad);
  859. }
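/* Sync the DMA-mapped output scatterlist back to the CPU once the RX DMA has completed */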
  860. static void sa_sync_from_device(struct sa_rx_data *rxd)
  861. {
  862. struct sg_table *sgt;
  863. if (rxd->mapped_sg[0].dir == DMA_BIDIRECTIONAL)
  864. sgt = &rxd->mapped_sg[0].sgt;
  865. else
  866. sgt = &rxd->mapped_sg[1].sgt;
  867. dma_sync_sgtable_for_cpu(rxd->ddev, sgt, DMA_FROM_DEVICE);
  868. }
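/* Unmap the scatterlists and free the per-request RX bookkeeping data */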
  869. static void sa_free_sa_rx_data(struct sa_rx_data *rxd)
  870. {
  871. int i;
  872. for (i = 0; i < ARRAY_SIZE(rxd->mapped_sg); i++) {
  873. struct sa_mapped_sg *mapped_sg = &rxd->mapped_sg[i];
  874. if (mapped_sg->mapped) {
  875. dma_unmap_sgtable(rxd->ddev, &mapped_sg->sgt,
  876. mapped_sg->dir, 0);
  877. kfree(mapped_sg->split_sg);
  878. }
  879. }
  880. kfree(rxd);
  881. }
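/* RX DMA completion callback for cipher requests: copy the returned IV from the descriptor metadata and complete the request */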
  882. static void sa_aes_dma_in_callback(void *data)
  883. {
  884. struct sa_rx_data *rxd = data;
  885. struct skcipher_request *req;
  886. u32 *result;
  887. __be32 *mdptr;
  888. size_t ml, pl;
  889. int i;
  890. sa_sync_from_device(rxd);
  891. req = container_of(rxd->req, struct skcipher_request, base);
  892. if (req->iv) {
  893. mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl,
  894. &ml);
  895. result = (u32 *)req->iv;
  896. for (i = 0; i < (rxd->enc_iv_size / 4); i++)
  897. result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]);
  898. }
  899. sa_free_sa_rx_data(rxd);
  900. skcipher_request_complete(req, 0);
  901. }
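/* Populate the TX descriptor metadata: EPIB words, packet info word and PS data (command label) */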
  902. static void
  903. sa_prepare_tx_desc(u32 *mdptr, u32 pslen, u32 *psdata, u32 epiblen, u32 *epib)
  904. {
  905. u32 *out, *in;
  906. int i;
  907. for (out = mdptr, in = epib, i = 0; i < epiblen / sizeof(u32); i++)
  908. *out++ = *in++;
  909. mdptr[4] = (0xFFFF << 16);
  910. for (out = &mdptr[5], in = psdata, i = 0;
  911. i < pslen / sizeof(u32); i++)
  912. *out++ = *in++;
  913. }
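/* Map the source/destination scatterlists, update the command label and submit the TX/RX DMA descriptors for the request */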
  914. static int sa_run(struct sa_req *req)
  915. {
  916. struct sa_rx_data *rxd;
  917. gfp_t gfp_flags;
  918. u32 cmdl[SA_MAX_CMDL_WORDS];
  919. struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev);
  920. struct device *ddev;
  921. struct dma_chan *dma_rx;
  922. int sg_nents, src_nents, dst_nents;
  923. struct scatterlist *src, *dst;
  924. size_t pl, ml, split_size;
  925. struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
  926. int ret;
  927. struct dma_async_tx_descriptor *tx_out;
  928. u32 *mdptr;
  929. bool diff_dst;
  930. enum dma_data_direction dir_src;
  931. struct sa_mapped_sg *mapped_sg;
  932. gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
  933. GFP_KERNEL : GFP_ATOMIC;
  934. rxd = kzalloc(sizeof(*rxd), gfp_flags);
  935. if (!rxd)
  936. return -ENOMEM;
  937. if (req->src != req->dst) {
  938. diff_dst = true;
  939. dir_src = DMA_TO_DEVICE;
  940. } else {
  941. diff_dst = false;
  942. dir_src = DMA_BIDIRECTIONAL;
  943. }
  944. /*
  945. * SA2UL has an interesting feature where the receive DMA channel
  946. * is selected based on the data passed to the engine. Within the
  947. * transition range, there is also a space where it is impossible
  948. * to determine where the data will end up, and this should be
  949. * avoided. This will be handled by the SW fallback mechanism by
  950. * the individual algorithm implementations.
  951. */
  952. if (req->size >= 256)
  953. dma_rx = pdata->dma_rx2;
  954. else
  955. dma_rx = pdata->dma_rx1;
  956. ddev = dmaengine_get_dma_device(pdata->dma_tx);
  957. rxd->ddev = ddev;
  958. memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
  959. sa_update_cmdl(req, cmdl, &sa_ctx->cmdl_upd_info);
  960. if (req->type != CRYPTO_ALG_TYPE_AHASH) {
  961. if (req->enc)
  962. req->type |=
  963. (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
  964. else
  965. req->type |=
  966. (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
  967. }
  968. cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type;
  969. /*
   970. * Map the packets: first check whether the data fits into a single
   971. * sg entry and use that if possible. If it does not fit, do an
   972. * sg_split to align the scatterlist on the actual data size being
   973. * processed by the crypto engine.
  974. */
  975. src = req->src;
  976. sg_nents = sg_nents_for_len(src, req->size);
  977. split_size = req->size;
  978. mapped_sg = &rxd->mapped_sg[0];
  979. if (sg_nents == 1 && split_size <= req->src->length) {
  980. src = &mapped_sg->static_sg;
  981. src_nents = 1;
  982. sg_init_table(src, 1);
  983. sg_set_page(src, sg_page(req->src), split_size,
  984. req->src->offset);
  985. mapped_sg->sgt.sgl = src;
  986. mapped_sg->sgt.orig_nents = src_nents;
  987. ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
  988. if (ret) {
  989. kfree(rxd);
  990. return ret;
  991. }
  992. mapped_sg->dir = dir_src;
  993. mapped_sg->mapped = true;
  994. } else {
  995. mapped_sg->sgt.sgl = req->src;
  996. mapped_sg->sgt.orig_nents = sg_nents;
  997. ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
  998. if (ret) {
  999. kfree(rxd);
  1000. return ret;
  1001. }
  1002. mapped_sg->dir = dir_src;
  1003. mapped_sg->mapped = true;
  1004. ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents, 0, 1,
  1005. &split_size, &src, &src_nents, gfp_flags);
  1006. if (ret) {
  1007. src_nents = mapped_sg->sgt.nents;
  1008. src = mapped_sg->sgt.sgl;
  1009. } else {
  1010. mapped_sg->split_sg = src;
  1011. }
  1012. }
  1013. dma_sync_sgtable_for_device(ddev, &mapped_sg->sgt, DMA_TO_DEVICE);
  1014. if (!diff_dst) {
  1015. dst_nents = src_nents;
  1016. dst = src;
  1017. } else {
  1018. dst_nents = sg_nents_for_len(req->dst, req->size);
  1019. mapped_sg = &rxd->mapped_sg[1];
  1020. if (dst_nents == 1 && split_size <= req->dst->length) {
  1021. dst = &mapped_sg->static_sg;
  1022. dst_nents = 1;
  1023. sg_init_table(dst, 1);
  1024. sg_set_page(dst, sg_page(req->dst), split_size,
  1025. req->dst->offset);
  1026. mapped_sg->sgt.sgl = dst;
  1027. mapped_sg->sgt.orig_nents = dst_nents;
  1028. ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
  1029. DMA_FROM_DEVICE, 0);
  1030. if (ret)
  1031. goto err_cleanup;
  1032. mapped_sg->dir = DMA_FROM_DEVICE;
  1033. mapped_sg->mapped = true;
  1034. } else {
  1035. mapped_sg->sgt.sgl = req->dst;
  1036. mapped_sg->sgt.orig_nents = dst_nents;
  1037. ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
  1038. DMA_FROM_DEVICE, 0);
  1039. if (ret)
  1040. goto err_cleanup;
  1041. mapped_sg->dir = DMA_FROM_DEVICE;
  1042. mapped_sg->mapped = true;
  1043. ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents,
  1044. 0, 1, &split_size, &dst, &dst_nents,
  1045. gfp_flags);
  1046. if (ret) {
  1047. dst_nents = mapped_sg->sgt.nents;
  1048. dst = mapped_sg->sgt.sgl;
  1049. } else {
  1050. mapped_sg->split_sg = dst;
  1051. }
  1052. }
  1053. }
  1054. rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents,
  1055. DMA_DEV_TO_MEM,
  1056. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  1057. if (!rxd->tx_in) {
  1058. dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
  1059. ret = -EINVAL;
  1060. goto err_cleanup;
  1061. }
  1062. rxd->req = (void *)req->base;
  1063. rxd->enc = req->enc;
  1064. rxd->iv_idx = req->ctx->iv_idx;
  1065. rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
  1066. rxd->tx_in->callback = req->callback;
  1067. rxd->tx_in->callback_param = rxd;
  1068. tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, src,
  1069. src_nents, DMA_MEM_TO_DEV,
  1070. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  1071. if (!tx_out) {
  1072. dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
  1073. ret = -EINVAL;
  1074. goto err_cleanup;
  1075. }
  1076. /*
  1077. * Prepare metadata for DMA engine. This essentially describes the
  1078. * crypto algorithm to be used, data sizes, different keys etc.
  1079. */
  1080. mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml);
  1081. sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
  1082. sizeof(u32))), cmdl, sizeof(sa_ctx->epib),
  1083. sa_ctx->epib);
  1084. ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32));
  1085. dmaengine_desc_set_metadata_len(tx_out, req->mdata_size);
  1086. dmaengine_submit(tx_out);
  1087. dmaengine_submit(rxd->tx_in);
  1088. dma_async_issue_pending(dma_rx);
  1089. dma_async_issue_pending(pdata->dma_tx);
  1090. return -EINPROGRESS;
  1091. err_cleanup:
  1092. sa_free_sa_rx_data(rxd);
  1093. return ret;
  1094. }
  1095. static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc)
  1096. {
  1097. struct sa_tfm_ctx *ctx =
  1098. crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
  1099. struct crypto_alg *alg = req->base.tfm->__crt_alg;
  1100. struct sa_req sa_req = { 0 };
  1101. if (!req->cryptlen)
  1102. return 0;
  1103. if (req->cryptlen % alg->cra_blocksize)
  1104. return -EINVAL;
  1105. /* Use SW fallback if the data size is not supported */
  1106. if (req->cryptlen > SA_MAX_DATA_SZ ||
  1107. (req->cryptlen >= SA_UNSAFE_DATA_SZ_MIN &&
  1108. req->cryptlen <= SA_UNSAFE_DATA_SZ_MAX)) {
  1109. struct skcipher_request *subreq = skcipher_request_ctx(req);
  1110. skcipher_request_set_tfm(subreq, ctx->fallback.skcipher);
  1111. skcipher_request_set_callback(subreq, req->base.flags,
  1112. req->base.complete,
  1113. req->base.data);
  1114. skcipher_request_set_crypt(subreq, req->src, req->dst,
  1115. req->cryptlen, req->iv);
  1116. if (enc)
  1117. return crypto_skcipher_encrypt(subreq);
  1118. else
  1119. return crypto_skcipher_decrypt(subreq);
  1120. }
  1121. sa_req.size = req->cryptlen;
  1122. sa_req.enc_size = req->cryptlen;
  1123. sa_req.src = req->src;
  1124. sa_req.dst = req->dst;
  1125. sa_req.enc_iv = iv;
  1126. sa_req.type = CRYPTO_ALG_TYPE_SKCIPHER;
  1127. sa_req.enc = enc;
  1128. sa_req.callback = sa_aes_dma_in_callback;
  1129. sa_req.mdata_size = 44;
  1130. sa_req.base = &req->base;
  1131. sa_req.ctx = ctx;
  1132. return sa_run(&sa_req);
  1133. }
  1134. static int sa_encrypt(struct skcipher_request *req)
  1135. {
  1136. return sa_cipher_run(req, req->iv, 1);
  1137. }
  1138. static int sa_decrypt(struct skcipher_request *req)
  1139. {
  1140. return sa_cipher_run(req, req->iv, 0);
  1141. }
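/* RX DMA completion callback for hash requests: copy the digest from the descriptor metadata into req->result */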
  1142. static void sa_sha_dma_in_callback(void *data)
  1143. {
  1144. struct sa_rx_data *rxd = data;
  1145. struct ahash_request *req;
  1146. struct crypto_ahash *tfm;
  1147. unsigned int authsize;
  1148. int i;
  1149. size_t ml, pl;
  1150. u32 *result;
  1151. __be32 *mdptr;
  1152. sa_sync_from_device(rxd);
  1153. req = container_of(rxd->req, struct ahash_request, base);
  1154. tfm = crypto_ahash_reqtfm(req);
  1155. authsize = crypto_ahash_digestsize(tfm);
  1156. mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
  1157. result = (u32 *)req->result;
  1158. for (i = 0; i < (authsize / 4); i++)
  1159. result[i] = be32_to_cpu(mdptr[i + 4]);
  1160. sa_free_sa_rx_data(rxd);
  1161. ahash_request_complete(req, 0);
  1162. }
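/* Zero-length input is not sent to the engine; return the precomputed empty-message digest instead */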
  1163. static int zero_message_process(struct ahash_request *req)
  1164. {
  1165. struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
  1166. int sa_digest_size = crypto_ahash_digestsize(tfm);
  1167. switch (sa_digest_size) {
  1168. case SHA1_DIGEST_SIZE:
  1169. memcpy(req->result, sha1_zero_message_hash, sa_digest_size);
  1170. break;
  1171. case SHA256_DIGEST_SIZE:
  1172. memcpy(req->result, sha256_zero_message_hash, sa_digest_size);
  1173. break;
  1174. case SHA512_DIGEST_SIZE:
  1175. memcpy(req->result, sha512_zero_message_hash, sa_digest_size);
  1176. break;
  1177. default:
  1178. return -EINVAL;
  1179. }
  1180. return 0;
  1181. }
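/* Run a hash request on the engine, using the software fallback for data sizes the hardware cannot handle */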
  1182. static int sa_sha_run(struct ahash_request *req)
  1183. {
  1184. struct sa_tfm_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
  1185. struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
  1186. struct sa_req sa_req = { 0 };
  1187. size_t auth_len;
  1188. auth_len = req->nbytes;
  1189. if (!auth_len)
  1190. return zero_message_process(req);
  1191. if (auth_len > SA_MAX_DATA_SZ ||
  1192. (auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
  1193. auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
  1194. struct ahash_request *subreq = &rctx->fallback_req;
  1195. int ret = 0;
  1196. ahash_request_set_tfm(subreq, ctx->fallback.ahash);
  1197. subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
  1198. crypto_ahash_init(subreq);
  1199. subreq->nbytes = auth_len;
  1200. subreq->src = req->src;
  1201. subreq->result = req->result;
  1202. ret |= crypto_ahash_update(subreq);
  1203. subreq->nbytes = 0;
  1204. ret |= crypto_ahash_final(subreq);
  1205. return ret;
  1206. }
  1207. sa_req.size = auth_len;
  1208. sa_req.auth_size = auth_len;
  1209. sa_req.src = req->src;
  1210. sa_req.dst = req->src;
  1211. sa_req.enc = true;
  1212. sa_req.type = CRYPTO_ALG_TYPE_AHASH;
  1213. sa_req.callback = sa_sha_dma_in_callback;
  1214. sa_req.mdata_size = 28;
  1215. sa_req.ctx = ctx;
  1216. sa_req.base = &req->base;
  1217. return sa_run(&sa_req);
  1218. }
  1219. static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad)
  1220. {
  1221. int bs = crypto_shash_blocksize(ctx->shash);
  1222. int cmdl_len;
  1223. struct sa_cmdl_cfg cfg;
  1224. ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
  1225. ad->auth_eng.eng_id = SA_ENG_ID_AM1;
  1226. ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
  1227. memset(ctx->authkey, 0, bs);
  1228. memset(&cfg, 0, sizeof(cfg));
  1229. cfg.aalg = ad->aalg_id;
  1230. cfg.enc_eng_id = ad->enc_eng.eng_id;
  1231. cfg.auth_eng_id = ad->auth_eng.eng_id;
  1232. cfg.iv_size = 0;
  1233. cfg.akey = NULL;
  1234. cfg.akey_len = 0;
  1235. ctx->dev_data = dev_get_drvdata(sa_k3_dev);
  1236. /* Setup Encryption Security Context & Command label template */
  1237. if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, NULL, 0, NULL, 0,
  1238. ad, 0, &ctx->enc.epib[1]))
  1239. goto badkey;
  1240. cmdl_len = sa_format_cmdl_gen(&cfg,
  1241. (u8 *)ctx->enc.cmdl,
  1242. &ctx->enc.cmdl_upd_info);
  1243. if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
  1244. goto badkey;
  1245. ctx->enc.cmdl_size = cmdl_len;
  1246. return 0;
  1247. badkey:
  1248. dev_err(sa_k3_dev, "%s: badkey\n", __func__);
  1249. return -EINVAL;
  1250. }
static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
	int ret;

	memset(ctx, 0, sizeof(*ctx));
	ctx->dev_data = data;
	ret = sa_init_ctx_info(&ctx->enc, data);
	if (ret)
		return ret;

	if (alg_base) {
		ctx->shash = crypto_alloc_shash(alg_base, 0,
						CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->shash)) {
			dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
				alg_base);
			return PTR_ERR(ctx->shash);
		}
		/* for fallback */
		ctx->fallback.ahash =
			crypto_alloc_ahash(alg_base, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->fallback.ahash)) {
			dev_err(ctx->dev_data->dev,
				"Could not load fallback driver\n");
			return PTR_ERR(ctx->fallback.ahash);
		}
	}

	dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
		__func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
		ctx->dec.sc_id, &ctx->dec.sc_phys);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sa_sha_req_ctx) +
				 crypto_ahash_reqsize(ctx->fallback.ahash));

	return 0;
}
static int sa_sha_digest(struct ahash_request *req)
{
	return sa_sha_run(req);
}
static int sa_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);

	dev_dbg(sa_k3_dev, "init: digest size: %u, rctx=%p\n",
		crypto_ahash_digestsize(tfm), rctx);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
	rctx->fallback_req.base.flags =
		req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}
static int sa_sha_update(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
	rctx->fallback_req.base.flags =
		req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}
static int sa_sha_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
	rctx->fallback_req.base.flags =
		req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}
static int sa_sha_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
	rctx->fallback_req.base.flags =
		req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}
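
/* Hash state import/export are delegated entirely to the fallback ahash. */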
static int sa_sha_import(struct ahash_request *req, const void *in)
{
	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
	rctx->fallback_req.base.flags = req->base.flags &
		CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(&rctx->fallback_req, in);
}
static int sa_sha_export(struct ahash_request *req, void *out)
{
	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *subreq = &rctx->fallback_req;

	ahash_request_set_tfm(subreq, ctx->fallback.ahash);
	subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_export(subreq, out);
}
static int sa_sha1_cra_init(struct crypto_tfm *tfm)
{
	struct algo_data ad = { 0 };
	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	sa_sha_cra_init_alg(tfm, "sha1");

	ad.aalg_id = SA_AALG_ID_SHA1;
	ad.hash_size = SHA1_DIGEST_SIZE;
	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;

	sa_sha_setup(ctx, &ad);

	return 0;
}
static int sa_sha256_cra_init(struct crypto_tfm *tfm)
{
	struct algo_data ad = { 0 };
	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	sa_sha_cra_init_alg(tfm, "sha256");

	ad.aalg_id = SA_AALG_ID_SHA2_256;
	ad.hash_size = SHA256_DIGEST_SIZE;
	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;

	sa_sha_setup(ctx, &ad);

	return 0;
}
static int sa_sha512_cra_init(struct crypto_tfm *tfm)
{
	struct algo_data ad = { 0 };
	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	sa_sha_cra_init_alg(tfm, "sha512");

	ad.aalg_id = SA_AALG_ID_SHA2_512;
	ad.hash_size = SHA512_DIGEST_SIZE;
	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA512;

	sa_sha_setup(ctx, &ad);

	return 0;
}
static void sa_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);

	dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
		__func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
		ctx->dec.sc_id, &ctx->dec.sc_phys);

	if (crypto_tfm_alg_type(tfm) == CRYPTO_ALG_TYPE_AHASH)
		sa_free_ctx_info(&ctx->enc, data);

	crypto_free_shash(ctx->shash);
	crypto_free_ahash(ctx->fallback.ahash);
}
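
/*
 * AEAD DMA completion: byte-swap the tag returned in the descriptor
 * metadata, then either append it to the destination (encrypt) or compare
 * it against the tag found in the source (decrypt).
 */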
static void sa_aead_dma_in_callback(void *data)
{
	struct sa_rx_data *rxd = data;
	struct aead_request *req;
	struct crypto_aead *tfm;
	unsigned int start;
	unsigned int authsize;
	u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
	size_t pl, ml;
	int i;
	int err = 0;
	u32 *mdptr;

	sa_sync_from_device(rxd);
	req = container_of(rxd->req, struct aead_request, base);
	tfm = crypto_aead_reqtfm(req);
	start = req->assoclen + req->cryptlen;
	authsize = crypto_aead_authsize(tfm);

	mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
	for (i = 0; i < (authsize / 4); i++)
		mdptr[i + 4] = swab32(mdptr[i + 4]);

	if (rxd->enc) {
		scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
					 1);
	} else {
		start -= authsize;
		scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
					 0);

		err = memcmp(&mdptr[4], auth_tag, authsize) ? -EBADMSG : 0;
	}

	sa_free_sa_rx_data(rxd);

	aead_request_complete(req, err);
}
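
/*
 * Common AEAD transform init: allocate the base hash (shash), the software
 * fallback AEAD, and the encrypt/decrypt security context slots.
 */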
static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
			    const char *fallback)
{
	struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
	int ret;

	memzero_explicit(ctx, sizeof(*ctx));
	ctx->dev_data = data;

	ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->shash)) {
		dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", hash);
		return PTR_ERR(ctx->shash);
	}

	ctx->fallback.aead = crypto_alloc_aead(fallback, 0,
					       CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback.aead)) {
		dev_err(sa_k3_dev, "fallback driver %s couldn't be loaded\n",
			fallback);
		return PTR_ERR(ctx->fallback.aead);
	}

	crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
				crypto_aead_reqsize(ctx->fallback.aead));

	ret = sa_init_ctx_info(&ctx->enc, data);
	if (ret)
		return ret;

	ret = sa_init_ctx_info(&ctx->dec, data);
	if (ret) {
		sa_free_ctx_info(&ctx->enc, data);
		return ret;
	}

	dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
		__func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
		ctx->dec.sc_id, &ctx->dec.sc_phys);

	return ret;
}
static int sa_cra_init_aead_sha1(struct crypto_aead *tfm)
{
	return sa_cra_init_aead(tfm, "sha1",
				"authenc(hmac(sha1-ce),cbc(aes-ce))");
}
static int sa_cra_init_aead_sha256(struct crypto_aead *tfm)
{
	return sa_cra_init_aead(tfm, "sha256",
				"authenc(hmac(sha256-ce),cbc(aes-ce))");
}
static void sa_exit_tfm_aead(struct crypto_aead *tfm)
{
	struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);

	crypto_free_shash(ctx->shash);
	crypto_free_aead(ctx->fallback.aead);

	sa_free_ctx_info(&ctx->enc, data);
	sa_free_ctx_info(&ctx->dec, data);
}
/* AEAD algorithm configuration interface function */
static int sa_aead_setkey(struct crypto_aead *authenc,
			  const u8 *key, unsigned int keylen,
			  struct algo_data *ad)
{
	struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;
	int cmdl_len;
	struct sa_cmdl_cfg cfg;
	int key_idx;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		return -EINVAL;

	/* Convert the key size (16/24/32) to the key size index (0/1/2) */
	key_idx = (keys.enckeylen >> 3) - 2;
	if (key_idx >= 3)
		return -EINVAL;

	ad->ctx = ctx;
	ad->enc_eng.eng_id = SA_ENG_ID_EM1;
	ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
	ad->auth_eng.eng_id = SA_ENG_ID_AM1;
	ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
	ad->mci_enc = mci_cbc_enc_no_iv_array[key_idx];
	ad->mci_dec = mci_cbc_dec_no_iv_array[key_idx];
	ad->inv_key = true;
	ad->keyed_mac = true;
	ad->ealg_id = SA_EALG_ID_AES_CBC;
	ad->prep_iopad = sa_prepare_iopads;

	memset(&cfg, 0, sizeof(cfg));
	cfg.enc = true;
	cfg.aalg = ad->aalg_id;
	cfg.enc_eng_id = ad->enc_eng.eng_id;
	cfg.auth_eng_id = ad->auth_eng.eng_id;
	cfg.iv_size = crypto_aead_ivsize(authenc);
	cfg.akey = keys.authkey;
	cfg.akey_len = keys.authkeylen;

	/* Setup Encryption Security Context & Command label template */
	if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, keys.enckey,
		       keys.enckeylen, keys.authkey, keys.authkeylen,
		       ad, 1, &ctx->enc.epib[1]))
		return -EINVAL;

	cmdl_len = sa_format_cmdl_gen(&cfg,
				      (u8 *)ctx->enc.cmdl,
				      &ctx->enc.cmdl_upd_info);
	if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
		return -EINVAL;

	ctx->enc.cmdl_size = cmdl_len;

	/* Setup Decryption Security Context & Command label template */
	if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, keys.enckey,
		       keys.enckeylen, keys.authkey, keys.authkeylen,
		       ad, 0, &ctx->dec.epib[1]))
		return -EINVAL;

	cfg.enc = false;
	cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
				      &ctx->dec.cmdl_upd_info);

	if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
		return -EINVAL;

	ctx->dec.cmdl_size = cmdl_len;

	crypto_aead_clear_flags(ctx->fallback.aead, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(ctx->fallback.aead,
			      crypto_aead_get_flags(authenc) &
			      CRYPTO_TFM_REQ_MASK);

	return crypto_aead_setkey(ctx->fallback.aead, key, keylen);
}
static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));

	return crypto_aead_setauthsize(ctx->fallback.aead, authsize);
}
static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc,
				   const u8 *key, unsigned int keylen)
{
	struct algo_data ad = { 0 };

	ad.ealg_id = SA_EALG_ID_AES_CBC;
	ad.aalg_id = SA_AALG_ID_HMAC_SHA1;
	ad.hash_size = SHA1_DIGEST_SIZE;
	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;

	return sa_aead_setkey(authenc, key, keylen, &ad);
}
static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc,
				     const u8 *key, unsigned int keylen)
{
	struct algo_data ad = { 0 };

	ad.ealg_id = SA_EALG_ID_AES_CBC;
	ad.aalg_id = SA_AALG_ID_HMAC_SHA2_256;
	ad.hash_size = SHA256_DIGEST_SIZE;
	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;

	return sa_aead_setkey(authenc, key, keylen, &ad);
}
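
/*
 * Common AEAD request path: sizes the engine cannot handle safely are
 * handed to the fallback AEAD, everything else is mapped onto a sa_req
 * and submitted via sa_run().
 */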
static int sa_aead_run(struct aead_request *req, u8 *iv, int enc)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
	struct sa_req sa_req = { 0 };
	size_t auth_size, enc_size;

	enc_size = req->cryptlen;
	auth_size = req->assoclen + req->cryptlen;

	if (!enc) {
		enc_size -= crypto_aead_authsize(tfm);
		auth_size -= crypto_aead_authsize(tfm);
	}

	if (auth_size > SA_MAX_DATA_SZ ||
	    (auth_size >= SA_UNSAFE_DATA_SZ_MIN &&
	     auth_size <= SA_UNSAFE_DATA_SZ_MAX)) {
		struct aead_request *subreq = aead_request_ctx(req);
		int ret;

		aead_request_set_tfm(subreq, ctx->fallback.aead);
		aead_request_set_callback(subreq, req->base.flags,
					  req->base.complete, req->base.data);
		aead_request_set_crypt(subreq, req->src, req->dst,
				       req->cryptlen, req->iv);
		aead_request_set_ad(subreq, req->assoclen);

		ret = enc ? crypto_aead_encrypt(subreq) :
			crypto_aead_decrypt(subreq);
		return ret;
	}

	sa_req.enc_offset = req->assoclen;
	sa_req.enc_size = enc_size;
	sa_req.auth_size = auth_size;
	sa_req.size = auth_size;
	sa_req.enc_iv = iv;
	sa_req.type = CRYPTO_ALG_TYPE_AEAD;
	sa_req.enc = enc;
	sa_req.callback = sa_aead_dma_in_callback;
	sa_req.mdata_size = 52;
	sa_req.base = &req->base;
	sa_req.ctx = ctx;
	sa_req.src = req->src;
	sa_req.dst = req->dst;

	return sa_run(&sa_req);
}
/* AEAD algorithm encrypt interface function */
static int sa_aead_encrypt(struct aead_request *req)
{
	return sa_aead_run(req, req->iv, 1);
}

/* AEAD algorithm decrypt interface function */
static int sa_aead_decrypt(struct aead_request *req)
{
	return sa_aead_run(req, req->iv, 0);
}
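
/*
 * Algorithm template table; entries are registered only when the matching
 * bit is set in the SoC's supported_algos mask.
 */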
static struct sa_alg_tmpl sa_algs[] = {
	[SA_ALG_CBC_AES] = {
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "cbc(aes)",
			.base.cra_driver_name = "cbc-aes-sa2ul",
			.base.cra_priority = 30000,
			.base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
					  CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
			.base.cra_blocksize = AES_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.base.cra_module = THIS_MODULE,
			.init = sa_cipher_cra_init,
			.exit = sa_cipher_cra_exit,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = sa_aes_cbc_setkey,
			.encrypt = sa_encrypt,
			.decrypt = sa_decrypt,
		}
	},
	[SA_ALG_EBC_AES] = {
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "ecb(aes)",
			.base.cra_driver_name = "ecb-aes-sa2ul",
			.base.cra_priority = 30000,
			.base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
					  CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
			.base.cra_blocksize = AES_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.base.cra_module = THIS_MODULE,
			.init = sa_cipher_cra_init,
			.exit = sa_cipher_cra_exit,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = sa_aes_ecb_setkey,
			.encrypt = sa_encrypt,
			.decrypt = sa_decrypt,
		}
	},
	[SA_ALG_CBC_DES3] = {
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "cbc(des3_ede)",
			.base.cra_driver_name = "cbc-des3-sa2ul",
			.base.cra_priority = 30000,
			.base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
					  CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
			.base.cra_blocksize = DES_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.base.cra_module = THIS_MODULE,
			.init = sa_cipher_cra_init,
			.exit = sa_cipher_cra_exit,
			.min_keysize = 3 * DES_KEY_SIZE,
			.max_keysize = 3 * DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = sa_3des_cbc_setkey,
			.encrypt = sa_encrypt,
			.decrypt = sa_decrypt,
		}
	},
	[SA_ALG_ECB_DES3] = {
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "ecb(des3_ede)",
			.base.cra_driver_name = "ecb-des3-sa2ul",
			.base.cra_priority = 30000,
			.base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
					  CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
			.base.cra_blocksize = DES_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.base.cra_module = THIS_MODULE,
			.init = sa_cipher_cra_init,
			.exit = sa_cipher_cra_exit,
			.min_keysize = 3 * DES_KEY_SIZE,
			.max_keysize = 3 * DES_KEY_SIZE,
			.setkey = sa_3des_ecb_setkey,
			.encrypt = sa_encrypt,
			.decrypt = sa_decrypt,
		}
	},
	[SA_ALG_SHA1] = {
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.ahash = {
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-sa2ul",
				.cra_priority = 400,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_init = sa_sha1_cra_init,
				.cra_exit = sa_sha_cra_exit,
			},
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct sa_sha_req_ctx) +
					  sizeof(struct sha1_state),
			.init = sa_sha_init,
			.update = sa_sha_update,
			.final = sa_sha_final,
			.finup = sa_sha_finup,
			.digest = sa_sha_digest,
			.export = sa_sha_export,
			.import = sa_sha_import,
		},
	},
	[SA_ALG_SHA256] = {
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.ahash = {
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-sa2ul",
				.cra_priority = 400,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_init = sa_sha256_cra_init,
				.cra_exit = sa_sha_cra_exit,
			},
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct sa_sha_req_ctx) +
					  sizeof(struct sha256_state),
			.init = sa_sha_init,
			.update = sa_sha_update,
			.final = sa_sha_final,
			.finup = sa_sha_finup,
			.digest = sa_sha_digest,
			.export = sa_sha_export,
			.import = sa_sha_import,
		},
	},
	[SA_ALG_SHA512] = {
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.ahash = {
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-sa2ul",
				.cra_priority = 400,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_init = sa_sha512_cra_init,
				.cra_exit = sa_sha_cra_exit,
			},
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct sa_sha_req_ctx) +
					  sizeof(struct sha512_state),
			.init = sa_sha_init,
			.update = sa_sha_update,
			.final = sa_sha_final,
			.finup = sa_sha_finup,
			.digest = sa_sha_digest,
			.export = sa_sha_export,
			.import = sa_sha_import,
		},
	},
	[SA_ALG_AUTHENC_SHA1_AES] = {
		.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc(hmac(sha1),cbc(aes))-sa2ul",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_priority = 3000,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,

			.init = sa_cra_init_aead_sha1,
			.exit = sa_exit_tfm_aead,
			.setkey = sa_aead_cbc_sha1_setkey,
			.setauthsize = sa_aead_setauthsize,
			.encrypt = sa_aead_encrypt,
			.decrypt = sa_aead_decrypt,
		},
	},
	[SA_ALG_AUTHENC_SHA256_AES] = {
		.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc(hmac(sha256),cbc(aes))-sa2ul",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_alignmask = 0,
				.cra_priority = 3000,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,

			.init = sa_cra_init_aead_sha256,
			.exit = sa_exit_tfm_aead,
			.setkey = sa_aead_cbc_sha256_setkey,
			.setauthsize = sa_aead_setauthsize,
			.encrypt = sa_aead_encrypt,
			.decrypt = sa_aead_decrypt,
		},
	},
};
/* Register the algorithms in crypto framework */
static void sa_register_algos(struct sa_crypto_data *dev_data)
{
	const struct sa_match_data *match_data = dev_data->match_data;
	struct device *dev = dev_data->dev;
	char *alg_name;
	u32 type;
	int i, err;

	for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
		/* Skip unsupported algos */
		if (!(match_data->supported_algos & BIT(i)))
			continue;

		type = sa_algs[i].type;
		if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
			alg_name = sa_algs[i].alg.skcipher.base.cra_name;
			err = crypto_register_skcipher(&sa_algs[i].alg.skcipher);
		} else if (type == CRYPTO_ALG_TYPE_AHASH) {
			alg_name = sa_algs[i].alg.ahash.halg.base.cra_name;
			err = crypto_register_ahash(&sa_algs[i].alg.ahash);
		} else if (type == CRYPTO_ALG_TYPE_AEAD) {
			alg_name = sa_algs[i].alg.aead.base.cra_name;
			err = crypto_register_aead(&sa_algs[i].alg.aead);
		} else {
			dev_err(dev,
				"un-supported crypto algorithm (%d)",
				sa_algs[i].type);
			continue;
		}

		if (err)
			dev_err(dev, "Failed to register '%s'\n", alg_name);
		else
			sa_algs[i].registered = true;
	}
}
/* Unregister the algorithms in crypto framework */
static void sa_unregister_algos(const struct device *dev)
{
	u32 type;
	int i;

	for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
		type = sa_algs[i].type;
		if (!sa_algs[i].registered)
			continue;
		if (type == CRYPTO_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&sa_algs[i].alg.skcipher);
		else if (type == CRYPTO_ALG_TYPE_AHASH)
			crypto_unregister_ahash(&sa_algs[i].alg.ahash);
		else if (type == CRYPTO_ALG_TYPE_AEAD)
			crypto_unregister_aead(&sa_algs[i].alg.aead);

		sa_algs[i].registered = false;
	}
}
static int sa_init_mem(struct sa_crypto_data *dev_data)
{
	struct device *dev = &dev_data->pdev->dev;
	/* Setup dma pool for security context buffers */
	dev_data->sc_pool = dma_pool_create("keystone-sc", dev,
					    SA_CTX_MAX_SZ, 64, 0);
	if (!dev_data->sc_pool) {
		dev_err(dev, "Failed to create dma pool");
		return -ENOMEM;
	}

	return 0;
}
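
/* Request and configure the rx1, rx2 and tx DMA channels used by the engine. */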
static int sa_dma_init(struct sa_crypto_data *dd)
{
	int ret;
	struct dma_slave_config cfg;

	dd->dma_rx1 = NULL;
	dd->dma_tx = NULL;
	dd->dma_rx2 = NULL;

	ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48));
	if (ret)
		return ret;

	dd->dma_rx1 = dma_request_chan(dd->dev, "rx1");
	if (IS_ERR(dd->dma_rx1))
		return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx1),
				     "Unable to request rx1 DMA channel\n");

	dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
	if (IS_ERR(dd->dma_rx2)) {
		ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
				    "Unable to request rx2 DMA channel\n");
		goto err_dma_rx2;
	}

	dd->dma_tx = dma_request_chan(dd->dev, "tx");
	if (IS_ERR(dd->dma_tx)) {
		ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_tx),
				    "Unable to request tx DMA channel\n");
		goto err_dma_tx;
	}

	memzero_explicit(&cfg, sizeof(cfg));

	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = 4;
	cfg.dst_maxburst = 4;

	ret = dmaengine_slave_config(dd->dma_rx1, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		goto err_dma_config;
	}

	ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		goto err_dma_config;
	}

	ret = dmaengine_slave_config(dd->dma_tx, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
			ret);
		goto err_dma_config;
	}

	return 0;

err_dma_config:
	dma_release_channel(dd->dma_tx);
err_dma_tx:
	dma_release_channel(dd->dma_rx2);
err_dma_rx2:
	dma_release_channel(dd->dma_rx1);

	return ret;
}
static int sa_link_child(struct device *dev, void *data)
{
	struct device *parent = data;

	device_link_add(dev, parent, DL_FLAG_AUTOPROBE_CONSUMER);

	return 0;
}
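
/* Per-SoC configuration data selected through the OF match table below. */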
static struct sa_match_data am654_match_data = {
	.priv = 1,
	.priv_id = 1,
	.supported_algos = BIT(SA_ALG_CBC_AES) |
			   BIT(SA_ALG_EBC_AES) |
			   BIT(SA_ALG_CBC_DES3) |
			   BIT(SA_ALG_ECB_DES3) |
			   BIT(SA_ALG_SHA1) |
			   BIT(SA_ALG_SHA256) |
			   BIT(SA_ALG_SHA512) |
			   BIT(SA_ALG_AUTHENC_SHA1_AES) |
			   BIT(SA_ALG_AUTHENC_SHA256_AES),
};

static struct sa_match_data am64_match_data = {
	.priv = 0,
	.priv_id = 0,
	.supported_algos = BIT(SA_ALG_CBC_AES) |
			   BIT(SA_ALG_EBC_AES) |
			   BIT(SA_ALG_SHA256) |
			   BIT(SA_ALG_SHA512) |
			   BIT(SA_ALG_AUTHENC_SHA256_AES),
};
static const struct of_device_id of_match[] = {
	{ .compatible = "ti,j721e-sa2ul", .data = &am654_match_data, },
	{ .compatible = "ti,am654-sa2ul", .data = &am654_match_data, },
	{ .compatible = "ti,am64-sa2ul", .data = &am64_match_data, },
	{ .compatible = "ti,am62-sa3ul", .data = &am64_match_data, },
	{},
};
MODULE_DEVICE_TABLE(of, of_match);
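
/*
 * Probe: map the engine registers, set up the DMA pool and channels,
 * enable the engine sub-blocks and register the supported algorithms.
 */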
static int sa_ul_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	static void __iomem *saul_base;
	struct sa_crypto_data *dev_data;
	u32 status, val;
	int ret;

	dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;

	dev_data->match_data = of_device_get_match_data(dev);
	if (!dev_data->match_data)
		return -ENODEV;

	saul_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(saul_base))
		return PTR_ERR(saul_base);

	sa_k3_dev = dev;
	dev_data->dev = dev;
	dev_data->pdev = pdev;
	dev_data->base = saul_base;
	platform_set_drvdata(pdev, dev_data);
	dev_set_drvdata(sa_k3_dev, dev_data);

	pm_runtime_enable(dev);
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "%s: failed to get sync: %d\n", __func__, ret);
		pm_runtime_disable(dev);
		return ret;
	}

	sa_init_mem(dev_data);
	ret = sa_dma_init(dev_data);
	if (ret)
		goto destroy_dma_pool;

	spin_lock_init(&dev_data->scid_lock);

	val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
	      SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
	      SA_EEC_TRNG_EN;
	status = readl_relaxed(saul_base + SA_ENGINE_STATUS);
	/* Only enable engines if all are not already enabled */
	if (val & ~status)
		writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);

	sa_register_algos(dev_data);

	ret = of_platform_populate(node, NULL, NULL, dev);
	if (ret)
		goto release_dma;

	device_for_each_child(dev, dev, sa_link_child);

	return 0;

release_dma:
	sa_unregister_algos(dev);

	dma_release_channel(dev_data->dma_rx2);
	dma_release_channel(dev_data->dma_rx1);
	dma_release_channel(dev_data->dma_tx);

destroy_dma_pool:
	dma_pool_destroy(dev_data->sc_pool);

	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	return ret;
}
static void sa_ul_remove(struct platform_device *pdev)
{
	struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);

	of_platform_depopulate(&pdev->dev);

	sa_unregister_algos(&pdev->dev);

	dma_release_channel(dev_data->dma_rx2);
	dma_release_channel(dev_data->dma_rx1);
	dma_release_channel(dev_data->dma_tx);

	dma_pool_destroy(dev_data->sc_pool);

	platform_set_drvdata(pdev, NULL);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}
static struct platform_driver sa_ul_driver = {
	.probe = sa_ul_probe,
	.remove_new = sa_ul_remove,
	.driver = {
		.name = "saul-crypto",
		.of_match_table = of_match,
	},
};
module_platform_driver(sa_ul_driver);
MODULE_DESCRIPTION("K3 SA2UL crypto accelerator driver");
MODULE_LICENSE("GPL v2");