s5p-sss.c

// SPDX-License-Identifier: GPL-2.0
//
// Cryptographic API.
//
// Support for Samsung S5PV210 and Exynos HW acceleration.
//
// Copyright (C) 2011 NetUP Inc. All rights reserved.
// Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
//
// Hash part based on omap-sham.c driver.

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/ctr.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/internal/hash.h>

#define _SBF(s, v) ((v) << (s))
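/*
 * Note: _SBF(s, v) simply places the field value v at bit offset s, so e.g.
 * SSS_AES_KEY_SIZE_256 below expands to 0x02 << 4.
 */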
/* Feed control registers */
#define SSS_REG_FCINTSTAT 0x0000
#define SSS_FCINTSTAT_HPARTINT BIT(7)
#define SSS_FCINTSTAT_HDONEINT BIT(5)
#define SSS_FCINTSTAT_BRDMAINT BIT(3)
#define SSS_FCINTSTAT_BTDMAINT BIT(2)
#define SSS_FCINTSTAT_HRDMAINT BIT(1)
#define SSS_FCINTSTAT_PKDMAINT BIT(0)

#define SSS_REG_FCINTENSET 0x0004
#define SSS_FCINTENSET_HPARTINTENSET BIT(7)
#define SSS_FCINTENSET_HDONEINTENSET BIT(5)
#define SSS_FCINTENSET_BRDMAINTENSET BIT(3)
#define SSS_FCINTENSET_BTDMAINTENSET BIT(2)
#define SSS_FCINTENSET_HRDMAINTENSET BIT(1)
#define SSS_FCINTENSET_PKDMAINTENSET BIT(0)

#define SSS_REG_FCINTENCLR 0x0008
#define SSS_FCINTENCLR_HPARTINTENCLR BIT(7)
#define SSS_FCINTENCLR_HDONEINTENCLR BIT(5)
#define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3)
#define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2)
#define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1)
#define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0)

#define SSS_REG_FCINTPEND 0x000C
#define SSS_FCINTPEND_HPARTINTP BIT(7)
#define SSS_FCINTPEND_HDONEINTP BIT(5)
#define SSS_FCINTPEND_BRDMAINTP BIT(3)
#define SSS_FCINTPEND_BTDMAINTP BIT(2)
#define SSS_FCINTPEND_HRDMAINTP BIT(1)
#define SSS_FCINTPEND_PKDMAINTP BIT(0)

#define SSS_REG_FCFIFOSTAT 0x0010
#define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7)
#define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6)
#define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5)
#define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4)
#define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3)
#define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2)
#define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1)
#define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0)

#define SSS_REG_FCFIFOCTRL 0x0014
#define SSS_FCFIFOCTRL_DESSEL BIT(2)
#define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)
#define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
#define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)
#define SSS_HASHIN_MASK _SBF(0, 0x03)

#define SSS_REG_FCBRDMAS 0x0020
#define SSS_REG_FCBRDMAL 0x0024
#define SSS_REG_FCBRDMAC 0x0028
#define SSS_FCBRDMAC_BYTESWAP BIT(1)
#define SSS_FCBRDMAC_FLUSH BIT(0)

#define SSS_REG_FCBTDMAS 0x0030
#define SSS_REG_FCBTDMAL 0x0034
#define SSS_REG_FCBTDMAC 0x0038
#define SSS_FCBTDMAC_BYTESWAP BIT(1)
#define SSS_FCBTDMAC_FLUSH BIT(0)

#define SSS_REG_FCHRDMAS 0x0040
#define SSS_REG_FCHRDMAL 0x0044
#define SSS_REG_FCHRDMAC 0x0048
#define SSS_FCHRDMAC_BYTESWAP BIT(1)
#define SSS_FCHRDMAC_FLUSH BIT(0)

#define SSS_REG_FCPKDMAS 0x0050
#define SSS_REG_FCPKDMAL 0x0054
#define SSS_REG_FCPKDMAC 0x0058
#define SSS_FCPKDMAC_BYTESWAP BIT(3)
#define SSS_FCPKDMAC_DESCEND BIT(2)
#define SSS_FCPKDMAC_TRANSMIT BIT(1)
#define SSS_FCPKDMAC_FLUSH BIT(0)
#define SSS_REG_FCPKDMAO 0x005C

/* AES registers */
#define SSS_REG_AES_CONTROL 0x00
#define SSS_AES_BYTESWAP_DI BIT(11)
#define SSS_AES_BYTESWAP_DO BIT(10)
#define SSS_AES_BYTESWAP_IV BIT(9)
#define SSS_AES_BYTESWAP_CNT BIT(8)
#define SSS_AES_BYTESWAP_KEY BIT(7)
#define SSS_AES_KEY_CHANGE_MODE BIT(6)
#define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
#define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
#define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
#define SSS_AES_FIFO_MODE BIT(3)
#define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
#define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
#define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
#define SSS_AES_MODE_DECRYPT BIT(0)

#define SSS_REG_AES_STATUS 0x04
#define SSS_AES_BUSY BIT(2)
#define SSS_AES_INPUT_READY BIT(1)
#define SSS_AES_OUTPUT_READY BIT(0)

#define SSS_REG_AES_IN_DATA(s) (0x10 + (s << 2))
#define SSS_REG_AES_OUT_DATA(s) (0x20 + (s << 2))
#define SSS_REG_AES_IV_DATA(s) (0x30 + (s << 2))
#define SSS_REG_AES_CNT_DATA(s) (0x40 + (s << 2))
#define SSS_REG_AES_KEY_DATA(s) (0x80 + (s << 2))

#define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
#define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
#define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))

#define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg)
#define SSS_AES_WRITE(dev, reg, val) __raw_writel((val), \
						SSS_AES_REG(dev, reg))

/* HW engine modes */
#define FLAGS_AES_DECRYPT BIT(0)
#define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
#define FLAGS_AES_CBC _SBF(1, 0x01)
#define FLAGS_AES_CTR _SBF(1, 0x02)

#define AES_KEY_LEN 16
#define CRYPTO_QUEUE_LEN 1
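
/*
 * Note: the software FLAGS_AES_* values above mirror the bit layout of the
 * corresponding SSS_AES_CONTROL fields (decrypt at bit 0, chain mode in the
 * two-bit field at shift 1), which keeps the request-mode flags and the
 * hardware control bits directly comparable.
 */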

/* HASH registers */
#define SSS_REG_HASH_CTRL 0x00
#define SSS_HASH_USER_IV_EN BIT(5)
#define SSS_HASH_INIT_BIT BIT(4)
#define SSS_HASH_ENGINE_SHA1 _SBF(1, 0x00)
#define SSS_HASH_ENGINE_MD5 _SBF(1, 0x01)
#define SSS_HASH_ENGINE_SHA256 _SBF(1, 0x02)
#define SSS_HASH_ENGINE_MASK _SBF(1, 0x03)

#define SSS_REG_HASH_CTRL_PAUSE 0x04
#define SSS_HASH_PAUSE BIT(0)

#define SSS_REG_HASH_CTRL_FIFO 0x08
#define SSS_HASH_FIFO_MODE_DMA BIT(0)
#define SSS_HASH_FIFO_MODE_CPU 0

#define SSS_REG_HASH_CTRL_SWAP 0x0C
#define SSS_HASH_BYTESWAP_DI BIT(3)
#define SSS_HASH_BYTESWAP_DO BIT(2)
#define SSS_HASH_BYTESWAP_IV BIT(1)
#define SSS_HASH_BYTESWAP_KEY BIT(0)

#define SSS_REG_HASH_STATUS 0x10
#define SSS_HASH_STATUS_MSG_DONE BIT(6)
#define SSS_HASH_STATUS_PARTIAL_DONE BIT(4)
#define SSS_HASH_STATUS_BUFFER_READY BIT(0)

#define SSS_REG_HASH_MSG_SIZE_LOW 0x20
#define SSS_REG_HASH_MSG_SIZE_HIGH 0x24
#define SSS_REG_HASH_PRE_MSG_SIZE_LOW 0x28
#define SSS_REG_HASH_PRE_MSG_SIZE_HIGH 0x2C

#define SSS_REG_HASH_IV(s) (0xB0 + ((s) << 2))
#define SSS_REG_HASH_OUT(s) (0x100 + ((s) << 2))

#define HASH_BLOCK_SIZE 64
#define HASH_REG_SIZEOF 4
#define HASH_MD5_MAX_REG (MD5_DIGEST_SIZE / HASH_REG_SIZEOF)
#define HASH_SHA1_MAX_REG (SHA1_DIGEST_SIZE / HASH_REG_SIZEOF)
#define HASH_SHA256_MAX_REG (SHA256_DIGEST_SIZE / HASH_REG_SIZEOF)

/*
 * HASH bit numbers, used by the device; set in dev->hash_flags with
 * set_bit()/clear_bit() or tested with test_bit() or BIT(), to keep the
 * HASH state BUSY or FREE, or to signal state from the irq_handler to the
 * hash_tasklet. The SGS bits keep track of memory allocated for the
 * scatterlist.
 */
#define HASH_FLAGS_BUSY 0
#define HASH_FLAGS_FINAL 1
#define HASH_FLAGS_DMA_ACTIVE 2
#define HASH_FLAGS_OUTPUT_READY 3
#define HASH_FLAGS_DMA_READY 4
#define HASH_FLAGS_SGS_COPIED 5
#define HASH_FLAGS_SGS_ALLOCED 6

/* HASH HW constants */
#define BUFLEN HASH_BLOCK_SIZE

#define SSS_HASH_QUEUE_LENGTH 10
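
/*
 * MD5, SHA-1 and SHA-256 all operate on 64-byte blocks, so a single
 * BUFLEN-sized staging buffer covers partial blocks for every digest
 * supported by this engine.
 */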

/**
 * struct samsung_aes_variant - platform specific SSS driver data
 * @aes_offset: AES register offset from SSS module's base.
 * @hash_offset: HASH register offset from SSS module's base.
 * @clk_names: names of clocks needed to run SSS IP
 *
 * Specifies platform specific configuration of SSS module.
 * Note: A structure for driver specific platform data is used for future
 * expansion of its usage.
 */
struct samsung_aes_variant {
	unsigned int aes_offset;
	unsigned int hash_offset;
	const char *clk_names[2];
};

struct s5p_aes_reqctx {
	unsigned long mode;
};

struct s5p_aes_ctx {
	struct s5p_aes_dev *dev;
	u8 aes_key[AES_MAX_KEY_SIZE];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	int keylen;
};

/**
 * struct s5p_aes_dev - Crypto device state container
 * @dev: Associated device
 * @clk: Clock for accessing hardware
 * @pclk: APB bus clock necessary to access the hardware
 * @ioaddr: Mapped IO memory region
 * @aes_ioaddr: Per-variant offset for AES block IO memory
 * @irq_fc: Feed control interrupt line
 * @req: Crypto request currently handled by the device
 * @ctx: Configuration for currently handled crypto request
 * @sg_src: Scatter list with source data for currently handled block
 *	in device. This is DMA-mapped into device.
 * @sg_dst: Scatter list with destination data for currently handled block
 *	in device. This is DMA-mapped into device.
 * @sg_src_cpy: In case of unaligned access, copied scatter list
 *	with source data.
 * @sg_dst_cpy: In case of unaligned access, copied scatter list
 *	with destination data.
 * @tasklet: New request scheduling job
 * @queue: Crypto queue
 * @busy: Indicates whether the device is currently handling some request
 *	thus it uses some of the fields from this state, like:
 *	req, ctx, sg_src/dst (and copies). This essentially
 *	protects against concurrent access to these fields.
 * @lock: Lock for protecting both access to device hardware registers
 *	and fields related to current request (including the busy field).
 * @res: Resources for hash.
 * @io_hash_base: Per-variant offset for HASH block IO memory.
 * @hash_lock: Lock for protecting hash_req, hash_queue and hash_flags
 *	variable.
 * @hash_flags: Flags for current HASH op.
 * @hash_queue: Async hash queue.
 * @hash_tasklet: New HASH request scheduling job.
 * @xmit_buf: Buffer for current HASH request transfer into SSS block.
 * @hash_req: Current request sending to SSS HASH block.
 * @hash_sg_iter: Scatterlist transferred through DMA into SSS HASH block.
 * @hash_sg_cnt: Counter for hash_sg_iter.
 *
 * @use_hash: true if HASH algs enabled
 */
struct s5p_aes_dev {
	struct device *dev;
	struct clk *clk;
	struct clk *pclk;
	void __iomem *ioaddr;
	void __iomem *aes_ioaddr;
	int irq_fc;

	struct skcipher_request *req;
	struct s5p_aes_ctx *ctx;
	struct scatterlist *sg_src;
	struct scatterlist *sg_dst;

	struct scatterlist *sg_src_cpy;
	struct scatterlist *sg_dst_cpy;

	struct tasklet_struct tasklet;
	struct crypto_queue queue;
	bool busy;
	spinlock_t lock;

	struct resource *res;
	void __iomem *io_hash_base;

	spinlock_t hash_lock; /* protect hash_ vars */
	unsigned long hash_flags;
	struct crypto_queue hash_queue;
	struct tasklet_struct hash_tasklet;

	u8 xmit_buf[BUFLEN];
	struct ahash_request *hash_req;
	struct scatterlist *hash_sg_iter;
	unsigned int hash_sg_cnt;

	bool use_hash;
};

/**
 * struct s5p_hash_reqctx - HASH request context
 * @dd: Associated device
 * @op_update: Current request operation (OP_UPDATE or OP_FINAL)
 * @digcnt: Number of bytes processed by HW (without buffer[] ones)
 * @digest: Digest message or IV for partial result
 * @nregs: Number of HW registers for digest or IV read/write
 * @engine: Bits for selecting type of HASH in SSS block
 * @sg: sg for DMA transfer
 * @sg_len: Length of sg for DMA transfer
 * @sgl: sg for joining buffer and req->src scatterlist
 * @skip: Skip offset in req->src for current op
 * @total: Total number of bytes for current request
 * @finup: Keep state for finup or final.
 * @error: Keep track of error.
 * @bufcnt: Number of bytes held in buffer[]
 * @buffer: For byte(s) from end of req->src in UPDATE op
 */
struct s5p_hash_reqctx {
	struct s5p_aes_dev *dd;
	bool op_update;

	u64 digcnt;
	u8 digest[SHA256_DIGEST_SIZE];

	unsigned int nregs; /* digest_size / sizeof(reg) */
	u32 engine;

	struct scatterlist *sg;
	unsigned int sg_len;
	struct scatterlist sgl[2];
	unsigned int skip;
	unsigned int total;
	bool finup;
	bool error;

	u32 bufcnt;
	u8 buffer[];
};

/**
 * struct s5p_hash_ctx - HASH transformation context
 * @dd: Associated device
 * @flags: Bits for algorithm HASH.
 * @fallback: Software transformation for zero message or size < BUFLEN.
 */
struct s5p_hash_ctx {
	struct s5p_aes_dev *dd;
	unsigned long flags;
	struct crypto_shash *fallback;
};

static const struct samsung_aes_variant s5p_aes_data = {
	.aes_offset = 0x4000,
	.hash_offset = 0x6000,
	.clk_names = { "secss", },
};

static const struct samsung_aes_variant exynos_aes_data = {
	.aes_offset = 0x200,
	.hash_offset = 0x400,
	.clk_names = { "secss", },
};

static const struct samsung_aes_variant exynos5433_slim_aes_data = {
	.aes_offset = 0x400,
	.hash_offset = 0x800,
	.clk_names = { "aclk", "pclk", },
};

static const struct of_device_id s5p_sss_dt_match[] = {
	{
		.compatible = "samsung,s5pv210-secss",
		.data = &s5p_aes_data,
	},
	{
		.compatible = "samsung,exynos4210-secss",
		.data = &exynos_aes_data,
	},
	{
		.compatible = "samsung,exynos5433-slim-sss",
		.data = &exynos5433_slim_aes_data,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);
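
/*
 * Variant lookup: on DT systems the match data from s5p_sss_dt_match above is
 * used; otherwise the variant is taken from the driver_data of the matching
 * platform_device_id entry.
 */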
static inline const struct samsung_aes_variant *find_s5p_sss_version
				   (const struct platform_device *pdev)
{
	if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node))
		return of_device_get_match_data(&pdev->dev);

	return (const struct samsung_aes_variant *)
			platform_get_device_id(pdev)->driver_data;
}

static struct s5p_aes_dev *s5p_dev;

static void s5p_set_dma_indata(struct s5p_aes_dev *dev,
			       const struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
}

static void s5p_set_dma_outdata(struct s5p_aes_dev *dev,
				const struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
}

static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
{
	int len;

	if (!*sg)
		return;

	len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
	free_pages((unsigned long)sg_virt(*sg), get_order(len));
	kfree(*sg);
	*sg = NULL;
}
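
/*
 * Thin wrapper around scatterwalk_copychunks(): with out == 0 the bytes are
 * copied from the scatterlist into buf, with out == 1 they are copied from
 * buf back into the scatterlist (as done for the output data in
 * s5p_sg_done() below).
 */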
static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
			    unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}

static void s5p_sg_done(struct s5p_aes_dev *dev)
{
	struct skcipher_request *req = dev->req;
	struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);

	if (dev->sg_dst_cpy) {
		dev_dbg(dev->dev,
			"Copying %d bytes of output data back to original place\n",
			dev->req->cryptlen);
		s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
				dev->req->cryptlen, 1);
	}
	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
	if (reqctx->mode & FLAGS_AES_CBC)
		memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), AES_BLOCK_SIZE);
	else if (reqctx->mode & FLAGS_AES_CTR)
		memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), AES_BLOCK_SIZE);
}

/* Calls the completion. Cannot be called with dev->lock held. */
static void s5p_aes_complete(struct skcipher_request *req, int err)
{
	skcipher_request_complete(req, err);
}

static void s5p_unset_outdata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
}

static void s5p_unset_indata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
}
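
/*
 * In the unaligned case (see @sg_src_cpy/@sg_dst_cpy in struct s5p_aes_dev),
 * the request data is staged into one contiguous buffer rounded up to
 * AES_BLOCK_SIZE, and a single-entry scatterlist describing that buffer is
 * handed to the engine instead of the caller's list.
 */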
static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
			   struct scatterlist **dst)
{
	void *pages;
	int len;

	*dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
	if (!*dst)
		return -ENOMEM;

	len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
	pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
	if (!pages) {
		kfree(*dst);
		*dst = NULL;
		return -ENOMEM;
	}

	s5p_sg_copy_buf(pages, src, dev->req->cryptlen, 0);

	sg_init_table(*dst, 1);
	sg_set_buf(*dst, pages, len);

	return 0;
}

static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	if (!sg->length)
		return -EINVAL;

	if (!dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE))
		return -ENOMEM;

	dev->sg_dst = sg;

	return 0;
}

static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	if (!sg->length)
		return -EINVAL;

	if (!dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE))
		return -ENOMEM;

	dev->sg_src = sg;

	return 0;
}

/*
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new transmitting (output) data is ready and its address+length
 *    have to be written to device (by calling s5p_set_dma_outdata()).
 */
static int s5p_aes_tx(struct s5p_aes_dev *dev)
{
	int ret = 0;

	s5p_unset_outdata(dev);

	if (!sg_is_last(dev->sg_dst)) {
		ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
		if (!ret)
			ret = 1;
	}

	return ret;
}

/*
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new receiving (input) data is ready and its address+length
 *    have to be written to device (by calling s5p_set_dma_indata()).
 */
static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
{
	int ret = 0;

	s5p_unset_indata(dev);

	if (!sg_is_last(dev->sg_src)) {
		ret = s5p_set_indata(dev, sg_next(dev->sg_src));
		if (!ret)
			ret = 1;
	}

	return ret;
}

static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_hash_base + offset);
}

static inline void s5p_hash_write(struct s5p_aes_dev *dd,
				  u32 offset, u32 value)
{
	__raw_writel(value, dd->io_hash_base + offset);
}

/**
 * s5p_set_dma_hashdata() - start DMA with sg
 * @dev: device
 * @sg: scatterlist ready to DMA transmit
 */
static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
				 const struct scatterlist *sg)
{
	dev->hash_sg_cnt--;
	SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg)); /* DMA starts */
}

/**
 * s5p_hash_rx() - get next hash_sg_iter
 * @dev: device
 *
 * Return:
 * 2	if there is no more data and it is UPDATE op
 * 1	if new receiving (input) data is ready and can be written to device
 * 0	if there is no more data and it is FINAL op
 */
static int s5p_hash_rx(struct s5p_aes_dev *dev)
{
	if (dev->hash_sg_cnt > 0) {
		dev->hash_sg_iter = sg_next(dev->hash_sg_iter);
		return 1;
	}

	set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags);
	if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))
		return 0;

	return 2;
}

static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
	struct skcipher_request *req;
	int err_dma_tx = 0;
	int err_dma_rx = 0;
	int err_dma_hx = 0;
	bool tx_end = false;
	bool hx_end = false;
	unsigned long flags;
	u32 status, st_bits;
	int err;

	spin_lock_irqsave(&dev->lock, flags);

	/*
	 * Handle rx or tx interrupt. If there is still data (scatterlist did
	 * not reach its end), then map the next scatterlist entry.
	 * In case of such mapping error, s5p_aes_complete() should be called.
	 *
	 * If there is no more data in the tx scatter list, call
	 * s5p_aes_complete() and schedule a new tasklet.
	 *
	 * Handle hx interrupt. If there is still data, map the next entry.
	 */
	status = SSS_READ(dev, FCINTSTAT);
	if (status & SSS_FCINTSTAT_BRDMAINT)
		err_dma_rx = s5p_aes_rx(dev);

	if (status & SSS_FCINTSTAT_BTDMAINT) {
		if (sg_is_last(dev->sg_dst))
			tx_end = true;
		err_dma_tx = s5p_aes_tx(dev);
	}

	if (status & SSS_FCINTSTAT_HRDMAINT)
		err_dma_hx = s5p_hash_rx(dev);

	st_bits = status & (SSS_FCINTSTAT_BRDMAINT | SSS_FCINTSTAT_BTDMAINT |
				SSS_FCINTSTAT_HRDMAINT);
	/* clear DMA bits */
	SSS_WRITE(dev, FCINTPEND, st_bits);

	/* clear HASH irq bits */
	if (status & (SSS_FCINTSTAT_HDONEINT | SSS_FCINTSTAT_HPARTINT)) {
		/* cannot have both HPART and HDONE */
		if (status & SSS_FCINTSTAT_HPARTINT)
			st_bits = SSS_HASH_STATUS_PARTIAL_DONE;

		if (status & SSS_FCINTSTAT_HDONEINT)
			st_bits = SSS_HASH_STATUS_MSG_DONE;

		set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags);
		s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits);
		hx_end = true;
		/* when DONE or PART, do not handle HASH DMA */
		err_dma_hx = 0;
	}

	if (err_dma_rx < 0) {
		err = err_dma_rx;
		goto error;
	}
	if (err_dma_tx < 0) {
		err = err_dma_tx;
		goto error;
	}

	if (tx_end) {
		s5p_sg_done(dev);
		if (err_dma_hx == 1)
			s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

		spin_unlock_irqrestore(&dev->lock, flags);

		s5p_aes_complete(dev->req, 0);
		/* Device is still busy */
		tasklet_schedule(&dev->tasklet);
	} else {
		/*
		 * Writing length of DMA block (either receiving or
		 * transmitting) will start the operation immediately, so this
		 * should be done at the end (even after clearing pending
		 * interrupts to not miss the interrupt).
		 */
		if (err_dma_tx == 1)
			s5p_set_dma_outdata(dev, dev->sg_dst);
		if (err_dma_rx == 1)
			s5p_set_dma_indata(dev, dev->sg_src);
		if (err_dma_hx == 1)
			s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

		spin_unlock_irqrestore(&dev->lock, flags);
	}

	goto hash_irq_end;

error:
	s5p_sg_done(dev);
	dev->busy = false;
	req = dev->req;
	if (err_dma_hx == 1)
		s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

	spin_unlock_irqrestore(&dev->lock, flags);
	s5p_aes_complete(req, err);

hash_irq_end:
	/*
	 * Note about the else if below: when hash_sg_iter reaches its end
	 * and it is an UPDATE op, issue SSS_HASH_PAUSE and wait for the
	 * HPART irq.
	 */
	if (hx_end)
		tasklet_schedule(&dev->hash_tasklet);
	else if (err_dma_hx == 2)
		s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE,
			       SSS_HASH_PAUSE);

	return IRQ_HANDLED;
}

/**
 * s5p_hash_read_msg() - read message or IV from HW
 * @req: AHASH request
 */
static void s5p_hash_read_msg(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_aes_dev *dd = ctx->dd;
	u32 *hash = (u32 *)ctx->digest;
	unsigned int i;

	for (i = 0; i < ctx->nregs; i++)
		hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i));
}

/**
 * s5p_hash_write_ctx_iv() - write IV for next partial/finup op.
 * @dd: device
 * @ctx: request context
 */
static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
				  const struct s5p_hash_reqctx *ctx)
{
	const u32 *hash = (const u32 *)ctx->digest;
	unsigned int i;

	for (i = 0; i < ctx->nregs; i++)
		s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]);
}

/**
 * s5p_hash_write_iv() - write IV for next partial/finup op.
 * @req: AHASH request
 */
static void s5p_hash_write_iv(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	s5p_hash_write_ctx_iv(ctx->dd, ctx);
}

/**
 * s5p_hash_copy_result() - copy digest into req->result
 * @req: AHASH request
 */
static void s5p_hash_copy_result(struct ahash_request *req)
{
	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return;

	memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
}

/**
 * s5p_hash_dma_flush() - flush HASH DMA
 * @dev: secss device
 */
static void s5p_hash_dma_flush(struct s5p_aes_dev *dev)
{
	SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH);
}

/**
 * s5p_hash_dma_enable() - enable DMA mode for HASH
 * @dev: secss device
 *
 * enable DMA mode for HASH
 */
static void s5p_hash_dma_enable(struct s5p_aes_dev *dev)
{
	s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA);
}

/**
 * s5p_hash_irq_disable() - disable HASH irq signals
 * @dev: secss device
 * @flags: bitfield with irq's to be disabled
 */
static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags)
{
	SSS_WRITE(dev, FCINTENCLR, flags);
}

/**
 * s5p_hash_irq_enable() - enable irq signals
 * @dev: secss device
 * @flags: bitfield with irq's to be enabled
 */
static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags)
{
	SSS_WRITE(dev, FCINTENSET, flags);
}

/**
 * s5p_hash_set_flow() - set flow inside SecSS AES/DES with/without HASH
 * @dev: secss device
 * @hashflow: HASH stream flow with/without crypto AES/DES
 */
static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow)
{
	unsigned long flags;
	u32 flow;

	spin_lock_irqsave(&dev->lock, flags);

	flow = SSS_READ(dev, FCFIFOCTRL);
	flow &= ~SSS_HASHIN_MASK;
	flow |= hashflow;

	SSS_WRITE(dev, FCFIFOCTRL, flow);

	spin_unlock_irqrestore(&dev->lock, flags);
}

/**
 * s5p_ahash_dma_init() - enable DMA and set HASH flow inside SecSS
 * @dev: secss device
 * @hashflow: HASH stream flow with/without AES/DES
 *
 * flush HASH DMA and enable DMA, set HASH stream flow inside SecSS HW,
 * enable HASH irq's HRDMA, HDONE, HPART
 */
static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
{
	s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR |
			     SSS_FCINTENCLR_HDONEINTENCLR |
			     SSS_FCINTENCLR_HPARTINTENCLR);
	s5p_hash_dma_flush(dev);

	s5p_hash_dma_enable(dev);
	s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK);
	s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET |
			    SSS_FCINTENSET_HDONEINTENSET |
			    SSS_FCINTENSET_HPARTINTENSET);
}

/**
 * s5p_hash_write_ctrl() - prepare HASH block in SecSS for processing
 * @dd: secss device
 * @length: length for request
 * @final: true if final op
 *
 * Prepare SSS HASH block for processing bytes in DMA mode. If it is called
 * after previous updates, fill up IV words. For final, calculate and set
 * lengths for HASH so SecSS can finalize hash. For partial, set SSS HASH
 * length as 2^63 so it will never be reached and set prelow and prehigh
 * to zero.
 *
 * This function does not start DMA transfer.
 */
static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length,
				bool final)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
	u32 prelow, prehigh, low, high;
	u32 configflags, swapflags;
	u64 tmplen;

	configflags = ctx->engine | SSS_HASH_INIT_BIT;

	if (likely(ctx->digcnt)) {
		s5p_hash_write_ctx_iv(dd, ctx);
		configflags |= SSS_HASH_USER_IV_EN;
	}

	if (final) {
		/* number of bytes for last part */
		low = length;
		high = 0;
		/* total number of bits prev hashed */
		tmplen = ctx->digcnt * 8;
		prelow = (u32)tmplen;
		prehigh = (u32)(tmplen >> 32);
	} else {
		prelow = 0;
		prehigh = 0;
		low = 0;
		high = BIT(31);
	}

	swapflags = SSS_HASH_BYTESWAP_DI | SSS_HASH_BYTESWAP_DO |
		    SSS_HASH_BYTESWAP_IV | SSS_HASH_BYTESWAP_KEY;

	s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low);
	s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high);
	s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow);
	s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh);

	s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags);
	s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags);
}

/**
 * s5p_hash_xmit_dma() - start DMA hash processing
 * @dd: secss device
 * @length: length for request
 * @final: true if final op
 *
 * Update digcnt here, as it is needed for finup/final op.
 */
static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length,
			     bool final)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
	unsigned int cnt;

	cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
	if (!cnt) {
		dev_err(dd->dev, "dma_map_sg error\n");
		ctx->error = true;
		return -EINVAL;
	}

	set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
	dd->hash_sg_iter = ctx->sg;
	dd->hash_sg_cnt = cnt;
	s5p_hash_write_ctrl(dd, length, final);
	ctx->digcnt += length;
	ctx->total -= length;

	/* catch last interrupt */
	if (final)
		set_bit(HASH_FLAGS_FINAL, &dd->hash_flags);

	s5p_set_dma_hashdata(dd, dd->hash_sg_iter); /* DMA starts */

	return -EINPROGRESS;
}

/**
 * s5p_hash_copy_sgs() - copy request's bytes into new buffer
 * @ctx: request context
 * @sg: source scatterlist request
 * @new_len: number of bytes to process from sg
 *
 * Allocate new buffer, copy data for HASH into it. If there was xmit_buf
 * filled, copy it first, then copy data from sg into it. Prepare one sgl[0]
 * with allocated buffer.
 *
 * Set bit in dd->hash_flags so we can free it after irq ends processing.
 */
static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
			     struct scatterlist *sg, unsigned int new_len)
{
	unsigned int pages, len;
	void *buf;

	len = new_len + ctx->bufcnt;
	pages = get_order(len);

	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
	if (!buf) {
		dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
		ctx->error = true;
		return -ENOMEM;
	}

	if (ctx->bufcnt)
		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);

	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
				 new_len, 0);
	sg_init_table(ctx->sgl, 1);
	sg_set_buf(ctx->sgl, buf, len);
	ctx->sg = ctx->sgl;
	ctx->sg_len = 1;
	ctx->bufcnt = 0;
	ctx->skip = 0;
	set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);

	return 0;
}

/**
 * s5p_hash_copy_sg_lists() - copy sg list and make fixes in copy
 * @ctx: request context
 * @sg: source scatterlist request
 * @new_len: number of bytes to process from sg
 *
 * Allocate new scatterlist table, copy data for HASH into it. If there was
 * xmit_buf filled, prepare it first, then copy page, length and offset from
 * source sg into it, adjusting begin and/or end for skip offset and
 * hash_later value.
 *
 * Resulting sg table will be assigned to ctx->sg. Set flag so we can free
 * it after irq ends processing.
 */
static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
				  struct scatterlist *sg, unsigned int new_len)
{
	unsigned int skip = ctx->skip, n = sg_nents(sg);
	struct scatterlist *tmp;
	unsigned int len;

	if (ctx->bufcnt)
		n++;

	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
	if (!ctx->sg) {
		ctx->error = true;
		return -ENOMEM;
	}

	sg_init_table(ctx->sg, n);

	tmp = ctx->sg;

	ctx->sg_len = 0;

	if (ctx->bufcnt) {
		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
		tmp = sg_next(tmp);
		ctx->sg_len++;
	}

	while (sg && skip >= sg->length) {
		skip -= sg->length;
		sg = sg_next(sg);
	}

	while (sg && new_len) {
		len = sg->length - skip;
		if (new_len < len)
			len = new_len;

		new_len -= len;
		sg_set_page(tmp, sg_page(sg), len, sg->offset + skip);
		skip = 0;
		if (new_len <= 0)
			sg_mark_end(tmp);

		tmp = sg_next(tmp);
		ctx->sg_len++;
		sg = sg_next(sg);
	}

	set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);

	return 0;
}

/**
 * s5p_hash_prepare_sgs() - prepare sg for processing
 * @ctx: request context
 * @sg: source scatterlist request
 * @new_len: number of bytes to process from sg
 * @final: final flag
 *
 * Check two conditions: (1) if buffers in sg have len-aligned data, and (2)
 * the sg table has well-aligned elements (list_ok). If one of these checks
 * fails, then either (1) allocate a new buffer for the data with
 * s5p_hash_copy_sgs, copy the data into this buffer and prepare the request
 * in sgl, or (2) allocate a new sg table and prepare the sg elements.
 *
 * For digest or finup all conditions can be good, and we may not need any
 * fixes.
 */
static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
				struct scatterlist *sg,
				unsigned int new_len, bool final)
{
	unsigned int skip = ctx->skip, nbytes = new_len, n = 0;
	bool aligned = true, list_ok = true;
	struct scatterlist *sg_tmp = sg;

	if (!sg || !sg->length || !new_len)
		return 0;

	if (skip || !final)
		list_ok = false;

	while (nbytes > 0 && sg_tmp) {
		n++;
		if (skip >= sg_tmp->length) {
			skip -= sg_tmp->length;
			if (!sg_tmp->length) {
				aligned = false;
				break;
			}
		} else {
			if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) {
				aligned = false;
				break;
			}

			if (nbytes < sg_tmp->length - skip) {
				list_ok = false;
				break;
			}

			nbytes -= sg_tmp->length - skip;
			skip = 0;
		}

		sg_tmp = sg_next(sg_tmp);
	}

	if (!aligned)
		return s5p_hash_copy_sgs(ctx, sg, new_len);
	else if (!list_ok)
		return s5p_hash_copy_sg_lists(ctx, sg, new_len);

	/*
	 * Have aligned data from previous operation and/or current
	 * Note: will enter here only if (digest or finup) and aligned
	 */
	if (ctx->bufcnt) {
		ctx->sg_len = n;
		sg_init_table(ctx->sgl, 2);
		sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
		sg_chain(ctx->sgl, 2, sg);
		ctx->sg = ctx->sgl;
		ctx->sg_len++;
	} else {
		ctx->sg = sg;
		ctx->sg_len = n;
	}

	return 0;
}

/**
 * s5p_hash_prepare_request() - prepare request for processing
 * @req: AHASH request
 * @update: true if UPDATE op
 *
 * Note 1: we can have update flag _and_ final flag at the same time.
 * Note 2: we enter here when digcnt > BUFLEN (=HASH_BLOCK_SIZE) or
 *	   either req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN or
 *	   we have final op
 */
static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	bool final = ctx->finup;
	int xmit_len, hash_later, nbytes;
	int ret;

	if (update)
		nbytes = req->nbytes;
	else
		nbytes = 0;

	ctx->total = nbytes + ctx->bufcnt;
	if (!ctx->total)
		return 0;

	if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) {
		/* bytes left from previous request, so fill up to BUFLEN */
		int len = BUFLEN - ctx->bufcnt % BUFLEN;

		if (len > nbytes)
			len = nbytes;

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
					 0, len, 0);
		ctx->bufcnt += len;
		nbytes -= len;
		ctx->skip = len;
	} else {
		ctx->skip = 0;
	}

	if (ctx->bufcnt)
		memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);

	xmit_len = ctx->total;
	if (final) {
		hash_later = 0;
	} else {
		if (IS_ALIGNED(xmit_len, BUFLEN))
			xmit_len -= BUFLEN;
		else
			xmit_len -= xmit_len & (BUFLEN - 1);

		hash_later = ctx->total - xmit_len;
		/* copy hash_later bytes from end of req->src */
		/* previous bytes are in xmit_buf, so no overwrite */
		scatterwalk_map_and_copy(ctx->buffer, req->src,
					 req->nbytes - hash_later,
					 hash_later, 0);
	}

	if (xmit_len > BUFLEN) {
		ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
					   final);
		if (ret)
			return ret;
	} else {
		/* have buffered data only */
		if (unlikely(!ctx->bufcnt)) {
			/* first update didn't fill up buffer */
			scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
						 0, xmit_len, 0);
		}

		sg_init_table(ctx->sgl, 1);
		sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);

		ctx->sg = ctx->sgl;
		ctx->sg_len = 1;
	}

	ctx->bufcnt = hash_later;
	if (!final)
		ctx->total = xmit_len;

	return 0;
}

/**
 * s5p_hash_update_dma_stop() - unmap DMA
 * @dd: secss device
 *
 * Unmap scatterlist ctx->sg.
 */
static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
{
	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);

	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
	clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
}

/**
 * s5p_hash_finish() - copy calculated digest to crypto layer
 * @req: AHASH request
 */
static void s5p_hash_finish(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_aes_dev *dd = ctx->dd;

	if (ctx->digcnt)
		s5p_hash_copy_result(req);

	dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
}

/**
 * s5p_hash_finish_req() - finish request
 * @req: AHASH request
 * @err: error
 */
static void s5p_hash_finish_req(struct ahash_request *req, int err)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_aes_dev *dd = ctx->dd;
	unsigned long flags;

	if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags))
		free_pages((unsigned long)sg_virt(ctx->sg),
			   get_order(ctx->sg->length));

	if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags))
		kfree(ctx->sg);

	ctx->sg = NULL;
	dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) |
			    BIT(HASH_FLAGS_SGS_COPIED));

	if (!err && !ctx->error) {
		s5p_hash_read_msg(req);
		if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags))
			s5p_hash_finish(req);
	} else {
		ctx->error = true;
	}

	spin_lock_irqsave(&dd->hash_lock, flags);
	dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) |
			    BIT(HASH_FLAGS_DMA_READY) |
			    BIT(HASH_FLAGS_OUTPUT_READY));
	spin_unlock_irqrestore(&dd->hash_lock, flags);

	if (req->base.complete)
		ahash_request_complete(req, err);
}

/**
 * s5p_hash_handle_queue() - handle hash queue
 * @dd: device s5p_aes_dev
 * @req: AHASH request
 *
 * If req != NULL, enqueue it on dd->hash_queue; if HASH_FLAGS_BUSY is not
 * set on the device, then process the first request from dd->hash_queue.
 *
 * Returns: see s5p_hash_final below.
 */
static int s5p_hash_handle_queue(struct s5p_aes_dev *dd,
				 struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct s5p_hash_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

retry:
	spin_lock_irqsave(&dd->hash_lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->hash_queue, req);

	if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
		spin_unlock_irqrestore(&dd->hash_lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->hash_queue);
	async_req = crypto_dequeue_request(&dd->hash_queue);
	if (async_req)
		set_bit(HASH_FLAGS_BUSY, &dd->hash_flags);

	spin_unlock_irqrestore(&dd->hash_lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->hash_req = req;
	ctx = ahash_request_ctx(req);

	err = s5p_hash_prepare_request(req, ctx->op_update);
	if (err || !ctx->total)
		goto out;

	dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n",
		ctx->op_update, req->nbytes);

	s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT);
	if (ctx->digcnt)
		s5p_hash_write_iv(req); /* restore hash IV */

	if (ctx->op_update) { /* HASH_OP_UPDATE */
		err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup);
		if (err != -EINPROGRESS && ctx->finup && !ctx->error)
			/* no final() after finup() */
			err = s5p_hash_xmit_dma(dd, ctx->total, true);
	} else { /* HASH_OP_FINAL */
		err = s5p_hash_xmit_dma(dd, ctx->total, true);
	}
out:
	if (err != -EINPROGRESS) {
		/* hash_tasklet_cb will not finish it, so do it here */
		s5p_hash_finish_req(req, err);
		req = NULL;

		/*
		 * Execute next request immediately if there is anything
		 * in queue.
		 */
		goto retry;
	}

	return ret;
}

/**
 * s5p_hash_tasklet_cb() - hash tasklet
 * @data: ptr to s5p_aes_dev
 */
static void s5p_hash_tasklet_cb(unsigned long data)
{
	struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data;

	if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
		s5p_hash_handle_queue(dd, NULL);
		return;
	}

	if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) {
		if (test_and_clear_bit(HASH_FLAGS_DMA_ACTIVE,
				       &dd->hash_flags)) {
			s5p_hash_update_dma_stop(dd);
		}

		if (test_and_clear_bit(HASH_FLAGS_OUTPUT_READY,
				       &dd->hash_flags)) {
			/* hash or semi-hash ready */
			clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
			goto finish;
		}
	}

	return;

finish:
	/* finish current request */
	s5p_hash_finish_req(dd->hash_req, 0);

	/* If we are not busy, process next req */
	if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags))
		s5p_hash_handle_queue(dd, NULL);
}

/**
 * s5p_hash_enqueue() - enqueue request
 * @req: AHASH request
 * @op: operation UPDATE (true) or FINAL (false)
 *
 * Returns: see s5p_hash_final below.
 */
static int s5p_hash_enqueue(struct ahash_request *req, bool op)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);

	ctx->op_update = op;

	return s5p_hash_handle_queue(tctx->dd, req);
}

/**
 * s5p_hash_update() - process the hash input data
 * @req: AHASH request
 *
 * If the request will fit in the buffer, copy it and return immediately,
 * else enqueue it with OP_UPDATE.
 *
 * Returns: see s5p_hash_final below.
 */
static int s5p_hash_update(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	if (ctx->bufcnt + req->nbytes <= BUFLEN) {
		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
					 0, req->nbytes, 0);
		ctx->bufcnt += req->nbytes;
		return 0;
	}

	return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */
}

/**
 * s5p_hash_final() - close up hash and calculate digest
 * @req: AHASH request
 *
 * Note: in final, req->src does not contain any data and req->nbytes can be
 * non-zero.
 *
 * If no input data has been processed yet and the buffered hash data is
 * less than BUFLEN (64), then calculate the final hash immediately by using
 * the SW algorithm fallback.
 *
 * Otherwise enqueue the current AHASH request with the OP_FINAL operation op
 * and finalize the hash message in HW. Note that if digcnt != 0 then there
 * was a previous update op, so there are always some buffered bytes in
 * ctx->buffer, which means that ctx->bufcnt != 0.
 *
 * Returns:
 * 0 if the request has been processed immediately,
 * -EINPROGRESS if the operation has been queued for later execution or is set
 *		to processing by HW,
 * -EBUSY if queue is full and request should be resubmitted later,
 * other negative values denote an error.
 */
static int s5p_hash_final(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	ctx->finup = true;
	if (ctx->error)
		return -EINVAL; /* uncompleted hash is not needed */

	if (!ctx->digcnt && ctx->bufcnt < BUFLEN) {
		struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);

		return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer,
					       ctx->bufcnt, req->result);
	}

	return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */
}
/**
 * s5p_hash_finup() - process last req->src and calculate digest
 * @req: AHASH request containing the last update data
 *
 * Return values: see s5p_hash_final above.
 */
static int s5p_hash_finup(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->finup = true;

	err1 = s5p_hash_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() has to be called even if update() failed (except for
	 * -EINPROGRESS) in order to clean up resources, and to calculate the
	 * digest when the input was small enough to stay in the buffer.
	 */
	err2 = s5p_hash_final(req);

	return err1 ?: err2;
}
/**
 * s5p_hash_init() - initialize AHASH request context
 * @req: AHASH request
 *
 * Init async hash request context.
 */
static int s5p_hash_init(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);

	ctx->dd = tctx->dd;
	ctx->error = false;
	ctx->finup = false;
	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->total = 0;
	ctx->skip = 0;

	dev_dbg(tctx->dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case MD5_DIGEST_SIZE:
		ctx->engine = SSS_HASH_ENGINE_MD5;
		ctx->nregs = HASH_MD5_MAX_REG;
		break;
	case SHA1_DIGEST_SIZE:
		ctx->engine = SSS_HASH_ENGINE_SHA1;
		ctx->nregs = HASH_SHA1_MAX_REG;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->engine = SSS_HASH_ENGINE_SHA256;
		ctx->nregs = HASH_SHA256_MAX_REG;
		break;
	default:
		ctx->error = true;
		return -EINVAL;
	}

	return 0;
}
/**
 * s5p_hash_digest - calculate digest from req->src
 * @req: AHASH request
 *
 * Return values: see s5p_hash_final above.
 */
static int s5p_hash_digest(struct ahash_request *req)
{
	return s5p_hash_init(req) ?: s5p_hash_finup(req);
}

/**
 * s5p_hash_cra_init_alg - init crypto alg transformation
 * @tfm: crypto transformation
 */
static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm)
{
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	tctx->dd = s5p_dev;
	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("fallback alloc fails for '%s'\n", alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct s5p_hash_reqctx) + BUFLEN);

	return 0;
}
/**
 * s5p_hash_cra_init - init crypto tfm
 * @tfm: crypto transformation
 */
static int s5p_hash_cra_init(struct crypto_tfm *tfm)
{
	return s5p_hash_cra_init_alg(tfm);
}

/**
 * s5p_hash_cra_exit - exit crypto tfm
 * @tfm: crypto transformation
 *
 * Free the allocated fallback transformation.
 */
static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;
}
/**
 * s5p_hash_export - export hash state
 * @req: AHASH request
 * @out: buffer for exported state
 */
static int s5p_hash_export(struct ahash_request *req, void *out)
{
	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);

	return 0;
}

/**
 * s5p_hash_import - import hash state
 * @req: AHASH request
 * @in: buffer with state to be imported from
 */
static int s5p_hash_import(struct ahash_request *req, const void *in)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
	const struct s5p_hash_reqctx *ctx_in = in;

	memcpy(ctx, in, sizeof(*ctx) + BUFLEN);
	if (ctx_in->bufcnt > BUFLEN) {
		ctx->error = true;
		return -EINVAL;
	}

	ctx->dd = tctx->dd;
	ctx->error = false;

	return 0;
}
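
/*
 * export/import let a partially hashed state be carried across requests.
 * A minimal caller-side round-trip sketch (illustrative only; the generic
 * crypto_ahash_statesize()/export/import helpers are part of the crypto API,
 * not of this driver, and error handling is trimmed):
 *
 *	u8 *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
 *
 *	err = crypto_ahash_export(req, state);
 *	...
 *	err = crypto_ahash_import(req2, state);
 */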
static struct ahash_alg algs_sha1_md5_sha256[] = {
{
	.init = s5p_hash_init,
	.update = s5p_hash_update,
	.final = s5p_hash_final,
	.finup = s5p_hash_finup,
	.digest = s5p_hash_digest,
	.export = s5p_hash_export,
	.import = s5p_hash_import,
	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
	.halg.digestsize = SHA1_DIGEST_SIZE,
	.halg.base = {
		.cra_name = "sha1",
		.cra_driver_name = "exynos-sha1",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
			     CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = HASH_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct s5p_hash_ctx),
		.cra_module = THIS_MODULE,
		.cra_init = s5p_hash_cra_init,
		.cra_exit = s5p_hash_cra_exit,
	}
},
{
	.init = s5p_hash_init,
	.update = s5p_hash_update,
	.final = s5p_hash_final,
	.finup = s5p_hash_finup,
	.digest = s5p_hash_digest,
	.export = s5p_hash_export,
	.import = s5p_hash_import,
	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
	.halg.digestsize = MD5_DIGEST_SIZE,
	.halg.base = {
		.cra_name = "md5",
		.cra_driver_name = "exynos-md5",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
			     CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = HASH_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct s5p_hash_ctx),
		.cra_module = THIS_MODULE,
		.cra_init = s5p_hash_cra_init,
		.cra_exit = s5p_hash_cra_exit,
	}
},
{
	.init = s5p_hash_init,
	.update = s5p_hash_update,
	.final = s5p_hash_final,
	.finup = s5p_hash_finup,
	.digest = s5p_hash_digest,
	.export = s5p_hash_export,
	.import = s5p_hash_import,
	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
	.halg.digestsize = SHA256_DIGEST_SIZE,
	.halg.base = {
		.cra_name = "sha256",
		.cra_driver_name = "exynos-sha256",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
			     CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = HASH_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct s5p_hash_ctx),
		.cra_module = THIS_MODULE,
		.cra_init = s5p_hash_cra_init,
		.cra_exit = s5p_hash_cra_exit,
	}
}
};
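
/*
 * The ahash algorithms above register under the generic names, so a kernel
 * consumer normally asks for e.g. "sha256" and may get this driver depending
 * on priority. A minimal, illustrative consumer sketch (not part of this
 * driver; buf, len and result are assumed to exist and error handling is
 * trimmed):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, result, len);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 */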
static void s5p_set_aes(struct s5p_aes_dev *dev,
			const u8 *key, const u8 *iv, const u8 *ctr,
			unsigned int keylen)
{
	void __iomem *keystart;

	if (iv)
		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv,
			    AES_BLOCK_SIZE);

	if (ctr)
		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr,
			    AES_BLOCK_SIZE);

	/*
	 * The key register block holds up to a 256-bit key; shorter keys are
	 * written at a word offset (2 words for 192-bit, 4 words for 128-bit)
	 * so that they end at the same place.
	 */
	if (keylen == AES_KEYSIZE_256)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
	else if (keylen == AES_KEYSIZE_192)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
	else
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);

	memcpy_toio(keystart, key, keylen);
}
static bool s5p_is_sg_aligned(struct scatterlist *sg)
{
	while (sg) {
		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;
		sg = sg_next(sg);
	}

	return true;
}
static int s5p_set_indata_start(struct s5p_aes_dev *dev,
				struct skcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_src_cpy = NULL;
	sg = req->src;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned source scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
		if (err)
			return err;

		sg = dev->sg_src_cpy;
	}

	err = s5p_set_indata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
		return err;
	}

	return 0;
}

static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
				 struct skcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_dst_cpy = NULL;
	sg = req->dst;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned dest scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
		if (err)
			return err;

		sg = dev->sg_dst_cpy;
	}

	err = s5p_set_outdata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
		return err;
	}

	return 0;
}
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
	struct skcipher_request *req = dev->req;
	u32 aes_control;
	unsigned long flags;
	int err;
	u8 *iv, *ctr;

	/* This sets bits [13:12] to 00, which selects the 128-bit counter */
	aes_control = SSS_AES_KEY_CHANGE_MODE;
	if (mode & FLAGS_AES_DECRYPT)
		aes_control |= SSS_AES_MODE_DECRYPT;

	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
		aes_control |= SSS_AES_CHAIN_MODE_CBC;
		iv = req->iv;
		ctr = NULL;
	} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
		aes_control |= SSS_AES_CHAIN_MODE_CTR;
		iv = NULL;
		ctr = req->iv;
	} else {
		iv = NULL; /* AES_ECB */
		ctr = NULL;
	}

	if (dev->ctx->keylen == AES_KEYSIZE_192)
		aes_control |= SSS_AES_KEY_SIZE_192;
	else if (dev->ctx->keylen == AES_KEYSIZE_256)
		aes_control |= SSS_AES_KEY_SIZE_256;

	aes_control |= SSS_AES_FIFO_MODE;

	/* as a variant it is possible to use byte swapping on DMA side */
	aes_control |= SSS_AES_BYTESWAP_DI
		    |  SSS_AES_BYTESWAP_DO
		    |  SSS_AES_BYTESWAP_IV
		    |  SSS_AES_BYTESWAP_KEY
		    |  SSS_AES_BYTESWAP_CNT;

	spin_lock_irqsave(&dev->lock, flags);

	SSS_WRITE(dev, FCINTENCLR,
		  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
	SSS_WRITE(dev, FCFIFOCTRL, 0x00);

	err = s5p_set_indata_start(dev, req);
	if (err)
		goto indata_error;

	err = s5p_set_outdata_start(dev, req);
	if (err)
		goto outdata_error;

	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
	s5p_set_aes(dev, dev->ctx->aes_key, iv, ctr, dev->ctx->keylen);

	s5p_set_dma_indata(dev, dev->sg_src);
	s5p_set_dma_outdata(dev, dev->sg_dst);

	SSS_WRITE(dev, FCINTENSET,
		  SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);

	spin_unlock_irqrestore(&dev->lock, flags);

	return;

outdata_error:
	s5p_unset_indata(dev);

indata_error:
	s5p_sg_done(dev);
	dev->busy = false;
	spin_unlock_irqrestore(&dev->lock, flags);
	s5p_aes_complete(req, err);
}
static void s5p_tasklet_cb(unsigned long data)
{
	struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
	struct crypto_async_request *async_req, *backlog;
	struct s5p_aes_reqctx *reqctx;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	if (!async_req) {
		dev->busy = false;
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	dev->req = skcipher_request_cast(async_req);
	dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
	reqctx = skcipher_request_ctx(dev->req);

	s5p_aes_crypt_start(dev, reqctx->mode);
}
static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
			      struct skcipher_request *req)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dev->lock, flags);
	err = crypto_enqueue_request(&dev->queue, &req->base);
	if (dev->busy) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return err;
	}
	dev->busy = true;

	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->tasklet);

	return err;
}

static int s5p_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);
	struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct s5p_aes_dev *dev = ctx->dev;

	if (!req->cryptlen)
		return 0;

	if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE) &&
	    ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) {
		dev_dbg(dev->dev, "request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	reqctx->mode = mode;

	return s5p_aes_handle_req(dev, req);
}
static int s5p_aes_setkey(struct crypto_skcipher *cipher,
			  const u8 *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->aes_key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int s5p_aes_ecb_encrypt(struct skcipher_request *req)
{
	return s5p_aes_crypt(req, 0);
}

static int s5p_aes_ecb_decrypt(struct skcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
}

static int s5p_aes_cbc_encrypt(struct skcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_CBC);
}

static int s5p_aes_cbc_decrypt(struct skcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}

static int s5p_aes_ctr_crypt(struct skcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_CTR);
}

static int s5p_aes_init_tfm(struct crypto_skcipher *tfm)
{
	struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->dev = s5p_dev;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct s5p_aes_reqctx));

	return 0;
}
static struct skcipher_alg algs[] = {
	{
		.base.cra_name = "ecb(aes)",
		.base.cra_driver_name = "ecb-aes-s5p",
		.base.cra_priority = 100,
		.base.cra_flags = CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
		.base.cra_alignmask = 0x0f,
		.base.cra_module = THIS_MODULE,

		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = s5p_aes_setkey,
		.encrypt = s5p_aes_ecb_encrypt,
		.decrypt = s5p_aes_ecb_decrypt,
		.init = s5p_aes_init_tfm,
	},
	{
		.base.cra_name = "cbc(aes)",
		.base.cra_driver_name = "cbc-aes-s5p",
		.base.cra_priority = 100,
		.base.cra_flags = CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
		.base.cra_alignmask = 0x0f,
		.base.cra_module = THIS_MODULE,

		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = s5p_aes_setkey,
		.encrypt = s5p_aes_cbc_encrypt,
		.decrypt = s5p_aes_cbc_decrypt,
		.init = s5p_aes_init_tfm,
	},
	{
		.base.cra_name = "ctr(aes)",
		.base.cra_driver_name = "ctr-aes-s5p",
		.base.cra_priority = 100,
		.base.cra_flags = CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.base.cra_blocksize = 1,
		.base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
		.base.cra_alignmask = 0x0f,
		.base.cra_module = THIS_MODULE,

		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = s5p_aes_setkey,
		.encrypt = s5p_aes_ctr_crypt,
		.decrypt = s5p_aes_ctr_crypt,
		.init = s5p_aes_init_tfm,
	},
};
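
/*
 * Caller-side sketch for the skcipher algorithms above (illustrative only,
 * not part of this driver; key, iv and the src/dst scatterlists are assumed
 * to be prepared by the caller, and error handling is trimmed):
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	DECLARE_CRYPTO_WAIT(wait);
 *
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, src, dst, len, iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 */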
static int s5p_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int i, j, err;
	const struct samsung_aes_variant *variant;
	struct s5p_aes_dev *pdata;
	struct resource *res;
	unsigned int hash_i;

	if (s5p_dev)
		return -EEXIST;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	variant = find_s5p_sss_version(pdev);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	/*
	 * Note: HASH and PRNG use the same registers in secss, so avoid
	 * overwriting each other. HASH is dropped when CONFIG_EXYNOS_RNG
	 * is enabled in the config. The HASH registers need a larger
	 * resource size; the current one describes only AES/DES.
	 */
	if (IS_ENABLED(CONFIG_CRYPTO_DEV_EXYNOS_HASH)) {
		if (variant == &exynos_aes_data) {
			res->end += 0x300;
			pdata->use_hash = true;
		}
	}
	pdata->res = res;
	pdata->ioaddr = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->ioaddr)) {
		if (!pdata->use_hash)
			return PTR_ERR(pdata->ioaddr);
		/* try AES without HASH */
		res->end -= 0x300;
		pdata->use_hash = false;
		pdata->ioaddr = devm_ioremap_resource(dev, res);
		if (IS_ERR(pdata->ioaddr))
			return PTR_ERR(pdata->ioaddr);
	}

	pdata->clk = devm_clk_get(dev, variant->clk_names[0]);
	if (IS_ERR(pdata->clk))
		return dev_err_probe(dev, PTR_ERR(pdata->clk),
				     "failed to find secss clock %s\n",
				     variant->clk_names[0]);

	err = clk_prepare_enable(pdata->clk);
	if (err < 0) {
		dev_err(dev, "Enabling clock %s failed, err %d\n",
			variant->clk_names[0], err);
		return err;
	}

	if (variant->clk_names[1]) {
		pdata->pclk = devm_clk_get(dev, variant->clk_names[1]);
		if (IS_ERR(pdata->pclk)) {
			err = dev_err_probe(dev, PTR_ERR(pdata->pclk),
					    "failed to find clock %s\n",
					    variant->clk_names[1]);
			goto err_clk;
		}

		err = clk_prepare_enable(pdata->pclk);
		if (err < 0) {
			dev_err(dev, "Enabling clock %s failed, err %d\n",
				variant->clk_names[1], err);
			goto err_clk;
		}
	} else {
		pdata->pclk = NULL;
	}
	spin_lock_init(&pdata->lock);
	spin_lock_init(&pdata->hash_lock);

	pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;
	pdata->io_hash_base = pdata->ioaddr + variant->hash_offset;

	pdata->irq_fc = platform_get_irq(pdev, 0);
	if (pdata->irq_fc < 0) {
		err = pdata->irq_fc;
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}
	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
					s5p_aes_interrupt, IRQF_ONESHOT,
					pdev->name, pdev);
	if (err < 0) {
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}

	pdata->busy = false;
	pdata->dev = dev;
	platform_set_drvdata(pdev, pdata);
	s5p_dev = pdata;

	tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
	crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_skcipher(&algs[i]);
		if (err)
			goto err_algs;
	}

	if (pdata->use_hash) {
		tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb,
			     (unsigned long)pdata);
		crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH);

		for (hash_i = 0; hash_i < ARRAY_SIZE(algs_sha1_md5_sha256);
		     hash_i++) {
			struct ahash_alg *alg;

			alg = &algs_sha1_md5_sha256[hash_i];
			err = crypto_register_ahash(alg);
			if (err) {
				dev_err(dev, "can't register '%s': %d\n",
					alg->halg.base.cra_driver_name, err);
				goto err_hash;
			}
		}
	}

	dev_info(dev, "s5p-sss driver registered\n");

	return 0;

err_hash:
	for (j = hash_i - 1; j >= 0; j--)
		crypto_unregister_ahash(&algs_sha1_md5_sha256[j]);

	tasklet_kill(&pdata->hash_tasklet);
	res->end -= 0x300;

err_algs:
	if (i < ARRAY_SIZE(algs))
		dev_err(dev, "can't register '%s': %d\n", algs[i].base.cra_name,
			err);

	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(&algs[j]);

	tasklet_kill(&pdata->tasklet);

err_irq:
	clk_disable_unprepare(pdata->pclk);

err_clk:
	clk_disable_unprepare(pdata->clk);
	s5p_dev = NULL;

	return err;
}
static void s5p_aes_remove(struct platform_device *pdev)
{
	struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_skcipher(&algs[i]);

	tasklet_kill(&pdata->tasklet);
	if (pdata->use_hash) {
		for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--)
			crypto_unregister_ahash(&algs_sha1_md5_sha256[i]);

		pdata->res->end -= 0x300;
		tasklet_kill(&pdata->hash_tasklet);
		pdata->use_hash = false;
	}

	clk_disable_unprepare(pdata->pclk);
	clk_disable_unprepare(pdata->clk);
	s5p_dev = NULL;
}
static struct platform_driver s5p_aes_crypto = {
	.probe = s5p_aes_probe,
	.remove_new = s5p_aes_remove,
	.driver = {
		.name = "s5p-secss",
		.of_match_table = s5p_sss_dt_match,
	},
};

module_platform_driver(s5p_aes_crypto);

MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");
MODULE_AUTHOR("Kamil Konieczny <k.konieczny@partner.samsung.com>");