mtk-aes.c

/*
 * Cryptographic API.
 *
 * Driver for EIP97 AES acceleration.
 *
 * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Some ideas are from atmel-aes.c drivers.
 */

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include "mtk-platform.h"

#define AES_QUEUE_SIZE 512
#define AES_BUF_ORDER 2
#define AES_BUF_SIZE ((PAGE_SIZE << AES_BUF_ORDER) \
		      & ~(AES_BLOCK_SIZE - 1))
#define AES_MAX_STATE_BUF_SIZE SIZE_IN_WORDS(AES_KEYSIZE_256 + \
					     AES_BLOCK_SIZE * 2)
#define AES_MAX_CT_SIZE 6

#define AES_CT_CTRL_HDR cpu_to_le32(0x00220000)

/* AES-CBC/ECB/CTR command token */
#define AES_CMD0 cpu_to_le32(0x05000000)
#define AES_CMD1 cpu_to_le32(0x2d060000)
#define AES_CMD2 cpu_to_le32(0xe4a63806)
/* AES-GCM command token */
#define AES_GCM_CMD0 cpu_to_le32(0x0b000000)
#define AES_GCM_CMD1 cpu_to_le32(0xa0800000)
#define AES_GCM_CMD2 cpu_to_le32(0x25000010)
#define AES_GCM_CMD3 cpu_to_le32(0x0f020000)
#define AES_GCM_CMD4 cpu_to_le32(0x21e60000)
#define AES_GCM_CMD5 cpu_to_le32(0x40e60000)
#define AES_GCM_CMD6 cpu_to_le32(0xd0070000)

/* AES transform information word 0 fields */
#define AES_TFM_BASIC_OUT cpu_to_le32(0x4 << 0)
#define AES_TFM_BASIC_IN cpu_to_le32(0x5 << 0)
#define AES_TFM_GCM_OUT cpu_to_le32(0x6 << 0)
#define AES_TFM_GCM_IN cpu_to_le32(0xf << 0)
#define AES_TFM_SIZE(x) cpu_to_le32((x) << 8)
#define AES_TFM_128BITS cpu_to_le32(0xb << 16)
#define AES_TFM_192BITS cpu_to_le32(0xd << 16)
#define AES_TFM_256BITS cpu_to_le32(0xf << 16)
#define AES_TFM_GHASH_DIGEST cpu_to_le32(0x2 << 21)
#define AES_TFM_GHASH cpu_to_le32(0x4 << 23)
/* AES transform information word 1 fields */
#define AES_TFM_ECB cpu_to_le32(0x0 << 0)
#define AES_TFM_CBC cpu_to_le32(0x1 << 0)
#define AES_TFM_CTR_INIT cpu_to_le32(0x2 << 0)	/* init counter to 1 */
#define AES_TFM_CTR_LOAD cpu_to_le32(0x6 << 0)	/* load/reuse counter */
#define AES_TFM_3IV cpu_to_le32(0x7 << 5)	/* using IV 0-2 */
#define AES_TFM_FULL_IV cpu_to_le32(0xf << 5)	/* using IV 0-3 */
#define AES_TFM_IV_CTR_MODE cpu_to_le32(0x1 << 10)
#define AES_TFM_ENC_HASH cpu_to_le32(0x1 << 17)

/* AES flags */
#define AES_FLAGS_CIPHER_MSK GENMASK(2, 0)
#define AES_FLAGS_ECB BIT(0)
#define AES_FLAGS_CBC BIT(1)
#define AES_FLAGS_CTR BIT(2)
#define AES_FLAGS_GCM BIT(3)
#define AES_FLAGS_ENCRYPT BIT(4)
#define AES_FLAGS_BUSY BIT(5)

#define AES_AUTH_TAG_ERR cpu_to_le32(BIT(26))

/**
 * mtk_aes_info - hardware information of AES
 * @cmd: command token, hardware instruction
 * @tfm: transform state of cipher algorithm.
 * @state: contains keys and initial vectors.
 *
 * Memory layout of GCM buffer:
 * /-----------\
 * |  AES KEY  | 128/192/256 bits
 * |-----------|
 * |  HASH KEY | a string of 128 zero bits encrypted using the block cipher
 * |-----------|
 * |    IVs    | 4 * 4 bytes
 * \-----------/
 *
 * The engine requires all of this information to:
 * - decode commands and control the engine's data path.
 * - coordinate hardware data fetch and store operations.
 * - construct and output the result token.
 */
struct mtk_aes_info {
	__le32 cmd[AES_MAX_CT_SIZE];
	__le32 tfm[2];
	__le32 state[AES_MAX_STATE_BUF_SIZE];
};

struct mtk_aes_reqctx {
	u64 mode;
};

struct mtk_aes_base_ctx {
	struct mtk_cryp *cryp;
	u32 keylen;
	__le32 keymode;

	mtk_aes_fn start;

	struct mtk_aes_info info;
	dma_addr_t ct_dma;
	dma_addr_t tfm_dma;

	__le32 ct_hdr;
	u32 ct_size;
};

struct mtk_aes_ctx {
	struct mtk_aes_base_ctx base;
};

struct mtk_aes_ctr_ctx {
	struct mtk_aes_base_ctx base;

	u32 iv[AES_BLOCK_SIZE / sizeof(u32)];
	size_t offset;
	struct scatterlist src[2];
	struct scatterlist dst[2];
};

struct mtk_aes_gcm_ctx {
	struct mtk_aes_base_ctx base;

	u32 authsize;
	size_t textlen;

	struct crypto_skcipher *ctr;
};

struct mtk_aes_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

static struct mtk_aes_drv mtk_aes = {
	.dev_list = LIST_HEAD_INIT(mtk_aes.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock),
};

static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset)
{
	return readl_relaxed(cryp->base + offset);
}

static inline void mtk_aes_write(struct mtk_cryp *cryp,
				 u32 offset, u32 value)
{
	writel_relaxed(value, cryp->base + offset);
}
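
/* Pick the first registered EIP97 device and cache it in the context. */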
static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx)
{
	struct mtk_cryp *cryp = NULL;
	struct mtk_cryp *tmp;

	spin_lock_bh(&mtk_aes.lock);
	if (!ctx->cryp) {
		list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) {
			cryp = tmp;
			break;
		}
		ctx->cryp = cryp;
	} else {
		cryp = ctx->cryp;
	}
	spin_unlock_bh(&mtk_aes.lock);

	return cryp;
}

static inline size_t mtk_aes_padlen(size_t len)
{
	len &= AES_BLOCK_SIZE - 1;
	return len ? AES_BLOCK_SIZE - len : 0;
}
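
/*
 * Check that the scatterlist is DMA friendly: the total length must be a
 * multiple of the AES block size and every entry word-aligned. If the last
 * entry is longer than needed, trim it and remember the remainder so it can
 * be restored after the transfer is unmapped.
 */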
static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len,
				  struct mtk_aes_dma *dma)
{
	int nents;

	if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
		return false;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		if (len <= sg->length) {
			if (!IS_ALIGNED(len, AES_BLOCK_SIZE))
				return false;

			dma->nents = nents + 1;
			dma->remainder = sg->length - len;
			sg->length = len;
			return true;
		}

		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;

		len -= sg->length;
	}

	return false;
}

static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes,
				    const struct mtk_aes_reqctx *rctx)
{
	/* Clear all but persistent flags and set request flags. */
	aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode;
}
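
/* Undo the scatterlist trimming done by mtk_aes_check_aligned(). */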
static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma)
{
	struct scatterlist *sg = dma->sg;
	int nents = dma->nents;

	if (!dma->remainder)
		return;

	while (--nents > 0 && sg)
		sg = sg_next(sg);

	if (!sg)
		return;

	sg->length += dma->remainder;
}

static inline void mtk_aes_write_state_le(__le32 *dst, const u32 *src, u32 size)
{
	int i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_le32(src[i]);
}

static inline void mtk_aes_write_state_be(__be32 *dst, const u32 *src, u32 size)
{
	int i;

	for (i = 0; i < SIZE_IN_WORDS(size); i++)
		dst[i] = cpu_to_be32(src[i]);
}
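
/* Complete the current request and kick the queue tasklet for the next one. */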
static inline int mtk_aes_complete(struct mtk_cryp *cryp,
				   struct mtk_aes_rec *aes,
				   int err)
{
	aes->flags &= ~AES_FLAGS_BUSY;
	aes->areq->complete(aes->areq, err);
	/* Handle new request */
	tasklet_schedule(&aes->queue_task);
	return err;
}

/*
 * Write descriptors for processing. This will configure the engine, load
 * the transform information and then start the packet processing.
 */
static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_ring *ring = cryp->ring[aes->id];
	struct mtk_desc *cmd = NULL, *res = NULL;
	struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg;
	u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len;
	int nents;

	/* Write command descriptors */
	for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) {
		cmd = ring->cmd_next;
		cmd->hdr = MTK_DESC_BUF_LEN(ssg->length);
		cmd->buf = cpu_to_le32(sg_dma_address(ssg));

		if (nents == 0) {
			cmd->hdr |= MTK_DESC_FIRST |
				    MTK_DESC_CT_LEN(aes->ctx->ct_size);
			cmd->ct = cpu_to_le32(aes->ctx->ct_dma);
			cmd->ct_hdr = aes->ctx->ct_hdr;
			cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma);
		}

		/* Shift ring buffer and check boundary */
		if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM)
			ring->cmd_next = ring->cmd_base;
	}
	cmd->hdr |= MTK_DESC_LAST;

	/* Prepare result descriptors */
	for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) {
		res = ring->res_next;
		res->hdr = MTK_DESC_BUF_LEN(dsg->length);
		res->buf = cpu_to_le32(sg_dma_address(dsg));

		if (nents == 0)
			res->hdr |= MTK_DESC_FIRST;

		/* Shift ring buffer and check boundary */
		if (++ring->res_next == ring->res_base + MTK_DESC_NUM)
			ring->res_next = ring->res_base;
	}
	res->hdr |= MTK_DESC_LAST;

	/* Pointer to current result descriptor */
	ring->res_prev = res;

	/* Prepare enough space for authenticated tag */
	if (aes->flags & AES_FLAGS_GCM)
		res->hdr += AES_BLOCK_SIZE;

	/*
	 * Make sure that all changes to the DMA ring are done before we
	 * start engine.
	 */
	wmb();
	/* Start DMA transfer */
	mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen));
	mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen));

	return -EINPROGRESS;
}
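
/*
 * Unmap the transform info and scatterlists and, if the bounce buffer was
 * used for the destination, copy the result back to the real destination.
 */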
static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;

	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info),
			 DMA_TO_DEVICE);

	if (aes->src.sg == aes->dst.sg) {
		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_BIDIRECTIONAL);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	} else {
		dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents,
			     DMA_FROM_DEVICE);

		if (aes->dst.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->dst);

		dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
			     DMA_TO_DEVICE);

		if (aes->src.sg != &aes->aligned_sg)
			mtk_aes_restore_sg(&aes->src);
	}

	if (aes->dst.sg == &aes->aligned_sg)
		sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst),
				    aes->buf, aes->total);
}
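
/*
 * Map the transform info and the source/destination scatterlists for DMA,
 * then hand off to mtk_aes_xmit() to start the transfer.
 */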
static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;

	ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info),
				     DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma)))
		goto exit;

	ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd);

	if (aes->src.sg == aes->dst.sg) {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents,
					     DMA_BIDIRECTIONAL);
		aes->dst.sg_len = aes->src.sg_len;
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;
	} else {
		aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg,
					     aes->src.nents, DMA_TO_DEVICE);
		if (unlikely(!aes->src.sg_len))
			goto sg_map_err;

		aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg,
					     aes->dst.nents, DMA_FROM_DEVICE);
		if (unlikely(!aes->dst.sg_len)) {
			dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents,
				     DMA_TO_DEVICE);
			goto sg_map_err;
		}
	}

	return mtk_aes_xmit(cryp, aes);

sg_map_err:
	dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE);
exit:
	return mtk_aes_complete(cryp, aes, -EINVAL);
}

/* Initialize transform information of CBC/ECB/CTR mode */
static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			      size_t len)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_info *info = &ctx->info;
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len);
	info->cmd[cnt++] = AES_CMD1;

	info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode;
	if (aes->flags & AES_FLAGS_ENCRYPT)
		info->tfm[0] |= AES_TFM_BASIC_OUT;
	else
		info->tfm[0] |= AES_TFM_BASIC_IN;

	switch (aes->flags & AES_FLAGS_CIPHER_MSK) {
	case AES_FLAGS_CBC:
		info->tfm[1] = AES_TFM_CBC;
		break;
	case AES_FLAGS_ECB:
		info->tfm[1] = AES_TFM_ECB;
		goto ecb;
	case AES_FLAGS_CTR:
		info->tfm[1] = AES_TFM_CTR_LOAD;
		goto ctr;
	default:
		/* Should not happen... */
		return;
	}

	mtk_aes_write_state_le(info->state + ctx->keylen, req->info,
			       AES_BLOCK_SIZE);
ctr:
	info->tfm[0] += AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE));
	info->tfm[1] |= AES_TFM_FULL_IV;
	info->cmd[cnt++] = AES_CMD2;
ecb:
	ctx->ct_size = cnt;
}
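
/*
 * Set up a CBC/ECB/CTR transfer: when either scatterlist is not DMA
 * friendly, fall back to the record's pre-allocated bounce buffer and pad
 * the length up to a full AES block before building the transform info
 * and mapping the buffers.
 */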
static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
		       struct scatterlist *src, struct scatterlist *dst,
		       size_t len)
{
	size_t padlen = 0;
	bool src_aligned, dst_aligned;

	aes->total = len;
	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		padlen = mtk_aes_padlen(len);

		if (len + padlen > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen);
	}

	mtk_aes_info_init(cryp, aes, len + padlen);

	return mtk_aes_map(cryp, aes);
}
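
/*
 * Enqueue the new request (if any) and, when the record is idle, dequeue
 * the next request and hand it to the context's start() callback.
 */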
static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id,
				struct crypto_async_request *new_areq)
{
	struct mtk_aes_rec *aes = cryp->aes[id];
	struct crypto_async_request *areq, *backlog;
	struct mtk_aes_base_ctx *ctx;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&aes->lock, flags);
	if (new_areq)
		ret = crypto_enqueue_request(&aes->queue, new_areq);
	if (aes->flags & AES_FLAGS_BUSY) {
		spin_unlock_irqrestore(&aes->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&aes->queue);
	areq = crypto_dequeue_request(&aes->queue);
	if (areq)
		aes->flags |= AES_FLAGS_BUSY;
	spin_unlock_irqrestore(&aes->lock, flags);

	if (!areq)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(areq->tfm);

	aes->areq = areq;
	aes->ctx = ctx;

	return ctx->start(cryp, aes);
}

static int mtk_aes_transfer_complete(struct mtk_cryp *cryp,
				     struct mtk_aes_rec *aes)
{
	return mtk_aes_complete(cryp, aes, 0);
}

static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);
	aes->resume = mtk_aes_transfer_complete;

	return mtk_aes_dma(cryp, aes, req->src, req->dst, req->nbytes);
}

static inline struct mtk_aes_ctr_ctx *
mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_ctr_ctx, base);
}
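
/*
 * Process a CTR request in chunks: advance past the bytes already handled
 * and, if the 32-bit counter would wrap within the next chunk, stop at the
 * wrap point and bump the counter in software before submitting the rest.
 */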
static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct scatterlist *src, *dst;
	u32 start, end, ctr, blocks;
	size_t datalen;
	bool fragmented = false;

	/* Check for transfer completion. */
	cctx->offset += aes->total;
	if (cctx->offset >= req->nbytes)
		return mtk_aes_transfer_complete(cryp, aes);

	/* Compute data length. */
	datalen = req->nbytes - cctx->offset;
	blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
	ctr = be32_to_cpu(cctx->iv[3]);

	/* Check 32bit counter overflow. */
	start = ctr;
	end = start + blocks - 1;
	if (end < start) {
		ctr |= 0xffffffff;
		datalen = AES_BLOCK_SIZE * -start;
		fragmented = true;
	}

	/* Jump to offset. */
	src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset);
	dst = ((req->src == req->dst) ? src :
	       scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset));

	/* Write IVs into transform state buffer. */
	mtk_aes_write_state_le(ctx->info.state + ctx->keylen, cctx->iv,
			       AES_BLOCK_SIZE);

	if (unlikely(fragmented)) {
		/*
		 * Increment the counter manually to cope with the hardware
		 * counter overflow.
		 */
		cctx->iv[3] = cpu_to_be32(ctr);
		crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE);
	}

	return mtk_aes_dma(cryp, aes, src, dst, datalen);
}

static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx);
	struct ablkcipher_request *req = ablkcipher_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = ablkcipher_request_ctx(req);

	mtk_aes_set_mode(aes, rctx);

	memcpy(cctx->iv, req->info, AES_BLOCK_SIZE);
	cctx->offset = 0;
	aes->total = 0;
	aes->resume = mtk_aes_ctr_transfer;

	return mtk_aes_ctr_transfer(cryp, aes);
}

/* Check and set the AES key to transform state buffer */
static int mtk_aes_setkey(struct crypto_ablkcipher *tfm,
			  const u8 *key, u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;
	default:
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);
	mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);

	return 0;
}
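
/*
 * Queue the request on one of the two records: encryption goes to the
 * first record/ring and decryption to the second, so both directions can
 * be processed in parallel.
 */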
static int mtk_aes_crypt(struct ablkcipher_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx;
	struct mtk_aes_reqctx *rctx;

	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx = ablkcipher_request_ctx(req);
	rctx->mode = mode;

	return mtk_aes_handle_queue(ctx->cryp, !(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}

static int mtk_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB);
}

static int mtk_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ECB);
}

static int mtk_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC);
}

static int mtk_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CBC);
}

static int mtk_aes_ctr_encrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR);
}

static int mtk_aes_ctr_decrypt(struct ablkcipher_request *req)
{
	return mtk_aes_crypt(req, AES_FLAGS_CTR);
}

static int mtk_aes_cra_init(struct crypto_tfm *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
	ctx->base.start = mtk_aes_start;
	return 0;
}

static int mtk_aes_ctr_cra_init(struct crypto_tfm *tfm)
{
	struct mtk_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct mtk_aes_reqctx);
	ctx->base.start = mtk_aes_ctr_start;
	return 0;
}

static struct crypto_alg aes_algs[] = {
{
	.cra_name = "cbc(aes)",
	.cra_driver_name = "cbc-aes-mtk",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_ASYNC,
	.cra_init = mtk_aes_cra_init,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mtk_aes_ctx),
	.cra_alignmask = 0xf,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u.ablkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = mtk_aes_setkey,
		.encrypt = mtk_aes_cbc_encrypt,
		.decrypt = mtk_aes_cbc_decrypt,
		.ivsize = AES_BLOCK_SIZE,
	}
},
{
	.cra_name = "ecb(aes)",
	.cra_driver_name = "ecb-aes-mtk",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_ASYNC,
	.cra_init = mtk_aes_cra_init,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mtk_aes_ctx),
	.cra_alignmask = 0xf,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u.ablkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = mtk_aes_setkey,
		.encrypt = mtk_aes_ecb_encrypt,
		.decrypt = mtk_aes_ecb_decrypt,
	}
},
{
	.cra_name = "ctr(aes)",
	.cra_driver_name = "ctr-aes-mtk",
	.cra_priority = 400,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_ASYNC,
	.cra_init = mtk_aes_ctr_cra_init,
	.cra_blocksize = 1,
	.cra_ctxsize = sizeof(struct mtk_aes_ctr_ctx),
	.cra_alignmask = 0xf,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_u.ablkcipher = {
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = mtk_aes_setkey,
		.encrypt = mtk_aes_ctr_encrypt,
		.decrypt = mtk_aes_ctr_decrypt,
	}
},
};

static inline struct mtk_aes_gcm_ctx *
mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx)
{
	return container_of(ctx, struct mtk_aes_gcm_ctx, base);
}

/*
 * The engine verifies and compares the tag automatically, so we just need
 * to check the returned status, which is stored in the result descriptor.
 */
static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes)
{
	u32 status = cryp->ring[aes->id]->res_prev->ct;

	return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ?
				-EBADMSG : 0);
}

/* Initialize transform information of GCM mode */
static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp,
				  struct mtk_aes_rec *aes,
				  size_t len)
{
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_base_ctx *ctx = aes->ctx;
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_info *info = &ctx->info;
	u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	u32 cnt = 0;

	ctx->ct_hdr = AES_CT_CTRL_HDR | len;

	info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen);
	info->cmd[cnt++] = AES_GCM_CMD2;
	info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_OUT;
	} else {
		info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize);
		info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize);
		info->tfm[0] = AES_TFM_GCM_IN;
	}
	ctx->ct_size = cnt;

	info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE(
			ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) |
			ctx->keymode;
	info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV |
		       AES_TFM_ENC_HASH;

	mtk_aes_write_state_le(info->state + ctx->keylen + SIZE_IN_WORDS(
			       AES_BLOCK_SIZE), (const u32 *)req->iv, ivsize);
}
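
/*
 * GCM variant of mtk_aes_dma(): the caller has already set aes->total
 * (including the tag on encryption), so the bounce-buffer fallback uses
 * that length and no extra block padding is applied.
 */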
static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes,
			   struct scatterlist *src, struct scatterlist *dst,
			   size_t len)
{
	bool src_aligned, dst_aligned;

	aes->src.sg = src;
	aes->dst.sg = dst;
	aes->real_dst = dst;

	src_aligned = mtk_aes_check_aligned(src, len, &aes->src);
	if (src == dst)
		dst_aligned = src_aligned;
	else
		dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst);

	if (!src_aligned || !dst_aligned) {
		if (aes->total > AES_BUF_SIZE)
			return mtk_aes_complete(cryp, aes, -ENOMEM);

		if (!src_aligned) {
			sg_copy_to_buffer(src, sg_nents(src), aes->buf, len);
			aes->src.sg = &aes->aligned_sg;
			aes->src.nents = 1;
			aes->src.remainder = 0;
		}

		if (!dst_aligned) {
			aes->dst.sg = &aes->aligned_sg;
			aes->dst.nents = 1;
			aes->dst.remainder = 0;
		}

		sg_init_table(&aes->aligned_sg, 1);
		sg_set_buf(&aes->aligned_sg, aes->buf, aes->total);
	}

	mtk_aes_gcm_info_init(cryp, aes, len);

	return mtk_aes_map(cryp, aes);
}

/* Todo: GMAC */
static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes)
{
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx);
	struct aead_request *req = aead_request_cast(aes->areq);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);
	u32 len = req->assoclen + req->cryptlen;

	mtk_aes_set_mode(aes, rctx);

	if (aes->flags & AES_FLAGS_ENCRYPT) {
		u32 tag[4];

		aes->resume = mtk_aes_transfer_complete;
		/* Compute total process length. */
		aes->total = len + gctx->authsize;
		/* Compute text length. */
		gctx->textlen = req->cryptlen;
		/* Hardware will append authenticated tag to output buffer */
		scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1);
	} else {
		aes->resume = mtk_aes_gcm_tag_verify;
		aes->total = len;
		gctx->textlen = req->cryptlen - gctx->authsize;
	}

	return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len);
}
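
/*
 * Queue a GCM request; as with the block cipher modes, encryption and
 * decryption are dispatched to separate records/rings so they can run in
 * parallel.
 */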
static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct mtk_aes_reqctx *rctx = aead_request_ctx(req);

	/* Empty messages are not supported yet */
	if (!gctx->textlen && !req->assoclen)
		return -EINVAL;

	rctx->mode = AES_FLAGS_GCM | mode;

	return mtk_aes_handle_queue(ctx->cryp, !!(mode & AES_FLAGS_ENCRYPT),
				    &req->base);
}

/*
 * Because of a hardware limitation, we need to pre-calculate the key(H)
 * for the GHASH operation. The result of the encryption operation needs
 * to be stored in the transform state buffer.
 */
static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			      u32 keylen)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);
	struct crypto_skcipher *ctr = gctx->ctr;
	struct {
		u32 hash[4];
		u8 iv[8];

		struct crypto_wait wait;

		struct scatterlist sg[1];
		struct skcipher_request req;
	} *data;
	int err;

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->keymode = AES_TFM_128BITS;
		break;
	case AES_KEYSIZE_192:
		ctx->keymode = AES_TFM_192BITS;
		break;
	case AES_KEYSIZE_256:
		ctx->keymode = AES_TFM_256BITS;
		break;
	default:
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->keylen = SIZE_IN_WORDS(keylen);

	/* Same as crypto_gcm_setkey() from crypto/gcm.c */
	crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(ctr, key, keylen);
	crypto_aead_set_flags(aead, crypto_skcipher_get_flags(ctr) &
			      CRYPTO_TFM_RES_MASK);
	if (err)
		return err;

	data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
		       GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	crypto_init_wait(&data->wait);
	sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
	skcipher_request_set_tfm(&data->req, ctr);
	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &data->wait);
	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
				   AES_BLOCK_SIZE, data->iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
			      &data->wait);
	if (err)
		goto out;

	/* Write key into state buffer */
	mtk_aes_write_state_le(ctx->info.state, (const u32 *)key, keylen);
	/* Write key(H) into state buffer */
	mtk_aes_write_state_be(ctx->info.state + ctx->keylen, data->hash,
			       AES_BLOCK_SIZE);
out:
	kzfree(data);
	return err;
}

static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead,
				   u32 authsize)
{
	struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx);

	/* Same as crypto_gcm_authsize() from crypto/gcm.c */
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	gctx->authsize = authsize;
	return 0;
}

static int mtk_aes_gcm_encrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT);
}

static int mtk_aes_gcm_decrypt(struct aead_request *req)
{
	return mtk_aes_gcm_crypt(req, 0);
}

static int mtk_aes_gcm_init(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);
	struct mtk_cryp *cryp = NULL;

	cryp = mtk_aes_find_dev(&ctx->base);
	if (!cryp) {
		pr_err("can't find crypto device\n");
		return -ENODEV;
	}

	ctx->ctr = crypto_alloc_skcipher("ctr(aes)", 0,
					 CRYPTO_ALG_ASYNC);
	if (IS_ERR(ctx->ctr)) {
		pr_err("Error allocating ctr(aes)\n");
		return PTR_ERR(ctx->ctr);
	}

	crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx));
	ctx->base.start = mtk_aes_gcm_start;
	return 0;
}

static void mtk_aes_gcm_exit(struct crypto_aead *aead)
{
	struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead);

	crypto_free_skcipher(ctx->ctr);
}

static struct aead_alg aes_gcm_alg = {
	.setkey = mtk_aes_gcm_setkey,
	.setauthsize = mtk_aes_gcm_setauthsize,
	.encrypt = mtk_aes_gcm_encrypt,
	.decrypt = mtk_aes_gcm_decrypt,
	.init = mtk_aes_gcm_init,
	.exit = mtk_aes_gcm_exit,
	.ivsize = GCM_AES_IV_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,

	.base = {
		.cra_name = "gcm(aes)",
		.cra_driver_name = "gcm-aes-mtk",
		.cra_priority = 400,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = 1,
		.cra_ctxsize = sizeof(struct mtk_aes_gcm_ctx),
		.cra_alignmask = 0xf,
		.cra_module = THIS_MODULE,
	},
};

static void mtk_aes_queue_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;

	mtk_aes_handle_queue(aes->cryp, aes->id, NULL);
}

static void mtk_aes_done_task(unsigned long data)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data;
	struct mtk_cryp *cryp = aes->cryp;

	mtk_aes_unmap(cryp, aes);
	aes->resume(cryp, aes);
}
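
/*
 * Ring interrupt handler: acknowledge the result-ring status, reset the
 * processed-descriptor count and defer completion work to the done tasklet.
 */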
static irqreturn_t mtk_aes_irq(int irq, void *dev_id)
{
	struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id;
	struct mtk_cryp *cryp = aes->cryp;
	u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id));

	mtk_aes_write(cryp, RDR_STAT(aes->id), val);

	if (likely(AES_FLAGS_BUSY & aes->flags)) {
		mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST);
		mtk_aes_write(cryp, RDR_THRESH(aes->id),
			      MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE);

		tasklet_schedule(&aes->done_task);
	} else {
		dev_warn(cryp->dev, "AES interrupt when no active requests.\n");
	}
	return IRQ_HANDLED;
}

/*
 * The purpose of creating encryption and decryption records is to process
 * outbound/inbound data in parallel. This can improve performance in most
 * use cases, such as IPsec VPN, especially under heavy network traffic.
 */
static int mtk_aes_record_init(struct mtk_cryp *cryp)
{
	struct mtk_aes_rec **aes = cryp->aes;
	int i, err = -ENOMEM;

	for (i = 0; i < MTK_REC_NUM; i++) {
		aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL);
		if (!aes[i])
			goto err_cleanup;

		aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL,
						       AES_BUF_ORDER);
		if (!aes[i]->buf)
			goto err_cleanup;

		aes[i]->cryp = cryp;

		spin_lock_init(&aes[i]->lock);
		crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE);

		tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task,
			     (unsigned long)aes[i]);
		tasklet_init(&aes[i]->done_task, mtk_aes_done_task,
			     (unsigned long)aes[i]);
	}

	/* Link to ring0 and ring1 respectively */
	aes[0]->id = MTK_RING0;
	aes[1]->id = MTK_RING1;

	return 0;

err_cleanup:
	for (; i--; ) {
		/* Free the whole AES_BUF_ORDER allocation */
		free_pages((unsigned long)aes[i]->buf, AES_BUF_ORDER);
		kfree(aes[i]);
	}

	return err;
}

static void mtk_aes_record_free(struct mtk_cryp *cryp)
{
	int i;

	for (i = 0; i < MTK_REC_NUM; i++) {
		tasklet_kill(&cryp->aes[i]->done_task);
		tasklet_kill(&cryp->aes[i]->queue_task);

		/* Free the whole AES_BUF_ORDER allocation */
		free_pages((unsigned long)cryp->aes[i]->buf, AES_BUF_ORDER);
		kfree(cryp->aes[i]);
	}
}

static void mtk_aes_unregister_algs(void)
{
	int i;

	crypto_unregister_aead(&aes_gcm_alg);

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);
}

static int mtk_aes_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	err = crypto_register_aead(&aes_gcm_alg);
	if (err)
		goto err_aes_algs;

	return 0;

err_aes_algs:
	for (; i--; )
		crypto_unregister_alg(&aes_algs[i]);

	return err;
}

int mtk_cipher_alg_register(struct mtk_cryp *cryp)
{
	int ret;

	INIT_LIST_HEAD(&cryp->aes_list);

	/* Initialize two cipher records */
	ret = mtk_aes_record_init(cryp);
	if (ret)
		goto err_record;

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[0]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq,
			       0, "mtk-aes", cryp->aes[1]);
	if (ret) {
		dev_err(cryp->dev, "unable to request AES irq.\n");
		goto err_res;
	}

	/* Enable ring0 and ring1 interrupt */
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0);
	mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1);

	spin_lock(&mtk_aes.lock);
	list_add_tail(&cryp->aes_list, &mtk_aes.dev_list);
	spin_unlock(&mtk_aes.lock);

	ret = mtk_aes_register_algs();
	if (ret)
		goto err_algs;

	return 0;

err_algs:
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);
err_res:
	mtk_aes_record_free(cryp);
err_record:
	dev_err(cryp->dev, "mtk-aes initialization failed.\n");
	return ret;
}

void mtk_cipher_alg_release(struct mtk_cryp *cryp)
{
	spin_lock(&mtk_aes.lock);
	list_del(&cryp->aes_list);
	spin_unlock(&mtk_aes.lock);

	mtk_aes_unregister_algs();
	mtk_aes_record_free(cryp);
}