// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sm3-neon-core.S - SM3 secure hash using NEON instructions
 *
 * Linux/arm64 port of the libgcrypt SM3 implementation for AArch64
 *
 * Copyright (C) 2021 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 * Copyright (c) 2022 Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
 */
#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
/* Context structure */

#define state_h0 0
#define state_h1 4
#define state_h2 8
#define state_h3 12
#define state_h4 16
#define state_h5 20
#define state_h6 24
#define state_h7 28

/* Stack structure */

#define STACK_W_SIZE (32 * 2 * 3)

#define STACK_W      (0)
#define STACK_SIZE   (STACK_W + STACK_W_SIZE)

/* Register macros */

#define RSTATE x0
#define RDATA  x1
#define RNBLKS x2
#define RKPTR  x28
#define RFRAME x29

#define ra w3
#define rb w4
#define rc w5
#define rd w6
#define re w7
#define rf w8
#define rg w9
#define rh w10

#define t0 w11
#define t1 w12
#define t2 w13
#define t3 w14
#define t4 w15
#define t5 w16
#define t6 w17

#define k_even w19
#define k_odd  w20

#define addr0 x21
#define addr1 x22

#define s0 w23
#define s1 w24
#define s2 w25
#define s3 w26

#define W0 v0
#define W1 v1
#define W2 v2
#define W3 v3
#define W4 v4
#define W5 v5

#define XTMP0 v6
#define XTMP1 v7
#define XTMP2 v16
#define XTMP3 v17
#define XTMP4 v18
#define XTMP5 v19
#define XTMP6 v20

/* Helper macros. */

#define _(...) /*_*/

#define clear_vec(x) \
        movi x.8h, #0;

#define rolw(o, a, n) \
        ror o, a, #(32 - n);
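/*
 * Note: AArch64 has no rotate-left instruction, so rolw() expresses
 * rol(a, n) as ror(a, 32 - n). clear_vec() zeroes a vector register so
 * that no message-derived data survives in it (see the epilogue below).
 */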
/* Round function macros. */

#define GG1_1(x, y, z, o, t) \
        eor o, x, y;
#define GG1_2(x, y, z, o, t) \
        eor o, o, z;
#define GG1_3(x, y, z, o, t)

#define FF1_1(x, y, z, o, t) GG1_1(x, y, z, o, t)
#define FF1_2(x, y, z, o, t)
#define FF1_3(x, y, z, o, t) GG1_2(x, y, z, o, t)

#define GG2_1(x, y, z, o, t) \
        bic o, z, x;
#define GG2_2(x, y, z, o, t) \
        and t, y, x;
#define GG2_3(x, y, z, o, t) \
        eor o, o, t;

#define FF2_1(x, y, z, o, t) \
        eor o, x, y;
#define FF2_2(x, y, z, o, t) \
        and t, x, y; \
        and o, o, z;
#define FF2_3(x, y, z, o, t) \
        eor o, o, t;
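/*
 * For reference, the SM3 boolean functions (GB/T 32905-2016) computed by
 * the macros above are:
 *
 *   rounds  0-15:  FF1(x,y,z) = GG1(x,y,z) = x ^ y ^ z
 *   rounds 16-63:  FF2(x,y,z) = (x & y) | (x & z) | (y & z)
 *                  GG2(x,y,z) = (x & y) | (~x & z)
 *
 * GG2 is implemented as (y & x) ^ (z & ~x); the two terms are disjoint
 * (one needs x = 1, the other x = 0), so XOR equals OR here. FF2 uses the
 * majority identity Maj(x,y,z) = ((x ^ y) & z) ^ (x & y). Each function
 * is split into _1/_2/_3 steps so it can be interleaved with other work
 * on in-order pipelines.
 */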
#define R(i, a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \
        K_LOAD(round); \
        ldr t5, [sp, #(wtype##_W1_ADDR(round, widx))]; \
        rolw(t0, a, 12); /* rol(a, 12) => t0 */ \
        IOP(1, iop_param); \
        FF##i##_1(a, b, c, t1, t2); \
        ldr t6, [sp, #(wtype##_W1W2_ADDR(round, widx))]; \
        add k, k, e; \
        IOP(2, iop_param); \
        GG##i##_1(e, f, g, t3, t4); \
        FF##i##_2(a, b, c, t1, t2); \
        IOP(3, iop_param); \
        add k, k, t0; \
        add h, h, t5; \
        add d, d, t6; /* w1w2 + d => d */ \
        IOP(4, iop_param); \
        rolw(k, k, 7); /* rol(t0 + e + k, 7) => k */ \
        GG##i##_2(e, f, g, t3, t4); \
        add h, h, k; /* h + w1 + k => h */ \
        IOP(5, iop_param); \
        FF##i##_3(a, b, c, t1, t2); \
        eor t0, t0, k; /* k ^ t0 => t0 */ \
        GG##i##_3(e, f, g, t3, t4); \
        add d, d, t1; /* FF(a,b,c) + d => d */ \
        IOP(6, iop_param); \
        add t3, t3, h; /* GG(e,f,g) + h => t3 */ \
        rolw(b, b, 9); /* rol(b, 9) => b */ \
        eor h, t3, t3, ror #(32-9); \
        IOP(7, iop_param); \
        add d, d, t0; /* t0 + d => d */ \
        rolw(f, f, 19); /* rol(f, 19) => f */ \
        IOP(8, iop_param); \
        eor h, h, t3, ror #(32-17); /* P0(t3) => h */
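/*
 * One SM3 compression round, for reference (j is the round number):
 *
 *   SS1 = rol(rol(a, 12) + e + K[j], 7)
 *   SS2 = SS1 ^ rol(a, 12)
 *   TT1 = FF[j](a, b, c) + d + SS2 + (w[j] ^ w[j + 4])
 *   TT2 = GG[j](e, f, g) + h + SS1 + w[j]
 *   d = c; c = rol(b, 9); b = a; a = TT1
 *   h = g; g = rol(f, 19); f = e; e = P0(TT2)
 *
 * with P0(x) = x ^ rol(x, 9) ^ rol(x, 17). In the macro, k accumulates
 * SS1, t0 holds rol(a, 12) and then SS2, TT1 lands in d and P0(TT2) in
 * h; the a/b/c/... renaming is done by rotating the macro arguments at
 * each call site instead of moving registers.
 */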
#define R1(a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \
        R(1, ##a, ##b, ##c, ##d, ##e, ##f, ##g, ##h, ##k, K_LOAD, round, widx, wtype, IOP, iop_param)

#define R2(a, b, c, d, e, f, g, h, k, K_LOAD, round, widx, wtype, IOP, iop_param) \
        R(2, ##a, ##b, ##c, ##d, ##e, ##f, ##g, ##h, ##k, K_LOAD, round, widx, wtype, IOP, iop_param)

#define KL(round) \
        ldp k_even, k_odd, [RKPTR, #(4*(round))];
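/*
 * KL() fetches the constants for one even/odd round pair with a single
 * ldp from .LKtable below, which holds the 64 pre-rotated values
 * rol(T[j], j mod 32), so the round function never rotates K at runtime.
 */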
/* Input expansion macros. */

/* Byte-swapped input address. */
#define IW_W_ADDR(round, widx, offs) \
        (STACK_W + ((round) / 4) * 64 + (offs) + ((widx) * 4))

/* Expanded input address. */
#define XW_W_ADDR(round, widx, offs) \
        (STACK_W + ((((round) / 3) - 4) % 2) * 64 + (offs) + ((widx) * 4))

/* Rounds 0-11, byte-swapped input block addresses. */
#define IW_W1_ADDR(round, widx)   IW_W_ADDR(round, widx, 32)
#define IW_W1W2_ADDR(round, widx) IW_W_ADDR(round, widx, 48)

/* Rounds 12-63, expanded input block addresses. */
#define XW_W1_ADDR(round, widx)   XW_W_ADDR(round, widx, 0)
#define XW_W1W2_ADDR(round, widx) XW_W_ADDR(round, widx, 16)
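/*
 * Stack scratch layout (STACK_W_SIZE = 192 bytes, three 64-byte rows):
 * for rounds 0-11 the byte-swapped message words w[i] sit at row offset
 * 32 and the precomputed w[i] ^ w[i + 4] values at offset 48, one row
 * per group of four rounds. From round 12 on, freshly expanded words go
 * to offsets 0 and 16, ping-ponging between the first two rows. Both
 * w[j] and w[j] ^ w[j + 4] are stored because the round function needs
 * the former for TT2 and the latter for TT1.
 */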
/* Input block loading.
 * Interleaving within round function needed for in-order CPUs. */
#define LOAD_W_VEC_1_1() \
        add addr0, sp, #IW_W1_ADDR(0, 0);
#define LOAD_W_VEC_1_2() \
        add addr1, sp, #IW_W1_ADDR(4, 0);
#define LOAD_W_VEC_1_3() \
        ld1 {W0.16b}, [RDATA], #16;
#define LOAD_W_VEC_1_4() \
        ld1 {W1.16b}, [RDATA], #16;
#define LOAD_W_VEC_1_5() \
        ld1 {W2.16b}, [RDATA], #16;
#define LOAD_W_VEC_1_6() \
        ld1 {W3.16b}, [RDATA], #16;
#define LOAD_W_VEC_1_7() \
        rev32 XTMP0.16b, W0.16b;
#define LOAD_W_VEC_1_8() \
        rev32 XTMP1.16b, W1.16b;
#define LOAD_W_VEC_2_1() \
        rev32 XTMP2.16b, W2.16b;
#define LOAD_W_VEC_2_2() \
        rev32 XTMP3.16b, W3.16b;
#define LOAD_W_VEC_2_3() \
        eor XTMP4.16b, XTMP1.16b, XTMP0.16b;
#define LOAD_W_VEC_2_4() \
        eor XTMP5.16b, XTMP2.16b, XTMP1.16b;
#define LOAD_W_VEC_2_5() \
        st1 {XTMP0.16b}, [addr0], #16;
#define LOAD_W_VEC_2_6() \
        st1 {XTMP4.16b}, [addr0]; \
        add addr0, sp, #IW_W1_ADDR(8, 0);
#define LOAD_W_VEC_2_7() \
        eor XTMP6.16b, XTMP3.16b, XTMP2.16b;
#define LOAD_W_VEC_2_8() \
        ext W0.16b, XTMP0.16b, XTMP0.16b, #8; /* W0: xx, w0, xx, xx */
#define LOAD_W_VEC_3_1() \
        mov W2.16b, XTMP1.16b; /* W2: xx, w6, w5, w4 */
#define LOAD_W_VEC_3_2() \
        st1 {XTMP1.16b}, [addr1], #16;
#define LOAD_W_VEC_3_3() \
        st1 {XTMP5.16b}, [addr1]; \
        ext W1.16b, XTMP0.16b, XTMP0.16b, #4; /* W1: xx, w3, w2, w1 */
#define LOAD_W_VEC_3_4() \
        ext W3.16b, XTMP1.16b, XTMP2.16b, #12; /* W3: xx, w9, w8, w7 */
#define LOAD_W_VEC_3_5() \
        ext W4.16b, XTMP2.16b, XTMP3.16b, #8; /* W4: xx, w12, w11, w10 */
#define LOAD_W_VEC_3_6() \
        st1 {XTMP2.16b}, [addr0], #16;
#define LOAD_W_VEC_3_7() \
        st1 {XTMP6.16b}, [addr0];
#define LOAD_W_VEC_3_8() \
        ext W5.16b, XTMP3.16b, XTMP3.16b, #4; /* W5: xx, w15, w14, w13 */

#define LOAD_W_VEC_1(iop_num, ...) \
        LOAD_W_VEC_1_##iop_num()
#define LOAD_W_VEC_2(iop_num, ...) \
        LOAD_W_VEC_2_##iop_num()
#define LOAD_W_VEC_3(iop_num, ...) \
        LOAD_W_VEC_3_##iop_num()

/* Message scheduling. Note: 3 words per vector register.
 * Interleaving within round function needed for in-order CPUs. */
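/*
 * The expansion computed below is the SM3 message schedule:
 *
 *   w[i] = P1(w[i-16] ^ w[i-9] ^ rol(w[i-3], 15)) ^ rol(w[i-13], 7) ^ w[i-6]
 *   P1(x) = x ^ rol(x, 15) ^ rol(x, 23)
 *
 * Each SCHED_W_*_1..8 sequence produces three new words at once, with
 * the live w[] values held three per register across W0-W5. The shl/sri
 * pairs implement the vector rotates, since NEON has no vector rotate
 * instruction.
 */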
#define SCHED_W_1_1(round, w0, w1, w2, w3, w4, w5) \
        /* Load (w[i - 16]) => XTMP0 */ \
        /* Load (w[i - 13]) => XTMP5 */ \
        ext XTMP0.16b, w0.16b, w0.16b, #12; /* XTMP0: w0, xx, xx, xx */
#define SCHED_W_1_2(round, w0, w1, w2, w3, w4, w5) \
        ext XTMP5.16b, w1.16b, w1.16b, #12;
#define SCHED_W_1_3(round, w0, w1, w2, w3, w4, w5) \
        ext XTMP0.16b, XTMP0.16b, w1.16b, #12; /* XTMP0: xx, w2, w1, w0 */
#define SCHED_W_1_4(round, w0, w1, w2, w3, w4, w5) \
        ext XTMP5.16b, XTMP5.16b, w2.16b, #12;
#define SCHED_W_1_5(round, w0, w1, w2, w3, w4, w5) \
        /* w[i - 9] == w3 */ \
        /* W3 ^ XTMP0 => XTMP0 */ \
        eor XTMP0.16b, XTMP0.16b, w3.16b;
#define SCHED_W_1_6(round, w0, w1, w2, w3, w4, w5) \
        /* w[i - 3] == w5 */ \
        /* rol(w5, 15) ^ XTMP0 => XTMP0 */ \
        /* rol(XTMP5, 7) => XTMP1 */ \
        add addr0, sp, #XW_W1_ADDR((round), 0); \
        shl XTMP2.4s, w5.4s, #15;
#define SCHED_W_1_7(round, w0, w1, w2, w3, w4, w5) \
        shl XTMP1.4s, XTMP5.4s, #7;
#define SCHED_W_1_8(round, w0, w1, w2, w3, w4, w5) \
        sri XTMP2.4s, w5.4s, #(32-15);
#define SCHED_W_2_1(round, w0, w1, w2, w3, w4, w5) \
        sri XTMP1.4s, XTMP5.4s, #(32-7);
#define SCHED_W_2_2(round, w0, w1, w2, w3, w4, w5) \
        eor XTMP0.16b, XTMP0.16b, XTMP2.16b;
#define SCHED_W_2_3(round, w0, w1, w2, w3, w4, w5) \
        /* w[i - 6] == W4 */ \
        /* W4 ^ XTMP1 => XTMP1 */ \
        eor XTMP1.16b, XTMP1.16b, w4.16b;
#define SCHED_W_2_4(round, w0, w1, w2, w3, w4, w5) \
        /* P1(XTMP0) ^ XTMP1 => W0 */ \
        shl XTMP3.4s, XTMP0.4s, #15;
#define SCHED_W_2_5(round, w0, w1, w2, w3, w4, w5) \
        shl XTMP4.4s, XTMP0.4s, #23;
#define SCHED_W_2_6(round, w0, w1, w2, w3, w4, w5) \
        eor w0.16b, XTMP1.16b, XTMP0.16b;
#define SCHED_W_2_7(round, w0, w1, w2, w3, w4, w5) \
        sri XTMP3.4s, XTMP0.4s, #(32-15);
#define SCHED_W_2_8(round, w0, w1, w2, w3, w4, w5) \
        sri XTMP4.4s, XTMP0.4s, #(32-23);
#define SCHED_W_3_1(round, w0, w1, w2, w3, w4, w5) \
        eor w0.16b, w0.16b, XTMP3.16b;
#define SCHED_W_3_2(round, w0, w1, w2, w3, w4, w5) \
        /* Load (w[i - 3]) => XTMP2 */ \
        ext XTMP2.16b, w4.16b, w4.16b, #12;
#define SCHED_W_3_3(round, w0, w1, w2, w3, w4, w5) \
        eor w0.16b, w0.16b, XTMP4.16b;
#define SCHED_W_3_4(round, w0, w1, w2, w3, w4, w5) \
        ext XTMP2.16b, XTMP2.16b, w5.16b, #12;
#define SCHED_W_3_5(round, w0, w1, w2, w3, w4, w5) \
        /* W1 ^ W2 => XTMP3 */ \
        eor XTMP3.16b, XTMP2.16b, w0.16b;
#define SCHED_W_3_6(round, w0, w1, w2, w3, w4, w5)
#define SCHED_W_3_7(round, w0, w1, w2, w3, w4, w5) \
        st1 {XTMP2.16b-XTMP3.16b}, [addr0];
#define SCHED_W_3_8(round, w0, w1, w2, w3, w4, w5)

#define SCHED_W_W0W1W2W3W4W5_1(iop_num, round) \
        SCHED_W_1_##iop_num(round, W0, W1, W2, W3, W4, W5)
#define SCHED_W_W0W1W2W3W4W5_2(iop_num, round) \
        SCHED_W_2_##iop_num(round, W0, W1, W2, W3, W4, W5)
#define SCHED_W_W0W1W2W3W4W5_3(iop_num, round) \
        SCHED_W_3_##iop_num(round, W0, W1, W2, W3, W4, W5)
#define SCHED_W_W1W2W3W4W5W0_1(iop_num, round) \
        SCHED_W_1_##iop_num(round, W1, W2, W3, W4, W5, W0)
#define SCHED_W_W1W2W3W4W5W0_2(iop_num, round) \
        SCHED_W_2_##iop_num(round, W1, W2, W3, W4, W5, W0)
#define SCHED_W_W1W2W3W4W5W0_3(iop_num, round) \
        SCHED_W_3_##iop_num(round, W1, W2, W3, W4, W5, W0)
#define SCHED_W_W2W3W4W5W0W1_1(iop_num, round) \
        SCHED_W_1_##iop_num(round, W2, W3, W4, W5, W0, W1)
#define SCHED_W_W2W3W4W5W0W1_2(iop_num, round) \
        SCHED_W_2_##iop_num(round, W2, W3, W4, W5, W0, W1)
#define SCHED_W_W2W3W4W5W0W1_3(iop_num, round) \
        SCHED_W_3_##iop_num(round, W2, W3, W4, W5, W0, W1)
#define SCHED_W_W3W4W5W0W1W2_1(iop_num, round) \
        SCHED_W_1_##iop_num(round, W3, W4, W5, W0, W1, W2)
#define SCHED_W_W3W4W5W0W1W2_2(iop_num, round) \
        SCHED_W_2_##iop_num(round, W3, W4, W5, W0, W1, W2)
#define SCHED_W_W3W4W5W0W1W2_3(iop_num, round) \
        SCHED_W_3_##iop_num(round, W3, W4, W5, W0, W1, W2)
#define SCHED_W_W4W5W0W1W2W3_1(iop_num, round) \
        SCHED_W_1_##iop_num(round, W4, W5, W0, W1, W2, W3)
#define SCHED_W_W4W5W0W1W2W3_2(iop_num, round) \
        SCHED_W_2_##iop_num(round, W4, W5, W0, W1, W2, W3)
#define SCHED_W_W4W5W0W1W2W3_3(iop_num, round) \
        SCHED_W_3_##iop_num(round, W4, W5, W0, W1, W2, W3)
#define SCHED_W_W5W0W1W2W3W4_1(iop_num, round) \
        SCHED_W_1_##iop_num(round, W5, W0, W1, W2, W3, W4)
#define SCHED_W_W5W0W1W2W3W4_2(iop_num, round) \
        SCHED_W_2_##iop_num(round, W5, W0, W1, W2, W3, W4)
#define SCHED_W_W5W0W1W2W3W4_3(iop_num, round) \
        SCHED_W_3_##iop_num(round, W5, W0, W1, W2, W3, W4)
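/*
 * The SCHED_W_W*_{1,2,3} wrappers only rotate which of W0-W5 plays each
 * role in the recurrence; as with the scalar round macros, renaming is
 * done at the call site so no vector moves are needed.
 */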
/*
 * Transform blocks*64 bytes (blocks*16 32-bit words) at 'src'.
 *
 * void sm3_neon_transform(struct sm3_state *sst, u8 const *src,
 *                         int blocks)
 */
.text
.align 3
SYM_TYPED_FUNC_START(sm3_neon_transform)
        ldp ra, rb, [RSTATE, #0]
        ldp rc, rd, [RSTATE, #8]
        ldp re, rf, [RSTATE, #16]
        ldp rg, rh, [RSTATE, #24]

        stp x28, x29, [sp, #-16]!
        stp x19, x20, [sp, #-16]!
        stp x21, x22, [sp, #-16]!
        stp x23, x24, [sp, #-16]!
        stp x25, x26, [sp, #-16]!
        mov RFRAME, sp

        sub addr0, sp, #STACK_SIZE
        adr_l RKPTR, .LKtable
        and sp, addr0, #(~63)
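        /*
         * sp is rounded down to a 64-byte boundary so each 64-byte row
         * of the message-schedule scratch area stays cache-line aligned
         * on common AArch64 cores; RFRAME keeps the original sp for the
         * epilogue.
         */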
        /* Preload first block. */
        LOAD_W_VEC_1(1, 0)
        LOAD_W_VEC_1(2, 0)
        LOAD_W_VEC_1(3, 0)
        LOAD_W_VEC_1(4, 0)
        LOAD_W_VEC_1(5, 0)
        LOAD_W_VEC_1(6, 0)
        LOAD_W_VEC_1(7, 0)
        LOAD_W_VEC_1(8, 0)
        LOAD_W_VEC_2(1, 0)
        LOAD_W_VEC_2(2, 0)
        LOAD_W_VEC_2(3, 0)
        LOAD_W_VEC_2(4, 0)
        LOAD_W_VEC_2(5, 0)
        LOAD_W_VEC_2(6, 0)
        LOAD_W_VEC_2(7, 0)
        LOAD_W_VEC_2(8, 0)
        LOAD_W_VEC_3(1, 0)
        LOAD_W_VEC_3(2, 0)
        LOAD_W_VEC_3(3, 0)
        LOAD_W_VEC_3(4, 0)
        LOAD_W_VEC_3(5, 0)
        LOAD_W_VEC_3(6, 0)
        LOAD_W_VEC_3(7, 0)
        LOAD_W_VEC_3(8, 0)
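        /*
         * At this point the first block has been loaded and byte-swapped,
         * and the w / w^w[i+4] pairs for rounds 0-11 are staged on the
         * stack. For subsequent blocks the same LOAD_W_VEC_* steps run
         * interleaved with rounds 61-63 at the bottom of the loop.
         */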
.balign 16
.Loop:
        /* Transform 0-3 */
        R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 0, 0, IW, _, 0)
        R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 1, 1, IW, _, 0)
        R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 2, 2, IW, _, 0)
        R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 3, 3, IW, _, 0)

        /* Transform 4-7 + Precalc 12-14 */
        R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 4, 0, IW, _, 0)
        R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 5, 1, IW, _, 0)
        R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 6, 2, IW, SCHED_W_W0W1W2W3W4W5_1, 12)
        R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 7, 3, IW, SCHED_W_W0W1W2W3W4W5_2, 12)

        /* Transform 8-11 + Precalc 12-17 */
        R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 8, 0, IW, SCHED_W_W0W1W2W3W4W5_3, 12)
        R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 9, 1, IW, SCHED_W_W1W2W3W4W5W0_1, 15)
        R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 10, 2, IW, SCHED_W_W1W2W3W4W5W0_2, 15)
        R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 11, 3, IW, SCHED_W_W1W2W3W4W5W0_3, 15)

        /* Transform 12-14 + Precalc 18-20 */
        R1(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 12, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 18)
        R1(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 13, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 18)
        R1(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 14, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 18)

        /* Transform 15-17 + Precalc 21-23 */
        R1(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 15, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 21)
        R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 16, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 21)
        R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 17, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 21)

        /* Transform 18-20 + Precalc 24-26 */
        R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 18, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 24)
        R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 19, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 24)
        R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 20, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 24)

        /* Transform 21-23 + Precalc 27-29 */
        R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 21, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 27)
        R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 22, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 27)
        R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 23, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 27)

        /* Transform 24-26 + Precalc 30-32 */
        R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 24, 0, XW, SCHED_W_W0W1W2W3W4W5_1, 30)
        R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 25, 1, XW, SCHED_W_W0W1W2W3W4W5_2, 30)
        R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 26, 2, XW, SCHED_W_W0W1W2W3W4W5_3, 30)

        /* Transform 27-29 + Precalc 33-35 */
        R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 27, 0, XW, SCHED_W_W1W2W3W4W5W0_1, 33)
        R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 28, 1, XW, SCHED_W_W1W2W3W4W5W0_2, 33)
        R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 29, 2, XW, SCHED_W_W1W2W3W4W5W0_3, 33)

        /* Transform 30-32 + Precalc 36-38 */
        R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 30, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 36)
        R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 31, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 36)
        R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 32, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 36)

        /* Transform 33-35 + Precalc 39-41 */
        R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 33, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 39)
        R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 34, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 39)
        R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 35, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 39)

        /* Transform 36-38 + Precalc 42-44 */
        R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 36, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 42)
        R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 37, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 42)
        R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 38, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 42)

        /* Transform 39-41 + Precalc 45-47 */
        R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 39, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 45)
        R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 40, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 45)
        R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 41, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 45)

        /* Transform 42-44 + Precalc 48-50 */
        R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 42, 0, XW, SCHED_W_W0W1W2W3W4W5_1, 48)
        R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 43, 1, XW, SCHED_W_W0W1W2W3W4W5_2, 48)
        R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 44, 2, XW, SCHED_W_W0W1W2W3W4W5_3, 48)

        /* Transform 45-47 + Precalc 51-53 */
        R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 45, 0, XW, SCHED_W_W1W2W3W4W5W0_1, 51)
        R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 46, 1, XW, SCHED_W_W1W2W3W4W5W0_2, 51)
        R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 47, 2, XW, SCHED_W_W1W2W3W4W5W0_3, 51)

        /* Transform 48-50 + Precalc 54-56 */
        R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 48, 0, XW, SCHED_W_W2W3W4W5W0W1_1, 54)
        R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 49, 1, XW, SCHED_W_W2W3W4W5W0W1_2, 54)
        R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 50, 2, XW, SCHED_W_W2W3W4W5W0W1_3, 54)

        /* Transform 51-53 + Precalc 57-59 */
        R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 51, 0, XW, SCHED_W_W3W4W5W0W1W2_1, 57)
        R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 52, 1, XW, SCHED_W_W3W4W5W0W1W2_2, 57)
        R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 53, 2, XW, SCHED_W_W3W4W5W0W1W2_3, 57)

        /* Transform 54-56 + Precalc 60-62 */
        R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 54, 0, XW, SCHED_W_W4W5W0W1W2W3_1, 60)
        R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 55, 1, XW, SCHED_W_W4W5W0W1W2W3_2, 60)
        R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 56, 2, XW, SCHED_W_W4W5W0W1W2W3_3, 60)

        /* Transform 57-59 + Precalc 63 */
        R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 57, 0, XW, SCHED_W_W5W0W1W2W3W4_1, 63)
        R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 58, 1, XW, SCHED_W_W5W0W1W2W3W4_2, 63)
        R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 59, 2, XW, SCHED_W_W5W0W1W2W3W4_3, 63)

        /* Transform 60 */
        R2(ra, rb, rc, rd, re, rf, rg, rh, k_even, KL, 60, 0, XW, _, _)
        subs RNBLKS, RNBLKS, #1
        b.eq .Lend

        /* Transform 61-63 + Preload next block */
        R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 61, 1, XW, LOAD_W_VEC_1, _)
        ldp s0, s1, [RSTATE, #0]
        R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 62, 2, XW, LOAD_W_VEC_2, _)
        ldp s2, s3, [RSTATE, #8]
        R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 63, 0, XW, LOAD_W_VEC_3, _)

        /* Update the chaining variables. */
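        /* SM3 chains with XOR rather than addition: V(i+1) = state ^ V(i). */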
        eor ra, ra, s0
        eor rb, rb, s1
        ldp s0, s1, [RSTATE, #16]
        eor rc, rc, s2
        ldp k_even, k_odd, [RSTATE, #24]
        eor rd, rd, s3
        eor re, re, s0
        stp ra, rb, [RSTATE, #0]
        eor rf, rf, s1
        stp rc, rd, [RSTATE, #8]
        eor rg, rg, k_even
        stp re, rf, [RSTATE, #16]
        eor rh, rh, k_odd
        stp rg, rh, [RSTATE, #24]
        b .Loop

.Lend:
        /* Transform 61-63 */
        R2(rd, ra, rb, rc, rh, re, rf, rg, k_odd, _, 61, 1, XW, _, _)
        ldp s0, s1, [RSTATE, #0]
        R2(rc, rd, ra, rb, rg, rh, re, rf, k_even, KL, 62, 2, XW, _, _)
        ldp s2, s3, [RSTATE, #8]
        R2(rb, rc, rd, ra, rf, rg, rh, re, k_odd, _, 63, 0, XW, _, _)

        /* Update the chaining variables. */
        eor ra, ra, s0
        clear_vec(W0)
        eor rb, rb, s1
        clear_vec(W1)
        ldp s0, s1, [RSTATE, #16]
        clear_vec(W2)
        eor rc, rc, s2
        clear_vec(W3)
        ldp k_even, k_odd, [RSTATE, #24]
        clear_vec(W4)
        eor rd, rd, s3
        clear_vec(W5)
        eor re, re, s0
        clear_vec(XTMP0)
        stp ra, rb, [RSTATE, #0]
        clear_vec(XTMP1)
        eor rf, rf, s1
        clear_vec(XTMP2)
        stp rc, rd, [RSTATE, #8]
        clear_vec(XTMP3)
        eor rg, rg, k_even
        clear_vec(XTMP4)
        stp re, rf, [RSTATE, #16]
        clear_vec(XTMP5)
        eor rh, rh, k_odd
        clear_vec(XTMP6)
        stp rg, rh, [RSTATE, #24]

        /* Clear message expansion area */
        add addr0, sp, #STACK_W
        st1 {W0.16b-W3.16b}, [addr0], #64
        st1 {W0.16b-W3.16b}, [addr0], #64
        st1 {W0.16b-W3.16b}, [addr0]
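        /*
         * Three 64-byte stores of the zeroed W0-W3 wipe the full
         * 192-byte (STACK_W_SIZE) schedule area, so no message-derived
         * data is left on the stack; the vector registers themselves
         * were cleared just above.
         */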
        mov sp, RFRAME

        ldp x25, x26, [sp], #16
        ldp x23, x24, [sp], #16
        ldp x21, x22, [sp], #16
        ldp x19, x20, [sp], #16
        ldp x28, x29, [sp], #16
        ret
SYM_FUNC_END(sm3_neon_transform)

.section ".rodata", "a"

.align 4
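/*
 * Round constants, pre-rotated: entry j is rol(T[j], j mod 32), with
 * T[j] = 0x79cc4519 for rounds 0-15 and 0x7a879d8a for rounds 16-63.
 */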
.LKtable:
        .long 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb
        .long 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc
        .long 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce
        .long 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6
        .long 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c
        .long 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce
        .long 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec
        .long 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5
        .long 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53
        .long 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d
        .long 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4
        .long 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43
        .long 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c
        .long 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce
        .long 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec
        .long 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5