/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * x86_64/AVX2/AES-NI assembler implementation of Camellia
 *
 * Copyright © 2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 */

#include <linux/linkage.h>
#include <asm/frame.h>

#define CAMELLIA_TABLE_BYTE_LEN 272

/* struct camellia_ctx: */
#define key_table 0
#define key_length CAMELLIA_TABLE_BYTE_LEN

/* register macros */
#define CTX %rdi
#define RIO %r8

/**********************************************************************
  helper macros
 **********************************************************************/
#define filter_8bit(x, lo_t, hi_t, mask4bit, tmp0) \
	vpand x, mask4bit, tmp0; \
	vpandn x, mask4bit, x; \
	vpsrld $4, x, x; \
	\
	vpshufb tmp0, lo_t, tmp0; \
	vpshufb x, hi_t, x; \
	vpxor tmp0, x, x;
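
/*
 * filter_8bit() applies an 8-bit to 8-bit function that has been
 * decomposed into two 16-entry nibble lookups (each performed with
 * vpshufb). The per-byte effect, as a scalar C sketch:
 *
 *	static uint8_t filter_8bit(uint8_t x, const uint8_t lo_t[16],
 *				   const uint8_t hi_t[16])
 *	{
 *		return lo_t[x & 0x0f] ^ hi_t[x >> 4];
 *	}
 */
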
#define ymm0_x xmm0
#define ymm1_x xmm1
#define ymm2_x xmm2
#define ymm3_x xmm3
#define ymm4_x xmm4
#define ymm5_x xmm5
#define ymm6_x xmm6
#define ymm7_x xmm7
#define ymm8_x xmm8
#define ymm9_x xmm9
#define ymm10_x xmm10
#define ymm11_x xmm11
#define ymm12_x xmm12
#define ymm13_x xmm13
#define ymm14_x xmm14
#define ymm15_x xmm15

/**********************************************************************
  32-way camellia
 **********************************************************************/

/*
 * IN:
 *  x0..x7: byte-sliced AB state
 *  mem_cd: register pointer storing CD state
 *  key: index for key material
 * OUT:
 *  x0..x7: new byte-sliced CD state
 */
#define roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, t0, t1, t2, t3, t4, t5, t6, \
		  t7, mem_cd, key) \
	/* \
	 * S-function with AES subbytes \
	 */ \
	vbroadcasti128 .Linv_shift_row(%rip), t4; \
	vpbroadcastd .L0f0f0f0f(%rip), t7; \
	vbroadcasti128 .Lpre_tf_lo_s1(%rip), t5; \
	vbroadcasti128 .Lpre_tf_hi_s1(%rip), t6; \
	vbroadcasti128 .Lpre_tf_lo_s4(%rip), t2; \
	vbroadcasti128 .Lpre_tf_hi_s4(%rip), t3; \
	\
	/* AES inverse shift rows */ \
	vpshufb t4, x0, x0; \
	vpshufb t4, x7, x7; \
	vpshufb t4, x3, x3; \
	vpshufb t4, x6, x6; \
	vpshufb t4, x2, x2; \
	vpshufb t4, x5, x5; \
	vpshufb t4, x1, x1; \
	vpshufb t4, x4, x4; \
	\
	/* prefilter sboxes 1, 2 and 3 */ \
	/* prefilter sbox 4 */ \
	filter_8bit(x0, t5, t6, t7, t4); \
	filter_8bit(x7, t5, t6, t7, t4); \
	vextracti128 $1, x0, t0##_x; \
	vextracti128 $1, x7, t1##_x; \
	filter_8bit(x3, t2, t3, t7, t4); \
	filter_8bit(x6, t2, t3, t7, t4); \
	vextracti128 $1, x3, t3##_x; \
	vextracti128 $1, x6, t2##_x; \
	filter_8bit(x2, t5, t6, t7, t4); \
	filter_8bit(x5, t5, t6, t7, t4); \
	filter_8bit(x1, t5, t6, t7, t4); \
	filter_8bit(x4, t5, t6, t7, t4); \
	\
	vpxor t4##_x, t4##_x, t4##_x; \
	\
	/* AES subbytes + AES shift rows */ \
	vextracti128 $1, x2, t6##_x; \
	vextracti128 $1, x5, t5##_x; \
	vaesenclast t4##_x, x0##_x, x0##_x; \
	vaesenclast t4##_x, t0##_x, t0##_x; \
	vinserti128 $1, t0##_x, x0, x0; \
	vaesenclast t4##_x, x7##_x, x7##_x; \
	vaesenclast t4##_x, t1##_x, t1##_x; \
	vinserti128 $1, t1##_x, x7, x7; \
	vaesenclast t4##_x, x3##_x, x3##_x; \
	vaesenclast t4##_x, t3##_x, t3##_x; \
	vinserti128 $1, t3##_x, x3, x3; \
	vaesenclast t4##_x, x6##_x, x6##_x; \
	vaesenclast t4##_x, t2##_x, t2##_x; \
	vinserti128 $1, t2##_x, x6, x6; \
	vextracti128 $1, x1, t3##_x; \
	vextracti128 $1, x4, t2##_x; \
	vbroadcasti128 .Lpost_tf_lo_s1(%rip), t0; \
	vbroadcasti128 .Lpost_tf_hi_s1(%rip), t1; \
	vaesenclast t4##_x, x2##_x, x2##_x; \
	vaesenclast t4##_x, t6##_x, t6##_x; \
	vinserti128 $1, t6##_x, x2, x2; \
	vaesenclast t4##_x, x5##_x, x5##_x; \
	vaesenclast t4##_x, t5##_x, t5##_x; \
	vinserti128 $1, t5##_x, x5, x5; \
	vaesenclast t4##_x, x1##_x, x1##_x; \
	vaesenclast t4##_x, t3##_x, t3##_x; \
	vinserti128 $1, t3##_x, x1, x1; \
	vaesenclast t4##_x, x4##_x, x4##_x; \
	vaesenclast t4##_x, t2##_x, t2##_x; \
	vinserti128 $1, t2##_x, x4, x4; \
	\
	/* postfilter sboxes 1 and 4 */ \
	vbroadcasti128 .Lpost_tf_lo_s3(%rip), t2; \
	vbroadcasti128 .Lpost_tf_hi_s3(%rip), t3; \
	filter_8bit(x0, t0, t1, t7, t6); \
	filter_8bit(x7, t0, t1, t7, t6); \
	filter_8bit(x3, t0, t1, t7, t6); \
	filter_8bit(x6, t0, t1, t7, t6); \
	\
	/* postfilter sbox 3 */ \
	vbroadcasti128 .Lpost_tf_lo_s2(%rip), t4; \
	vbroadcasti128 .Lpost_tf_hi_s2(%rip), t5; \
	filter_8bit(x2, t2, t3, t7, t6); \
	filter_8bit(x5, t2, t3, t7, t6); \
	\
	vpbroadcastq key, t0; /* higher 64-bit duplicate ignored */ \
	\
	/* postfilter sbox 2 */ \
	filter_8bit(x1, t4, t5, t7, t2); \
	filter_8bit(x4, t4, t5, t7, t2); \
	vpxor t7, t7, t7; \
	\
	vpsrldq $1, t0, t1; \
	vpsrldq $2, t0, t2; \
	vpshufb t7, t1, t1; \
	vpsrldq $3, t0, t3; \
	\
	/* P-function */ \
	vpxor x5, x0, x0; \
	vpxor x6, x1, x1; \
	vpxor x7, x2, x2; \
	vpxor x4, x3, x3; \
	\
	vpshufb t7, t2, t2; \
	vpsrldq $4, t0, t4; \
	vpshufb t7, t3, t3; \
	vpsrldq $5, t0, t5; \
	vpshufb t7, t4, t4; \
	\
	vpxor x2, x4, x4; \
	vpxor x3, x5, x5; \
	vpxor x0, x6, x6; \
	vpxor x1, x7, x7; \
	\
	vpsrldq $6, t0, t6; \
	vpshufb t7, t5, t5; \
	vpshufb t7, t6, t6; \
	\
	vpxor x7, x0, x0; \
	vpxor x4, x1, x1; \
	vpxor x5, x2, x2; \
	vpxor x6, x3, x3; \
	\
	vpxor x3, x4, x4; \
	vpxor x0, x5, x5; \
	vpxor x1, x6, x6; \
	vpxor x2, x7, x7; /* note: high and low parts swapped */ \
	\
	/* Add key material and result to CD (x becomes new CD) */ \
	\
	vpxor t6, x1, x1; \
	vpxor 5 * 32(mem_cd), x1, x1; \
	\
	vpsrldq $7, t0, t6; \
	vpshufb t7, t0, t0; \
	vpshufb t7, t6, t7; \
	\
	vpxor t7, x0, x0; \
	vpxor 4 * 32(mem_cd), x0, x0; \
	\
	vpxor t5, x2, x2; \
	vpxor 6 * 32(mem_cd), x2, x2; \
	\
	vpxor t4, x3, x3; \
	vpxor 7 * 32(mem_cd), x3, x3; \
	\
	vpxor t3, x4, x4; \
	vpxor 0 * 32(mem_cd), x4, x4; \
	\
	vpxor t2, x5, x5; \
	vpxor 1 * 32(mem_cd), x5, x5; \
	\
	vpxor t1, x6, x6; \
	vpxor 2 * 32(mem_cd), x6, x6; \
	\
	vpxor t0, x7, x7; \
	vpxor 3 * 32(mem_cd), x7, x7;

/*
 * Size optimization... with inlined roundsm32 the binary would be over
 * 5 times larger and only marginally faster.
 */
SYM_FUNC_START_LOCAL(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
	roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		  %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
		  %rcx, (%r9));
	RET;
SYM_FUNC_END(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)

SYM_FUNC_START_LOCAL(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
	roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
		  %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
		  %rax, (%r9));
	RET;
SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)

/*
 * IN/OUT:
 *  x0..x7: byte-sliced AB state preloaded
 *  mem_ab: byte-sliced AB state in memory
 *  mem_cd: byte-sliced CD state in memory
 */
#define two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd, i, dir, store_ab) \
	leaq (key_table + (i) * 8)(CTX), %r9; \
	call roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd; \
	\
	vmovdqu x0, 4 * 32(mem_cd); \
	vmovdqu x1, 5 * 32(mem_cd); \
	vmovdqu x2, 6 * 32(mem_cd); \
	vmovdqu x3, 7 * 32(mem_cd); \
	vmovdqu x4, 0 * 32(mem_cd); \
	vmovdqu x5, 1 * 32(mem_cd); \
	vmovdqu x6, 2 * 32(mem_cd); \
	vmovdqu x7, 3 * 32(mem_cd); \
	\
	leaq (key_table + ((i) + (dir)) * 8)(CTX), %r9; \
	call roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab; \
	\
	store_ab(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab);

#define dummy_store(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) /* do nothing */

#define store_ab_state(x0, x1, x2, x3, x4, x5, x6, x7, mem_ab) \
	/* Store new AB state */ \
	vmovdqu x4, 4 * 32(mem_ab); \
	vmovdqu x5, 5 * 32(mem_ab); \
	vmovdqu x6, 6 * 32(mem_ab); \
	vmovdqu x7, 7 * 32(mem_ab); \
	vmovdqu x0, 0 * 32(mem_ab); \
	vmovdqu x1, 1 * 32(mem_ab); \
	vmovdqu x2, 2 * 32(mem_ab); \
	vmovdqu x3, 3 * 32(mem_ab);

#define enc_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		     y6, y7, mem_ab, mem_cd, i) \
	two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd, (i) + 2, 1, store_ab_state); \
	two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd, (i) + 4, 1, store_ab_state); \
	two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd, (i) + 6, 1, dummy_store);

#define dec_rounds32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		     y6, y7, mem_ab, mem_cd, i) \
	two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd, (i) + 7, -1, store_ab_state); \
	two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd, (i) + 5, -1, store_ab_state); \
	two_roundsm32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd, (i) + 3, -1, dummy_store);
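
/*
 * Per block, this reproduces the Camellia round schedule:
 * enc_rounds32(..., i) runs six rounds using 64-bit subkeys
 * k[i+2]..k[i+7] (two per two_roundsm32 call) and fls32 applies the
 * FL/FL^-1 layer between the six-round groups. For encryption:
 *
 *	whitening with k[0]			(inpack32_pre)
 *	6 rounds, k[2]..k[7]			(enc_rounds32, i = 0)
 *	FL/FL^-1, k[8], k[9]			(fls32)
 *	6 rounds, k[10]..k[15]			(enc_rounds32, i = 8)
 *	FL/FL^-1, k[16], k[17]			(fls32)
 *	6 rounds, k[18]..k[23]			(enc_rounds32, i = 16)
 *	(192/256-bit keys only: FL/FL^-1 with k[24], k[25],
 *	 then 6 rounds with k[26]..k[31])
 *	whitening with k[24] or k[32]		(outunpack32)
 *
 * Decryption walks the same table backwards via dir = -1.
 */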

/*
 * IN:
 *  v0..3: byte-sliced 32-bit integers
 * OUT:
 *  v0..3: (IN <<< 1)
 */
#define rol32_1_32(v0, v1, v2, v3, t0, t1, t2, zero) \
	vpcmpgtb v0, zero, t0; \
	vpaddb v0, v0, v0; \
	vpabsb t0, t0; \
	\
	vpcmpgtb v1, zero, t1; \
	vpaddb v1, v1, v1; \
	vpabsb t1, t1; \
	\
	vpcmpgtb v2, zero, t2; \
	vpaddb v2, v2, v2; \
	vpabsb t2, t2; \
	\
	vpor t0, v1, v1; \
	\
	vpcmpgtb v3, zero, t0; \
	vpaddb v3, v3, v3; \
	vpabsb t0, t0; \
	\
	vpor t1, v2, v2; \
	vpor t2, v3, v3; \
	vpor t0, v0, v0;
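
/*
 * Scalar view of rol32_1_32 (a sketch): v0..v3 hold bytes 0 (least
 * significant) through 3 (most significant) of each 32-bit integer,
 * so rotating left by one is a per-byte left shift plus a carry of
 * each byte's MSB into the next slice, wrapping from v3 back to v0:
 *
 *	v0' = (v0 << 1) | (v3 >> 7);
 *	v1' = (v1 << 1) | (v0 >> 7);	... and so on, per byte lane
 *
 * vpcmpgtb against zero extracts each byte's MSB as 0xff, vpabsb
 * turns that into 0x01, and vpaddb v, v, v is the per-byte shift.
 */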

/*
 * IN/OUT:
 *  l: byte-sliced AB state, in memory and in l0..l7
 *  r: byte-sliced CD state in memory
 */
#define fls32(l, l0, l1, l2, l3, l4, l5, l6, l7, r, t0, t1, t2, t3, tt0, \
	      tt1, tt2, tt3, kll, klr, krl, krr) \
	/* \
	 * t0 = kll; \
	 * t0 &= ll; \
	 * lr ^= rol32(t0, 1); \
	 */ \
	vpbroadcastd kll, t0; /* only lowest 32-bit used */ \
	vpxor tt0, tt0, tt0; \
	vpshufb tt0, t0, t3; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t2; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t1; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t0; \
	\
	vpand l0, t0, t0; \
	vpand l1, t1, t1; \
	vpand l2, t2, t2; \
	vpand l3, t3, t3; \
	\
	rol32_1_32(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \
	\
	vpxor l4, t0, l4; \
	vpbroadcastd krr, t0; /* only lowest 32-bit used */ \
	vmovdqu l4, 4 * 32(l); \
	vpxor l5, t1, l5; \
	vmovdqu l5, 5 * 32(l); \
	vpxor l6, t2, l6; \
	vmovdqu l6, 6 * 32(l); \
	vpxor l7, t3, l7; \
	vmovdqu l7, 7 * 32(l); \
	\
	/* \
	 * t2 = krr; \
	 * t2 |= rr; \
	 * rl ^= t2; \
	 */ \
	\
	vpshufb tt0, t0, t3; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t2; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t1; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t0; \
	\
	vpor 4 * 32(r), t0, t0; \
	vpor 5 * 32(r), t1, t1; \
	vpor 6 * 32(r), t2, t2; \
	vpor 7 * 32(r), t3, t3; \
	\
	vpxor 0 * 32(r), t0, t0; \
	vpxor 1 * 32(r), t1, t1; \
	vpxor 2 * 32(r), t2, t2; \
	vpxor 3 * 32(r), t3, t3; \
	vmovdqu t0, 0 * 32(r); \
	vpbroadcastd krl, t0; /* only lowest 32-bit used */ \
	vmovdqu t1, 1 * 32(r); \
	vmovdqu t2, 2 * 32(r); \
	vmovdqu t3, 3 * 32(r); \
	\
	/* \
	 * t2 = krl; \
	 * t2 &= rl; \
	 * rr ^= rol32(t2, 1); \
	 */ \
	vpshufb tt0, t0, t3; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t2; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t1; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t0; \
	\
	vpand 0 * 32(r), t0, t0; \
	vpand 1 * 32(r), t1, t1; \
	vpand 2 * 32(r), t2, t2; \
	vpand 3 * 32(r), t3, t3; \
	\
	rol32_1_32(t3, t2, t1, t0, tt1, tt2, tt3, tt0); \
	\
	vpxor 4 * 32(r), t0, t0; \
	vpxor 5 * 32(r), t1, t1; \
	vpxor 6 * 32(r), t2, t2; \
	vpxor 7 * 32(r), t3, t3; \
	vmovdqu t0, 4 * 32(r); \
	vpbroadcastd klr, t0; /* only lowest 32-bit used */ \
	vmovdqu t1, 5 * 32(r); \
	vmovdqu t2, 6 * 32(r); \
	vmovdqu t3, 7 * 32(r); \
	\
	/* \
	 * t0 = klr; \
	 * t0 |= lr; \
	 * ll ^= t0; \
	 */ \
	\
	vpshufb tt0, t0, t3; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t2; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t1; \
	vpsrldq $1, t0, t0; \
	vpshufb tt0, t0, t0; \
	\
	vpor l4, t0, t0; \
	vpor l5, t1, t1; \
	vpor l6, t2, t2; \
	vpor l7, t3, t3; \
	\
	vpxor l0, t0, l0; \
	vmovdqu l0, 0 * 32(l); \
	vpxor l1, t1, l1; \
	vmovdqu l1, 1 * 32(l); \
	vpxor l2, t2, l2; \
	vmovdqu l2, 2 * 32(l); \
	vpxor l3, t3, l3; \
	vmovdqu l3, 3 * 32(l);
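
/*
 * Taken together, the four pseudo-code fragments above are the FL and
 * FL^-1 functions from the Camellia specification, applied to the
 * 32-bit halves ll||lr (the l state) and rl||rr (the r state). As
 * straight-line C:
 *
 *	lr ^= rol32(ll & kll, 1);	- FL
 *	rl ^= (rr | krr);		- FL^-1
 *	rr ^= rol32(rl & krl, 1);	- FL^-1
 *	ll ^= (lr | klr);		- FL
 */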

#define transpose_4x4(x0, x1, x2, x3, t1, t2) \
	vpunpckhdq x1, x0, t2; \
	vpunpckldq x1, x0, x0; \
	\
	vpunpckldq x3, x2, t1; \
	vpunpckhdq x3, x2, x2; \
	\
	vpunpckhqdq t1, x0, x1; \
	vpunpcklqdq t1, x0, x0; \
	\
	vpunpckhqdq x2, t2, x3; \
	vpunpcklqdq x2, t2, x2;
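
/*
 * The same 4x4 dword transpose expressed with AVX2 intrinsics (a
 * sketch; the transpose happens independently in each 128-bit lane,
 * and the operand order mirrors the AT&T source above):
 *
 *	t2 = _mm256_unpackhi_epi32(x0, x1);
 *	x0 = _mm256_unpacklo_epi32(x0, x1);
 *	t1 = _mm256_unpacklo_epi32(x2, x3);
 *	x2 = _mm256_unpackhi_epi32(x2, x3);
 *	x1 = _mm256_unpackhi_epi64(x0, t1);
 *	x0 = _mm256_unpacklo_epi64(x0, t1);
 *	x3 = _mm256_unpackhi_epi64(t2, x2);
 *	x2 = _mm256_unpacklo_epi64(t2, x2);
 */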

#define byteslice_16x16b_fast(a0, b0, c0, d0, a1, b1, c1, d1, a2, b2, c2, d2, \
			      a3, b3, c3, d3, st0, st1) \
	vmovdqu d2, st0; \
	vmovdqu d3, st1; \
	transpose_4x4(a0, a1, a2, a3, d2, d3); \
	transpose_4x4(b0, b1, b2, b3, d2, d3); \
	vmovdqu st0, d2; \
	vmovdqu st1, d3; \
	\
	vmovdqu a0, st0; \
	vmovdqu a1, st1; \
	transpose_4x4(c0, c1, c2, c3, a0, a1); \
	transpose_4x4(d0, d1, d2, d3, a0, a1); \
	\
	vbroadcasti128 .Lshufb_16x16b(%rip), a0; \
	vmovdqu st1, a1; \
	vpshufb a0, a2, a2; \
	vpshufb a0, a3, a3; \
	vpshufb a0, b0, b0; \
	vpshufb a0, b1, b1; \
	vpshufb a0, b2, b2; \
	vpshufb a0, b3, b3; \
	vpshufb a0, a1, a1; \
	vpshufb a0, c0, c0; \
	vpshufb a0, c1, c1; \
	vpshufb a0, c2, c2; \
	vpshufb a0, c3, c3; \
	vpshufb a0, d0, d0; \
	vpshufb a0, d1, d1; \
	vpshufb a0, d2, d2; \
	vpshufb a0, d3, d3; \
	vmovdqu d3, st1; \
	vmovdqu st0, d3; \
	vpshufb a0, d3, a0; \
	vmovdqu d2, st0; \
	\
	transpose_4x4(a0, b0, c0, d0, d2, d3); \
	transpose_4x4(a1, b1, c1, d1, d2, d3); \
	vmovdqu st0, d2; \
	vmovdqu st1, d3; \
	\
	vmovdqu b0, st0; \
	vmovdqu b1, st1; \
	transpose_4x4(a2, b2, c2, d2, b0, b1); \
	transpose_4x4(a3, b3, c3, d3, b0, b1); \
	vmovdqu st0, b0; \
	vmovdqu st1, b1; \
	/* does not adjust output bytes inside vectors */
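
/*
 * Net effect of the byteslicing: of the sixteen vectors, vector k ends
 * up holding byte position k of all 32 blocks (sixteen blocks per
 * 128-bit lane), which is what allows a single vaesenclast or vpshufb
 * to process the same byte of every block at once. As noted above, the
 * block order inside each vector is left permuted rather than
 * re-adjusted.
 */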

/* load blocks to registers and apply pre-whitening */
#define inpack32_pre(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		     y6, y7, rio, key) \
	vpbroadcastq key, x0; \
	vpshufb .Lpack_bswap(%rip), x0, x0; \
	\
	vpxor 0 * 32(rio), x0, y7; \
	vpxor 1 * 32(rio), x0, y6; \
	vpxor 2 * 32(rio), x0, y5; \
	vpxor 3 * 32(rio), x0, y4; \
	vpxor 4 * 32(rio), x0, y3; \
	vpxor 5 * 32(rio), x0, y2; \
	vpxor 6 * 32(rio), x0, y1; \
	vpxor 7 * 32(rio), x0, y0; \
	vpxor 8 * 32(rio), x0, x7; \
	vpxor 9 * 32(rio), x0, x6; \
	vpxor 10 * 32(rio), x0, x5; \
	vpxor 11 * 32(rio), x0, x4; \
	vpxor 12 * 32(rio), x0, x3; \
	vpxor 13 * 32(rio), x0, x2; \
	vpxor 14 * 32(rio), x0, x1; \
	vpxor 15 * 32(rio), x0, x0;
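
/*
 * In scalar terms (a sketch): every 16-byte block is XORed with the
 * broadcast 64-bit whitening subkey before byteslicing. .Lpack_bswap
 * both byte-swaps the subkey into the blocks' byte order and, via its
 * 0x80 indices, zeroes the other eight bytes of each 128-bit lane:
 *
 *	for (i = 0; i < 32; i++)
 *		dst[i] = src[i] ^ kw;	- kw: 128-bit, high half zero
 */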

/* byteslice pre-whitened blocks and store to temporary memory */
#define inpack32_post(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		      y6, y7, mem_ab, mem_cd) \
	byteslice_16x16b_fast(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, \
			      y4, y5, y6, y7, (mem_ab), (mem_cd)); \
	\
	vmovdqu x0, 0 * 32(mem_ab); \
	vmovdqu x1, 1 * 32(mem_ab); \
	vmovdqu x2, 2 * 32(mem_ab); \
	vmovdqu x3, 3 * 32(mem_ab); \
	vmovdqu x4, 4 * 32(mem_ab); \
	vmovdqu x5, 5 * 32(mem_ab); \
	vmovdqu x6, 6 * 32(mem_ab); \
	vmovdqu x7, 7 * 32(mem_ab); \
	vmovdqu y0, 0 * 32(mem_cd); \
	vmovdqu y1, 1 * 32(mem_cd); \
	vmovdqu y2, 2 * 32(mem_cd); \
	vmovdqu y3, 3 * 32(mem_cd); \
	vmovdqu y4, 4 * 32(mem_cd); \
	vmovdqu y5, 5 * 32(mem_cd); \
	vmovdqu y6, 6 * 32(mem_cd); \
	vmovdqu y7, 7 * 32(mem_cd);

/* de-byteslice, apply post-whitening and store blocks */
#define outunpack32(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, \
		    y5, y6, y7, key, stack_tmp0, stack_tmp1) \
	byteslice_16x16b_fast(y0, y4, x0, x4, y1, y5, x1, x5, y2, y6, x2, x6, \
			      y3, y7, x3, x7, stack_tmp0, stack_tmp1); \
	\
	vmovdqu x0, stack_tmp0; \
	\
	vpbroadcastq key, x0; \
	vpshufb .Lpack_bswap(%rip), x0, x0; \
	\
	vpxor x0, y7, y7; \
	vpxor x0, y6, y6; \
	vpxor x0, y5, y5; \
	vpxor x0, y4, y4; \
	vpxor x0, y3, y3; \
	vpxor x0, y2, y2; \
	vpxor x0, y1, y1; \
	vpxor x0, y0, y0; \
	vpxor x0, x7, x7; \
	vpxor x0, x6, x6; \
	vpxor x0, x5, x5; \
	vpxor x0, x4, x4; \
	vpxor x0, x3, x3; \
	vpxor x0, x2, x2; \
	vpxor x0, x1, x1; \
	vpxor stack_tmp0, x0, x0;

#define write_output(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, \
		     y6, y7, rio) \
	vmovdqu x0, 0 * 32(rio); \
	vmovdqu x1, 1 * 32(rio); \
	vmovdqu x2, 2 * 32(rio); \
	vmovdqu x3, 3 * 32(rio); \
	vmovdqu x4, 4 * 32(rio); \
	vmovdqu x5, 5 * 32(rio); \
	vmovdqu x6, 6 * 32(rio); \
	vmovdqu x7, 7 * 32(rio); \
	vmovdqu y0, 8 * 32(rio); \
	vmovdqu y1, 9 * 32(rio); \
	vmovdqu y2, 10 * 32(rio); \
	vmovdqu y3, 11 * 32(rio); \
	vmovdqu y4, 12 * 32(rio); \
	vmovdqu y5, 13 * 32(rio); \
	vmovdqu y6, 14 * 32(rio); \
	vmovdqu y7, 15 * 32(rio);

.section .rodata.cst32.shufb_16x16b, "aM", @progbits, 32
.align 32
#define SHUFB_BYTES(idx) \
	0 + (idx), 4 + (idx), 8 + (idx), 12 + (idx)
.Lshufb_16x16b:
	.byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3)
	.byte SHUFB_BYTES(0), SHUFB_BYTES(1), SHUFB_BYTES(2), SHUFB_BYTES(3)

.section .rodata.cst32.pack_bswap, "aM", @progbits, 32
.align 32
.Lpack_bswap:
	.long 0x00010203, 0x04050607, 0x80808080, 0x80808080
	.long 0x00010203, 0x04050607, 0x80808080, 0x80808080

/* NB: section is mergeable, all elements must be aligned 16-byte blocks */
.section .rodata.cst16, "aM", @progbits, 16
.align 16

/*
 * pre-SubByte transform
 *
 * pre-lookup for sbox1, sbox2, sbox3:
 *   swap_bitendianness(
 *       isom_map_camellia_to_aes(
 *           camellia_f(
 *               swap_bitendianness(in)
 *           )
 *       )
 *   )
 *
 * (note: '⊕ 0xc5' inside camellia_f())
 */
.Lpre_tf_lo_s1:
	.byte 0x45, 0xe8, 0x40, 0xed, 0x2e, 0x83, 0x2b, 0x86
	.byte 0x4b, 0xe6, 0x4e, 0xe3, 0x20, 0x8d, 0x25, 0x88
.Lpre_tf_hi_s1:
	.byte 0x00, 0x51, 0xf1, 0xa0, 0x8a, 0xdb, 0x7b, 0x2a
	.byte 0x09, 0x58, 0xf8, 0xa9, 0x83, 0xd2, 0x72, 0x23

/*
 * pre-SubByte transform
 *
 * pre-lookup for sbox4:
 *   swap_bitendianness(
 *       isom_map_camellia_to_aes(
 *           camellia_f(
 *               swap_bitendianness(in <<< 1)
 *           )
 *       )
 *   )
 *
 * (note: '⊕ 0xc5' inside camellia_f())
 */
.Lpre_tf_lo_s4:
	.byte 0x45, 0x40, 0x2e, 0x2b, 0x4b, 0x4e, 0x20, 0x25
	.byte 0x14, 0x11, 0x7f, 0x7a, 0x1a, 0x1f, 0x71, 0x74
.Lpre_tf_hi_s4:
	.byte 0x00, 0xf1, 0x8a, 0x7b, 0x09, 0xf8, 0x83, 0x72
	.byte 0xad, 0x5c, 0x27, 0xd6, 0xa4, 0x55, 0x2e, 0xdf

/*
 * post-SubByte transform
 *
 * post-lookup for sbox1, sbox4:
 *  swap_bitendianness(
 *      camellia_h(
 *          isom_map_aes_to_camellia(
 *              swap_bitendianness(
 *                  aes_inverse_affine_transform(in)
 *              )
 *          )
 *      )
 *  )
 *
 * (note: '⊕ 0x6e' inside camellia_h())
 */
.Lpost_tf_lo_s1:
	.byte 0x3c, 0xcc, 0xcf, 0x3f, 0x32, 0xc2, 0xc1, 0x31
	.byte 0xdc, 0x2c, 0x2f, 0xdf, 0xd2, 0x22, 0x21, 0xd1
.Lpost_tf_hi_s1:
	.byte 0x00, 0xf9, 0x86, 0x7f, 0xd7, 0x2e, 0x51, 0xa8
	.byte 0xa4, 0x5d, 0x22, 0xdb, 0x73, 0x8a, 0xf5, 0x0c

/*
 * post-SubByte transform
 *
 * post-lookup for sbox2:
 *  swap_bitendianness(
 *      camellia_h(
 *          isom_map_aes_to_camellia(
 *              swap_bitendianness(
 *                  aes_inverse_affine_transform(in)
 *              )
 *          )
 *      )
 *  ) <<< 1
 *
 * (note: '⊕ 0x6e' inside camellia_h())
 */
.Lpost_tf_lo_s2:
	.byte 0x78, 0x99, 0x9f, 0x7e, 0x64, 0x85, 0x83, 0x62
	.byte 0xb9, 0x58, 0x5e, 0xbf, 0xa5, 0x44, 0x42, 0xa3
.Lpost_tf_hi_s2:
	.byte 0x00, 0xf3, 0x0d, 0xfe, 0xaf, 0x5c, 0xa2, 0x51
	.byte 0x49, 0xba, 0x44, 0xb7, 0xe6, 0x15, 0xeb, 0x18

/*
 * post-SubByte transform
 *
 * post-lookup for sbox3:
 *  swap_bitendianness(
 *      camellia_h(
 *          isom_map_aes_to_camellia(
 *              swap_bitendianness(
 *                  aes_inverse_affine_transform(in)
 *              )
 *          )
 *      )
 *  ) >>> 1
 *
 * (note: '⊕ 0x6e' inside camellia_h())
 */
.Lpost_tf_lo_s3:
	.byte 0x1e, 0x66, 0xe7, 0x9f, 0x19, 0x61, 0xe0, 0x98
	.byte 0x6e, 0x16, 0x97, 0xef, 0x69, 0x11, 0x90, 0xe8
.Lpost_tf_hi_s3:
	.byte 0x00, 0xfc, 0x43, 0xbf, 0xeb, 0x17, 0xa8, 0x54
	.byte 0x52, 0xae, 0x11, 0xed, 0xb9, 0x45, 0xfa, 0x06

/* For isolating SubBytes from AESENCLAST, inverse shift row */
.Linv_shift_row:
	.byte 0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b
	.byte 0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03
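
/*
 * This is the usual trick for getting a bare AES SubBytes out of
 * AES-NI: AESENCLAST computes AddRoundKey(SubBytes(ShiftRows(state))),
 * so shuffling with .Linv_shift_row first and using an all-zero round
 * key leaves exactly SubBytes(). An intrinsics sketch of the idea
 * (immintrin.h, built with AES-NI enabled):
 *
 *	static __m128i aes_subbytes(__m128i x)
 *	{
 *		const __m128i inv_sr = _mm_setr_epi8(
 *			0x00, 0x0d, 0x0a, 0x07, 0x04, 0x01, 0x0e, 0x0b,
 *			0x08, 0x05, 0x02, 0x0f, 0x0c, 0x09, 0x06, 0x03);
 *		x = _mm_shuffle_epi8(x, inv_sr);
 *		return _mm_aesenclast_si128(x, _mm_setzero_si128());
 *	}
 */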

.section .rodata.cst4.L0f0f0f0f, "aM", @progbits, 4
.align 4
/* 4-bit mask */
.L0f0f0f0f:
	.long 0x0f0f0f0f

.text

SYM_FUNC_START_LOCAL(__camellia_enc_blk32)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rax: temporary storage, 512 bytes
	 *	%ymm0..%ymm15: 32 plaintext blocks
	 * output:
	 *	%ymm0..%ymm15: 32 encrypted blocks, order swapped:
	 *	 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8
	 */
	FRAME_BEGIN

	leaq 8 * 32(%rax), %rcx;

	inpack32_post(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		      %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		      %ymm15, %rax, %rcx);

	enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 0);

	fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
	      %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
	      %ymm15,
	      ((key_table + (8) * 8) + 0)(CTX),
	      ((key_table + (8) * 8) + 4)(CTX),
	      ((key_table + (8) * 8) + 8)(CTX),
	      ((key_table + (8) * 8) + 12)(CTX));

	enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 8);

	fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
	      %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
	      %ymm15,
	      ((key_table + (16) * 8) + 0)(CTX),
	      ((key_table + (16) * 8) + 4)(CTX),
	      ((key_table + (16) * 8) + 8)(CTX),
	      ((key_table + (16) * 8) + 12)(CTX));

	enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 16);

	movl $24, %r8d;
	cmpl $16, key_length(CTX);
	jne .Lenc_max32;

.Lenc_done:
	/* load CD for output */
	vmovdqu 0 * 32(%rcx), %ymm8;
	vmovdqu 1 * 32(%rcx), %ymm9;
	vmovdqu 2 * 32(%rcx), %ymm10;
	vmovdqu 3 * 32(%rcx), %ymm11;
	vmovdqu 4 * 32(%rcx), %ymm12;
	vmovdqu 5 * 32(%rcx), %ymm13;
	vmovdqu 6 * 32(%rcx), %ymm14;
	vmovdqu 7 * 32(%rcx), %ymm15;

	outunpack32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		    %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		    %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));

	FRAME_END
	RET;

.align 8
.Lenc_max32:
	movl $32, %r8d;

	fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
	      %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
	      %ymm15,
	      ((key_table + (24) * 8) + 0)(CTX),
	      ((key_table + (24) * 8) + 4)(CTX),
	      ((key_table + (24) * 8) + 8)(CTX),
	      ((key_table + (24) * 8) + 12)(CTX));

	enc_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 24);

	jmp .Lenc_done;
SYM_FUNC_END(__camellia_enc_blk32)

SYM_FUNC_START_LOCAL(__camellia_dec_blk32)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rax: temporary storage, 512 bytes
	 *	%r8d: 24 for 16-byte key, 32 for larger
	 *	%ymm0..%ymm15: 32 encrypted blocks
	 * output:
	 *	%ymm0..%ymm15: 32 plaintext blocks, order swapped:
	 *	 7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8
	 */
	FRAME_BEGIN

	leaq 8 * 32(%rax), %rcx;

	inpack32_post(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		      %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		      %ymm15, %rax, %rcx);

	cmpl $32, %r8d;
	je .Ldec_max32;

.Ldec_max24:
	dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 16);

	fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
	      %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
	      %ymm15,
	      ((key_table + (16) * 8) + 8)(CTX),
	      ((key_table + (16) * 8) + 12)(CTX),
	      ((key_table + (16) * 8) + 0)(CTX),
	      ((key_table + (16) * 8) + 4)(CTX));

	dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 8);

	fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
	      %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
	      %ymm15,
	      ((key_table + (8) * 8) + 8)(CTX),
	      ((key_table + (8) * 8) + 12)(CTX),
	      ((key_table + (8) * 8) + 0)(CTX),
	      ((key_table + (8) * 8) + 4)(CTX));

	dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 0);

	/* load CD for output */
	vmovdqu 0 * 32(%rcx), %ymm8;
	vmovdqu 1 * 32(%rcx), %ymm9;
	vmovdqu 2 * 32(%rcx), %ymm10;
	vmovdqu 3 * 32(%rcx), %ymm11;
	vmovdqu 4 * 32(%rcx), %ymm12;
	vmovdqu 5 * 32(%rcx), %ymm13;
	vmovdqu 6 * 32(%rcx), %ymm14;
	vmovdqu 7 * 32(%rcx), %ymm15;

	outunpack32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		    %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		    %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));

	FRAME_END
	RET;

.align 8
.Ldec_max32:
	dec_rounds32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rax, %rcx, 24);

	fls32(%rax, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
	      %rcx, %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
	      %ymm15,
	      ((key_table + (24) * 8) + 8)(CTX),
	      ((key_table + (24) * 8) + 12)(CTX),
	      ((key_table + (24) * 8) + 0)(CTX),
	      ((key_table + (24) * 8) + 4)(CTX));

	jmp .Ldec_max24;
SYM_FUNC_END(__camellia_dec_blk32)

SYM_FUNC_START(camellia_ecb_enc_32way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst (32 blocks)
	 *	%rdx: src (32 blocks)
	 */
	FRAME_BEGIN

	vzeroupper;

	inpack32_pre(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rdx, (key_table)(CTX));

	/* now dst can be used as temporary buffer (even in src == dst case) */
	movq %rsi, %rax;

	call __camellia_enc_blk32;

	write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0,
		     %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9,
		     %ymm8, %rsi);

	vzeroupper;

	FRAME_END
	RET;
SYM_FUNC_END(camellia_ecb_enc_32way)

SYM_FUNC_START(camellia_ecb_dec_32way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst (32 blocks)
	 *	%rdx: src (32 blocks)
	 */
	FRAME_BEGIN

	vzeroupper;

	cmpl $16, key_length(CTX);
	movl $32, %r8d;
	movl $24, %eax;
	cmovel %eax, %r8d; /* max */

	inpack32_pre(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rdx, (key_table)(CTX, %r8, 8));

	/* now dst can be used as temporary buffer (even in src == dst case) */
	movq %rsi, %rax;

	call __camellia_dec_blk32;

	write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0,
		     %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9,
		     %ymm8, %rsi);

	vzeroupper;

	FRAME_END
	RET;
SYM_FUNC_END(camellia_ecb_dec_32way)

SYM_FUNC_START(camellia_cbc_dec_32way)
	/* input:
	 *	%rdi: ctx, CTX
	 *	%rsi: dst (32 blocks)
	 *	%rdx: src (32 blocks)
	 */
	FRAME_BEGIN
	subq $(16 * 32), %rsp;

	vzeroupper;

	cmpl $16, key_length(CTX);
	movl $32, %r8d;
	movl $24, %eax;
	cmovel %eax, %r8d; /* max */

	inpack32_pre(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
		     %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
		     %ymm15, %rdx, (key_table)(CTX, %r8, 8));

	cmpq %rsi, %rdx;
	je .Lcbc_dec_use_stack;

	/* dst can be used as temporary storage, src is not overwritten. */
	movq %rsi, %rax;
	jmp .Lcbc_dec_continue;

.Lcbc_dec_use_stack:
	/*
	 * dst still in-use (because dst == src), so use stack for temporary
	 * storage.
	 */
	movq %rsp, %rax;

.Lcbc_dec_continue:
	call __camellia_dec_blk32;

	vmovdqu %ymm7, (%rax);
	vpxor %ymm7, %ymm7, %ymm7;
	vinserti128 $1, (%rdx), %ymm7, %ymm7;
	vpxor (%rax), %ymm7, %ymm7;
	vpxor (0 * 32 + 16)(%rdx), %ymm6, %ymm6;
	vpxor (1 * 32 + 16)(%rdx), %ymm5, %ymm5;
	vpxor (2 * 32 + 16)(%rdx), %ymm4, %ymm4;
	vpxor (3 * 32 + 16)(%rdx), %ymm3, %ymm3;
	vpxor (4 * 32 + 16)(%rdx), %ymm2, %ymm2;
	vpxor (5 * 32 + 16)(%rdx), %ymm1, %ymm1;
	vpxor (6 * 32 + 16)(%rdx), %ymm0, %ymm0;
	vpxor (7 * 32 + 16)(%rdx), %ymm15, %ymm15;
	vpxor (8 * 32 + 16)(%rdx), %ymm14, %ymm14;
	vpxor (9 * 32 + 16)(%rdx), %ymm13, %ymm13;
	vpxor (10 * 32 + 16)(%rdx), %ymm12, %ymm12;
	vpxor (11 * 32 + 16)(%rdx), %ymm11, %ymm11;
	vpxor (12 * 32 + 16)(%rdx), %ymm10, %ymm10;
	vpxor (13 * 32 + 16)(%rdx), %ymm9, %ymm9;
	vpxor (14 * 32 + 16)(%rdx), %ymm8, %ymm8;
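
	/*
	 * The vpxor chain above is CBC chaining: each plaintext block is
	 * XORed with the previous ciphertext block, which sits 16 bytes
	 * earlier in src (each ymm register holds two decrypted blocks,
	 * so the vinserti128 above builds [0, src block 0] for the first
	 * register). A scalar sketch; the IV XOR for block 0 is presumably
	 * done by the C glue code:
	 *
	 *	dst[0] = decrypt_block(src[0]);
	 *	for (i = 1; i < 32; i++)
	 *		dst[i] = decrypt_block(src[i]) ^ src[i - 1];
	 */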

	write_output(%ymm7, %ymm6, %ymm5, %ymm4, %ymm3, %ymm2, %ymm1, %ymm0,
		     %ymm15, %ymm14, %ymm13, %ymm12, %ymm11, %ymm10, %ymm9,
		     %ymm8, %rsi);

	vzeroupper;

	addq $(16 * 32), %rsp;
	FRAME_END
	RET;
SYM_FUNC_END(camellia_cbc_dec_32way)