/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
//
// VAES and VPCLMULQDQ optimized AES-GCM for x86_64
//
// Copyright 2024 Google LLC
//
// Author: Eric Biggers <ebiggers@google.com>
//
//------------------------------------------------------------------------------
//
// This file is dual-licensed, meaning that you can use it under your choice of
// either of the following two licenses:
//
// Licensed under the Apache License 2.0 (the "License").  You may obtain a copy
// of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// or
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
//    this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//------------------------------------------------------------------------------
//
// This file implements AES-GCM (Galois/Counter Mode) for x86_64 CPUs that
// support VAES (vector AES), VPCLMULQDQ (vector carryless multiplication), and
// either AVX512 or AVX10.  Some of the functions, notably the encryption and
// decryption update functions which are the most performance-critical, are
// provided in two variants generated from a macro: one using 256-bit vectors
// (suffix: vaes_avx10_256) and one using 512-bit vectors (vaes_avx10_512).  The
// other, "shared" functions (vaes_avx10) use at most 256-bit vectors.
//
// The functions that use 512-bit vectors are intended for CPUs that support
// 512-bit vectors *and* where using them doesn't cause significant
// downclocking.  They require the following CPU features:
//
//	VAES && VPCLMULQDQ && BMI2 && ((AVX512BW && AVX512VL) || AVX10/512)
//
// The other functions require the following CPU features:
//
//	VAES && VPCLMULQDQ && BMI2 && ((AVX512BW && AVX512VL) || AVX10/256)
//
// All functions use the "System V" ABI.  The Windows ABI is not supported.
//
// Note that we use "avx10" in the names of the functions as a shorthand to
// really mean "AVX10 or a certain set of AVX512 features".  Due to Intel's
// introduction of AVX512 and then its replacement by AVX10, there doesn't seem
// to be a simple way to name things that makes sense on all CPUs.
//
// Note that the macros that support both 256-bit and 512-bit vectors could
// fairly easily be changed to support 128-bit too.  However, this would *not*
// be sufficient to allow the code to run on CPUs without AVX512 or AVX10,
// because the code heavily uses several features of these extensions other than
// the vector length: the increase in the number of SIMD registers from 16 to
// 32, masking support, and new instructions such as vpternlogd (which can do a
// three-argument XOR).  These features are very useful for AES-GCM.
  83. #include <linux/linkage.h>
  84. .section .rodata
  85. .p2align 6
  86. // A shuffle mask that reflects the bytes of 16-byte blocks
  87. .Lbswap_mask:
  88. .octa 0x000102030405060708090a0b0c0d0e0f
  89. // This is the GHASH reducing polynomial without its constant term, i.e.
  90. // x^128 + x^7 + x^2 + x, represented using the backwards mapping
  91. // between bits and polynomial coefficients.
  92. //
  93. // Alternatively, it can be interpreted as the naturally-ordered
  94. // representation of the polynomial x^127 + x^126 + x^121 + 1, i.e. the
  95. // "reversed" GHASH reducing polynomial without its x^128 term.
  96. .Lgfpoly:
  97. .octa 0xc2000000000000000000000000000001
  98. // Same as above, but with the (1 << 64) bit set.
  99. .Lgfpoly_and_internal_carrybit:
  100. .octa 0xc2000000000000010000000000000001
  101. // The below constants are used for incrementing the counter blocks.
  102. // ctr_pattern points to the four 128-bit values [0, 1, 2, 3].
  103. // inc_2blocks and inc_4blocks point to the single 128-bit values 2 and
  104. // 4. Note that the same '2' is reused in ctr_pattern and inc_2blocks.
  105. .Lctr_pattern:
  106. .octa 0
  107. .octa 1
  108. .Linc_2blocks:
  109. .octa 2
  110. .octa 3
  111. .Linc_4blocks:
  112. .octa 4
  113. // Number of powers of the hash key stored in the key struct. The powers are
  114. // stored from highest (H^NUM_H_POWERS) to lowest (H^1).
  115. #define NUM_H_POWERS 16
  116. // Offset to AES key length (in bytes) in the key struct
  117. #define OFFSETOF_AESKEYLEN 480
  118. // Offset to start of hash key powers array in the key struct
  119. #define OFFSETOF_H_POWERS 512
  120. // Offset to end of hash key powers array in the key struct.
  121. //
  122. // This is immediately followed by three zeroized padding blocks, which are
  123. // included so that partial vectors can be handled more easily. E.g. if VL=64
  124. // and two blocks remain, we load the 4 values [H^2, H^1, 0, 0]. The most
  125. // padding blocks needed is 3, which occurs if [H^1, 0, 0, 0] is loaded.
  126. #define OFFSETOFEND_H_POWERS (OFFSETOF_H_POWERS + (NUM_H_POWERS * 16))
  127. .text
  128. // Set the vector length in bytes. This sets the VL variable and defines
  129. // register aliases V0-V31 that map to the ymm or zmm registers.
  130. .macro _set_veclen vl
  131. .set VL, \vl
  132. .irp i, 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15, \
  133. 16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
  134. .if VL == 32
  135. .set V\i, %ymm\i
  136. .elseif VL == 64
  137. .set V\i, %zmm\i
  138. .else
  139. .error "Unsupported vector length"
  140. .endif
  141. .endr
  142. .endm
  143. // The _ghash_mul_step macro does one step of GHASH multiplication of the
  144. // 128-bit lanes of \a by the corresponding 128-bit lanes of \b and storing the
  145. // reduced products in \dst. \t0, \t1, and \t2 are temporary registers of the
  146. // same size as \a and \b. To complete all steps, this must invoked with \i=0
  147. // through \i=9. The division into steps allows users of this macro to
  148. // optionally interleave the computation with other instructions. Users of this
  149. // macro must preserve the parameter registers across steps.
  150. //
  151. // The multiplications are done in GHASH's representation of the finite field
  152. // GF(2^128). Elements of GF(2^128) are represented as binary polynomials
  153. // (i.e. polynomials whose coefficients are bits) modulo a reducing polynomial
  154. // G. The GCM specification uses G = x^128 + x^7 + x^2 + x + 1. Addition is
  155. // just XOR, while multiplication is more complex and has two parts: (a) do
  156. // carryless multiplication of two 128-bit input polynomials to get a 256-bit
  157. // intermediate product polynomial, and (b) reduce the intermediate product to
  158. // 128 bits by adding multiples of G that cancel out terms in it. (Adding
  159. // multiples of G doesn't change which field element the polynomial represents.)
  160. //
  161. // Unfortunately, the GCM specification maps bits to/from polynomial
  162. // coefficients backwards from the natural order. In each byte it specifies the
  163. // highest bit to be the lowest order polynomial coefficient, *not* the highest!
  164. // This makes it nontrivial to work with the GHASH polynomials. We could
  165. // reflect the bits, but x86 doesn't have an instruction that does that.
  166. //
  167. // Instead, we operate on the values without bit-reflecting them. This *mostly*
  168. // just works, since XOR and carryless multiplication are symmetric with respect
  169. // to bit order, but it has some consequences. First, due to GHASH's byte
  170. // order, by skipping bit reflection, *byte* reflection becomes necessary to
  171. // give the polynomial terms a consistent order. E.g., considering an N-bit
  172. // value interpreted using the G = x^128 + x^7 + x^2 + x + 1 convention, bits 0
  173. // through N-1 of the byte-reflected value represent the coefficients of x^(N-1)
  174. // through x^0, whereas bits 0 through N-1 of the non-byte-reflected value
  175. // represent x^7...x^0, x^15...x^8, ..., x^(N-1)...x^(N-8) which can't be worked
  176. // with. Fortunately, x86's vpshufb instruction can do byte reflection.
  177. //
  178. // Second, forgoing the bit reflection causes an extra multiple of x (still
  179. // using the G = x^128 + x^7 + x^2 + x + 1 convention) to be introduced by each
  180. // multiplication. This is because an M-bit by N-bit carryless multiplication
  181. // really produces a (M+N-1)-bit product, but in practice it's zero-extended to
  182. // M+N bits. In the G = x^128 + x^7 + x^2 + x + 1 convention, which maps bits
  183. // to polynomial coefficients backwards, this zero-extension actually changes
  184. // the product by introducing an extra factor of x. Therefore, users of this
  185. // macro must ensure that one of the inputs has an extra factor of x^-1, i.e.
  186. // the multiplicative inverse of x, to cancel out the extra x.
  187. //
  188. // Third, the backwards coefficients convention is just confusing to work with,
  189. // since it makes "low" and "high" in the polynomial math mean the opposite of
  190. // their normal meaning in computer programming. This can be solved by using an
  191. // alternative interpretation: the polynomial coefficients are understood to be
  192. // in the natural order, and the multiplication is actually \a * \b * x^-128 mod
  193. // x^128 + x^127 + x^126 + x^121 + 1. This doesn't change the inputs, outputs,
  194. // or the implementation at all; it just changes the mathematical interpretation
  195. // of what each instruction is doing. Starting from here, we'll use this
  196. // alternative interpretation, as it's easier to understand the code that way.
  197. //
  198. // Moving onto the implementation, the vpclmulqdq instruction does 64 x 64 =>
  199. // 128-bit carryless multiplication, so we break the 128 x 128 multiplication
  200. // into parts as follows (the _L and _H suffixes denote low and high 64 bits):
  201. //
  202. // LO = a_L * b_L
  203. // MI = (a_L * b_H) + (a_H * b_L)
  204. // HI = a_H * b_H
  205. //
  206. // The 256-bit product is x^128*HI + x^64*MI + LO. LO, MI, and HI are 128-bit.
  207. // Note that MI "overlaps" with LO and HI. We don't consolidate MI into LO and
  208. // HI right away, since the way the reduction works makes that unnecessary.
  209. //
  210. // For the reduction, we cancel out the low 128 bits by adding multiples of G =
  211. // x^128 + x^127 + x^126 + x^121 + 1. This is done by two iterations, each of
  212. // which cancels out the next lowest 64 bits. Consider a value x^64*A + B,
  213. // where A and B are 128-bit. Adding B_L*G to that value gives:
  214. //
  215. // x^64*A + B + B_L*G
  216. // = x^64*A + x^64*B_H + B_L + B_L*(x^128 + x^127 + x^126 + x^121 + 1)
  217. // = x^64*A + x^64*B_H + B_L + x^128*B_L + x^64*B_L*(x^63 + x^62 + x^57) + B_L
  218. // = x^64*A + x^64*B_H + x^128*B_L + x^64*B_L*(x^63 + x^62 + x^57) + B_L + B_L
  219. // = x^64*(A + B_H + x^64*B_L + B_L*(x^63 + x^62 + x^57))
  220. //
  221. // So: if we sum A, B with its halves swapped, and the low half of B times x^63
  222. // + x^62 + x^57, we get a 128-bit value C where x^64*C is congruent to the
  223. // original value x^64*A + B. I.e., the low 64 bits got canceled out.
  224. //
  225. // We just need to apply this twice: first to fold LO into MI, and second to
  226. // fold the updated MI into HI.
  227. //
  228. // The needed three-argument XORs are done using the vpternlogd instruction with
  229. // immediate 0x96, since this is faster than two vpxord instructions.
  230. //
  231. // A potential optimization, assuming that b is fixed per-key (if a is fixed
  232. // per-key it would work the other way around), is to use one iteration of the
  233. // reduction described above to precompute a value c such that x^64*c = b mod G,
  234. // and then multiply a_L by c (and implicitly by x^64) instead of by b:
  235. //
  236. // MI = (a_L * c_L) + (a_H * b_L)
  237. // HI = (a_L * c_H) + (a_H * b_H)
  238. //
  239. // This would eliminate the LO part of the intermediate product, which would
  240. // eliminate the need to fold LO into MI. This would save two instructions,
  241. // including a vpclmulqdq. However, we currently don't use this optimization
  242. // because it would require twice as many per-key precomputed values.
  243. //
  244. // Using Karatsuba multiplication instead of "schoolbook" multiplication
  245. // similarly would save a vpclmulqdq but does not seem to be worth it.
  246. .macro _ghash_mul_step i, a, b, dst, gfpoly, t0, t1, t2
  247. .if \i == 0
  248. vpclmulqdq $0x00, \a, \b, \t0 // LO = a_L * b_L
  249. vpclmulqdq $0x01, \a, \b, \t1 // MI_0 = a_L * b_H
  250. .elseif \i == 1
  251. vpclmulqdq $0x10, \a, \b, \t2 // MI_1 = a_H * b_L
  252. .elseif \i == 2
  253. vpxord \t2, \t1, \t1 // MI = MI_0 + MI_1
  254. .elseif \i == 3
  255. vpclmulqdq $0x01, \t0, \gfpoly, \t2 // LO_L*(x^63 + x^62 + x^57)
  256. .elseif \i == 4
  257. vpshufd $0x4e, \t0, \t0 // Swap halves of LO
  258. .elseif \i == 5
  259. vpternlogd $0x96, \t2, \t0, \t1 // Fold LO into MI
  260. .elseif \i == 6
  261. vpclmulqdq $0x11, \a, \b, \dst // HI = a_H * b_H
  262. .elseif \i == 7
  263. vpclmulqdq $0x01, \t1, \gfpoly, \t0 // MI_L*(x^63 + x^62 + x^57)
  264. .elseif \i == 8
  265. vpshufd $0x4e, \t1, \t1 // Swap halves of MI
  266. .elseif \i == 9
  267. vpternlogd $0x96, \t0, \t1, \dst // Fold MI into HI
  268. .endif
  269. .endm
  270. // GHASH-multiply the 128-bit lanes of \a by the 128-bit lanes of \b and store
  271. // the reduced products in \dst. See _ghash_mul_step for full explanation.
  272. .macro _ghash_mul a, b, dst, gfpoly, t0, t1, t2
  273. .irp i, 0,1,2,3,4,5,6,7,8,9
  274. _ghash_mul_step \i, \a, \b, \dst, \gfpoly, \t0, \t1, \t2
  275. .endr
  276. .endm
  277. // GHASH-multiply the 128-bit lanes of \a by the 128-bit lanes of \b and add the
  278. // *unreduced* products to \lo, \mi, and \hi.
  279. .macro _ghash_mul_noreduce a, b, lo, mi, hi, t0, t1, t2, t3
  280. vpclmulqdq $0x00, \a, \b, \t0 // a_L * b_L
  281. vpclmulqdq $0x01, \a, \b, \t1 // a_L * b_H
  282. vpclmulqdq $0x10, \a, \b, \t2 // a_H * b_L
  283. vpclmulqdq $0x11, \a, \b, \t3 // a_H * b_H
  284. vpxord \t0, \lo, \lo
  285. vpternlogd $0x96, \t2, \t1, \mi
  286. vpxord \t3, \hi, \hi
  287. .endm
  288. // Reduce the unreduced products from \lo, \mi, and \hi and store the 128-bit
  289. // reduced products in \hi. See _ghash_mul_step for explanation of reduction.
  290. .macro _ghash_reduce lo, mi, hi, gfpoly, t0
  291. vpclmulqdq $0x01, \lo, \gfpoly, \t0
  292. vpshufd $0x4e, \lo, \lo
  293. vpternlogd $0x96, \t0, \lo, \mi
  294. vpclmulqdq $0x01, \mi, \gfpoly, \t0
  295. vpshufd $0x4e, \mi, \mi
  296. vpternlogd $0x96, \t0, \mi, \hi
  297. .endm
  298. // void aes_gcm_precompute_##suffix(struct aes_gcm_key_avx10 *key);
  299. //
  300. // Given the expanded AES key |key->aes_key|, this function derives the GHASH
  301. // subkey and initializes |key->ghash_key_powers| with powers of it.
  302. //
  303. // The number of key powers initialized is NUM_H_POWERS, and they are stored in
  304. // the order H^NUM_H_POWERS to H^1. The zeroized padding blocks after the key
  305. // powers themselves are also initialized.
  306. //
  307. // This macro supports both VL=32 and VL=64. _set_veclen must have been invoked
  308. // with the desired length. In the VL=32 case, the function computes twice as
  309. // many key powers than are actually used by the VL=32 GCM update functions.
  310. // This is done to keep the key format the same regardless of vector length.
  311. .macro _aes_gcm_precompute
  312. // Function arguments
  313. .set KEY, %rdi
  314. // Additional local variables. V0-V2 and %rax are used as temporaries.
  315. .set POWERS_PTR, %rsi
  316. .set RNDKEYLAST_PTR, %rdx
  317. .set H_CUR, V3
  318. .set H_CUR_YMM, %ymm3
  319. .set H_CUR_XMM, %xmm3
  320. .set H_INC, V4
  321. .set H_INC_YMM, %ymm4
  322. .set H_INC_XMM, %xmm4
  323. .set GFPOLY, V5
  324. .set GFPOLY_YMM, %ymm5
  325. .set GFPOLY_XMM, %xmm5
  326. // Get pointer to lowest set of key powers (located at end of array).
  327. lea OFFSETOFEND_H_POWERS-VL(KEY), POWERS_PTR
  328. // Encrypt an all-zeroes block to get the raw hash subkey.
  329. movl OFFSETOF_AESKEYLEN(KEY), %eax
  330. lea 6*16(KEY,%rax,4), RNDKEYLAST_PTR
  331. vmovdqu (KEY), %xmm0 // Zero-th round key XOR all-zeroes block
  332. add $16, KEY
  333. 1:
  334. vaesenc (KEY), %xmm0, %xmm0
  335. add $16, KEY
  336. cmp KEY, RNDKEYLAST_PTR
  337. jne 1b
  338. vaesenclast (RNDKEYLAST_PTR), %xmm0, %xmm0
  339. // Reflect the bytes of the raw hash subkey.
  340. vpshufb .Lbswap_mask(%rip), %xmm0, H_CUR_XMM
  341. // Zeroize the padding blocks.
  342. vpxor %xmm0, %xmm0, %xmm0
  343. vmovdqu %ymm0, VL(POWERS_PTR)
  344. vmovdqu %xmm0, VL+2*16(POWERS_PTR)
  345. // Finish preprocessing the first key power, H^1. Since this GHASH
  346. // implementation operates directly on values with the backwards bit
  347. // order specified by the GCM standard, it's necessary to preprocess the
  348. // raw key as follows. First, reflect its bytes. Second, multiply it
  349. // by x^-1 mod x^128 + x^7 + x^2 + x + 1 (if using the backwards
  350. // interpretation of polynomial coefficients), which can also be
  351. // interpreted as multiplication by x mod x^128 + x^127 + x^126 + x^121
  352. // + 1 using the alternative, natural interpretation of polynomial
  353. // coefficients. For details, see the comment above _ghash_mul_step.
  354. //
  355. // Either way, for the multiplication the concrete operation performed
  356. // is a left shift of the 128-bit value by 1 bit, then an XOR with (0xc2
  357. // << 120) | 1 if a 1 bit was carried out. However, there's no 128-bit
  358. // wide shift instruction, so instead double each of the two 64-bit
  359. // halves and incorporate the internal carry bit into the value XOR'd.
  360. vpshufd $0xd3, H_CUR_XMM, %xmm0
  361. vpsrad $31, %xmm0, %xmm0
  362. vpaddq H_CUR_XMM, H_CUR_XMM, H_CUR_XMM
  363. vpand .Lgfpoly_and_internal_carrybit(%rip), %xmm0, %xmm0
  364. vpxor %xmm0, H_CUR_XMM, H_CUR_XMM
  365. // Load the gfpoly constant.
  366. vbroadcasti32x4 .Lgfpoly(%rip), GFPOLY
  367. // Square H^1 to get H^2.
  368. //
  369. // Note that as with H^1, all higher key powers also need an extra
  370. // factor of x^-1 (or x using the natural interpretation). Nothing
  371. // special needs to be done to make this happen, though: H^1 * H^1 would
  372. // end up with two factors of x^-1, but the multiplication consumes one.
  373. // So the product H^2 ends up with the desired one factor of x^-1.
  374. _ghash_mul H_CUR_XMM, H_CUR_XMM, H_INC_XMM, GFPOLY_XMM, \
  375. %xmm0, %xmm1, %xmm2
  376. // Create H_CUR_YMM = [H^2, H^1] and H_INC_YMM = [H^2, H^2].
  377. vinserti128 $1, H_CUR_XMM, H_INC_YMM, H_CUR_YMM
  378. vinserti128 $1, H_INC_XMM, H_INC_YMM, H_INC_YMM
  379. .if VL == 64
  380. // Create H_CUR = [H^4, H^3, H^2, H^1] and H_INC = [H^4, H^4, H^4, H^4].
  381. _ghash_mul H_INC_YMM, H_CUR_YMM, H_INC_YMM, GFPOLY_YMM, \
  382. %ymm0, %ymm1, %ymm2
  383. vinserti64x4 $1, H_CUR_YMM, H_INC, H_CUR
  384. vshufi64x2 $0, H_INC, H_INC, H_INC
  385. .endif
  386. // Store the lowest set of key powers.
  387. vmovdqu8 H_CUR, (POWERS_PTR)
  388. // Compute and store the remaining key powers. With VL=32, repeatedly
  389. // multiply [H^(i+1), H^i] by [H^2, H^2] to get [H^(i+3), H^(i+2)].
  390. // With VL=64, repeatedly multiply [H^(i+3), H^(i+2), H^(i+1), H^i] by
  391. // [H^4, H^4, H^4, H^4] to get [H^(i+7), H^(i+6), H^(i+5), H^(i+4)].
  392. mov $(NUM_H_POWERS*16/VL) - 1, %eax
  393. .Lprecompute_next\@:
  394. sub $VL, POWERS_PTR
  395. _ghash_mul H_INC, H_CUR, H_CUR, GFPOLY, V0, V1, V2
  396. vmovdqu8 H_CUR, (POWERS_PTR)
  397. dec %eax
  398. jnz .Lprecompute_next\@
  399. vzeroupper // This is needed after using ymm or zmm registers.
  400. RET
  401. .endm
  402. // XOR together the 128-bit lanes of \src (whose low lane is \src_xmm) and store
  403. // the result in \dst_xmm. This implicitly zeroizes the other lanes of dst.
  404. .macro _horizontal_xor src, src_xmm, dst_xmm, t0_xmm, t1_xmm, t2_xmm
  405. vextracti32x4 $1, \src, \t0_xmm
  406. .if VL == 32
  407. vpxord \t0_xmm, \src_xmm, \dst_xmm
  408. .elseif VL == 64
  409. vextracti32x4 $2, \src, \t1_xmm
  410. vextracti32x4 $3, \src, \t2_xmm
  411. vpxord \t0_xmm, \src_xmm, \dst_xmm
  412. vpternlogd $0x96, \t1_xmm, \t2_xmm, \dst_xmm
  413. .else
  414. .error "Unsupported vector length"
  415. .endif
  416. .endm
  417. // Do one step of the GHASH update of the data blocks given in the vector
  418. // registers GHASHDATA[0-3]. \i specifies the step to do, 0 through 9. The
  419. // division into steps allows users of this macro to optionally interleave the
  420. // computation with other instructions. This macro uses the vector register
  421. // GHASH_ACC as input/output; GHASHDATA[0-3] as inputs that are clobbered;
  422. // H_POW[4-1], GFPOLY, and BSWAP_MASK as inputs that aren't clobbered; and
  423. // GHASHTMP[0-2] as temporaries. This macro handles the byte-reflection of the
  424. // data blocks. The parameter registers must be preserved across steps.
  425. //
  426. // The GHASH update does: GHASH_ACC = H_POW4*(GHASHDATA0 + GHASH_ACC) +
  427. // H_POW3*GHASHDATA1 + H_POW2*GHASHDATA2 + H_POW1*GHASHDATA3, where the
  428. // operations are vectorized operations on vectors of 16-byte blocks. E.g.,
  429. // with VL=32 there are 2 blocks per vector and the vectorized terms correspond
  430. // to the following non-vectorized terms:
  431. //
  432. // H_POW4*(GHASHDATA0 + GHASH_ACC) => H^8*(blk0 + GHASH_ACC_XMM) and H^7*(blk1 + 0)
  433. // H_POW3*GHASHDATA1 => H^6*blk2 and H^5*blk3
  434. // H_POW2*GHASHDATA2 => H^4*blk4 and H^3*blk5
  435. // H_POW1*GHASHDATA3 => H^2*blk6 and H^1*blk7
  436. //
  437. // With VL=64, we use 4 blocks/vector, H^16 through H^1, and blk0 through blk15.
  438. //
  439. // More concretely, this code does:
  440. // - Do vectorized "schoolbook" multiplications to compute the intermediate
  441. // 256-bit product of each block and its corresponding hash key power.
  442. // There are 4*VL/16 of these intermediate products.
  443. // - Sum (XOR) the intermediate 256-bit products across vectors. This leaves
  444. // VL/16 256-bit intermediate values.
  445. // - Do a vectorized reduction of these 256-bit intermediate values to
  446. // 128-bits each. This leaves VL/16 128-bit intermediate values.
  447. // - Sum (XOR) these values and store the 128-bit result in GHASH_ACC_XMM.
  448. //
  449. // See _ghash_mul_step for the full explanation of the operations performed for
  450. // each individual finite field multiplication and reduction.
  451. .macro _ghash_step_4x i
  452. .if \i == 0
  453. vpshufb BSWAP_MASK, GHASHDATA0, GHASHDATA0
  454. vpxord GHASH_ACC, GHASHDATA0, GHASHDATA0
  455. vpshufb BSWAP_MASK, GHASHDATA1, GHASHDATA1
  456. vpshufb BSWAP_MASK, GHASHDATA2, GHASHDATA2
  457. .elseif \i == 1
  458. vpshufb BSWAP_MASK, GHASHDATA3, GHASHDATA3
  459. vpclmulqdq $0x00, H_POW4, GHASHDATA0, GHASH_ACC // LO_0
  460. vpclmulqdq $0x00, H_POW3, GHASHDATA1, GHASHTMP0 // LO_1
  461. vpclmulqdq $0x00, H_POW2, GHASHDATA2, GHASHTMP1 // LO_2
  462. .elseif \i == 2
  463. vpxord GHASHTMP0, GHASH_ACC, GHASH_ACC // sum(LO_{1,0})
  464. vpclmulqdq $0x00, H_POW1, GHASHDATA3, GHASHTMP2 // LO_3
  465. vpternlogd $0x96, GHASHTMP2, GHASHTMP1, GHASH_ACC // LO = sum(LO_{3,2,1,0})
  466. vpclmulqdq $0x01, H_POW4, GHASHDATA0, GHASHTMP0 // MI_0
  467. .elseif \i == 3
  468. vpclmulqdq $0x01, H_POW3, GHASHDATA1, GHASHTMP1 // MI_1
  469. vpclmulqdq $0x01, H_POW2, GHASHDATA2, GHASHTMP2 // MI_2
  470. vpternlogd $0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0 // sum(MI_{2,1,0})
  471. vpclmulqdq $0x01, H_POW1, GHASHDATA3, GHASHTMP1 // MI_3
  472. .elseif \i == 4
  473. vpclmulqdq $0x10, H_POW4, GHASHDATA0, GHASHTMP2 // MI_4
  474. vpternlogd $0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0 // sum(MI_{4,3,2,1,0})
  475. vpclmulqdq $0x10, H_POW3, GHASHDATA1, GHASHTMP1 // MI_5
  476. vpclmulqdq $0x10, H_POW2, GHASHDATA2, GHASHTMP2 // MI_6
  477. .elseif \i == 5
  478. vpternlogd $0x96, GHASHTMP2, GHASHTMP1, GHASHTMP0 // sum(MI_{6,5,4,3,2,1,0})
  479. vpclmulqdq $0x01, GHASH_ACC, GFPOLY, GHASHTMP2 // LO_L*(x^63 + x^62 + x^57)
  480. vpclmulqdq $0x10, H_POW1, GHASHDATA3, GHASHTMP1 // MI_7
  481. vpxord GHASHTMP1, GHASHTMP0, GHASHTMP0 // MI = sum(MI_{7,6,5,4,3,2,1,0})
  482. .elseif \i == 6
  483. vpshufd $0x4e, GHASH_ACC, GHASH_ACC // Swap halves of LO
  484. vpclmulqdq $0x11, H_POW4, GHASHDATA0, GHASHDATA0 // HI_0
  485. vpclmulqdq $0x11, H_POW3, GHASHDATA1, GHASHDATA1 // HI_1
  486. vpclmulqdq $0x11, H_POW2, GHASHDATA2, GHASHDATA2 // HI_2
  487. .elseif \i == 7
  488. vpternlogd $0x96, GHASHTMP2, GHASH_ACC, GHASHTMP0 // Fold LO into MI
  489. vpclmulqdq $0x11, H_POW1, GHASHDATA3, GHASHDATA3 // HI_3
  490. vpternlogd $0x96, GHASHDATA2, GHASHDATA1, GHASHDATA0 // sum(HI_{2,1,0})
  491. vpclmulqdq $0x01, GHASHTMP0, GFPOLY, GHASHTMP1 // MI_L*(x^63 + x^62 + x^57)
  492. .elseif \i == 8
  493. vpxord GHASHDATA3, GHASHDATA0, GHASH_ACC // HI = sum(HI_{3,2,1,0})
  494. vpshufd $0x4e, GHASHTMP0, GHASHTMP0 // Swap halves of MI
  495. vpternlogd $0x96, GHASHTMP1, GHASHTMP0, GHASH_ACC // Fold MI into HI
  496. .elseif \i == 9
  497. _horizontal_xor GHASH_ACC, GHASH_ACC_XMM, GHASH_ACC_XMM, \
  498. GHASHDATA0_XMM, GHASHDATA1_XMM, GHASHDATA2_XMM
  499. .endif
  500. .endm
  501. // Do one non-last round of AES encryption on the counter blocks in V0-V3 using
  502. // the round key that has been broadcast to all 128-bit lanes of \round_key.
  503. .macro _vaesenc_4x round_key
  504. vaesenc \round_key, V0, V0
  505. vaesenc \round_key, V1, V1
  506. vaesenc \round_key, V2, V2
  507. vaesenc \round_key, V3, V3
  508. .endm
  509. // Start the AES encryption of four vectors of counter blocks.
  510. .macro _ctr_begin_4x
  511. // Increment LE_CTR four times to generate four vectors of little-endian
  512. // counter blocks, swap each to big-endian, and store them in V0-V3.
  513. vpshufb BSWAP_MASK, LE_CTR, V0
  514. vpaddd LE_CTR_INC, LE_CTR, LE_CTR
  515. vpshufb BSWAP_MASK, LE_CTR, V1
  516. vpaddd LE_CTR_INC, LE_CTR, LE_CTR
  517. vpshufb BSWAP_MASK, LE_CTR, V2
  518. vpaddd LE_CTR_INC, LE_CTR, LE_CTR
  519. vpshufb BSWAP_MASK, LE_CTR, V3
  520. vpaddd LE_CTR_INC, LE_CTR, LE_CTR
  521. // AES "round zero": XOR in the zero-th round key.
  522. vpxord RNDKEY0, V0, V0
  523. vpxord RNDKEY0, V1, V1
  524. vpxord RNDKEY0, V2, V2
  525. vpxord RNDKEY0, V3, V3
  526. .endm
// void aes_gcm_{enc,dec}_update_##suffix(const struct aes_gcm_key_avx10 *key,
//					  const u32 le_ctr[4], u8 ghash_acc[16],
//					  const u8 *src, u8 *dst, int datalen);
//
// This macro generates a GCM encryption or decryption update function with the
// above prototype (with \enc selecting which one).  This macro supports both
// VL=32 and VL=64.  _set_veclen must have been invoked with the desired length.
//
// This function computes the next portion of the CTR keystream, XOR's it with
// |datalen| bytes from |src|, and writes the resulting encrypted or decrypted
// data to |dst|.  It also updates the GHASH accumulator |ghash_acc| using the
// next |datalen| ciphertext bytes.
//
// |datalen| must be a multiple of 16, except on the last call where it can be
// any length.  The caller must do any buffering needed to ensure this.  Both
// in-place and out-of-place en/decryption are supported.
//
// |le_ctr| must give the current counter in little-endian format.  For a new
// message, the low word of the counter must be 2.  This function loads the
// counter from |le_ctr| and increments the loaded counter as needed, but it
// does *not* store the updated counter back to |le_ctr|.  The caller must
// update |le_ctr| if any more data segments follow.  Internally, only the low
// 32-bit word of the counter is incremented, following the GCM standard.
.macro	_aes_gcm_update	enc

	// Function arguments
	.set	KEY,		%rdi
	.set	LE_CTR_PTR,	%rsi
	.set	GHASH_ACC_PTR,	%rdx
	.set	SRC,		%rcx
	.set	DST,		%r8
	.set	DATALEN,	%r9d
	.set	DATALEN64,	%r9	// Zero-extend DATALEN before using!

	// Additional local variables

	// %rax and %k1 are used as temporary registers.  LE_CTR_PTR is also
	// available as a temporary register after the counter is loaded.

	// AES key length in bytes
	.set	AESKEYLEN,	%r10d
	.set	AESKEYLEN64,	%r10

	// Pointer to the last AES round key for the chosen AES variant
	.set	RNDKEYLAST_PTR,	%r11

	// In the main loop, V0-V3 are used as AES input and output.  Elsewhere
	// they are used as temporary registers.

	// GHASHDATA[0-3] hold the ciphertext blocks and GHASH input data.
	.set	GHASHDATA0,	V4
	.set	GHASHDATA0_XMM,	%xmm4
	.set	GHASHDATA1,	V5
	.set	GHASHDATA1_XMM,	%xmm5
	.set	GHASHDATA2,	V6
	.set	GHASHDATA2_XMM,	%xmm6
	.set	GHASHDATA3,	V7

	// BSWAP_MASK is the shuffle mask for byte-reflecting 128-bit values
	// using vpshufb, copied to all 128-bit lanes.
	.set	BSWAP_MASK,	V8

	// RNDKEY temporarily holds the next AES round key.
	.set	RNDKEY,		V9

	// GHASH_ACC is the accumulator variable for GHASH.  When fully reduced,
	// only the lowest 128-bit lane can be nonzero.  When not fully reduced,
	// more than one lane may be used, and they need to be XOR'd together.
	.set	GHASH_ACC,	V10
	.set	GHASH_ACC_XMM,	%xmm10

	// LE_CTR_INC is the vector of 32-bit words that need to be added to a
	// vector of little-endian counter blocks to advance it forwards.
	.set	LE_CTR_INC,	V11

	// LE_CTR contains the next set of little-endian counter blocks.
	.set	LE_CTR,		V12

	// RNDKEY0, RNDKEYLAST, and RNDKEY_M[9-5] contain cached AES round keys,
	// copied to all 128-bit lanes.  RNDKEY0 is the zero-th round key,
	// RNDKEYLAST the last, and RNDKEY_M\i the one \i-th from the last.
	.set	RNDKEY0,	V13
	.set	RNDKEYLAST,	V14
	.set	RNDKEY_M9,	V15
	.set	RNDKEY_M8,	V16
	.set	RNDKEY_M7,	V17
	.set	RNDKEY_M6,	V18
	.set	RNDKEY_M5,	V19

	// RNDKEYLAST[0-3] temporarily store the last AES round key XOR'd with
	// the corresponding block of source data.  This is useful because
	// vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a), and key ^ b can
	// be computed in parallel with the AES rounds.
	.set	RNDKEYLAST0,	V20
	.set	RNDKEYLAST1,	V21
	.set	RNDKEYLAST2,	V22
	.set	RNDKEYLAST3,	V23

	// GHASHTMP[0-2] are temporary variables used by _ghash_step_4x.  These
	// cannot coincide with anything used for AES encryption, since for
	// performance reasons GHASH and AES encryption are interleaved.
	.set	GHASHTMP0,	V24
	.set	GHASHTMP1,	V25
	.set	GHASHTMP2,	V26

	// H_POW[4-1] contain the powers of the hash key H^(4*VL/16)...H^1.  The
	// descending numbering reflects the order of the key powers.
	.set	H_POW4,		V27
	.set	H_POW3,		V28
	.set	H_POW2,		V29
	.set	H_POW1,		V30

	// GFPOLY contains the .Lgfpoly constant, copied to all 128-bit lanes.
	.set	GFPOLY,		V31

	// Load some constants.
	vbroadcasti32x4	.Lbswap_mask(%rip), BSWAP_MASK
	vbroadcasti32x4	.Lgfpoly(%rip), GFPOLY

	// Load the GHASH accumulator and the starting counter.
	vmovdqu		(GHASH_ACC_PTR), GHASH_ACC_XMM
	vbroadcasti32x4	(LE_CTR_PTR), LE_CTR

	// Load the AES key length in bytes.
	movl		OFFSETOF_AESKEYLEN(KEY), AESKEYLEN

	// Make RNDKEYLAST_PTR point to the last AES round key.  This is the
	// round key with index 10, 12, or 14 for AES-128, AES-192, or AES-256
	// respectively.  Then load the zero-th and last round keys.
	// (Round keys are 16 bytes each; 6*16 + 4*AESKEYLEN gives the offset of
	// the last one for all three key lengths.)
	lea		6*16(KEY,AESKEYLEN64,4), RNDKEYLAST_PTR
	vbroadcasti32x4	(KEY), RNDKEY0
	vbroadcasti32x4	(RNDKEYLAST_PTR), RNDKEYLAST

	// Finish initializing LE_CTR by adding [0, 1, ...] to its low words.
	vpaddd		.Lctr_pattern(%rip), LE_CTR, LE_CTR

	// Initialize LE_CTR_INC to contain VL/16 in all 128-bit lanes.
.if VL == 32
	vbroadcasti32x4	.Linc_2blocks(%rip), LE_CTR_INC
.elseif VL == 64
	vbroadcasti32x4	.Linc_4blocks(%rip), LE_CTR_INC
.else
	.error "Unsupported vector length"
.endif

	// If there are at least 4*VL bytes of data, then continue into the loop
	// that processes 4*VL bytes of data at a time.  Otherwise skip it.
	//
	// Pre-subtracting 4*VL from DATALEN saves an instruction from the main
	// loop and also ensures that at least one write always occurs to
	// DATALEN, zero-extending it and allowing DATALEN64 to be used later.
	sub		$4*VL, DATALEN
	jl		.Lcrypt_loop_4x_done\@

	// Load powers of the hash key.
	vmovdqu8	OFFSETOFEND_H_POWERS-4*VL(KEY), H_POW4
	vmovdqu8	OFFSETOFEND_H_POWERS-3*VL(KEY), H_POW3
	vmovdqu8	OFFSETOFEND_H_POWERS-2*VL(KEY), H_POW2
	vmovdqu8	OFFSETOFEND_H_POWERS-1*VL(KEY), H_POW1

	// Main loop: en/decrypt and hash 4 vectors at a time.
	//
	// When possible, interleave the AES encryption of the counter blocks
	// with the GHASH update of the ciphertext blocks.  This improves
	// performance on many CPUs because the execution ports used by the VAES
	// instructions often differ from those used by vpclmulqdq and other
	// instructions used in GHASH.  For example, many Intel CPUs dispatch
	// vaesenc to ports 0 and 1 and vpclmulqdq to port 5.
	//
	// The interleaving is easiest to do during decryption, since during
	// decryption the ciphertext blocks are immediately available.  For
	// encryption, instead encrypt the first set of blocks, then hash those
	// blocks while encrypting the next set of blocks, repeat that as
	// needed, and finally hash the last set of blocks.

.if \enc
	// Encrypt the first 4 vectors of plaintext blocks.  Leave the resulting
	// ciphertext in GHASHDATA[0-3] for GHASH.
	_ctr_begin_4x
	lea		16(KEY), %rax
1:
	vbroadcasti32x4	(%rax), RNDKEY
	_vaesenc_4x	RNDKEY
	add		$16, %rax
	cmp		%rax, RNDKEYLAST_PTR
	jne		1b
	vpxord		0*VL(SRC), RNDKEYLAST, RNDKEYLAST0
	vpxord		1*VL(SRC), RNDKEYLAST, RNDKEYLAST1
	vpxord		2*VL(SRC), RNDKEYLAST, RNDKEYLAST2
	vpxord		3*VL(SRC), RNDKEYLAST, RNDKEYLAST3
	vaesenclast	RNDKEYLAST0, V0, GHASHDATA0
	vaesenclast	RNDKEYLAST1, V1, GHASHDATA1
	vaesenclast	RNDKEYLAST2, V2, GHASHDATA2
	vaesenclast	RNDKEYLAST3, V3, GHASHDATA3
	vmovdqu8	GHASHDATA0, 0*VL(DST)
	vmovdqu8	GHASHDATA1, 1*VL(DST)
	vmovdqu8	GHASHDATA2, 2*VL(DST)
	vmovdqu8	GHASHDATA3, 3*VL(DST)
	add		$4*VL, SRC
	add		$4*VL, DST
	sub		$4*VL, DATALEN
	jl		.Lghash_last_ciphertext_4x\@
.endif

	// Cache as many additional AES round keys as possible.
.irp i, 9,8,7,6,5
	vbroadcasti32x4	-\i*16(RNDKEYLAST_PTR), RNDKEY_M\i
.endr

.Lcrypt_loop_4x\@:

	// If decrypting, load more ciphertext blocks into GHASHDATA[0-3].  If
	// encrypting, GHASHDATA[0-3] already contain the previous ciphertext.
.if !\enc
	vmovdqu8	0*VL(SRC), GHASHDATA0
	vmovdqu8	1*VL(SRC), GHASHDATA1
	vmovdqu8	2*VL(SRC), GHASHDATA2
	vmovdqu8	3*VL(SRC), GHASHDATA3
.endif

	// Start the AES encryption of the counter blocks.
	_ctr_begin_4x
	cmp		$24, AESKEYLEN
	jl		128f	// AES-128?
	je		192f	// AES-192?
	// AES-256: do the two extra rounds that AES-192 doesn't have.
	vbroadcasti32x4	-13*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x	RNDKEY
	vbroadcasti32x4	-12*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x	RNDKEY
192:
	vbroadcasti32x4	-11*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x	RNDKEY
	vbroadcasti32x4	-10*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x	RNDKEY
128:

	// XOR the source data with the last round key, saving the result in
	// RNDKEYLAST[0-3].  This reduces latency by taking advantage of the
	// property vaesenclast(key, a) ^ b == vaesenclast(key ^ b, a).
.if \enc
	vpxord		0*VL(SRC), RNDKEYLAST, RNDKEYLAST0
	vpxord		1*VL(SRC), RNDKEYLAST, RNDKEYLAST1
	vpxord		2*VL(SRC), RNDKEYLAST, RNDKEYLAST2
	vpxord		3*VL(SRC), RNDKEYLAST, RNDKEYLAST3
.else
	vpxord		GHASHDATA0, RNDKEYLAST, RNDKEYLAST0
	vpxord		GHASHDATA1, RNDKEYLAST, RNDKEYLAST1
	vpxord		GHASHDATA2, RNDKEYLAST, RNDKEYLAST2
	vpxord		GHASHDATA3, RNDKEYLAST, RNDKEYLAST3
.endif

	// Finish the AES encryption of the counter blocks in V0-V3, interleaved
	// with the GHASH update of the ciphertext blocks in GHASHDATA[0-3].
	// (The 10 GHASH steps are spread across the remaining AES rounds.)
.irp i, 9,8,7,6,5
	_vaesenc_4x	RNDKEY_M\i
	_ghash_step_4x	(9 - \i)
.endr
.irp i, 4,3,2,1
	vbroadcasti32x4	-\i*16(RNDKEYLAST_PTR), RNDKEY
	_vaesenc_4x	RNDKEY
	_ghash_step_4x	(9 - \i)
.endr
	_ghash_step_4x	9

	// Do the last AES round.  This handles the XOR with the source data
	// too, as per the optimization described above.
	vaesenclast	RNDKEYLAST0, V0, GHASHDATA0
	vaesenclast	RNDKEYLAST1, V1, GHASHDATA1
	vaesenclast	RNDKEYLAST2, V2, GHASHDATA2
	vaesenclast	RNDKEYLAST3, V3, GHASHDATA3

	// Store the en/decrypted data to DST.
	vmovdqu8	GHASHDATA0, 0*VL(DST)
	vmovdqu8	GHASHDATA1, 1*VL(DST)
	vmovdqu8	GHASHDATA2, 2*VL(DST)
	vmovdqu8	GHASHDATA3, 3*VL(DST)

	add		$4*VL, SRC
	add		$4*VL, DST
	sub		$4*VL, DATALEN
	jge		.Lcrypt_loop_4x\@

.if \enc
.Lghash_last_ciphertext_4x\@:
	// Update GHASH with the last set of ciphertext blocks.
.irp i, 0,1,2,3,4,5,6,7,8,9
	_ghash_step_4x	\i
.endr
.endif

.Lcrypt_loop_4x_done\@:

	// Undo the extra subtraction by 4*VL and check whether data remains.
	add		$4*VL, DATALEN
	jz		.Ldone\@

	// The data length isn't a multiple of 4*VL.  Process the remaining data
	// of length 1 <= DATALEN < 4*VL, up to one vector (VL bytes) at a time.
	// Going one vector at a time may seem inefficient compared to having
	// separate code paths for each possible number of vectors remaining.
	// However, using a loop keeps the code size down, and it performs
	// surprisingly well; modern CPUs will start executing the next iteration
	// before the previous one finishes and also predict the number of loop
	// iterations.  For a similar reason, we roll up the AES rounds.
	//
	// On the last iteration, the remaining length may be less than VL.
	// Handle this using masking.
	//
	// Since there are enough key powers available for all remaining data,
	// there is no need to do a GHASH reduction after each iteration.
	// Instead, multiply each remaining block by its own key power, and only
	// do a GHASH reduction at the very end.

	// Make POWERS_PTR point to the key powers [H^N, H^(N-1), ...] where N
	// is the number of blocks that remain.
	.set	POWERS_PTR,	LE_CTR_PTR	// LE_CTR_PTR is free to be reused.
	mov		DATALEN, %eax
	neg		%rax
	and		$~15, %rax	// -round_up(DATALEN, 16)
	lea		OFFSETOFEND_H_POWERS(KEY,%rax), POWERS_PTR

	// Start collecting the unreduced GHASH intermediate value LO, MI, HI.
	.set	LO, GHASHDATA0
	.set	LO_XMM, GHASHDATA0_XMM
	.set	MI, GHASHDATA1
	.set	MI_XMM, GHASHDATA1_XMM
	.set	HI, GHASHDATA2
	.set	HI_XMM, GHASHDATA2_XMM
	vpxor		LO_XMM, LO_XMM, LO_XMM
	vpxor		MI_XMM, MI_XMM, MI_XMM
	vpxor		HI_XMM, HI_XMM, HI_XMM

.Lcrypt_loop_1x\@:

	// Select the appropriate mask for this iteration: all 1's if
	// DATALEN >= VL, otherwise DATALEN 1's.  Do this branchlessly using the
	// bzhi instruction from BMI2.  (This relies on DATALEN <= 255.)
.if VL < 64
	mov		$-1, %eax
	bzhi		DATALEN, %eax, %eax
	kmovd		%eax, %k1
.else
	mov		$-1, %rax
	bzhi		DATALEN64, %rax, %rax
	kmovq		%rax, %k1
.endif

	// Encrypt a vector of counter blocks.  This does not need to be masked.
	vpshufb		BSWAP_MASK, LE_CTR, V0
	vpaddd		LE_CTR_INC, LE_CTR, LE_CTR
	vpxord		RNDKEY0, V0, V0
	lea		16(KEY), %rax
1:
	vbroadcasti32x4	(%rax), RNDKEY
	vaesenc		RNDKEY, V0, V0
	add		$16, %rax
	cmp		%rax, RNDKEYLAST_PTR
	jne		1b
	vaesenclast	RNDKEYLAST, V0, V0

	// XOR the data with the appropriate number of keystream bytes.
	vmovdqu8	(SRC), V1{%k1}{z}
	vpxord		V1, V0, V0
	vmovdqu8	V0, (DST){%k1}

	// Update GHASH with the ciphertext block(s), without reducing.
	//
	// In the case of DATALEN < VL, the ciphertext is zero-padded to VL.
	// (If decrypting, it's done by the above masked load.  If encrypting,
	// it's done by the below masked register-to-register move.)  Note that
	// if DATALEN <= VL - 16, there will be additional padding beyond the
	// padding of the last block specified by GHASH itself; i.e., there may
	// be whole block(s) that get processed by the GHASH multiplication and
	// reduction instructions but should not actually be included in the
	// GHASH.  However, any such blocks are all-zeroes, and the values that
	// they're multiplied with are also all-zeroes.  Therefore they just add
	// 0 * 0 = 0 to the final GHASH result, which makes no difference.
	vmovdqu8	(POWERS_PTR), H_POW1
.if \enc
	vmovdqu8	V0, V1{%k1}{z}
.endif
	vpshufb		BSWAP_MASK, V1, V0
	vpxord		GHASH_ACC, V0, V0
	_ghash_mul_noreduce	H_POW1, V0, LO, MI, HI, GHASHDATA3, V1, V2, V3
	// GHASH_ACC was folded into the unreduced sums; clear it so it is only
	// XOR'd in once.
	vpxor		GHASH_ACC_XMM, GHASH_ACC_XMM, GHASH_ACC_XMM

	add		$VL, POWERS_PTR
	add		$VL, SRC
	add		$VL, DST
	sub		$VL, DATALEN
	jg		.Lcrypt_loop_1x\@

	// Finally, do the GHASH reduction.
	_ghash_reduce	LO, MI, HI, GFPOLY, V0
	_horizontal_xor	HI, HI_XMM, GHASH_ACC_XMM, %xmm0, %xmm1, %xmm2

.Ldone\@:
	// Store the updated GHASH accumulator back to memory.
	vmovdqu		GHASH_ACC_XMM, (GHASH_ACC_PTR)

	vzeroupper	// This is needed after using ymm or zmm registers.
	RET
.endm
// void aes_gcm_enc_final_vaes_avx10(const struct aes_gcm_key_avx10 *key,
//				     const u32 le_ctr[4], u8 ghash_acc[16],
//				     u64 total_aadlen, u64 total_datalen);
// bool aes_gcm_dec_final_vaes_avx10(const struct aes_gcm_key_avx10 *key,
//				     const u32 le_ctr[4],
//				     const u8 ghash_acc[16],
//				     u64 total_aadlen, u64 total_datalen,
//				     const u8 tag[16], int taglen);
//
// This macro generates one of the above two functions (with \enc selecting
// which one).  Both functions finish computing the GCM authentication tag by
// updating GHASH with the lengths block and encrypting the GHASH accumulator.
// |total_aadlen| and |total_datalen| must be the total length of the additional
// authenticated data and the en/decrypted data in bytes, respectively.
//
// The encryption function then stores the full-length (16-byte) computed
// authentication tag to |ghash_acc|.  The decryption function instead loads the
// expected authentication tag (the one that was transmitted) from the 16-byte
// buffer |tag|, compares the first 4 <= |taglen| <= 16 bytes of it to the
// computed tag in constant time, and returns true if and only if they match.
.macro	_aes_gcm_final	enc

	// Function arguments
	.set	KEY,		%rdi
	.set	LE_CTR_PTR,	%rsi
	.set	GHASH_ACC_PTR,	%rdx
	.set	TOTAL_AADLEN,	%rcx
	.set	TOTAL_DATALEN,	%r8
	.set	TAG,		%r9
	.set	TAGLEN,		%r10d	// Originally at 8(%rsp)

	// Additional local variables.
	// %rax, %xmm0-%xmm3, and %k1 are used as temporary registers.
	.set	AESKEYLEN,	%r11d
	.set	AESKEYLEN64,	%r11
	.set	GFPOLY,		%xmm4
	.set	BSWAP_MASK,	%xmm5
	.set	LE_CTR,		%xmm6
	.set	GHASH_ACC,	%xmm7
	.set	H_POW1,		%xmm8

	// Load some constants.
	vmovdqa		.Lgfpoly(%rip), GFPOLY
	vmovdqa		.Lbswap_mask(%rip), BSWAP_MASK

	// Load the AES key length in bytes.
	movl		OFFSETOF_AESKEYLEN(KEY), AESKEYLEN

	// Set up a counter block with 1 in the low 32-bit word.  This is the
	// counter that produces the ciphertext needed to encrypt the auth tag.
	// GFPOLY has 1 in the low word, so grab the 1 from there using a blend.
	vpblendd	$0xe, (LE_CTR_PTR), GFPOLY, LE_CTR

	// Build the lengths block and XOR it with the GHASH accumulator.
	// Although the lengths block is defined as the AAD length followed by
	// the en/decrypted data length, both in big-endian byte order, a byte
	// reflection of the full block is needed because of the way we compute
	// GHASH (see _ghash_mul_step).  By using little-endian values in the
	// opposite order, we avoid having to reflect any bytes here.
	vmovq		TOTAL_DATALEN, %xmm0
	vpinsrq		$1, TOTAL_AADLEN, %xmm0, %xmm0
	vpsllq		$3, %xmm0, %xmm0	// Bytes to bits
	vpxor		(GHASH_ACC_PTR), %xmm0, GHASH_ACC

	// Load the first hash key power (H^1), which is stored last.
	vmovdqu8	OFFSETOFEND_H_POWERS-16(KEY), H_POW1

.if !\enc
	// Prepare a mask of TAGLEN one bits.
	movl		8(%rsp), TAGLEN
	mov		$-1, %eax
	bzhi		TAGLEN, %eax, %eax
	kmovd		%eax, %k1
.endif

	// Make %rax point to the last AES round key for the chosen AES variant.
	lea		6*16(KEY,AESKEYLEN64,4), %rax

	// Start the AES encryption of the counter block by swapping the counter
	// block to big-endian and XOR-ing it with the zero-th AES round key.
	vpshufb		BSWAP_MASK, LE_CTR, %xmm0
	vpxor		(KEY), %xmm0, %xmm0

	// Complete the AES encryption and multiply GHASH_ACC by H^1.
	// Interleave the AES and GHASH instructions to improve performance.
	cmp		$24, AESKEYLEN
	jl		128f	// AES-128?
	je		192f	// AES-192?
	// AES-256: do the two extra rounds that AES-192 doesn't have.
	vaesenc		-13*16(%rax), %xmm0, %xmm0
	vaesenc		-12*16(%rax), %xmm0, %xmm0
192:
	vaesenc		-11*16(%rax), %xmm0, %xmm0
	vaesenc		-10*16(%rax), %xmm0, %xmm0
128:
.irp i, 0,1,2,3,4,5,6,7,8
	_ghash_mul_step	\i, H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
			%xmm1, %xmm2, %xmm3
	vaesenc		(\i-9)*16(%rax), %xmm0, %xmm0
.endr
	_ghash_mul_step	9, H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
			%xmm1, %xmm2, %xmm3

	// Undo the byte reflection of the GHASH accumulator.
	vpshufb		BSWAP_MASK, GHASH_ACC, GHASH_ACC

	// Do the last AES round and XOR the resulting keystream block with the
	// GHASH accumulator to produce the full computed authentication tag.
	//
	// Reduce latency by taking advantage of the property vaesenclast(key,
	// a) ^ b == vaesenclast(key ^ b, a).  I.e., XOR GHASH_ACC into the last
	// round key, instead of XOR'ing the final AES output with GHASH_ACC.
	//
	// enc_final then returns the computed auth tag, while dec_final
	// compares it with the transmitted one and returns a bool.  To compare
	// the tags, dec_final XORs them together and uses vptest to check
	// whether the result is all-zeroes.  This should be constant-time.
	// dec_final applies the vaesenclast optimization to this additional
	// value XOR'd too, using vpternlogd to XOR the last round key, GHASH
	// accumulator, and transmitted auth tag together in one instruction.
.if \enc
	vpxor		(%rax), GHASH_ACC, %xmm1
	vaesenclast	%xmm1, %xmm0, GHASH_ACC
	vmovdqu		GHASH_ACC, (GHASH_ACC_PTR)
.else
	vmovdqu		(TAG), %xmm1
	vpternlogd	$0x96, (%rax), GHASH_ACC, %xmm1
	vaesenclast	%xmm1, %xmm0, %xmm0
	xor		%eax, %eax
	vmovdqu8	%xmm0, %xmm0{%k1}{z}	// Truncate to TAGLEN bytes
	vptest		%xmm0, %xmm0
	sete		%al
.endif
	// No need for vzeroupper here, since only xmm registers were used.
	RET
.endm
// Instantiate the precompute and update functions for VL=32 (256-bit vectors,
// ymm registers).
_set_veclen 32
SYM_FUNC_START(aes_gcm_precompute_vaes_avx10_256)
	_aes_gcm_precompute
SYM_FUNC_END(aes_gcm_precompute_vaes_avx10_256)
SYM_FUNC_START(aes_gcm_enc_update_vaes_avx10_256)
	_aes_gcm_update	1
SYM_FUNC_END(aes_gcm_enc_update_vaes_avx10_256)
SYM_FUNC_START(aes_gcm_dec_update_vaes_avx10_256)
	_aes_gcm_update	0
SYM_FUNC_END(aes_gcm_dec_update_vaes_avx10_256)

// Instantiate the precompute and update functions for VL=64 (512-bit vectors,
// zmm registers).
_set_veclen 64
SYM_FUNC_START(aes_gcm_precompute_vaes_avx10_512)
	_aes_gcm_precompute
SYM_FUNC_END(aes_gcm_precompute_vaes_avx10_512)
SYM_FUNC_START(aes_gcm_enc_update_vaes_avx10_512)
	_aes_gcm_update	1
SYM_FUNC_END(aes_gcm_enc_update_vaes_avx10_512)
SYM_FUNC_START(aes_gcm_dec_update_vaes_avx10_512)
	_aes_gcm_update	0
SYM_FUNC_END(aes_gcm_dec_update_vaes_avx10_512)
// void aes_gcm_aad_update_vaes_avx10(const struct aes_gcm_key_avx10 *key,
//				      u8 ghash_acc[16],
//				      const u8 *aad, int aadlen);
//
// This function processes the AAD (Additional Authenticated Data) in GCM.
// Using the key |key|, it updates the GHASH accumulator |ghash_acc| with the
// data given by |aad| and |aadlen|.  |key->ghash_key_powers| must have been
// initialized.  On the first call, |ghash_acc| must be all zeroes.  |aadlen|
// must be a multiple of 16, except on the last call where it can be any length.
// The caller must do any buffering needed to ensure this.
//
// AES-GCM is almost always used with small amounts of AAD, less than 32 bytes.
// Therefore, for AAD processing we currently only provide this implementation
// which uses 256-bit vectors (ymm registers) and only has a 1x-wide loop.  This
// keeps the code size down, and it enables some micro-optimizations, e.g. using
// VEX-coded instructions instead of EVEX-coded to save some instruction bytes.
// To optimize for large amounts of AAD, we could implement a 4x-wide loop and
// provide a version using 512-bit vectors, but that doesn't seem to be useful.
SYM_FUNC_START(aes_gcm_aad_update_vaes_avx10)

	// Function arguments
	.set	KEY,		%rdi
	.set	GHASH_ACC_PTR,	%rsi
	.set	AAD,		%rdx
	.set	AADLEN,		%ecx
	.set	AADLEN64,	%rcx	// Zero-extend AADLEN before using!

	// Additional local variables.
	// %rax, %ymm0-%ymm3, and %k1 are used as temporary registers.
	.set	BSWAP_MASK,	%ymm4
	.set	GFPOLY,		%ymm5
	.set	GHASH_ACC,	%ymm6
	.set	GHASH_ACC_XMM,	%xmm6
	.set	H_POW1,		%ymm7

	// Load some constants.
	vbroadcasti128	.Lbswap_mask(%rip), BSWAP_MASK
	vbroadcasti128	.Lgfpoly(%rip), GFPOLY

	// Load the GHASH accumulator.
	vmovdqu		(GHASH_ACC_PTR), GHASH_ACC_XMM

	// Update GHASH with 32 bytes of AAD at a time.
	//
	// Pre-subtracting 32 from AADLEN saves an instruction from the loop and
	// also ensures that at least one write always occurs to AADLEN,
	// zero-extending it and allowing AADLEN64 to be used later.
	sub		$32, AADLEN
	jl		.Laad_loop_1x_done
	vmovdqu8	OFFSETOFEND_H_POWERS-32(KEY), H_POW1	// [H^2, H^1]
.Laad_loop_1x:
	vmovdqu		(AAD), %ymm0
	vpshufb		BSWAP_MASK, %ymm0, %ymm0
	vpxor		%ymm0, GHASH_ACC, GHASH_ACC
	_ghash_mul	H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
			%ymm0, %ymm1, %ymm2
	// Fold the upper 128-bit lane into the lower one to re-reduce the
	// accumulator to a single 128-bit value.
	vextracti128	$1, GHASH_ACC, %xmm0
	vpxor		%xmm0, GHASH_ACC_XMM, GHASH_ACC_XMM
	add		$32, AAD
	sub		$32, AADLEN
	jge		.Laad_loop_1x
.Laad_loop_1x_done:
	add		$32, AADLEN
	jz		.Laad_done

	// Update GHASH with the remaining 1 <= AADLEN < 32 bytes of AAD.
	// A mask of AADLEN one bits zero-pads the partial load to 32 bytes.
	mov		$-1, %eax
	bzhi		AADLEN, %eax, %eax
	kmovd		%eax, %k1
	vmovdqu8	(AAD), %ymm0{%k1}{z}
	neg		AADLEN64
	and		$~15, AADLEN64	// -round_up(AADLEN, 16)
	vmovdqu8	OFFSETOFEND_H_POWERS(KEY,AADLEN64), H_POW1
	vpshufb		BSWAP_MASK, %ymm0, %ymm0
	vpxor		%ymm0, GHASH_ACC, GHASH_ACC
	_ghash_mul	H_POW1, GHASH_ACC, GHASH_ACC, GFPOLY, \
			%ymm0, %ymm1, %ymm2
	vextracti128	$1, GHASH_ACC, %xmm0
	vpxor		%xmm0, GHASH_ACC_XMM, GHASH_ACC_XMM

.Laad_done:
	// Store the updated GHASH accumulator back to memory.
	vmovdqu		GHASH_ACC_XMM, (GHASH_ACC_PTR)

	vzeroupper	// This is needed after using ymm or zmm registers.
	RET
SYM_FUNC_END(aes_gcm_aad_update_vaes_avx10)
// Instantiate the tag-finalization functions.  These use only xmm registers,
// so a single version suffices for both vector lengths.
SYM_FUNC_START(aes_gcm_enc_final_vaes_avx10)
	_aes_gcm_final	1
SYM_FUNC_END(aes_gcm_enc_final_vaes_avx10)
SYM_FUNC_START(aes_gcm_dec_final_vaes_avx10)
	_aes_gcm_final	0
SYM_FUNC_END(aes_gcm_dec_final_vaes_avx10)