// sm3-riscv64-zvksh-zvkb.S
/* SPDX-License-Identifier: Apache-2.0 OR BSD-2-Clause */
//
// This file is dual-licensed, meaning that you can use it under your
// choice of either of the following two licenses:
//
// Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
//
// Licensed under the Apache License 2.0 (the "License"). You can obtain
// a copy in the file LICENSE in the source distribution or at
// https://www.openssl.org/source/license.html
//
// or
//
// Copyright (c) 2023, Christoph Müllner <christoph.muellner@vrull.eu>
// Copyright (c) 2023, Jerry Shih <jerry.shih@sifive.com>
// Copyright 2024 Google LLC
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
// 1. Redistributions of source code must retain the above copyright
//    notice, this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
//    notice, this list of conditions and the following disclaimer in the
//    documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The generated code of this file depends on the following RISC-V extensions:
// - RV64I
// - RISC-V Vector ('V') with VLEN >= 128
// - RISC-V Vector SM3 Secure Hash extension ('Zvksh')
// - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
#include <linux/cfi_types.h>

.text
.option arch, +zvksh, +zvkb

// Scalar argument registers (standard RISC-V calling convention).
#define STATEP		a0	// pointer to the eight 32-bit SM3 state words
#define DATA		a1	// pointer to the message data (64-byte blocks)
#define NUM_BLOCKS	a2	// number of blocks to process

// Vector register aliases.  Each name refers to a register *group* of two
// (LMUL=2), giving 8 x 32-bit elements with the minimum VLEN of 128; the
// aliases are spaced two registers apart (v0, v2, v4, ...) accordingly.
#define STATE		v0	// LMUL=2
#define PREV_STATE	v2	// LMUL=2
#define W0		v4	// LMUL=2
#define W1		v6	// LMUL=2
#define VTMP		v8	// LMUL=2
// Do 8 rounds of SM3 compression on STATE, consuming message schedule words
// from \w0 and \w1, and (except for the final group) extending the message
// schedule into \w0.
//
//   \i   round-group constant fed to vsm3c.vi; callers pass 0, 4, ..., 28,
//        so the four vsm3c.vi instructions below cover 8 rounds per call.
//   \w0  holds W_{0+i}..W_{7+i} on entry; overwritten with the next eight
//        schedule words W_{16+i}..W_{23+i} (unless \i == 28).
//   \w1  holds W_{8+i}..W_{15+i}; read but never written.
//
// Clobbers VTMP; STATE is updated in place.
// NOTE(review): per the Zvksh spec each vsm3c.vi performs two rounds and
// reads its two message words from fixed element positions, which is why
// the slides below re-stage the schedule words between instructions.
.macro sm3_8rounds i, w0, w1
	// Do 4 rounds using W_{0+i}..W_{7+i}.
	vsm3c.vi	STATE, \w0, \i + 0
	// Slide down by 2 so the next pair of schedule words lands in the
	// element positions vsm3c.vi reads from.
	vslidedown.vi	VTMP, \w0, 2
	vsm3c.vi	STATE, VTMP, \i + 1

	// Compute W_{4+i}..W_{11+i}: gather the upper half of \w0 (slide
	// down by 4) and the lower half of \w1 (slide up by 4) into VTMP.
	vslidedown.vi	VTMP, \w0, 4
	vslideup.vi	VTMP, \w1, 4

	// Do 4 rounds using W_{4+i}..W_{11+i}.
	vsm3c.vi	STATE, VTMP, \i + 2
	vslidedown.vi	VTMP, VTMP, 2
	vsm3c.vi	STATE, VTMP, \i + 3

.if \i < 28
	// Compute W_{16+i}..W_{23+i}.  Skipped for the last group, whose
	// extended schedule words would never be consumed.
	vsm3me.vv	\w0, \w1, \w0
.endif
	// For the next 8 rounds, w0 and w1 are swapped.
.endm
// void sm3_transform_zvksh_zvkb(u32 state[8], const u8 *data, int num_blocks);
//
// Compress num_blocks 64-byte message blocks from 'data' into the SM3 state.
// NOTE(review): NUM_BLOCKS is decremented *before* the loop-exit test, so
// this assumes num_blocks >= 1 — confirm the caller guarantees that.
SYM_TYPED_FUNC_START(sm3_transform_zvksh_zvkb)
	// Load the state and endian-swap each 32-bit word.  The matching
	// vrev8.v before the store below undoes this, so the in-memory
	// byte order of the state is preserved across the call.
	vsetivli	zero, 8, e32, m2, ta, ma
	vle32.v		STATE, (STATEP)
	vrev8.v		STATE, STATE

.Lnext_block:
	addi		NUM_BLOCKS, NUM_BLOCKS, -1

	// Save the previous state, as it's needed later.
	vmv.v.v		PREV_STATE, STATE

	// Load the next 512-bit message block into W0-W1.
	vle32.v		W0, (DATA)
	addi		DATA, DATA, 32
	vle32.v		W1, (DATA)
	addi		DATA, DATA, 32

	// Do the 64 rounds of SM3.  Each macro call does 8 rounds and (per
	// the macro's contract) leaves the two halves of the message schedule
	// swapped, hence the alternating W0/W1 argument order.
	sm3_8rounds	0, W0, W1
	sm3_8rounds	4, W1, W0
	sm3_8rounds	8, W0, W1
	sm3_8rounds	12, W1, W0
	sm3_8rounds	16, W0, W1
	sm3_8rounds	20, W1, W0
	sm3_8rounds	24, W0, W1
	sm3_8rounds	28, W1, W0

	// XOR in the previous state (the SM3 feed-forward step).
	vxor.vv		STATE, STATE, PREV_STATE

	// Repeat if more blocks remain.
	bnez		NUM_BLOCKS, .Lnext_block

	// Undo the endian swap, store the new state, and return.
	vrev8.v		STATE, STATE
	vse32.v		STATE, (STATEP)
	ret
SYM_FUNC_END(sm3_transform_zvksh_zvkb)