  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * sm3-ce-core.S - SM3 secure hash using ARMv8.2 Crypto Extensions
  4. *
  5. * Copyright (C) 2018 Linaro Ltd <ard.biesheuvel@linaro.org>
  6. */
  7. #include <linux/linkage.h>
  8. #include <linux/cfi_types.h>
  9. #include <asm/assembler.h>
	/*
	 * Map each vector register name of the form "vN.4s" to its register
	 * number N (symbols .Lv0.4s .. .Lv12.4s), so that the hand-encoding
	 * macros below can splice register operands into the raw opcode via
	 * .L\<operand>.  Only v0-v12 are ever passed to those macros.
	 */
	.irp		b, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
	.set		.Lv\b\().4s, \b
	.endr
  13. .macro sm3partw1, rd, rn, rm
  14. .inst 0xce60c000 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
  15. .endm
  16. .macro sm3partw2, rd, rn, rm
  17. .inst 0xce60c400 | .L\rd | (.L\rn << 5) | (.L\rm << 16)
  18. .endm
  19. .macro sm3ss1, rd, rn, rm, ra
  20. .inst 0xce400000 | .L\rd | (.L\rn << 5) | (.L\ra << 10) | (.L\rm << 16)
  21. .endm
  22. .macro sm3tt1a, rd, rn, rm, imm2
  23. .inst 0xce408000 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16)
  24. .endm
  25. .macro sm3tt1b, rd, rn, rm, imm2
  26. .inst 0xce408400 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16)
  27. .endm
  28. .macro sm3tt2a, rd, rn, rm, imm2
  29. .inst 0xce408800 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16)
  30. .endm
  31. .macro sm3tt2b, rd, rn, rm, imm2
  32. .inst 0xce408c00 | .L\rd | (.L\rn << 5) | ((\imm2) << 12) | (.L\rm << 16)
  33. .endm
	/*
	 * round <ab>, <s0>, <t0>, <t1>, <i>
	 *
	 * One SM3 compression round, operating on schedule-word lane <i>:
	 *   v8/v9 - the two halves of the working state (updated in place)
	 *   \t0   - current rotated round constant; \t1 receives \t0 rol #1
	 *           for the next round (emulated by the shl+sri pair)
	 *   v10   - W ^ W+4 term, set up by the qround macro
	 *   \s0   - current block of schedule words W
	 * <ab> selects the 'a' (rounds 0-15) or 'b' (rounds 16-63) variant
	 * of the TT1/TT2 instructions.
	 */
	.macro		round, ab, s0, t0, t1, i
	sm3ss1		v5.4s, v8.4s, \t0\().4s, v9.4s		// v5 = SS1
	shl		\t1\().4s, \t0\().4s, #1
	sri		\t1\().4s, \t0\().4s, #31		// \t1 = \t0 rol #1
	sm3tt1\ab	v8.4s, v5.4s, v10.4s, \i
	sm3tt2\ab	v9.4s, v5.4s, \s0\().4s, \i
	.endm
	/*
	 * qround <ab>, <s0>, <s1>, <s2>, <s3>[, <s4>]
	 *
	 * Perform four SM3 rounds consuming the schedule words in \s0.
	 * When \s4 is supplied, the next four schedule words are expanded
	 * into it from \s0-\s3 (via sm3partw1/sm3partw2), interleaved with
	 * the rounds to hide latency; the trailing 12 rounds omit \s4 as no
	 * further expansion is needed.  v11/v12 ping-pong the rotated round
	 * constant, with v11 (re)initialised by the caller.  v6/v7 are
	 * scratch for the expansion.
	 */
	.macro		qround, ab, s0, s1, s2, s3, s4
	.ifnb		\s4
	ext		\s4\().16b, \s1\().16b, \s2\().16b, #12
	ext		v6.16b, \s0\().16b, \s1\().16b, #12
	ext		v7.16b, \s2\().16b, \s3\().16b, #8
	sm3partw1	\s4\().4s, \s0\().4s, \s3\().4s
	.endif
	eor		v10.16b, \s0\().16b, \s1\().16b		// v10 = Wj ^ Wj+4
	round		\ab, \s0, v11, v12, 0
	round		\ab, \s0, v12, v11, 1
	round		\ab, \s0, v11, v12, 2
	round		\ab, \s0, v12, v11, 3
	.ifnb		\s4
	sm3partw2	\s4\().4s, v7.4s, v6.4s
	.endif
	.endm
	/*
	 * void sm3_ce_transform(struct sm3_state *sst, u8 const *src,
	 *			 int blocks)
	 *
	 * x0: pointer to the SM3 state (8 x 32-bit digest words)
	 * x1: input data, 'blocks' * 64 bytes
	 * w2: number of 64-byte blocks to process (assumed > 0 — TODO
	 *     confirm callers never pass 0; w2 is decremented before test)
	 *
	 * Uses only caller-saved SIMD registers v0-v16; no stack usage.
	 */
	.text
SYM_TYPED_FUNC_START(sm3_ce_transform)
	/* load state and swizzle it into the word order the SM3
	 * instructions operate on (rev64+ext reverses the 4 lanes) */
	ld1		{v8.4s-v9.4s}, [x0]
	rev64		v8.4s, v8.4s
	rev64		v9.4s, v9.4s
	ext		v8.16b, v8.16b, v8.16b, #8
	ext		v9.16b, v9.16b, v9.16b, #8

	/* s13 = T constant for rounds 0-15, s14 = (pre-rotated) T constant
	 * for rounds 16-63; upper lanes are zeroed by the scalar loads */
	adr_l		x8, .Lt
	ldp		s13, s14, [x8]

	/* load input */
0:	ld1		{v0.16b-v3.16b}, [x1], #64
	sub		w2, w2, #1

	/* keep a copy of the incoming state for the final feed-forward */
	mov		v15.16b, v8.16b
	mov		v16.16b, v9.16b

	/* big-endian schedule words: byte-swap each 32-bit word on LE */
CPU_LE(	rev32		v0.16b, v0.16b	)
CPU_LE(	rev32		v1.16b, v1.16b	)
CPU_LE(	rev32		v2.16b, v2.16b	)
CPU_LE(	rev32		v3.16b, v3.16b	)

	/* rounds 0-15: move T into lane 3, the element sm3ss1 consumes */
	ext		v11.16b, v13.16b, v13.16b, #4
	qround		a, v0, v1, v2, v3, v4
	qround		a, v1, v2, v3, v4, v0
	qround		a, v2, v3, v4, v0, v1
	qround		a, v3, v4, v0, v1, v2

	/* rounds 16-63: switch to the second constant, 'b' TT variants */
	ext		v11.16b, v14.16b, v14.16b, #4
	qround		b, v4, v0, v1, v2, v3
	qround		b, v0, v1, v2, v3, v4
	qround		b, v1, v2, v3, v4, v0
	qround		b, v2, v3, v4, v0, v1
	qround		b, v3, v4, v0, v1, v2
	qround		b, v4, v0, v1, v2, v3
	qround		b, v0, v1, v2, v3, v4
	qround		b, v1, v2, v3, v4, v0
	qround		b, v2, v3, v4, v0, v1
	/* last 12 rounds need no further schedule expansion */
	qround		b, v3, v4
	qround		b, v4, v0
	qround		b, v0, v1

	/* feed-forward: V(i+1) = compressed state ^ V(i) */
	eor		v8.16b, v8.16b, v15.16b
	eor		v9.16b, v9.16b, v16.16b

	/* handled all input blocks? */
	cbnz		w2, 0b

	/* save state: undo the lane swizzle applied on entry */
	rev64		v8.4s, v8.4s
	rev64		v9.4s, v9.4s
	ext		v8.16b, v8.16b, v8.16b, #8
	ext		v9.16b, v9.16b, v9.16b, #8
	st1		{v8.4s-v9.4s}, [x0]
	ret
SYM_FUNC_END(sm3_ce_transform)
	/*
	 * SM3 round constants.  T[0..15] = 0x79cc4519 is stored as-is
	 * (first used in round 0).  The rounds 16-63 constant 0x7a879d8a is
	 * stored pre-rotated left by 16 (= 0x9d8a7a87) because it is first
	 * used in round 16, and each round rotates the constant left by one.
	 */
	.section	".rodata", "a"
	.align		3
.Lt:	.word		0x79cc4519, 0x9d8a7a87