archrandom.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_ARCHRANDOM_H
#define _ASM_ARCHRANDOM_H

#include <linux/arm-smccc.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <asm/cpufeature.h>

#define ARM_SMCCC_TRNG_MIN_VERSION	0x10000UL

extern bool smccc_trng_available;

static inline bool __init smccc_probe_trng(void)
{
	struct arm_smccc_res res;

	/*
	 * A negative a0 is an SMCCC error code; otherwise a0 holds the
	 * implemented TRNG ABI version, which must be at least 1.0.
	 */
	arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_VERSION, &res);
	if ((s32)res.a0 < 0)
		return false;

	return res.a0 >= ARM_SMCCC_TRNG_MIN_VERSION;
}
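/*
 * Illustrative sketch, not part of the upstream header: the function
 * name and exact call site are assumptions. Boot code would cache the
 * probe result once, early during init, so that the helpers below only
 * test a plain bool on the hot path.
 */
static inline void __init example_smccc_trng_init(void)
{
	smccc_trng_available = smccc_probe_trng();
}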
static inline bool __arm64_rndr(unsigned long *v)
{
	bool ok;

	/*
	 * Reads of RNDR set PSTATE.NZCV to 0b0000 on success,
	 * and set PSTATE.NZCV to 0b0100 otherwise.
	 */
	asm volatile(
		__mrs_s("%0", SYS_RNDR_EL0) "\n"
	"	cset %w1, ne\n"
	: "=r" (*v), "=r" (ok)
	:
	: "cc");

	return ok;
}
static inline bool __arm64_rndrrs(unsigned long *v)
{
	bool ok;

	/*
	 * Reads of RNDRRS set PSTATE.NZCV to 0b0000 on success,
	 * and set PSTATE.NZCV to 0b0100 otherwise.
	 */
	asm volatile(
		__mrs_s("%0", SYS_RNDRRS_EL0) "\n"
	"	cset %w1, ne\n"
	: "=r" (*v), "=r" (ok)
	:
	: "cc");

	return ok;
}
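/*
 * Illustrative sketch, not part of the upstream header: RNDR may fail
 * transiently when the hardware cannot deliver entropy in time, so a
 * caller that really wants a value can retry a bounded number of times.
 * The bound of 10 is an arbitrary choice here, borrowed from the common
 * RDRAND retry convention on x86; the function name is hypothetical.
 */
static inline bool example_rndr_retry(unsigned long *v)
{
	int i;

	for (i = 0; i < 10; i++) {
		if (__arm64_rndr(v))
			return true;
	}

	return false;
}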
static __always_inline bool __cpu_has_rng(void)
{
	/*
	 * Before the system-wide capabilities are finalized the
	 * ARM64_HAS_RNG alternative has not been patched yet (and reads
	 * as false), so ask the current CPU directly, provided we cannot
	 * be preempted onto a different one meanwhile.
	 */
	if (unlikely(!system_capabilities_finalized() && !preemptible()))
		return this_cpu_has_cap(ARM64_HAS_RNG);

	return alternative_has_cap_unlikely(ARM64_HAS_RNG);
}
static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs)
{
	/*
	 * Only support the generic interface after we have detected
	 * the system wide capability, avoiding complexity with the
	 * cpufeature code and with potential scheduling between CPUs
	 * with and without the feature.
	 */
	if (max_longs && __cpu_has_rng() && __arm64_rndr(v))
		return 1;

	return 0;
}
static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs)
{
	if (!max_longs)
		return 0;

	/*
	 * We prefer the SMCCC call, since its semantics (return actual
	 * hardware backed entropy) is closer to the idea behind this
	 * function here than what even the RNDRRS register provides
	 * (the output of a pseudo RNG freshly seeded by a TRNG).
	 */
	if (smccc_trng_available) {
		struct arm_smccc_res res;

		max_longs = min_t(size_t, 3, max_longs);
		arm_smccc_1_1_invoke(ARM_SMCCC_TRNG_RND64, max_longs * 64, &res);
		if ((int)res.a0 >= 0) {
			/* The entropy comes back in a1..a3, lowest word in a3. */
			switch (max_longs) {
			case 3:
				*v++ = res.a1;
				fallthrough;
			case 2:
				*v++ = res.a2;
				fallthrough;
			case 1:
				*v++ = res.a3;
				break;
			}
			return max_longs;
		}
	}

	/*
	 * RNDRRS is not backed by an entropy source but by a DRBG that is
	 * reseeded after each invocation. This is not a 100% fit but good
	 * enough to implement this API if no other entropy source exists.
	 */
	if (__cpu_has_rng() && __arm64_rndrrs(v))
		return 1;

	return 0;
}
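/*
 * Illustrative sketch, not part of the upstream header: the seed
 * interface may return fewer longs than requested (the SMCCC TRNG call
 * hands back at most three per invocation), so a caller filling a
 * larger buffer would loop until the source runs dry. The function
 * name is an assumption for the example.
 */
static inline size_t example_fill_seed(unsigned long *buf, size_t nlongs)
{
	size_t filled = 0;

	while (filled < nlongs) {
		size_t got = arch_get_random_seed_longs(buf + filled,
							nlongs - filled);
		if (!got)
			break;	/* no (more) entropy available */
		filled += got;
	}

	return filled;
}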
static inline bool __init __early_cpu_has_rndr(void)
{
	/* Open code as we run prior to the first call to cpufeature. */
	unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
	return (ftr >> ID_AA64ISAR0_EL1_RNDR_SHIFT) & 0xf;
}

#endif /* _ASM_ARCHRANDOM_H */
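/*
 * Standalone userspace example, separate from the header above (an
 * illustration, not upstream code): FEAT_RNG exposes RNDR at EL0 as
 * well, so the same MRS/CSET pattern works from user code. Here
 * s3_3_c2_c4_0 is the generic encoding of RNDR for assemblers that
 * lack the named register, and HWCAP2_RNG advertises the feature via
 * the auxiliary vector.
 */
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_RNG
#define HWCAP2_RNG	(1 << 16)
#endif

int main(void)
{
	unsigned long v;
	int ok;

	if (!(getauxval(AT_HWCAP2) & HWCAP2_RNG)) {
		puts("FEAT_RNG not available");
		return 1;
	}

	/* Same pattern as the kernel helpers: Z clear (ne) means success. */
	asm volatile("mrs %0, s3_3_c2_c4_0\n"
		     "	cset %w1, ne\n"
		     : "=r" (v), "=r" (ok)
		     :
		     : "cc");

	printf("RNDR %s: 0x%016lx\n", ok ? "ok" : "failed", v);
	return 0;
}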