/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * rseq.h
 *
 * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef RSEQ_H
#define RSEQ_H

#include <stdint.h>
#include <stdbool.h>
#include <pthread.h>
#include <signal.h>
#include <sched.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/rseq.h>

/*
 * Empty code injection macros, override when testing.
 * It is important to consider that the ASM injection macros need to be
 * fully reentrant (e.g. do not modify the stack).
 */
#ifndef RSEQ_INJECT_ASM
#define RSEQ_INJECT_ASM(n)
#endif

#ifndef RSEQ_INJECT_C
#define RSEQ_INJECT_C(n)
#endif

#ifndef RSEQ_INJECT_INPUT
#define RSEQ_INJECT_INPUT
#endif

#ifndef RSEQ_INJECT_CLOBBER
#define RSEQ_INJECT_CLOBBER
#endif

#ifndef RSEQ_INJECT_FAILED
#define RSEQ_INJECT_FAILED
#endif
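
/*
 * Example (an illustrative sketch, not part of this header): a test
 * harness can override the C injection hook before including rseq.h to
 * widen the preemption window inside a critical section, e.g.:
 *
 *	#define RSEQ_INJECT_C(n)	sched_yield();
 *	#include "rseq.h"
 *
 * The ASM hooks must remain fully reentrant, so they are typically
 * restricted to instructions that do not touch the stack.
 */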

extern __thread volatile struct rseq __rseq_abi;

#define rseq_likely(x)		__builtin_expect(!!(x), 1)
#define rseq_unlikely(x)	__builtin_expect(!!(x), 0)
#define rseq_barrier()		__asm__ __volatile__("" : : : "memory")

#define RSEQ_ACCESS_ONCE(x)	(*(__volatile__ __typeof__(x) *)&(x))
#define RSEQ_WRITE_ONCE(x, v)	__extension__ ({ RSEQ_ACCESS_ONCE(x) = (v); })
#define RSEQ_READ_ONCE(x)	RSEQ_ACCESS_ONCE(x)

#define __rseq_str_1(x)		#x
#define __rseq_str(x)		__rseq_str_1(x)

#define rseq_log(fmt, args...)						       \
	fprintf(stderr, fmt " (in %s() at " __FILE__ ":" __rseq_str(__LINE__) ")\n", \
		## args, __func__)

#define rseq_bug(fmt, args...)		\
	do {				\
		rseq_log(fmt, ##args);	\
		abort();		\
	} while (0)

#if defined(__x86_64__) || defined(__i386__)
#include <rseq-x86.h>
#elif defined(__ARMEL__)
#include <rseq-arm.h>
#elif defined(__AARCH64EL__)
#include <rseq-arm64.h>
#elif defined(__PPC__)
#include <rseq-ppc.h>
#elif defined(__mips__)
#include <rseq-mips.h>
#elif defined(__s390__)
#include <rseq-s390.h>
#else
#error unsupported target
#endif

/*
 * Register rseq for the current thread. Each thread which uses
 * restartable sequences must call this once, before executing any
 * rseq critical section, to ensure those critical sections can
 * succeed. A restartable sequence executed from an unregistered
 * thread will always fail.
 */
int rseq_register_current_thread(void);

/*
 * Unregister rseq for the current thread.
 */
int rseq_unregister_current_thread(void);
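
/*
 * Example (a sketch under assumptions, not a prescribed pattern): a
 * thread entry point registers rseq up front and unregisters before
 * returning; the abort() on failure is illustrative only.
 *
 *	static void *thread_fn(void *arg)
 *	{
 *		if (rseq_register_current_thread())
 *			abort();
 *		// ... execute rseq critical sections ...
 *		if (rseq_unregister_current_thread())
 *			abort();
 *		return NULL;
 *	}
 */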

/*
 * Restartable sequence fallback for reading the current CPU number.
 */
int32_t rseq_fallback_current_cpu(void);

/*
 * Values returned can be either the current CPU number, -1 (rseq is
 * uninitialized), or -2 (rseq initialization has failed).
 */
static inline int32_t rseq_current_cpu_raw(void)
{
	return RSEQ_ACCESS_ONCE(__rseq_abi.cpu_id);
}

/*
 * Returns a possible CPU number, which is typically the current CPU.
 * The returned CPU number can be used to prepare for an rseq critical
 * section, which will confirm whether the CPU number is indeed the
 * current one, and whether rseq is initialized.
 *
 * The CPU number returned by rseq_cpu_start should always be validated
 * by passing it to an rseq asm sequence, or by comparing it to the
 * return value of rseq_current_cpu_raw() if the rseq asm sequence
 * does not need to be invoked.
 */
static inline uint32_t rseq_cpu_start(void)
{
	return RSEQ_ACCESS_ONCE(__rseq_abi.cpu_id_start);
}
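
/*
 * Example (illustrative sketch): when no rseq asm sequence needs to be
 * invoked, the speculative CPU number can be validated by comparison
 * with rseq_current_cpu_raw():
 *
 *	uint32_t cpu = rseq_cpu_start();
 *
 *	// ... prepare per-CPU data for "cpu" ...
 *	if ((int32_t)cpu != rseq_current_cpu_raw()) {
 *		// migrated, preempted, or rseq uninitialized: retry
 *	}
 */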

static inline uint32_t rseq_current_cpu(void)
{
	int32_t cpu;

	cpu = rseq_current_cpu_raw();
	if (rseq_unlikely(cpu < 0))
		cpu = rseq_fallback_current_cpu();
	return cpu;
}
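
/*
 * Example (hedged sketch): rseq_current_cpu() always yields a usable CPU
 * number, falling back to rseq_fallback_current_cpu() when rseq is not
 * registered, so it can index per-CPU data outside of a critical
 * section. "percpu_lock" is a hypothetical array, named only for
 * illustration.
 *
 *	uint32_t cpu = rseq_current_cpu();
 *
 *	pthread_mutex_lock(&percpu_lock[cpu].lock);
 *	// ... operate on the per-CPU shard ...
 *	pthread_mutex_unlock(&percpu_lock[cpu].lock);
 */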

static inline void rseq_clear_rseq_cs(void)
{
#ifdef __LP64__
	__rseq_abi.rseq_cs.ptr = 0;
#else
	__rseq_abi.rseq_cs.ptr.ptr32 = 0;
#endif
}

/*
 * rseq_prepare_unload() should be invoked by each thread executing a rseq
 * critical section at least once between their last critical section and
 * the unloading of the library that defines the rseq critical section
 * (struct rseq_cs). This also applies to rseq use in code generated by a
 * JIT: rseq_prepare_unload() should be invoked at least once by each
 * thread executing a rseq critical section before the memory holding the
 * struct rseq_cs is reclaimed.
 */
static inline void rseq_prepare_unload(void)
{
	rseq_clear_rseq_cs();
}
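
/*
 * Example (sketch, assuming the rseq critical sections live in a
 * dlopen()ed library): each thread that executed those critical sections
 * should call rseq_prepare_unload() before the library is dlclose()d, so
 * that no stale rseq_cs pointer refers to unmapped memory.
 *
 *	rseq_prepare_unload();	// on every such thread
 *	dlclose(handle);	// handle from a prior dlopen()
 */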

#endif /* RSEQ_H */