spinlock_rt.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * PREEMPT_RT substitution for spin/rw_locks
 *
 * spinlocks and rwlocks on RT are based on rtmutexes, with a few twists to
 * resemble the non RT semantics:
 *
 * - Contrary to plain rtmutexes, spinlocks and rwlocks are state
 *   preserving. The task state is saved before blocking on the underlying
 *   rtmutex, and restored when the lock has been acquired. Regular wakeups
 *   during that time are redirected to the saved state so no wake up is
 *   missed.
 *
 * - Non RT spin/rwlocks disable preemption and eventually interrupts.
 *   Disabling preemption has the side effect of disabling migration and
 *   preventing RCU grace periods.
 *
 *   The RT substitutions explicitly disable migration and take
 *   rcu_read_lock() across the lock held section.
 */
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_SPINLOCKS
#include "rtmutex.c"
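
/*
 * Illustrative sketch: a hypothetical caller of the RT substitution.
 * spin_lock() may block on contention instead of spinning, but the held
 * section still runs migration disabled and inside an RCU read side
 * critical section, as described in the header comment. The struct and
 * function names are made up for the example.
 */
struct example_counter {
	spinlock_t	lock;
	unsigned long	count;
};

static __maybe_unused void example_count(struct example_counter *ec)
{
	spin_lock(&ec->lock);		/* may sleep on RT, never busy-waits */
	ec->count++;
	spin_unlock(&ec->lock);
}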

/*
 * __might_resched() skips the state check as rtlocks are state
 * preserving. Take RCU nesting into account as spin/read/write_lock() can
 * legitimately nest into an RCU read side critical section.
 */
#define RTLOCK_RESCHED_OFFSETS						\
	(rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT)

#define rtlock_might_resched()						\
	__might_resched(__FILE__, __LINE__, RTLOCK_RESCHED_OFFSETS)
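
/*
 * Illustrative sketch: the nesting which the RCU offset above accounts
 * for. A caller may legitimately take a spinlock_t inside an RCU read
 * side critical section on RT, as the substitution itself holds
 * rcu_read_lock() across the held section. The function name and data
 * are hypothetical.
 */
static __maybe_unused void example_rcu_nesting(spinlock_t *lock, int *val)
{
	rcu_read_lock();
	spin_lock(lock);	/* no __might_resched() splat despite RCU depth */
	(*val)++;
	spin_unlock(lock);
	rcu_read_unlock();
}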

static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
{
	lockdep_assert(!current->pi_blocked_on);

	if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		rtlock_slowlock(rtm);
}

static __always_inline void __rt_spin_lock(spinlock_t *lock)
{
	rtlock_might_resched();
	rtlock_lock(&lock->lock);
	rcu_read_lock();
	migrate_disable();
}

void __sched rt_spin_lock(spinlock_t *lock)
{
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched rt_spin_lock_nested(spinlock_t *lock, int subclass)
{
	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nested);

void __sched rt_spin_lock_nest_lock(spinlock_t *lock,
				    struct lockdep_map *nest_lock)
{
	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
	__rt_spin_lock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_nest_lock);
#endif

void __sched rt_spin_unlock(spinlock_t *lock)
{
	spin_release(&lock->dep_map, _RET_IP_);
	migrate_enable();
	rcu_read_unlock();

	if (unlikely(!rt_mutex_cmpxchg_release(&lock->lock, current, NULL)))
		rt_mutex_slowunlock(&lock->lock);
}
EXPORT_SYMBOL(rt_spin_unlock);

/*
 * Wait for the lock to get unlocked: instead of polling for an unlock
 * (like raw spinlocks do), lock and unlock, to force the kernel to
 * schedule if there's contention:
 */
void __sched rt_spin_lock_unlock(spinlock_t *lock)
{
	spin_lock(lock);
	spin_unlock(lock);
}
EXPORT_SYMBOL(rt_spin_lock_unlock);
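
/*
 * Illustrative sketch: a hypothetical synchronization pattern built on
 * rt_spin_lock_unlock(). The caller has already made the object
 * unreachable under @lock; once this returns, any holder which could
 * still observe the old state has dropped the lock. Names are made up.
 */
static __maybe_unused void example_wait_for_holders(spinlock_t *lock)
{
	/* Blocks (and PI boosts the holder) instead of busy-polling */
	rt_spin_lock_unlock(lock);
}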

static __always_inline int __rt_spin_trylock(spinlock_t *lock)
{
	int ret = 1;

	if (unlikely(!rt_mutex_cmpxchg_acquire(&lock->lock, NULL, current)))
		ret = rt_mutex_slowtrylock(&lock->lock);

	if (ret) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}

int __sched rt_spin_trylock(spinlock_t *lock)
{
	return __rt_spin_trylock(lock);
}
EXPORT_SYMBOL(rt_spin_trylock);

int __sched rt_spin_trylock_bh(spinlock_t *lock)
{
	int ret;

	local_bh_disable();
	ret = __rt_spin_trylock(lock);
	if (!ret)
		local_bh_enable();
	return ret;
}
EXPORT_SYMBOL(rt_spin_trylock_bh);
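
/*
 * Illustrative sketch: rt_spin_trylock_bh() is the backend of
 * spin_trylock_bh() on RT. On success BH stays disabled and the caller
 * pairs it with spin_unlock_bh(); on failure BH has already been
 * re-enabled above. The function name below is hypothetical.
 */
static __maybe_unused bool example_try_bh(spinlock_t *lock, unsigned long *val)
{
	if (!spin_trylock_bh(lock))
		return false;

	(*val)++;			/* softirq-safe critical section */
	spin_unlock_bh(lock);
	return true;
}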

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_spin_lock_init(spinlock_t *lock, const char *name,
			 struct lock_class_key *key, bool percpu)
{
	u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;

	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
	lockdep_init_map_type(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG,
			      LD_WAIT_INV, type);
}
EXPORT_SYMBOL(__rt_spin_lock_init);
#endif

/*
 * RT-specific reader/writer locks
 */
#define rwbase_set_and_save_current_state(state)	\
	current_save_and_set_rtlock_wait_state()

#define rwbase_restore_current_state()			\
	current_restore_rtlock_saved_state()

static __always_inline int
rwbase_rtmutex_lock_state(struct rt_mutex_base *rtm, unsigned int state)
{
	if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		rtlock_slowlock(rtm);
	return 0;
}

static __always_inline int
rwbase_rtmutex_slowlock_locked(struct rt_mutex_base *rtm, unsigned int state)
{
	rtlock_slowlock_locked(rtm);
	return 0;
}

static __always_inline void rwbase_rtmutex_unlock(struct rt_mutex_base *rtm)
{
	if (likely(rt_mutex_cmpxchg_acquire(rtm, current, NULL)))
		return;

	rt_mutex_slowunlock(rtm);
}

static __always_inline int rwbase_rtmutex_trylock(struct rt_mutex_base *rtm)
{
	if (likely(rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
		return 1;

	return rt_mutex_slowtrylock(rtm);
}

#define rwbase_signal_pending_state(state, current)	(0)

#define rwbase_pre_schedule()

#define rwbase_schedule()				\
	schedule_rtlock()

#define rwbase_post_schedule()

#include "rwbase_rt.c"
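
/*
 * Illustrative sketch: the glue above plugs the rtlock fast/slow paths
 * into rwbase_rt, with non-interruptible waits (rwbase_signal_pending_state()
 * is hard coded to 0) and schedule_rtlock() as the scheduling point. A
 * hypothetical caller of the resulting rwlock_t; names are made up.
 */
static __maybe_unused int example_rwlock_usage(rwlock_t *rwlock, int *val)
{
	int snapshot;

	read_lock(rwlock);		/* blocks on a writer instead of spinning */
	snapshot = *val;
	read_unlock(rwlock);

	write_lock(rwlock);		/* exclusive against readers and writers */
	(*val)++;
	write_unlock(rwlock);

	return snapshot;
}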

/*
 * The common functions which get wrapped into the rwlock API.
 */
int __sched rt_read_trylock(rwlock_t *rwlock)
{
	int ret;

	ret = rwbase_read_trylock(&rwlock->rwbase);
	if (ret) {
		rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}
EXPORT_SYMBOL(rt_read_trylock);

int __sched rt_write_trylock(rwlock_t *rwlock)
{
	int ret;

	ret = rwbase_write_trylock(&rwlock->rwbase);
	if (ret) {
		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
		rcu_read_lock();
		migrate_disable();
	}
	return ret;
}
EXPORT_SYMBOL(rt_write_trylock);

void __sched rt_read_lock(rwlock_t *rwlock)
{
	rtlock_might_resched();
	rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
	rwbase_read_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
	rcu_read_lock();
	migrate_disable();
}
EXPORT_SYMBOL(rt_read_lock);

void __sched rt_write_lock(rwlock_t *rwlock)
{
	rtlock_might_resched();
	rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
	rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
	rcu_read_lock();
	migrate_disable();
}
EXPORT_SYMBOL(rt_write_lock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass)
{
	rtlock_might_resched();
	rwlock_acquire(&rwlock->dep_map, subclass, 0, _RET_IP_);
	rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
	rcu_read_lock();
	migrate_disable();
}
EXPORT_SYMBOL(rt_write_lock_nested);
#endif
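
/*
 * Illustrative sketch: write_lock_nested() maps to rt_write_lock_nested()
 * on RT and lets lockdep distinguish two locks of the same class taken in
 * a well defined order, e.g. when moving data between two objects of the
 * same type. The function and the ordering rule below are hypothetical.
 */
static __maybe_unused void example_double_write_lock(rwlock_t *a, rwlock_t *b)
{
	/* Assume a stable ordering rule guarantees @a is always taken first */
	write_lock(a);
	write_lock_nested(b, SINGLE_DEPTH_NESTING);

	/* ... move data from @b's object to @a's object ... */

	write_unlock(b);
	write_unlock(a);
}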

void __sched rt_read_unlock(rwlock_t *rwlock)
{
	rwlock_release(&rwlock->dep_map, _RET_IP_);
	migrate_enable();
	rcu_read_unlock();
	rwbase_read_unlock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
}
EXPORT_SYMBOL(rt_read_unlock);

void __sched rt_write_unlock(rwlock_t *rwlock)
{
	rwlock_release(&rwlock->dep_map, _RET_IP_);
	rcu_read_unlock();
	migrate_enable();
	rwbase_write_unlock(&rwlock->rwbase);
}
EXPORT_SYMBOL(rt_write_unlock);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
		      struct lock_class_key *key)
{
	debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
	lockdep_init_map_wait(&rwlock->dep_map, name, key, 0, LD_WAIT_CONFIG);
}
EXPORT_SYMBOL(__rt_rwlock_init);
#endif