  1. /*
  2. * include/asm-xtensa/spinlock.h
  3. *
  4. * This file is subject to the terms and conditions of the GNU General Public
  5. * License. See the file "COPYING" in the main directory of this archive
  6. * for more details.
  7. *
  8. * Copyright (C) 2001 - 2005 Tensilica Inc.
  9. */
  10. #ifndef _XTENSA_SPINLOCK_H
  11. #define _XTENSA_SPINLOCK_H
  12. #include <asm/barrier.h>
  13. #include <asm/processor.h>
  14. /*
  15. * spinlock
  16. *
* There is at most one owner of a spinlock. There are no different
* types of spinlock owners like there are for rwlocks (see below).
  19. *
  20. * When trying to obtain a spinlock, the function "spins" forever, or busy-
  21. * waits, until the lock is obtained. When spinning, presumably some other
  22. * owner will soon give up the spinlock making it available to others. Use
  23. * the trylock functions to avoid spinning forever.
  24. *
  25. * possible values:
  26. *
  27. * 0 nobody owns the spinlock
  28. * 1 somebody owns the spinlock
  29. */
/* Nonzero iff the spinlock is currently held (slock == 1 when owned). */
#define arch_spin_is_locked(x) ((x)->slock != 0)
/*
 * Acquire @lock, spinning until it is obtained.
 *
 * Implemented with the Xtensa S32C1I conditional store: SCOMPARE1 is
 * preloaded with the expected free value (0), then "s32c1i %0, %1, 0"
 * stores 1 to lock->slock only if the word still equals SCOMPARE1,
 * and returns the word's previous value in %0.  A nonzero result means
 * another CPU holds the lock, so we retry at label 1.  The "memory"
 * clobber keeps the compiler from moving accesses across the acquire.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
		" movi %0, 0\n"          /* expected value: lock free */
		" wsr %0, scompare1\n"   /* arm SCOMPARE1 for s32c1i */
		"1: movi %0, 1\n"        /* value to store: lock held */
		" s32c1i %0, %1, 0\n"    /* CAS; %0 <- old *slock */
		" bnez %0, 1b\n"         /* was already held -> spin */
		: "=&a" (tmp)
		: "a" (&lock->slock)
		: "memory");
}
  44. /* Returns 1 if the lock is obtained, 0 otherwise. */
/*
 * Try once to acquire @lock without spinning.
 *
 * Single S32C1I attempt: store 1 iff lock->slock still equals the
 * expected free value (0) preloaded into SCOMPARE1.  The instruction
 * leaves the word's previous value in %0, so tmp == 0 exactly when the
 * lock was free and is now ours.
 *
 * Returns 1 if the lock is obtained, 0 otherwise.
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
		" movi %0, 0\n"          /* expected value: lock free */
		" wsr %0, scompare1\n"
		" movi %0, 1\n"          /* value to store: lock held */
		" s32c1i %0, %1, 0\n"    /* one-shot CAS; %0 <- old *slock */
		: "=&a" (tmp)
		: "a" (&lock->slock)
		: "memory");

	return tmp == 0 ? 1 : 0;
}
/*
 * Release @lock by storing 0 with S32RI (store 32-bit, release
 * semantics), which orders the critical section's memory accesses
 * before the lock becomes visible as free.  No CAS is needed: only
 * the owner may unlock.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
		" movi %0, 0\n"
		" s32ri %0, %1, 0\n"     /* release-store 0 -> *slock */
		: "=&a" (tmp)
		: "a" (&lock->slock)
		: "memory");
}
  68. /*
  69. * rwlock
  70. *
  71. * Read-write locks are really a more flexible spinlock. They allow
  72. * multiple readers but only one writer. Write ownership is exclusive
  73. * (i.e., all other readers and writers are blocked from ownership while
  74. * there is a write owner). These rwlocks are unfair to writers. Writers
  75. * can be starved for an indefinite time by readers.
  76. *
  77. * possible values:
  78. *
  79. * 0 nobody owns the rwlock
  80. * >0 one or more readers own the rwlock
  81. * (the positive value is the actual number of readers)
  82. * 0x80000000 one writer owns the rwlock, no other writers, no readers
  83. */
/*
 * Acquire @rw for writing, spinning until exclusive ownership is
 * obtained.
 *
 * Same S32C1I pattern as arch_spin_lock(), but the stored value is the
 * writer marker 0x80000000 (built as 1 << 31 via movi/slli, since movi
 * cannot encode the constant directly).  The CAS succeeds only when
 * rw->lock is 0, i.e. no readers and no writer.
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
		" movi %0, 0\n"          /* expected value: unowned */
		" wsr %0, scompare1\n"
		"1: movi %0, 1\n"
		" slli %0, %0, 31\n"     /* writer bit: 0x80000000 */
		" s32c1i %0, %1, 0\n"    /* CAS; %0 <- old *lock */
		" bnez %0, 1b\n"         /* readers/writer present -> spin */
		: "=&a" (tmp)
		: "a" (&rw->lock)
		: "memory");
}
  98. /* Returns 1 if the lock is obtained, 0 otherwise. */
/*
 * Try once to acquire @rw for writing, without spinning.
 *
 * One S32C1I attempt to swing rw->lock from 0 (unowned) to the writer
 * marker 0x80000000.  tmp receives the previous lock word, so tmp == 0
 * exactly when the lock was free and the write lock is now held.
 *
 * Returns 1 if the lock is obtained, 0 otherwise.
 */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
		" movi %0, 0\n"          /* expected value: unowned */
		" wsr %0, scompare1\n"
		" movi %0, 1\n"
		" slli %0, %0, 31\n"     /* writer bit: 0x80000000 */
		" s32c1i %0, %1, 0\n"    /* one-shot CAS; %0 <- old *lock */
		: "=&a" (tmp)
		: "a" (&rw->lock)
		: "memory");

	return tmp == 0 ? 1 : 0;
}
/*
 * Drop write ownership of @rw: release-store 0 (S32RI) so the write
 * side's updates are ordered before the lock reads as free.  A plain
 * store suffices because the writer holds the lock exclusively.
 */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
		" movi %0, 0\n"
		" s32ri %0, %1, 0\n"     /* release-store 0 -> *lock */
		: "=&a" (tmp)
		: "a" (&rw->lock)
		: "memory");
}
/*
 * Acquire @rw for reading, spinning while a writer holds it.
 *
 * Loop: load the lock word; if negative (writer bit 0x80000000 set),
 * spin.  Otherwise arm SCOMPARE1 with the observed value and attempt
 * an S32C1I that increments the reader count by one.  If the word
 * changed in between (CAS result != observed value), retry from the
 * load.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;
	unsigned long result;

	__asm__ __volatile__(
		"1: l32i %1, %2, 0\n"    /* tmp <- current *lock */
		" bltz %1, 1b\n"         /* writer holds it -> spin */
		" wsr %1, scompare1\n"   /* expect the value we saw */
		" addi %0, %1, 1\n"      /* one more reader */
		" s32c1i %0, %2, 0\n"    /* CAS; %0 <- old *lock */
		" bne %0, %1, 1b\n"      /* raced with someone -> retry */
		: "=&a" (result), "=&a" (tmp)
		: "a" (&rw->lock)
		: "memory");
}
  138. /* Returns 1 if the lock is obtained, 0 otherwise. */
/*
 * Try once to acquire @rw for reading, without spinning.
 *
 * Load the lock word and compute word+1.  If that is negative, either
 * a writer holds the lock (word = 0x80000000) or the reader count
 * would overflow into the writer bit, so bail out at label 1 with a
 * nonzero result.  Otherwise attempt the S32C1I increment; it returns
 * the previous lock word, and "sub %0, %0, %1" yields 0 exactly when
 * that previous word matched the value we loaded, i.e. the CAS took.
 *
 * Returns 1 if the lock is obtained, 0 otherwise.
 */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long result;
	unsigned long tmp;

	__asm__ __volatile__(
		" l32i %1, %2, 0\n"      /* tmp <- current *lock */
		" addi %0, %1, 1\n"      /* candidate reader count */
		" bltz %0, 1f\n"         /* writer held / overflow -> fail */
		" wsr %1, scompare1\n"   /* expect the value we saw */
		" s32c1i %0, %2, 0\n"    /* CAS; %0 <- old *lock */
		" sub %0, %0, %1\n"      /* 0 iff CAS succeeded */
		"1:\n"
		: "=&a" (result), "=&a" (tmp)
		: "a" (&rw->lock)
		: "memory");

	return result == 0;
}
/*
 * Drop one reader reference on @rw.
 *
 * Unlike the unlock paths above, a CAS loop is required here: other
 * readers may be taking or dropping the lock concurrently, so the
 * decrement must be atomic.  Load the word, arm SCOMPARE1 with it,
 * attempt to store word-1, and retry if the word changed underneath.
 */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
		"1: l32i %1, %2, 0\n"    /* tmp2 <- current *lock */
		" addi %0, %1, -1\n"     /* one fewer reader */
		" wsr %1, scompare1\n"   /* expect the value we saw */
		" s32c1i %0, %2, 0\n"    /* CAS; %0 <- old *lock */
		" bne %0, %1, 1b\n"      /* raced with someone -> retry */
		: "=&a" (tmp1), "=&a" (tmp2)
		: "a" (&rw->lock)
		: "memory");
}
  169. #endif /* _XTENSA_SPINLOCK_H */