/*
 * lib/lockref.c - spinlock-protected reference counts with a lockless
 * cmpxchg64 fast path on architectures that support it.
 */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/export.h>
  3. #include <linux/lockref.h>
#if USE_CMPXCHG_LOCKREF

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 *
 * Lockless update loop: snapshot the combined {lock, count} word,
 * and as long as the snapshot shows the spinlock unlocked, run CODE
 * on a scratch copy ("new") and try to install it with a single
 * 64-bit cmpxchg. On success, execute SUCCESS (the callers' SUCCESS
 * bodies return from the enclosing function). On failure the cmpxchg
 * result becomes the fresh "old" snapshot and we retry after
 * cpu_relax(). If the lock is (or becomes) held, fall out of the
 * loop so the caller takes the spinlock slow path.
 *
 * CODE may 'return'/'break' out of the loop to reject the update
 * (e.g. on a zero or dead count) before the cmpxchg is attempted.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = READ_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		cpu_relax();							\
	}									\
} while (0)

#else

/* No usable cmpxchg64: the loop is a no-op and callers always lock. */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	/* Fast path: lockless increment via cmpxchg64. */
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	/* Slow path: lock was held (or no cmpxchg support). */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
/**
 * lockref_get_not_zero - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	/*
	 * Fast path: refuse if the snapshot count is zero or negative
	 * (negative means dead, see lockref_mark_dead()).
	 */
	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			return 0;
	,
		return 1;
	);

	/* Slow path: same check under the spinlock. */
	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count > 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
/**
 * lockref_put_not_zero - Decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count would become zero
 */
int lockref_put_not_zero(struct lockref *lockref)
{
	int retval;

	/* Fast path: never drop the count to zero (or below) locklessly. */
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			return 0;
	,
		return 1;
	);

	/* Slow path: same check under the spinlock. */
	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count > 1) {
		lockref->count--;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_put_not_zero);
/**
 * lockref_get_or_lock - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 *
 * NOTE: on the 0 return, the function returns with the spinlock HELD;
 * the caller is responsible for releasing it.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	/* Fast path: bail out to the locked path on a zero/dead count. */
	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 0)
		return 0;	/* lock intentionally kept held */
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return an error.
 */
int lockref_put_return(struct lockref *lockref)
{
	/* Lockless only: no spinlock fallback for this operation. */
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;
	,
		return new.count;
	);
	/* Lock held, or cmpxchg unsupported: report failure. */
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);
/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 *
 * NOTE: on the 0 return, the function returns with the spinlock HELD;
 * the caller is responsible for releasing it.
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	/* Fast path: bail out to the locked path rather than hit zero. */
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;	/* lock intentionally kept held */
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 *
 * Must be called with the spinlock held. The negative sentinel makes
 * every subsequent "not zero"/"not dead" fast path refuse the object.
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 *
 * Unlike lockref_get_not_zero(), a zero count is acceptable here;
 * only a negative (dead) count is refused.
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	/* Fast path: refuse only a dead (negative) count. */
	CMPXCHG_LOOP(
		new.count++;
		if (old.count < 0)
			return 0;
	,
		return 1;
	);

	/* Slow path: same check under the spinlock. */
	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);