locking.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include "ctree.h"
#include "extent_io.h"
#include "locking.h"

static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);

/*
 * if we currently have a spinning reader or writer lock
 * (indicated by the rw flag) this will bump the count
 * of blocking holders and drop the spinlock.
 */
void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
        /*
         * no lock is required. The lock owner may change if
         * we have a read lock, but it won't change to or away
         * from us. If we have the write lock, we are the owner
         * and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;

        if (rw == BTRFS_WRITE_LOCK) {
                if (atomic_read(&eb->blocking_writers) == 0) {
                        WARN_ON(atomic_read(&eb->spinning_writers) != 1);
                        atomic_dec(&eb->spinning_writers);
                        btrfs_assert_tree_locked(eb);
                        atomic_inc(&eb->blocking_writers);
                        write_unlock(&eb->lock);
                }
        } else if (rw == BTRFS_READ_LOCK) {
                btrfs_assert_tree_read_locked(eb);
                atomic_inc(&eb->blocking_readers);
                WARN_ON(atomic_read(&eb->spinning_readers) == 0);
                atomic_dec(&eb->spinning_readers);
                read_unlock(&eb->lock);
        }
}

/*
 * if we currently have a blocking lock, take the spinlock
 * and drop our blocking count
 */
void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
{
        /*
         * no lock is required. The lock owner may change if
         * we have a read lock, but it won't change to or away
         * from us. If we have the write lock, we are the owner
         * and it'll never change.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner)
                return;

        if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
                BUG_ON(atomic_read(&eb->blocking_writers) != 1);
                write_lock(&eb->lock);
                WARN_ON(atomic_read(&eb->spinning_writers));
                atomic_inc(&eb->spinning_writers);
                /* atomic_dec_and_test implies a barrier */
                if (atomic_dec_and_test(&eb->blocking_writers))
                        cond_wake_up_nomb(&eb->write_lock_wq);
        } else if (rw == BTRFS_READ_LOCK_BLOCKING) {
                BUG_ON(atomic_read(&eb->blocking_readers) == 0);
                read_lock(&eb->lock);
                atomic_inc(&eb->spinning_readers);
                /* atomic_dec_and_test implies a barrier */
                if (atomic_dec_and_test(&eb->blocking_readers))
                        cond_wake_up_nomb(&eb->read_lock_wq);
        }
}
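
/*
 * Rough usage sketch for the two helpers above (illustrative only, not
 * lifted from any particular caller): a task holding a spinning lock
 * converts it to a blocking lock around work that may sleep, then
 * converts it back before unlocking:
 *
 *      btrfs_tree_lock(eb);
 *      btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *      ... work that may sleep (allocation, IO, waiting) ...
 *      btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
 *      btrfs_tree_unlock(eb);
 */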

/*
 * take a spinning read lock. This will wait for any blocking
 * writers
 */
void btrfs_tree_read_lock(struct extent_buffer *eb)
{
again:
        BUG_ON(!atomic_read(&eb->blocking_writers) &&
               current->pid == eb->lock_owner);

        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) &&
            current->pid == eb->lock_owner) {
                /*
                 * This extent is already write-locked by our thread. We allow
                 * an additional read lock to be added because it's for the same
                 * thread. btrfs_find_all_roots() depends on this as it may be
                 * called on a partly (write-)locked tree.
                 */
                BUG_ON(eb->lock_nested);
                eb->lock_nested = 1;
                read_unlock(&eb->lock);
                return;
        }
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                wait_event(eb->write_lock_wq,
                           atomic_read(&eb->blocking_writers) == 0);
                goto again;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
}
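
/*
 * How the read side pairs up, roughly (illustrative sketch only):
 *
 *      btrfs_tree_read_lock(eb);
 *      ... short, non-sleeping critical section ...
 *      btrfs_tree_read_unlock(eb);
 *
 * or, when the critical section may sleep, convert to a blocking read
 * lock first:
 *
 *      btrfs_tree_read_lock(eb);
 *      btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
 *      ... may sleep ...
 *      btrfs_tree_read_unlock_blocking(eb);
 */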

/*
 * take a spinning read lock.
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers))
                return 0;

        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
        return 1;
}
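
/*
 * Note on the variant above: unlike btrfs_try_tree_read_lock() below it
 * acquires eb->lock with read_lock() rather than read_trylock(), so it may
 * spin while the rwlock is write-held; what it never does is sleep waiting
 * for blocking writers to drain.
 */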

/*
 * returns 1 if we get the read lock and 0 if we don't
 * this won't wait for blocking writers
 */
int btrfs_try_tree_read_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers))
                return 0;

        if (!read_trylock(&eb->lock))
                return 0;

        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->read_locks);
        atomic_inc(&eb->spinning_readers);
        return 1;
}
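
/*
 * Typical try-lock pattern (rough sketch): attempt the non-blocking read
 * lock first and fall back to the sleeping variant when that fails.
 *
 *      if (!btrfs_try_tree_read_lock(eb))
 *              btrfs_tree_read_lock(eb);
 */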

/*
 * returns 1 if we get the write lock and 0 if we don't
 * this won't wait for blocking writers or readers
 */
int btrfs_try_tree_write_lock(struct extent_buffer *eb)
{
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers))
                return 0;

        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
                return 0;
        }
        atomic_inc(&eb->write_locks);
        atomic_inc(&eb->spinning_writers);
        eb->lock_owner = current->pid;
        return 1;
}

/*
 * drop a spinning read lock
 */
void btrfs_tree_read_unlock(struct extent_buffer *eb)
{
        /*
         * if we're nested, we have the write lock. No new locking
         * is needed as long as we are the lock owner.
         * The write unlock will do a barrier for us, and the lock_nested
         * field only matters to the lock owner.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = 0;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
        atomic_dec(&eb->spinning_readers);
        atomic_dec(&eb->read_locks);
        read_unlock(&eb->lock);
}

/*
 * drop a blocking read lock
 */
void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
{
        /*
         * if we're nested, we have the write lock. No new locking
         * is needed as long as we are the lock owner.
         * The write unlock will do a barrier for us, and the lock_nested
         * field only matters to the lock owner.
         */
        if (eb->lock_nested && current->pid == eb->lock_owner) {
                eb->lock_nested = 0;
                return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->blocking_readers) == 0);
        /* atomic_dec_and_test implies a barrier */
        if (atomic_dec_and_test(&eb->blocking_readers))
                cond_wake_up_nomb(&eb->read_lock_wq);
        atomic_dec(&eb->read_locks);
}

/*
 * take a spinning write lock. This will wait for both
 * blocking readers and writers
 */
void btrfs_tree_lock(struct extent_buffer *eb)
{
        WARN_ON(eb->lock_owner == current->pid);
again:
        wait_event(eb->read_lock_wq, atomic_read(&eb->blocking_readers) == 0);
        wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
        write_lock(&eb->lock);
        if (atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
                wait_event(eb->read_lock_wq,
                           atomic_read(&eb->blocking_readers) == 0);
                goto again;
        }
        if (atomic_read(&eb->blocking_writers)) {
                write_unlock(&eb->lock);
                wait_event(eb->write_lock_wq,
                           atomic_read(&eb->blocking_writers) == 0);
                goto again;
        }
        WARN_ON(atomic_read(&eb->spinning_writers));
        atomic_inc(&eb->spinning_writers);
        atomic_inc(&eb->write_locks);
        eb->lock_owner = current->pid;
}

/*
 * drop a spinning or a blocking write lock.
 */
void btrfs_tree_unlock(struct extent_buffer *eb)
{
        int blockers = atomic_read(&eb->blocking_writers);

        BUG_ON(blockers > 1);

        btrfs_assert_tree_locked(eb);
        eb->lock_owner = 0;
        atomic_dec(&eb->write_locks);

        if (blockers) {
                WARN_ON(atomic_read(&eb->spinning_writers));
                atomic_dec(&eb->blocking_writers);
                /* Use the lighter barrier after atomic */
                smp_mb__after_atomic();
                cond_wake_up_nomb(&eb->write_lock_wq);
        } else {
                WARN_ON(atomic_read(&eb->spinning_writers) != 1);
                atomic_dec(&eb->spinning_writers);
                write_unlock(&eb->lock);
        }
}
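
/*
 * Putting the write side together, a rough lifecycle sketch (illustrative
 * only):
 *
 *      btrfs_tree_lock(eb);
 *              now a spinning write lock, eb->lock_owner == current->pid
 *      btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);
 *              eb->lock itself is released, so the holder may sleep
 *      btrfs_tree_unlock(eb);
 *              handles both the spinning and the blocking state
 */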

/*
 * assert that the extent buffer has at least one write lock held
 */
void btrfs_assert_tree_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->write_locks));
}

/*
 * assert that the extent buffer has at least one read lock held
 */
static void btrfs_assert_tree_read_locked(struct extent_buffer *eb)
{
        BUG_ON(!atomic_read(&eb->read_locks));
}