srcutiny.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 * tiny version for non-preemptible single-CPU use.
 *
 * Copyright (C) IBM Corporation, 2017
 *
 * Author: Paul McKenney <paulmck@linux.ibm.com>
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include <linux/rcu_node_tree.h>
#include "rcu_segcblist.h"
#include "rcu.h"

int rcu_scheduler_active __read_mostly;
static LIST_HEAD(srcu_boot_list);
static bool srcu_init_done;

static int init_srcu_struct_fields(struct srcu_struct *ssp)
{
	ssp->srcu_lock_nesting[0] = 0;
	ssp->srcu_lock_nesting[1] = 0;
	init_swait_queue_head(&ssp->srcu_wq);
	ssp->srcu_cb_head = NULL;
	ssp->srcu_cb_tail = &ssp->srcu_cb_head;
	ssp->srcu_gp_running = false;
	ssp->srcu_gp_waiting = false;
	ssp->srcu_idx = 0;
	ssp->srcu_idx_max = 0;
	INIT_WORK(&ssp->srcu_work, srcu_drive_gp);
	INIT_LIST_HEAD(&ssp->srcu_work.entry);
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
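
/*
 * Usage sketch (illustrative; "my_srcu" and "my_static_srcu" are
 * hypothetical names, not part of this file): an SRCU domain can be
 * initialized at run time or defined at compile time:
 *
 *	struct srcu_struct my_srcu;
 *	int ret = init_srcu_struct(&my_srcu);	// run-time initialization
 *
 *	DEFINE_SRCU(my_static_srcu);		// compile-time alternative
 *
 * Each domain orders only its own readers against its own grace
 * periods, independently of every other domain.
 */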

/*
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	WARN_ON(ssp->srcu_lock_nesting[0] || ssp->srcu_lock_nesting[1]);
	flush_work(&ssp->srcu_work);
	WARN_ON(ssp->srcu_gp_running);
	WARN_ON(ssp->srcu_gp_waiting);
	WARN_ON(ssp->srcu_cb_head);
	WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail);
	WARN_ON(ssp->srcu_idx != ssp->srcu_idx_max);
	WARN_ON(ssp->srcu_idx & 0x1);
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
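
/*
 * Teardown sketch (illustrative; my_srcu is hypothetical): all readers
 * must have completed and all callbacks must have been invoked before
 * cleanup, so a typical shutdown sequence is:
 *
 *	// No new readers or call_srcu() invocations after this point.
 *	srcu_barrier(&my_srcu);		// Wait for pending callbacks.
 *	cleanup_srcu_struct(&my_srcu);	// The WARN_ON()s above stay quiet.
 */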

/*
 * Removes the count for the old reader from the appropriate element of
 * the srcu_struct.
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	int newval;

	preempt_disable();  // Needed for PREEMPT_AUTO
	newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1;
	WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
	preempt_enable();
	if (!newval && READ_ONCE(ssp->srcu_gp_waiting) && in_task())
		swake_up_one(&ssp->srcu_wq);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
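
/*
 * Reader-side sketch (illustrative; my_srcu and my_data are
 * hypothetical): srcu_read_lock() returns an index that must be passed
 * to the matching srcu_read_unlock(), and the critical section may
 * block:
 *
 *	int idx;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(my_data, &my_srcu);
 *	// ... use p, possibly sleeping ...
 *	srcu_read_unlock(&my_srcu, idx);
 */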

/*
 * Workqueue handler to drive one grace period and invoke any callbacks
 * that become ready as a result.  Single-CPU and !PREEMPTION operation
 * means that we get away with murder on synchronization.  ;-)
 */
void srcu_drive_gp(struct work_struct *wp)
{
	int idx;
	struct rcu_head *lh;
	struct rcu_head *rhp;
	struct srcu_struct *ssp;

	ssp = container_of(wp, struct srcu_struct, srcu_work);
	preempt_disable();  // Needed for PREEMPT_AUTO
	if (ssp->srcu_gp_running || ULONG_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max))) {
		preempt_enable();
		return; /* Already running or nothing to do. */
	}

	/* Remove recently arrived callbacks and wait for readers. */
	WRITE_ONCE(ssp->srcu_gp_running, true);
	local_irq_disable();
	lh = ssp->srcu_cb_head;
	ssp->srcu_cb_head = NULL;
	ssp->srcu_cb_tail = &ssp->srcu_cb_head;
	local_irq_enable();
	idx = (ssp->srcu_idx & 0x2) / 2;
	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
	WRITE_ONCE(ssp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
	preempt_enable();
	swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));
	preempt_disable();  // Needed for PREEMPT_AUTO
	WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
	preempt_enable();

	/* Invoke the callbacks we removed above. */
	while (lh) {
		rhp = lh;
		lh = lh->next;
		debug_rcu_head_callback(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Enable rescheduling, and if there are more callbacks,
	 * reschedule ourselves.  This can race with a call_srcu()
	 * at interrupt level, but the ->srcu_gp_running checks will
	 * straighten that out.
	 */
	preempt_disable();  // Needed for PREEMPT_AUTO
	WRITE_ONCE(ssp->srcu_gp_running, false);
	idx = ULONG_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max));
	preempt_enable();
	if (idx)
		schedule_work(&ssp->srcu_work);
}
EXPORT_SYMBOL_GPL(srcu_drive_gp);
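
/*
 * Worked example of the ->srcu_idx scheme above: ->srcu_idx is even
 * when no grace period is in progress and is incremented once at the
 * start and once at the end of each grace period.  Bit 1 selects the
 * ->srcu_lock_nesting[] slot that the new grace period must drain,
 * hence idx = (->srcu_idx & 0x2) / 2.  Starting from ->srcu_idx == 4,
 * the grace period waits on slot (4 & 0x2) / 2 == 0 while new readers
 * use slot 1, and ->srcu_idx ends up at 6; the next grace period then
 * waits on slot 1.
 */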

static void srcu_gp_start_if_needed(struct srcu_struct *ssp)
{
	unsigned long cookie;

	preempt_disable();  // Needed for PREEMPT_AUTO
	cookie = get_state_synchronize_srcu(ssp);
	if (ULONG_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie)) {
		preempt_enable();
		return;
	}
	WRITE_ONCE(ssp->srcu_idx_max, cookie);
	if (!READ_ONCE(ssp->srcu_gp_running)) {
		if (likely(srcu_init_done))
			schedule_work(&ssp->srcu_work);
		else if (list_empty(&ssp->srcu_work.entry))
			list_add(&ssp->srcu_work.entry, &srcu_boot_list);
	}
	preempt_enable();
}

/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	unsigned long flags;

	rhp->func = func;
	rhp->next = NULL;
	preempt_disable();  // Needed for PREEMPT_AUTO
	local_irq_save(flags);
	*ssp->srcu_cb_tail = rhp;
	ssp->srcu_cb_tail = &rhp->next;
	local_irq_restore(flags);
	srcu_gp_start_if_needed(ssp);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(call_srcu);
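
/*
 * Callback sketch (illustrative; struct foo, free_foo_cb(), foop, and
 * my_srcu are hypothetical): the rcu_head is typically embedded in the
 * protected structure and recovered with container_of():
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		// ... payload ...
 *	};
 *
 *	static void free_foo_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	call_srcu(&my_srcu, &foop->rh, free_foo_cb);
 */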

/*
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
	struct rcu_synchronize rs;

	srcu_lock_sync(&ssp->dep_map);

	RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
			lock_is_held(&rcu_bh_lock_map) ||
			lock_is_held(&rcu_lock_map) ||
			lock_is_held(&rcu_sched_lock_map),
			"Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;

	might_sleep();
	init_rcu_head_on_stack(&rs.head);
	init_completion(&rs.completion);
	call_srcu(ssp, &rs.head, wakeme_after_rcu);
	wait_for_completion(&rs.completion);
	destroy_rcu_head_on_stack(&rs.head);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
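
/*
 * Updater sketch (illustrative; my_data, my_lock, my_srcu, and newp
 * are hypothetical): publish a new version, wait out all pre-existing
 * readers, then free the old version:
 *
 *	spin_lock(&my_lock);
 *	oldp = rcu_dereference_protected(my_data, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(my_data, newp);
 *	spin_unlock(&my_lock);
 *	synchronize_srcu(&my_srcu);	// All pre-existing readers are done.
 *	kfree(oldp);
 */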

/*
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
 */
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
{
	unsigned long ret;

	barrier();
	ret = (READ_ONCE(ssp->srcu_idx) + 3) & ~0x1;
	barrier();
	return ret;
}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
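
/*
 * Worked example of the cookie arithmetic above: with ->srcu_idx == 4
 * (even, so no grace period in progress), the cookie is
 * (4 + 3) & ~0x1 == 6, which the very next grace period (4 -> 5 -> 6)
 * satisfies.  With ->srcu_idx == 5, a grace period is already underway
 * and might not wait on this call's readers, so the cookie is
 * (5 + 3) & ~0x1 == 8, requiring one further full grace period.
 */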

/*
 * start_poll_synchronize_srcu - Provide cookie and start grace period
 *
 * The difference between this and get_state_synchronize_srcu() is that
 * this function ensures that poll_state_synchronize_srcu() will
 * eventually return the value true.
 */
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{
	unsigned long ret;

	preempt_disable();  // Needed for PREEMPT_AUTO
	ret = get_state_synchronize_srcu(ssp);
	srcu_gp_start_if_needed(ssp);
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);

/*
 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 */
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
	unsigned long cur_s = READ_ONCE(ssp->srcu_idx);

	barrier();
	return cookie == SRCU_GET_STATE_COMPLETED ||
	       ULONG_CMP_GE(cur_s, cookie) || ULONG_CMP_LT(cur_s, cookie - 3);
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
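
/*
 * Polling sketch (illustrative; my_srcu is hypothetical): take a
 * cookie, go do other work, and check back later without ever
 * blocking:
 *
 *	unsigned long cookie;
 *
 *	cookie = start_poll_synchronize_srcu(&my_srcu);
 *	// ... other work ...
 *	if (poll_state_synchronize_srcu(&my_srcu, cookie)) {
 *		// All readers preceding the cookie have finished.
 *	}
 */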

/* Lockdep diagnostics. */
void __init rcu_scheduler_starting(void)
{
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
}

/*
 * Queue work for srcu_struct structures with early boot callbacks.
 * The work won't actually execute until the workqueue initialization
 * phase that takes place after the scheduler starts.
 */
void __init srcu_init(void)
{
	struct srcu_struct *ssp;

	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		ssp = list_first_entry(&srcu_boot_list,
				       struct srcu_struct, srcu_work.entry);
		list_del_init(&ssp->srcu_work.entry);
		schedule_work(&ssp->srcu_work);
	}
}