/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 * tiny version for non-preemptible single-CPU use.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2017
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 */
  23. #include <linux/export.h>
  24. #include <linux/mutex.h>
  25. #include <linux/preempt.h>
  26. #include <linux/rcupdate_wait.h>
  27. #include <linux/sched.h>
  28. #include <linux/delay.h>
  29. #include <linux/srcu.h>
  30. #include <linux/rcu_node_tree.h>
  31. #include "rcu_segcblist.h"
  32. #include "rcu.h"
/* Set to RCU_SCHEDULER_RUNNING by rcu_scheduler_starting() below. */
int rcu_scheduler_active __read_mostly;
  34. static int init_srcu_struct_fields(struct srcu_struct *sp)
  35. {
  36. sp->srcu_lock_nesting[0] = 0;
  37. sp->srcu_lock_nesting[1] = 0;
  38. init_swait_queue_head(&sp->srcu_wq);
  39. sp->srcu_cb_head = NULL;
  40. sp->srcu_cb_tail = &sp->srcu_cb_head;
  41. sp->srcu_gp_running = false;
  42. sp->srcu_gp_waiting = false;
  43. sp->srcu_idx = 0;
  44. INIT_WORK(&sp->srcu_work, srcu_drive_gp);
  45. return 0;
  46. }
#ifdef CONFIG_DEBUG_LOCK_ALLOC

/*
 * Lockdep-enabled variant of init_srcu_struct(): registers @sp with
 * lockdep under @name/@key before initializing the SRCU fields.
 */
int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);
  71. #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
  72. /*
  73. * cleanup_srcu_struct - deconstruct a sleep-RCU structure
  74. * @sp: structure to clean up.
  75. *
  76. * Must invoke this after you are finished using a given srcu_struct that
  77. * was initialized via init_srcu_struct(), else you leak memory.
  78. */
  79. void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
  80. {
  81. WARN_ON(sp->srcu_lock_nesting[0] || sp->srcu_lock_nesting[1]);
  82. if (quiesced)
  83. WARN_ON(work_pending(&sp->srcu_work));
  84. else
  85. flush_work(&sp->srcu_work);
  86. WARN_ON(sp->srcu_gp_running);
  87. WARN_ON(sp->srcu_gp_waiting);
  88. WARN_ON(sp->srcu_cb_head);
  89. WARN_ON(&sp->srcu_cb_head != sp->srcu_cb_tail);
  90. }
  91. EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
  92. /*
  93. * Removes the count for the old reader from the appropriate element of
  94. * the srcu_struct.
  95. */
  96. void __srcu_read_unlock(struct srcu_struct *sp, int idx)
  97. {
  98. int newval = sp->srcu_lock_nesting[idx] - 1;
  99. WRITE_ONCE(sp->srcu_lock_nesting[idx], newval);
  100. if (!newval && READ_ONCE(sp->srcu_gp_waiting))
  101. swake_up_one(&sp->srcu_wq);
  102. }
  103. EXPORT_SYMBOL_GPL(__srcu_read_unlock);
/*
 * Workqueue handler to drive one grace period and invoke any callbacks
 * that become ready as a result.  Single-CPU and !PREEMPT operation
 * means that we get away with murder on synchronization. ;-)
 */
void srcu_drive_gp(struct work_struct *wp)
{
	int idx;			/* reader index being drained */
	struct rcu_head *lh;		/* local snapshot of callback list */
	struct rcu_head *rhp;		/* callback currently being invoked */
	struct srcu_struct *sp;

	sp = container_of(wp, struct srcu_struct, srcu_work);
	if (sp->srcu_gp_running || !READ_ONCE(sp->srcu_cb_head))
		return; /* Already running or nothing to do. */

	/* Remove recently arrived callbacks and wait for readers. */
	WRITE_ONCE(sp->srcu_gp_running, true);
	/* IRQs off: call_srcu() may enqueue from interrupt level. */
	local_irq_disable();
	lh = sp->srcu_cb_head;
	sp->srcu_cb_head = NULL;
	sp->srcu_cb_tail = &sp->srcu_cb_head;
	local_irq_enable();
	/* Flip the index so new readers use the other counter... */
	idx = sp->srcu_idx;
	WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
	WRITE_ONCE(sp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
	/* ...then sleep until the old counter drains to zero. */
	swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
	WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */

	/* Invoke the callbacks we removed above. */
	while (lh) {
		rhp = lh;
		lh = lh->next;	/* advance before ->func() may free rhp */
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Enable rescheduling, and if there are more callbacks,
	 * reschedule ourselves.  This can race with a call_srcu()
	 * at interrupt level, but the ->srcu_gp_running checks will
	 * straighten that out.
	 */
	WRITE_ONCE(sp->srcu_gp_running, false);
	if (READ_ONCE(sp->srcu_cb_head))
		schedule_work(&sp->srcu_work);
}
EXPORT_SYMBOL_GPL(srcu_drive_gp);
  149. /*
  150. * Enqueue an SRCU callback on the specified srcu_struct structure,
  151. * initiating grace-period processing if it is not already running.
  152. */
  153. void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
  154. rcu_callback_t func)
  155. {
  156. unsigned long flags;
  157. rhp->func = func;
  158. rhp->next = NULL;
  159. local_irq_save(flags);
  160. *sp->srcu_cb_tail = rhp;
  161. sp->srcu_cb_tail = &rhp->next;
  162. local_irq_restore(flags);
  163. if (!READ_ONCE(sp->srcu_gp_running))
  164. schedule_work(&sp->srcu_work);
  165. }
  166. EXPORT_SYMBOL_GPL(call_srcu);
  167. /*
  168. * synchronize_srcu - wait for prior SRCU read-side critical-section completion
  169. */
  170. void synchronize_srcu(struct srcu_struct *sp)
  171. {
  172. struct rcu_synchronize rs;
  173. init_rcu_head_on_stack(&rs.head);
  174. init_completion(&rs.completion);
  175. call_srcu(sp, &rs.head, wakeme_after_rcu);
  176. wait_for_completion(&rs.completion);
  177. destroy_rcu_head_on_stack(&rs.head);
  178. }
  179. EXPORT_SYMBOL_GPL(synchronize_srcu);
/* Lockdep diagnostics. */
void __init rcu_scheduler_starting(void)
{
	/* Mark the scheduler as fully operational for RCU diagnostics. */
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
}