// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate_wait.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include "rcu.h"

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.donetail	= &rcu_ctrlblk.rcucblist,
	.curtail	= &rcu_ctrlblk.rcucblist,
};
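/*
 * For illustration: with hypothetical callbacks A and B already past
 * their grace period ("done") and callback C still waiting, the list
 * and tail pointers look like this:
 *
 *	rcucblist --> A --> B --> C --> NULL
 *	                    ^           ^
 *	                 donetail    curtail
 *
 * Each tail pointer references the ->next field of the last callback in
 * its sublist; both point at rcucblist itself when the corresponding
 * sublist is empty, as in the initializer above.
 */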

/* Wait for the invocation of all callbacks queued before this call. */
void rcu_barrier(void)
{
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL(rcu_barrier);

/*
 * Record an rcu quiescent state.  On this uniprocessor (Tiny RCU)
 * build, a quiescent state ends the grace period for every callback
 * queued before it, so all pending callbacks become "done" at once.
 */
void rcu_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
		raise_softirq_irqoff(RCU_SOFTIRQ);
	}
	local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_sched_clock_irq(int user)
{
	if (user) {
		rcu_qs();
	} else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		set_tsk_need_resched(current);
		set_preempt_need_resched();
	}
}

/*
 * Reclaim the specified callback, either by invoking it for non-kfree cases or
 * freeing it directly (for kfree).  Return true if kfreeing, false otherwise.
 */
static inline bool rcu_reclaim_tiny(struct rcu_head *head)
{
	rcu_callback_t f;
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kvfree_rcu_offset(offset)) {
		trace_rcu_invoke_kvfree_callback("", head, offset);
		kvfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	}

	trace_rcu_invoke_callback("", head);
	f = head->func;
	WRITE_ONCE(head->func, (rcu_callback_t)0L);
	f(head);
	rcu_lock_release(&rcu_callback_map);
	return false;
}
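/*
 * For illustration: kfree_rcu(p, field) effectively expands to
 *
 *	call_rcu(&p->field, (rcu_callback_t)offsetof(typeof(*p), field));
 *
 * so ->func carries a small structure offset rather than a real function
 * pointer.  __is_kvfree_rcu_offset() recognizes such values, and the
 * "(void *)head - offset" computation above recovers the start of the
 * enclosing allocation for kvfree().
 */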

/* Invoke the RCU callbacks whose grace period has elapsed. */
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	struct rcu_head *next, *list;
	unsigned long flags;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	list = rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
	*rcu_ctrlblk.donetail = NULL;
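	/*
	 * If curtail matched donetail, every pending callback was ready and
	 * now lives on the local list, so pull curtail back to the (empty)
	 * list header as well.
	 */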
	if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
		rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		rcu_reclaim_tiny(list);
		local_bh_enable();
		list = next;
	}
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_rcu() is a quiescent
 * state, and so on a UP system, synchronize_rcu() need do nothing.
 * (But Lai Jiangshan points out the benefits of doing might_sleep()
 * to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
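/*
 * Illustrative usage sketch, assuming a hypothetical RCU-protected
 * pointer "gp" guarded for updates by "gp_lock": the classic updater
 * sequence publishes a new version, waits for pre-existing readers,
 * and only then frees the old one.
 *
 *	spin_lock(&gp_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, new);
 *	spin_unlock(&gp_lock);
 *	synchronize_rcu();	// Wait for pre-existing readers to finish.
 *	kfree(old);
 */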

/*
 * Post an RCU callback to be invoked after the end of an RCU grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_ctrlblk.curtail = head;
	rcu_ctrlblk.curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
}
EXPORT_SYMBOL_GPL(call_rcu);
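/*
 * Illustrative usage sketch, assuming a hypothetical "struct foo" with
 * an embedded rcu_head: the callback runs after a grace period and
 * frees the enclosing structure via container_of().
 *
 *	struct foo {
 *		int datum;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rcu));
 *	}
 *
 *	// After unlinking fp from all RCU-protected structures:
 *	call_rcu(&fp->rcu, foo_reclaim);
 */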

void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	rcu_early_boot_tests();
	srcu_init();
}