/*
 * Tiny RCU: a minimal Read-Copy Update implementation for uniprocessor,
 * non-preemptible kernel builds.
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/trace_events.h>

#include "rcu.h"

/* Forward declarations for tiny_plugin.h. */
struct rcu_ctrlblk;
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
		       rcu_callback_t func,
		       struct rcu_ctrlblk *rcp);

#include "tiny_plugin.h"

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

/*
 * Test whether RCU thinks that the current CPU is idle.
 */
bool notrace __rcu_is_watching(void)
{
	return true;
}
EXPORT_SYMBOL(__rcu_is_watching);

#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().  The caller must
 * have interrupts disabled to avoid races with interrupt handlers that
 * manipulate the callback lists.
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	RCU_TRACE(reset_cpu_stall_ticks(rcp));
	if (rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		return 1;
	}

	return 0;
}

/*
 * Record an rcu-sched quiescent state.  Any rcu-sched quiescent state is
 * also an rcu-bh quiescent state, so record that one while we are at it.
 * The "+" (rather than "||") defeats short-circuit evaluation so that
 * both control blocks are always checked.
 */
void rcu_sched_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}

/*
 * Check for quiescent states from the scheduling-clock interrupt.  This
 * is called from hardirq context, with "user" indicating whether the
 * interrupt arrived while executing in usermode.
 */
void rcu_check_callbacks(int user)
{
	RCU_TRACE(check_cpu_stalls());
	if (user)
		rcu_sched_qs();
	else if (!in_softirq())
		rcu_bh_qs();
	if (user)
		rcu_note_voluntary_context_switch(current);
}

/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	const char *rn = NULL;
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcp->donetail == &rcp->rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	RCU_TRACE(rn = rcp->name);
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim(rn, list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
	RCU_TRACE(trace_rcu_batch_end(rcp->name,
				      cb_count, 0, need_resched(),
				      is_idle_task(current),
				      false));
}
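
/*
 * Descriptive note on the callback list layout used above: each
 * rcu_ctrlblk keeps its callbacks on one NULL-terminated singly linked
 * list with two tail pointers.  ->rcucblist is the head, ->donetail
 * addresses the ->next field of the last callback whose grace period
 * has already ended, and ->curtail addresses the ->next field of the
 * last callback queued.  rcu_qsctr_help() advances ->donetail to
 * ->curtail at each quiescent state, and the splice at the top of
 * __rcu_process_callbacks() hands the "done" prefix of the list to the
 * reclaim loop while leaving any still-waiting callbacks in place.
 */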

static void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_sched_ctrlblk);
	__rcu_process_callbacks(&rcu_bh_ctrlblk);
}

/*
 * Wait for a grace period to elapse.  It is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section, so
 * any legal call to synchronize_sched() is itself a quiescent state.  On
 * this uniprocessor, non-preemptible build there is therefore nothing to
 * wait for, and a cond_resched() suffices.
 */
void synchronize_sched(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_sched() in RCU read-side critical section");
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);

/*
 * Helper function for call_rcu_sched() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
		       rcu_callback_t func,
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	RCU_TRACE(rcp->qlen++);
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* Force a reschedule so that rcu_sched_qs() runs soon. */
		resched_cpu(0);
	}
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  With only one CPU, that means after the next quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
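
/*
 * Illustrative sketch of typical call_rcu_sched() usage.  The struct foo,
 * foo_reclaim(), and old_fp names below are hypothetical examples, not
 * part of this file:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	(after unlinking old_fp from all reader-visible structures:)
 *	call_rcu_sched(&old_fp->rcu, foo_reclaim);
 *
 * The callback is invoked after a grace period, which on this Tiny RCU
 * build means after the single CPU next passes through a quiescent state
 * such as a context switch or a usermode/idle transition.
 */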

void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	RCU_TRACE(reset_cpu_stall_ticks(&rcu_sched_ctrlblk));
	RCU_TRACE(reset_cpu_stall_ticks(&rcu_bh_ctrlblk));

	rcu_early_boot_tests();
}