1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
#include <linux/moduleparam.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/err.h>
38
39
/* Controls for rcu_kthread() kthread, which does the callback-invocation work. */
static struct task_struct *rcu_kthread_task;	/* set by rcu_spawn_kthreads() */
static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);	/* kthread sleeps here */
static unsigned long have_rcu_kthread_work;	/* nonzero: work pending for kthread */
static void invoke_rcu_kthread(void);		/* wake the kthread (defined below) */
44
45
/* Forward declarations needed by rcutiny_plugin.h, included just below. */
struct rcu_ctrlblk;
static void rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static int rcu_kthread(void *arg);
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp);
52
53#include "rcutiny_plugin.h"
54
#ifdef CONFIG_NO_HZ

/*
 * Nesting depth for rcu_enter_nohz()/rcu_exit_nohz().  Nonzero means we
 * are not in dynticks-idle mode; starts at 1 because boot is "running".
 */
static long rcu_dynticks_nesting = 1;
58
59
60
61
62
63
64void rcu_enter_nohz(void)
65{
66 if (--rcu_dynticks_nesting == 0)
67 rcu_sched_qs(0);
68}
69
70
71
72
73
74void rcu_exit_nohz(void)
75{
76 rcu_dynticks_nesting++;
77}
78
79#endif
80
81
82
83
84
85
86static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
87{
88 unsigned long flags;
89
90 local_irq_save(flags);
91 if (rcp->rcucblist != NULL &&
92 rcp->donetail != rcp->curtail) {
93 rcp->donetail = rcp->curtail;
94 local_irq_restore(flags);
95 return 1;
96 }
97 local_irq_restore(flags);
98
99 return 0;
100}
101
102
103
104
105
106
107void rcu_sched_qs(int cpu)
108{
109 if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
110 rcu_qsctr_help(&rcu_bh_ctrlblk))
111 invoke_rcu_kthread();
112}
113
114
115
116
117void rcu_bh_qs(int cpu)
118{
119 if (rcu_qsctr_help(&rcu_bh_ctrlblk))
120 invoke_rcu_kthread();
121}
122
123
124
125
126
127void rcu_check_callbacks(int cpu, int user)
128{
129 if (user ||
130 (idle_cpu(cpu) &&
131 !in_softirq() &&
132 hardirq_count() <= (1 << HARDIRQ_SHIFT)))
133 rcu_sched_qs(cpu);
134 else if (!in_softirq())
135 rcu_bh_qs(cpu);
136 rcu_preempt_check_callbacks();
137}
138
139
140
141
142
/*
 * Invoke the callbacks on rcp that are ready (those up to ->donetail):
 * detach the done sublist with interrupts disabled, then invoke each
 * callback with softirqs disabled.
 */
static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* If no RCU callbacks are ready to invoke, just return. */
	if (&rcp->rcucblist == rcp->donetail)
		return;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	/* If we emptied the whole list, the current tail must be reset too. */
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcu_preempt_remove_callbacks(rcp);
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list, one at a time. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		list->func(list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
}
177
178
179
180
181
182
183
184
/*
 * Main loop of the per-system RCU kthread: sleeps until work is posted
 * via invoke_rcu_kthread(), performs RCU priority boosting, and invokes
 * ready callbacks for all three flavors.  Never returns in normal
 * operation (the trailing return 0 silences the compiler).
 */
static int rcu_kthread(void *arg)
{
	unsigned long work;
	unsigned long morework;
	unsigned long flags;

	for (;;) {
		/* Sleep until invoke_rcu_kthread() posts work. */
		wait_event_interruptible(rcu_kthread_wq,
					 have_rcu_kthread_work != 0);
		morework = rcu_boost();
		/*
		 * Consume the work flag under irq-off; re-arm it if
		 * rcu_boost() reported more boosting remains.
		 */
		local_irq_save(flags);
		work = have_rcu_kthread_work;
		have_rcu_kthread_work = morework;
		local_irq_restore(flags);
		if (work) {
			rcu_process_callbacks(&rcu_sched_ctrlblk);
			rcu_process_callbacks(&rcu_bh_ctrlblk);
			rcu_preempt_process_callbacks();
		}
		/* Yield briefly between passes. */
		schedule_timeout_interruptible(1);
	}

	return 0;	/* NOTREACHED in normal operation. */
}
209
210
211
212
213
/*
 * Wake up rcu_kthread() to process callbacks or perform boosting.
 * The work flag is set and the wakeup issued with interrupts disabled;
 * the flag must be visible before/with the wakeup so the kthread's
 * wait_event condition sees it.
 */
static void invoke_rcu_kthread(void)
{
	unsigned long flags;

	local_irq_save(flags);
	have_rcu_kthread_work = 1;
	wake_up(&rcu_kthread_wq);
	local_irq_restore(flags);
}
223
224
225
226
227
228
229
230
231
232
233
234
235
236
/*
 * Wait for an RCU-sched grace period to elapse.  Here that is a single
 * cond_resched().  NOTE(review): this presumably relies on being the
 * uniprocessor (tiny) RCU implementation, where any context switch on
 * the sole CPU constitutes a grace period -- confirm against the
 * build configuration before reusing elsewhere.
 */
void synchronize_sched(void)
{
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
242
243
244
245
/*
 * Helper for call_rcu_sched() and call_rcu_bh(): initialize head and
 * append it to rcp's callback list with interrupts disabled.
 */
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	/* Append at the current tail and advance the tail pointer. */
	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	RCU_TRACE(rcp->qlen++);
	local_irq_restore(flags);
}
262
263
264
265
266
267
/*
 * Post an RCU-sched callback: func(head) will be invoked after an
 * RCU-sched grace period elapses.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
273
274
275
276
277
/*
 * Post an RCU-bh callback: func(head) will be invoked after an RCU-bh
 * grace period elapses.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
283
284void rcu_barrier_bh(void)
285{
286 struct rcu_synchronize rcu;
287
288 init_rcu_head_on_stack(&rcu.head);
289 init_completion(&rcu.completion);
290
291 call_rcu_bh(&rcu.head, wakeme_after_rcu);
292
293 wait_for_completion(&rcu.completion);
294 destroy_rcu_head_on_stack(&rcu.head);
295}
296EXPORT_SYMBOL_GPL(rcu_barrier_bh);
297
298void rcu_barrier_sched(void)
299{
300 struct rcu_synchronize rcu;
301
302 init_rcu_head_on_stack(&rcu.head);
303 init_completion(&rcu.completion);
304
305 call_rcu_sched(&rcu.head, wakeme_after_rcu);
306
307 wait_for_completion(&rcu.completion);
308 destroy_rcu_head_on_stack(&rcu.head);
309}
310EXPORT_SYMBOL_GPL(rcu_barrier_sched);
311
312
313
314
315static int __init rcu_spawn_kthreads(void)
316{
317 struct sched_param sp;
318
319 rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
320 sp.sched_priority = RCU_BOOST_PRIO;
321 sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
322 return 0;
323}
324early_initcall(rcu_spawn_kthreads);
325