/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition:
 * Tiny RCU, for uniprocessor, non-preemptible kernels.
 *
 * Copyright IBM Corporation, 2008.
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of the Read-Copy Update mechanism see
 * Documentation/RCU.
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
#endif /* #ifdef CONFIG_RCU_TRACE */

#include "rcu.h"
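
/* Forward declarations for rcutiny_plugin.h. */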
struct rcu_ctrlblk;
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp);

/* Tracks nesting of non-idle context; zero means this CPU is idle. */
static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;

#include "rcutiny_plugin.h"
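
/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */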
static void rcu_idle_enter_common(long long newval)
{
        if (newval) {
                RCU_TRACE(trace_rcu_dyntick("--=",
                                            rcu_dynticks_nesting, newval));
                rcu_dynticks_nesting = newval;
                return;
        }
        RCU_TRACE(trace_rcu_dyntick("Start", rcu_dynticks_nesting, newval));
        if (!is_idle_task(current)) {
                struct task_struct *idle = idle_task(smp_processor_id());

                RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
                                            rcu_dynticks_nesting, newval));
                ftrace_dump(DUMP_ALL);
                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
                          current->pid, current->comm,
                          idle->pid, idle->comm);
        }
        rcu_sched_qs(0); /* implies an rcu_bh quiescent state as well */
        barrier();
        rcu_dynticks_nesting = newval;
}
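
/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode (i.e., if the new value of dynticks_nesting is zero).
 */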
void rcu_idle_enter(void)
{
        unsigned long flags;
        long long newval;

        local_irq_save(flags);
        WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
        if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
            DYNTICK_TASK_NEST_VALUE)
                newval = 0;
        else
                newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE;
        rcu_idle_enter_common(newval);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);
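
/*
 * Exit an interrupt handler towards idle.
 */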
void rcu_irq_exit(void)
{
        unsigned long flags;
        long long newval;

        local_irq_save(flags);
        newval = rcu_dynticks_nesting - 1;
        WARN_ON_ONCE(newval < 0);
        rcu_idle_enter_common(newval);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_exit);
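
/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */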
static void rcu_idle_exit_common(long long oldval)
{
        if (oldval) {
                RCU_TRACE(trace_rcu_dyntick("++=",
                                            oldval, rcu_dynticks_nesting));
                return;
        }
        RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
        if (!is_idle_task(current)) {
                struct task_struct *idle = idle_task(smp_processor_id());

                RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
                                            oldval, rcu_dynticks_nesting));
                ftrace_dump(DUMP_ALL);
                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
                          current->pid, current->comm,
                          idle->pid, idle->comm);
        }
}
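
/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 */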
void rcu_idle_exit(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        WARN_ON_ONCE(rcu_dynticks_nesting < 0);
        if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
                rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
        else
                rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
        rcu_idle_exit_common(oldval);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);
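
/*
 * Enter an interrupt handler, moving away from idle.
 */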
void rcu_irq_enter(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        rcu_dynticks_nesting++;
        WARN_ON_ONCE(rcu_dynticks_nesting == 0);
        rcu_idle_exit_common(oldval);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_enter);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
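
/*
 * Test whether RCU thinks that the current CPU is idle.
 */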
int rcu_is_cpu_idle(void)
{
        return !rcu_dynticks_nesting;
}
EXPORT_SYMBOL(rcu_is_cpu_idle);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
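
/*
 * Test whether the current CPU was interrupted from idle.  Nested
 * interrupts don't count, as we must be running at the first interrupt
 * level.
 */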
static int rcu_is_cpu_rrupt_from_idle(void)
{
        return rcu_dynticks_nesting <= 1;
}
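
/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().  Called with irqs
 * disabled to avoid confusion due to interrupt handlers invoking
 * call_rcu().
 */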
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
        RCU_TRACE(reset_cpu_stall_ticks(rcp));
        if (rcp->rcucblist != NULL &&
            rcp->donetail != rcp->curtail) {
                rcp->donetail = rcp->curtail;
                return 1;
        }

        return 0;
}
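
/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */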
void rcu_sched_qs(int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
            rcu_qsctr_help(&rcu_bh_ctrlblk))
                raise_softirq(RCU_SOFTIRQ);
        local_irq_restore(flags);
}
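
/*
 * Record an rcu_bh quiescent state.
 */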
void rcu_bh_qs(int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_qsctr_help(&rcu_bh_ctrlblk))
                raise_softirq(RCU_SOFTIRQ);
        local_irq_restore(flags);
}
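
/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */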
void rcu_check_callbacks(int cpu, int user)
{
        RCU_TRACE(check_cpu_stalls());
        if (user || rcu_is_cpu_rrupt_from_idle())
                rcu_sched_qs(cpu);
        else if (!in_softirq())
                rcu_bh_qs(cpu);
}
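
/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */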
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
        const char *rn = NULL;
        struct rcu_head *next, *list;
        unsigned long flags;
        RCU_TRACE(int cb_count = 0);

        /* If no RCU callbacks ready to invoke, just return. */
        if (&rcp->rcucblist == rcp->donetail) {
                RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
                RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
                                              ACCESS_ONCE(rcp->rcucblist),
                                              need_resched(),
                                              is_idle_task(current),
                                              false));
                return;
        }

        /* Move the ready-to-invoke callbacks to a local list. */
        local_irq_save(flags);
        RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
        list = rcp->rcucblist;
        rcp->rcucblist = *rcp->donetail;
        *rcp->donetail = NULL;
        if (rcp->curtail == rcp->donetail)
                rcp->curtail = &rcp->rcucblist;
        rcp->donetail = &rcp->rcucblist;
        local_irq_restore(flags);

        /* Invoke the callbacks on the local list. */
        RCU_TRACE(rn = rcp->name);
        while (list) {
                next = list->next;
                prefetch(next);
                debug_rcu_head_unqueue(list);
                local_bh_disable();
                __rcu_reclaim(rn, list);
                local_bh_enable();
                list = next;
                RCU_TRACE(cb_count++);
        }
        RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
        RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(),
                                      is_idle_task(current),
                                      false));
}
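
/*
 * RCU_SOFTIRQ handler: invoke whatever callbacks are ready for both
 * flavors of RCU.
 */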
static void rcu_process_callbacks(struct softirq_action *unused)
{
        __rcu_process_callbacks(&rcu_sched_ctrlblk);
        __rcu_process_callbacks(&rcu_bh_ctrlblk);
}
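
/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().
 */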
void synchronize_sched(void)
{
        rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
                           !lock_is_held(&rcu_lock_map) &&
                           !lock_is_held(&rcu_sched_lock_map),
                           "Illegal synchronize_sched() in RCU read-side critical section");
        cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
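
/*
 * Helper function for call_rcu_sched() and call_rcu_bh(): enqueue the
 * callback at the tail of the specified control block's list.
 */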
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcp->curtail = head;
        rcp->curtail = &head->next;
        RCU_TRACE(rcp->qlen++);
        local_irq_restore(flags);
}
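
/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */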
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
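
/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */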
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
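
/* Register the softirq handler that invokes ready RCU callbacks. */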
void rcu_init(void)
{
        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}