1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/cache.h>
26#include <linux/spinlock.h>
27#include <linux/threads.h>
28#include <linux/cpumask.h>
29#include <linux/seqlock.h>
30
31
32
33
34
35
36
/*
 * Define the shape of the rcu_node combining-tree hierarchy based on
 * NR_CPUS and CONFIG_RCU_FANOUT.  The leaf-level fanout is capped at 16
 * regardless of CONFIG_RCU_FANOUT; interior levels use the full fanout.
 */
#define MAX_RCU_LVLS 4	/* Maximum supported depth of the rcu_node tree. */
#if CONFIG_RCU_FANOUT > 16
#define RCU_FANOUT_LEAF 16
#else
#define RCU_FANOUT_LEAF (CONFIG_RCU_FANOUT)
#endif
/* Maximum number of CPUs coverable by a tree of depth 1, 2, 3, and 4. */
#define RCU_FANOUT_1 (RCU_FANOUT_LEAF)
#define RCU_FANOUT_2 (RCU_FANOUT_1 * CONFIG_RCU_FANOUT)
#define RCU_FANOUT_3 (RCU_FANOUT_2 * CONFIG_RCU_FANOUT)
#define RCU_FANOUT_4 (RCU_FANOUT_3 * CONFIG_RCU_FANOUT)
47
/*
 * Select the shallowest tree that covers NR_CPUS and compute the number
 * of rcu_node structures at each level.  Level 0 is the root; the
 * deepest NUM_RCU_LVL_* value is NR_CPUS itself, standing in for the
 * per-CPU rcu_data structures that hang off the leaf rcu_node level.
 */
#if NR_CPUS <= RCU_FANOUT_1
# define NUM_RCU_LVLS 1
# define NUM_RCU_LVL_0 1
# define NUM_RCU_LVL_1 (NR_CPUS)
# define NUM_RCU_LVL_2 0
# define NUM_RCU_LVL_3 0
# define NUM_RCU_LVL_4 0
#elif NR_CPUS <= RCU_FANOUT_2
# define NUM_RCU_LVLS 2
# define NUM_RCU_LVL_0 1
# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_LVL_2 (NR_CPUS)
# define NUM_RCU_LVL_3 0
# define NUM_RCU_LVL_4 0
#elif NR_CPUS <= RCU_FANOUT_3
# define NUM_RCU_LVLS 3
# define NUM_RCU_LVL_0 1
# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_LVL_3 (NR_CPUS)
# define NUM_RCU_LVL_4 0
#elif NR_CPUS <= RCU_FANOUT_4
# define NUM_RCU_LVLS 4
# define NUM_RCU_LVL_0 1
# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
# define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_LVL_4 (NR_CPUS)
#else
# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */

/* Sum over all levels, including the fictitious per-CPU "level". */
#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4)
/* Number of real rcu_node structures: RCU_SUM minus the per-CPU count. */
#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
82
83
84
85
/*
 * Per-CPU dynticks (dyntick-idle) state, used to track whether a CPU
 * is in an extended quiescent state from RCU's perspective.
 */
struct rcu_dynticks {
	int dynticks_nesting;		/* Track irq/process nesting level. */
	int dynticks_nmi_nesting;	/* Track NMI nesting level. */
	atomic_t dynticks;		/* Even value for dynticks-idle, else odd. */
};
91
92
/* RCU's kthread states for tracing (->*_kthread_status fields). */
#define RCU_KTHREAD_STOPPED 0	/* kthread not yet started, or stopped. */
#define RCU_KTHREAD_RUNNING 1	/* kthread currently doing work. */
#define RCU_KTHREAD_WAITING 2	/* kthread sleeping, awaiting work. */
#define RCU_KTHREAD_OFFCPU 3	/* kthread preempted or otherwise off CPU. */
#define RCU_KTHREAD_YIELDING 4	/* kthread voluntarily yielding the CPU. */
#define RCU_KTHREAD_MAX 4	/* Highest valid kthread state value. */
99
100
101
102
/*
 * Definition for a node within the RCU grace-period-detection hierarchy.
 */
struct rcu_node {
	raw_spinlock_t lock;	/* Root rcu_node's lock protects some */
				/*  rcu_state fields as well as following. */
	unsigned long gpnum;	/* Current grace period for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's gpnum. */
	unsigned long completed; /* Last GP completed for this node. */
				/*  This will either be equal to or one */
				/*  behind the root rcu_node's gpnum. */
	unsigned long qsmask;	/* CPUs or groups that need to switch in */
				/*  order for current grace period to proceed.*/
				/*  In leaf rcu_node, each bit corresponds to */
				/*  an rcu_data structure, otherwise, each */
				/*  bit corresponds to a child rcu_node */
				/*  structure. */
	unsigned long expmask;	/* Groups that have ->blkd_tasks */
				/*  elements that need to drain to allow the */
				/*  current expedited grace period to */
				/*  complete (only for TREE_PREEMPT_RCU). */
	atomic_t wakemask;	/* CPUs whose kthread needs to be awakened. */
	unsigned long qsmaskinit;
				/* Per-GP initial value for qsmask & expmask. */
	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
				/*  Only one bit will be set in this mask. */
	int grplo;		/* lowest-numbered CPU or group here. */
	int grphi;		/* highest-numbered CPU or group here. */
	u8 grpnum;		/* CPU/group number for next level up. */
	u8 level;		/* root is at level 0. */
	struct rcu_node *parent;	/* NULL at root. */
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there can be no such task. */
#ifdef CONFIG_RCU_BOOST
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed for this rcu_node */
				/*  structure. */
	unsigned long boost_time;
				/* When to start boosting (jiffies). */
	struct task_struct *boost_kthread_task;
				/* kthread that takes care of priority */
				/*  boosting for this rcu_node structure. */
	unsigned int boost_kthread_status;
				/* State of boost_kthread_task for tracing. */
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	/* The n_balk_* counters record why boosting was declined. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notblocked;
				/* Refused to boost: RCU RS CS still running. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/*  This can happen due to race conditions. */
#endif /* #ifdef CONFIG_RCU_BOOST */
	struct task_struct *node_kthread_task;
				/* kthread that takes care of this rcu_node */
				/*  structure, for example, awakening the */
				/*  per-CPU kthreads as needed. */
	unsigned int node_kthread_status;
				/* State of node_kthread_task for tracing. */
} ____cacheline_internodealigned_in_smp;
191
192
193
194
195
/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < (rsp)->level[NUM_RCU_LVLS - 1]; (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
	     (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
218
219
/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL 0		/* Also RCU_WAIT head. */
#define RCU_WAIT_TAIL 1		/* Also RCU_NEXT_READY head. */
#define RCU_NEXT_READY_TAIL 2	/* Also RCU_NEXT head. */
#define RCU_NEXT_TAIL 3
#define RCU_NEXT_SIZE 4
225
226
/* Per-CPU data for read-copy update. */
struct rcu_data {
	/* 1) quiescent-state and grace-period handling : */
	unsigned long completed;	/* Track rsp->completed gp number */
					/*  in order to detect GP end. */
	unsigned long gpnum;		/* Highest gp number that this CPU */
					/*  is aware of having started. */
	unsigned long passed_quiesce_gpnum;
					/* gpnum at time of quiescent state. */
	bool passed_quiesce;		/* User-mode/idle loop etc. */
	bool qs_pending;		/* Core waits for quiesc state. */
	bool beenonline;		/* CPU online at least once. */
	bool preemptible;		/* Preemptible RCU? */
	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */

	/* 2) batch handling */
	/*
	 * If nxtlist is not NULL, it is partitioned as follows.
	 * Any of the partitions might be empty, in which case the
	 * pointer to that partition will be equal to the pointer for
	 * the following partition.  When the list is empty, all of
	 * the nxttail elements point to the ->nxtlist pointer itself,
	 * which in that case is NULL.
	 *
	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
	 *	Entries whose grace period has completed.
	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
	 *	Entries waiting for the current GP.
	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
	 *	Entries known to have arrived before current GP ended.
	 * [*nxttail[RCU_NEXT_READY_TAIL], *nxttail[RCU_NEXT_TAIL]):
	 *	Entries that might have arrived after current GP ended.
	 *	Note that the value of *nxttail[RCU_NEXT_TAIL] will
	 *	always be NULL, as this is the end of the list.
	 */
	struct rcu_head *nxtlist;
	struct rcu_head **nxttail[RCU_NEXT_SIZE];
	long qlen;			/* # of queued callbacks */
	long qlen_last_fqs_check;
					/* qlen at last check for QS forcing */
	unsigned long n_cbs_invoked;	/* count of RCU cbs invoked. */
	unsigned long n_cbs_orphaned;	/* RCU cbs sent to orphanage. */
	unsigned long n_cbs_adopted;	/* RCU cbs adopted from orphanage. */
	unsigned long n_force_qs_snap;
					/* did other CPU force QS recently? */
	long blimit;			/* Upper limit on a processed batch */

#ifdef CONFIG_NO_HZ
	/* 3) dynticks interface. */
	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
	int dynticks_snap;		/* Per-GP tracking for dynticks. */
#endif /* #ifdef CONFIG_NO_HZ */

	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
#ifdef CONFIG_NO_HZ
	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
#endif /* #ifdef CONFIG_NO_HZ */
	unsigned long offline_fqs;	/* Kicked due to being offline. */
	unsigned long resched_ipi;	/* Sent a resched IPI. */

	/* 5) __rcu_pending() statistics. */
	unsigned long n_rcu_pending;	/* rcu_pending() calls since boot. */
	unsigned long n_rp_qs_pending;
	unsigned long n_rp_report_qs;
	unsigned long n_rp_cb_ready;
	unsigned long n_rp_cpu_needs_gp;
	unsigned long n_rp_gp_completed;
	unsigned long n_rp_gp_started;
	unsigned long n_rp_need_fqs;
	unsigned long n_rp_need_nothing;

	int cpu;			/* CPU number owning this structure. */
	struct rcu_state *rsp;		/* Flavor this rcu_data belongs to. */
};
304
305
/* Values for signaled field in struct rcu_state. */
#define RCU_GP_IDLE 0		/* No grace period in progress. */
#define RCU_GP_INIT 1		/* Grace period being initialized. */
#define RCU_SAVE_DYNTICK 2	/* Need to scan dyntick state. */
#define RCU_FORCE_QS 3		/* Need to force quiescent states. */
#ifdef CONFIG_NO_HZ
#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
#else
#define RCU_SIGNAL_INIT RCU_FORCE_QS
#endif /* #ifdef CONFIG_NO_HZ */

#define RCU_JIFFIES_TILL_FORCE_QS 3	/* for rsp->jiffies_force_qs */

/* Give lockdep-enabled (PROVE_RCU) kernels extra slack before stall checks. */
#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA (5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA 0
#endif

#define RCU_SECONDS_TILL_STALL_CHECK (CONFIG_RCU_CPU_STALL_TIMEOUT * HZ + \
 RCU_STALL_DELAY_DELTA)
						/* for rsp->jiffies_stall */

#define RCU_SECONDS_TILL_STALL_RECHECK (3 * RCU_SECONDS_TILL_STALL_CHECK + 30)
						/* for rsp->jiffies_stall */

#define RCU_STALL_RAT_DELAY 2	/* Allow other CPUs time to take */
				/*  at least one scheduling clock */
				/*  irq before ratting on them. */
330
331
332
333
/*
 * Sleep in TASK_INTERRUPTIBLE state until "cond" becomes true.
 * The task state is set before cond is tested, so a wakeup arriving
 * between the test and the call to schedule() is not lost.
 */
#define rcu_wait(cond) \
do { \
	for (;;) { \
		set_current_state(TASK_INTERRUPTIBLE); \
		if (cond) \
			break; \
		schedule(); \
	} \
	__set_current_state(TASK_RUNNING); \
} while (0)
344
345
346
347
348
349
350
351
352
353
354
/*
 * RCU global state, including node hierarchy.  There is one rcu_state
 * per RCU flavor (see the extern declarations further down).
 */
struct rcu_state {
	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
	struct rcu_node *level[NUM_RCU_LVLS];	/* Hierarchy levels. */
	u32 levelcnt[MAX_RCU_LVLS + 1];		/* # nodes in each level. */
	u8 levelspread[NUM_RCU_LVLS];		/* kids/node in each level. */
	struct rcu_data __percpu *rda;		/* pointer of percpu rcu_data. */

	/* The following fields are guarded by the root rcu_node's lock. */

	u8 signaled ____cacheline_internodealigned_in_smp;
						/* Force QS state. */
	u8 fqs_active;				/* force_quiescent_state() */
						/*  is running. */
	u8 fqs_need_gp;				/* A CPU was prevented from */
						/*  starting a new grace */
						/*  period because */
						/*  force_quiescent_state() */
						/*  was running. */
	u8 boost;				/* Subject to priority boost. */
	unsigned long gpnum;			/* Current gp number. */
	unsigned long completed;		/* # of last completed gp. */

	/* End of fields guarded by root rcu_node's lock. */

	raw_spinlock_t onofflock;		/* exclude on/offline and */
						/*  starting new GP. */
	raw_spinlock_t fqslock;			/* Only one task forcing */
						/*  quiescent states. */
	unsigned long jiffies_force_qs;		/* Time at which to invoke */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs;		/* Number of calls to */
						/*  force_quiescent_state(). */
	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
						/*  due to lock unavailable. */
	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
						/*  due to no GP active. */
	unsigned long gp_start;			/* Time at which GP started, */
						/*  in jiffies. */
	unsigned long jiffies_stall;		/* Time at which to check */
						/*  for CPU stalls. */
	unsigned long gp_max;			/* Maximum GP duration in */
						/*  jiffies. */
	char *name;				/* Name of structure. */
};
399
400
401
/* Return values for rcu_preempt_offline_tasks(). */
#define RCU_OFL_TASKS_NORM_GP 0x1	/* Tasks blocking normal */
					/*  GP were moved. */
#define RCU_OFL_TASKS_EXP_GP 0x2	/* Tasks blocking expedited */
					/*  GP were moved. */
405
406
407
408
409
/*
 * RCU implementation internal declarations: the per-flavor global state
 * and its per-CPU rcu_data, defined in the core implementation file.
 */
extern struct rcu_state rcu_sched_state;
DECLARE_PER_CPU(struct rcu_data, rcu_sched_data);

extern struct rcu_state rcu_bh_state;
DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);

#ifdef CONFIG_TREE_PREEMPT_RCU
extern struct rcu_state rcu_preempt_state;
DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */

#ifdef CONFIG_RCU_BOOST
/* Per-CPU kthread state used by the RCU-boost machinery. */
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
DECLARE_PER_CPU(char, rcu_cpu_has_work);
#endif /* #ifdef CONFIG_RCU_BOOST */
427
#ifndef RCU_TREE_NONCORE

/*
 * Forward declarations for functions defined in the plugin file
 * (rcutree_plugin.h, judging by the static linkage — TODO confirm),
 * covering the preemptible-RCU, CPU-stall-warning, CPU-hotplug, and
 * RCU-boost variants of the core algorithms.
 */
static void rcu_bootup_announce(void);
long rcu_batches_completed(void);
static void rcu_preempt_note_context_switch(int cpu);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
				      unsigned long flags);
static void rcu_stop_cpu_kthread(int cpu);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
/* CPU-stall-warning diagnostics. */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp);
static void rcu_preempt_stall_reset(void);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
				     struct rcu_node *rnp,
				     struct rcu_data *rdp);
static void rcu_preempt_offline_cpu(int cpu);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_preempt_check_callbacks(int cpu);
static void rcu_preempt_process_callbacks(void);
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp);
#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
static int rcu_preempt_pending(int cpu);
static int rcu_preempt_needs_cpu(int cpu);
static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
static void rcu_preempt_send_cbs_to_online(void);
static void __init __rcu_init_preempt(void);
/* RCU priority-boosting and kthread support. */
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
static void invoke_rcu_callbacks_kthread(void);
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
					  cpumask_var_t cm);
static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
						 struct rcu_node *rnp,
						 int rnp_index);
static void invoke_rcu_node_kthread(struct rcu_node *rnp);
static void rcu_yield(void (*f)(unsigned long), unsigned long arg);
#endif /* #ifdef CONFIG_RCU_BOOST */
static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
static void __cpuinit rcu_prepare_kthreads(int cpu);

#endif /* #ifndef RCU_TREE_NONCORE */
478