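/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Task-based RCU implementations: RCU Tasks, RCU Tasks Rude, and RCU
 * Tasks Trace.  Each flavor provides grace periods that wait for every
 * task to pass through a flavor-specific quiescent state.
 */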
#ifdef CONFIG_TASKS_RCU_GENERIC
#include "rcu_segcblist.h"
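
/*
 * Forward declaration and function-pointer types used to parameterize
 * the per-flavor grace-period machinery.
 */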
struct rcu_tasks;
typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
typedef void (*pregp_func_t)(void);
typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
typedef void (*postscan_func_t)(struct list_head *hop);
typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
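
/**
 * struct rcu_tasks_percpu - Per-CPU component of definition for a Tasks-RCU-like mechanism.
 * @cblist: Callback list.
 * @lock: Lock protecting per-CPU callback list.
 * @rtp_jiffies: Jiffies counter value for statistics.
 * @rtp_n_lock_retries: Rough lock-contention statistic.
 * @rtp_work: Work queue for invoking callbacks.
 * @rtp_irq_work: IRQ work queue for deferred wakeups.
 * @barrier_q_head: RCU callback for barrier operation.
 * @cpu: CPU number corresponding to this entry.
 * @rtpp: Pointer to the rcu_tasks structure.
 */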
struct rcu_tasks_percpu {
	struct rcu_segcblist cblist;
	raw_spinlock_t __private lock;
	unsigned long rtp_jiffies;
	unsigned long rtp_n_lock_retries;
	struct work_struct rtp_work;
	struct irq_work rtp_irq_work;
	struct rcu_head barrier_q_head;
	int cpu;
	struct rcu_tasks *rtpp;
};
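
/**
 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
 * @cbs_wait: RCU wait allowing a new callback to get kthread's attention.
 * @cbs_gbl_lock: Lock protecting callback-queue configuration.
 * @gp_state: Grace period's most recent state transition (debugging).
 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
 * @init_fract: Initial backoff sleep interval.
 * @gp_jiffies: Time of last @gp_state transition.
 * @gp_start: Most recent grace-period start in jiffies.
 * @tasks_gp_seq: Number of grace periods completed since boot.
 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
 * @n_ipis_fails: Number of IPI-send failures.
 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
 * @gp_func: This flavor's grace-period-wait function.
 * @pregp_func: This flavor's pre-grace-period function (optional).
 * @pertask_func: This flavor's per-task scan function (optional).
 * @postscan_func: This flavor's post-task-scan function (optional).
 * @holdouts_func: This flavor's holdout-list scan function (optional).
 * @postgp_func: This flavor's post-grace-period function (optional).
 * @call_func: This flavor's call_rcu()-equivalent function.
 * @rtpcpu: This flavor's rcu_tasks_percpu structure.
 * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks.
 * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing.
 * @percpu_dequeue_lim: Number of per-CPU callback queues in use for dequeuing.
 * @percpu_dequeue_gpseq: RCU grace-period number to propagate enqueue limit to dequeuers.
 * @barrier_q_mutex: Serialize barrier operations.
 * @barrier_q_count: Number of queues being waited on.
 * @barrier_q_completion: Barrier wait/wakeup mechanism.
 * @barrier_q_seq: Sequence number for barrier operations.
 * @name: This flavor's textual name.
 * @kname: This flavor's kthread name.
 */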
struct rcu_tasks {
	struct rcuwait cbs_wait;
	raw_spinlock_t cbs_gbl_lock;
	int gp_state;
	int gp_sleep;
	int init_fract;
	unsigned long gp_jiffies;
	unsigned long gp_start;
	unsigned long tasks_gp_seq;
	unsigned long n_ipis;
	unsigned long n_ipis_fails;
	struct task_struct *kthread_ptr;
	rcu_tasks_gp_func_t gp_func;
	pregp_func_t pregp_func;
	pertask_func_t pertask_func;
	postscan_func_t postscan_func;
	holdouts_func_t holdouts_func;
	postgp_func_t postgp_func;
	call_rcu_func_t call_func;
	struct rcu_tasks_percpu __percpu *rtpcpu;
	int percpu_enqueue_shift;
	int percpu_enqueue_lim;
	int percpu_dequeue_lim;
	unsigned long percpu_dequeue_gpseq;
	struct mutex barrier_q_mutex;
	atomic_t barrier_q_count;
	struct completion barrier_q_completion;
	unsigned long barrier_q_seq;
	char *name;
	char *kname;
};

static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp);
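
/*
 * Instantiate the per-CPU and global state for one Tasks-RCU-like flavor,
 * initially directing all callbacks to CPU 0's queue.
 */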
#define DEFINE_RCU_TASKS(rt_name, gp, call, n)						\
static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = {		\
	.lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock),		\
	.rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup),			\
};											\
static struct rcu_tasks rt_name =							\
{											\
	.cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait),				\
	.cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock),			\
	.gp_func = gp,									\
	.call_func = call,								\
	.rtpcpu = &rt_name ## __percpu,							\
	.name = n,									\
	.percpu_enqueue_shift = order_base_2(CONFIG_NR_CPUS),				\
	.percpu_enqueue_lim = 1,							\
	.percpu_dequeue_lim = 1,							\
	.barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex),		\
	.barrier_q_seq = (0UL - 50UL) << RCU_SEQ_CTR_SHIFT,				\
	.kname = #rt_name,								\
}

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Avoid IPIing CPUs early in the grace period, as measured in jiffies. */
#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);

/* Control delay and frequency of RCU-tasks CPU stall warnings. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);
#define RCU_TASK_STALL_INFO (HZ * 10)
static int rcu_task_stall_info __read_mostly = RCU_TASK_STALL_INFO;
module_param(rcu_task_stall_info, int, 0644);
static int rcu_task_stall_info_mult __read_mostly = 3;
module_param(rcu_task_stall_info_mult, int, 0444);

/* Number of callback queues to use, -1 to select automatic adjustment. */
static int rcu_task_enqueue_lim __read_mostly = -1;
module_param(rcu_task_enqueue_lim, int, 0444);

/* Contention and callback-count thresholds for switching between single-queue and per-CPU queueing. */
static bool rcu_task_cb_adjust;
static int rcu_task_contend_lim __read_mostly = 100;
module_param(rcu_task_contend_lim, int, 0444);
static int rcu_task_collapse_lim __read_mostly = 10;
module_param(rcu_task_collapse_lim, int, 0444);

/* RCU-tasks grace-period state values, for debugging. */
#define RTGS_INIT		 0
#define RTGS_WAIT_WAIT_CBS	 1
#define RTGS_WAIT_GP		 2
#define RTGS_PRE_WAIT_GP	 3
#define RTGS_SCAN_TASKLIST	 4
#define RTGS_POST_SCAN_TASKLIST	 5
#define RTGS_WAIT_SCAN_HOLDOUTS	 6
#define RTGS_SCAN_HOLDOUTS	 7
#define RTGS_POST_GP		 8
#define RTGS_WAIT_READERS	 9
#define RTGS_INVOKE_CBS		10
#define RTGS_WAIT_CBS		11
#ifndef CONFIG_TINY_RCU
static const char * const rcu_tasks_gp_state_names[] = {
	"RTGS_INIT",
	"RTGS_WAIT_WAIT_CBS",
	"RTGS_WAIT_GP",
	"RTGS_PRE_WAIT_GP",
	"RTGS_SCAN_TASKLIST",
	"RTGS_POST_SCAN_TASKLIST",
	"RTGS_WAIT_SCAN_HOLDOUTS",
	"RTGS_SCAN_HOLDOUTS",
	"RTGS_POST_GP",
	"RTGS_WAIT_READERS",
	"RTGS_INVOKE_CBS",
	"RTGS_WAIT_CBS",
};
#endif /* #ifndef CONFIG_TINY_RCU */
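
////////////////////////////////////////////////////////////////////////
//
// Generic code shared by all Tasks-RCU-like flavors.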

static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp);

/* Record grace-period phase and time. */
static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
{
	rtp->gp_state = newstate;
	rtp->gp_jiffies = jiffies;
}

#ifndef CONFIG_TINY_RCU

/* Return state name. */
static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
{
	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
	int j = READ_ONCE(i); // Prevent the compiler from reading twice

	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
		return "???";
	return rcu_tasks_gp_state_names[j];
}
#endif /* #ifndef CONFIG_TINY_RCU */
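
// Initialize per-CPU callback lists for the specified flavor of
// Tasks RCU.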
static void cblist_init_generic(struct rcu_tasks *rtp)
{
	int cpu;
	unsigned long flags;
	int lim;
	int shift;

	raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
	if (rcu_task_enqueue_lim < 0) {
		rcu_task_enqueue_lim = 1;
		rcu_task_cb_adjust = true;
		pr_info("%s: Setting adjustable number of callback queues.\n", __func__);
	} else if (rcu_task_enqueue_lim == 0) {
		rcu_task_enqueue_lim = 1;
	}
	lim = rcu_task_enqueue_lim;

	if (lim > nr_cpu_ids)
		lim = nr_cpu_ids;
	shift = ilog2(nr_cpu_ids / lim);
	if (((nr_cpu_ids - 1) >> shift) >= lim)
		shift++;
	WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
	WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
	smp_store_release(&rtp->percpu_enqueue_lim, lim);
	for_each_possible_cpu(cpu) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		WARN_ON_ONCE(!rtpcp);
		if (cpu)
			raw_spin_lock_init(&ACCESS_PRIVATE(rtpcp, lock));
		raw_spin_lock_rcu_node(rtpcp);
		if (rcu_segcblist_empty(&rtpcp->cblist))
			rcu_segcblist_init(&rtpcp->cblist);
		INIT_WORK(&rtpcp->rtp_work, rcu_tasks_invoke_cbs_wq);
		rtpcp->cpu = cpu;
		rtpcp->rtpp = rtp;
		raw_spin_unlock_rcu_node(rtpcp);
	}
	raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	pr_info("%s: Setting shift to %d and lim to %d.\n", __func__, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim));
}

// IRQ-work handler that does deferred wakeup of the grace-period kthread
// on behalf of call_rcu_tasks_generic().
static void call_rcu_tasks_iw_wakeup(struct irq_work *iwp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work);

	rtp = rtpcp->rtpp;
	rcuwait_wake_up(&rtp->cbs_wait);
}
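
// Enqueue a callback for the specified flavor of Tasks RCU.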
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
				   struct rcu_tasks *rtp)
{
	int chosen_cpu;
	unsigned long flags;
	int ideal_cpu;
	unsigned long j;
	bool needadjust = false;
	bool needwake;
	struct rcu_tasks_percpu *rtpcp;

	rhp->next = NULL;
	rhp->func = func;
	local_irq_save(flags);
	rcu_read_lock();
	ideal_cpu = smp_processor_id() >> READ_ONCE(rtp->percpu_enqueue_shift);
	chosen_cpu = cpumask_next(ideal_cpu - 1, cpu_possible_mask);
	rtpcp = per_cpu_ptr(rtp->rtpcpu, chosen_cpu);
	if (!raw_spin_trylock_rcu_node(rtpcp)) { // irqs already disabled.
		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
		j = jiffies;
		if (rtpcp->rtp_jiffies != j) {
			rtpcp->rtp_jiffies = j;
			rtpcp->rtp_n_lock_retries = 0;
		}
		if (rcu_task_cb_adjust && ++rtpcp->rtp_n_lock_retries > rcu_task_contend_lim &&
		    READ_ONCE(rtp->percpu_enqueue_lim) != nr_cpu_ids)
			needadjust = true;  // Defer adjustment to avoid deadlock.
	}
	if (!rcu_segcblist_is_enabled(&rtpcp->cblist)) {
		raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled.
		cblist_init_generic(rtp);
		raw_spin_lock_rcu_node(rtpcp); // irqs already disabled.
	}
	needwake = rcu_segcblist_empty(&rtpcp->cblist);
	rcu_segcblist_enqueue(&rtpcp->cblist, rhp);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	if (unlikely(needadjust)) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
			WRITE_ONCE(rtp->percpu_enqueue_shift, 0);
			WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
			smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
			pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}
	rcu_read_unlock();
	// If the kthread does not yet exist, it will find these callbacks once it starts.
	if (needwake && READ_ONCE(rtp->kthread_ptr))
		irq_work_queue(&rtpcp->rtp_irq_work);
}

// Wait for a grace period for the specified flavor of Tasks RCU.
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{
	/* Complain if the scheduler has not started. */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(rtp->call_func);
}

// RCU callback used by rcu_barrier_tasks_generic(): decrement the count
// of outstanding barrier callbacks and wake the waiter when it hits zero.
static void rcu_barrier_tasks_generic_cb(struct rcu_head *rhp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp;

	rtpcp = container_of(rhp, struct rcu_tasks_percpu, barrier_q_head);
	rtp = rtpcp->rtpp;
	if (atomic_dec_and_test(&rtp->barrier_q_count))
		complete(&rtp->barrier_q_completion);
}

// Wait for all in-flight callbacks for the specified RCU Tasks flavor.
// Operates in a manner similar to rcu_barrier().
static void rcu_barrier_tasks_generic(struct rcu_tasks *rtp)
{
	int cpu;
	unsigned long flags;
	struct rcu_tasks_percpu *rtpcp;
	unsigned long s = rcu_seq_snap(&rtp->barrier_q_seq);

	mutex_lock(&rtp->barrier_q_mutex);
	if (rcu_seq_done(&rtp->barrier_q_seq, s)) {
		smp_mb();
		mutex_unlock(&rtp->barrier_q_mutex);
		return;
	}
	rcu_seq_start(&rtp->barrier_q_seq);
	init_completion(&rtp->barrier_q_completion);
	atomic_set(&rtp->barrier_q_count, 2);
	for_each_possible_cpu(cpu) {
		if (cpu >= smp_load_acquire(&rtp->percpu_dequeue_lim))
			break;
		rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);
		rtpcp->barrier_q_head.func = rcu_barrier_tasks_generic_cb;
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		if (rcu_segcblist_entrain(&rtpcp->cblist, &rtpcp->barrier_q_head))
			atomic_inc(&rtp->barrier_q_count);
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	}
	if (atomic_sub_and_test(2, &rtp->barrier_q_count))
		complete(&rtp->barrier_q_completion);
	wait_for_completion(&rtp->barrier_q_completion);
	rcu_seq_end(&rtp->barrier_q_seq);
	mutex_unlock(&rtp->barrier_q_mutex);
}
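
// Advance callbacks and indicate whether either a grace period or
// callback invocation is needed.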
static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
{
	int cpu;
	unsigned long flags;
	long n;
	long ncbs = 0;
	long ncbsnz = 0;
	int needgpcb = 0;

	for (cpu = 0; cpu < smp_load_acquire(&rtp->percpu_dequeue_lim); cpu++) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		/* Advance and accelerate any new callbacks. */
		if (!rcu_segcblist_n_cbs(&rtpcp->cblist))
			continue;
		raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
		// Should we shrink down to a single callback queue?
		n = rcu_segcblist_n_cbs(&rtpcp->cblist);
		if (n) {
			ncbs += n;
			if (cpu > 0)
				ncbsnz += n;
		}
		rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
		(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
		if (rcu_segcblist_pend_cbs(&rtpcp->cblist))
			needgpcb |= 0x3;
		if (!rcu_segcblist_empty(&rtpcp->cblist))
			needgpcb |= 0x1;
		raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	}

	// Shrink down to a single callback queue if appropriate.
	// This is done in two stages: (1) If there are no more than
	// rcu_task_collapse_lim callbacks on CPU 0 and none on any other
	// CPU, limit enqueueing to CPU 0.  (2) After an RCU grace period,
	// if there has not been an increase in callbacks, limit dequeueing
	// to CPU 0.
	if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim > 1) {
			WRITE_ONCE(rtp->percpu_enqueue_shift, order_base_2(nr_cpu_ids));
			smp_store_release(&rtp->percpu_enqueue_lim, 1);
			rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
			pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}
	if (rcu_task_cb_adjust && !ncbsnz &&
	    poll_state_synchronize_rcu(rtp->percpu_dequeue_gpseq)) {
		raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
		if (rtp->percpu_enqueue_lim < rtp->percpu_dequeue_lim) {
			WRITE_ONCE(rtp->percpu_dequeue_lim, 1);
			pr_info("Completing switch %s to CPU-0 callback queuing.\n", rtp->name);
		}
		raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags);
	}

	return needgpcb;
}
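
// Advance callbacks on the specified per-CPU list and invoke any that
// are ready, first fanning further callback-invocation work out to
// later CPUs' workqueues in a binary-tree pattern.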
static void rcu_tasks_invoke_cbs(struct rcu_tasks *rtp, struct rcu_tasks_percpu *rtpcp)
{
	int cpu;
	int cpunext;
	unsigned long flags;
	int len;
	struct rcu_head *rhp;
	struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
	struct rcu_tasks_percpu *rtpcp_next;

	cpu = rtpcp->cpu;
	cpunext = cpu * 2 + 1;
	if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
		rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
		queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
		cpunext++;
		if (cpunext < smp_load_acquire(&rtp->percpu_dequeue_lim)) {
			rtpcp_next = per_cpu_ptr(rtp->rtpcpu, cpunext);
			queue_work_on(cpunext, system_wq, &rtpcp_next->rtp_work);
		}
	}

	if (rcu_segcblist_empty(&rtpcp->cblist) || !cpu_possible(cpu))
		return;
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	rcu_segcblist_advance(&rtpcp->cblist, rcu_seq_current(&rtp->tasks_gp_seq));
	rcu_segcblist_extract_done_cbs(&rtpcp->cblist, &rcl);
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
	len = rcl.len;
	for (rhp = rcu_cblist_dequeue(&rcl); rhp; rhp = rcu_cblist_dequeue(&rcl)) {
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
		cond_resched();
	}
	raw_spin_lock_irqsave_rcu_node(rtpcp, flags);
	rcu_segcblist_add_len(&rtpcp->cblist, -len);
	(void)rcu_segcblist_accelerate(&rtpcp->cblist, rcu_seq_snap(&rtp->tasks_gp_seq));
	raw_spin_unlock_irqrestore_rcu_node(rtpcp, flags);
}

// Workqueue flood to advance callbacks and invoke any that are ready.
static void rcu_tasks_invoke_cbs_wq(struct work_struct *wp)
{
	struct rcu_tasks *rtp;
	struct rcu_tasks_percpu *rtpcp = container_of(wp, struct rcu_tasks_percpu, rtp_work);

	rtp = rtpcp->rtpp;
	rcu_tasks_invoke_cbs(rtp, rtpcp);
}
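
/* RCU-tasks kthread that detects grace periods and invokes callbacks. */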
static int __noreturn rcu_tasks_kthread(void *arg)
{
	int needgpcb;
	struct rcu_tasks *rtp = arg;

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current, HK_TYPE_RCU);
	WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {
		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);

		/* If there were none, wait a bit and start over. */
		rcuwait_wait_event(&rtp->cbs_wait,
				   (needgpcb = rcu_tasks_need_gpcb(rtp)),
				   TASK_IDLE);

		if (needgpcb & 0x2) {
			// Wait for one grace period.
			set_tasks_gp_state(rtp, RTGS_WAIT_GP);
			rtp->gp_start = jiffies;
			rcu_seq_start(&rtp->tasks_gp_seq);
			rtp->gp_func(rtp);
			rcu_seq_end(&rtp->tasks_gp_seq);
		}

		/* Invoke callbacks. */
		set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
		rcu_tasks_invoke_cbs(rtp, per_cpu_ptr(rtp->rtpcpu, 0));

		/* Paranoid sleep to keep this from entering a tight loop. */
		schedule_timeout_idle(rtp->gp_sleep);
	}
}

/* Spawn RCU-tasks grace-period kthread. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
		return;
	smp_mb(); /* Ensure others see full kthread. */
}

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
	int rtsimc;

	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
	rtsimc = clamp(rcu_task_stall_info_mult, 1, 10);
	if (rtsimc != rcu_task_stall_info_mult) {
		pr_info("\tTasks-RCU CPU stall info multiplier clamped to %d (rcu_task_stall_info_mult).\n", rtsimc);
		rcu_task_stall_info_mult = rtsimc;
	}
#endif
#ifdef CONFIG_TASKS_RCU
	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
#endif
#ifdef CONFIG_TASKS_RUDE_RCU
	pr_info("\tRude variant of Tasks RCU enabled.\n");
#endif
#ifdef CONFIG_TASKS_TRACE_RCU
	pr_info("\tTracing variant of Tasks RCU enabled.\n");
#endif
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifndef CONFIG_TINY_RCU

/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{
	int cpu;
	bool havecbs = false;

	for_each_possible_cpu(cpu) {
		struct rcu_tasks_percpu *rtpcp = per_cpu_ptr(rtp->rtpcpu, cpu);

		if (!data_race(rcu_segcblist_empty(&rtpcp->cblist))) {
			havecbs = true;
			break;
		}
	}
	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
		rtp->kname,
		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
		jiffies - data_race(rtp->gp_jiffies),
		data_race(rcu_seq_current(&rtp->tasks_gp_seq)),
		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
		".k"[!!data_race(rtp->kthread_ptr)],
		".C"[havecbs],
		s);
}
#endif /* #ifndef CONFIG_TINY_RCU */

static void exit_tasks_rcu_finish_trace(struct task_struct *t);

#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
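
////////////////////////////////////////////////////////////////////////
//
// Shared code between task-list-scanning variants of Tasks RCU.

/* Wait for one RCU-tasks grace period. */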
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
	struct task_struct *g;
	int fract;
	LIST_HEAD(holdouts);
	unsigned long j;
	unsigned long lastinfo;
	unsigned long lastreport;
	bool reported = false;
	int rtsi;
	struct task_struct *t;

	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
	rtp->pregp_func();

	/*
	 * There were callbacks, so we need to wait for an RCU-tasks
	 * grace period.  Start off by scanning the task list for tasks
	 * that are not already voluntarily blocked.  Mark these tasks
	 * and make a list of them in holdouts.
	 */
	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
	rcu_read_lock();
	for_each_process_thread(g, t)
		rtp->pertask_func(t, &holdouts);
	rcu_read_unlock();

	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
	rtp->postscan_func(&holdouts);

	/*
	 * Each pass through the following loop scans the list of holdout
	 * tasks, removing any that are no longer holdouts.  When the list
	 * is empty, we are done.
	 */
	lastreport = jiffies;
	lastinfo = lastreport;
	rtsi = READ_ONCE(rcu_task_stall_info);

	// Start off with initial wait and slowly back off to 1 HZ wait.
	fract = rtp->init_fract;

	while (!list_empty(&holdouts)) {
		ktime_t exp;
		bool firstreport;
		bool needreport;
		int rtst;

		// Slowly back off waiting for holdouts
		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			schedule_timeout_idle(fract);
		} else {
			exp = jiffies_to_nsecs(fract);
			__set_current_state(TASK_IDLE);
			schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), HRTIMER_MODE_REL_HARD);
		}

		if (fract < HZ)
			fract++;

		rtst = READ_ONCE(rcu_task_stall_timeout);
		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
		if (needreport) {
			lastreport = jiffies;
			reported = true;
		}
		firstreport = true;
		WARN_ON(signal_pending(current));
		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
		rtp->holdouts_func(&holdouts, needreport, &firstreport);

		// Print pre-stall informational messages if needed.
		j = jiffies;
		if (rtsi > 0 && !reported && time_after(j, lastinfo + rtsi)) {
			lastinfo = j;
			rtsi = rtsi * rcu_task_stall_info_mult;
			pr_info("%s: %s grace period %lu is %lu jiffies old.\n",
				__func__, rtp->kname, rtp->tasks_gp_seq, j - rtp->gp_start);
		}
	}

	set_tasks_gp_state(rtp, RTGS_POST_GP);
	rtp->postgp_func(rtp);
}

#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */

#ifdef CONFIG_TASKS_RCU
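
////////////////////////////////////////////////////////////////////////
//
// Simple variant of RCU whose quiescent states are voluntary context
// switch, cond_resched_tasks_rcu_qs(), user-space execution, and idle.
// As such, grace periods can take one good long time.  There are no
// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
// because this implementation is intended to get the system into a safe
// state for some of the manipulations involved in tracing and the like.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_pregp_step():
//	Invokes synchronize_rcu() in order to wait for all in-flight
//	t->on_rq and t->nvcsw transitions to complete.  This works because
//	all such transitions are carried out with interrupts disabled.
// rcu_tasks_pertask(), invoked on every non-idle task:
//	For every runnable non-idle task other than the current one, use
//	get_task_struct() to pin down that task, snapshot that task's
//	number of voluntary context switches, and add that task to the
//	holdout list.
// rcu_tasks_postscan():
//	Invoke synchronize_srcu() in order to ensure that all tasks that
//	were in the process of exiting (and which thus might not know to
//	synchronize with this RCU Tasks grace period) have completed
//	exiting.
// check_all_holdout_tasks(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.
// rcu_tasks_postgp():
//	Invokes synchronize_rcu() in order to ensure that all prior
//	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
//	prior to calling the callback functions.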

/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(void)
{
	/*
	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
	 * to complete.  Invoking synchronize_rcu() suffices because all
	 * these transitions occur with interrupts disabled.  Without this
	 * synchronize_rcu(), a read-side critical section that started
	 * before the grace period might be incorrectly seen as having
	 * started after the grace period.
	 *
	 * This synchronize_rcu() also dispenses with the need for a
	 * memory barrier on the first store to t->rcu_tasks_holdout,
	 * as it forces the store to happen after the beginning of the
	 * grace period.
	 */
	synchronize_rcu();
}

/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
	if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
		get_task_struct(t);
		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
		WRITE_ONCE(t->rcu_tasks_holdout, true);
		list_add(&t->rcu_tasks_holdout_list, hop);
	}
}

/* Processing between scanning taskslist and draining the holdout list. */
static void rcu_tasks_postscan(struct list_head *hop)
{
	/*
	 * Wait for tasks that are in the process of exiting.  This
	 * does only part of the job, ensuring that all tasks that were
	 * previously exiting reach the point where they have disabled
	 * preemption, allowing the later synchronize_rcu() to finish
	 * the job.
	 */
	synchronize_srcu(&tasks_rcu_exit_srcu);
}

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

/* Scan the holdout lists for tasks no longer holding out. */
static void check_all_holdout_tasks(struct list_head *hop,
				    bool needreport, bool *firstreport)
{
	struct task_struct *t, *t1;

	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
		check_holdout_task(t, needreport, firstreport);
		cond_resched();
	}
}

/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(struct rcu_tasks *rtp)
{
	/*
	 * Because ->on_rq and ->nvcsw are not guaranteed to have a full
	 * memory barrier prior to them in the schedule() path, memory
	 * reordering on other CPUs could cause their RCU-tasks read-side
	 * critical sections to extend past the end of the grace period.
	 * However, because these ->nvcsw updates are carried out with
	 * interrupts disabled, we can use synchronize_rcu() to force the
	 * needed ordering on all such CPUs.
	 *
	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
	 * accesses to be within the grace period, avoiding the need for
	 * memory barriers for ->rcu_tasks_holdout accesses.
	 *
	 * In addition, this synchronize_rcu() waits for exiting tasks
	 * to complete their final preempt_disable() region of execution,
	 * cleaning up after the synchronize_srcu() above.
	 */
	synchronize_rcu();
}

void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");
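
/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_tasks_rcu_qs(), entry into
 * idle, or transition to usermode execution.  As such, there are no
 * read-side primitives analogous to rcu_read_lock() and rcu_read_unlock()
 * because this primitive is intended to determine that all tasks have
 * passed through a safe state, not so much for data-structure
 * synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */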
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
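
/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have elapsed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution,
 * calls to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */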
void synchronize_rcu_tasks(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	rcu_barrier_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

static int __init rcu_spawn_tasks_kthread(void)
{
	cblist_init_generic(&rcu_tasks);
	rcu_tasks.gp_sleep = HZ / 10;
	rcu_tasks.init_fract = HZ / 10;
	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
	rcu_tasks.pertask_func = rcu_tasks_pertask;
	rcu_tasks.postscan_func = rcu_tasks_postscan;
	rcu_tasks.holdouts_func = check_all_holdout_tasks;
	rcu_tasks.postgp_func = rcu_tasks_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_classic_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
#endif /* #if !defined(CONFIG_TINY_RCU) */

/* Do the srcu_read_lock() for the above synchronize_srcu().  */
void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
{
	preempt_disable();
	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
	preempt_enable();
}

/* Do the srcu_read_unlock() for the above synchronize_srcu().  */
void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
{
	struct task_struct *t = current;

	preempt_disable();
	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
	preempt_enable();
	exit_tasks_rcu_finish_trace(t);
}

#else /* #ifdef CONFIG_TASKS_RCU */
void exit_tasks_rcu_start(void) { }
void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
#endif /* #else #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_TASKS_RUDE_RCU
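
////////////////////////////////////////////////////////////////////////
//
// "Rude" variant of Tasks RCU, which relies on passing an empty function
// to schedule_on_each_cpu().  This approach provides an asynchronous
// call_rcu_tasks_rude() API and batching of concurrent calls to the
// synchronous synchronize_rcu_tasks_rude() API.  It sends IPIs far and
// wide and induces otherwise unnecessary context switches on all online
// CPUs, whether idle or not.
//
// Callback handling is provided by the rcu_tasks_kthread() function.
//
// Ordering is provided by the scheduler's context-switch code.

// Empty function to allow workqueues to force a context switch.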
static void rcu_tasks_be_rude(struct work_struct *work)
{
}

// Wait for one rude RCU-tasks grace period.
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{
	if (num_online_cpus() <= 1)
		return;	// Fastpath for only one CPU.

	rtp->n_ipis += cpumask_weight(cpu_online_mask);
	schedule_on_each_cpu(rcu_tasks_be_rude);
}

void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
		 "RCU Tasks Rude");
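
/**
 * call_rcu_tasks_rude() - Queue a callback for a rude task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks_rude()
 * assumes that the read-side critical sections end at context switch,
 * cond_resched_tasks_rcu_qs(), or transition to usermode execution (as
 * usermode execution is schedulable).  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */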
void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);

/**
 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
 *
 * Control will return to the caller some time after a rude rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have elapsed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), userspace execution, and (in theory,
 * anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_rude(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);

/**
 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_rude(void)
{
	rcu_barrier_tasks_generic(&rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);

static int __init rcu_spawn_tasks_rude_kthread(void)
{
	cblist_init_generic(&rcu_tasks_rude);
	rcu_tasks_rude.gp_sleep = HZ / 10;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_rude_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
#endif /* #if !defined(CONFIG_TINY_RCU) */
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
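
////////////////////////////////////////////////////////////////////////
//
// Tracing variant of Tasks RCU.  This variant allows readers in both
// process context and preempt-disabled regions, with the read-side
// critical sections delimited by rcu_read_lock_trace() and
// rcu_read_unlock_trace().  Grace periods are detected by scanning the
// full task list, using IPIs and the ->trc_reader_nesting and
// ->trc_reader_special fields of task_struct to locate and then wait
// out any tasks still within a read-side critical section:
//
// rcu_tasks_trace_pregp_step():
//	Initialize the count of readers and block CPU-hotplug operations.
// rcu_tasks_trace_pertask(), invoked on every non-idle task:
//	Initialize per-task state and attempt to identify an immediate
//	quiescent state for that task, or, failing that, set that task's
//	->trc_reader_special.b.need_qs flag so that the task's next
//	outermost rcu_read_unlock_trace() will report the quiescent state.
//	Tasks that could not be verified are added to a holdout list.
// rcu_tasks_trace_postscan():
//	Handle the idle tasks, re-enable CPU-hotplug operations, and wait
//	for an RCU grace period to avoid races with exiting tasks.
// check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
//	Scan the holdout list, attempting to identify a quiescent state
//	for each task on the list, removing tasks as quiescent states
//	are found.
// rcu_tasks_trace_postgp():
//	Wait for the count of readers blocking this grace period to drop
//	to zero, printing stall warnings as needed, then execute a full
//	memory barrier to order the grace period against the readers.
//
// The exit_tasks_rcu_finish_trace() function handles tasks that are
// exiting during the grace period.
//
// The lockdep state must be outside of #ifdef to be useful.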
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_trace_key;
struct lockdep_map rcu_trace_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
#endif

#ifdef CONFIG_TASKS_TRACE_RCU

static atomic_t trc_n_readers_need_end;		// Number of waited-for readers.
static DECLARE_WAIT_QUEUE_HEAD(trc_wait);	// Where the GP kthread waits for them.

// Record outstanding IPIs to each CPU.  No point in sending two...
static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);

// The number of detections of task quiescent state relying on
// heavyweight readers executing explicit memory barriers.
static unsigned long n_heavy_reader_attempts;
static unsigned long n_heavy_reader_updates;
static unsigned long n_heavy_reader_ofl_updates;

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
		 "RCU Tasks Trace");

/*
 * This irq_work handler allows rcu_read_unlock_trace() to be invoked
 * while the scheduler locks are held.
 */
static void rcu_read_unlock_iw(struct irq_work *iwp)
{
	wake_up(&trc_wait);
}
static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);

/* If we are the last reader, wake up the grace-period kthread. */
void rcu_read_unlock_trace_special(struct task_struct *t)
{
	int nq = READ_ONCE(t->trc_reader_special.b.need_qs);

	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
	    t->trc_reader_special.b.need_mb)
		smp_mb(); // Pairs with update-side barriers.
	// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
	if (nq)
		WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
	WRITE_ONCE(t->trc_reader_nesting, 0);
	if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
		irq_work_queue(&rcu_tasks_trace_iw);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);

/* Add a task to the holdout list, if it is not already on the list. */
static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
{
	if (list_empty(&t->trc_holdout_list)) {
		get_task_struct(t);
		list_add(&t->trc_holdout_list, bhp);
	}
}

/* Remove a task from the holdout list, if it is in fact present. */
static void trc_del_holdout(struct task_struct *t)
{
	if (!list_empty(&t->trc_holdout_list)) {
		list_del_init(&t->trc_holdout_list);
		put_task_struct(t);
	}
}

/* IPI handler to check task state. */
static void trc_read_check_handler(void *t_in)
{
	struct task_struct *t = current;
	struct task_struct *texp = t_in;

	/* If the task is no longer running on this CPU, leave. */
	if (unlikely(texp != t)) {
		goto reset_ipi; // Already on holdout list, so will check later.
	}

	/*
	 * If the task is not in a read-side critical section, and if it
	 * is not already on the holdout list, there is nothing to do.
	 */
	if (likely(!READ_ONCE(t->trc_reader_nesting))) {
		WRITE_ONCE(t->trc_reader_checked, true);
		goto reset_ipi;
	}
	/* If we are racing with an rcu_read_unlock_trace(), try again later. */
	if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0))
		goto reset_ipi;
	WRITE_ONCE(t->trc_reader_checked, true);

	/*
	 * The task is in a read-side critical section, so set up its
	 * state so that it will awaken the grace-period kthread upon
	 * exit from that critical section.
	 */
	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);

reset_ipi:
	/*
	 * Allow future IPIs to be sent on this CPU and for this task.
	 * Also order this IPI handler against any later manipulations
	 * of the intended task.
	 */
	smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false);
	smp_store_release(&texp->trc_ipi_to_cpu, -1);
}

/* Callback function for scheduler to check locked-down task.  */
static int trc_inspect_reader(struct task_struct *t, void *arg)
{
	int cpu = task_cpu(t);
	int nesting;
	bool ofl = cpu_is_offline(cpu);

	if (task_curr(t)) {
		WARN_ON_ONCE(ofl && !is_idle_task(t));

		/* If no chance of heavyweight readers, do it the hard way. */
		if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
			return -EINVAL;

		/*
		 * If heavyweight readers are enabled on the remote task,
		 * we can inspect its state despite its currently running.
		 * However, we cannot safely change its state.
		 */
		n_heavy_reader_attempts++;
		if (!ofl &&
		    !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
			return -EINVAL; // No quiescent state, do it the hard way.
		n_heavy_reader_updates++;
		if (ofl)
			n_heavy_reader_ofl_updates++;
		nesting = 0;
	} else {
		// The task is not running, so C-language access is safe.
		nesting = t->trc_reader_nesting;
	}

	/*
	 * If not exiting a read-side critical section, mark as checked
	 * so that the grace-period kthread will remove it from the
	 * holdout list.
	 */
	t->trc_reader_checked = nesting >= 0;
	if (nesting <= 0)
		return nesting ? -EINVAL : 0;  // If in QS, done, otherwise try again later.

	/*
	 * The task is in a read-side critical section, so set up its
	 * state so that it will awaken the grace-period kthread upon
	 * exit from that critical section.
	 */
	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
	return 0;
}

/* Attempt to extract the state for the specified task. */
static void trc_wait_for_one_reader(struct task_struct *t,
				    struct list_head *bhp)
{
	int cpu;

	// If a previous IPI is still in flight, let it complete.
	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
		return;

	// The current task had better be in a quiescent state.
	if (t == current) {
		t->trc_reader_checked = true;
		WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
		return;
	}

	// Attempt to nail down the task for inspection.
	get_task_struct(t);
	if (!task_call_func(t, trc_inspect_reader, NULL)) {
		put_task_struct(t);
		return;
	}
	put_task_struct(t);

	// If this task is not yet on the holdout list, then we are in
	// an RCU read-side critical section.  Otherwise, the invocation of
	// trc_add_holdout() that added it to the list did the necessary
	// get_task_struct().  Either way, the task cannot be freed out
	// from under this code.
	//
	// If currently running, send an IPI, either way, add to list.
	trc_add_holdout(t, bhp);
	if (task_curr(t) &&
	    time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
		// The task is currently running, so try IPIing it.
		cpu = task_cpu(t);

		// If there is already an IPI outstanding, let it happen.
		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
			return;

		per_cpu(trc_ipi_to_cpu, cpu) = true;
		t->trc_ipi_to_cpu = cpu;
		rcu_tasks_trace.n_ipis++;
		if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
			// Just in case there is some other reason for
			// failure than the target CPU being offline.
			WARN_ONCE(1, "%s(): smp_call_function_single() failed for CPU: %d\n",
				  __func__, cpu);
			rcu_tasks_trace.n_ipis_fails++;
			per_cpu(trc_ipi_to_cpu, cpu) = false;
			t->trc_ipi_to_cpu = -1;
		}
	}
}

/* Initialize for a new RCU-tasks-trace grace period. */
static void rcu_tasks_trace_pregp_step(void)
{
	int cpu;

	// Allow for fast-acting IPIs.
	atomic_set(&trc_n_readers_need_end, 1);

	// There shouldn't be any old IPIs, but...
	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));

	// Disable CPU hotplug across the tasklist scan.
	// This also waits for all readers in CPU-hotplug code paths.
	cpus_read_lock();
}

/* Do first-round processing for the specified task. */
static void rcu_tasks_trace_pertask(struct task_struct *t,
				    struct list_head *hop)
{
	// During early boot when there is only the one boot CPU, there
	// is no idle task for the other CPUs.  Just return.
	if (unlikely(t == NULL))
		return;

	WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
	WRITE_ONCE(t->trc_reader_checked, false);
	t->trc_ipi_to_cpu = -1;
	trc_wait_for_one_reader(t, hop);
}

/*
 * Do intermediate processing between task and holdout scans and
 * pick up the idle tasks.
 */
static void rcu_tasks_trace_postscan(struct list_head *hop)
{
	int cpu;

	for_each_possible_cpu(cpu)
		rcu_tasks_trace_pertask(idle_task(cpu), hop);

	// Re-enable CPU hotplug now that the tasklist scan has completed.
	cpus_read_unlock();

	// Wait for late-stage exiting tasks to finish exiting.
	// These might have passed the call to exit_tasks_rcu_finish().
	synchronize_rcu();
	// Any tasks that exit after this point will set ->trc_reader_checked.
}

/* Communicate task state back to the RCU tasks trace stall warning request. */
struct trc_stall_chk_rdr {
	int nesting;
	int ipi_to_cpu;
	u8 needqs;
};

static int trc_check_slow_task(struct task_struct *t, void *arg)
{
	struct trc_stall_chk_rdr *trc_rdrp = arg;

	if (task_curr(t))
		return false; // It is running, so decline to inspect it.
	trc_rdrp->nesting = READ_ONCE(t->trc_reader_nesting);
	trc_rdrp->ipi_to_cpu = READ_ONCE(t->trc_ipi_to_cpu);
	trc_rdrp->needqs = READ_ONCE(t->trc_reader_special.b.need_qs);
	return true;
}

/* Show the state of a task stalling the current RCU tasks trace GP. */
static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
{
	int cpu;
	struct trc_stall_chk_rdr trc_rdr;
	bool is_idle_tsk = is_idle_task(t);

	if (*firstreport) {
		pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	if (!task_call_func(t, trc_check_slow_task, &trc_rdr))
		pr_alert("P%d: %c\n",
			 t->pid,
			 ".i"[is_idle_tsk]);
	else
		pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n",
			 t->pid,
			 ".I"[trc_rdr.ipi_to_cpu >= 0],
			 ".i"[is_idle_tsk],
			 ".N"[cpu >= 0 && tick_nohz_full_cpu(cpu)],
			 trc_rdr.nesting,
			 " N"[!!trc_rdr.needqs],
			 cpu);
	sched_show_task(t);
}

/* List stalled IPIs for RCU tasks trace. */
static void show_stalled_ipi_trace(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		if (per_cpu(trc_ipi_to_cpu, cpu))
			pr_alert("\tIPI outstanding to CPU %d\n", cpu);
}

/* Do one scan of the holdout list. */
static void check_all_holdout_tasks_trace(struct list_head *hop,
					  bool needreport, bool *firstreport)
{
	struct task_struct *g, *t;

	// Disable CPU hotplug across the holdout list scan.
	cpus_read_lock();

	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
		// If safe and needed, try to check the current task.
		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
		    !READ_ONCE(t->trc_reader_checked))
			trc_wait_for_one_reader(t, hop);

		// If check succeeded, remove this task from the list.
		if (smp_load_acquire(&t->trc_ipi_to_cpu) == -1 &&
		    READ_ONCE(t->trc_reader_checked))
			trc_del_holdout(t);
		else if (needreport)
			show_stalled_task_trace(t, firstreport);
	}

	// Re-enable CPU hotplug now that the holdout list scan has completed.
	cpus_read_unlock();

	if (needreport) {
		if (*firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
		show_stalled_ipi_trace();
	}
}

static void rcu_tasks_trace_empty_fn(void *unused)
{
}

/* Wait for grace period to complete and provide ordering. */
static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
{
	int cpu;
	bool firstreport;
	struct task_struct *g, *t;
	LIST_HEAD(holdouts);
	long ret;

	// Wait for any lingering IPI handlers to complete.  Note that
	// if a CPU has gone offline or transitioned to userspace in the
	// meantime, all IPI handlers should have been drained beforehand.
	for_each_online_cpu(cpu)
		if (WARN_ON_ONCE(smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu))))
			smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);

	// Remove the safety count.
	smp_mb__before_atomic();  // Order vs. earlier atomics.
	atomic_dec(&trc_n_readers_need_end);
	smp_mb__after_atomic();  // Order vs. later atomics.

	// Wait for readers.
	set_tasks_gp_state(rtp, RTGS_WAIT_READERS);
	for (;;) {
		ret = wait_event_idle_exclusive_timeout(
				trc_wait,
				atomic_read(&trc_n_readers_need_end) == 0,
				READ_ONCE(rcu_task_stall_timeout));
		if (ret)
			break;  // Count reached zero.
		// Stall warning time, so make a list of the offenders.
		rcu_read_lock();
		for_each_process_thread(g, t)
			if (READ_ONCE(t->trc_reader_special.b.need_qs))
				trc_add_holdout(t, &holdouts);
		rcu_read_unlock();
		firstreport = true;
		list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
			if (READ_ONCE(t->trc_reader_special.b.need_qs))
				show_stalled_task_trace(t, &firstreport);
			trc_del_holdout(t); // Release task_struct reference.
		}
		if (firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/taskslist mismatch?)\n");
		show_stalled_ipi_trace();
		pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
	}
	smp_mb(); // Caller's code must be visible to next GP.
}

/* Report any needed quiescent state for this exiting task. */
static void exit_tasks_rcu_finish_trace(struct task_struct *t)
{
	WRITE_ONCE(t->trc_reader_checked, true);
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
	WRITE_ONCE(t->trc_reader_nesting, 0);
	if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
		rcu_read_unlock_trace_special(t);
}
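
/**
 * call_rcu_tasks_trace() - Queue a callback for a trace task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a trace rcu-tasks
 * grace period elapses, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */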
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);

/**
 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
 *
 * Control will return to the caller some time after a trace rcu-tasks
 * grace period has elapsed, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have elapsed.  These read-side
 * critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_trace(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);

/**
 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_trace(void)
{
	rcu_barrier_tasks_generic(&rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);

static int __init rcu_spawn_tasks_trace_kthread(void)
{
	cblist_init_generic(&rcu_tasks_trace);
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
		rcu_tasks_trace.gp_sleep = HZ / 10;
		rcu_tasks_trace.init_fract = HZ / 10;
	} else {
		rcu_tasks_trace.gp_sleep = HZ / 200;
		if (rcu_tasks_trace.gp_sleep <= 0)
			rcu_tasks_trace.gp_sleep = 1;
		rcu_tasks_trace.init_fract = HZ / 200;
		if (rcu_tasks_trace.init_fract <= 0)
			rcu_tasks_trace.init_fract = 1;
	}
	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
	rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
	rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
	rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_trace_gp_kthread(void)
{
	char buf[64];

	sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
		data_race(n_heavy_reader_ofl_updates),
		data_race(n_heavy_reader_updates),
		data_race(n_heavy_reader_attempts));
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
#endif /* #if !defined(CONFIG_TINY_RCU) */

#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */

#ifndef CONFIG_TINY_RCU
void show_rcu_tasks_gp_kthreads(void)
{
	show_rcu_tasks_classic_gp_kthread();
	show_rcu_tasks_rude_gp_kthread();
	show_rcu_tasks_trace_gp_kthread();
}
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_PROVE_RCU
struct rcu_tasks_test_desc {
	struct rcu_head rh;
	const char *name;
	bool notrun;
};

static struct rcu_tasks_test_desc tests[] = {
	{
		.name = "call_rcu_tasks()",
		/* If not defined, the test is skipped. */
		.notrun = !IS_ENABLED(CONFIG_TASKS_RCU),
	},
	{
		.name = "call_rcu_tasks_rude()",
		/* If not defined, the test is skipped. */
		.notrun = !IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
	},
	{
		.name = "call_rcu_tasks_trace()",
		/* If not defined, the test is skipped. */
		.notrun = !IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
	}
};

static void test_rcu_tasks_callback(struct rcu_head *rhp)
{
	struct rcu_tasks_test_desc *rttd =
		container_of(rhp, struct rcu_tasks_test_desc, rh);

	pr_info("Callback from %s invoked.\n", rttd->name);

	rttd->notrun = true;
}

static void rcu_tasks_initiate_self_tests(void)
{
	pr_info("Running RCU-tasks wait API self tests\n");
#ifdef CONFIG_TASKS_RCU
	synchronize_rcu_tasks();
	call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	synchronize_rcu_tasks_rude();
	call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	synchronize_rcu_tasks_trace();
	call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
#endif
}

static int rcu_tasks_verify_self_tests(void)
{
	int ret = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		if (!tests[i].notrun) {		// Still hanging.
			pr_err("%s has been failed.\n", tests[i].name);
			ret = -1;
		}
	}

	if (ret)
		WARN_ON(1);

	return ret;
}
late_initcall(rcu_tasks_verify_self_tests);
#else /* #ifdef CONFIG_PROVE_RCU */
static void rcu_tasks_initiate_self_tests(void) { }
#endif /* #else #ifdef CONFIG_PROVE_RCU */

void __init rcu_init_tasks_generic(void)
{
#ifdef CONFIG_TASKS_RCU
	rcu_spawn_tasks_kthread();
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	rcu_spawn_tasks_rude_kthread();
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	rcu_spawn_tasks_trace_kthread();
#endif

	// Run the self-tests.
	rcu_tasks_initiate_self_tests();
}

#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
static inline void rcu_tasks_bootup_oddness(void) {}
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */