/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version):
 * callback-offloading (rcu_nocbs/NOCB) support.
 *
 * The code below manages the per-CPU offloaded callback lists, the
 * ->nocb_bypass lists used to absorb call_rcu() floods, and the rcuog
 * (grace-period) and rcuo (callback-invocation) kthreads that process
 * callbacks on behalf of offloaded CPUs.
 */
#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool __read_mostly rcu_nocb_poll; /* Offload kthreads are to poll. */
static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
{
	return lockdep_is_held(&rdp->nocb_lock);
}

static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
{
	/* Race on early boot between thread creation and assignment. */
	if (!rdp->nocb_cb_kthread || !rdp->nocb_gp_kthread)
		return true;

	if (current == rdp->nocb_cb_kthread || current == rdp->nocb_gp_kthread)
		if (in_task())
			return true;
	return false;
}

/*
 * Offload callback processing from the boot-time-specified set of CPUs
 * given by rcu_nocb_mask.  For each CPU in that set, a per-group rcuog
 * ("GP") kthread waits for the needed grace periods and a per-CPU rcuo
 * ("CB") kthread invokes that CPU's callbacks once those grace periods
 * have elapsed, so offloaded CPUs never run RCU_SOFTIRQ callback
 * processing themselves.
 *
 * Parse the boot-time rcu_nocbs= CPU list.  An unparseable list causes
 * all CPUs to be offloaded (see the cpumask_setall() below), on the
 * theory that partial offloading is better than none when offloading
 * was clearly requested.
 */
static int __init rcu_nocb_setup(char *str)
{
	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
	if (cpulist_parse(str, rcu_nocb_mask)) {
		pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
		cpumask_setall(rcu_nocb_mask);
	}
	return 1;
}
__setup("rcu_nocbs=", rcu_nocb_setup);

static int __init parse_rcu_nocb_poll(char *arg)
{
	rcu_nocb_poll = true;
	return 0;
}
early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
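
/*
 * Example (illustrative only): booting with "rcu_nocbs=1-7 rcu_nocb_poll"
 * offloads callbacks for CPUs 1-7 and makes the rcuog kthreads poll for
 * new callbacks instead of waiting to be awakened.  The exact CPU list
 * is of course system- and workload-specific.
 */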

/*
 * Don't bother bypassing ->cblist if the call_rcu() rate is low.
 * After all, the main point of bypassing is to avoid ->nocb_lock
 * contention, which can only occur at high call_rcu() rates.
 */
static int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
module_param(nocb_nobypass_lim_per_jiffy, int, 0);
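
/*
 * Worked example (assuming the default value above is left alone): the
 * limit targets roughly 16000 non-bypassed callbacks per second, so it
 * is 16 per jiffy with HZ=1000, 64 per jiffy with HZ=250, and 160 per
 * jiffy with HZ=100.
 */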

/*
 * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
 * lock isn't immediately available, increment ->nocb_lock_contended to
 * flag the contention.
 */
static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
	__acquires(&rdp->nocb_bypass_lock)
{
	lockdep_assert_irqs_disabled();
	if (raw_spin_trylock(&rdp->nocb_bypass_lock))
		return;
	atomic_inc(&rdp->nocb_lock_contended);
	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
	smp_mb__after_atomic(); /* atomic_inc() before lock. */
	raw_spin_lock(&rdp->nocb_bypass_lock);
	smp_mb__before_atomic(); /* atomic_dec() after lock. */
	atomic_dec(&rdp->nocb_lock_contended);
}

/*
 * Spin-wait until the specified rcu_data structure's ->nocb_bypass_lock
 * is no longer contended.  Contention here is both rare and unexpected,
 * hence the WARN_ON_ONCE(), so a simple cpu_relax() loop suffices as the
 * throttling mechanism on the CPU doing floods of call_rcu() invocations.
 */
static void rcu_nocb_wait_contended(struct rcu_data *rdp)
{
	WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
	while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
		cpu_relax();
}

/*
 * Conditionally acquire the specified rcu_data structure's
 * ->nocb_bypass_lock.
 */
static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
	return raw_spin_trylock(&rdp->nocb_bypass_lock);
}

/*
 * Release the specified rcu_data structure's ->nocb_bypass_lock.
 */
static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
	__releases(&rdp->nocb_bypass_lock)
{
	lockdep_assert_irqs_disabled();
	raw_spin_unlock(&rdp->nocb_bypass_lock);
}

/*
 * Acquire the specified rcu_data structure's ->nocb_lock, but only
 * if it corresponds to a no-CBs CPU.
 */
static void rcu_nocb_lock(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
	if (!rcu_rdp_is_offloaded(rdp))
		return;
	raw_spin_lock(&rdp->nocb_lock);
}

/*
 * Release the specified rcu_data structure's ->nocb_lock, but only
 * if it corresponds to a no-CBs CPU.
 */
static void rcu_nocb_unlock(struct rcu_data *rdp)
{
	if (rcu_rdp_is_offloaded(rdp)) {
		lockdep_assert_irqs_disabled();
		raw_spin_unlock(&rdp->nocb_lock);
	}
}

/*
 * Release the specified rcu_data structure's ->nocb_lock and restore
 * interrupts, but only if it corresponds to a no-CBs CPU.
 */
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags)
{
	if (rcu_rdp_is_offloaded(rdp)) {
		lockdep_assert_irqs_disabled();
		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
	} else {
		local_irq_restore(flags);
	}
}

/* Lockdep check that ->cblist may be safely accessed. */
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
	if (rcu_rdp_is_offloaded(rdp))
		lockdep_assert_held(&rdp->nocb_lock);
}

/*
 * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
 * grace period.
 */
static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
{
	swake_up_all(sq);
}

static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
	return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
	init_swait_queue_head(&rnp->nocb_gp_wq[0]);
	init_swait_queue_head(&rnp->nocb_gp_wq[1]);
}

/* Return true if the specified CPU is a no-CBs CPU. */
bool rcu_is_nocb_cpu(int cpu)
{
	if (cpumask_available(rcu_nocb_mask))
		return cpumask_test_cpu(cpu, rcu_nocb_mask);
	return false;
}

static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
			   struct rcu_data *rdp,
			   bool force, unsigned long flags)
	__releases(rdp_gp->nocb_gp_lock)
{
	bool needwake = false;

	if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
		raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
				    TPS("AlreadyAwake"));
		return false;
	}

	if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
		WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
		del_timer(&rdp_gp->nocb_timer);
	}

	if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
		WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
		needwake = true;
	}
	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
	if (needwake) {
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
		wake_up_process(rdp_gp->nocb_gp_kthread);
	}

	return needwake;
}

/*
 * Kick the GP kthread for this NOCB group.
 */
static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
{
	unsigned long flags;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
	return __wake_nocb_gp(rdp_gp, rdp, force, flags);
}

/*
 * Arrange to wake the GP kthread for this NOCB group at some future
 * time when it is safe to do so.
 */
static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
			       const char *reason)
{
	unsigned long flags;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);

	/*
	 * Bypass wakeup overrides previous deferments.  In case of
	 * callback storm, no need to wake up too early.
	 */
	if (waketype == RCU_NOCB_WAKE_BYPASS) {
		mod_timer(&rdp_gp->nocb_timer, jiffies + 2);
		WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
	} else {
		if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE)
			mod_timer(&rdp_gp->nocb_timer, jiffies + 1);
		if (rdp_gp->nocb_defer_wakeup < waketype)
			WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
	}

	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);

	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
}

/*
 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
 * However, if rhp is non-NULL and the bypass list is empty, release the
 * lock and return false, leaving the enqueuing of rhp to the caller.
 * This function assumes that the ->nocb_bypass_lock is already held, and
 * it unconditionally releases that lock before returning.
 */
static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				     unsigned long j)
{
	struct rcu_cblist rcl;

	WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
	rcu_lockdep_assert_cblist_protected(rdp);
	lockdep_assert_held(&rdp->nocb_bypass_lock);
	if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
		raw_spin_unlock(&rdp->nocb_bypass_lock);
		return false;
	}
	/* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
	if (rhp)
		rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
	rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
	rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
	WRITE_ONCE(rdp->nocb_bypass_first, j);
	rcu_nocb_bypass_unlock(rdp);
	return true;
}

/*
 * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
 * Unlike rcu_nocb_do_flush_bypass(), this function acquires and releases
 * the ->nocb_bypass_lock itself, and is a no-op (returning true) on
 * CPUs that are not offloaded.
 */
static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				  unsigned long j)
{
	if (!rcu_rdp_is_offloaded(rdp))
		return true;
	rcu_lockdep_assert_cblist_protected(rdp);
	rcu_nocb_bypass_lock(rdp);
	return rcu_nocb_do_flush_bypass(rdp, rhp, j);
}

/*
 * If the ->nocb_bypass_lock is immediately available, flush the
 * ->nocb_bypass queue into ->cblist.
 */
static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
{
	rcu_lockdep_assert_cblist_protected(rdp);
	if (!rcu_rdp_is_offloaded(rdp) ||
	    !rcu_nocb_bypass_trylock(rdp))
		return;
	WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j));
}

/*
 * See whether the specified not-yet-enqueued callback can simply be
 * added to the ->nocb_bypass list, thus avoiding acquisition of
 * ->nocb_lock on CPUs seeing call_rcu() floods.  Returns true if the
 * callback was added to the bypass list (in which case interrupts have
 * been restored from "flags"), and false if the caller must enqueue it
 * onto ->cblist itself (in which case ->nocb_lock is held on offloaded
 * CPUs and *was_alldone says whether ->cblist previously had no pending
 * callbacks).
 */
static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				bool *was_alldone, unsigned long flags)
{
	unsigned long c;
	unsigned long cur_gp_seq;
	unsigned long j = jiffies;
	long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);

	lockdep_assert_irqs_disabled();

	// Not offloaded: pure softirq/rcuc processing, so no bypassing
	// and no ->nocb_lock needed.
	if (!rcu_rdp_is_offloaded(rdp)) {
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
		return false;
	}

	// Not yet completely offloaded (transition in progress), so no
	// bypassing, but ->nocb_lock must still be held.
	if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
		rcu_nocb_lock(rdp);
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
		return false;
	}

	// Don't use ->nocb_bypass during early boot.
	if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
		rcu_nocb_lock(rdp);
		WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
		return false;
	}

	// If we have advanced to a new jiffy, reset counts to allow
	// moving back from ->nocb_bypass to ->cblist.
	if (j == rdp->nocb_nobypass_last) {
		c = rdp->nocb_nobypass_count + 1;
	} else {
		WRITE_ONCE(rdp->nocb_nobypass_last, j);
		c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
		if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
				 nocb_nobypass_lim_per_jiffy))
			c = 0;
		else if (c > nocb_nobypass_lim_per_jiffy)
			c = nocb_nobypass_lim_per_jiffy;
	}
	WRITE_ONCE(rdp->nocb_nobypass_count, c);

	// If there haven't been all that many ->cblist enqueues this
	// jiffy, tell the caller to enqueue onto ->cblist, but flush any
	// stale ->nocb_bypass contents first.
	if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) {
		rcu_nocb_lock(rdp);
		*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
		if (*was_alldone)
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("FirstQ"));
		WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j));
		WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
		return false; // Caller must enqueue the callback.
	}

	// If ->nocb_bypass has been used too long or is too full, flush
	// it into ->cblist.
	if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) ||
	    ncbs >= qhimark) {
		rcu_nocb_lock(rdp);
		if (!rcu_nocb_flush_bypass(rdp, rhp, j)) {
			*was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
			if (*was_alldone)
				trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
						    TPS("FirstQ"));
			WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
			return false; // Caller must enqueue the callback.
		}
		if (j != rdp->nocb_gp_adv_time &&
		    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
		    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
			rcu_advance_cbs_nowake(rdp->mynode, rdp);
			rdp->nocb_gp_adv_time = j;
		}
		rcu_nocb_unlock_irqrestore(rdp, flags);
		return true; // Callback already enqueued.
	}

	// Otherwise, use the bypass.
	rcu_nocb_wait_contended(rdp);
	rcu_nocb_bypass_lock(rdp);
	ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
	rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
	rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
	if (!ncbs) {
		WRITE_ONCE(rdp->nocb_bypass_first, j);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
	}
	rcu_nocb_bypass_unlock(rdp);
	smp_mb(); /* Order enqueue before subsequent ->cblist checks. */
	if (ncbs) {
		local_irq_restore(flags);
	} else {
		// First bypass enqueue: the GP kthread might be asleep.
		rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
		if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("FirstBQwake"));
			__call_rcu_nocb_wake(rdp, true, flags);
		} else {
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("FirstBQnoWake"));
			rcu_nocb_unlock_irqrestore(rdp, flags);
		}
	}
	return true; // Callback already enqueued.
}
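
/*
 * Worked example of the bypass policy above (assuming the default
 * qhimark of 10000 and HZ=1000): a CPU posting at most 16 callbacks in
 * a given jiffy keeps enqueuing directly onto ->cblist; once it exceeds
 * that rate, subsequent callbacks go onto ->nocb_bypass, which is
 * flushed back into ->cblist when it reaches qhimark callbacks or when
 * a new jiffy begins with callbacks still pending in the bypass list.
 */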

/*
 * Awaken the no-CBs grace-period kthread if needed, either because it
 * is legitimately asleep or because of overload conditions.
 */
static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
				 unsigned long flags)
	__releases(rdp->nocb_lock)
{
	unsigned long cur_gp_seq;
	unsigned long j;
	long len;
	struct task_struct *t;

	// If we are being polled or there is no kthread, just leave.
	t = READ_ONCE(rdp->nocb_gp_kthread);
	if (rcu_nocb_poll || !t) {
		rcu_nocb_unlock_irqrestore(rdp, flags);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
				    TPS("WakeNotPoll"));
		return;
	}
	// Need to actually do a wakeup.
	len = rcu_segcblist_n_cbs(&rdp->cblist);
	if (was_alldone) {
		rdp->qlen_last_fqs_check = len;
		if (!irqs_disabled_flags(flags)) {
			/* ... if queue was empty ... */
			rcu_nocb_unlock_irqrestore(rdp, flags);
			wake_nocb_gp(rdp, false);
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("WakeEmpty"));
		} else {
			rcu_nocb_unlock_irqrestore(rdp, flags);
			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
					   TPS("WakeEmptyIsDeferred"));
		}
	} else if (len > rdp->qlen_last_fqs_check + qhimark) {
		/* ... or if many callbacks queued. */
		rdp->qlen_last_fqs_check = len;
		j = jiffies;
		if (j != rdp->nocb_gp_adv_time &&
		    rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
		    rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
			rcu_advance_cbs_nowake(rdp->mynode, rdp);
			rdp->nocb_gp_adv_time = j;
		}
		smp_mb(); /* Enqueue before timer_pending(). */
		if ((rdp->nocb_cb_sleep ||
		     !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
		    !timer_pending(&rdp->nocb_timer)) {
			rcu_nocb_unlock_irqrestore(rdp, flags);
			wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
					   TPS("WakeOvfIsDeferred"));
		} else {
			rcu_nocb_unlock_irqrestore(rdp, flags);
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
		}
	} else {
		rcu_nocb_unlock_irqrestore(rdp, flags);
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
	}
	return;
}

/*
 * Check whether the no-CBs GP kthread should handle this rdp, that is,
 * whether the rdp is offloaded with its GP-kthread flag set.  The flags
 * are read without holding ->nocb_lock, but a freshly offloaded rdp
 * cannot be missed because rdp_offload_toggle() sets the flags before
 * acquiring and releasing ->nocb_gp_lock, which the GP kthread acquires
 * and releases between successive scans of its rdp list.
 */
static inline bool nocb_gp_enabled_cb(struct rcu_data *rdp)
{
	u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_GP;

	return rcu_segcblist_test_flags(&rdp->cblist, flags);
}

static inline bool nocb_gp_update_state_deoffloading(struct rcu_data *rdp,
						     bool *needwake_state)
{
	struct rcu_segcblist *cblist = &rdp->cblist;

	if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
		if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
			rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
			if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
				*needwake_state = true;
		}
		return false;
	}

	/*
	 * De-offloading.  Clear our flag and notify the de-offload worker.
	 * We will ignore this rdp until it ever becomes offloaded again.
	 */
	WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
	rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
	if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
		*needwake_state = true;
	return true;
}

/*
 * No-CBs GP kthreads come here to wait for additional callbacks to show
 * up or for grace periods to end.
 */
static void nocb_gp_wait(struct rcu_data *my_rdp)
{
	bool bypass = false;
	long bypass_ncbs;
	int __maybe_unused cpu = my_rdp->cpu;
	unsigned long cur_gp_seq;
	unsigned long flags;
	bool gotcbs = false;
	unsigned long j = jiffies;
	bool needwait_gp = false;
	bool needwake;
	bool needwake_gp;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	unsigned long wait_gp_seq = 0;
	bool wasempty = false;

	/*
	 * Each pass through the following loop checks for CBs and for the
	 * nearest grace period (if any) to wait for next.  The CB kthreads
	 * and the global grace-period kthread are awakened if needed.
	 */
	WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
	for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_cb_rdp) {
		bool needwake_state = false;

		if (!nocb_gp_enabled_cb(rdp))
			continue;
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
		rcu_nocb_lock_irqsave(rdp, flags);
		if (nocb_gp_update_state_deoffloading(rdp, &needwake_state)) {
			rcu_nocb_unlock_irqrestore(rdp, flags);
			if (needwake_state)
				swake_up_one(&rdp->nocb_state_wq);
			continue;
		}
		bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
		if (bypass_ncbs &&
		    (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
		     bypass_ncbs > 2 * qhimark)) {
			// Bypass full or old, so flush it.
			(void)rcu_nocb_try_flush_bypass(rdp, j);
			bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
		} else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
			rcu_nocb_unlock_irqrestore(rdp, flags);
			if (needwake_state)
				swake_up_one(&rdp->nocb_state_wq);
			continue; /* No callbacks here, try next. */
		}
		if (bypass_ncbs) {
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("Bypass"));
			bypass = true;
		}
		rnp = rdp->mynode;

		// Advance callbacks if helpful and low contention.
		needwake_gp = false;
		if (!rcu_segcblist_restempty(&rdp->cblist,
					     RCU_NEXT_READY_TAIL) ||
		    (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
		     rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {
			raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
			needwake_gp = rcu_advance_cbs(rnp, rdp);
			wasempty = rcu_segcblist_restempty(&rdp->cblist,
							   RCU_NEXT_READY_TAIL);
			raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */
		}
		// Need to wait on some grace period?
		WARN_ON_ONCE(wasempty &&
			     !rcu_segcblist_restempty(&rdp->cblist,
						      RCU_NEXT_READY_TAIL));
		if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) {
			if (!needwait_gp ||
			    ULONG_CMP_LT(cur_gp_seq, wait_gp_seq))
				wait_gp_seq = cur_gp_seq;
			needwait_gp = true;
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
					    TPS("NeedWaitGP"));
		}
		if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
			needwake = rdp->nocb_cb_sleep;
			WRITE_ONCE(rdp->nocb_cb_sleep, false);
			smp_mb(); /* CB invocation -after- GP end. */
		} else {
			needwake = false;
		}
		rcu_nocb_unlock_irqrestore(rdp, flags);
		if (needwake) {
			swake_up_one(&rdp->nocb_cb_wq);
			gotcbs = true;
		}
		if (needwake_gp)
			rcu_gp_kthread_wake();
		if (needwake_state)
			swake_up_one(&rdp->nocb_state_wq);
	}

	my_rdp->nocb_gp_bypass = bypass;
	my_rdp->nocb_gp_gp = needwait_gp;
	my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;

	if (bypass && !rcu_nocb_poll) {
		// At least one child with non-empty ->nocb_bypass, so set
		// timer in order to avoid stranding its callbacks.
		wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS,
				   TPS("WakeBypassIsDeferred"));
	}
	if (rcu_nocb_poll) {
		// Polling, so trace if first poll in the series.
		if (gotcbs)
			trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
		schedule_timeout_idle(1);
	} else if (!needwait_gp) {
		// Wait for callbacks to appear.
		trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
		swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq,
				!READ_ONCE(my_rdp->nocb_gp_sleep));
		trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
	} else {
		rnp = my_rdp->mynode;
		trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait"));
		swait_event_interruptible_exclusive(
			rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1],
			rcu_seq_done(&rnp->gp_seq, wait_gp_seq) ||
			!READ_ONCE(my_rdp->nocb_gp_sleep));
		trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
	}
	if (!rcu_nocb_poll) {
		raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
		if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
			WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
			del_timer(&my_rdp->nocb_timer);
		}
		WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
		raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
	}
	my_rdp->nocb_gp_seq = -1;
	WARN_ON(signal_pending(current));
}

/*
 * No-CBs grace-period-wait kthread.  There is one of these per group of
 * CPUs.  This kthread checks for newly posted callbacks from any of the
 * CPUs it is responsible for, waits for a grace period, then awakens the
 * rcu_nocb_cb_kthread() instances within that group.
 */
static int rcu_nocb_gp_kthread(void *arg)
{
	struct rcu_data *rdp = arg;

	for (;;) {
		WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
		nocb_gp_wait(rdp);
		cond_resched_tasks_rcu_qs();
	}
	return 0;
}

static inline bool nocb_cb_can_run(struct rcu_data *rdp)
{
	u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB;

	return rcu_segcblist_test_flags(&rdp->cblist, flags);
}

static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
{
	return nocb_cb_can_run(rdp) && !READ_ONCE(rdp->nocb_cb_sleep);
}

/*
 * Invoke any ready callbacks from the corresponding no-CBs CPU,
 * then, if there are no more, wait for more to appear.
 */
static void nocb_cb_wait(struct rcu_data *rdp)
{
	struct rcu_segcblist *cblist = &rdp->cblist;
	unsigned long cur_gp_seq;
	unsigned long flags;
	bool needwake_state = false;
	bool needwake_gp = false;
	bool can_sleep = true;
	struct rcu_node *rnp = rdp->mynode;

	local_irq_save(flags);
	rcu_momentary_dyntick_idle();
	local_irq_restore(flags);
	/*
	 * Disable BH to provide the expected environment.  Also, when
	 * transitioning to/from NOCB mode, a self-requeuing callback might
	 * be invoked from softirq.  A short grace period could then cause
	 * both instances of this callback to execute concurrently.
	 */
	local_bh_disable();
	rcu_do_batch(rdp);
	local_bh_enable();
	lockdep_assert_irqs_enabled();
	rcu_nocb_lock_irqsave(rdp, flags);
	if (rcu_segcblist_nextgp(cblist, &cur_gp_seq) &&
	    rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
	    raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
		needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
	}

	if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
		if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) {
			rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_CB);
			if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
				needwake_state = true;
		}
		if (rcu_segcblist_ready_cbs(cblist))
			can_sleep = false;
	} else {
		/*
		 * De-offloading.  Clear our flag and notify the de-offload
		 * worker.  We won't touch the callbacks and keep sleeping
		 * until we ever get re-offloaded.
		 */
		WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB));
		rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_CB);
		if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
			needwake_state = true;
	}

	WRITE_ONCE(rdp->nocb_cb_sleep, can_sleep);

	if (rdp->nocb_cb_sleep)
		trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));

	rcu_nocb_unlock_irqrestore(rdp, flags);
	if (needwake_gp)
		rcu_gp_kthread_wake();

	if (needwake_state)
		swake_up_one(&rdp->nocb_state_wq);

	do {
		swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
						    nocb_cb_wait_cond(rdp));

		// Ensure callback invocation follows the ->nocb_cb_sleep test.
		if (smp_load_acquire(&rdp->nocb_cb_sleep)) {
			WARN_ON(signal_pending(current));
			trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
		}
	} while (!nocb_cb_can_run(rdp));
}

/*
 * Per-rcu_data kthread, but only for no-CBs CPUs.  Repeatedly invoke
 * nocb_cb_wait() to do the dirty work.
 */
static int rcu_nocb_cb_kthread(void *arg)
{
	struct rcu_data *rdp = arg;

	// Each pass through this loop does one callback batch, and,
	// if there are no more ready callbacks, waits for them.
	for (;;) {
		nocb_cb_wait(rdp);
		cond_resched_tasks_rcu_qs();
	}
	return 0;
}

/* Is a deferred wakeup of rcu_nocb_kthread() required? */
static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
{
	return READ_ONCE(rdp->nocb_defer_wakeup) >= level;
}

/* Do a deferred wakeup of rcu_nocb_kthread(). */
static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp,
					   struct rcu_data *rdp, int level,
					   unsigned long flags)
	__releases(rdp_gp->nocb_gp_lock)
{
	int ndw;
	int ret;

	if (!rcu_nocb_need_deferred_wakeup(rdp_gp, level)) {
		raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
		return false;
	}

	ndw = rdp_gp->nocb_defer_wakeup;
	ret = __wake_nocb_gp(rdp_gp, rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));

	return ret;
}

/* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
{
	unsigned long flags;
	struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);

	WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp);
	trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));

	raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags);
	smp_mb__after_spinlock(); /* Timer expire before wakeup. */
	do_nocb_deferred_wakeup_common(rdp, rdp, RCU_NOCB_WAKE_BYPASS, flags);
}

/*
 * Do a deferred wakeup of rcu_nocb_kthread() from fastpath code.
 * This means we do an inexact common-case check.  Note that if
 * we miss, ->nocb_timer will eventually clean things up.
 */
static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
	unsigned long flags;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;

	if (!rdp_gp || !rcu_nocb_need_deferred_wakeup(rdp_gp, RCU_NOCB_WAKE))
		return false;

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
	return do_nocb_deferred_wakeup_common(rdp_gp, rdp, RCU_NOCB_WAKE, flags);
}

void rcu_nocb_flush_deferred_wakeup(void)
{
	do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
}
EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);

static int rdp_offload_toggle(struct rcu_data *rdp,
			      bool offload, unsigned long flags)
	__releases(rdp->nocb_lock)
{
	struct rcu_segcblist *cblist = &rdp->cblist;
	struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
	bool wake_gp = false;

	rcu_segcblist_offload(cblist, offload);

	if (rdp->nocb_cb_sleep)
		rdp->nocb_cb_sleep = false;
	rcu_nocb_unlock_irqrestore(rdp, flags);

	/*
	 * Ignore former value of nocb_cb_sleep and force wake up as it could
	 * have been spuriously set to false already.
	 */
	swake_up_one(&rdp->nocb_cb_wq);

	raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
	if (rdp_gp->nocb_gp_sleep) {
		rdp_gp->nocb_gp_sleep = false;
		wake_gp = true;
	}
	raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);

	if (wake_gp)
		wake_up_process(rdp_gp->nocb_gp_kthread);

	return 0;
}

static long rcu_nocb_rdp_deoffload(void *arg)
{
	struct rcu_data *rdp = arg;
	struct rcu_segcblist *cblist = &rdp->cblist;
	unsigned long flags;
	int ret;

	WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());

	pr_info("De-offloading %d\n", rdp->cpu);

	rcu_nocb_lock_irqsave(rdp, flags);
	/*
	 * Flush the bypass once and for all now.  This suffices because we
	 * are running on the target CPU holding ->nocb_lock (thus having
	 * interrupts disabled), and because rdp_offload_toggle() invokes
	 * rcu_segcblist_offload(), which clears SEGCBLIST_OFFLOADED.  Thus
	 * future calls to rcu_segcblist_completely_offloaded() will return
	 * false, which means that future calls to rcu_nocb_try_bypass()
	 * will refuse to put anything into the bypass.
	 */
	WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
	ret = rdp_offload_toggle(rdp, false, flags);
	swait_event_exclusive(rdp->nocb_state_wq,
			      !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB |
							SEGCBLIST_KTHREAD_GP));
	/*
	 * Lock one last time to acquire latest callback updates from kthreads
	 * so we can later handle callbacks locally without locking.
	 */
	rcu_nocb_lock_irqsave(rdp, flags);
	/*
	 * Theoretically we could set SEGCBLIST_SOFTIRQ_ONLY after the nocb
	 * lock is released, but how about being paranoid for once?
	 */
	rcu_segcblist_set_flags(cblist, SEGCBLIST_SOFTIRQ_ONLY);
	/*
	 * With SEGCBLIST_SOFTIRQ_ONLY, we can't use
	 * rcu_nocb_unlock_irqrestore() anymore.
	 */
	raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);

	/* Sanity check */
	WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));

	return ret;
}

int rcu_nocb_cpu_deoffload(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	int ret = 0;

	mutex_lock(&rcu_state.barrier_mutex);
	cpus_read_lock();
	if (rcu_rdp_is_offloaded(rdp)) {
		if (cpu_online(cpu)) {
			ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
			if (!ret)
				cpumask_clear_cpu(cpu, rcu_nocb_mask);
		} else {
			pr_info("NOCB: Can't CB-deoffload an offline CPU\n");
			ret = -EINVAL;
		}
	}
	cpus_read_unlock();
	mutex_unlock(&rcu_state.barrier_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);

static long rcu_nocb_rdp_offload(void *arg)
{
	struct rcu_data *rdp = arg;
	struct rcu_segcblist *cblist = &rdp->cblist;
	unsigned long flags;
	int ret;

	WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());

	/*
	 * For now we only support re-offload, ie: the rdp must have been
	 * offloaded on boot first.
	 */
	if (!rdp->nocb_gp_rdp)
		return -EINVAL;

	pr_info("Offloading %d\n", rdp->cpu);

	/*
	 * Can't use rcu_nocb_lock_irqsave() while we are still in
	 * SEGCBLIST_SOFTIRQ_ONLY mode.
	 */
	raw_spin_lock_irqsave(&rdp->nocb_lock, flags);

	/*
	 * We didn't take the nocb lock while working on the rdp->cblist
	 * in SEGCBLIST_SOFTIRQ_ONLY mode.  Every modification done
	 * previously on rdp->cblist must therefore be visible to the nocb
	 * kthreads upon wake up after they read the cblist flags.
	 *
	 * The locking layout against ->nocb_lock enforces that ordering:
	 *
	 *  rcu_nocb_rdp_offload()      nocb_cb_wait()/nocb_gp_wait()
	 * -------------------------   ----------------------------
	 *      WRITE callbacks           rcu_nocb_lock()
	 *      rcu_nocb_lock()           READ flags
	 *      WRITE flags               READ callbacks
	 *      rcu_nocb_unlock()         rcu_nocb_unlock()
	 */
	ret = rdp_offload_toggle(rdp, true, flags);
	swait_event_exclusive(rdp->nocb_state_wq,
			      rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) &&
			      rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));

	return ret;
}

int rcu_nocb_cpu_offload(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	int ret = 0;

	mutex_lock(&rcu_state.barrier_mutex);
	cpus_read_lock();
	if (!rcu_rdp_is_offloaded(rdp)) {
		if (cpu_online(cpu)) {
			ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
			if (!ret)
				cpumask_set_cpu(cpu, rcu_nocb_mask);
		} else {
			pr_info("NOCB: Can't CB-offload an offline CPU\n");
			ret = -EINVAL;
		}
	}
	cpus_read_unlock();
	mutex_unlock(&rcu_state.barrier_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);
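
/*
 * Usage sketch (illustrative only): a caller that may sleep, such as a
 * test module, can toggle offloading for CPU 3 at run time with:
 *
 *	int err = rcu_nocb_cpu_deoffload(3);
 *	...
 *	err = rcu_nocb_cpu_offload(3);
 *
 * Both functions return 0 on success or a negative errno, for example
 * -EINVAL for an offline CPU.
 */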

void __init rcu_init_nohz(void)
{
	int cpu;
	bool need_rcu_nocb_mask = false;
	struct rcu_data *rdp;

#if defined(CONFIG_NO_HZ_FULL)
	if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
		need_rcu_nocb_mask = true;
#endif

	if (!cpumask_available(rcu_nocb_mask) && need_rcu_nocb_mask) {
		if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
			pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
			return;
		}
	}
	if (!cpumask_available(rcu_nocb_mask))
		return;

#if defined(CONFIG_NO_HZ_FULL)
	if (tick_nohz_full_running)
		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
#endif

	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
		pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
			    rcu_nocb_mask);
	}
	if (cpumask_empty(rcu_nocb_mask))
		pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
	else
		pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
			cpumask_pr_args(rcu_nocb_mask));
	if (rcu_nocb_poll)
		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");

	for_each_cpu(cpu, rcu_nocb_mask) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (rcu_segcblist_empty(&rdp->cblist))
			rcu_segcblist_init(&rdp->cblist);
		rcu_segcblist_offload(&rdp->cblist, true);
		rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB);
		rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_GP);
	}
	rcu_organize_nocb_kthreads();
}

/* Initialize per-rcu_data variables for no-CBs CPUs. */
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
	init_swait_queue_head(&rdp->nocb_cb_wq);
	init_swait_queue_head(&rdp->nocb_gp_wq);
	init_swait_queue_head(&rdp->nocb_state_wq);
	raw_spin_lock_init(&rdp->nocb_lock);
	raw_spin_lock_init(&rdp->nocb_bypass_lock);
	raw_spin_lock_init(&rdp->nocb_gp_lock);
	timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
	rcu_cblist_init(&rdp->nocb_bypass);
}

/*
 * If the specified CPU is a no-CBs CPU that does not already have its
 * rcuo CB kthread, spawn it.  Additionally, if the rcuog GP kthread
 * for this CPU's group has not yet been created, spawn it as well.
 */
static void rcu_spawn_one_nocb_kthread(int cpu)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	struct rcu_data *rdp_gp;
	struct task_struct *t;

	/*
	 * If this isn't a no-CBs CPU or if it already has an rcuo kthread,
	 * then nothing to do.
	 */
	if (!rcu_is_nocb_cpu(cpu) || rdp->nocb_cb_kthread)
		return;

	/* If this CPU's group does not yet have its GP kthread, spawn it. */
	rdp_gp = rdp->nocb_gp_rdp;
	if (!rdp_gp->nocb_gp_kthread) {
		t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
				"rcuog/%d", rdp_gp->cpu);
		if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__))
			return;
		WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
	}

	/* Spawn the CB kthread for this CPU. */
	t = kthread_run(rcu_nocb_cb_kthread, rdp,
			"rcuo%c/%d", rcu_state.abbr, cpu);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
		return;
	WRITE_ONCE(rdp->nocb_cb_kthread, t);
	WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
}

/*
 * Spawn the specified CPU's rcuo kthreads, but only once the scheduler
 * is fully up and running.
 */
static void rcu_spawn_cpu_nocb_kthread(int cpu)
{
	if (rcu_scheduler_fully_active)
		rcu_spawn_one_nocb_kthread(cpu);
}

/*
 * Once the scheduler is running, spawn rcuo kthreads for all online
 * no-CBs CPUs.
 */
static void __init rcu_spawn_nocb_kthreads(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		rcu_spawn_cpu_nocb_kthread(cpu);
}

/* How many CB CPU IDs per GP kthread?  Default of -1 for sqrt(nr_cpu_ids). */
static int rcu_nocb_gp_stride = -1;
module_param(rcu_nocb_gp_stride, int, 0444);

/*
 * Initialize GP-to-CB-kthread relationships for all no-CBs CPUs.
 */
static void __init rcu_organize_nocb_kthreads(void)
{
	int cpu;
	bool firsttime = true;
	bool gotnocbs = false;
	bool gotnocbscbs = true;
	int ls = rcu_nocb_gp_stride;
	int nl = 0; /* Next GP kthread. */
	struct rcu_data *rdp;
	struct rcu_data *rdp_gp = NULL;
	struct rcu_data *rdp_prev = NULL;

	if (!cpumask_available(rcu_nocb_mask))
		return;
	if (ls == -1) {
		ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
		rcu_nocb_gp_stride = ls;
	}

	/*
	 * Each pass through this loop sets up one rcu_data structure.
	 * Should the corresponding CPU come online in the future, then
	 * we will spawn the needed set of rcu_nocb_kthread() kthreads.
	 */
	for_each_cpu(cpu, rcu_nocb_mask) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (rdp->cpu >= nl) {
			/* New GP kthread, set up for CBs & next GP. */
			gotnocbs = true;
			nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
			rdp->nocb_gp_rdp = rdp;
			rdp_gp = rdp;
			if (dump_tree) {
				if (!firsttime)
					pr_cont("%s\n", gotnocbscbs
						? "" : " (self only)");
				gotnocbscbs = false;
				firsttime = false;
				pr_alert("%s: No-CB GP kthread CPU %d:",
					 __func__, cpu);
			}
		} else {
			/* Another CB kthread, link to previous GP kthread. */
			gotnocbscbs = true;
			rdp->nocb_gp_rdp = rdp_gp;
			rdp_prev->nocb_next_cb_rdp = rdp;
			if (dump_tree)
				pr_cont(" %d", cpu);
		}
		rdp_prev = rdp;
	}
	if (gotnocbs && dump_tree)
		pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
}
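
/*
 * Worked example of the grouping above (illustrative only): with
 * nr_cpu_ids = 16 and rcu_nocb_gp_stride left at -1, the stride becomes
 * 16 / int_sqrt(16) = 4, so offloaded CPUs are grouped as 0-3, 4-7,
 * 8-11, and 12-15, with the first offloaded CPU in each group also
 * acting as the rdp for that group's rcuog GP kthread.
 */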

/*
 * Bind the current task to the offloaded CPUs.  If there are no
 * offloaded CPUs, leave the current task unbound.
 */
void rcu_bind_current_to_nocb(void)
{
	if (cpumask_available(rcu_nocb_mask) && cpumask_weight(rcu_nocb_mask))
		WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
}
EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);

// The ->on_cpu field is available only in CONFIG_SMP=y, so ...
#ifdef CONFIG_SMP
static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
{
	return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : "";
}
#else // #ifdef CONFIG_SMP
static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
{
	return "";
}
#endif // #else #ifdef CONFIG_SMP

/*
 * Dump out nocb grace-period kthread state for the specified rcu_data
 * structure.
 */
static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
{
	struct rcu_node *rnp = rdp->mynode;

	pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
		rdp->cpu,
		"kK"[!!rdp->nocb_gp_kthread],
		"lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
		"dD"[!!rdp->nocb_defer_wakeup],
		"tT"[timer_pending(&rdp->nocb_timer)],
		"sS"[!!rdp->nocb_gp_sleep],
		".W"[swait_active(&rdp->nocb_gp_wq)],
		".W"[swait_active(&rnp->nocb_gp_wq[0])],
		".W"[swait_active(&rnp->nocb_gp_wq[1])],
		".B"[!!rdp->nocb_gp_bypass],
		".G"[!!rdp->nocb_gp_gp],
		(long)rdp->nocb_gp_seq,
		rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops),
		rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.',
		rdp->nocb_gp_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
		show_rcu_should_be_on_cpu(rdp->nocb_gp_kthread));
}

/* Dump out nocb kthread state for the specified rcu_data structure. */
static void show_rcu_nocb_state(struct rcu_data *rdp)
{
	char bufw[20];
	char bufr[20];
	struct rcu_segcblist *rsclp = &rdp->cblist;
	bool waslocked;
	bool wassleep;

	if (rdp->nocb_gp_rdp == rdp)
		show_rcu_nocb_gp_state(rdp);

	sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
	sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
	pr_info("   CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
		rdp->cpu, rdp->nocb_gp_rdp->cpu,
		rdp->nocb_next_cb_rdp ? rdp->nocb_next_cb_rdp->cpu : -1,
		"kK"[!!rdp->nocb_cb_kthread],
		"bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
		"cC"[!!atomic_read(&rdp->nocb_lock_contended)],
		"lL"[raw_spin_is_locked(&rdp->nocb_lock)],
		"sS"[!!rdp->nocb_cb_sleep],
		".W"[swait_active(&rdp->nocb_cb_wq)],
		jiffies - rdp->nocb_bypass_first,
		jiffies - rdp->nocb_nobypass_last,
		rdp->nocb_nobypass_count,
		".D"[rcu_segcblist_ready_cbs(rsclp)],
		".W"[!rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL)],
		rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL) ? "" : bufw,
		".R"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL)],
		rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL) ? "" : bufr,
		".N"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_TAIL)],
		".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
		rcu_segcblist_n_cbs(&rdp->cblist),
		rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.',
		rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1,
		show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));

	/* It is OK for GP kthreads to have GP state. */
	if (rdp->nocb_gp_rdp == rdp)
		return;

	waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
	wassleep = swait_active(&rdp->nocb_gp_wq);
	if (!rdp->nocb_gp_sleep && !waslocked && !wassleep)
		return; /* Nothing untoward. */

	pr_info("   nocb GP activity on CB-only CPU!!! %c%c%c %c\n",
		"lL"[waslocked],
		"dD"[!!rdp->nocb_defer_wakeup],
		"sS"[!!rdp->nocb_gp_sleep],
		".W"[wassleep]);
}

#else /* #ifdef CONFIG_RCU_NOCB_CPU */

static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
{
	return 0;
}

static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
{
	return false;
}

/* No ->nocb_lock to acquire. */
static void rcu_nocb_lock(struct rcu_data *rdp)
{
}

/* No ->nocb_lock to release. */
static void rcu_nocb_unlock(struct rcu_data *rdp)
{
}

/* No ->nocb_lock to release. */
static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
				       unsigned long flags)
{
	local_irq_restore(flags);
}

/* Lockdep check that ->cblist may be safely accessed. */
static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
{
	lockdep_assert_irqs_disabled();
}

static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
{
}

static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
{
	return NULL;
}

static void rcu_init_one_nocb(struct rcu_node *rnp)
{
}

static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				  unsigned long j)
{
	return true;
}

static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
				bool *was_alldone, unsigned long flags)
{
	return false;
}

static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
				 unsigned long flags)
{
	WARN_ON_ONCE(1); /* Should be dead code! */
}

static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{
}

static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
{
	return false;
}

static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
	return false;
}

static void rcu_spawn_cpu_nocb_kthread(int cpu)
{
}

static void __init rcu_spawn_nocb_kthreads(void)
{
}

static void show_rcu_nocb_state(struct rcu_data *rdp)
{
}

#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */