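// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility.
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 */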
#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

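/* Bits for ->extendables field, extendables param, and related definitions. */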
#define RCUTORTURE_RDR_SHIFT	 8
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01
#define RCUTORTURE_RDR_IRQ	 0x02
#define RCUTORTURE_RDR_PREEMPT	 0x04
#define RCUTORTURE_RDR_RBH	 0x08
#define RCUTORTURE_RDR_SCHED	 0x10
#define RCUTORTURE_RDR_RCU	 0x20
#define RCUTORTURE_RDR_NBITS	 6
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7

#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)

torture_param(int, cbflood_inter_holdoff, HZ,
	      "Holdoff between floods (jiffies)");
torture_param(int, cbflood_intra_holdoff, 1,
	      "Holdoff between bursts (jiffies)");
torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
torture_param(int, cbflood_n_per_burst, 20000,
	      "# callbacks per burst in flood");
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

static int nrealreaders;
static int ncbflooders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct **cbflood_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes;
static atomic_long_t n_cbfloods;
static struct list_head rcu_torture_removed;

static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_SYNC		7
#define RTWS_STUTTER		8
#define RTWS_STOPPING		9
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

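/* Record reader segment types and duration for first failing read. */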
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;

static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else
#define rcu_can_boost() 0
#endif

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif

static unsigned long boost_starttime;
static DEFINE_MUTEX(boost_mutex);

static atomic_t barrier_cbs_count;
static bool barrier_phase;
static atomic_t barrier_cbs_invoked;
static wait_queue_head_t *barrier_cbs_wq;
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

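/*
 * Allocate an element from the rcu_tortures pool.
 */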
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

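/*
 * Free an element to the rcu_tortures pool.
 */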
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

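/*
 * Operations vector for selecting different types of tests.
 */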
struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_state)(void);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int ext_irq_conflict;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;

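/*
 * Definitions for rcu torture testing.
 */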
static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

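	/* We want a short delay sometimes to make a reader delay the
	 * grace period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */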
	if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5;
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule();
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

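/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */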
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;

	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}

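/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */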
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.ttype		= RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.get_state	= get_state_synchronize_rcu,
	.cond_sync	= cond_synchronize_rcu,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.stall_dur	= rcu_jiffies_till_stall_check,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "rcu"
};

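/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */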
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};

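/*
 * Definitions for srcu torture testing.
 */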
DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */
	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	static DEFINE_TORTURE_RANDOM(rand);

	if (torture_random(&rand) & 0x800)
		cleanup_srcu_struct(&srcu_ctld);
	else
		cleanup_srcu_struct_quiesced(&srcu_ctld);
	srcu_ctlp = &srcu_ctl;
}

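/* As above, but dynamically allocated. */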
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcud"
};

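/* As above, but broken due to inappropriate reader extension. */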
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};

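/*
 * Definitions for RCU-tasks torture testing.
 */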
static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks"
};

static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

static bool __maybe_unused torturing_tasks(void)
{
	return cur_ops == &tasks_ops;
}

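/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */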
struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	/* Ensure RCU-core accesses precede clearing ->inflight. */
	smp_store_release(&rbip->inflight, 0);
}

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled.  Only possible if rcutorture is built-in, otherwise the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long start, unsigned long end)
{
	if (end - start > test_boost_duration * HZ - HZ / 2) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;

		return true;
	}

	return false;
}

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);

	do {
		/* Track if the test failed already in this test interval. */
		bool failed = false;

		/* Increment n_rcu_torture_boosts once per boost-test. */
		while (!kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}
		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!smp_load_acquire(&rbi.inflight)) {
				/* RCU core before ->inflight = 1. */
				smp_store_release(&rbi.inflight, 1);
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				/* Check if the boost test failed. */
				failed = failed ||
					 rcu_torture_boost_failed(call_rcu_time,
								  jiffies);
				call_rcu_time = jiffies;
			}
			stutter_wait("rcu_torture_boost");
			if (torture_must_stop())
				goto checkwait;
		}

		/*
		 * If boost never happened, then inflight will always be 1, in
		 * which case the boost check would never happen in the above
		 * loop, so do another one here.
		 */
		if (!failed && smp_load_acquire(&rbi.inflight))
			rcu_torture_boost_failed(call_rcu_time, jiffies);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	stutter_wait("rcu_torture_boost");
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	destroy_rcu_head_on_stack(&rbi.rcu);
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

static void rcu_torture_cbflood_cb(struct rcu_head *rhp)
{
}

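/*
 * RCU torture callback-flood kthread.  Repeatedly induces bursts of calls
 * to call_rcu(), increasing the probability of occurrence of some
 * important types of race conditions.
 */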
static int
rcu_torture_cbflood(void *arg)
{
	int err = 1;
	int i;
	int j;
	struct rcu_head *rhp;

	if (cbflood_n_per_burst > 0 &&
	    cbflood_inter_holdoff > 0 &&
	    cbflood_intra_holdoff > 0 &&
	    cur_ops->call &&
	    cur_ops->cb_barrier) {
		rhp = vmalloc(array3_size(cbflood_n_burst,
					  cbflood_n_per_burst,
					  sizeof(*rhp)));
		err = !rhp;
	}
	if (err) {
		VERBOSE_TOROUT_STRING("rcu_torture_cbflood disabled: Bad args or OOM");
		goto wait_for_stop;
	}
	VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
	do {
		schedule_timeout_interruptible(cbflood_inter_holdoff);
		atomic_long_inc(&n_cbfloods);
		WARN_ON(signal_pending(current));
		for (i = 0; i < cbflood_n_burst; i++) {
			for (j = 0; j < cbflood_n_per_burst; j++) {
				cur_ops->call(&rhp[i * cbflood_n_per_burst + j],
					      rcu_torture_cbflood_cb);
			}
			schedule_timeout_interruptible(cbflood_intra_holdoff);
			WARN_ON(signal_pending(current));
		}
		cur_ops->cb_barrier();
		stutter_wait("rcu_torture_cbflood");
	} while (!torture_must_stop());
	vfree(rhp);
wait_for_stop:
	torture_kthread_stopping("rcu_torture_cbflood");
	return 0;
}

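/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */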
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		stutter_wait("rcu_torture_fqs");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

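/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */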
static int
rcu_torture_writer(void *arg)
{
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	int expediting = 0;
	unsigned long gp_snap;
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_sync1 = gp_sync;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC,
			   RTWS_COND_GET, RTWS_SYNC };
	int nsynctypes = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update-side primitives were selected, so there is
		 * nothing for this kthread to do.  Park it until the
		 * torture test ends.
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_state();
				i = torture_random(&rand) % 16;
				if (i != 0)
					schedule_timeout_interruptible(i);
				udelay(torture_random(&rand) % 1000);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, so recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		if (stutter_wait("rcu_torture_writer"))
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free))
					WARN_ON_ONCE(1);
	} while (!torture_must_stop());

	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}

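/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */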
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (cur_ops->sync && torture_random(&rand) & 0x80)
				cur_ops->sync();
			else if (cur_ops->exp_sync)
				cur_ops->exp_sync();
		} else if (gp_normal && cur_ops->sync) {
			cur_ops->sync();
		} else if (cur_ops->exp_sync) {
			cur_ops->exp_sync();
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}

static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}

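/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this amounts to extending
 * an existing extended section, set rtrsp to include the length of the
 * segment just ended.
 */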
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	int idxnew = -1;
	int idxold = *readstate;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold < 0);
	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU)
		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

	/* Next, remove old protection, irq first due to bh conflict. */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_RCU)
		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew == -1)
		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
	WARN_ON_ONCE(idxnew < 0);
	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
	*readstate = idxnew | newstate;
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
}

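/* Return the biggest extendables mask given current RCU and boot parameters. */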
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU;
	return mask;
}

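/* Return a random protection state mask, but with at least one bit. */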
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp) >> 8;
	unsigned long randmask2 = randmask1 >> 3;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
	/* Most of the time lots of bits, half the time only one bit. */
	if (!(randmask1 & 0x7))
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
	/* Can't enable bh w/irq disabled. */
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    ((!(mask & RCUTORTURE_RDR_BH) && (oldmask & RCUTORTURE_RDR_BH)) ||
	     (!(mask & RCUTORTURE_RDR_RBH) && (oldmask & RCUTORTURE_RDR_RBH))))
		mask |= RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
	if ((mask & RCUTORTURE_RDR_IRQ) &&
	    !(mask & cur_ops->ext_irq_conflict) &&
	    (oldmask & cur_ops->ext_irq_conflict))
		mask |= cur_ops->ext_irq_conflict; /* Or if readers object. */
	return mask ?: RCUTORTURE_RDR_RCU;
}

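/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */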
static struct rt_read_seg *
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
		       struct rt_read_seg *rtrsp)
{
	int i;
	int j;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return rtrsp;  /* Current RCU reader not extendable. */
	/* Bias towards larger numbers of loops. */
	i = (torture_random(trsp) >> 3);
	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
	for (j = 0; j < i; j++) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
	}
	return &rtrsp[j];
}

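/*
 * Do one read-side critical section, returning false if there was
 * no data to read.  Can be invoked both from process context and
 * from a timer handler.
 */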
static bool rcu_torture_one_read(struct torture_random_state *trsp)
{
	int i;
	unsigned long started;
	unsigned long completed;
	int newstate;
	struct rcu_torture *p;
	int pipe_count;
	int readstate = 0;
	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
	struct rt_read_seg *rtrsp = &rtseg[0];
	struct rt_read_seg *rtrsp1;
	unsigned long long ts;

	newstate = rcutorture_extend_mask(readstate, trsp);
	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
	started = cur_ops->get_gp_seq();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(srcu_ctlp) ||
				  torturing_tasks());
	if (p == NULL) {
		/* Wait for rcu_torture_writer to get underway. */
		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
		return false;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->get_gp_seq();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
					  ts, started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = rcutorture_seq_diff(completed, started);
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);

	/* If error or close call, record the sequence of reader protections. */
	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
		i = 0;
		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
			err_segs[i++] = *rtrsp1;
		rt_read_nsegs = i;
	}

	return true;
}

static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);

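/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */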
static void rcu_torture_timer(struct timer_list *unused)
{
	atomic_long_inc(&n_rcu_torture_timers);
	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand));

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}

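/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */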
static int
rcu_torture_reader(void *arg)
{
	unsigned long lastsleep = jiffies;
	long myid = (long)arg;
	int mynumonline = myid;
	DEFINE_TORTURE_RANDOM(rand);
	struct timer_list t;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, MAX_NICE);
	if (irqreader && cur_ops->irq_capable)
		timer_setup_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		if (!rcu_torture_one_read(&rand))
			schedule_timeout_interruptible(HZ);
		if (time_after(jiffies, lastsleep)) {
			schedule_timeout_interruptible(1);
			lastsleep = jiffies + 10;
		}
		while (num_online_cpus() < mynumonline && !torture_must_stop())
			schedule_timeout_interruptible(HZ / 5);
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable) {
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}

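/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */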
static void
rcu_torture_stats_print(void)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	static unsigned long rtcv_snap = ULONG_MAX;
	static bool splatted;
	struct task_struct *wtp;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		rcu_torture_current,
		rcu_torture_current_version,
		list_empty(&rcu_torture_freelist),
		atomic_read(&n_rcu_torture_alloc),
		atomic_read(&n_rcu_torture_alloc_fail),
		atomic_read(&n_rcu_torture_free));
	pr_cont("rtmbe: %d rtbe: %ld rtbke: %ld rtbre: %ld ",
		atomic_read(&n_rcu_torture_mberror),
		n_rcu_torture_barrier_error,
		n_rcu_torture_boost_ktrerror,
		n_rcu_torture_boost_rterror);
	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
		n_rcu_torture_boost_failure,
		n_rcu_torture_boosts,
		atomic_long_read(&n_rcu_torture_timers));
	torture_onoff_stats();
	pr_cont("barrier: %ld/%ld:%ld ",
		n_barrier_successes,
		n_barrier_attempts,
		n_rcu_torture_barrier_error);
	pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_barrier_error != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0 ||
	    i > 1) {
		pr_cont("%s", "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	pr_cont("Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", pipesummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", batchsummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
	}
	pr_cont("\n");

	if (cur_ops->stats)
		cur_ops->stats();
	if (rtcv_snap == rcu_torture_current_version &&
	    rcu_torture_current != NULL) {
		int __maybe_unused flags = 0;
		unsigned long __maybe_unused gp_seq = 0;

		rcutorture_get_gp_data(cur_ops->ttype,
				       &flags, &gp_seq);
		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
					&flags, &gp_seq);
		wtp = READ_ONCE(writer_task);
		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#lx cpu %d\n",
			 rcu_torture_writer_state_getname(),
			 rcu_torture_writer_state, gp_seq, flags,
			 wtp == NULL ? ~0UL : wtp->state,
			 wtp == NULL ? -1 : (int)task_cpu(wtp));
		if (!splatted && wtp) {
			sched_show_task(wtp);
			splatted = true;
		}
		show_rcu_gp_kthreads();
		rcu_ftrace_dump(DUMP_ALL);
	}
	rtcv_snap = rcu_torture_current_version;
}

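/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 */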
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_stats");
	return 0;
}

static void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff);
}

static int rcutorture_booster_cleanup(unsigned int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return 0;
	mutex_lock(&boost_mutex);
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	rcu_torture_enable_rt_throttle();
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	torture_stop_kthread(rcu_torture_boost, t);
	return 0;
}

static int rcutorture_booster_init(unsigned int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	rcu_torture_disable_rt_throttle();
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}

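/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */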
static int rcu_torture_stall(void *args)
{
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop()) {
		stop_at = ktime_get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		rcu_read_lock();
		if (stall_cpu_irqsoff)
			local_irq_disable();
		else
			preempt_disable();
		pr_alert("rcu_torture_stall start on CPU %d.\n",
			 smp_processor_id());
		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
				    stop_at))
			continue;  /* Induce RCU CPU stall warning. */
		if (stall_cpu_irqsoff)
			local_irq_enable();
		else
			preempt_enable();
		rcu_read_unlock();
		pr_alert("rcu_torture_stall end.\n");
	}
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}

/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	if (stall_cpu <= 0)
		return 0;
	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
}

/* State for continuous-flood RCU callbacks. */
struct fwd_cb_state {
	struct rcu_head rh;
	int stop;
};

/* Forward-progress self-propagating RCU callback function. */
static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
{
	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);

	if (READ_ONCE(fcsp->stop)) {
		WRITE_ONCE(fcsp->stop, 2);
		return;
	}
	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
}

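/* Carry out grace-period forward-progress testing. */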
static int rcu_torture_fwd_prog(void *args)
{
	unsigned long cver;
	unsigned long dur;
	struct fwd_cb_state fcs;
	unsigned long gps;
	int idx;
	int sd;
	int sd4;
	bool selfpropcb = false;
	unsigned long stopat;
	int tested = 0;
	int tested_tries = 0;
	static DEFINE_TORTURE_RANDOM(trs);

	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
		set_user_nice(current, MAX_NICE);
	if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
		init_rcu_head_on_stack(&fcs.rh);
		selfpropcb = true;
	}
	do {
		schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
		if (selfpropcb) {
			WRITE_ONCE(fcs.stop, 0);
			cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
		}
		cver = READ_ONCE(rcu_torture_current_version);
		gps = cur_ops->get_gp_seq();
		sd = cur_ops->stall_dur() + 1;
		sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
		dur = sd4 + torture_random(&trs) % (sd - sd4);
		stopat = jiffies + dur;
		while (time_before(jiffies, stopat) && !torture_must_stop()) {
			idx = cur_ops->readlock();
			udelay(10);
			cur_ops->readunlock(idx);
			if (!fwd_progress_need_resched || need_resched())
				cond_resched();
		}
		tested_tries++;
		if (!time_before(jiffies, stopat) && !torture_must_stop()) {
			tested++;
			cver = READ_ONCE(rcu_torture_current_version) - cver;
			gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
			WARN_ON(!cver && gps < 2);
			pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
		}
		if (selfpropcb) {
			WRITE_ONCE(fcs.stop, 1);
			cur_ops->sync(); /* Wait for running CB to complete. */
			cur_ops->cb_barrier(); /* Wait for queued callbacks. */
		}
		/* Avoid slow periods, better to test when busy. */
		stutter_wait("rcu_torture_fwd_prog");
	} while (!torture_must_stop());
	if (selfpropcb) {
		WARN_ON(READ_ONCE(fcs.stop) != 2);
		destroy_rcu_head_on_stack(&fcs.rh);
	}
	/* Short runs might not contain a valid forward-progress attempt. */
	WARN_ON(!tested && tested_tries >= 5);
	pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
	torture_kthread_stopping("rcu_torture_fwd_prog");
	return 0;
}

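/* If forward-progress checking is requested and feasible, spawn the thread. */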
static int __init rcu_torture_fwd_prog_init(void)
{
	if (!fwd_progress)
		return 0;  /* Not requested, so don't do it. */
	if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
		return 0;
	}
	if (stall_cpu > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
			return -EINVAL; /* In module, can fail back to user. */
		WARN_ON(1); /* Make sure rcutorture notices conflict. */
		return 0;
	}
	if (fwd_progress_holdoff <= 0)
		fwd_progress_holdoff = 1;
	if (fwd_progress_div <= 0)
		fwd_progress_div = 4;
	return torture_create_kthread(rcu_torture_fwd_prog,
				      NULL, fwd_prog_task);
}

/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

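/* kthread function to register callbacks used to test RCU barriers. */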
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = 0;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, MAX_NICE);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    smp_load_acquire(&barrier_phase)) != lastphase ||
			   torture_must_stop());
		lastphase = newphase;
		if (torture_must_stop())
			break;
		/*
		 * The above smp_load_acquire() ensures that the barrier_phase
		 * load is ordered before the following ->call().
		 */
		local_irq_disable(); /* Just to test no-irq call_rcu(). */
		cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		local_irq_enable();
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	torture_kthread_stopping("rcu_torture_barrier_cbs");
	return 0;
}

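/* kthread function to drive and coordinate RCU barrier testing. */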
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		/* Ensure barrier_phase ordered after prior assignments. */
		smp_store_release(&barrier_phase, !barrier_phase);
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   torture_must_stop());
		if (torture_must_stop())
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
			       atomic_read(&barrier_cbs_invoked),
			       n_barrier_cbs);
			WARN_ON_ONCE(1);
		} else {
			n_barrier_successes++;
		}
		schedule_timeout_interruptible(HZ / 10);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_barrier");
	return 0;
}

/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs <= 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		ret = torture_create_kthread(rcu_torture_barrier_cbs,
					     (void *)(long)i,
					     barrier_cbs_tasks[i]);
		if (ret)
			return ret;
	}
	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
}

/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	torture_stop_kthread(rcu_torture_barrier, barrier_task);
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++)
			torture_stop_kthread(rcu_torture_barrier_cbs,
					     barrier_cbs_tasks[i]);
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}

static bool rcu_torture_can_boost(void)
{
	static int boost_warn_once;
	int prio;

	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
		return false;

	prio = rcu_get_gp_kthreads_prio();
	if (!prio)
		return false;

	if (prio < 2) {
		if (boost_warn_once == 1)
			return false;

		pr_alert("%s: WARN: RCU kthread priority too low to test boosting.  Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
		boost_warn_once = 1;
		return false;
	}

	return true;
}

static enum cpuhp_state rcutor_hp;

static void
rcu_torture_cleanup(void)
{
	int firsttime;
	int flags = 0;
	unsigned long gp_seq = 0;
	int i;

	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}

	rcu_torture_barrier_cleanup();
	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	pr_alert("%s: End-test grace-period state: g%lu f%#x\n",
		 cur_ops->name, gp_seq, flags);
	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	for (i = 0; i < ncbflooders; i++)
		torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]);
	if (rcu_torture_can_boost())
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do torture-type-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (err_segs_recorded) {
		pr_alert("Failure/close-call rcutorture reader segments:\n");
		if (rt_read_nsegs == 0)
			pr_alert("\t: No segments recorded!!!\n");
		firsttime = 1;
		for (i = 0; i < rt_read_nsegs; i++) {
			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
			if (err_segs[i].rt_delay_jiffies != 0) {
				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
					err_segs[i].rt_delay_jiffies);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_ms != 0) {
				pr_cont("%s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_us != 0) {
				pr_cont("%s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
				firsttime = 0;
			}
			pr_cont("%s\n",
				err_segs[i].rt_preempted ? "preempted" : "");
		}
	}
	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
}

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

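/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */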
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}

static int __init
rcu_torture_init(void)
{
	long i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
		&busted_srcud_ops, &tasks_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_cont(" %s", torture_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		firsterr = -EINVAL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");

	/* Set up the freelist. */
	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */
	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	err_segs_recorded = 0;
	rt_read_nsegs = 0;

	/* Start up the kthreads. */
	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (firsterr)
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		firsterr = torture_stutter_init(stutter * HZ);
		if (firsterr)
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread. */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (firsterr)
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {
		/* rcu_torture_boost() wants this to be high. */
		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		if (firsterr < 0)
			goto unwind;
		rcutor_hp = firsterr;
	}
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (firsterr)
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval);
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (firsterr)
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (firsterr)
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	if (cbflood_n_burst > 0) {
		/* Create the cbflood threads. */
		ncbflooders = (num_online_cpus() + 3) / 4;
		cbflood_task = kcalloc(ncbflooders, sizeof(*cbflood_task),
				       GFP_KERNEL);
		if (!cbflood_task) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
		for (i = 0; i < ncbflooders; i++) {
			firsterr = torture_create_kthread(rcu_torture_cbflood,
							  NULL,
							  cbflood_task[i]);
			if (firsterr)
				goto unwind;
		}
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);