linux/kernel/rcutorture.c
/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *        Josh Triplett <josh@freedesktop.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
              "Josh Triplett <josh@freedesktop.org>");

static int nreaders = -1;       /* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4;    /* # fake writer threads */
static int stat_interval;       /* Interval between stats, in seconds. */
                                /*  Defaults to "only at end of test". */
static int verbose;             /* Print more debug info. */
static int test_no_idle_hz;     /* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 3; /* Interval between shuffles (in sec) */
static int stutter = 5;         /* Start/stop testing interval (in sec) */
static int irqreader = 1;       /* RCU readers from irq (timers). */
static int fqs_duration = 0;    /* Duration of bursts (us), 0 to disable. */
static int fqs_holdoff = 0;     /* Hold time within burst (us). */
static int fqs_stutter = 3;     /* Wait time between bursts (s). */
static int test_boost = 1;      /* Test RCU prio boost: 0=no, 1=maybe, 2=yes. */
static int test_boost_interval = 7; /* Interval between boost tests, seconds. */
static int test_boost_duration = 4; /* Duration of each boost test, seconds. */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */

module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0444);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(stutter, int, 0444);
MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
module_param(irqreader, int, 0444);
MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
module_param(fqs_duration, int, 0444);
MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us)");
module_param(fqs_holdoff, int, 0444);
MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
module_param(fqs_stutter, int, 0444);
MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
module_param(test_boost, int, 0444);
MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
module_param(test_boost_interval, int, 0444);
MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds.");
module_param(test_boost_duration, int, 0444);
MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds.");
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");

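/*
 * Illustrative invocation (not part of the original source): run the
 * SRCU flavor with four readers and a stats line every 30 seconds:
 *
 *      modprobe rcutorture torture_type=srcu nreaders=4 stat_interval=30
 *
 * All of the parameters above use mode 0444, so they can be set only at
 * module-load time.
 */
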
#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
        do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
        do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
        do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)

static char printk_buf[4096];

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;
static struct task_struct *stutter_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
        struct rcu_head rtort_rcu;
        int rtort_pipe_count;
        struct list_head rtort_free;
        int rtort_mbtest;
};
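
/*
 * Lifecycle sketch: an element is published as rcu_torture_current by the
 * writer; once replaced, its rtort_pipe_count is incremented roughly once
 * per grace period until it reaches RCU_TORTURE_PIPE_LEN, at which point
 * the element goes back on rcu_torture_freelist.  rtort_mbtest is 1 while
 * the element is live and is cleared just before the element is freed, so
 * a reader observing 0 has found a memory-ordering failure (counted in
 * n_rcu_torture_mberror).
 */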

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
        { 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
        { 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_allocerror;
static long n_rcu_torture_boost_afferror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
static struct list_head rcu_torture_removed;
static cpumask_var_t shuffle_tmp_mask;

static int stutter_pause_test;

#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
#define RCUTORTURE_RUNNABLE_INIT 1
#else
#define RCUTORTURE_RUNNABLE_INIT 0
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;

#ifdef CONFIG_RCU_BOOST
#define rcu_can_boost() 1
#else /* #ifdef CONFIG_RCU_BOOST */
#define rcu_can_boost() 0
#endif /* #else #ifdef CONFIG_RCU_BOOST */

static unsigned long boost_starttime;   /* jiffies of next boost test start. */
DEFINE_MUTEX(boost_mutex);              /* protect setting boost_starttime */
                                        /*  and boost task create/destroy. */

/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */

#define FULLSTOP_DONTSTOP 0     /* Normal operation. */
#define FULLSTOP_SHUTDOWN 1     /* System shutdown with rcutorture running. */
#define FULLSTOP_RMMOD    2     /* Normal rmmod of rcutorture. */
static int fullstop = FULLSTOP_RMMOD;
/*
 * Protect fullstop transitions and spawning of kthreads.
 */
static DEFINE_MUTEX(fullstop_mutex);

/*
 * Detect and respond to a system shutdown.
 */
static int
rcutorture_shutdown_notify(struct notifier_block *unused1,
                           unsigned long unused2, void *unused3)
{
        mutex_lock(&fullstop_mutex);
        if (fullstop == FULLSTOP_DONTSTOP)
                fullstop = FULLSTOP_SHUTDOWN;
        else
                printk(KERN_WARNING /* but going down anyway, so... */
                       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
        mutex_unlock(&fullstop_mutex);
        return NOTIFY_DONE;
}

/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
static void rcutorture_shutdown_absorb(char *title)
{
        if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
                printk(KERN_NOTICE
                       "rcutorture thread %s parking due to system shutdown\n",
                       title);
                schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
        }
}

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
        struct list_head *p;

        spin_lock_bh(&rcu_torture_lock);
        if (list_empty(&rcu_torture_freelist)) {
                atomic_inc(&n_rcu_torture_alloc_fail);
                spin_unlock_bh(&rcu_torture_lock);
                return NULL;
        }
        atomic_inc(&n_rcu_torture_alloc);
        p = rcu_torture_freelist.next;
        list_del_init(p);
        spin_unlock_bh(&rcu_torture_lock);
        return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
        atomic_inc(&n_rcu_torture_free);
        spin_lock_bh(&rcu_torture_lock);
        list_add_tail(&p->rtort_free, &rcu_torture_freelist);
        spin_unlock_bh(&rcu_torture_lock);
}

struct rcu_random_state {
        unsigned long rrs_state;
        long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD  479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
        if (--rrsp->rrs_count < 0) {
                rrsp->rrs_state += (unsigned long)local_clock();
                rrsp->rrs_count = RCU_RANDOM_REFRESH;
        }
        rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
        return swahw32(rrsp->rrs_state);
}
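
/*
 * Example (illustrative): every RCU_RANDOM_REFRESH calls the state is
 * re-seeded from local_clock(); each call then advances
 * state = state * RCU_RANDOM_MULT + RCU_RANDOM_ADD and returns the
 * halfword-swapped (swahw32()) result.  Callers typically mask or take
 * a modulus, as in:
 *
 *      DEFINE_RCU_RANDOM(rand);
 *      udelay(rcu_random(&rand) & 0x3ff);  (a 0-1023 microsecond delay)
 */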

static void
rcu_stutter_wait(char *title)
{
        while (stutter_pause_test || !rcutorture_runnable) {
                if (rcutorture_runnable)
                        schedule_timeout_interruptible(1);
                else
                        schedule_timeout_interruptible(round_jiffies_relative(HZ));
                rcutorture_shutdown_absorb(title);
        }
}
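
/*
 * Every torture kthread calls rcu_stutter_wait() from its main loop.  It
 * naps in one-jiffy increments while rcu_torture_stutter() has set
 * stutter_pause_test, and in roughly one-second increments whenever
 * rcutorture_runnable has been cleared externally.
 */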

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
        void (*init)(void);
        void (*cleanup)(void);
        int (*readlock)(void);
        void (*read_delay)(struct rcu_random_state *rrsp);
        void (*readunlock)(int idx);
        int (*completed)(void);
        void (*deferred_free)(struct rcu_torture *p);
        void (*sync)(void);
        void (*cb_barrier)(void);
        void (*fqs)(void);
        int (*stats)(char *page);
        int irq_capable;
        int can_boost;
        char *name;
};

static struct rcu_torture_ops *cur_ops;
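
/*
 * Dispatch pattern used by the readers below (sketch):
 *
 *      idx = cur_ops->readlock();
 *      ... read-side critical section ...
 *      cur_ops->readunlock(idx);
 *
 * The idx value exists for SRCU, whose srcu_read_unlock() needs the
 * value returned by srcu_read_lock(); the other flavors ignore it.
 */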

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
        rcu_read_lock();
        return 0;
}

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
        const unsigned long shortdelay_us = 200;
        const unsigned long longdelay_ms = 50;

        /* We want a short delay sometimes to make a reader delay the grace
         * period, and we want a long delay occasionally to trigger
         * force_quiescent_state. */

        if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
                mdelay(longdelay_ms);
        if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
                udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
        if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000)))
                preempt_schedule();  /* No QS if preempt_disable() in effect */
#endif
}
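
/*
 * Worked probabilities (illustrative): with nrealreaders == 4, the 50ms
 * delay fires once per 4 * 2000 * 50 = 400,000 calls and the 200us delay
 * once per 4 * 2 * 200 = 1,600 calls, so long delays are rare enough not
 * to stall the test yet frequent enough to provoke force_quiescent_state.
 */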

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
        rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
        return rcu_batches_completed();
}

static void
rcu_torture_cb(struct rcu_head *p)
{
        int i;
        struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

        if (fullstop != FULLSTOP_DONTSTOP) {
                /* Test is ending, just drop callbacks on the floor. */
                /* The next initialization will pick up the pieces. */
                return;
        }
        i = rp->rtort_pipe_count;
        if (i > RCU_TORTURE_PIPE_LEN)
                i = RCU_TORTURE_PIPE_LEN;
        atomic_inc(&rcu_torture_wcount[i]);
        if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
                rp->rtort_mbtest = 0;
                rcu_torture_free(rp);
        } else
                cur_ops->deferred_free(rp);
}

static int rcu_no_completed(void)
{
        return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_ops = {
        .init           = NULL,
        .cleanup        = NULL,
        .readlock       = rcu_torture_read_lock,
        .read_delay     = rcu_read_delay,
        .readunlock     = rcu_torture_read_unlock,
        .completed      = rcu_torture_completed,
        .deferred_free  = rcu_torture_deferred_free,
        .sync           = synchronize_rcu,
        .cb_barrier     = rcu_barrier,
        .fqs            = rcu_force_quiescent_state,
        .stats          = NULL,
        .irq_capable    = 1,
        .can_boost      = rcu_can_boost(),
        .name           = "rcu"
};
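
/*
 * This vector is chosen by the default torture_type="rcu"; see the
 * strcmp() scan over torture_ops[] in rcu_torture_init() below.
 */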

static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
        int i;
        struct rcu_torture *rp;
        struct rcu_torture *rp1;

        cur_ops->sync();
        list_add(&p->rtort_free, &rcu_torture_removed);
        list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
                i = rp->rtort_pipe_count;
                if (i > RCU_TORTURE_PIPE_LEN)
                        i = RCU_TORTURE_PIPE_LEN;
                atomic_inc(&rcu_torture_wcount[i]);
                if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
                        rp->rtort_mbtest = 0;
                        list_del(&rp->rtort_free);
                        rcu_torture_free(rp);
                }
        }
}

static void rcu_sync_torture_init(void)
{
        INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_sync_ops = {
        .init           = rcu_sync_torture_init,
        .cleanup        = NULL,
        .readlock       = rcu_torture_read_lock,
        .read_delay     = rcu_read_delay,
        .readunlock     = rcu_torture_read_unlock,
        .completed      = rcu_torture_completed,
        .deferred_free  = rcu_sync_torture_deferred_free,
        .sync           = synchronize_rcu,
        .cb_barrier     = NULL,
        .fqs            = rcu_force_quiescent_state,
        .stats          = NULL,
        .irq_capable    = 1,
        .can_boost      = rcu_can_boost(),
        .name           = "rcu_sync"
};

static struct rcu_torture_ops rcu_expedited_ops = {
        .init           = rcu_sync_torture_init,
        .cleanup        = NULL,
        .readlock       = rcu_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = rcu_torture_read_unlock,
        .completed      = rcu_no_completed,
        .deferred_free  = rcu_sync_torture_deferred_free,
        .sync           = synchronize_rcu_expedited,
        .cb_barrier     = NULL,
        .fqs            = rcu_force_quiescent_state,
        .stats          = NULL,
        .irq_capable    = 1,
        .can_boost      = rcu_can_boost(),
        .name           = "rcu_expedited"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
        rcu_read_lock_bh();
        return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
        rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
        return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

struct rcu_bh_torture_synchronize {
        struct rcu_head head;
        struct completion completion;
};

static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
{
        struct rcu_bh_torture_synchronize *rcu;

        rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
        complete(&rcu->completion);
}

static void rcu_bh_torture_synchronize(void)
{
        struct rcu_bh_torture_synchronize rcu;

        init_rcu_head_on_stack(&rcu.head);
        init_completion(&rcu.completion);
        call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
        wait_for_completion(&rcu.completion);
        destroy_rcu_head_on_stack(&rcu.head);
}
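
/*
 * The function above builds a synchronous grace-period wait out of an
 * asynchronous primitive: post a callback with call_rcu_bh(), then block
 * on a completion that the callback fires once a grace period elapses.
 */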

static struct rcu_torture_ops rcu_bh_ops = {
        .init           = NULL,
        .cleanup        = NULL,
        .readlock       = rcu_bh_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = rcu_bh_torture_read_unlock,
        .completed      = rcu_bh_torture_completed,
        .deferred_free  = rcu_bh_torture_deferred_free,
        .sync           = rcu_bh_torture_synchronize,
        .cb_barrier     = rcu_barrier_bh,
        .fqs            = rcu_bh_force_quiescent_state,
        .stats          = NULL,
        .irq_capable    = 1,
        .name           = "rcu_bh"
};

static struct rcu_torture_ops rcu_bh_sync_ops = {
        .init           = rcu_sync_torture_init,
        .cleanup        = NULL,
        .readlock       = rcu_bh_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = rcu_bh_torture_read_unlock,
        .completed      = rcu_bh_torture_completed,
        .deferred_free  = rcu_sync_torture_deferred_free,
        .sync           = rcu_bh_torture_synchronize,
        .cb_barrier     = NULL,
        .fqs            = rcu_bh_force_quiescent_state,
        .stats          = NULL,
        .irq_capable    = 1,
        .name           = "rcu_bh_sync"
};

/*
 * Definitions for srcu torture testing.
 */

static struct srcu_struct srcu_ctl;

static void srcu_torture_init(void)
{
        init_srcu_struct(&srcu_ctl);
        rcu_sync_torture_init();
}

static void srcu_torture_cleanup(void)
{
        synchronize_srcu(&srcu_ctl);
        cleanup_srcu_struct(&srcu_ctl);
}

static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
        return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct rcu_random_state *rrsp)
{
        long delay;
        const long uspertick = 1000000 / HZ;
        const long longdelay = 10;

        /* We want there to be long-running readers, but not all the time. */

        delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
        if (!delay)
                schedule_timeout_interruptible(longdelay);
        else
                rcu_read_delay(rrsp);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
        srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
        return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_synchronize(void)
{
        synchronize_srcu(&srcu_ctl);
}

static int srcu_torture_stats(char *page)
{
        int cnt = 0;
        int cpu;
        int idx = srcu_ctl.completed & 0x1;

        cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
                       torture_type, TORTURE_FLAG, idx);
        for_each_possible_cpu(cpu) {
                cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
                               per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
                               per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
        }
        cnt += sprintf(&page[cnt], "\n");
        return cnt;
}
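
/*
 * Sample output line (format only, counts invented):
 *
 *      srcu-torture: per-CPU(idx=0): 0(0,1) 1(0,0)
 *
 * Each entry reads cpu(count[!idx],count[idx]) for the two SRCU per-CPU
 * sample arrays.
 */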

static struct rcu_torture_ops srcu_ops = {
        .init           = srcu_torture_init,
        .cleanup        = srcu_torture_cleanup,
        .readlock       = srcu_torture_read_lock,
        .read_delay     = srcu_read_delay,
        .readunlock     = srcu_torture_read_unlock,
        .completed      = srcu_torture_completed,
        .deferred_free  = rcu_sync_torture_deferred_free,
        .sync           = srcu_torture_synchronize,
        .cb_barrier     = NULL,
        .stats          = srcu_torture_stats,
        .name           = "srcu"
};

static void srcu_torture_synchronize_expedited(void)
{
        synchronize_srcu_expedited(&srcu_ctl);
}

static struct rcu_torture_ops srcu_expedited_ops = {
        .init           = srcu_torture_init,
        .cleanup        = srcu_torture_cleanup,
        .readlock       = srcu_torture_read_lock,
        .read_delay     = srcu_read_delay,
        .readunlock     = srcu_torture_read_unlock,
        .completed      = srcu_torture_completed,
        .deferred_free  = rcu_sync_torture_deferred_free,
        .sync           = srcu_torture_synchronize_expedited,
        .cb_barrier     = NULL,
        .stats          = srcu_torture_stats,
        .name           = "srcu_expedited"
};

/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
        preempt_disable();
        return 0;
}

static void sched_torture_read_unlock(int idx)
{
        preempt_enable();
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static void sched_torture_synchronize(void)
{
        synchronize_sched();
}

static struct rcu_torture_ops sched_ops = {
        .init           = rcu_sync_torture_init,
        .cleanup        = NULL,
        .readlock       = sched_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = sched_torture_read_unlock,
        .completed      = rcu_no_completed,
        .deferred_free  = rcu_sched_torture_deferred_free,
        .sync           = sched_torture_synchronize,
        .cb_barrier     = rcu_barrier_sched,
        .fqs            = rcu_sched_force_quiescent_state,
        .stats          = NULL,
        .irq_capable    = 1,
        .name           = "sched"
};

static struct rcu_torture_ops sched_sync_ops = {
        .init           = rcu_sync_torture_init,
        .cleanup        = NULL,
        .readlock       = sched_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = sched_torture_read_unlock,
        .completed      = rcu_no_completed,
        .deferred_free  = rcu_sync_torture_deferred_free,
        .sync           = sched_torture_synchronize,
        .cb_barrier     = NULL,
        .fqs            = rcu_sched_force_quiescent_state,
        .stats          = NULL,
        .name           = "sched_sync"
};

static struct rcu_torture_ops sched_expedited_ops = {
        .init           = rcu_sync_torture_init,
        .cleanup        = NULL,
        .readlock       = sched_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = sched_torture_read_unlock,
        .completed      = rcu_no_completed,
        .deferred_free  = rcu_sync_torture_deferred_free,
        .sync           = synchronize_sched_expedited,
        .cb_barrier     = NULL,
        .fqs            = rcu_sched_force_quiescent_state,
        .stats          = NULL,
        .irq_capable    = 1,
        .name           = "sched_expedited"
};

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
        struct rcu_head rcu;
        int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
        struct rcu_boost_inflight *rbip =
                container_of(head, struct rcu_boost_inflight, rcu);

        smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */
        rbip->inflight = 0;
}
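
/*
 * Per the in-code comments, the smp_mb() above pairs with the smp_mb()
 * in rcu_torture_boost() that precedes setting ->inflight to 1, so that
 * ->inflight transitions are ordered against the RCU core's accesses.
 */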

static int rcu_torture_boost(void *arg)
{
        unsigned long call_rcu_time;
        unsigned long endtime;
        unsigned long oldstarttime;
        struct rcu_boost_inflight rbi = { .inflight = 0 };
        struct sched_param sp;

        VERBOSE_PRINTK_STRING("rcu_torture_boost started");

        /* Set real-time priority. */
        sp.sched_priority = 1;
        if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
                VERBOSE_PRINTK_STRING("rcu_torture_boost RT prio failed!");
                n_rcu_torture_boost_rterror++;
        }

        /* Each pass through the following loop does one boost-test cycle. */
        do {
                /* Wait for the next test interval. */
                oldstarttime = boost_starttime;
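                /*
                 * Wraparound-safe test: loop while jiffies < oldstarttime.
                 * The interval loop below uses the same idiom with endtime.
                 */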
                while (jiffies - oldstarttime > ULONG_MAX / 2) {
                        schedule_timeout_uninterruptible(1);
                        rcu_stutter_wait("rcu_torture_boost");
                        if (kthread_should_stop() ||
                            fullstop != FULLSTOP_DONTSTOP)
                                goto checkwait;
                }

                /* Do one boost-test interval. */
                endtime = oldstarttime + test_boost_duration * HZ;
                call_rcu_time = jiffies;
                while (jiffies - endtime > ULONG_MAX / 2) {
                        /* If we don't have a callback in flight, post one. */
                        if (!rbi.inflight) {
                                smp_mb(); /* RCU core before ->inflight = 1. */
                                rbi.inflight = 1;
                                call_rcu(&rbi.rcu, rcu_torture_boost_cb);
                                if (jiffies - call_rcu_time >
                                         test_boost_duration * HZ - HZ / 2) {
                                        VERBOSE_PRINTK_STRING("rcu_torture_boost boosting failed");
                                        n_rcu_torture_boost_failure++;
                                }
                                call_rcu_time = jiffies;
                        }
                        cond_resched();
                        rcu_stutter_wait("rcu_torture_boost");
                        if (kthread_should_stop() ||
                            fullstop != FULLSTOP_DONTSTOP)
                                goto checkwait;
                }

                /*
                 * Set the start time of the next test interval.
                 * Yes, this is vulnerable to long delays, but such
                 * delays simply cause a false negative for the next
                 * interval.  Besides, we are running at RT priority,
                 * so delays should be relatively rare.
                 */
                while (oldstarttime == boost_starttime) {
                        if (mutex_trylock(&boost_mutex)) {
                                boost_starttime = jiffies +
                                                  test_boost_interval * HZ;
                                n_rcu_torture_boosts++;
                                mutex_unlock(&boost_mutex);
                                break;
                        }
                        schedule_timeout_uninterruptible(1);
                }

                /* Go do the stutter. */
checkwait:      rcu_stutter_wait("rcu_torture_boost");
        } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

        /* Clean up and exit. */
        VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping");
        rcutorture_shutdown_absorb("rcu_torture_boost");
        while (!kthread_should_stop() || rbi.inflight)
                schedule_timeout_uninterruptible(1);
        smp_mb(); /* order accesses to ->inflight before stack-frame death. */
        return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
        unsigned long fqs_resume_time;
        int fqs_burst_remaining;

        VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
        do {
                fqs_resume_time = jiffies + fqs_stutter * HZ;
                while (jiffies - fqs_resume_time > LONG_MAX) {
                        schedule_timeout_interruptible(1);
                }
                fqs_burst_remaining = fqs_duration;
                while (fqs_burst_remaining > 0) {
                        cur_ops->fqs();
                        udelay(fqs_holdoff);
                        fqs_burst_remaining -= fqs_holdoff;
                }
                rcu_stutter_wait("rcu_torture_fqs");
        } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
        VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping");
        rcutorture_shutdown_absorb("rcu_torture_fqs");
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}
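
/*
 * Burst arithmetic (illustrative): each burst makes roughly
 * fqs_duration / fqs_holdoff calls to cur_ops->fqs(), one every
 * fqs_holdoff microseconds, with bursts separated by fqs_stutter
 * seconds.  Note that a nonzero fqs_duration therefore needs a nonzero
 * fqs_holdoff for the burst loop to terminate.
 */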

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
        int i;
        long oldbatch = rcu_batches_completed();
        struct rcu_torture *rp;
        struct rcu_torture *old_rp;
        static DEFINE_RCU_RANDOM(rand);

        VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
        set_user_nice(current, 19);

        do {
                schedule_timeout_uninterruptible(1);
                rp = rcu_torture_alloc();
                if (rp == NULL)
                        continue;
                rp->rtort_pipe_count = 0;
                udelay(rcu_random(&rand) & 0x3ff);
                old_rp = rcu_dereference_check(rcu_torture_current,
                                               current == writer_task);
                rp->rtort_mbtest = 1;
                rcu_assign_pointer(rcu_torture_current, rp);
                smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
                if (old_rp) {
                        i = old_rp->rtort_pipe_count;
                        if (i > RCU_TORTURE_PIPE_LEN)
                                i = RCU_TORTURE_PIPE_LEN;
                        atomic_inc(&rcu_torture_wcount[i]);
                        old_rp->rtort_pipe_count++;
                        cur_ops->deferred_free(old_rp);
                }
                rcu_torture_current_version++;
                oldbatch = cur_ops->completed();
                rcu_stutter_wait("rcu_torture_writer");
        } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
        VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
        rcutorture_shutdown_absorb("rcu_torture_writer");
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}
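
/*
 * The writer loop above is the canonical RCU update sequence:
 * initialize a new element, publish it with rcu_assign_pointer(), then
 * pass the displaced element to cur_ops->deferred_free(), which frees
 * it only after a grace period (via call_rcu() or cur_ops->sync(),
 * depending on the flavor under test).
 */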

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
        DEFINE_RCU_RANDOM(rand);

        VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
        set_user_nice(current, 19);

        do {
                schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
                udelay(rcu_random(&rand) & 0x3ff);
                cur_ops->sync();
                rcu_stutter_wait("rcu_torture_fakewriter");
        } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

        VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
        rcutorture_shutdown_absorb("rcu_torture_fakewriter");
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}
/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(unsigned long unused)
{
        int idx;
        int completed;
        static DEFINE_RCU_RANDOM(rand);
        static DEFINE_SPINLOCK(rand_lock);
        struct rcu_torture *p;
        int pipe_count;

        idx = cur_ops->readlock();
        completed = cur_ops->completed();
        p = rcu_dereference_check(rcu_torture_current,
                                  rcu_read_lock_held() ||
                                  rcu_read_lock_bh_held() ||
                                  rcu_read_lock_sched_held() ||
                                  srcu_read_lock_held(&srcu_ctl));
        if (p == NULL) {
                /* Leave because rcu_torture_writer is not yet underway */
                cur_ops->readunlock(idx);
                return;
        }
        if (p->rtort_mbtest == 0)
                atomic_inc(&n_rcu_torture_mberror);
        spin_lock(&rand_lock);
        cur_ops->read_delay(&rand);
        n_rcu_torture_timers++;
        spin_unlock(&rand_lock);
        preempt_disable();
        pipe_count = p->rtort_pipe_count;
        if (pipe_count > RCU_TORTURE_PIPE_LEN) {
                /* Should not happen, but... */
                pipe_count = RCU_TORTURE_PIPE_LEN;
        }
        __this_cpu_inc(rcu_torture_count[pipe_count]);
        completed = cur_ops->completed() - completed;
        if (completed > RCU_TORTURE_PIPE_LEN) {
                /* Should not happen, but... */
                completed = RCU_TORTURE_PIPE_LEN;
        }
        __this_cpu_inc(rcu_torture_batch[completed]);
        preempt_enable();
        cur_ops->readunlock(idx);
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
        int completed;
        int idx;
        DEFINE_RCU_RANDOM(rand);
        struct rcu_torture *p;
        int pipe_count;
        struct timer_list t;

        VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
        set_user_nice(current, 19);
        if (irqreader && cur_ops->irq_capable)
                setup_timer_on_stack(&t, rcu_torture_timer, 0);

        do {
                if (irqreader && cur_ops->irq_capable) {
                        if (!timer_pending(&t))
                                mod_timer(&t, jiffies + 1);
                }
                idx = cur_ops->readlock();
                completed = cur_ops->completed();
                p = rcu_dereference_check(rcu_torture_current,
                                          rcu_read_lock_held() ||
                                          rcu_read_lock_bh_held() ||
                                          rcu_read_lock_sched_held() ||
                                          srcu_read_lock_held(&srcu_ctl));
                if (p == NULL) {
                        /* Wait for rcu_torture_writer to get underway */
                        cur_ops->readunlock(idx);
                        schedule_timeout_interruptible(HZ);
                        continue;
                }
                if (p->rtort_mbtest == 0)
                        atomic_inc(&n_rcu_torture_mberror);
                cur_ops->read_delay(&rand);
                preempt_disable();
                pipe_count = p->rtort_pipe_count;
                if (pipe_count > RCU_TORTURE_PIPE_LEN) {
                        /* Should not happen, but... */
                        pipe_count = RCU_TORTURE_PIPE_LEN;
                }
                __this_cpu_inc(rcu_torture_count[pipe_count]);
                completed = cur_ops->completed() - completed;
                if (completed > RCU_TORTURE_PIPE_LEN) {
                        /* Should not happen, but... */
                        completed = RCU_TORTURE_PIPE_LEN;
                }
                __this_cpu_inc(rcu_torture_batch[completed]);
                preempt_enable();
                cur_ops->readunlock(idx);
                schedule();
                rcu_stutter_wait("rcu_torture_reader");
        } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
        VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
        rcutorture_shutdown_absorb("rcu_torture_reader");
        if (irqreader && cur_ops->irq_capable)
                del_timer_sync(&t);
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}
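
/*
 * When irqreader is set and the flavor is irq_capable, each pass through
 * the loop above re-arms a one-jiffy timer, so rcu_torture_timer() also
 * exercises the same read-side protocol from softirq context.
 */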

/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
static int
rcu_torture_printk(char *page)
{
        int cnt = 0;
        int cpu;
        int i;
        long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
        long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

        for_each_possible_cpu(cpu) {
                for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                        pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
                        batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
                }
        }
        for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
                if (pipesummary[i] != 0)
                        break;
        }
        cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
        cnt += sprintf(&page[cnt],
                       "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
                       "rtmbe: %d rtbke: %ld rtbre: %ld rtbae: %ld rtbafe: %ld "
                       "rtbf: %ld rtb: %ld nt: %ld",
                       rcu_torture_current,
                       rcu_torture_current_version,
                       list_empty(&rcu_torture_freelist),
                       atomic_read(&n_rcu_torture_alloc),
                       atomic_read(&n_rcu_torture_alloc_fail),
                       atomic_read(&n_rcu_torture_free),
                       atomic_read(&n_rcu_torture_mberror),
                       n_rcu_torture_boost_ktrerror,
                       n_rcu_torture_boost_rterror,
                       n_rcu_torture_boost_allocerror,
                       n_rcu_torture_boost_afferror,
                       n_rcu_torture_boost_failure,
                       n_rcu_torture_boosts,
                       n_rcu_torture_timers);
        if (atomic_read(&n_rcu_torture_mberror) != 0 ||
            n_rcu_torture_boost_ktrerror != 0 ||
            n_rcu_torture_boost_rterror != 0 ||
            n_rcu_torture_boost_allocerror != 0 ||
            n_rcu_torture_boost_afferror != 0 ||
            n_rcu_torture_boost_failure != 0)
                cnt += sprintf(&page[cnt], " !!!");
        cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
        if (i > 1) {
                cnt += sprintf(&page[cnt], "!!! ");
                atomic_inc(&n_rcu_torture_error);
                WARN_ON_ONCE(1);
        }
        cnt += sprintf(&page[cnt], "Reader Pipe: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
        cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
        cnt += sprintf(&page[cnt], "Reader Batch: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
        cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
        cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                cnt += sprintf(&page[cnt], " %d",
                               atomic_read(&rcu_torture_wcount[i]));
        }
        cnt += sprintf(&page[cnt], "\n");
        if (cur_ops->stats)
                cnt += cur_ops->stats(&page[cnt]);
        return cnt;
}
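
/*
 * Key to the abbreviations above: rtc (current test structure), ver
 * (writer version), tfle (freelist empty), rta/rtaf/rtf (allocations,
 * allocation failures, frees), rtmbe (memory-barrier errors),
 * rtbke/rtbre/rtbae/rtbafe (boost kthread, RT-priority, allocation, and
 * affinity errors), rtbf (boost failures), rtb (boost tests), and nt
 * (timer-handler reader invocations).
 */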

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
        int cnt;

        cnt = rcu_torture_printk(printk_buf);
        printk(KERN_ALERT "%s", printk_buf);
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
        VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
        do {
                schedule_timeout_interruptible(stat_interval * HZ);
                rcu_torture_stats_print();
                rcutorture_shutdown_absorb("rcu_torture_stats");
        } while (!kthread_should_stop());
        VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
        return 0;
}

static int rcu_idle_cpu;        /* Force all torture tasks off this CPU */

/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle.  As a
 * special case, @rcu_idle_cpu == -1 allows the tasks to run on all CPUs.
 */
static void rcu_torture_shuffle_tasks(void)
{
        int i;

        cpumask_setall(shuffle_tmp_mask);
        get_online_cpus();

        /* No point in shuffling if there is only one online CPU (ex: UP) */
        if (num_online_cpus() == 1) {
                put_online_cpus();
                return;
        }

        if (rcu_idle_cpu != -1)
                cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);

        set_cpus_allowed_ptr(current, shuffle_tmp_mask);

        if (reader_tasks) {
                for (i = 0; i < nrealreaders; i++)
                        if (reader_tasks[i])
                                set_cpus_allowed_ptr(reader_tasks[i],
                                                     shuffle_tmp_mask);
        }

        if (fakewriter_tasks) {
                for (i = 0; i < nfakewriters; i++)
                        if (fakewriter_tasks[i])
                                set_cpus_allowed_ptr(fakewriter_tasks[i],
                                                     shuffle_tmp_mask);
        }

        if (writer_task)
                set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);

        if (stats_task)
                set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);

        if (rcu_idle_cpu == -1)
                rcu_idle_cpu = num_online_cpus() - 1;
        else
                rcu_idle_cpu--;

        put_online_cpus();
}
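
/*
 * Example progression (4-CPU system): rcu_idle_cpu steps through 0, -1,
 * 3, 2, 1, 0, -1, ... so each CPU periodically spends one shuffle
 * interval with no torture tasks bound to it (-1 excludes no CPU).
 */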

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle in turn and shut off its timer tick.  This is
 * meant to test RCU's support for tickless idle CPUs.
 */
static int
rcu_torture_shuffle(void *arg)
{
        VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
        do {
                schedule_timeout_interruptible(shuffle_interval * HZ);
                rcu_torture_shuffle_tasks();
                rcutorture_shutdown_absorb("rcu_torture_shuffle");
        } while (!kthread_should_stop());
        VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
        return 0;
}

/* Cause the rcutorture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int
rcu_torture_stutter(void *arg)
{
        VERBOSE_PRINTK_STRING("rcu_torture_stutter task started");
        do {
                schedule_timeout_interruptible(stutter * HZ);
                stutter_pause_test = 1;
                if (!kthread_should_stop())
                        schedule_timeout_interruptible(stutter * HZ);
                stutter_pause_test = 0;
                rcutorture_shutdown_absorb("rcu_torture_stutter");
        } while (!kthread_should_stop());
        VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
        return 0;
}
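
/*
 * Duty cycle (illustrative): with the default stutter=5, all looping
 * kthreads run for roughly five seconds, pause for five seconds, and
 * repeat.
 */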
1242
1243static inline void
1244rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, char *tag)
1245{
1246        printk(KERN_ALERT "%s" TORTURE_FLAG
1247                "--- %s: nreaders=%d nfakewriters=%d "
1248                "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1249                "shuffle_interval=%d stutter=%d irqreader=%d "
1250                "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1251                "test_boost=%d/%d test_boost_interval=%d "
1252                "test_boost_duration=%d\n",
1253                torture_type, tag, nrealreaders, nfakewriters,
1254                stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1255                stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
1256                test_boost, cur_ops->can_boost,
1257                test_boost_interval, test_boost_duration);
1258}
1259
1260static struct notifier_block rcutorture_shutdown_nb = {
1261        .notifier_call = rcutorture_shutdown_notify,
1262};
1263
1264static void rcutorture_booster_cleanup(int cpu)
1265{
1266        struct task_struct *t;
1267
1268        if (boost_tasks[cpu] == NULL)
1269                return;
1270        mutex_lock(&boost_mutex);
1271        VERBOSE_PRINTK_STRING("Stopping rcu_torture_boost task");
1272        t = boost_tasks[cpu];
1273        boost_tasks[cpu] = NULL;
1274        mutex_unlock(&boost_mutex);
1275
1276        /* This must be outside of the mutex, otherwise deadlock! */
1277        kthread_stop(t);
1278}
1279
1280static int rcutorture_booster_init(int cpu)
1281{
1282        int retval;
1283
1284        if (boost_tasks[cpu] != NULL)
1285                return 0;  /* Already created, nothing more to do. */
1286
1287        /* Don't allow time recalculation while creating a new task. */
1288        mutex_lock(&boost_mutex);
1289        VERBOSE_PRINTK_STRING("Creating rcu_torture_boost task");
1290        boost_tasks[cpu] = kthread_create(rcu_torture_boost, NULL,
1291                                          "rcu_torture_boost");
1292        if (IS_ERR(boost_tasks[cpu])) {
1293                retval = PTR_ERR(boost_tasks[cpu]);
1294                VERBOSE_PRINTK_STRING("rcu_torture_boost task create failed");
1295                n_rcu_torture_boost_ktrerror++;
1296                boost_tasks[cpu] = NULL;
1297                mutex_unlock(&boost_mutex);
1298                return retval;
1299        }
1300        kthread_bind(boost_tasks[cpu], cpu);
1301        wake_up_process(boost_tasks[cpu]);
1302        mutex_unlock(&boost_mutex);
1303        return 0;
1304}
1305
1306static int rcutorture_cpu_notify(struct notifier_block *self,
1307                                 unsigned long action, void *hcpu)
1308{
1309        long cpu = (long)hcpu;
1310
1311        switch (action) {
1312        case CPU_ONLINE:
1313        case CPU_DOWN_FAILED:
1314                (void)rcutorture_booster_init(cpu);
1315                break;
1316        case CPU_DOWN_PREPARE:
1317                rcutorture_booster_cleanup(cpu);
1318                break;
1319        default:
1320                break;
1321        }
1322        return NOTIFY_OK;
1323}
1324
1325static struct notifier_block rcutorture_cpu_nb = {
1326        .notifier_call = rcutorture_cpu_notify,
1327};
1328
1329static void
1330rcu_torture_cleanup(void)
1331{
1332        int i;
1333
1334        mutex_lock(&fullstop_mutex);
1335        if (fullstop == FULLSTOP_SHUTDOWN) {
1336                printk(KERN_WARNING /* but going down anyway, so... */
1337                       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
1338                mutex_unlock(&fullstop_mutex);
1339                schedule_timeout_uninterruptible(10);
1340                if (cur_ops->cb_barrier != NULL)
1341                        cur_ops->cb_barrier();
1342                return;
1343        }
1344        fullstop = FULLSTOP_RMMOD;
1345        mutex_unlock(&fullstop_mutex);
1346        unregister_reboot_notifier(&rcutorture_shutdown_nb);
1347        if (stutter_task) {
1348                VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
1349                kthread_stop(stutter_task);
1350        }
1351        stutter_task = NULL;
1352        if (shuffler_task) {
1353                VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
1354                kthread_stop(shuffler_task);
1355                free_cpumask_var(shuffle_tmp_mask);
1356        }
1357        shuffler_task = NULL;
1358
1359        if (writer_task) {
1360                VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
1361                kthread_stop(writer_task);
1362        }
1363        writer_task = NULL;
1364
1365        if (reader_tasks) {
1366                for (i = 0; i < nrealreaders; i++) {
1367                        if (reader_tasks[i]) {
1368                                VERBOSE_PRINTK_STRING(
1369                                        "Stopping rcu_torture_reader task");
1370                                kthread_stop(reader_tasks[i]);
1371                        }
1372                        reader_tasks[i] = NULL;
1373                }
1374                kfree(reader_tasks);
1375                reader_tasks = NULL;
1376        }
1377        rcu_torture_current = NULL;
1378
1379        if (fakewriter_tasks) {
1380                for (i = 0; i < nfakewriters; i++) {
1381                        if (fakewriter_tasks[i]) {
1382                                VERBOSE_PRINTK_STRING(
1383                                        "Stopping rcu_torture_fakewriter task");
1384                                kthread_stop(fakewriter_tasks[i]);
1385                        }
1386                        fakewriter_tasks[i] = NULL;
1387                }
1388                kfree(fakewriter_tasks);
1389                fakewriter_tasks = NULL;
1390        }
1391
1392        if (stats_task) {
1393                VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
1394                kthread_stop(stats_task);
1395        }
1396        stats_task = NULL;
1397
1398        if (fqs_task) {
1399                VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task");
1400                kthread_stop(fqs_task);
1401        }
1402        fqs_task = NULL;
1403        if ((test_boost == 1 && cur_ops->can_boost) ||
1404            test_boost == 2) {
1405                unregister_cpu_notifier(&rcutorture_cpu_nb);
1406                for_each_possible_cpu(i)
1407                        rcutorture_booster_cleanup(i);
1408        }
1409
1410        /* Wait for all RCU callbacks to fire.  */
1411
1412        if (cur_ops->cb_barrier != NULL)
1413                cur_ops->cb_barrier();
1414
1415        rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
1416
1417        if (cur_ops->cleanup)
1418                cur_ops->cleanup();
1419        if (atomic_read(&n_rcu_torture_error))
1420                rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
1421        else
1422                rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
1423}

static int __init
rcu_torture_init(void)
{
        int i;
        int cpu;
        int firsterr = 0;
        static struct rcu_torture_ops *torture_ops[] =
                { &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
                  &rcu_bh_ops, &rcu_bh_sync_ops,
                  &srcu_ops, &srcu_expedited_ops,
                  &sched_ops, &sched_sync_ops, &sched_expedited_ops, };
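        /* The torture_type module parameter selects one of these by name. */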

        mutex_lock(&fullstop_mutex);

        /* Process args and tell the world that the torturer is on the job. */
        for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
                cur_ops = torture_ops[i];
                if (strcmp(torture_type, cur_ops->name) == 0)
                        break;
        }
        if (i == ARRAY_SIZE(torture_ops)) {
                printk(KERN_ALERT "rcu-torture: invalid torture type: \"%s\"\n",
                       torture_type);
                printk(KERN_ALERT "rcu-torture types:");
                for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
                        printk(KERN_ALERT " %s", torture_ops[i]->name);
                printk(KERN_ALERT "\n");
                mutex_unlock(&fullstop_mutex);
                return -EINVAL;
        }
        if (cur_ops->fqs == NULL && fqs_duration != 0) {
                printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero "
                                  "fqs_duration, fqs disabled.\n");
                fqs_duration = 0;
        }
        if (cur_ops->init)
                cur_ops->init(); /* no "goto unwind" prior to this point!!! */
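
        /*
         * Note that failures above unlock fullstop_mutex and return
         * directly: nothing has been allocated yet, so there is nothing
         * for the unwind path (which runs rcu_torture_cleanup()) to
         * tear down.
         */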

        if (nreaders >= 0)
                nrealreaders = nreaders;
        else
                nrealreaders = 2 * num_online_cpus();
        rcu_torture_print_module_parms(cur_ops, "Start of test");
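
        /*
         * fullstop is the global stop flag polled by the torture
         * kthreads: FULLSTOP_DONTSTOP arms the test, and the rmmod and
         * shutdown paths overwrite it to tell the threads to exit.
         */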
        fullstop = FULLSTOP_DONTSTOP;

        /* Set up the freelist. */

        INIT_LIST_HEAD(&rcu_torture_freelist);
        for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
                rcu_tortures[i].rtort_mbtest = 0;
                list_add_tail(&rcu_tortures[i].rtort_free,
                              &rcu_torture_freelist);
        }
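
        /*
         * rtort_mbtest starts at zero and is raised only while an
         * element is live; a reader that observes zero on the current
         * element reports it via n_rcu_torture_mberror.
         */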

        /* Initialize the statistics so that each run gets its own numbers. */

        rcu_torture_current = NULL;
        rcu_torture_current_version = 0;
        atomic_set(&n_rcu_torture_alloc, 0);
        atomic_set(&n_rcu_torture_alloc_fail, 0);
        atomic_set(&n_rcu_torture_free, 0);
        atomic_set(&n_rcu_torture_mberror, 0);
        atomic_set(&n_rcu_torture_error, 0);
        n_rcu_torture_boost_ktrerror = 0;
        n_rcu_torture_boost_rterror = 0;
        n_rcu_torture_boost_allocerror = 0;
        n_rcu_torture_boost_afferror = 0;
        n_rcu_torture_boost_failure = 0;
        n_rcu_torture_boosts = 0;
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                atomic_set(&rcu_torture_wcount[i], 0);
        for_each_possible_cpu(cpu) {
                for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                        per_cpu(rcu_torture_count, cpu)[i] = 0;
                        per_cpu(rcu_torture_batch, cpu)[i] = 0;
                }
        }

        /* Start up the kthreads. */

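        /*
         * Each kthread_run() below is checked with IS_ERR():  on
         * failure the error code is saved in firsterr, the stale task
         * pointer is cleared, and control jumps to the unwind path,
         * which stops any threads already started.
         */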
        VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
        writer_task = kthread_run(rcu_torture_writer, NULL,
                                  "rcu_torture_writer");
        if (IS_ERR(writer_task)) {
                firsterr = PTR_ERR(writer_task);
                VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
                writer_task = NULL;
                goto unwind;
        }
        fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
                                   GFP_KERNEL);
        if (fakewriter_tasks == NULL) {
                VERBOSE_PRINTK_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nfakewriters; i++) {
                VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
                fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
                                                  "rcu_torture_fakewriter");
                if (IS_ERR(fakewriter_tasks[i])) {
                        firsterr = PTR_ERR(fakewriter_tasks[i]);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
                        fakewriter_tasks[i] = NULL;
                        goto unwind;
                }
        }
        reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
                               GFP_KERNEL);
        if (reader_tasks == NULL) {
                VERBOSE_PRINTK_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nrealreaders; i++) {
                VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
                reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
                                              "rcu_torture_reader");
                if (IS_ERR(reader_tasks[i])) {
                        firsterr = PTR_ERR(reader_tasks[i]);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
                        reader_tasks[i] = NULL;
                        goto unwind;
                }
        }
        if (stat_interval > 0) {
                VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
                stats_task = kthread_run(rcu_torture_stats, NULL,
                                        "rcu_torture_stats");
                if (IS_ERR(stats_task)) {
                        firsterr = PTR_ERR(stats_task);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
                        stats_task = NULL;
                        goto unwind;
                }
        }
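        /*
         * When testing tickless idle, keep one CPU (the highest-
         * numbered one currently online) free of torture tasks so
         * that it can actually enter dyntick-idle mode.
         */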
        if (test_no_idle_hz) {
                rcu_idle_cpu = num_online_cpus() - 1;

                if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
                        firsterr = -ENOMEM;
                        VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
                        goto unwind;
                }

                /* Create the shuffler thread */
                shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
                                          "rcu_torture_shuffle");
                if (IS_ERR(shuffler_task)) {
                        free_cpumask_var(shuffle_tmp_mask);
                        firsterr = PTR_ERR(shuffler_task);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
                        shuffler_task = NULL;
                        goto unwind;
                }
        }
        if (stutter < 0)
                stutter = 0;
        if (stutter) {
                /* Create the stutter thread */
                stutter_task = kthread_run(rcu_torture_stutter, NULL,
                                          "rcu_torture_stutter");
                if (IS_ERR(stutter_task)) {
                        firsterr = PTR_ERR(stutter_task);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create stutter");
                        stutter_task = NULL;
                        goto unwind;
                }
        }
        if (fqs_duration < 0)
                fqs_duration = 0;
        if (fqs_duration) {
                /* Create the fqs thread */
                fqs_task = kthread_run(rcu_torture_fqs, NULL,
                                       "rcu_torture_fqs");
                if (IS_ERR(fqs_task)) {
                        firsterr = PTR_ERR(fqs_task);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create fqs");
                        fqs_task = NULL;
                        goto unwind;
                }
        }
        if (test_boost_interval < 1)
                test_boost_interval = 1;
        if (test_boost_duration < 2)
                test_boost_duration = 2;
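
        /*
         * test_boost: 0 = never boost-test, 1 = only if the selected
         * flavor supports priority boosting (cur_ops->can_boost),
         * 2 = always.
         */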
        if ((test_boost == 1 && cur_ops->can_boost) ||
            test_boost == 2) {
                int retval;

                boost_starttime = jiffies + test_boost_interval * HZ;
                register_cpu_notifier(&rcutorture_cpu_nb);
                for_each_possible_cpu(i) {
                        if (cpu_is_offline(i))
                                continue;  /* Heuristic: CPU can go offline. */
                        retval = rcutorture_booster_init(i);
                        if (retval < 0) {
                                firsterr = retval;
                                goto unwind;
                        }
                }
        }
        register_reboot_notifier(&rcutorture_shutdown_nb);
        mutex_unlock(&fullstop_mutex);
        return 0;

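        /*
         * Error path.  fullstop_mutex must be dropped before calling
         * rcu_torture_cleanup(), which acquires it itself (note the
         * unlocks in the cleanup path above).
         */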
unwind:
        mutex_unlock(&fullstop_mutex);
        rcu_torture_cleanup();
        return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);
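
/*
 * Example usage (a hypothetical invocation; the valid torture_type
 * strings come from the torture_ops table above and are printed by
 * the invalid-type error path):
 *
 *      modprobe rcutorture torture_type=rcu nreaders=4 stat_interval=30
 *      ...let the test run for a while...
 *      rmmod rcutorture
 *
 * Pass/fail is reported at rmmod time via the "End of test: SUCCESS"
 * or "End of test: FAILURE" line.
 */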