linux/kernel/rcutree.c
   1/*
   2 * Read-Copy Update mechanism for mutual exclusion
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License as published by
   6 * the Free Software Foundation; either version 2 of the License, or
   7 * (at your option) any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write to the Free Software
  16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  17 *
  18 * Copyright IBM Corporation, 2008
  19 *
  20 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
  21 *          Manfred Spraul <manfred@colorfullife.com>
  22 *          Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
  23 *
  24 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
  25 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  26 *
  27 * For detailed explanation of Read-Copy Update mechanism see -
  28 *      Documentation/RCU
  29 */
  30#include <linux/types.h>
  31#include <linux/kernel.h>
  32#include <linux/init.h>
  33#include <linux/spinlock.h>
  34#include <linux/smp.h>
  35#include <linux/rcupdate.h>
  36#include <linux/interrupt.h>
  37#include <linux/sched.h>
  38#include <linux/nmi.h>
  39#include <linux/atomic.h>
  40#include <linux/bitops.h>
  41#include <linux/export.h>
  42#include <linux/completion.h>
  43#include <linux/moduleparam.h>
  44#include <linux/percpu.h>
  45#include <linux/notifier.h>
  46#include <linux/cpu.h>
  47#include <linux/mutex.h>
  48#include <linux/time.h>
  49#include <linux/kernel_stat.h>
  50#include <linux/wait.h>
  51#include <linux/kthread.h>
  52#include <linux/prefetch.h>
  53#include <linux/delay.h>
  54#include <linux/stop_machine.h>
  55#include <linux/random.h>
  56#include <linux/ftrace_event.h>
  57#include <linux/suspend.h>
  58
  59#include "rcutree.h"
  60#include <trace/events/rcu.h>
  61
  62#include "rcu.h"
  63
  64/*
  65 * Strings used in tracepoints need to be exported via the
  66 * tracing system such that tools like perf and trace-cmd can
  67 * translate the string address pointers to actual text.
  68 */
  69#define TPS(x)  tracepoint_string(x)
  70
  71/* Data structures. */
  72
  73static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
  74static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
  75
  76/*
  77 * In order to export the rcu_state name to the tracing tools, it
  78 * needs to be added in the __tracepoint_string section.
  79 * This requires defining a separate variable tp_<sname>_varname
  80 * that points to the string being used, which allows the userspace
  81 * tracing tools to map the string address back to the
  82 * corresponding text.
  83 */
  84#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
  85static char sname##_varname[] = #sname; \
  86static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname; \
  87struct rcu_state sname##_state = { \
  88        .level = { &sname##_state.node[0] }, \
  89        .call = cr, \
  90        .fqs_state = RCU_GP_IDLE, \
  91        .gpnum = 0UL - 300UL, \
  92        .completed = 0UL - 300UL, \
  93        .orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
  94        .orphan_nxttail = &sname##_state.orphan_nxtlist, \
  95        .orphan_donetail = &sname##_state.orphan_donelist, \
  96        .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
  97        .onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
  98        .name = sname##_varname, \
  99        .abbr = sabbr, \
 100}; \
 101DEFINE_PER_CPU(struct rcu_data, sname##_data)
 102
 103RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
 104RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
 105
 106static struct rcu_state *rcu_state;
 107LIST_HEAD(rcu_struct_flavors);
 108
 109/* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */
 110static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF;
 111module_param(rcu_fanout_leaf, int, 0444);
 112int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
 113static int num_rcu_lvl[] = {  /* Number of rcu_nodes at specified level. */
 114        NUM_RCU_LVL_0,
 115        NUM_RCU_LVL_1,
 116        NUM_RCU_LVL_2,
 117        NUM_RCU_LVL_3,
 118        NUM_RCU_LVL_4,
 119};
 120int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
 121
 122/*
 123 * The rcu_scheduler_active variable transitions from zero to one just
 124 * before the first task is spawned.  So when this variable is zero, RCU
 125 * can assume that there is but one task, allowing RCU to (for example)
 126 * optimize synchronize_sched() to a simple barrier().  When this variable
 127 * is one, RCU must actually do all the hard work required to detect real
 128 * grace periods.  This variable is also used to suppress boot-time false
 129 * positives from lockdep-RCU error checking.
 130 */
 131int rcu_scheduler_active __read_mostly;
 132EXPORT_SYMBOL_GPL(rcu_scheduler_active);
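/*
 * Illustrative sketch: a minimal RCU-sched update-side pattern that relies
 * on the guarantee described above.  While rcu_scheduler_active is zero
 * there is only one task, so the synchronize_sched() below is essentially
 * free; once it becomes one, a real grace period must elapse.  The
 * "rcu_example_" names are hypothetical and exist only for illustration.
 */
static int __rcu *rcu_example_sched_ptr;

static void __maybe_unused rcu_example_sched_publish(int *newp)
{
        rcu_assign_pointer(rcu_example_sched_ptr, newp);
        synchronize_sched();  /* Wait for pre-existing preempt-disabled readers. */
        /* Whatever newp replaced may now be reclaimed by the caller. */
}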
 133
 134/*
 135 * The rcu_scheduler_fully_active variable transitions from zero to one
 136 * during the early_initcall() processing, which is after the scheduler
 137 * is capable of creating new tasks.  So RCU processing (for example,
 138 * creating tasks for RCU priority boosting) must be delayed until after
 139 * rcu_scheduler_fully_active transitions from zero to one.  We also
 140 * currently delay invocation of any RCU callbacks until after this point.
 141 *
 142 * It might later prove better for people registering RCU callbacks during
 143 * early boot to take responsibility for these callbacks, but one step at
 144 * a time.
 145 */
 146static int rcu_scheduler_fully_active __read_mostly;
 147
 148#ifdef CONFIG_RCU_BOOST
 149
 150/*
 151 * Control variables for per-CPU and per-rcu_node kthreads.  These
 152 * handle all flavors of RCU.
 153 */
 154static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
 155DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 156DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 157DEFINE_PER_CPU(char, rcu_cpu_has_work);
 158
 159#endif /* #ifdef CONFIG_RCU_BOOST */
 160
 161static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 162static void invoke_rcu_core(void);
 163static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 164
 165/*
 166 * Track the rcutorture test sequence number and the update version
 167 * number within a given test.  The rcutorture_testseq is incremented
 168 * on every rcutorture module load and unload, so has an odd value
 169 * when a test is running.  The rcutorture_vernum is set to zero
 170 * when rcutorture starts and is incremented on each rcutorture update.
 171 * These variables enable correlating rcutorture output with the
 172 * RCU tracing information.
 173 */
 174unsigned long rcutorture_testseq;
 175unsigned long rcutorture_vernum;
 176
 177/*
 178 * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
 179 * permit this function to be invoked without holding the root rcu_node
 180 * structure's ->lock, but of course results can be subject to change.
 181 */
 182static int rcu_gp_in_progress(struct rcu_state *rsp)
 183{
 184        return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
 185}
 186
 187/*
 188 * Note a quiescent state.  Because we do not need to know
 189 * how many quiescent states passed, just if there was at least
 190 * one since the start of the grace period, this just sets a flag.
 191 * The caller must have disabled preemption.
 192 */
 193void rcu_sched_qs(int cpu)
 194{
 195        struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
 196
 197        if (rdp->passed_quiesce == 0)
 198                trace_rcu_grace_period(TPS("rcu_sched"), rdp->gpnum, TPS("cpuqs"));
 199        rdp->passed_quiesce = 1;
 200}
 201
 202void rcu_bh_qs(int cpu)
 203{
 204        struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
 205
 206        if (rdp->passed_quiesce == 0)
 207                trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs"));
 208        rdp->passed_quiesce = 1;
 209}
 210
 211/*
 212 * Note a context switch.  This is a quiescent state for RCU-sched,
 213 * and requires special handling for preemptible RCU.
 214 * The caller must have disabled preemption.
 215 */
 216void rcu_note_context_switch(int cpu)
 217{
 218        trace_rcu_utilization(TPS("Start context switch"));
 219        rcu_sched_qs(cpu);
 220        rcu_preempt_note_context_switch(cpu);
 221        trace_rcu_utilization(TPS("End context switch"));
 222}
 223EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 224
 225DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 226        .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
 227        .dynticks = ATOMIC_INIT(1),
 228#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
 229        .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
 230        .dynticks_idle = ATOMIC_INIT(1),
 231#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 232};
 233
 234static long blimit = 10;        /* Maximum callbacks per rcu_do_batch. */
 235static long qhimark = 10000;    /* If this many pending, ignore blimit. */
 236static long qlowmark = 100;     /* Once only this many pending, use blimit. */
 237
 238module_param(blimit, long, 0444);
 239module_param(qhimark, long, 0444);
 240module_param(qlowmark, long, 0444);
 241
 242static ulong jiffies_till_first_fqs = ULONG_MAX;
 243static ulong jiffies_till_next_fqs = ULONG_MAX;
 244
 245module_param(jiffies_till_first_fqs, ulong, 0644);
 246module_param(jiffies_till_next_fqs, ulong, 0644);
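/*
 * Illustrative note: because this file is normally built into the kernel,
 * the module parameters above are set on the kernel command line with an
 * "rcutree." prefix, for example (the values shown are arbitrary):
 *
 *      rcutree.blimit=20 rcutree.qhimark=5000 rcutree.jiffies_till_first_fqs=3
 */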
 247
 248static void rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
 249                                  struct rcu_data *rdp);
 250static void force_qs_rnp(struct rcu_state *rsp,
 251                         int (*f)(struct rcu_data *rsp, bool *isidle,
 252                                  unsigned long *maxj),
 253                         bool *isidle, unsigned long *maxj);
 254static void force_quiescent_state(struct rcu_state *rsp);
 255static int rcu_pending(int cpu);
 256
 257/*
 258 * Return the number of RCU-sched batches processed thus far for debug & stats.
 259 */
 260long rcu_batches_completed_sched(void)
 261{
 262        return rcu_sched_state.completed;
 263}
 264EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
 265
 266/*
 267 * Return the number of RCU BH batches processed thus far for debug & stats.
 268 */
 269long rcu_batches_completed_bh(void)
 270{
 271        return rcu_bh_state.completed;
 272}
 273EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
 274
 275/*
 276 * Force a quiescent state for RCU BH.
 277 */
 278void rcu_bh_force_quiescent_state(void)
 279{
 280        force_quiescent_state(&rcu_bh_state);
 281}
 282EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
 283
 284/*
 285 * Record the number of times rcutorture tests have been initiated and
 286 * terminated.  This information allows the debugfs tracing stats to be
 287 * correlated to the rcutorture messages, even when the rcutorture module
 288 * is being repeatedly loaded and unloaded.  In other words, we cannot
 289 * store this state in rcutorture itself.
 290 */
 291void rcutorture_record_test_transition(void)
 292{
 293        rcutorture_testseq++;
 294        rcutorture_vernum = 0;
 295}
 296EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
 297
 298/*
 299 * Record the number of writer passes through the current rcutorture test.
 300 * This is also used to correlate debugfs tracing stats with the rcutorture
 301 * messages.
 302 */
 303void rcutorture_record_progress(unsigned long vernum)
 304{
 305        rcutorture_vernum++;
 306}
 307EXPORT_SYMBOL_GPL(rcutorture_record_progress);
 308
 309/*
 310 * Force a quiescent state for RCU-sched.
 311 */
 312void rcu_sched_force_quiescent_state(void)
 313{
 314        force_quiescent_state(&rcu_sched_state);
 315}
 316EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
 317
 318/*
 319 * Does the CPU have callbacks ready to be invoked?
 320 */
 321static int
 322cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
 323{
 324        return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL] &&
 325               rdp->nxttail[RCU_DONE_TAIL] != NULL;
 326}
 327
 328/*
 329 * Does the current CPU require a not-yet-started grace period?
 330 * The caller must have disabled interrupts to prevent races with
 331 * normal callback registry.
 332 */
 333static int
 334cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 335{
 336        int i;
 337
 338        if (rcu_gp_in_progress(rsp))
 339                return 0;  /* No, a grace period is already in progress. */
 340        if (rcu_nocb_needs_gp(rsp))
 341                return 1;  /* Yes, a no-CBs CPU needs one. */
 342        if (!rdp->nxttail[RCU_NEXT_TAIL])
 343                return 0;  /* No, this is a no-CBs (or offline) CPU. */
 344        if (*rdp->nxttail[RCU_NEXT_READY_TAIL])
 345                return 1;  /* Yes, this CPU has newly registered callbacks. */
 346        for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
 347                if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
 348                    ULONG_CMP_LT(ACCESS_ONCE(rsp->completed),
 349                                 rdp->nxtcompleted[i]))
 350                        return 1;  /* Yes, CBs for future grace period. */
 351        return 0; /* No grace period needed. */
 352}
 353
 354/*
 355 * Return the root node of the specified rcu_state structure.
 356 */
 357static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 358{
 359        return &rsp->node[0];
 360}
 361
 362/*
 363 * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
 364 *
 365 * If the new value of the ->dynticks_nesting counter is now zero,
 366 * we really have entered idle, and must do the appropriate accounting.
 367 * The caller must have disabled interrupts.
 368 */
 369static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
 370                                bool user)
 371{
 372        trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
 373        if (!user && !is_idle_task(current)) {
 374                struct task_struct *idle = idle_task(smp_processor_id());
 375
 376                trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
 377                ftrace_dump(DUMP_ORIG);
 378                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
 379                          current->pid, current->comm,
 380                          idle->pid, idle->comm); /* must be idle task! */
 381        }
 382        rcu_prepare_for_idle(smp_processor_id());
 383        /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
 384        smp_mb__before_atomic_inc();  /* See above. */
 385        atomic_inc(&rdtp->dynticks);
 386        smp_mb__after_atomic_inc();  /* Force ordering with next sojourn. */
 387        WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 388
 389        /*
 390         * It is illegal to enter an extended quiescent state while
 391         * in an RCU read-side critical section.
 392         */
 393        rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
 394                           "Illegal idle entry in RCU read-side critical section.");
 395        rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),
 396                           "Illegal idle entry in RCU-bh read-side critical section.");
 397        rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),
 398                           "Illegal idle entry in RCU-sched read-side critical section.");
 399}
 400
 401/*
 402 * Enter an RCU extended quiescent state, which can be either the
 403 * idle loop or adaptive-tickless usermode execution.
 404 */
 405static void rcu_eqs_enter(bool user)
 406{
 407        long long oldval;
 408        struct rcu_dynticks *rdtp;
 409
 410        rdtp = &__get_cpu_var(rcu_dynticks);
 411        oldval = rdtp->dynticks_nesting;
 412        WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
 413        if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE)
 414                rdtp->dynticks_nesting = 0;
 415        else
 416                rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
 417        rcu_eqs_enter_common(rdtp, oldval, user);
 418}
 419
 420/**
 421 * rcu_idle_enter - inform RCU that current CPU is entering idle
 422 *
 423 * Enter idle mode, in other words, -leave- the mode in which RCU
 424 * read-side critical sections can occur.  (Though RCU read-side
 425 * critical sections can occur in irq handlers in idle, a possibility
 426 * handled by irq_enter() and irq_exit().)
 427 *
 428 * We crowbar the ->dynticks_nesting field to zero to allow for
 429 * the possibility of usermode upcalls having messed up our count
 430 * of interrupt nesting level during the prior busy period.
 431 */
 432void rcu_idle_enter(void)
 433{
 434        unsigned long flags;
 435
 436        local_irq_save(flags);
 437        rcu_eqs_enter(false);
 438        rcu_sysidle_enter(&__get_cpu_var(rcu_dynticks), 0);
 439        local_irq_restore(flags);
 440}
 441EXPORT_SYMBOL_GPL(rcu_idle_enter);
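/*
 * Illustrative sketch of how an architecture's idle loop is expected to
 * bracket its low-power wait with rcu_idle_enter()/rcu_idle_exit(), so
 * that RCU can ignore the CPU while it sleeps.  cpu_relax() merely stands
 * in for the arch-specific wait, and the function name is hypothetical.
 */
static void __maybe_unused rcu_example_idle_once(void)
{
        rcu_idle_enter();       /* RCU stops watching this CPU. */
        cpu_relax();            /* Arch low-power wait would go here. */
        rcu_idle_exit();        /* RCU watches this CPU again. */
}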
 442
 443#ifdef CONFIG_RCU_USER_QS
 444/**
 445 * rcu_user_enter - inform RCU that we are resuming userspace.
 446 *
 447 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 448 * is permitted between this call and rcu_user_exit(). This way the
 449 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 450 * when the CPU runs in userspace.
 451 */
 452void rcu_user_enter(void)
 453{
 454        rcu_eqs_enter(1);
 455}
 456#endif /* CONFIG_RCU_USER_QS */
 457
 458/**
 459 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 460 *
 461 * Exit from an interrupt handler, which might possibly result in entering
 462 * idle mode, in other words, leaving the mode in which read-side critical
 463 * sections can occur.
 464 *
 465 * This code assumes that the idle loop never does anything that might
 466 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 467 * architecture violates this assumption, RCU will give you what you
 468 * deserve, good and hard.  But very infrequently and irreproducibly.
 469 *
 470 * Use things like work queues to work around this limitation.
 471 *
 472 * You have been warned.
 473 */
 474void rcu_irq_exit(void)
 475{
 476        unsigned long flags;
 477        long long oldval;
 478        struct rcu_dynticks *rdtp;
 479
 480        local_irq_save(flags);
 481        rdtp = &__get_cpu_var(rcu_dynticks);
 482        oldval = rdtp->dynticks_nesting;
 483        rdtp->dynticks_nesting--;
 484        WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
 485        if (rdtp->dynticks_nesting)
 486                trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
 487        else
 488                rcu_eqs_enter_common(rdtp, oldval, true);
 489        rcu_sysidle_enter(rdtp, 1);
 490        local_irq_restore(flags);
 491}
 492
 493/*
 494 * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
 495 *
 496 * If the old value of the ->dynticks_nesting counter was zero,
 497 * we really are exiting idle and must do the appropriate accounting.
 498 * The caller must have disabled interrupts.
 499 */
 500static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
 501                               int user)
 502{
 503        smp_mb__before_atomic_inc();  /* Force ordering w/previous sojourn. */
 504        atomic_inc(&rdtp->dynticks);
 505        /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
 506        smp_mb__after_atomic_inc();  /* See above. */
 507        WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 508        rcu_cleanup_after_idle(smp_processor_id());
 509        trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
 510        if (!user && !is_idle_task(current)) {
 511                struct task_struct *idle = idle_task(smp_processor_id());
 512
 513                trace_rcu_dyntick(TPS("Error on exit: not idle task"),
 514                                  oldval, rdtp->dynticks_nesting);
 515                ftrace_dump(DUMP_ORIG);
 516                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
 517                          current->pid, current->comm,
 518                          idle->pid, idle->comm); /* must be idle task! */
 519        }
 520}
 521
 522/*
 523 * Exit an RCU extended quiescent state, which can be either the
 524 * idle loop or adaptive-tickless usermode execution.
 525 */
 526static void rcu_eqs_exit(bool user)
 527{
 528        struct rcu_dynticks *rdtp;
 529        long long oldval;
 530
 531        rdtp = &__get_cpu_var(rcu_dynticks);
 532        oldval = rdtp->dynticks_nesting;
 533        WARN_ON_ONCE(oldval < 0);
 534        if (oldval & DYNTICK_TASK_NEST_MASK)
 535                rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
 536        else
 537                rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
 538        rcu_eqs_exit_common(rdtp, oldval, user);
 539}
 540
 541/**
 542 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 543 *
 544 * Exit idle mode, in other words, -enter- the mode in which RCU
 545 * read-side critical sections can occur.
 546 *
 547 * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
 548 * allow for the possibility of usermode upcalls messing up our count
 549 * of interrupt nesting level during the busy period that is just
 550 * now starting.
 551 */
 552void rcu_idle_exit(void)
 553{
 554        unsigned long flags;
 555
 556        local_irq_save(flags);
 557        rcu_eqs_exit(false);
 558        rcu_sysidle_exit(&__get_cpu_var(rcu_dynticks), 0);
 559        local_irq_restore(flags);
 560}
 561EXPORT_SYMBOL_GPL(rcu_idle_exit);
 562
 563#ifdef CONFIG_RCU_USER_QS
 564/**
 565 * rcu_user_exit - inform RCU that we are exiting userspace.
 566 *
 567 * Exit RCU idle mode while entering the kernel because it can
 568 * run a RCU read side critical section anytime.
 569 */
 570void rcu_user_exit(void)
 571{
 572        rcu_eqs_exit(1);
 573}
 574#endif /* CONFIG_RCU_USER_QS */
 575
 576/**
 577 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 578 *
 579 * Enter an interrupt handler, which might possibly result in exiting
 580 * idle mode, in other words, entering the mode in which read-side critical
 581 * sections can occur.
 582 *
 583 * Note that the Linux kernel is fully capable of entering an interrupt
 584 * handler that it never exits, for example when doing upcalls to
 585 * user mode!  This code assumes that the idle loop never does upcalls to
 586 * user mode.  If your architecture does do upcalls from the idle loop (or
 587 * does anything else that results in unbalanced calls to the irq_enter()
 588 * and irq_exit() functions), RCU will give you what you deserve, good
 589 * and hard.  But very infrequently and irreproducibly.
 590 *
 591 * Use things like work queues to work around this limitation.
 592 *
 593 * You have been warned.
 594 */
 595void rcu_irq_enter(void)
 596{
 597        unsigned long flags;
 598        struct rcu_dynticks *rdtp;
 599        long long oldval;
 600
 601        local_irq_save(flags);
 602        rdtp = &__get_cpu_var(rcu_dynticks);
 603        oldval = rdtp->dynticks_nesting;
 604        rdtp->dynticks_nesting++;
 605        WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
 606        if (oldval)
 607                trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
 608        else
 609                rcu_eqs_exit_common(rdtp, oldval, true);
 610        rcu_sysidle_exit(rdtp, 1);
 611        local_irq_restore(flags);
 612}
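/*
 * Illustrative sketch of the balanced pairing that the comments above
 * require; in the kernel proper, rcu_irq_enter() and rcu_irq_exit() are
 * invoked from irq_enter() and irq_exit().  The handler below is
 * hypothetical.
 */
static void __maybe_unused rcu_example_irq_path(void)
{
        rcu_irq_enter();        /* The handler may now legally use RCU. */
        /* ... hypothetical interrupt-handler body ... */
        rcu_irq_exit();         /* Possibly re-enter dyntick-idle. */
}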
 613
 614/**
 615 * rcu_nmi_enter - inform RCU of entry to NMI context
 616 *
 617 * If the CPU was idle with dynamic ticks active, and there is no
 618 * irq handler running, this updates rdtp->dynticks to let the
 619 * RCU grace-period handling know that the CPU is active.
 620 */
 621void rcu_nmi_enter(void)
 622{
 623        struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
 624
 625        if (rdtp->dynticks_nmi_nesting == 0 &&
 626            (atomic_read(&rdtp->dynticks) & 0x1))
 627                return;
 628        rdtp->dynticks_nmi_nesting++;
 629        smp_mb__before_atomic_inc();  /* Force delay from prior write. */
 630        atomic_inc(&rdtp->dynticks);
 631        /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
 632        smp_mb__after_atomic_inc();  /* See above. */
 633        WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
 634}
 635
 636/**
 637 * rcu_nmi_exit - inform RCU of exit from NMI context
 638 *
 639 * If the CPU was idle with dynamic ticks active, and there is no
 640 * irq handler running, this updates rdtp->dynticks to let the
 641 * RCU grace-period handling know that the CPU is no longer active.
 642 */
 643void rcu_nmi_exit(void)
 644{
 645        struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
 646
 647        if (rdtp->dynticks_nmi_nesting == 0 ||
 648            --rdtp->dynticks_nmi_nesting != 0)
 649                return;
 650        /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
 651        smp_mb__before_atomic_inc();  /* See above. */
 652        atomic_inc(&rdtp->dynticks);
 653        smp_mb__after_atomic_inc();  /* Force delay to next write. */
 654        WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
 655}
 656
 657/**
 658 * rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
 659 *
 660 * If the current CPU is in its idle loop and is neither in an interrupt
 661 * nor an NMI handler, return true.
 662 */
 663int rcu_is_cpu_idle(void)
 664{
 665        int ret;
 666
 667        preempt_disable();
 668        ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
 669        preempt_enable();
 670        return ret;
 671}
 672EXPORT_SYMBOL(rcu_is_cpu_idle);
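/*
 * Illustrative sketch: low-level code that might run while the CPU is not
 * being watched by RCU can use rcu_is_cpu_idle() to verify that an RCU
 * read-side critical section is legal here.  The "rcu_example_" names are
 * hypothetical.
 */
static int __rcu *rcu_example_checked_ptr;

static int __maybe_unused rcu_example_checked_read(void)
{
        int *p;
        int val = -1;

        WARN_ON_ONCE(rcu_is_cpu_idle());        /* RCU must be watching us. */
        rcu_read_lock();
        p = rcu_dereference(rcu_example_checked_ptr);
        if (p)
                val = *p;
        rcu_read_unlock();
        return val;
}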
 673
 674#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 675
 676/*
 677 * Is the current CPU online?  Disable preemption to avoid false positives
 678 * that could otherwise happen due to the current CPU number being sampled,
 679 * this task being preempted, its old CPU being taken offline, resuming
 680 * on some other CPU, then determining that its old CPU is now offline.
 681 * It is OK to use RCU on an offline processor during initial boot, hence
 682 * the check for rcu_scheduler_fully_active.  Note also that it is OK
 683 * for a CPU coming online to use RCU for one jiffy prior to marking itself
 684 * online in the cpu_online_mask.  Similarly, it is OK for a CPU going
 685 * offline to continue to use RCU for one jiffy after marking itself
 686 * offline in the cpu_online_mask.  This leniency is necessary given the
 687 * non-atomic nature of the online and offline processing, for example,
 688 * the fact that a CPU enters the scheduler after completing the CPU_DYING
 689 * notifiers.
 690 *
 691 * This is also why RCU internally marks CPUs online during the
 692 * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase.
 693 *
 694 * Disable checking if in an NMI handler because we cannot safely report
 695 * errors from NMI handlers anyway.
 696 */
 697bool rcu_lockdep_current_cpu_online(void)
 698{
 699        struct rcu_data *rdp;
 700        struct rcu_node *rnp;
 701        bool ret;
 702
 703        if (in_nmi())
 704                return 1;
 705        preempt_disable();
 706        rdp = &__get_cpu_var(rcu_sched_data);
 707        rnp = rdp->mynode;
 708        ret = (rdp->grpmask & rnp->qsmaskinit) ||
 709              !rcu_scheduler_fully_active;
 710        preempt_enable();
 711        return ret;
 712}
 713EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
 714
 715#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
 716
 717/**
 718 * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
 719 *
 720 * If the current CPU is idle or running at a first-level (not nested)
 721 * interrupt from idle, return true.  The caller must have at least
 722 * disabled preemption.
 723 */
 724static int rcu_is_cpu_rrupt_from_idle(void)
 725{
 726        return __get_cpu_var(rcu_dynticks).dynticks_nesting <= 1;
 727}
 728
 729/*
 730 * Snapshot the specified CPU's dynticks counter so that we can later
 731 * credit them with an implicit quiescent state.  Return 1 if this CPU
 732 * is in dynticks idle mode, which is an extended quiescent state.
 733 */
 734static int dyntick_save_progress_counter(struct rcu_data *rdp,
 735                                         bool *isidle, unsigned long *maxj)
 736{
 737        rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
 738        rcu_sysidle_check_cpu(rdp, isidle, maxj);
 739        return (rdp->dynticks_snap & 0x1) == 0;
 740}
 741
 742/*
 743 * Return true if the specified CPU has passed through a quiescent
 744 * state by virtue of being in or having passed through a dynticks
 745 * idle state since the last call to dyntick_save_progress_counter()
 746 * for this same CPU, or by virtue of having been offline.
 747 */
 748static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
 749                                    bool *isidle, unsigned long *maxj)
 750{
 751        unsigned int curr;
 752        unsigned int snap;
 753
 754        curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
 755        snap = (unsigned int)rdp->dynticks_snap;
 756
 757        /*
 758         * If the CPU passed through or entered a dynticks idle phase with
 759         * no active irq/NMI handlers, then we can safely pretend that the CPU
 760         * already acknowledged the request to pass through a quiescent
 761         * state.  Either way, that CPU cannot possibly be in an RCU
 762         * read-side critical section that started before the beginning
 763         * of the current RCU grace period.
 764         */
 765        if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
 766                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
 767                rdp->dynticks_fqs++;
 768                return 1;
 769        }
 770
 771        /*
 772         * Check for the CPU being offline, but only if the grace period
 773         * is old enough.  We don't need to worry about the CPU changing
 774         * state: If we see it offline even once, it has been through a
 775         * quiescent state.
 776         *
 777         * The reason for insisting that the grace period be at least
 778         * one jiffy old is that CPUs that are not quite online and that
 779         * have just gone offline can still execute RCU read-side critical
 780         * sections.
 781         */
 782        if (ULONG_CMP_GE(rdp->rsp->gp_start + 2, jiffies))
 783                return 0;  /* Grace period is not old enough. */
 784        barrier();
 785        if (cpu_is_offline(rdp->cpu)) {
 786                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
 787                rdp->offline_fqs++;
 788                return 1;
 789        }
 790
 791        /*
 792         * There is a possibility that a CPU in adaptive-ticks state
 793         * might run in the kernel with the scheduling-clock tick disabled
 794         * for an extended time period.  Invoke rcu_kick_nohz_cpu() to
 795         * force the CPU to restart the scheduling-clock tick if this
 796         * CPU is in this state.
 797         */
 798        rcu_kick_nohz_cpu(rdp->cpu);
 799
 800        return 0;
 801}
 802
 803static void record_gp_stall_check_time(struct rcu_state *rsp)
 804{
 805        rsp->gp_start = jiffies;
 806        rsp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
 807}
 808
 809/*
 810 * Dump stacks of all tasks running on stalled CPUs.  This is a fallback
 811 * for architectures that do not implement trigger_all_cpu_backtrace().
 812 * The NMI-triggered stack traces are more accurate because they are
 813 * printed by the target CPU.
 814 */
 815static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
 816{
 817        int cpu;
 818        unsigned long flags;
 819        struct rcu_node *rnp;
 820
 821        rcu_for_each_leaf_node(rsp, rnp) {
 822                raw_spin_lock_irqsave(&rnp->lock, flags);
 823                if (rnp->qsmask != 0) {
 824                        for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
 825                                if (rnp->qsmask & (1UL << cpu))
 826                                        dump_cpu_task(rnp->grplo + cpu);
 827                }
 828                raw_spin_unlock_irqrestore(&rnp->lock, flags);
 829        }
 830}
 831
 832static void print_other_cpu_stall(struct rcu_state *rsp)
 833{
 834        int cpu;
 835        long delta;
 836        unsigned long flags;
 837        int ndetected = 0;
 838        struct rcu_node *rnp = rcu_get_root(rsp);
 839        long totqlen = 0;
 840
 841        /* Only let one CPU complain about others per time interval. */
 842
 843        raw_spin_lock_irqsave(&rnp->lock, flags);
 844        delta = jiffies - rsp->jiffies_stall;
 845        if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
 846                raw_spin_unlock_irqrestore(&rnp->lock, flags);
 847                return;
 848        }
 849        rsp->jiffies_stall = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
 850        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 851
 852        /*
 853         * OK, time to rat on our buddy...
 854         * See Documentation/RCU/stallwarn.txt for info on how to debug
 855         * RCU CPU stall warnings.
 856         */
 857        pr_err("INFO: %s detected stalls on CPUs/tasks:",
 858               rsp->name);
 859        print_cpu_stall_info_begin();
 860        rcu_for_each_leaf_node(rsp, rnp) {
 861                raw_spin_lock_irqsave(&rnp->lock, flags);
 862                ndetected += rcu_print_task_stall(rnp);
 863                if (rnp->qsmask != 0) {
 864                        for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
 865                                if (rnp->qsmask & (1UL << cpu)) {
 866                                        print_cpu_stall_info(rsp,
 867                                                             rnp->grplo + cpu);
 868                                        ndetected++;
 869                                }
 870                }
 871                raw_spin_unlock_irqrestore(&rnp->lock, flags);
 872        }
 873
 874        /*
 875         * Now rat on any tasks that got kicked up to the root rcu_node
 876         * due to CPU offlining.
 877         */
 878        rnp = rcu_get_root(rsp);
 879        raw_spin_lock_irqsave(&rnp->lock, flags);
 880        ndetected += rcu_print_task_stall(rnp);
 881        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 882
 883        print_cpu_stall_info_end();
 884        for_each_possible_cpu(cpu)
 885                totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
 886        pr_cont("(detected by %d, t=%ld jiffies, g=%lu, c=%lu, q=%lu)\n",
 887               smp_processor_id(), (long)(jiffies - rsp->gp_start),
 888               rsp->gpnum, rsp->completed, totqlen);
 889        if (ndetected == 0)
 890                pr_err("INFO: Stall ended before state dump start\n");
 891        else if (!trigger_all_cpu_backtrace())
 892                rcu_dump_cpu_stacks(rsp);
 893
 894        /* Complain about tasks blocking the grace period. */
 895
 896        rcu_print_detail_task_stall(rsp);
 897
 898        force_quiescent_state(rsp);  /* Kick them all. */
 899}
 900
 901static void print_cpu_stall(struct rcu_state *rsp)
 902{
 903        int cpu;
 904        unsigned long flags;
 905        struct rcu_node *rnp = rcu_get_root(rsp);
 906        long totqlen = 0;
 907
 908        /*
 909         * OK, time to rat on ourselves...
 910         * See Documentation/RCU/stallwarn.txt for info on how to debug
 911         * RCU CPU stall warnings.
 912         */
 913        pr_err("INFO: %s self-detected stall on CPU", rsp->name);
 914        print_cpu_stall_info_begin();
 915        print_cpu_stall_info(rsp, smp_processor_id());
 916        print_cpu_stall_info_end();
 917        for_each_possible_cpu(cpu)
 918                totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
 919        pr_cont(" (t=%lu jiffies g=%lu c=%lu q=%lu)\n",
 920                jiffies - rsp->gp_start, rsp->gpnum, rsp->completed, totqlen);
 921        if (!trigger_all_cpu_backtrace())
 922                dump_stack();
 923
 924        raw_spin_lock_irqsave(&rnp->lock, flags);
 925        if (ULONG_CMP_GE(jiffies, rsp->jiffies_stall))
 926                rsp->jiffies_stall = jiffies +
 927                                     3 * rcu_jiffies_till_stall_check() + 3;
 928        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 929
 930        set_need_resched();  /* kick ourselves to get things going. */
 931}
 932
 933static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 934{
 935        unsigned long j;
 936        unsigned long js;
 937        struct rcu_node *rnp;
 938
 939        if (rcu_cpu_stall_suppress)
 940                return;
 941        j = ACCESS_ONCE(jiffies);
 942        js = ACCESS_ONCE(rsp->jiffies_stall);
 943        rnp = rdp->mynode;
 944        if (rcu_gp_in_progress(rsp) &&
 945            (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) {
 946
 947                /* We haven't checked in, so go dump stack. */
 948                print_cpu_stall(rsp);
 949
 950        } else if (rcu_gp_in_progress(rsp) &&
 951                   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
 952
 953                /* They had a few time units to dump stack, so complain. */
 954                print_other_cpu_stall(rsp);
 955        }
 956}
 957
 958/**
 959 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 960 *
 961 * Set the stall-warning timeout way off into the future, thus preventing
 962 * any RCU CPU stall-warning messages from appearing in the current set of
 963 * RCU grace periods.
 964 *
 965 * The caller must disable hard irqs.
 966 */
 967void rcu_cpu_stall_reset(void)
 968{
 969        struct rcu_state *rsp;
 970
 971        for_each_rcu_flavor(rsp)
 972                rsp->jiffies_stall = jiffies + ULONG_MAX / 2;
 973}
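/*
 * Illustrative sketch: a debugger-style trap handler might call
 * rcu_cpu_stall_reset() so that the CPUs it is about to park do not emit
 * spurious stall warnings.  Hard irqs are disabled around the call, as the
 * comment above requires; the function name is hypothetical.
 */
static void __maybe_unused rcu_example_enter_debugger(void)
{
        unsigned long flags;

        local_irq_save(flags);
        rcu_cpu_stall_reset();  /* The long pause about to happen is expected. */
        local_irq_restore(flags);
}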
 974
 975/*
 976 * Initialize the specified rcu_data structure's callback list to empty.
 977 */
 978static void init_callback_list(struct rcu_data *rdp)
 979{
 980        int i;
 981
 982        if (init_nocb_callback_list(rdp))
 983                return;
 984        rdp->nxtlist = NULL;
 985        for (i = 0; i < RCU_NEXT_SIZE; i++)
 986                rdp->nxttail[i] = &rdp->nxtlist;
 987}
 988
 989/*
 990 * Determine the value that ->completed will have at the end of the
 991 * next subsequent grace period.  This is used to tag callbacks so that
 992 * a CPU can invoke callbacks in a timely fashion even if that CPU has
 993 * been dyntick-idle for an extended period with callbacks under the
 994 * influence of RCU_FAST_NO_HZ.
 995 *
 996 * The caller must hold rnp->lock with interrupts disabled.
 997 */
 998static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
 999                                       struct rcu_node *rnp)
1000{
1001        /*
1002         * If RCU is idle, we just wait for the next grace period.
1003         * But we can only be sure that RCU is idle if we are looking
1004         * at the root rcu_node structure -- otherwise, a new grace
1005         * period might have started, but just not yet gotten around
1006         * to initializing the current non-root rcu_node structure.
1007         */
1008        if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
1009                return rnp->completed + 1;
1010
1011        /*
1012         * Otherwise, wait for a possible partial grace period and
1013         * then the subsequent full grace period.
1014         */
1015        return rnp->completed + 2;
1016}
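/*
 * Worked example of the rule above: if this is the root rcu_node and
 * ->gpnum == ->completed == 100, RCU is idle, so new callbacks need only
 * wait for grace period 101 (->completed + 1).  On any other rcu_node, a
 * grace period might already be partially complete, so callbacks must also
 * wait out a full subsequent grace period, giving 102 (->completed + 2).
 */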
1017
1018/*
1019 * Trace-event helper function for rcu_start_future_gp() and
1020 * rcu_nocb_wait_gp().
1021 */
1022static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1023                                unsigned long c, const char *s)
1024{
1025        trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
1026                                      rnp->completed, c, rnp->level,
1027                                      rnp->grplo, rnp->grphi, s);
1028}
1029
1030/*
1031 * Start some future grace period, as needed to handle newly arrived
1032 * callbacks.  The required future grace periods are recorded in each
1033 * rcu_node structure's ->need_future_gp field.
1034 *
1035 * The caller must hold the specified rcu_node structure's ->lock.
1036 */
1037static unsigned long __maybe_unused
1038rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp)
1039{
1040        unsigned long c;
1041        int i;
1042        struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
1043
1044        /*
1045         * Pick up grace-period number for new callbacks.  If this
1046         * grace period is already marked as needed, return to the caller.
1047         */
1048        c = rcu_cbs_completed(rdp->rsp, rnp);
1049        trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
1050        if (rnp->need_future_gp[c & 0x1]) {
1051                trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
1052                return c;
1053        }
1054
1055        /*
1056         * If either this rcu_node structure or the root rcu_node structure
1057         * believe that a grace period is in progress, then we must wait
1058         * for the one following, which is in "c".  Because our request
1059         * will be noticed at the end of the current grace period, we don't
1060         * need to explicitly start one.
1061         */
1062        if (rnp->gpnum != rnp->completed ||
1063            ACCESS_ONCE(rnp_root->gpnum) != ACCESS_ONCE(rnp_root->completed)) {
1064                rnp->need_future_gp[c & 0x1]++;
1065                trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
1066                return c;
1067        }
1068
1069        /*
1070         * There might be no grace period in progress.  If we don't already
1071         * hold it, acquire the root rcu_node structure's lock in order to
1072         * start one (if needed).
1073         */
1074        if (rnp != rnp_root)
1075                raw_spin_lock(&rnp_root->lock);
1076
1077        /*
1078         * Get a new grace-period number.  If there really is no grace
1079         * period in progress, it will be smaller than the one we obtained
1080         * earlier.  Adjust callbacks as needed.  Note that even no-CBs
1081         * CPUs have a ->nxtcompleted[] array, so no no-CBs checks needed.
1082         */
1083        c = rcu_cbs_completed(rdp->rsp, rnp_root);
1084        for (i = RCU_DONE_TAIL; i < RCU_NEXT_TAIL; i++)
1085                if (ULONG_CMP_LT(c, rdp->nxtcompleted[i]))
1086                        rdp->nxtcompleted[i] = c;
1087
1088        /*
1089         * If the need for the required grace period is already
1090         * recorded, trace and leave.
1091         */
1092        if (rnp_root->need_future_gp[c & 0x1]) {
1093                trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
1094                goto unlock_out;
1095        }
1096
1097        /* Record the need for the future grace period. */
1098        rnp_root->need_future_gp[c & 0x1]++;
1099
1100        /* If a grace period is not already in progress, start one. */
1101        if (rnp_root->gpnum != rnp_root->completed) {
1102                trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
1103        } else {
1104                trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
1105                rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
1106        }
1107unlock_out:
1108        if (rnp != rnp_root)
1109                raw_spin_unlock(&rnp_root->lock);
1110        return c;
1111}
1112
1113/*
1114 * Clean up any old requests for the just-ended grace period.  Also return
1115 * whether any additional grace periods have been requested.  Also invoke
1116 * rcu_nocb_gp_cleanup() in order to wake up any no-callbacks kthreads
1117 * waiting for this grace period to complete.
1118 */
1119static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
1120{
1121        int c = rnp->completed;
1122        int needmore;
1123        struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1124
1125        rcu_nocb_gp_cleanup(rsp, rnp);
1126        rnp->need_future_gp[c & 0x1] = 0;
1127        needmore = rnp->need_future_gp[(c + 1) & 0x1];
1128        trace_rcu_future_gp(rnp, rdp, c,
1129                            needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1130        return needmore;
1131}
1132
1133/*
1134 * If there is room, assign a ->completed number to any callbacks on
1135 * this CPU that have not already been assigned.  Also accelerate any
1136 * callbacks that were previously assigned a ->completed number that has
1137 * since proven to be too conservative, which can happen if callbacks get
1138 * assigned a ->completed number while RCU is idle, but with reference to
1139 * a non-root rcu_node structure.  This function is idempotent, so it does
1140 * not hurt to call it repeatedly.
1141 *
1142 * The caller must hold rnp->lock with interrupts disabled.
1143 */
1144static void rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1145                               struct rcu_data *rdp)
1146{
1147        unsigned long c;
1148        int i;
1149
1150        /* If the CPU has no callbacks, nothing to do. */
1151        if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
1152                return;
1153
1154        /*
1155         * Starting from the sublist containing the callbacks most
1156         * recently assigned a ->completed number and working down, find the
1157         * first sublist that is not assignable to an upcoming grace period.
1158         * Such a sublist has something in it (first two tests) and has
1159         * a ->completed number assigned that will complete sooner than
1160         * the ->completed number for newly arrived callbacks (last test).
1161         *
1162         * The key point is that any later sublist can be assigned the
1163         * same ->completed number as the newly arrived callbacks, which
1164         * means that the callbacks in any of these later sublists can be
1165         * grouped into a single sublist, whether or not they have already
1166         * been assigned a ->completed number.
1167         */
1168        c = rcu_cbs_completed(rsp, rnp);
1169        for (i = RCU_NEXT_TAIL - 1; i > RCU_DONE_TAIL; i--)
1170                if (rdp->nxttail[i] != rdp->nxttail[i - 1] &&
1171                    !ULONG_CMP_GE(rdp->nxtcompleted[i], c))
1172                        break;
1173
1174        /*
1175         * If there is no sublist for unassigned callbacks, leave.
1176         * At the same time, advance "i" one sublist, so that "i" will
1177         * index into the sublist where all the remaining callbacks should
1178         * be grouped into.
1179         */
1180        if (++i >= RCU_NEXT_TAIL)
1181                return;
1182
1183        /*
1184         * Assign all subsequent callbacks' ->completed number to the next
1185         * full grace period and group them all in the sublist initially
1186         * indexed by "i".
1187         */
1188        for (; i <= RCU_NEXT_TAIL; i++) {
1189                rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL];
1190                rdp->nxtcompleted[i] = c;
1191        }
1192        /* Record any needed additional grace periods. */
1193        rcu_start_future_gp(rnp, rdp);
1194
1195        /* Trace depending on how much we were able to accelerate. */
1196        if (!*rdp->nxttail[RCU_WAIT_TAIL])
1197                trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
1198        else
1199                trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
1200}
1201
1202/*
1203 * Move any callbacks whose grace period has completed to the
1204 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1205 * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
1206 * sublist.  This function is idempotent, so it does not hurt to
1207 * invoke it repeatedly.  As long as it is not invoked -too- often...
1208 *
1209 * The caller must hold rnp->lock with interrupts disabled.
1210 */
1211static void rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1212                            struct rcu_data *rdp)
1213{
1214        int i, j;
1215
1216        /* If the CPU has no callbacks, nothing to do. */
1217        if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
1218                return;
1219
1220        /*
1221         * Find all callbacks whose ->completed numbers indicate that they
1222         * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1223         */
1224        for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
1225                if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i]))
1226                        break;
1227                rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i];
1228        }
1229        /* Clean up any sublist tail pointers that were misordered above. */
1230        for (j = RCU_WAIT_TAIL; j < i; j++)
1231                rdp->nxttail[j] = rdp->nxttail[RCU_DONE_TAIL];
1232
1233        /* Copy down callbacks to fill in empty sublists. */
1234        for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
1235                if (rdp->nxttail[j] == rdp->nxttail[RCU_NEXT_TAIL])
1236                        break;
1237                rdp->nxttail[j] = rdp->nxttail[i];
1238                rdp->nxtcompleted[j] = rdp->nxtcompleted[i];
1239        }
1240
1241        /* Classify any remaining callbacks. */
1242        rcu_accelerate_cbs(rsp, rnp, rdp);
1243}
1244
1245/*
1246 * Update CPU-local rcu_data state to record the beginnings and ends of
1247 * grace periods.  The caller must hold the ->lock of the leaf rcu_node
1248 * structure corresponding to the current CPU, and must have irqs disabled.
1249 */
1250static void __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
1251{
1252        /* Handle the ends of any preceding grace periods first. */
1253        if (rdp->completed == rnp->completed) {
1254
1255                /* No grace period end, so just accelerate recent callbacks. */
1256                rcu_accelerate_cbs(rsp, rnp, rdp);
1257
1258        } else {
1259
1260                /* Advance callbacks. */
1261                rcu_advance_cbs(rsp, rnp, rdp);
1262
1263                /* Remember that we saw this grace-period completion. */
1264                rdp->completed = rnp->completed;
1265                trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
1266        }
1267
1268        if (rdp->gpnum != rnp->gpnum) {
1269                /*
1270                 * If the current grace period is waiting for this CPU,
1271                 * set up to detect a quiescent state, otherwise don't
1272                 * go looking for one.
1273                 */
1274                rdp->gpnum = rnp->gpnum;
1275                trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
1276                rdp->passed_quiesce = 0;
1277                rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
1278                zero_cpu_stall_ticks(rdp);
1279        }
1280}
1281
1282static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
1283{
1284        unsigned long flags;
1285        struct rcu_node *rnp;
1286
1287        local_irq_save(flags);
1288        rnp = rdp->mynode;
1289        if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&
1290             rdp->completed == ACCESS_ONCE(rnp->completed)) || /* w/out lock. */
1291            !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
1292                local_irq_restore(flags);
1293                return;
1294        }
1295        __note_gp_changes(rsp, rnp, rdp);
1296        raw_spin_unlock_irqrestore(&rnp->lock, flags);
1297}
1298
1299/*
1300 * Initialize a new grace period.
1301 */
1302static int rcu_gp_init(struct rcu_state *rsp)
1303{
1304        struct rcu_data *rdp;
1305        struct rcu_node *rnp = rcu_get_root(rsp);
1306
1307        rcu_bind_gp_kthread();
1308        raw_spin_lock_irq(&rnp->lock);
1309        rsp->gp_flags = 0; /* Clear all flags: New grace period. */
1310
1311        if (rcu_gp_in_progress(rsp)) {
1312                /* Grace period already in progress, don't start another.  */
1313                raw_spin_unlock_irq(&rnp->lock);
1314                return 0;
1315        }
1316
1317        /* Advance to a new grace period and initialize state. */
1318        rsp->gpnum++;
1319        trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
1320        record_gp_stall_check_time(rsp);
1321        raw_spin_unlock_irq(&rnp->lock);
1322
1323        /* Exclude any concurrent CPU-hotplug operations. */
1324        mutex_lock(&rsp->onoff_mutex);
1325
1326        /*
1327         * Set the quiescent-state-needed bits in all the rcu_node
1328         * structures for all currently online CPUs in breadth-first order,
1329         * starting from the root rcu_node structure, relying on the layout
1330         * of the tree within the rsp->node[] array.  Note that other CPUs
1331         * will access only the leaves of the hierarchy, thus seeing that no
1332         * grace period is in progress, at least until the corresponding
1333         * leaf node has been initialized.  In addition, we have excluded
1334         * CPU-hotplug operations.
1335         *
1336         * The grace period cannot complete until the initialization
1337         * process finishes, because this kthread handles both.
1338         */
1339        rcu_for_each_node_breadth_first(rsp, rnp) {
1340                raw_spin_lock_irq(&rnp->lock);
1341                rdp = this_cpu_ptr(rsp->rda);
1342                rcu_preempt_check_blocked_tasks(rnp);
1343                rnp->qsmask = rnp->qsmaskinit;
1344                ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
1345                WARN_ON_ONCE(rnp->completed != rsp->completed);
1346                ACCESS_ONCE(rnp->completed) = rsp->completed;
1347                if (rnp == rdp->mynode)
1348                        __note_gp_changes(rsp, rnp, rdp);
1349                rcu_preempt_boost_start_gp(rnp);
1350                trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
1351                                            rnp->level, rnp->grplo,
1352                                            rnp->grphi, rnp->qsmask);
1353                raw_spin_unlock_irq(&rnp->lock);
1354#ifdef CONFIG_PROVE_RCU_DELAY
1355                if ((prandom_u32() % (rcu_num_nodes + 1)) == 0 &&
1356                    system_state == SYSTEM_RUNNING)
1357                        udelay(200);
1358#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
1359                cond_resched();
1360        }
1361
1362        mutex_unlock(&rsp->onoff_mutex);
1363        return 1;
1364}
1365
1366/*
1367 * Do one round of quiescent-state forcing.
1368 */
1369static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
1370{
1371        int fqs_state = fqs_state_in;
1372        bool isidle = false;
1373        unsigned long maxj;
1374        struct rcu_node *rnp = rcu_get_root(rsp);
1375
1376        rsp->n_force_qs++;
1377        if (fqs_state == RCU_SAVE_DYNTICK) {
1378                /* Collect dyntick-idle snapshots. */
1379                if (is_sysidle_rcu_state(rsp)) {
1380                        isidle = 1;
1381                        maxj = jiffies - ULONG_MAX / 4;
1382                }
1383                force_qs_rnp(rsp, dyntick_save_progress_counter,
1384                             &isidle, &maxj);
1385                rcu_sysidle_report_gp(rsp, isidle, maxj);
1386                fqs_state = RCU_FORCE_QS;
1387        } else {
1388                /* Handle dyntick-idle and offline CPUs. */
1389                isidle = 0;
1390                force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
1391        }
1392        /* Clear flag to prevent immediate re-entry. */
1393        if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
1394                raw_spin_lock_irq(&rnp->lock);
1395                rsp->gp_flags &= ~RCU_GP_FLAG_FQS;
1396                raw_spin_unlock_irq(&rnp->lock);
1397        }
1398        return fqs_state;
1399}
1400
1401/*
1402 * Clean up after the old grace period.
1403 */
1404static void rcu_gp_cleanup(struct rcu_state *rsp)
1405{
1406        unsigned long gp_duration;
1407        int nocb = 0;
1408        struct rcu_data *rdp;
1409        struct rcu_node *rnp = rcu_get_root(rsp);
1410
1411        raw_spin_lock_irq(&rnp->lock);
1412        gp_duration = jiffies - rsp->gp_start;
1413        if (gp_duration > rsp->gp_max)
1414                rsp->gp_max = gp_duration;
1415
1416        /*
1417         * We know the grace period is complete, but to everyone else
1418         * it appears to still be ongoing.  But it is also the case
1419         * that to everyone else it looks like there is nothing that
1420         * they can do to advance the grace period.  It is therefore
1421         * safe for us to drop the lock in order to mark the grace
1422         * period as completed in all of the rcu_node structures.
1423         */
1424        raw_spin_unlock_irq(&rnp->lock);
1425
1426        /*
1427         * Propagate new ->completed value to rcu_node structures so
1428         * that other CPUs don't have to wait until the start of the next
1429         * grace period to process their callbacks.  This also avoids
1430         * some nasty RCU grace-period initialization races by forcing
1431         * the end of the current grace period to be completely recorded in
1432         * all of the rcu_node structures before the beginning of the next
1433         * grace period is recorded in any of the rcu_node structures.
1434         */
1435        rcu_for_each_node_breadth_first(rsp, rnp) {
1436                raw_spin_lock_irq(&rnp->lock);
1437                ACCESS_ONCE(rnp->completed) = rsp->gpnum;
1438                rdp = this_cpu_ptr(rsp->rda);
1439                if (rnp == rdp->mynode)
1440                        __note_gp_changes(rsp, rnp, rdp);
1441                nocb += rcu_future_gp_cleanup(rsp, rnp);
1442                raw_spin_unlock_irq(&rnp->lock);
1443                cond_resched();
1444        }
1445        rnp = rcu_get_root(rsp);
1446        raw_spin_lock_irq(&rnp->lock);
1447        rcu_nocb_gp_set(rnp, nocb);
1448
1449        rsp->completed = rsp->gpnum; /* Declare grace period done. */
1450        trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
1451        rsp->fqs_state = RCU_GP_IDLE;
1452        rdp = this_cpu_ptr(rsp->rda);
1453        rcu_advance_cbs(rsp, rnp, rdp);  /* Reduce false positives below. */
1454        if (cpu_needs_another_gp(rsp, rdp))
1455                rsp->gp_flags = 1;
1456        raw_spin_unlock_irq(&rnp->lock);
1457}
1458
1459/*
1460 * Body of kthread that handles grace periods.
1461 */
1462static int __noreturn rcu_gp_kthread(void *arg)
1463{
1464        int fqs_state;
1465        unsigned long j;
1466        int ret;
1467        struct rcu_state *rsp = arg;
1468        struct rcu_node *rnp = rcu_get_root(rsp);
1469
1470        for (;;) {
1471
1472                /* Handle grace-period start. */
1473                for (;;) {
1474                        wait_event_interruptible(rsp->gp_wq,
1475                                                 rsp->gp_flags &
1476                                                 RCU_GP_FLAG_INIT);
1477                        if ((rsp->gp_flags & RCU_GP_FLAG_INIT) &&
1478                            rcu_gp_init(rsp))
1479                                break;
1480                        cond_resched();
1481                        flush_signals(current);
1482                }
1483
1484                /* Handle quiescent-state forcing. */
1485                fqs_state = RCU_SAVE_DYNTICK;
1486                j = jiffies_till_first_fqs;
1487                if (j > HZ) {
1488                        j = HZ;
1489                        jiffies_till_first_fqs = HZ;
1490                }
1491                for (;;) {
1492                        rsp->jiffies_force_qs = jiffies + j;
1493                        ret = wait_event_interruptible_timeout(rsp->gp_wq,
1494                                        (rsp->gp_flags & RCU_GP_FLAG_FQS) ||
1495                                        (!ACCESS_ONCE(rnp->qsmask) &&
1496                                         !rcu_preempt_blocked_readers_cgp(rnp)),
1497                                        j);
1498                        /* If grace period done, leave loop. */
1499                        if (!ACCESS_ONCE(rnp->qsmask) &&
1500                            !rcu_preempt_blocked_readers_cgp(rnp))
1501                                break;
1502                        /* If time for quiescent-state forcing, do it. */
1503                        if (ret == 0 || (rsp->gp_flags & RCU_GP_FLAG_FQS)) {
1504                                fqs_state = rcu_gp_fqs(rsp, fqs_state);
1505                                cond_resched();
1506                        } else {
1507                                /* Deal with stray signal. */
1508                                cond_resched();
1509                                flush_signals(current);
1510                        }
1511                        j = jiffies_till_next_fqs;
1512                        if (j > HZ) {
1513                                j = HZ;
1514                                jiffies_till_next_fqs = HZ;
1515                        } else if (j < 1) {
1516                                j = 1;
1517                                jiffies_till_next_fqs = 1;
1518                        }
1519                }
1520
1521                /* Handle grace-period end. */
1522                rcu_gp_cleanup(rsp);
1523        }
1524}
1525
1526static void rsp_wakeup(struct irq_work *work)
1527{
1528        struct rcu_state *rsp = container_of(work, struct rcu_state, wakeup_work);
1529
1530        /* Wake up rcu_gp_kthread() to start the grace period. */
1531        wake_up(&rsp->gp_wq);
1532}
1533
1534/*
1535 * Start a new RCU grace period if warranted, re-initializing the hierarchy
1536 * in preparation for detecting the next grace period.  The caller must hold
1537 * the root node's ->lock and hard irqs must be disabled.
1538 *
1539 * Note that it is legal for a dying CPU (which is marked as offline) to
1540 * invoke this function.  This can happen when the dying CPU reports its
1541 * quiescent state.
1542 */
1543static void
1544rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
1545                      struct rcu_data *rdp)
1546{
1547        if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
1548                /*
1549                 * Either we have not yet spawned the grace-period
1550                 * task, this CPU does not need another grace period,
1551                 * or a grace period is already in progress.
1552                 * Either way, don't start a new grace period.
1553                 */
1554                return;
1555        }
1556        rsp->gp_flags = RCU_GP_FLAG_INIT;
1557
1558        /*
1559         * We can't do wakeups while holding the rnp->lock, as that
1560         * could cause possible deadlocks with the rq->lock. Defer
1561         * the wakeup to interrupt context.  And don't bother waking
1562         * up the running kthread.
1563         */
1564        if (current != rsp->gp_kthread)
1565                irq_work_queue(&rsp->wakeup_work);
1566}
1567
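/*
 * Illustrative sketch (hypothetical names, not part of this file): the
 * deferred-wakeup pattern above applies to any code that must wake a
 * kthread while holding a lock that the scheduler might also need.  The
 * irq_work handler runs later from hard-interrupt context, where no such
 * lock is held, so the wake_up() cannot deadlock against rq->lock:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static struct irq_work my_work;
 *
 *	static void my_wakeup_handler(struct irq_work *work)
 *	{
 *		wake_up(&my_wq);
 *	}
 *
 *	init_irq_work(&my_work, my_wakeup_handler);	(during setup)
 *
 *	raw_spin_lock_irq(&some_lock);
 *	irq_work_queue(&my_work);
 *	raw_spin_unlock_irq(&some_lock);
 */
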
1568/*
1569 * Similar to rcu_start_gp_advanced(), but also advance the calling CPU's
1570 * callbacks.  Note that rcu_start_gp_advanced() cannot do this because it
1571 * is invoked indirectly from rcu_advance_cbs(), which would result in
1572 * endless recursion -- or would do so if it wasn't for the self-deadlock
1573 * that is encountered beforehand.
1574 */
1575static void
1576rcu_start_gp(struct rcu_state *rsp)
1577{
1578        struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1579        struct rcu_node *rnp = rcu_get_root(rsp);
1580
1581        /*
1582         * If there is no grace period in progress right now, any
1583         * callbacks we have up to this point will be satisfied by the
1584         * next grace period.  Also, advancing the callbacks reduces the
1585         * probability of false positives from cpu_needs_another_gp()
1586         * resulting in pointless grace periods.  So, advance callbacks
1587         * then start the grace period!
1588         */
1589        rcu_advance_cbs(rsp, rnp, rdp);
1590        rcu_start_gp_advanced(rsp, rnp, rdp);
1591}
1592
1593/*
1594 * Report a full set of quiescent states to the specified rcu_state
1595 * data structure.  This involves cleaning up after the prior grace
1596 * period and letting rcu_start_gp() start up the next grace period
1597 * if one is needed.  Note that the caller must hold rnp->lock, which
1598 * is released before return.
1599 */
1600static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
1601        __releases(rcu_get_root(rsp)->lock)
1602{
1603        WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
1604        raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
1605        wake_up(&rsp->gp_wq);  /* Memory barrier implied by wake_up() path. */
1606}
1607
1608/*
1609 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
1610 * Allows quiescent states for a group of CPUs to be reported at one go
1611 * to the specified rcu_node structure, though all the CPUs in the group
1612 * must be represented by the same rcu_node structure (which need not be
1613 * a leaf rcu_node structure, though it often will be).  That structure's
1614 * lock must be held upon entry, and it is released before return.
1615 */
1616static void
1617rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
1618                  struct rcu_node *rnp, unsigned long flags)
1619        __releases(rnp->lock)
1620{
1621        struct rcu_node *rnp_c;
1622
1623        /* Walk up the rcu_node hierarchy. */
1624        for (;;) {
1625                if (!(rnp->qsmask & mask)) {
1626
1627                        /* Our bit has already been cleared, so done. */
1628                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
1629                        return;
1630                }
1631                rnp->qsmask &= ~mask;
1632                trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
1633                                                 mask, rnp->qsmask, rnp->level,
1634                                                 rnp->grplo, rnp->grphi,
1635                                                 !!rnp->gp_tasks);
1636                if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
1637
1638                        /* Other bits still set at this level, so done. */
1639                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
1640                        return;
1641                }
1642                mask = rnp->grpmask;
1643                if (rnp->parent == NULL) {
1644
1645                        /* No more levels.  Exit loop holding root lock. */
1646
1647                        break;
1648                }
1649                raw_spin_unlock_irqrestore(&rnp->lock, flags);
1650                rnp_c = rnp;
1651                rnp = rnp->parent;
1652                raw_spin_lock_irqsave(&rnp->lock, flags);
1653                WARN_ON_ONCE(rnp_c->qsmask);
1654        }
1655
1656        /*
1657         * Get here if we are the last CPU to pass through a quiescent
1658         * state for this grace period.  Invoke rcu_report_qs_rsp()
1659         * to clean up and start the next grace period if one is needed.
1660         */
1661        rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
1662}
1663
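/*
 * Worked example (illustration only): suppose a leaf rcu_node structure
 * covers CPUs 0-15 and its ->qsmask is 0x3, meaning that CPUs 0 and 1 still
 * owe quiescent states.  When CPU 1 reports, rcu_report_qs_rnp() runs with
 * mask == 0x2, clears that bit, sees ->qsmask == 0x1, and stops at the leaf.
 * When CPU 0 later reports, ->qsmask reaches zero, so the walk continues to
 * the parent with mask == rnp->grpmask, clearing the leaf's bit there.  The
 * task that zeroes the root's ->qsmask is the last reporter for this grace
 * period and therefore calls rcu_report_qs_rsp().
 */
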
1664/*
1665 * Record a quiescent state for the specified CPU to that CPU's rcu_data
1666 * structure.  This must be either called from the specified CPU, or
1667 * called when the specified CPU is known to be offline (and when it is
1668 * also known that no other CPU is concurrently trying to help the offline
1669 * CPU).  The recorded grace-period numbers are used to make sure that we
1670 * are still in the grace period of interest.  We don't want to end the
1671 * current grace period based on quiescent states from an earlier one!
1672 */
1673static void
1674rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
1675{
1676        unsigned long flags;
1677        unsigned long mask;
1678        struct rcu_node *rnp;
1679
1680        rnp = rdp->mynode;
1681        raw_spin_lock_irqsave(&rnp->lock, flags);
1682        if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum ||
1683            rnp->completed == rnp->gpnum) {
1684
1685                /*
1686                 * The grace period in which this quiescent state was
1687                 * recorded has ended, so don't report it upwards.
1688                 * We will instead need a new quiescent state that lies
1689                 * within the current grace period.
1690                 */
1691                rdp->passed_quiesce = 0;        /* need qs for new gp. */
1692                raw_spin_unlock_irqrestore(&rnp->lock, flags);
1693                return;
1694        }
1695        mask = rdp->grpmask;
1696        if ((rnp->qsmask & mask) == 0) {
1697                raw_spin_unlock_irqrestore(&rnp->lock, flags);
1698        } else {
1699                rdp->qs_pending = 0;
1700
1701                /*
1702                 * This GP can't end until this CPU checks in, so all of our
1703                 * callbacks can be processed during the next GP.
1704                 */
1705                rcu_accelerate_cbs(rsp, rnp, rdp);
1706
1707                rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
1708        }
1709}
1710
1711/*
1712 * Check to see if there is a new grace period of which this CPU
1713 * is not yet aware, and if so, set up local rcu_data state for it.
1714 * Otherwise, see if this CPU has just passed through its first
1715 * quiescent state for this grace period, and record that fact if so.
1716 */
1717static void
1718rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
1719{
1720        /* Check for grace-period ends and beginnings. */
1721        note_gp_changes(rsp, rdp);
1722
1723        /*
1724         * Does this CPU still need to do its part for current grace period?
1725         * If no, return and let the other CPUs do their part as well.
1726         */
1727        if (!rdp->qs_pending)
1728                return;
1729
1730        /*
1731         * Was there a quiescent state since the beginning of the grace
1732         * period? If no, then exit and wait for the next call.
1733         */
1734        if (!rdp->passed_quiesce)
1735                return;
1736
1737        /*
1738         * Tell RCU we are done (but rcu_report_qs_rdp() will be the
1739         * judge of that).
1740         */
1741        rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
1742}
1743
1744#ifdef CONFIG_HOTPLUG_CPU
1745
1746/*
1747 * Send the specified CPU's RCU callbacks to the orphanage.  The
1748 * specified CPU must be offline, and the caller must hold the
1749 * ->orphan_lock.
1750 */
1751static void
1752rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
1753                          struct rcu_node *rnp, struct rcu_data *rdp)
1754{
1755        /* No-CBs CPUs do not have orphanable callbacks. */
1756        if (rcu_is_nocb_cpu(rdp->cpu))
1757                return;
1758
1759        /*
1760         * Orphan the callbacks.  First adjust the counts.  This is safe
1761         * because _rcu_barrier() excludes CPU-hotplug operations, so it
1762         * cannot be running now.  Thus no memory barrier is required.
1763         */
1764        if (rdp->nxtlist != NULL) {
1765                rsp->qlen_lazy += rdp->qlen_lazy;
1766                rsp->qlen += rdp->qlen;
1767                rdp->n_cbs_orphaned += rdp->qlen;
1768                rdp->qlen_lazy = 0;
1769                ACCESS_ONCE(rdp->qlen) = 0;
1770        }
1771
1772        /*
1773         * Next, move those callbacks still needing a grace period to
1774         * the orphanage, where some other CPU will pick them up.
1775         * Some of the callbacks might have gone partway through a grace
1776         * period, but that is too bad.  They get to start over because we
1777         * cannot assume that grace periods are synchronized across CPUs.
1778         * We don't bother updating the ->nxttail[] array yet, instead
1779         * we just reset the whole thing later on.
1780         */
1781        if (*rdp->nxttail[RCU_DONE_TAIL] != NULL) {
1782                *rsp->orphan_nxttail = *rdp->nxttail[RCU_DONE_TAIL];
1783                rsp->orphan_nxttail = rdp->nxttail[RCU_NEXT_TAIL];
1784                *rdp->nxttail[RCU_DONE_TAIL] = NULL;
1785        }
1786
1787        /*
1788         * Then move the ready-to-invoke callbacks to the orphanage,
1789         * where some other CPU will pick them up.  These will not be
1790 * required to pass through another grace period: They are done.
1791         */
1792        if (rdp->nxtlist != NULL) {
1793                *rsp->orphan_donetail = rdp->nxtlist;
1794                rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL];
1795        }
1796
1797        /* Finally, initialize the rcu_data structure's list to empty.  */
1798        init_callback_list(rdp);
1799}
1800
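/*
 * Illustrative sketch (not part of the implementation): each rcu_data
 * structure's callbacks live on a single singly linked list headed by
 * ->nxtlist and partitioned into segments by the ->nxttail[] array:
 *
 *	->nxttail[RCU_DONE_TAIL]:	end of callbacks whose grace period ended
 *	->nxttail[RCU_WAIT_TAIL]:	end of callbacks waiting for the current GP
 *	->nxttail[RCU_NEXT_READY_TAIL]:	end of callbacks waiting for the next GP
 *	->nxttail[RCU_NEXT_TAIL]:	end of the list (newly queued callbacks)
 *
 * Each ->nxttail[] element points (roughly) at the ->next field of the last
 * callback in its segment, falling back to earlier segments, and ultimately
 * to &->nxtlist, when segments are empty.  This is why the function above
 * can move every not-yet-done callback to the orphanage with just the three
 * pointer assignments on ->orphan_nxttail and ->nxttail[RCU_DONE_TAIL].
 */
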
1801/*
1802 * Adopt the RCU callbacks from the specified rcu_state structure's
1803 * orphanage.  The caller must hold the ->orphan_lock.
1804 */
1805static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
1806{
1807        int i;
1808        struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
1809
1810        /* No-CBs CPUs are handled specially. */
1811        if (rcu_nocb_adopt_orphan_cbs(rsp, rdp))
1812                return;
1813
1814        /* Do the accounting first. */
1815        rdp->qlen_lazy += rsp->qlen_lazy;
1816        rdp->qlen += rsp->qlen;
1817        rdp->n_cbs_adopted += rsp->qlen;
1818        if (rsp->qlen_lazy != rsp->qlen)
1819                rcu_idle_count_callbacks_posted();
1820        rsp->qlen_lazy = 0;
1821        rsp->qlen = 0;
1822
1823        /*
1824         * We do not need a memory barrier here because the only way we
1825         * can get here while an rcu_barrier() is in flight is if we are
1826         * the task doing that rcu_barrier().
1827         */
1828
1829        /* First adopt the ready-to-invoke callbacks. */
1830        if (rsp->orphan_donelist != NULL) {
1831                *rsp->orphan_donetail = *rdp->nxttail[RCU_DONE_TAIL];
1832                *rdp->nxttail[RCU_DONE_TAIL] = rsp->orphan_donelist;
1833                for (i = RCU_NEXT_SIZE - 1; i >= RCU_DONE_TAIL; i--)
1834                        if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
1835                                rdp->nxttail[i] = rsp->orphan_donetail;
1836                rsp->orphan_donelist = NULL;
1837                rsp->orphan_donetail = &rsp->orphan_donelist;
1838        }
1839
1840        /* And then adopt the callbacks that still need a grace period. */
1841        if (rsp->orphan_nxtlist != NULL) {
1842                *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxtlist;
1843                rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxttail;
1844                rsp->orphan_nxtlist = NULL;
1845                rsp->orphan_nxttail = &rsp->orphan_nxtlist;
1846        }
1847}
1848
1849/*
1850 * Trace the fact that this CPU is going offline.
1851 */
1852static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
1853{
1854        RCU_TRACE(unsigned long mask);
1855        RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda));
1856        RCU_TRACE(struct rcu_node *rnp = rdp->mynode);
1857
1858        RCU_TRACE(mask = rdp->grpmask);
1859        trace_rcu_grace_period(rsp->name,
1860                               rnp->gpnum + 1 - !!(rnp->qsmask & mask),
1861                               TPS("cpuofl"));
1862}
1863
1864/*
1865 * The CPU has been completely removed, and some other CPU is reporting
1866 * this fact from process context.  Do the remainder of the cleanup,
1867 * including orphaning the outgoing CPU's RCU callbacks, and also
1868 * adopting them.  There can only be one CPU hotplug operation at a time,
1869 * so no other CPU can be attempting to update rcu_cpu_kthread_task.
1870 */
1871static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
1872{
1873        unsigned long flags;
1874        unsigned long mask;
1875        int need_report = 0;
1876        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1877        struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
1878
1879        /* Adjust any no-longer-needed kthreads. */
1880        rcu_boost_kthread_setaffinity(rnp, -1);
1881
1882        /* Remove the dead CPU from the bitmasks in the rcu_node hierarchy. */
1883
1884        /* Exclude any attempts to start a new grace period. */
1885        mutex_lock(&rsp->onoff_mutex);
1886        raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
1887
1888        /* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
1889        rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
1890        rcu_adopt_orphan_cbs(rsp);
1891
1892        /* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
1893        mask = rdp->grpmask;    /* rnp->grplo is constant. */
1894        do {
1895                raw_spin_lock(&rnp->lock);      /* irqs already disabled. */
1896                rnp->qsmaskinit &= ~mask;
1897                if (rnp->qsmaskinit != 0) {
1898                        if (rnp != rdp->mynode)
1899                                raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
1900                        break;
1901                }
1902                if (rnp == rdp->mynode)
1903                        need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
1904                else
1905                        raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
1906                mask = rnp->grpmask;
1907                rnp = rnp->parent;
1908        } while (rnp != NULL);
1909
1910        /*
1911         * We still hold the leaf rcu_node structure lock here, and
1912         * irqs are still disabled.  The reason for this subterfuge is
1913         * because invoking rcu_report_unblock_qs_rnp() with ->orphan_lock
1914         * held leads to deadlock.
1915         */
1916        raw_spin_unlock(&rsp->orphan_lock); /* irqs remain disabled. */
1917        rnp = rdp->mynode;
1918        if (need_report & RCU_OFL_TASKS_NORM_GP)
1919                rcu_report_unblock_qs_rnp(rnp, flags);
1920        else
1921                raw_spin_unlock_irqrestore(&rnp->lock, flags);
1922        if (need_report & RCU_OFL_TASKS_EXP_GP)
1923                rcu_report_exp_rnp(rsp, rnp, true);
1924        WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
1925                  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
1926                  cpu, rdp->qlen, rdp->nxtlist);
1927        init_callback_list(rdp);
1928        /* Disallow further callbacks on this CPU. */
1929        rdp->nxttail[RCU_NEXT_TAIL] = NULL;
1930        mutex_unlock(&rsp->onoff_mutex);
1931}
1932
1933#else /* #ifdef CONFIG_HOTPLUG_CPU */
1934
1935static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
1936{
1937}
1938
1939static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
1940{
1941}
1942
1943#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
1944
1945/*
1946 * Invoke any RCU callbacks that have made it to the end of their grace
1947 * period.  Throttle as specified by rdp->blimit.
1948 */
1949static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
1950{
1951        unsigned long flags;
1952        struct rcu_head *next, *list, **tail;
1953        long bl, count, count_lazy;
1954        int i;
1955
1956        /* If no callbacks are ready, just return. */
1957        if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
1958                trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
1959                trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
1960                                    need_resched(), is_idle_task(current),
1961                                    rcu_is_callbacks_kthread());
1962                return;
1963        }
1964
1965        /*
1966         * Extract the list of ready callbacks, disabling to prevent
1967         * races with call_rcu() from interrupt handlers.
1968         */
1969        local_irq_save(flags);
1970        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
1971        bl = rdp->blimit;
1972        trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl);
1973        list = rdp->nxtlist;
1974        rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
1975        *rdp->nxttail[RCU_DONE_TAIL] = NULL;
1976        tail = rdp->nxttail[RCU_DONE_TAIL];
1977        for (i = RCU_NEXT_SIZE - 1; i >= 0; i--)
1978                if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
1979                        rdp->nxttail[i] = &rdp->nxtlist;
1980        local_irq_restore(flags);
1981
1982        /* Invoke callbacks. */
1983        count = count_lazy = 0;
1984        while (list) {
1985                next = list->next;
1986                prefetch(next);
1987                debug_rcu_head_unqueue(list);
1988                if (__rcu_reclaim(rsp->name, list))
1989                        count_lazy++;
1990                list = next;
1991                /* Stop only if limit reached and CPU has something to do. */
1992                if (++count >= bl &&
1993                    (need_resched() ||
1994                     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
1995                        break;
1996        }
1997
1998        local_irq_save(flags);
1999        trace_rcu_batch_end(rsp->name, count, !!list, need_resched(),
2000                            is_idle_task(current),
2001                            rcu_is_callbacks_kthread());
2002
2003        /* Update count, and requeue any remaining callbacks. */
2004        if (list != NULL) {
2005                *tail = rdp->nxtlist;
2006                rdp->nxtlist = list;
2007                for (i = 0; i < RCU_NEXT_SIZE; i++)
2008                        if (&rdp->nxtlist == rdp->nxttail[i])
2009                                rdp->nxttail[i] = tail;
2010                        else
2011                                break;
2012        }
2013        smp_mb(); /* List handling before counting for rcu_barrier(). */
2014        rdp->qlen_lazy -= count_lazy;
2015        ACCESS_ONCE(rdp->qlen) -= count;
2016        rdp->n_cbs_invoked += count;
2017
2018        /* Reinstate batch limit if we have worked down the excess. */
2019        if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
2020                rdp->blimit = blimit;
2021
2022        /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2023        if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
2024                rdp->qlen_last_fqs_check = 0;
2025                rdp->n_force_qs_snap = rsp->n_force_qs;
2026        } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
2027                rdp->qlen_last_fqs_check = rdp->qlen;
2028        WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0));
2029
2030        local_irq_restore(flags);
2031
2032        /* Re-invoke RCU core processing if there are callbacks remaining. */
2033        if (cpu_has_callbacks_ready_to_invoke(rdp))
2034                invoke_rcu_core();
2035}
2036
2037/*
2038 * Check to see if this CPU is in a non-context-switch quiescent state
2039 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
2040 * Also schedule RCU core processing.
2041 *
2042 * This function must be called from hardirq context.  It is normally
2043 * invoked from the scheduling-clock interrupt.  If rcu_pending returns
2044 * false, there is no point in invoking rcu_check_callbacks().
2045 */
2046void rcu_check_callbacks(int cpu, int user)
2047{
2048        trace_rcu_utilization(TPS("Start scheduler-tick"));
2049        increment_cpu_stall_ticks();
2050        if (user || rcu_is_cpu_rrupt_from_idle()) {
2051
2052                /*
2053                 * Get here if this CPU took its interrupt from user
2054                 * mode or from the idle loop, and if this is not a
2055                 * nested interrupt.  In this case, the CPU is in
2056                 * a quiescent state, so note it.
2057                 *
2058                 * No memory barrier is required here because both
2059                 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
2060                 * variables that other CPUs neither access nor modify,
2061                 * at least not while the corresponding CPU is online.
2062                 */
2063
2064                rcu_sched_qs(cpu);
2065                rcu_bh_qs(cpu);
2066
2067        } else if (!in_softirq()) {
2068
2069                /*
2070                 * Get here if this CPU did not take its interrupt from
2071                 * softirq, in other words, if it is not interrupting
2072                 * an rcu_bh read-side critical section.  This is an _bh
2073                 * critical section, so note it.
2074                 */
2075
2076                rcu_bh_qs(cpu);
2077        }
2078        rcu_preempt_check_callbacks(cpu);
2079        if (rcu_pending(cpu))
2080                invoke_rcu_core();
2081        trace_rcu_utilization(TPS("End scheduler-tick"));
2082}
2083
2084/*
2085 * Scan the leaf rcu_node structures, processing dyntick state for any that
2086 * have not yet encountered a quiescent state, using the function specified.
2087 * Also initiate boosting for any threads blocked on the root rcu_node.
2088 *
2089 * The caller must have suppressed start of new grace periods.
2090 */
2091static void force_qs_rnp(struct rcu_state *rsp,
2092                         int (*f)(struct rcu_data *rdp, bool *isidle,
2093                                  unsigned long *maxj),
2094                         bool *isidle, unsigned long *maxj)
2095{
2096        unsigned long bit;
2097        int cpu;
2098        unsigned long flags;
2099        unsigned long mask;
2100        struct rcu_node *rnp;
2101
2102        rcu_for_each_leaf_node(rsp, rnp) {
2103                cond_resched();
2104                mask = 0;
2105                raw_spin_lock_irqsave(&rnp->lock, flags);
2106                if (!rcu_gp_in_progress(rsp)) {
2107                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
2108                        return;
2109                }
2110                if (rnp->qsmask == 0) {
2111                        rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
2112                        continue;
2113                }
2114                cpu = rnp->grplo;
2115                bit = 1;
2116                for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
2117                        if ((rnp->qsmask & bit) != 0) {
2118                                if ((rnp->qsmaskinit & bit) != 0)
2119                                        *isidle = 0;
2120                                if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
2121                                        mask |= bit;
2122                        }
2123                }
2124                if (mask != 0) {
2125
2126                        /* rcu_report_qs_rnp() releases rnp->lock. */
2127                        rcu_report_qs_rnp(mask, rsp, rnp, flags);
2128                        continue;
2129                }
2130                raw_spin_unlock_irqrestore(&rnp->lock, flags);
2131        }
2132        rnp = rcu_get_root(rsp);
2133        if (rnp->qsmask == 0) {
2134                raw_spin_lock_irqsave(&rnp->lock, flags);
2135                rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
2136        }
2137}
2138
2139/*
2140 * Force quiescent states on reluctant CPUs, and also detect which
2141 * CPUs are in dyntick-idle mode.
2142 */
2143static void force_quiescent_state(struct rcu_state *rsp)
2144{
2145        unsigned long flags;
2146        bool ret;
2147        struct rcu_node *rnp;
2148        struct rcu_node *rnp_old = NULL;
2149
2150        /* Funnel through hierarchy to reduce memory contention. */
2151        rnp = per_cpu_ptr(rsp->rda, raw_smp_processor_id())->mynode;
2152        for (; rnp != NULL; rnp = rnp->parent) {
2153                ret = (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
2154                      !raw_spin_trylock(&rnp->fqslock);
2155                if (rnp_old != NULL)
2156                        raw_spin_unlock(&rnp_old->fqslock);
2157                if (ret) {
2158                        rsp->n_force_qs_lh++;
2159                        return;
2160                }
2161                rnp_old = rnp;
2162        }
2163        /* rnp_old == rcu_get_root(rsp), rnp == NULL. */
2164
2165        /* Reached the root of the rcu_node tree, acquire lock. */
2166        raw_spin_lock_irqsave(&rnp_old->lock, flags);
2167        raw_spin_unlock(&rnp_old->fqslock);
2168        if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
2169                rsp->n_force_qs_lh++;
2170                raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
2171                return;  /* Someone beat us to it. */
2172        }
2173        rsp->gp_flags |= RCU_GP_FLAG_FQS;
2174        raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
2175        wake_up(&rsp->gp_wq);  /* Memory barrier implied by wake_up() path. */
2176}
2177
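/*
 * Illustrative sketch (hypothetical names, simplified from the function
 * above): funnel locking has each would-be forcer trylock its way from its
 * leaf node toward the root, holding at most one ->fqslock at a time.  A
 * failed trylock, or seeing that forcing has already been requested, means
 * that another task is already funneling its way up and will do the work,
 * so the loser simply returns:
 *
 *	struct funnel_node *n, *held = NULL;
 *
 *	for (n = my_leaf; n != NULL; n = n->parent) {
 *		bool lose = forcing_already_requested() ||
 *			    !raw_spin_trylock(&n->lock);
 *		if (held != NULL)
 *			raw_spin_unlock(&held->lock);
 *		if (lose)
 *			return;
 *		held = n;
 *	}
 *	do_work_at_root();
 *	raw_spin_unlock(&held->lock);
 *
 * At most one task per subtree survives each level, so contention on the
 * root's lock stays low no matter how many CPUs simultaneously decide that
 * quiescent states need forcing.
 */
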
2178/*
2179 * This does the RCU core processing work for the specified rcu_state
2180 * and rcu_data structures.  This may be called only from the CPU to
2181 * whom the rdp belongs.
2182 */
2183static void
2184__rcu_process_callbacks(struct rcu_state *rsp)
2185{
2186        unsigned long flags;
2187        struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
2188
2189        WARN_ON_ONCE(rdp->beenonline == 0);
2190
2191        /* Update RCU state based on any recent quiescent states. */
2192        rcu_check_quiescent_state(rsp, rdp);
2193
2194        /* Does this CPU require a not-yet-started grace period? */
2195        local_irq_save(flags);
2196        if (cpu_needs_another_gp(rsp, rdp)) {
2197                raw_spin_lock(&rcu_get_root(rsp)->lock); /* irqs disabled. */
2198                rcu_start_gp(rsp);
2199                raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
2200        } else {
2201                local_irq_restore(flags);
2202        }
2203
2204        /* If there are callbacks ready, invoke them. */
2205        if (cpu_has_callbacks_ready_to_invoke(rdp))
2206                invoke_rcu_callbacks(rsp, rdp);
2207}
2208
2209/*
2210 * Do RCU core processing for the current CPU.
2211 */
2212static void rcu_process_callbacks(struct softirq_action *unused)
2213{
2214        struct rcu_state *rsp;
2215
2216        if (cpu_is_offline(smp_processor_id()))
2217                return;
2218        trace_rcu_utilization(TPS("Start RCU core"));
2219        for_each_rcu_flavor(rsp)
2220                __rcu_process_callbacks(rsp);
2221        trace_rcu_utilization(TPS("End RCU core"));
2222}
2223
2224/*
2225 * Schedule RCU callback invocation.  If the specified type of RCU
2226 * does not support RCU priority boosting, just do a direct call,
2227 * otherwise wake up the per-CPU kernel kthread.  Note that because we
2228 * are running on the current CPU with interrupts disabled, the
2229 * rcu_cpu_kthread_task cannot disappear out from under us.
2230 */
2231static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
2232{
2233        if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
2234                return;
2235        if (likely(!rsp->boost)) {
2236                rcu_do_batch(rsp, rdp);
2237                return;
2238        }
2239        invoke_rcu_callbacks_kthread();
2240}
2241
2242static void invoke_rcu_core(void)
2243{
2244        if (cpu_online(smp_processor_id()))
2245                raise_softirq(RCU_SOFTIRQ);
2246}
2247
2248/*
2249 * Handle any core-RCU processing required by a call_rcu() invocation.
2250 */
2251static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
2252                            struct rcu_head *head, unsigned long flags)
2253{
2254        /*
2255         * If called from an extended quiescent state, invoke the RCU
2256         * core in order to force a re-evaluation of RCU's idleness.
2257         */
2258        if (rcu_is_cpu_idle() && cpu_online(smp_processor_id()))
2259                invoke_rcu_core();
2260
2261        /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2262        if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2263                return;
2264
2265        /*
2266         * Force the grace period if too many callbacks or too long waiting.
2267         * Enforce hysteresis, and don't invoke force_quiescent_state()
2268         * if some other CPU has recently done so.  Also, don't bother
2269         * invoking force_quiescent_state() if the newly enqueued callback
2270         * is the only one waiting for a grace period to complete.
2271         */
2272        if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
2273
2274                /* Are we ignoring a completed grace period? */
2275                note_gp_changes(rsp, rdp);
2276
2277                /* Start a new grace period if one not already started. */
2278                if (!rcu_gp_in_progress(rsp)) {
2279                        struct rcu_node *rnp_root = rcu_get_root(rsp);
2280
2281                        raw_spin_lock(&rnp_root->lock);
2282                        rcu_start_gp(rsp);
2283                        raw_spin_unlock(&rnp_root->lock);
2284                } else {
2285                        /* Give the grace period a kick. */
2286                        rdp->blimit = LONG_MAX;
2287                        if (rsp->n_force_qs == rdp->n_force_qs_snap &&
2288                            *rdp->nxttail[RCU_DONE_TAIL] != head)
2289                                force_quiescent_state(rsp);
2290                        rdp->n_force_qs_snap = rsp->n_force_qs;
2291                        rdp->qlen_last_fqs_check = rdp->qlen;
2292                }
2293        }
2294}
2295
2296/*
2297 * RCU callback function to leak a callback.
2298 */
2299static void rcu_leak_callback(struct rcu_head *rhp)
2300{
2301}
2302
2303/*
2304 * Helper function for call_rcu() and friends.  The cpu argument will
2305 * normally be -1, indicating "currently running CPU".  It may specify
2306 * a CPU only if that CPU is a no-CBs CPU.  Currently, only _rcu_barrier()
2307 * is expected to specify a CPU.
2308 */
2309static void
2310__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
2311           struct rcu_state *rsp, int cpu, bool lazy)
2312{
2313        unsigned long flags;
2314        struct rcu_data *rdp;
2315
2316        WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
2317        if (debug_rcu_head_queue(head)) {
2318                /* Probable double call_rcu(), so leak the callback. */
2319                ACCESS_ONCE(head->func) = rcu_leak_callback;
2320                WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
2321                return;
2322        }
2323        head->func = func;
2324        head->next = NULL;
2325
2326        /*
2327         * Opportunistically note grace-period endings and beginnings.
2328         * Note that we might see a beginning right after we see an
2329         * end, but never vice versa, since this CPU has to pass through
2330         * a quiescent state betweentimes.
2331         */
2332        local_irq_save(flags);
2333        rdp = this_cpu_ptr(rsp->rda);
2334
2335        /* Add the callback to our list. */
2336        if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL) || cpu != -1) {
2337                int offline;
2338
2339                if (cpu != -1)
2340                        rdp = per_cpu_ptr(rsp->rda, cpu);
2341                offline = !__call_rcu_nocb(rdp, head, lazy);
2342                WARN_ON_ONCE(offline);
2343                /* __call_rcu() is illegal on offline CPU; leak the callback. */
2344                local_irq_restore(flags);
2345                return;
2346        }
2347        ACCESS_ONCE(rdp->qlen)++;
2348        if (lazy)
2349                rdp->qlen_lazy++;
2350        else
2351                rcu_idle_count_callbacks_posted();
2352        smp_mb();  /* Count before adding callback for rcu_barrier(). */
2353        *rdp->nxttail[RCU_NEXT_TAIL] = head;
2354        rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
2355
2356        if (__is_kfree_rcu_offset((unsigned long)func))
2357                trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
2358                                         rdp->qlen_lazy, rdp->qlen);
2359        else
2360                trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
2361
2362        /* Go handle any RCU core processing required. */
2363        __call_rcu_core(rsp, rdp, head, flags);
2364        local_irq_restore(flags);
2365}
2366
2367/*
2368 * Queue an RCU-sched callback for invocation after a grace period.
2369 */
2370void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
2371{
2372        __call_rcu(head, func, &rcu_sched_state, -1, 0);
2373}
2374EXPORT_SYMBOL_GPL(call_rcu_sched);
2375
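/*
 * Illustrative usage sketch (hypothetical structure and function names;
 * assumes <linux/slab.h> and <linux/rculist.h>): a typical caller embeds an
 * rcu_head in its data structure, unlinks the structure from all
 * reader-visible pointers, and hands reclamation off to call_rcu_sched():
 *
 *	struct foo {
 *		struct list_head list;
 *		struct rcu_head rcu;
 *		int data;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rcu));
 *	}
 *
 *	static void foo_remove(struct foo *fp)
 *	{
 *		list_del_rcu(&fp->list);
 *		call_rcu_sched(&fp->rcu, foo_reclaim);
 *	}
 *
 * The callback runs only after every CPU has passed through a context
 * switch, the idle loop, or user mode, so no preempt_disable()-style reader
 * can still hold a reference when the structure is freed.
 */
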
2376/*
2377 * Queue an RCU callback for invocation after a quicker grace period.
2378 */
2379void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
2380{
2381        __call_rcu(head, func, &rcu_bh_state, -1, 0);
2382}
2383EXPORT_SYMBOL_GPL(call_rcu_bh);
2384
2385/*
2386 * Because a context switch is a grace period for RCU-sched and RCU-bh,
2387 * any blocking grace-period wait automatically implies a grace period
2388 * if there is only one CPU online at any point in time during execution
2389 * of either synchronize_sched() or synchronize_rcu_bh().  It is OK to
2390 * occasionally incorrectly indicate that there are multiple CPUs online
2391 * when there was in fact only one the whole time, as this just adds
2392 * some overhead: RCU still operates correctly.
2393 */
2394static inline int rcu_blocking_is_gp(void)
2395{
2396        int ret;
2397
2398        might_sleep();  /* Check for RCU read-side critical section. */
2399        preempt_disable();
2400        ret = num_online_cpus() <= 1;
2401        preempt_enable();
2402        return ret;
2403}
2404
2405/**
2406 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
2407 *
2408 * Control will return to the caller some time after a full rcu-sched
2409 * grace period has elapsed, in other words after all currently executing
2410 * rcu-sched read-side critical sections have completed.   These read-side
2411 * critical sections are delimited by rcu_read_lock_sched() and
2412 * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
2413 * local_irq_disable(), and so on may be used in place of
2414 * rcu_read_lock_sched().
2415 *
2416 * This means that all preempt_disable code sequences, including NMI and
2417 * non-threaded hardware-interrupt handlers, in progress on entry will
2418 * have completed before this primitive returns.  However, this does not
2419 * guarantee that softirq handlers will have completed, since in some
2420 * kernels, these handlers can run in process context, and can block.
2421 *
2422 * Note that this guarantee implies further memory-ordering guarantees.
2423 * On systems with more than one CPU, when synchronize_sched() returns,
2424 * each CPU is guaranteed to have executed a full memory barrier since the
2425 * end of its last RCU-sched read-side critical section whose beginning
2426 * preceded the call to synchronize_sched().  In addition, each CPU having
2427 * an RCU read-side critical section that extends beyond the return from
2428 * synchronize_sched() is guaranteed to have executed a full memory barrier
2429 * after the beginning of synchronize_sched() and before the beginning of
2430 * that RCU read-side critical section.  Note that these guarantees include
2431 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
2432 * that are executing in the kernel.
2433 *
2434 * Furthermore, if CPU A invoked synchronize_sched(), which returned
2435 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
2436 * to have executed a full memory barrier during the execution of
2437 * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
2438 * again only if the system has more than one CPU).
2439 *
2440 * This primitive provides the guarantees made by the (now removed)
2441 * synchronize_kernel() API.  In contrast, synchronize_rcu() only
2442 * guarantees that rcu_read_lock() sections will have completed.
2443 * In "classic RCU", these two guarantees happen to be one and
2444 * the same, but can differ in realtime RCU implementations.
2445 */
2446void synchronize_sched(void)
2447{
2448        rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
2449                           !lock_is_held(&rcu_lock_map) &&
2450                           !lock_is_held(&rcu_sched_lock_map),
2451                           "Illegal synchronize_sched() in RCU-sched read-side critical section");
2452        if (rcu_blocking_is_gp())
2453                return;
2454        if (rcu_expedited)
2455                synchronize_sched_expedited();
2456        else
2457                wait_rcu_gp(call_rcu_sched);
2458}
2459EXPORT_SYMBOL_GPL(synchronize_sched);
2460
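/*
 * Illustrative usage sketch (hypothetical names; assumes <linux/slab.h>):
 * an updater that prefers blocking to callbacks can publish a new version
 * of a structure, wait for all pre-existing preempt-disabled readers, and
 * only then free the old version:
 *
 *	struct config *old;
 *
 *	old = rcu_dereference_protected(global_config,
 *					lockdep_is_held(&config_mutex));
 *	rcu_assign_pointer(global_config, new_config);
 *	synchronize_sched();
 *	kfree(old);
 *
 * Readers access global_config under preempt_disable() or
 * rcu_read_lock_sched() and are guaranteed to see either the old or the new
 * version, never freed memory.
 */
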
2461/**
2462 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
2463 *
2464 * Control will return to the caller some time after a full rcu_bh grace
2465 * period has elapsed, in other words after all currently executing rcu_bh
2466 * read-side critical sections have completed.  RCU read-side critical
2467 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
2468 * and may be nested.
2469 *
2470 * See the description of synchronize_sched() for more detailed information
2471 * on memory ordering guarantees.
2472 */
2473void synchronize_rcu_bh(void)
2474{
2475        rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
2476                           !lock_is_held(&rcu_lock_map) &&
2477                           !lock_is_held(&rcu_sched_lock_map),
2478                           "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
2479        if (rcu_blocking_is_gp())
2480                return;
2481        if (rcu_expedited)
2482                synchronize_rcu_bh_expedited();
2483        else
2484                wait_rcu_gp(call_rcu_bh);
2485}
2486EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
2487
2488static int synchronize_sched_expedited_cpu_stop(void *data)
2489{
2490        /*
2491         * There must be a full memory barrier on each affected CPU
2492         * between the time that try_stop_cpus() is called and the
2493         * time that it returns.
2494         *
2495         * In the current initial implementation of cpu_stop, the
2496         * above condition is already met when the control reaches
2497         * this point and the following smp_mb() is not strictly
2498         * necessary.  Do smp_mb() anyway for documentation and
2499         * robustness against future implementation changes.
2500         */
2501        smp_mb(); /* See above comment block. */
2502        return 0;
2503}
2504
2505/**
2506 * synchronize_sched_expedited - Brute-force RCU-sched grace period
2507 *
2508 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
2509 * approach to force the grace period to end quickly.  This consumes
2510 * significant time on all CPUs and is unfriendly to real-time workloads,
2511 * so is thus not recommended for any sort of common-case code.  In fact,
2512 * if you are using synchronize_sched_expedited() in a loop, please
2513 * restructure your code to batch your updates, and then use a single
2514 * synchronize_sched() instead.
2515 *
2516 * Note that it is illegal to call this function while holding any lock
2517 * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
2518 * to call this function from a CPU-hotplug notifier.  Failing to observe
2519 * these restrictions will result in deadlock.
2520 *
2521 * This implementation can be thought of as an application of ticket
2522 * locking to RCU, with the ->expedited_start and ->expedited_done
2523 * counters taking on the roles of the halves of the ticket-lock word.
2524 * Each task atomically increments ->expedited_start upon entry,
2525 * snapshotting the old value, then attempts to stop all the CPUs.  If
2526 * this succeeds, then each CPU will have executed a context switch,
2527 * resulting in an RCU-sched grace period.  We are then done, so we use
2528 * atomic_long_cmpxchg() to update ->expedited_done to match our
2529 * snapshot -- but
2530 * only if someone else has not already advanced past our snapshot.
2531 *
2532 * On the other hand, if try_stop_cpus() fails, we check the value
2533 * of ->expedited_done.  If it has advanced past our
2534 * initial snapshot, then someone else must have forced a grace period
2535 * some time after we took our snapshot.  In this case, our work is
2536 * done for us, and we can simply return.  Otherwise, we try again,
2537 * but keep our initial snapshot for purposes of checking for someone
2538 * doing our work for us.
2539 *
2540 * If we fail too many times in a row, we fall back to synchronize_sched().
2541 */
2542void synchronize_sched_expedited(void)
2543{
2544        long firstsnap, s, snap;
2545        int trycount = 0;
2546        struct rcu_state *rsp = &rcu_sched_state;
2547
2548        /*
2549         * If we are in danger of counter wrap, just do synchronize_sched().
2550         * By allowing ->expedited_start to advance no more than
2551         * ULONG_MAX/8 ahead of ->expedited_done, we are ensuring
2552         * that more than 3.5 billion CPUs would be required to force a
2553         * counter wrap on a 32-bit system.  Quite a few more CPUs would of
2554         * course be required on a 64-bit system.
2555         */
2556        if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
2557                         (ulong)atomic_long_read(&rsp->expedited_done) +
2558                         ULONG_MAX / 8)) {
2559                synchronize_sched();
2560                atomic_long_inc(&rsp->expedited_wrap);
2561                return;
2562        }
2563
2564        /*
2565         * Take a ticket.  Note that atomic_inc_return() implies a
2566         * full memory barrier.
2567         */
2568        snap = atomic_long_inc_return(&rsp->expedited_start);
2569        firstsnap = snap;
2570        get_online_cpus();
2571        WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
2572
2573        /*
2574         * Each pass through the following loop attempts to force a
2575         * context switch on each CPU.
2576         */
2577        while (try_stop_cpus(cpu_online_mask,
2578                             synchronize_sched_expedited_cpu_stop,
2579                             NULL) == -EAGAIN) {
2580                put_online_cpus();
2581                atomic_long_inc(&rsp->expedited_tryfail);
2582
2583                /* Check to see if someone else did our work for us. */
2584                s = atomic_long_read(&rsp->expedited_done);
2585                if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
2586                        /* ensure test happens before caller kfree */
2587                        smp_mb__before_atomic_inc(); /* ^^^ */
2588                        atomic_long_inc(&rsp->expedited_workdone1);
2589                        return;
2590                }
2591
2592                /* No joy, try again later.  Or just synchronize_sched(). */
2593                if (trycount++ < 10) {
2594                        udelay(trycount * num_online_cpus());
2595                } else {
2596                        wait_rcu_gp(call_rcu_sched);
2597                        atomic_long_inc(&rsp->expedited_normal);
2598                        return;
2599                }
2600
2601                /* Recheck to see if someone else did our work for us. */
2602                s = atomic_long_read(&rsp->expedited_done);
2603                if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
2604                        /* ensure test happens before caller kfree */
2605                        smp_mb__before_atomic_inc(); /* ^^^ */
2606                        atomic_long_inc(&rsp->expedited_workdone2);
2607                        return;
2608                }
2609
2610                /*
2611                 * Refetching ->expedited_start allows later
2612                 * callers to piggyback on our grace period.  We retry
2613                 * after they started, so our grace period works for them,
2614                 * and they started after our first try, so their grace
2615                 * period works for us.
2616                 */
2617                get_online_cpus();
2618                snap = atomic_long_read(&rsp->expedited_start);
2619                smp_mb(); /* ensure read is before try_stop_cpus(). */
2620        }
2621        atomic_long_inc(&rsp->expedited_stoppedcpus);
2622
2623        /*
2624         * Everyone up to our most recent fetch is covered by our grace
2625         * period.  Update the counter, but only if our work is still
2626         * relevant -- which it won't be if someone who started later
2627         * than we did already did their update.
2628         */
2629        do {
2630                atomic_long_inc(&rsp->expedited_done_tries);
2631                s = atomic_long_read(&rsp->expedited_done);
2632                if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
2633                        /* ensure test happens before caller kfree */
2634                        smp_mb__before_atomic_inc(); /* ^^^ */
2635                        atomic_long_inc(&rsp->expedited_done_lost);
2636                        break;
2637                }
2638        } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
2639        atomic_long_inc(&rsp->expedited_done_exit);
2640
2641        put_online_cpus();
2642}
2643EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
2644
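/*
 * Illustrative sketch (simplified; force_context_switches() is a
 * hypothetical stand-in for the try_stop_cpus() machinery): stripped of the
 * retry and wrap-protection logic, the ->expedited_start/->expedited_done
 * ticket scheme above boils down to:
 *
 *	snap = atomic_long_inc_return(&rsp->expedited_start);
 *	force_context_switches();
 *	do {
 *		s = atomic_long_read(&rsp->expedited_done);
 *		if (ULONG_CMP_GE((ulong)s, (ulong)snap))
 *			break;
 *	} while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
 *
 * Advancing ->expedited_done to our ticket tells every task holding an
 * earlier ticket that a full expedited grace period has elapsed since it
 * started, so those tasks can return without stopping the CPUs themselves.
 */
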
2645/*
2646 * Check to see if there is any immediate RCU-related work to be done
2647 * by the current CPU, for the specified type of RCU, returning 1 if so.
2648 * The checks are in order of increasing expense: checks that can be
2649 * carried out against CPU-local state are performed first.  However,
2650 * we must check for CPU stalls first, else we might not get a chance.
2651 */
2652static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
2653{
2654        struct rcu_node *rnp = rdp->mynode;
2655
2656        rdp->n_rcu_pending++;
2657
2658        /* Check for CPU stalls, if enabled. */
2659        check_cpu_stall(rsp, rdp);
2660
2661        /* Is the RCU core waiting for a quiescent state from this CPU? */
2662        if (rcu_scheduler_fully_active &&
2663            rdp->qs_pending && !rdp->passed_quiesce) {
2664                rdp->n_rp_qs_pending++;
2665        } else if (rdp->qs_pending && rdp->passed_quiesce) {
2666                rdp->n_rp_report_qs++;
2667                return 1;
2668        }
2669
2670        /* Does this CPU have callbacks ready to invoke? */
2671        if (cpu_has_callbacks_ready_to_invoke(rdp)) {
2672                rdp->n_rp_cb_ready++;
2673                return 1;
2674        }
2675
2676        /* Has RCU gone idle with this CPU needing another grace period? */
2677        if (cpu_needs_another_gp(rsp, rdp)) {
2678                rdp->n_rp_cpu_needs_gp++;
2679                return 1;
2680        }
2681
2682        /* Has another RCU grace period completed?  */
2683        if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
2684                rdp->n_rp_gp_completed++;
2685                return 1;
2686        }
2687
2688        /* Has a new RCU grace period started? */
2689        if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
2690                rdp->n_rp_gp_started++;
2691                return 1;
2692        }
2693
2694        /* nothing to do */
2695        rdp->n_rp_need_nothing++;
2696        return 0;
2697}
2698
2699/*
2700 * Check to see if there is any immediate RCU-related work to be done
2701 * by the current CPU, returning 1 if so.  This function is part of the
2702 * RCU implementation; it is -not- an exported member of the RCU API.
2703 */
2704static int rcu_pending(int cpu)
2705{
2706        struct rcu_state *rsp;
2707
2708        for_each_rcu_flavor(rsp)
2709                if (__rcu_pending(rsp, per_cpu_ptr(rsp->rda, cpu)))
2710                        return 1;
2711        return 0;
2712}
2713
2714/*
2715 * Return true if the specified CPU has any callback.  If all_lazy is
2716 * non-NULL, store an indication of whether all callbacks are lazy.
2717 * (If there are no callbacks, all of them are deemed to be lazy.)
2718 */
2719static int rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
2720{
2721        bool al = true;
2722        bool hc = false;
2723        struct rcu_data *rdp;
2724        struct rcu_state *rsp;
2725
2726        for_each_rcu_flavor(rsp) {
2727                rdp = per_cpu_ptr(rsp->rda, cpu);
2728                if (rdp->qlen != rdp->qlen_lazy)
2729                        al = false;
2730                if (rdp->nxtlist)
2731                        hc = true;
2732        }
2733        if (all_lazy)
2734                *all_lazy = al;
2735        return hc;
2736}
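
/*
 * Illustrative sketch (not part of rcutree.c): the kind of decision the
 * dyntick-idle code makes based on rcu_cpu_has_callbacks().  The helper
 * rcu_try_long_idle() below is hypothetical; the real consumers live in
 * rcutree_plugin.h (for example, rcu_needs_cpu()).
 */
#if 0   /* example only, not compiled as part of this file */
static bool rcu_try_long_idle(int cpu)
{
        bool all_lazy;

        if (!rcu_cpu_has_callbacks(cpu, &all_lazy))
                return true;    /* No callbacks, so sleep indefinitely. */
        if (all_lazy)
                return true;    /* Only lazy (kfree_rcu()) callbacks. */
        return false;           /* Non-lazy callbacks, so keep the tick. */
}
#endif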
2737
2738/*
2739 * Helper function for _rcu_barrier() tracing.  If tracing is disabled,
2740 * the compiler is expected to optimize this away.
2741 */
2742static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
2743                               int cpu, unsigned long done)
2744{
2745        trace_rcu_barrier(rsp->name, s, cpu,
2746                          atomic_read(&rsp->barrier_cpu_count), done);
2747}
2748
2749/*
2750 * RCU callback function for _rcu_barrier().  If we are last, wake
2751 * up the task executing _rcu_barrier().
2752 */
2753static void rcu_barrier_callback(struct rcu_head *rhp)
2754{
2755        struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
2756        struct rcu_state *rsp = rdp->rsp;
2757
2758        if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
2759                _rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
2760                complete(&rsp->barrier_completion);
2761        } else {
2762                _rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
2763        }
2764}
2765
2766/*
2767 * Called with preemption disabled, and from cross-cpu IRQ context.
2768 */
2769static void rcu_barrier_func(void *type)
2770{
2771        struct rcu_state *rsp = type;
2772        struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
2773
2774        _rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
2775        atomic_inc(&rsp->barrier_cpu_count);
2776        rsp->call(&rdp->barrier_head, rcu_barrier_callback);
2777}
2778
2779/*
2780 * Orchestrate the specified type of RCU barrier, waiting for all
2781 * RCU callbacks of the specified type to complete.
2782 */
2783static void _rcu_barrier(struct rcu_state *rsp)
2784{
2785        int cpu;
2786        struct rcu_data *rdp;
2787        unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done);
2788        unsigned long snap_done;
2789
2790        _rcu_barrier_trace(rsp, "Begin", -1, snap);
2791
2792        /* Take mutex to serialize concurrent rcu_barrier() requests. */
2793        mutex_lock(&rsp->barrier_mutex);
2794
2795        /*
2796         * Ensure that all prior references, including to ->n_barrier_done,
2797         * are ordered before the _rcu_barrier() machinery.
2798         */
2799        smp_mb();  /* See above block comment. */
2800
2801        /*
2802         * Recheck ->n_barrier_done to see if others did our work for us.
2803         * This means checking ->n_barrier_done for an even-to-odd-to-even
2804         * transition.  The "if" expression below therefore rounds the old
2805         * value up to the next even number and adds two before comparing.
2806         */
2807        snap_done = rsp->n_barrier_done;
2808        _rcu_barrier_trace(rsp, "Check", -1, snap_done);
2809
2810        /*
2811         * If the value in snap is odd, we needed to wait for the current
2812         * rcu_barrier() to complete, then wait for the next one, in other
2813         * words, we need the value of snap_done to be three larger than
2814         * the value of snap.  On the other hand, if the value in snap is
2815         * even, we only had to wait for the next rcu_barrier() to complete,
2816         * in other words, we need the value of snap_done to be only two
2817         * greater than the value of snap.  The "(snap + 3) & ~0x1" computes
2818         * this for us (thank you, Linus!).
2819         */
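        /*
         * Worked example: if snap == 4 (even), (4 + 3) & ~0x1 == 6 == snap + 2,
         * while if snap == 5 (odd), (5 + 3) & ~0x1 == 8 == snap + 3.
         */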
2820        if (ULONG_CMP_GE(snap_done, (snap + 3) & ~0x1)) {
2821                _rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
2822                smp_mb(); /* caller's subsequent code after above check. */
2823                mutex_unlock(&rsp->barrier_mutex);
2824                return;
2825        }
2826
2827        /*
2828         * Increment ->n_barrier_done to avoid duplicate work.  Use
2829         * ACCESS_ONCE() to prevent the compiler from speculating
2830         * the increment to precede the early-exit check.
2831         */
2832        ACCESS_ONCE(rsp->n_barrier_done)++;
2833        WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
2834        _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
2835        smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
2836
2837        /*
2838         * Initialize the count to one rather than to zero in order to
2839         * avoid a too-soon return to zero in case of a short grace period
2840         * (or preemption of this task).  Exclude CPU-hotplug operations
2841         * to ensure that no offline CPU has callbacks queued.
2842         */
2843        init_completion(&rsp->barrier_completion);
2844        atomic_set(&rsp->barrier_cpu_count, 1);
2845        get_online_cpus();
2846
2847        /*
2848         * Force each CPU with callbacks to register a new callback.
2849         * When that callback is invoked, we will know that all of the
2850         * corresponding CPU's preceding callbacks have been invoked.
2851         */
2852        for_each_possible_cpu(cpu) {
2853                if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
2854                        continue;
2855                rdp = per_cpu_ptr(rsp->rda, cpu);
2856                if (rcu_is_nocb_cpu(cpu)) {
2857                        _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
2858                                           rsp->n_barrier_done);
2859                        atomic_inc(&rsp->barrier_cpu_count);
2860                        __call_rcu(&rdp->barrier_head, rcu_barrier_callback,
2861                                   rsp, cpu, 0);
2862                } else if (ACCESS_ONCE(rdp->qlen)) {
2863                        _rcu_barrier_trace(rsp, "OnlineQ", cpu,
2864                                           rsp->n_barrier_done);
2865                        smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
2866                } else {
2867                        _rcu_barrier_trace(rsp, "OnlineNQ", cpu,
2868                                           rsp->n_barrier_done);
2869                }
2870        }
2871        put_online_cpus();
2872
2873        /*
2874         * Now that an rcu_barrier_callback() callback has been posted on
2875         * each relevant CPU, and each such CPU counted, remove the initial count.
2876         */
2877        if (atomic_dec_and_test(&rsp->barrier_cpu_count))
2878                complete(&rsp->barrier_completion);
2879
2880        /* Increment ->n_barrier_done to prevent duplicate work. */
2881        smp_mb(); /* Keep increment after above mechanism. */
2882        ACCESS_ONCE(rsp->n_barrier_done)++;
2883        WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
2884        _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
2885        smp_mb(); /* Keep increment before caller's subsequent code. */
2886
2887        /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
2888        wait_for_completion(&rsp->barrier_completion);
2889
2890        /* Other rcu_barrier() invocations can now safely proceed. */
2891        mutex_unlock(&rsp->barrier_mutex);
2892}
2893
2894/**
2895 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
2896 */
2897void rcu_barrier_bh(void)
2898{
2899        _rcu_barrier(&rcu_bh_state);
2900}
2901EXPORT_SYMBOL_GPL(rcu_barrier_bh);
2902
2903/**
2904 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
2905 */
2906void rcu_barrier_sched(void)
2907{
2908        _rcu_barrier(&rcu_sched_state);
2909}
2910EXPORT_SYMBOL_GPL(rcu_barrier_sched);
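
/*
 * Illustrative sketch (not part of rcutree.c): the canonical use of the
 * rcu_barrier*() family is a module-exit path, which must wait for all
 * of the module's outstanding callbacks to be invoked before the module
 * text and data disappear.  The names foo_head, foo_reclaim(), and
 * foo_exit() are hypothetical.
 */
#if 0   /* example only, not compiled as part of this file */
static struct rcu_head foo_head;

static void foo_reclaim(struct rcu_head *rhp)
{
        /* Release whatever state foo_head was protecting. */
}

static void __exit foo_exit(void)
{
        call_rcu_sched(&foo_head, foo_reclaim);  /* Final enqueue. */
        rcu_barrier_sched(); /* Wait for foo_reclaim() and earlier callbacks. */
        /* Only now is it safe for the module to be unloaded. */
}
module_exit(foo_exit);
#endif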
2911
2912/*
2913 * Do boot-time initialization of a CPU's per-CPU RCU data.
2914 */
2915static void __init
2916rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
2917{
2918        unsigned long flags;
2919        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2920        struct rcu_node *rnp = rcu_get_root(rsp);
2921
2922        /* Set up local state, ensuring consistent view of global state. */
2923        raw_spin_lock_irqsave(&rnp->lock, flags);
2924        rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
2925        init_callback_list(rdp);
2926        rdp->qlen_lazy = 0;
2927        ACCESS_ONCE(rdp->qlen) = 0;
2928        rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
2929        WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
2930        WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
2931        rdp->cpu = cpu;
2932        rdp->rsp = rsp;
2933        rcu_boot_init_nocb_percpu_data(rdp);
2934        raw_spin_unlock_irqrestore(&rnp->lock, flags);
2935}
2936
2937/*
2938 * Initialize a CPU's per-CPU RCU data.  Note that only one online or
2939 * offline event can be happening at a given time.  Note also that we
2940 * can accept some slop in the rsp->completed access because this CPU
2941 * cannot possibly have any RCU callbacks in flight yet.
2942 */
2943static void
2944rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
2945{
2946        unsigned long flags;
2947        unsigned long mask;
2948        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2949        struct rcu_node *rnp = rcu_get_root(rsp);
2950
2951        /* Exclude new grace periods. */
2952        mutex_lock(&rsp->onoff_mutex);
2953
2954        /* Set up local state, ensuring consistent view of global state. */
2955        raw_spin_lock_irqsave(&rnp->lock, flags);
2956        rdp->beenonline = 1;     /* We have now been online. */
2957        rdp->preemptible = preemptible;
2958        rdp->qlen_last_fqs_check = 0;
2959        rdp->n_force_qs_snap = rsp->n_force_qs;
2960        rdp->blimit = blimit;
2961        init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
2962        rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
2963        rcu_sysidle_init_percpu_data(rdp->dynticks);
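        /* Force ->dynticks to an odd value, marking this CPU as non-idle. */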
2964        atomic_set(&rdp->dynticks->dynticks,
2965                   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
2966        raw_spin_unlock(&rnp->lock);            /* irqs remain disabled. */
2967
2968        /* Add CPU to rcu_node bitmasks. */
2969        rnp = rdp->mynode;
2970        mask = rdp->grpmask;
2971        do {
2972                /* Exclude any attempts to start a new GP on small systems. */
2973                raw_spin_lock(&rnp->lock);      /* irqs already disabled. */
2974                rnp->qsmaskinit |= mask;
2975                mask = rnp->grpmask;
2976                if (rnp == rdp->mynode) {
2977                        /*
2978                         * If there is a grace period in progress, we will
2979                         * set up to wait for it next time we run the
2980                         * RCU core code.
2981                         */
2982                        rdp->gpnum = rnp->completed;
2983                        rdp->completed = rnp->completed;
2984                        rdp->passed_quiesce = 0;
2985                        rdp->qs_pending = 0;
2986                        trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
2987                }
2988                raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
2989                rnp = rnp->parent;
2990        } while (rnp != NULL && !(rnp->qsmaskinit & mask));
2991        local_irq_restore(flags);
2992
2993        mutex_unlock(&rsp->onoff_mutex);
2994}
2995
2996static void rcu_prepare_cpu(int cpu)
2997{
2998        struct rcu_state *rsp;
2999
3000        for_each_rcu_flavor(rsp)
3001                rcu_init_percpu_data(cpu, rsp,
3002                                     strcmp(rsp->name, "rcu_preempt") == 0);
3003}
3004
3005/*
3006 * Handle CPU online/offline notification events.
3007 */
3008static int rcu_cpu_notify(struct notifier_block *self,
3009                                    unsigned long action, void *hcpu)
3010{
3011        long cpu = (long)hcpu;
3012        struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
3013        struct rcu_node *rnp = rdp->mynode;
3014        struct rcu_state *rsp;
3015
3016        trace_rcu_utilization(TPS("Start CPU hotplug"));
3017        switch (action) {
3018        case CPU_UP_PREPARE:
3019        case CPU_UP_PREPARE_FROZEN:
3020                rcu_prepare_cpu(cpu);
3021                rcu_prepare_kthreads(cpu);
3022                break;
3023        case CPU_ONLINE:
3024        case CPU_DOWN_FAILED:
3025                rcu_boost_kthread_setaffinity(rnp, -1);
3026                break;
3027        case CPU_DOWN_PREPARE:
3028                rcu_boost_kthread_setaffinity(rnp, cpu);
3029                break;
3030        case CPU_DYING:
3031        case CPU_DYING_FROZEN:
3032                for_each_rcu_flavor(rsp)
3033                        rcu_cleanup_dying_cpu(rsp);
3034                break;
3035        case CPU_DEAD:
3036        case CPU_DEAD_FROZEN:
3037        case CPU_UP_CANCELED:
3038        case CPU_UP_CANCELED_FROZEN:
3039                for_each_rcu_flavor(rsp)
3040                        rcu_cleanup_dead_cpu(cpu, rsp);
3041                break;
3042        default:
3043                break;
3044        }
3045        trace_rcu_utilization(TPS("End CPU hotplug"));
3046        return NOTIFY_OK;
3047}
3048
3049static int rcu_pm_notify(struct notifier_block *self,
3050                         unsigned long action, void *hcpu)
3051{
3052        switch (action) {
3053        case PM_HIBERNATION_PREPARE:
3054        case PM_SUSPEND_PREPARE:
3055                if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
3056                        rcu_expedited = 1;
3057                break;
3058        case PM_POST_HIBERNATION:
3059        case PM_POST_SUSPEND:
3060                rcu_expedited = 0;
3061                break;
3062        default:
3063                break;
3064        }
3065        return NOTIFY_OK;
3066}
3067
3068/*
3069 * Spawn the kthreads that handle each RCU flavor's grace periods.
3070 */
3071static int __init rcu_spawn_gp_kthread(void)
3072{
3073        unsigned long flags;
3074        struct rcu_node *rnp;
3075        struct rcu_state *rsp;
3076        struct task_struct *t;
3077
3078        for_each_rcu_flavor(rsp) {
3079                t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
3080                BUG_ON(IS_ERR(t));
3081                rnp = rcu_get_root(rsp);
3082                raw_spin_lock_irqsave(&rnp->lock, flags);
3083                rsp->gp_kthread = t;
3084                raw_spin_unlock_irqrestore(&rnp->lock, flags);
3085                rcu_spawn_nocb_kthreads(rsp);
3086        }
3087        return 0;
3088}
3089early_initcall(rcu_spawn_gp_kthread);
3090
3091/*
3092 * This function is invoked towards the end of the scheduler's initialization
3093 * process.  Before this is called, the idle task might contain
3094 * RCU read-side critical sections (during which time, this idle
3095 * task is booting the system).  After this function is called, the
3096 * idle tasks are prohibited from containing RCU read-side critical
3097 * sections.  This function also enables RCU lockdep checking.
3098 */
3099void rcu_scheduler_starting(void)
3100{
3101        WARN_ON(num_online_cpus() != 1);
3102        WARN_ON(nr_context_switches() > 0);
3103        rcu_scheduler_active = 1;
3104}
3105
3106/*
3107 * Compute the per-level fanout, either using the exact fanout specified
3108 * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
3109 */
3110#ifdef CONFIG_RCU_FANOUT_EXACT
3111static void __init rcu_init_levelspread(struct rcu_state *rsp)
3112{
3113        int i;
3114
3115        for (i = rcu_num_lvls - 1; i > 0; i--)
3116                rsp->levelspread[i] = CONFIG_RCU_FANOUT;
3117        rsp->levelspread[0] = rcu_fanout_leaf;
3118}
3119#else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
3120static void __init rcu_init_levelspread(struct rcu_state *rsp)
3121{
3122        int ccur;
3123        int cprv;
3124        int i;
3125
3126        cprv = nr_cpu_ids;
3127        for (i = rcu_num_lvls - 1; i >= 0; i--) {
3128                ccur = rsp->levelcnt[i];
3129                rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
3130                cprv = ccur;
3131        }
3132}
3133#endif /* #else #ifdef CONFIG_RCU_FANOUT_EXACT */
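
/*
 * Worked example for the balanced (non-EXACT) case: with a two-level tree,
 * rsp->levelcnt[] = { 1, 3 }, and nr_cpu_ids == 48, the loop above first
 * computes levelspread[1] = (48 + 3 - 1) / 3 = 16 CPUs per leaf rcu_node,
 * then levelspread[0] = (3 + 1 - 1) / 1 = 3 leaves under the root.  These
 * particular numbers are illustrative only.
 */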
3134
3135/*
3136 * Helper function for rcu_init() that initializes one rcu_state structure.
3137 */
3138static void __init rcu_init_one(struct rcu_state *rsp,
3139                struct rcu_data __percpu *rda)
3140{
3141        static char *buf[] = { "rcu_node_0",
3142                               "rcu_node_1",
3143                               "rcu_node_2",
3144                               "rcu_node_3" };  /* Match MAX_RCU_LVLS */
3145        static char *fqs[] = { "rcu_node_fqs_0",
3146                               "rcu_node_fqs_1",
3147                               "rcu_node_fqs_2",
3148                               "rcu_node_fqs_3" };  /* Match MAX_RCU_LVLS */
3149        int cpustride = 1;
3150        int i;
3151        int j;
3152        struct rcu_node *rnp;
3153
3154        BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
3155
3156        /* Silence gcc 4.8 warning about array index out of range. */
3157        if (rcu_num_lvls > RCU_NUM_LVLS)
3158                panic("rcu_init_one: rcu_num_lvls overflow");
3159
3160        /* Initialize the level-tracking arrays. */
3161
3162        for (i = 0; i < rcu_num_lvls; i++)
3163                rsp->levelcnt[i] = num_rcu_lvl[i];
3164        for (i = 1; i < rcu_num_lvls; i++)
3165                rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
3166        rcu_init_levelspread(rsp);
3167
3168        /* Initialize the elements themselves, starting from the leaves. */
3169
3170        for (i = rcu_num_lvls - 1; i >= 0; i--) {
3171                cpustride *= rsp->levelspread[i];
3172                rnp = rsp->level[i];
3173                for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
3174                        raw_spin_lock_init(&rnp->lock);
3175                        lockdep_set_class_and_name(&rnp->lock,
3176                                                   &rcu_node_class[i], buf[i]);
3177                        raw_spin_lock_init(&rnp->fqslock);
3178                        lockdep_set_class_and_name(&rnp->fqslock,
3179                                                   &rcu_fqs_class[i], fqs[i]);
3180                        rnp->gpnum = rsp->gpnum;
3181                        rnp->completed = rsp->completed;
3182                        rnp->qsmask = 0;
3183                        rnp->qsmaskinit = 0;
3184                        rnp->grplo = j * cpustride;
3185                        rnp->grphi = (j + 1) * cpustride - 1;
3186                        if (rnp->grphi >= NR_CPUS)
3187                                rnp->grphi = NR_CPUS - 1;
3188                        if (i == 0) {
3189                                rnp->grpnum = 0;
3190                                rnp->grpmask = 0;
3191                                rnp->parent = NULL;
3192                        } else {
3193                                rnp->grpnum = j % rsp->levelspread[i - 1];
3194                                rnp->grpmask = 1UL << rnp->grpnum;
3195                                rnp->parent = rsp->level[i - 1] +
3196                                              j / rsp->levelspread[i - 1];
3197                        }
3198                        rnp->level = i;
3199                        INIT_LIST_HEAD(&rnp->blkd_tasks);
3200                        rcu_init_one_nocb(rnp);
3201                }
3202        }
3203
3204        rsp->rda = rda;
3205        init_waitqueue_head(&rsp->gp_wq);
3206        init_irq_work(&rsp->wakeup_work, rsp_wakeup);
3207        rnp = rsp->level[rcu_num_lvls - 1];
3208        for_each_possible_cpu(i) {
3209                while (i > rnp->grphi)
3210                        rnp++;
3211                per_cpu_ptr(rsp->rda, i)->mynode = rnp;
3212                rcu_boot_init_percpu_data(i, rsp);
3213        }
3214        list_add(&rsp->flavors, &rcu_struct_flavors);
3215}
3216
3217/*
3218 * Compute the rcu_node tree geometry from kernel parameters.  This cannot
3219 * replace the definitions in rcutree.h because those are needed to size
3220 * the ->node array in the rcu_state structure.
3221 */
3222static void __init rcu_init_geometry(void)
3223{
3224        ulong d;
3225        int i;
3226        int j;
3227        int n = nr_cpu_ids;
3228        int rcu_capacity[MAX_RCU_LVLS + 1];
3229
3230        /*
3231         * Initialize any unspecified boot parameters.
3232         * The default values of jiffies_till_first_fqs and
3233         * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
3234         * value (a function of HZ), plus one jiffy for each
3235         * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
3236         */
3237        d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
3238        if (jiffies_till_first_fqs == ULONG_MAX)
3239                jiffies_till_first_fqs = d;
3240        if (jiffies_till_next_fqs == ULONG_MAX)
3241                jiffies_till_next_fqs = d;
3242
3243        /* If the compile-time values are accurate, just leave. */
3244        if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF &&
3245            nr_cpu_ids == NR_CPUS)
3246                return;
3247
3248        /*
3249         * Compute the number of CPUs that can be handled by an rcu_node tree
3250         * with the given number of levels.  Setting rcu_capacity[0] makes
3251         * some of the arithmetic easier.
3252         */
3253        rcu_capacity[0] = 1;
3254        rcu_capacity[1] = rcu_fanout_leaf;
3255        for (i = 2; i <= MAX_RCU_LVLS; i++)
3256                rcu_capacity[i] = rcu_capacity[i - 1] * CONFIG_RCU_FANOUT;
3257
3258        /*
3259         * The boot-time rcu_fanout_leaf parameter is only permitted
3260         * to increase the leaf-level fanout, not decrease it.  Of course,
3261         * the leaf-level fanout cannot exceed the number of bits in
3262         * the rcu_node masks.  Finally, the tree must be able to accommodate
3263         * the configured number of CPUs.  Complain and fall back to the
3264         * compile-time values if these limits are exceeded.
3265         */
3266        if (rcu_fanout_leaf < CONFIG_RCU_FANOUT_LEAF ||
3267            rcu_fanout_leaf > sizeof(unsigned long) * 8 ||
3268            n > rcu_capacity[MAX_RCU_LVLS]) {
3269                WARN_ON(1);
3270                return;
3271        }
3272
3273        /* Calculate the number of rcu_nodes at each level of the tree. */
3274        for (i = 1; i <= MAX_RCU_LVLS; i++)
3275                if (n <= rcu_capacity[i]) {
3276                        for (j = 0; j <= i; j++)
3277                                num_rcu_lvl[j] =
3278                                        DIV_ROUND_UP(n, rcu_capacity[i - j]);
3279                        rcu_num_lvls = i;
3280                        for (j = i + 1; j <= MAX_RCU_LVLS; j++)
3281                                num_rcu_lvl[j] = 0;
3282                        break;
3283                }
3284
3285        /* Calculate the total number of rcu_node structures. */
3286        rcu_num_nodes = 0;
3287        for (i = 0; i <= MAX_RCU_LVLS; i++)
3288                rcu_num_nodes += num_rcu_lvl[i];
3289        rcu_num_nodes -= n;
3290}
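
/*
 * Worked example (illustrative only): with rcu_fanout_leaf == 16,
 * CONFIG_RCU_FANOUT == 64, and nr_cpu_ids == 48, rcu_capacity[1] == 16
 * and rcu_capacity[2] == 1024, so a two-level tree suffices.  The loops
 * above then produce num_rcu_lvl[] == { 1, 3, 48, 0, ... }, giving
 * rcu_num_lvls == 2 and rcu_num_nodes == 1 + 3 + 48 - 48 == 4, that is,
 * one root plus three leaf rcu_node structures (the per-CPU count in
 * num_rcu_lvl[2] is removed by the final subtraction of n).
 */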
3291
3292void __init rcu_init(void)
3293{
3294        int cpu;
3295
3296        rcu_bootup_announce();
3297        rcu_init_geometry();
3298        rcu_init_one(&rcu_sched_state, &rcu_sched_data);
3299        rcu_init_one(&rcu_bh_state, &rcu_bh_data);
3300        __rcu_init_preempt();
3301        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
3302
3303        /*
3304         * We don't need protection against CPU-hotplug here because
3305         * this is called early in boot, before either interrupts
3306         * or the scheduler are operational.
3307         */
3308        cpu_notifier(rcu_cpu_notify, 0);
3309        pm_notifier(rcu_pm_notify, 0);
3310        for_each_online_cpu(cpu)
3311                rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
3312}
3313
3314#include "rcutree_plugin.h"
3315