linux/kernel/rcu/tree.c
   1/*
   2 * Read-Copy Update mechanism for mutual exclusion
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License as published by
   6 * the Free Software Foundation; either version 2 of the License, or
   7 * (at your option) any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, you can access it online at
  16 * http://www.gnu.org/licenses/gpl-2.0.html.
  17 *
  18 * Copyright IBM Corporation, 2008
  19 *
  20 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
  21 *          Manfred Spraul <manfred@colorfullife.com>
  22 *          Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
  23 *
  24 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
  25 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  26 *
  27 * For detailed explanation of Read-Copy Update mechanism see -
  28 *      Documentation/RCU
  29 */
  30#include <linux/types.h>
  31#include <linux/kernel.h>
  32#include <linux/init.h>
  33#include <linux/spinlock.h>
  34#include <linux/smp.h>
  35#include <linux/rcupdate_wait.h>
  36#include <linux/interrupt.h>
  37#include <linux/sched.h>
  38#include <linux/sched/debug.h>
  39#include <linux/nmi.h>
  40#include <linux/atomic.h>
  41#include <linux/bitops.h>
  42#include <linux/export.h>
  43#include <linux/completion.h>
  44#include <linux/moduleparam.h>
  45#include <linux/percpu.h>
  46#include <linux/notifier.h>
  47#include <linux/cpu.h>
  48#include <linux/mutex.h>
  49#include <linux/time.h>
  50#include <linux/kernel_stat.h>
  51#include <linux/wait.h>
  52#include <linux/kthread.h>
  53#include <uapi/linux/sched/types.h>
  54#include <linux/prefetch.h>
  55#include <linux/delay.h>
  56#include <linux/stop_machine.h>
  57#include <linux/random.h>
  58#include <linux/trace_events.h>
  59#include <linux/suspend.h>
  60#include <linux/ftrace.h>
  61
  62#include "tree.h"
  63#include "rcu.h"
  64
  65#ifdef MODULE_PARAM_PREFIX
  66#undef MODULE_PARAM_PREFIX
  67#endif
  68#define MODULE_PARAM_PREFIX "rcutree."
  69
  70/* Data structures. */
  71
  72/*
  73 * In order to export the rcu_state name to the tracing tools, it
  74 * needs to be added in the __tracepoint_string section.
  75 * This requires defining a separate variable tp_<sname>_varname
  76 * that points to the string being used, and this will allow
  77 * the tracing userspace tools to be able to decipher the string
  78 * address to the matching string.
  79 */
  80#ifdef CONFIG_TRACING
  81# define DEFINE_RCU_TPS(sname) \
  82static char sname##_varname[] = #sname; \
  83static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname;
  84# define RCU_STATE_NAME(sname) sname##_varname
  85#else
  86# define DEFINE_RCU_TPS(sname)
  87# define RCU_STATE_NAME(sname) __stringify(sname)
  88#endif
  89
  90#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
  91DEFINE_RCU_TPS(sname) \
  92static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
  93struct rcu_state sname##_state = { \
  94        .level = { &sname##_state.node[0] }, \
  95        .rda = &sname##_data, \
  96        .call = cr, \
  97        .gp_state = RCU_GP_IDLE, \
  98        .gpnum = 0UL - 300UL, \
  99        .completed = 0UL - 300UL, \
 100        .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
 101        .name = RCU_STATE_NAME(sname), \
 102        .abbr = sabbr, \
 103        .exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
 104        .exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
 105}
 106
 107RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
 108RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
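
     /*
      * For illustration, the invocation above for the rcu_sched flavor
      * expands roughly as follows (simplified sketch; the DEFINE_RCU_TPS()
      * tracepoint-string plumbing and the mutex initializers are omitted):
      *
      *   static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_sched_data);
      *   struct rcu_state rcu_sched_state = {
      *           .level = { &rcu_sched_state.node[0] },
      *           .rda = &rcu_sched_data,
      *           .call = call_rcu_sched,
      *           .gp_state = RCU_GP_IDLE,
      *           .gpnum = 0UL - 300UL,
      *           .completed = 0UL - 300UL,
      *           .name = "rcu_sched",
      *           .abbr = 's',
      *   };
      */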
 109
 110static struct rcu_state *const rcu_state_p;
 111LIST_HEAD(rcu_struct_flavors);
 112
 113/* Dump rcu_node combining tree at boot to verify correct setup. */
 114static bool dump_tree;
 115module_param(dump_tree, bool, 0444);
 116/* Control rcu_node-tree auto-balancing at boot time. */
 117static bool rcu_fanout_exact;
 118module_param(rcu_fanout_exact, bool, 0444);
 119/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
 120static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
 121module_param(rcu_fanout_leaf, int, 0444);
 122int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
 123/* Number of rcu_nodes at specified level. */
 124int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
 125int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
 126/* panic() on RCU Stall sysctl. */
 127int sysctl_panic_on_rcu_stall __read_mostly;
 128
 129/*
 130 * The rcu_scheduler_active variable is initialized to the value
  131 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before the
 132 * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
 133 * RCU can assume that there is but one task, allowing RCU to (for example)
 134 * optimize synchronize_rcu() to a simple barrier().  When this variable
 135 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 136 * to detect real grace periods.  This variable is also used to suppress
 137 * boot-time false positives from lockdep-RCU error checking.  Finally, it
 138 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 139 * is fully initialized, including all of its kthreads having been spawned.
 140 */
 141int rcu_scheduler_active __read_mostly;
 142EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 143
 144/*
 145 * The rcu_scheduler_fully_active variable transitions from zero to one
 146 * during the early_initcall() processing, which is after the scheduler
 147 * is capable of creating new tasks.  So RCU processing (for example,
 148 * creating tasks for RCU priority boosting) must be delayed until after
 149 * rcu_scheduler_fully_active transitions from zero to one.  We also
 150 * currently delay invocation of any RCU callbacks until after this point.
 151 *
 152 * It might later prove better for people registering RCU callbacks during
 153 * early boot to take responsibility for these callbacks, but one step at
 154 * a time.
 155 */
 156static int rcu_scheduler_fully_active __read_mostly;
 157
 158static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
 159static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 160static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 161static void invoke_rcu_core(void);
 162static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 163static void rcu_report_exp_rdp(struct rcu_state *rsp,
 164                               struct rcu_data *rdp, bool wake);
 165static void sync_sched_exp_online_cleanup(int cpu);
 166
 167/* rcuc/rcub kthread realtime priority */
 168static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
 169module_param(kthread_prio, int, 0644);
 170
 171/* Delay in jiffies for grace-period initialization delays, debug only. */
 172
 173static int gp_preinit_delay;
 174module_param(gp_preinit_delay, int, 0444);
 175static int gp_init_delay;
 176module_param(gp_init_delay, int, 0444);
 177static int gp_cleanup_delay;
 178module_param(gp_cleanup_delay, int, 0444);
 179
 180/*
 181 * Number of grace periods between delays, normalized by the duration of
 182 * the delay.  The longer the delay, the more the grace periods between
 183 * each delay.  The reason for this normalization is that it means that,
 184 * for non-zero delays, the overall slowdown of grace periods is constant
 185 * regardless of the duration of the delay.  This arrangement balances
 186 * the need for long delays to increase some race probabilities with the
 187 * need for fast grace periods to increase other race probabilities.
 188 */
 189#define PER_RCU_NODE_PERIOD 3   /* Number of grace periods between delays. */
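
     /*
      * Worked example with illustrative numbers (the consumer of these
      * delays, rcu_gp_slow(), appears later in this file): given a
      * 16-node combining tree and gp_init_delay = 4 jiffies, a delay is
      * taken once every 16 * 3 * 4 = 192 grace periods, for an average
      * of 4/192 of a jiffy per grace period.  Doubling the delay to 8
      * jiffies halves its frequency, leaving that average, and hence
      * the overall slowdown, unchanged.
      */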
 190
 191/*
 192 * Track the rcutorture test sequence number and the update version
 193 * number within a given test.  The rcutorture_testseq is incremented
  194 * on every rcutorture module load and unload, so it has an odd value
 195 * when a test is running.  The rcutorture_vernum is set to zero
 196 * when rcutorture starts and is incremented on each rcutorture update.
 197 * These variables enable correlating rcutorture output with the
 198 * RCU tracing information.
 199 */
 200unsigned long rcutorture_testseq;
 201unsigned long rcutorture_vernum;
 202
 203/*
 204 * Compute the mask of online CPUs for the specified rcu_node structure.
 205 * This will not be stable unless the rcu_node structure's ->lock is
 206 * held, but the bit corresponding to the current CPU will be stable
 207 * in most contexts.
 208 */
 209unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
 210{
 211        return READ_ONCE(rnp->qsmaskinitnext);
 212}
 213
 214/*
 215 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 216 * permit this function to be invoked without holding the root rcu_node
 217 * structure's ->lock, but of course results can be subject to change.
 218 */
 219static int rcu_gp_in_progress(struct rcu_state *rsp)
 220{
 221        return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
 222}
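
     /*
      * For example: at boot, ->gpnum and ->completed both start at
      * 0UL - 300UL (see RCU_STATE_INITIALIZER above), so no grace period
      * is in progress.  Starting a grace period increments ->gpnum, the
      * two counters then differ, and this function returns true until
      * the grace period ends and ->completed catches up.
      */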
 223
 224/*
 225 * Note a quiescent state.  Because we do not need to know
 226 * how many quiescent states passed, just if there was at least
 227 * one since the start of the grace period, this just sets a flag.
 228 * The caller must have disabled preemption.
 229 */
 230void rcu_sched_qs(void)
 231{
 232        RCU_LOCKDEP_WARN(preemptible(), "rcu_sched_qs() invoked with preemption enabled!!!");
 233        if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
 234                return;
 235        trace_rcu_grace_period(TPS("rcu_sched"),
 236                               __this_cpu_read(rcu_sched_data.gpnum),
 237                               TPS("cpuqs"));
 238        __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
 239        if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
 240                return;
 241        __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
 242        rcu_report_exp_rdp(&rcu_sched_state,
 243                           this_cpu_ptr(&rcu_sched_data), true);
 244}
 245
 246void rcu_bh_qs(void)
 247{
 248        RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
 249        if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
 250                trace_rcu_grace_period(TPS("rcu_bh"),
 251                                       __this_cpu_read(rcu_bh_data.gpnum),
 252                                       TPS("cpuqs"));
 253                __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
 254        }
 255}
 256
 257/*
 258 * Steal a bit from the bottom of ->dynticks for idle entry/exit
 259 * control.  Initially this is for TLB flushing.
 260 */
 261#define RCU_DYNTICK_CTRL_MASK 0x1
 262#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
 263#ifndef rcu_eqs_special_exit
 264#define rcu_eqs_special_exit() do { } while (0)
 265#endif
 266
 267static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 268        .dynticks_nesting = 1,
 269        .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
 270        .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
 271};
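
     /*
      * Illustrative ->dynticks encodings under the definitions above,
      * where the low-order bit (RCU_DYNTICK_CTRL_MASK) requests special
      * action on the next EQS exit and the counter proper advances in
      * units of RCU_DYNTICK_CTRL_CTR:
      *
      *   0x4: CTR bit clear, so the CPU is in an extended quiescent state
      *   0x6: CTR bit set, so the CPU is non-idle and RCU is watching it
      *   0x5: in an EQS with special action (e.g. a TLB flush) pending
      */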
 272
 273/*
 274 * Record entry into an extended quiescent state.  This is only to be
 275 * called when not already in an extended quiescent state.
 276 */
 277static void rcu_dynticks_eqs_enter(void)
 278{
 279        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 280        int seq;
 281
 282        /*
 283         * CPUs seeing atomic_add_return() must see prior RCU read-side
 284         * critical sections, and we also must force ordering with the
 285         * next idle sojourn.
 286         */
 287        seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
 288        /* Better be in an extended quiescent state! */
 289        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 290                     (seq & RCU_DYNTICK_CTRL_CTR));
 291        /* Better not have special action (TLB flush) pending! */
 292        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 293                     (seq & RCU_DYNTICK_CTRL_MASK));
 294}
 295
 296/*
 297 * Record exit from an extended quiescent state.  This is only to be
 298 * called from an extended quiescent state.
 299 */
 300static void rcu_dynticks_eqs_exit(void)
 301{
 302        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 303        int seq;
 304
 305        /*
 306         * CPUs seeing atomic_add_return() must see prior idle sojourns,
 307         * and we also must force ordering with the next RCU read-side
 308         * critical section.
 309         */
 310        seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
 311        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 312                     !(seq & RCU_DYNTICK_CTRL_CTR));
 313        if (seq & RCU_DYNTICK_CTRL_MASK) {
 314                atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdtp->dynticks);
 315                smp_mb__after_atomic(); /* _exit after clearing mask. */
 316                /* Prefer duplicate flushes to losing a flush. */
 317                rcu_eqs_special_exit();
 318        }
 319}
 320
 321/*
 322 * Reset the current CPU's ->dynticks counter to indicate that the
 323 * newly onlined CPU is no longer in an extended quiescent state.
 324 * This will either leave the counter unchanged, or increment it
 325 * to the next non-quiescent value.
 326 *
 327 * The non-atomic test/increment sequence works because the upper bits
 328 * of the ->dynticks counter are manipulated only by the corresponding CPU,
 329 * or when the corresponding CPU is offline.
 330 */
 331static void rcu_dynticks_eqs_online(void)
 332{
 333        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 334
 335        if (atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR)
 336                return;
 337        atomic_add(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
 338}
 339
 340/*
 341 * Is the current CPU in an extended quiescent state?
 342 *
 343 * No ordering, as we are sampling CPU-local information.
 344 */
 345bool rcu_dynticks_curr_cpu_in_eqs(void)
 346{
 347        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 348
 349        return !(atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR);
 350}
 351
 352/*
 353 * Snapshot the ->dynticks counter with full ordering so as to allow
 354 * stable comparison of this counter with past and future snapshots.
 355 */
 356int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
 357{
 358        int snap = atomic_add_return(0, &rdtp->dynticks);
 359
 360        return snap & ~RCU_DYNTICK_CTRL_MASK;
 361}
 362
 363/*
 364 * Return true if the snapshot returned from rcu_dynticks_snap()
 365 * indicates that RCU is in an extended quiescent state.
 366 */
 367static bool rcu_dynticks_in_eqs(int snap)
 368{
 369        return !(snap & RCU_DYNTICK_CTRL_CTR);
 370}
 371
 372/*
 373 * Return true if the CPU corresponding to the specified rcu_dynticks
 374 * structure has spent some time in an extended quiescent state since
 375 * rcu_dynticks_snap() returned the specified snapshot.
 376 */
 377static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
 378{
 379        return snap != rcu_dynticks_snap(rdtp);
 380}
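
     /*
      * Illustrative use of the snapshot API above; this is the pattern
      * that dyntick_save_progress_counter() and rcu_implicit_dynticks_qs()
      * below are built around (report_qs() is a hypothetical helper):
      *
      *   int snap = rcu_dynticks_snap(rdtp);        // first FQS scan
      *   ...                                        // time passes
      *   if (rcu_dynticks_in_eqs(snap) ||           // idle at snapshot?
      *       rcu_dynticks_in_eqs_since(rdtp, snap)) // or idle since then?
      *           report_qs(rdtp);                   // credit the CPU
      */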
 381
 382/*
 383 * Do a double-increment of the ->dynticks counter to emulate a
 384 * momentary idle-CPU quiescent state.
 385 */
 386static void rcu_dynticks_momentary_idle(void)
 387{
 388        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 389        int special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
 390                                        &rdtp->dynticks);
 391
 392        /* It is illegal to call this from idle state. */
 393        WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
 394}
 395
 396/*
 397 * Set the special (bottom) bit of the specified CPU so that it
 398 * will take special action (such as flushing its TLB) on the
 399 * next exit from an extended quiescent state.  Returns true if
 400 * the bit was successfully set, or false if the CPU was not in
 401 * an extended quiescent state.
 402 */
 403bool rcu_eqs_special_set(int cpu)
 404{
 405        int old;
 406        int new;
 407        struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
 408
 409        do {
 410                old = atomic_read(&rdtp->dynticks);
 411                if (old & RCU_DYNTICK_CTRL_CTR)
 412                        return false;
 413                new = old | RCU_DYNTICK_CTRL_MASK;
 414        } while (atomic_cmpxchg(&rdtp->dynticks, old, new) != old);
 415        return true;
 416}
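
     /*
      * Illustrative, hypothetical caller: an architecture wanting to
      * flush a remote TLB could avoid IPIs to idle CPUs by setting the
      * special bit instead, with its override of rcu_eqs_special_exit()
      * doing the flush when the CPU leaves its extended quiescent state
      * (flush_fn below is a hypothetical IPI handler):
      *
      *   if (!rcu_eqs_special_set(cpu))
      *           smp_call_function_single(cpu, flush_fn, NULL, 1);
      */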
 417
 418/*
 419 * Let the RCU core know that this CPU has gone through the scheduler,
 420 * which is a quiescent state.  This is called when the need for a
 421 * quiescent state is urgent, so we burn an atomic operation and full
 422 * memory barriers to let the RCU core know about it, regardless of what
 423 * this CPU might (or might not) do in the near future.
 424 *
 425 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 426 *
 427 * The caller must have disabled interrupts.
 428 */
 429static void rcu_momentary_dyntick_idle(void)
 430{
 431        raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
 432        rcu_dynticks_momentary_idle();
 433}
 434
 435/*
 436 * Note a context switch.  This is a quiescent state for RCU-sched,
 437 * and requires special handling for preemptible RCU.
 438 * The caller must have disabled interrupts.
 439 */
 440void rcu_note_context_switch(bool preempt)
 441{
 442        barrier(); /* Avoid RCU read-side critical sections leaking down. */
 443        trace_rcu_utilization(TPS("Start context switch"));
 444        rcu_sched_qs();
 445        rcu_preempt_note_context_switch(preempt);
 446        /* Load rcu_urgent_qs before other flags. */
 447        if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs)))
 448                goto out;
 449        this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
 450        if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs)))
 451                rcu_momentary_dyntick_idle();
 452        this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
 453        if (!preempt)
 454                rcu_note_voluntary_context_switch_lite(current);
 455out:
 456        trace_rcu_utilization(TPS("End context switch"));
 457        barrier(); /* Avoid RCU read-side critical sections leaking up. */
 458}
 459EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 460
 461/*
 462 * Register a quiescent state for all RCU flavors.  If there is an
 463 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
 464 * dyntick-idle quiescent state visible to other CPUs (but only for those
 465 * RCU flavors in desperate need of a quiescent state, which will normally
 466 * be none of them).  Either way, do a lightweight quiescent state for
 467 * all RCU flavors.
 468 *
  469 * The barrier() calls are redundant in the common case when this is
  470 * called externally, but they are retained in case this is called
  471 * from within this file.
 472 *
 473 */
 474void rcu_all_qs(void)
 475{
 476        unsigned long flags;
 477
 478        if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs))
 479                return;
 480        preempt_disable();
 481        /* Load rcu_urgent_qs before other flags. */
 482        if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
 483                preempt_enable();
 484                return;
 485        }
 486        this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
 487        barrier(); /* Avoid RCU read-side critical sections leaking down. */
 488        if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) {
 489                local_irq_save(flags);
 490                rcu_momentary_dyntick_idle();
 491                local_irq_restore(flags);
 492        }
 493        if (unlikely(raw_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)))
 494                rcu_sched_qs();
 495        this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
 496        barrier(); /* Avoid RCU read-side critical sections leaking up. */
 497        preempt_enable();
 498}
 499EXPORT_SYMBOL_GPL(rcu_all_qs);
 500
 501#define DEFAULT_RCU_BLIMIT 10     /* Maximum callbacks per rcu_do_batch. */
 502static long blimit = DEFAULT_RCU_BLIMIT;
 503#define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */
 504static long qhimark = DEFAULT_RCU_QHIMARK;
 505#define DEFAULT_RCU_QLOMARK 100   /* Once only this many pending, use blimit. */
 506static long qlowmark = DEFAULT_RCU_QLOMARK;
 507
 508module_param(blimit, long, 0444);
 509module_param(qhimark, long, 0444);
 510module_param(qlowmark, long, 0444);
 511
 512static ulong jiffies_till_first_fqs = ULONG_MAX;
 513static ulong jiffies_till_next_fqs = ULONG_MAX;
 514static bool rcu_kick_kthreads;
 515
 516module_param(jiffies_till_first_fqs, ulong, 0644);
 517module_param(jiffies_till_next_fqs, ulong, 0644);
 518module_param(rcu_kick_kthreads, bool, 0644);
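
     /*
      * Because of the MODULE_PARAM_PREFIX defined above, all of these
      * parameters appear on the kernel command line under "rcutree.",
      * for example (illustrative values):
      *
      *   rcutree.blimit=20 rcutree.qhimark=20000 rcutree.jiffies_till_first_fqs=2
      */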
 519
 520/*
 521 * How long the grace period must be before we start recruiting
 522 * quiescent-state help from rcu_note_context_switch().
 523 */
 524static ulong jiffies_till_sched_qs = HZ / 10;
 525module_param(jiffies_till_sched_qs, ulong, 0444);
 526
  527static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rdp));
 528static void force_quiescent_state(struct rcu_state *rsp);
 529static int rcu_pending(void);
 530
 531/*
 532 * Return the number of RCU batches started thus far for debug & stats.
 533 */
 534unsigned long rcu_batches_started(void)
 535{
 536        return rcu_state_p->gpnum;
 537}
 538EXPORT_SYMBOL_GPL(rcu_batches_started);
 539
 540/*
 541 * Return the number of RCU-sched batches started thus far for debug & stats.
 542 */
 543unsigned long rcu_batches_started_sched(void)
 544{
 545        return rcu_sched_state.gpnum;
 546}
 547EXPORT_SYMBOL_GPL(rcu_batches_started_sched);
 548
 549/*
 550 * Return the number of RCU BH batches started thus far for debug & stats.
 551 */
 552unsigned long rcu_batches_started_bh(void)
 553{
 554        return rcu_bh_state.gpnum;
 555}
 556EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
 557
 558/*
 559 * Return the number of RCU batches completed thus far for debug & stats.
 560 */
 561unsigned long rcu_batches_completed(void)
 562{
 563        return rcu_state_p->completed;
 564}
 565EXPORT_SYMBOL_GPL(rcu_batches_completed);
 566
 567/*
 568 * Return the number of RCU-sched batches completed thus far for debug & stats.
 569 */
 570unsigned long rcu_batches_completed_sched(void)
 571{
 572        return rcu_sched_state.completed;
 573}
 574EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
 575
 576/*
 577 * Return the number of RCU BH batches completed thus far for debug & stats.
 578 */
 579unsigned long rcu_batches_completed_bh(void)
 580{
 581        return rcu_bh_state.completed;
 582}
 583EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
 584
 585/*
 586 * Return the number of RCU expedited batches completed thus far for
 587 * debug & stats.  Odd numbers mean that a batch is in progress, even
 588 * numbers mean idle.  The value returned will thus be roughly double
 589 * the cumulative batches since boot.
 590 */
 591unsigned long rcu_exp_batches_completed(void)
 592{
 593        return rcu_state_p->expedited_sequence;
 594}
 595EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
 596
 597/*
 598 * Return the number of RCU-sched expedited batches completed thus far
 599 * for debug & stats.  Similar to rcu_exp_batches_completed().
 600 */
 601unsigned long rcu_exp_batches_completed_sched(void)
 602{
 603        return rcu_sched_state.expedited_sequence;
 604}
 605EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
 606
 607/*
 608 * Force a quiescent state.
 609 */
 610void rcu_force_quiescent_state(void)
 611{
 612        force_quiescent_state(rcu_state_p);
 613}
 614EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 615
 616/*
 617 * Force a quiescent state for RCU BH.
 618 */
 619void rcu_bh_force_quiescent_state(void)
 620{
 621        force_quiescent_state(&rcu_bh_state);
 622}
 623EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
 624
 625/*
 626 * Force a quiescent state for RCU-sched.
 627 */
 628void rcu_sched_force_quiescent_state(void)
 629{
 630        force_quiescent_state(&rcu_sched_state);
 631}
 632EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);
 633
 634/*
 635 * Show the state of the grace-period kthreads.
 636 */
 637void show_rcu_gp_kthreads(void)
 638{
 639        struct rcu_state *rsp;
 640
 641        for_each_rcu_flavor(rsp) {
 642                pr_info("%s: wait state: %d ->state: %#lx\n",
 643                        rsp->name, rsp->gp_state, rsp->gp_kthread->state);
 644                /* sched_show_task(rsp->gp_kthread); */
 645        }
 646}
 647EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
 648
 649/*
 650 * Record the number of times rcutorture tests have been initiated and
 651 * terminated.  This information allows the debugfs tracing stats to be
 652 * correlated to the rcutorture messages, even when the rcutorture module
 653 * is being repeatedly loaded and unloaded.  In other words, we cannot
 654 * store this state in rcutorture itself.
 655 */
 656void rcutorture_record_test_transition(void)
 657{
 658        rcutorture_testseq++;
 659        rcutorture_vernum = 0;
 660}
 661EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);
 662
 663/*
 664 * Send along grace-period-related data for rcutorture diagnostics.
 665 */
 666void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
 667                            unsigned long *gpnum, unsigned long *completed)
 668{
 669        struct rcu_state *rsp = NULL;
 670
 671        switch (test_type) {
 672        case RCU_FLAVOR:
 673                rsp = rcu_state_p;
 674                break;
 675        case RCU_BH_FLAVOR:
 676                rsp = &rcu_bh_state;
 677                break;
 678        case RCU_SCHED_FLAVOR:
 679                rsp = &rcu_sched_state;
 680                break;
 681        default:
 682                break;
 683        }
 684        if (rsp == NULL)
 685                return;
 686        *flags = READ_ONCE(rsp->gp_flags);
 687        *gpnum = READ_ONCE(rsp->gpnum);
 688        *completed = READ_ONCE(rsp->completed);
 689}
 690EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
 691
 692/*
 693 * Record the number of writer passes through the current rcutorture test.
 694 * This is also used to correlate debugfs tracing stats with the rcutorture
 695 * messages.
 696 */
 697void rcutorture_record_progress(unsigned long vernum)
 698{
 699        rcutorture_vernum++;
 700}
 701EXPORT_SYMBOL_GPL(rcutorture_record_progress);
 702
 703/*
 704 * Return the root node of the specified rcu_state structure.
 705 */
 706static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 707{
 708        return &rsp->node[0];
 709}
 710
 711/*
 712 * Enter an RCU extended quiescent state, which can be either the
 713 * idle loop or adaptive-tickless usermode execution.
 714 *
 715 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
 716 * the possibility of usermode upcalls having messed up our count
 717 * of interrupt nesting level during the prior busy period.
 718 */
 719static void rcu_eqs_enter(bool user)
 720{
 721        struct rcu_state *rsp;
 722        struct rcu_data *rdp;
 723        struct rcu_dynticks *rdtp;
 724
 725        rdtp = this_cpu_ptr(&rcu_dynticks);
 726        WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0);
 727        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 728                     rdtp->dynticks_nesting == 0);
 729        if (rdtp->dynticks_nesting != 1) {
 730                rdtp->dynticks_nesting--;
 731                return;
 732        }
 733
 734        lockdep_assert_irqs_disabled();
 735        trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0, rdtp->dynticks);
 736        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
 737        for_each_rcu_flavor(rsp) {
 738                rdp = this_cpu_ptr(rsp->rda);
 739                do_nocb_deferred_wakeup(rdp);
 740        }
 741        rcu_prepare_for_idle();
 742        WRITE_ONCE(rdtp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
 743        rcu_dynticks_eqs_enter();
 744        rcu_dynticks_task_enter();
 745}
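
     /*
      * Worked example (illustrative): ->dynticks_nesting == 1 means
      * "outermost non-idle context", so rcu_eqs_enter() from that state
      * takes the slow path above and drops the CPU into an extended
      * quiescent state, whereas a nested call (say, from a usermode
      * upcall) merely decrements the count and returns early.
      */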
 746
 747/**
 748 * rcu_idle_enter - inform RCU that current CPU is entering idle
 749 *
 750 * Enter idle mode, in other words, -leave- the mode in which RCU
 751 * read-side critical sections can occur.  (Though RCU read-side
 752 * critical sections can occur in irq handlers in idle, a possibility
 753 * handled by irq_enter() and irq_exit().)
 754 *
 755 * If you add or remove a call to rcu_idle_enter(), be sure to test with
 756 * CONFIG_RCU_EQS_DEBUG=y.
 757 */
 758void rcu_idle_enter(void)
 759{
 760        lockdep_assert_irqs_disabled();
 761        rcu_eqs_enter(false);
 762}
 763
 764#ifdef CONFIG_NO_HZ_FULL
 765/**
 766 * rcu_user_enter - inform RCU that we are resuming userspace.
 767 *
 768 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 769 * is permitted between this call and rcu_user_exit(). This way the
 770 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 771 * when the CPU runs in userspace.
 772 *
 773 * If you add or remove a call to rcu_user_enter(), be sure to test with
 774 * CONFIG_RCU_EQS_DEBUG=y.
 775 */
 776void rcu_user_enter(void)
 777{
 778        lockdep_assert_irqs_disabled();
 779        rcu_eqs_enter(true);
 780}
 781#endif /* CONFIG_NO_HZ_FULL */
 782
 783/**
 784 * rcu_nmi_exit - inform RCU of exit from NMI context
 785 *
 786 * If we are returning from the outermost NMI handler that interrupted an
 787 * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
 788 * to let the RCU grace-period handling know that the CPU is back to
 789 * being RCU-idle.
 790 *
 791 * If you add or remove a call to rcu_nmi_exit(), be sure to test
 792 * with CONFIG_RCU_EQS_DEBUG=y.
 793 */
 794void rcu_nmi_exit(void)
 795{
 796        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 797
 798        /*
 799         * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
 800         * (We are exiting an NMI handler, so RCU better be paying attention
 801         * to us!)
 802         */
 803        WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
 804        WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
 805
 806        /*
 807         * If the nesting level is not 1, the CPU wasn't RCU-idle, so
 808         * leave it in non-RCU-idle state.
 809         */
 810        if (rdtp->dynticks_nmi_nesting != 1) {
 811                trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nmi_nesting, rdtp->dynticks_nmi_nesting - 2, rdtp->dynticks);
 812                WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* No store tearing. */
 813                           rdtp->dynticks_nmi_nesting - 2);
 814                return;
 815        }
 816
 817        /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
 818        trace_rcu_dyntick(TPS("Startirq"), rdtp->dynticks_nmi_nesting, 0, rdtp->dynticks);
 819        WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
 820        rcu_dynticks_eqs_enter();
 821}
 822
 823/**
 824 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 825 *
 826 * Exit from an interrupt handler, which might possibly result in entering
 827 * idle mode, in other words, leaving the mode in which read-side critical
 828 * sections can occur.  The caller must have disabled interrupts.
 829 *
 830 * This code assumes that the idle loop never does anything that might
 831 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 832 * architecture's idle loop violates this assumption, RCU will give you what
 833 * you deserve, good and hard.  But very infrequently and irreproducibly.
 834 *
 835 * Use things like work queues to work around this limitation.
 836 *
 837 * You have been warned.
 838 *
 839 * If you add or remove a call to rcu_irq_exit(), be sure to test with
 840 * CONFIG_RCU_EQS_DEBUG=y.
 841 */
 842void rcu_irq_exit(void)
 843{
 844        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 845
 846        lockdep_assert_irqs_disabled();
 847        if (rdtp->dynticks_nmi_nesting == 1)
 848                rcu_prepare_for_idle();
 849        rcu_nmi_exit();
 850        if (rdtp->dynticks_nmi_nesting == 0)
 851                rcu_dynticks_task_enter();
 852}
 853
 854/*
 855 * Wrapper for rcu_irq_exit() where interrupts are enabled.
 856 *
 857 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
 858 * with CONFIG_RCU_EQS_DEBUG=y.
 859 */
 860void rcu_irq_exit_irqson(void)
 861{
 862        unsigned long flags;
 863
 864        local_irq_save(flags);
 865        rcu_irq_exit();
 866        local_irq_restore(flags);
 867}
 868
 869/*
 870 * Exit an RCU extended quiescent state, which can be either the
 871 * idle loop or adaptive-tickless usermode execution.
 872 *
 873 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
 874 * allow for the possibility of usermode upcalls messing up our count of
 875 * interrupt nesting level during the busy period that is just now starting.
 876 */
 877static void rcu_eqs_exit(bool user)
 878{
 879        struct rcu_dynticks *rdtp;
 880        long oldval;
 881
 882        lockdep_assert_irqs_disabled();
 883        rdtp = this_cpu_ptr(&rcu_dynticks);
 884        oldval = rdtp->dynticks_nesting;
 885        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
 886        if (oldval) {
 887                rdtp->dynticks_nesting++;
 888                return;
 889        }
 890        rcu_dynticks_task_exit();
 891        rcu_dynticks_eqs_exit();
 892        rcu_cleanup_after_idle();
 893        trace_rcu_dyntick(TPS("End"), rdtp->dynticks_nesting, 1, rdtp->dynticks);
 894        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
 895        WRITE_ONCE(rdtp->dynticks_nesting, 1);
 896        WRITE_ONCE(rdtp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
 897}
 898
 899/**
 900 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 901 *
 902 * Exit idle mode, in other words, -enter- the mode in which RCU
 903 * read-side critical sections can occur.
 904 *
 905 * If you add or remove a call to rcu_idle_exit(), be sure to test with
 906 * CONFIG_RCU_EQS_DEBUG=y.
 907 */
 908void rcu_idle_exit(void)
 909{
 910        unsigned long flags;
 911
 912        local_irq_save(flags);
 913        rcu_eqs_exit(false);
 914        local_irq_restore(flags);
 915}
 916
 917#ifdef CONFIG_NO_HZ_FULL
 918/**
 919 * rcu_user_exit - inform RCU that we are exiting userspace.
 920 *
  921 * Exit RCU idle mode while entering the kernel, which can run an
  922 * RCU read-side critical section at any time.
 923 *
 924 * If you add or remove a call to rcu_user_exit(), be sure to test with
 925 * CONFIG_RCU_EQS_DEBUG=y.
 926 */
 927void rcu_user_exit(void)
 928{
 929        rcu_eqs_exit(1);
 930}
 931#endif /* CONFIG_NO_HZ_FULL */
 932
 933/**
 934 * rcu_nmi_enter - inform RCU of entry to NMI context
 935 *
 936 * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
 937 * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
 938 * that the CPU is active.  This implementation permits nested NMIs, as
 939 * long as the nesting level does not overflow an int.  (You will probably
 940 * run out of stack space first.)
 941 *
 942 * If you add or remove a call to rcu_nmi_enter(), be sure to test
 943 * with CONFIG_RCU_EQS_DEBUG=y.
 944 */
 945void rcu_nmi_enter(void)
 946{
 947        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 948        long incby = 2;
 949
 950        /* Complain about underflow. */
 951        WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);
 952
 953        /*
 954         * If idle from RCU viewpoint, atomically increment ->dynticks
 955         * to mark non-idle and increment ->dynticks_nmi_nesting by one.
 956         * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
 957         * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
 958         * to be in the outermost NMI handler that interrupted an RCU-idle
 959         * period (observation due to Andy Lutomirski).
 960         */
 961        if (rcu_dynticks_curr_cpu_in_eqs()) {
 962                rcu_dynticks_eqs_exit();
 963                incby = 1;
 964        }
 965        trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
 966                          rdtp->dynticks_nmi_nesting,
 967                          rdtp->dynticks_nmi_nesting + incby, rdtp->dynticks);
 968        WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* Prevent store tearing. */
 969                   rdtp->dynticks_nmi_nesting + incby);
 970        barrier();
 971}
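
     /*
      * Worked example (illustrative): an NMI arriving on an RCU-idle CPU
      * finds ->dynticks_nmi_nesting == 0, exits the EQS, and adds
      * incby == 1, leaving the count at 1.  An NMI nested within that
      * handler adds 2, giving 3; its rcu_nmi_exit() subtracts 2 back to
      * 1, and the outermost rcu_nmi_exit() then sees exactly 1 and
      * re-enters the EQS.  Hence a count of 1 reliably identifies the
      * outermost handler that interrupted an RCU-idle period.
      */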
 972
 973/**
 974 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 975 *
 976 * Enter an interrupt handler, which might possibly result in exiting
 977 * idle mode, in other words, entering the mode in which read-side critical
 978 * sections can occur.  The caller must have disabled interrupts.
 979 *
 980 * Note that the Linux kernel is fully capable of entering an interrupt
 981 * handler that it never exits, for example when doing upcalls to user mode!
 982 * This code assumes that the idle loop never does upcalls to user mode.
 983 * If your architecture's idle loop does do upcalls to user mode (or does
 984 * anything else that results in unbalanced calls to the irq_enter() and
 985 * irq_exit() functions), RCU will give you what you deserve, good and hard.
 986 * But very infrequently and irreproducibly.
 987 *
 988 * Use things like work queues to work around this limitation.
 989 *
 990 * You have been warned.
 991 *
 992 * If you add or remove a call to rcu_irq_enter(), be sure to test with
 993 * CONFIG_RCU_EQS_DEBUG=y.
 994 */
 995void rcu_irq_enter(void)
 996{
 997        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 998
 999        lockdep_assert_irqs_disabled();
1000        if (rdtp->dynticks_nmi_nesting == 0)
1001                rcu_dynticks_task_exit();
1002        rcu_nmi_enter();
1003        if (rdtp->dynticks_nmi_nesting == 1)
1004                rcu_cleanup_after_idle();
1005}
1006
1007/*
1008 * Wrapper for rcu_irq_enter() where interrupts are enabled.
1009 *
1010 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
1011 * with CONFIG_RCU_EQS_DEBUG=y.
1012 */
1013void rcu_irq_enter_irqson(void)
1014{
1015        unsigned long flags;
1016
1017        local_irq_save(flags);
1018        rcu_irq_enter();
1019        local_irq_restore(flags);
1020}
1021
1022/**
 1023 * rcu_is_watching - see if RCU is watching the current CPU
 1024 *
 1025 * Return true if RCU is watching the running CPU, which means that this
 1026 * CPU can safely enter RCU read-side critical sections.  In other words,
 1027 * if the current CPU is not in its idle loop or is in an interrupt or
 1028 * NMI handler, return true.
1029 */
1030bool notrace rcu_is_watching(void)
1031{
1032        bool ret;
1033
1034        preempt_disable_notrace();
1035        ret = !rcu_dynticks_curr_cpu_in_eqs();
1036        preempt_enable_notrace();
1037        return ret;
1038}
1039EXPORT_SYMBOL_GPL(rcu_is_watching);
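
     /*
      * Typical use is in debug assertions; for example, rcu_read_lock()
      * in include/linux/rcupdate.h checks:
      *
      *   RCU_LOCKDEP_WARN(!rcu_is_watching(),
      *                    "rcu_read_lock() used illegally while idle");
      */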
1040
1041/*
1042 * If a holdout task is actually running, request an urgent quiescent
1043 * state from its CPU.  This is unsynchronized, so migrations can cause
1044 * the request to go to the wrong CPU.  Which is OK, all that will happen
1045 * is that the CPU's next context switch will be a bit slower and next
1046 * time around this task will generate another request.
1047 */
1048void rcu_request_urgent_qs_task(struct task_struct *t)
1049{
1050        int cpu;
1051
1052        barrier();
1053        cpu = task_cpu(t);
1054        if (!task_curr(t))
1055                return; /* This task is not running on that CPU. */
1056        smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true);
1057}
1058
1059#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
1060
1061/*
1062 * Is the current CPU online?  Disable preemption to avoid false positives
1063 * that could otherwise happen due to the current CPU number being sampled,
1064 * this task being preempted, its old CPU being taken offline, resuming
1065 * on some other CPU, then determining that its old CPU is now offline.
1066 * It is OK to use RCU on an offline processor during initial boot, hence
1067 * the check for rcu_scheduler_fully_active.  Note also that it is OK
1068 * for a CPU coming online to use RCU for one jiffy prior to marking itself
1069 * online in the cpu_online_mask.  Similarly, it is OK for a CPU going
1070 * offline to continue to use RCU for one jiffy after marking itself
1071 * offline in the cpu_online_mask.  This leniency is necessary given the
1072 * non-atomic nature of the online and offline processing, for example,
1073 * the fact that a CPU enters the scheduler after completing the teardown
1074 * of the CPU.
1075 *
 1076 * This is also why RCU internally marks CPUs online during the
1077 * preparation phase and offline after the CPU has been taken down.
1078 *
1079 * Disable checking if in an NMI handler because we cannot safely report
1080 * errors from NMI handlers anyway.
1081 */
1082bool rcu_lockdep_current_cpu_online(void)
1083{
1084        struct rcu_data *rdp;
1085        struct rcu_node *rnp;
1086        bool ret;
1087
1088        if (in_nmi())
1089                return true;
1090        preempt_disable();
1091        rdp = this_cpu_ptr(&rcu_sched_data);
1092        rnp = rdp->mynode;
1093        ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
1094              !rcu_scheduler_fully_active;
1095        preempt_enable();
1096        return ret;
1097}
1098EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
1099
1100#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
1101
1102/**
1103 * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
1104 *
 1105 * If the current CPU is idle or running in a first-level (not nested)
1106 * interrupt from idle, return true.  The caller must have at least
1107 * disabled preemption.
1108 */
1109static int rcu_is_cpu_rrupt_from_idle(void)
1110{
1111        return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 0 &&
1112               __this_cpu_read(rcu_dynticks.dynticks_nmi_nesting) <= 1;
1113}
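
     /*
      * Illustrative values: a fully idle CPU has ->dynticks_nesting == 0
      * and ->dynticks_nmi_nesting == 0, and a first-level interrupt from
      * idle raises ->dynticks_nmi_nesting to 1 (see rcu_nmi_enter()), so
      * both tests above pass.  Task context has ->dynticks_nesting of at
      * least 1 and fails the first test, while a nested interrupt pushes
      * ->dynticks_nmi_nesting past 1 and fails the second.
      */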
1114
1115/*
1116 * We are reporting a quiescent state on behalf of some other CPU, so
1117 * it is our responsibility to check for and handle potential overflow
1118 * of the rcu_node ->gpnum counter with respect to the rcu_data counters.
1119 * After all, the CPU might be in deep idle state, and thus executing no
1120 * code whatsoever.
1121 */
1122static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
1123{
1124        raw_lockdep_assert_held_rcu_node(rnp);
1125        if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4, rnp->gpnum))
1126                WRITE_ONCE(rdp->gpwrap, true);
1127        if (ULONG_CMP_LT(rdp->rcu_iw_gpnum + ULONG_MAX / 4, rnp->gpnum))
1128                rdp->rcu_iw_gpnum = rnp->gpnum + ULONG_MAX / 4;
1129}
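
     /*
      * Illustrative arithmetic: ULONG_CMP_LT() is a wrap-tolerant
      * comparison, so the first test above fires only once rnp->gpnum
      * has advanced more than ULONG_MAX / 4 (about 2^62 grace periods
      * on a 64-bit system) beyond the value last recorded by this CPU,
      * at which point the two counters can no longer be compared
      * meaningfully and ->gpwrap must be set.
      */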
1130
1131/*
1132 * Snapshot the specified CPU's dynticks counter so that we can later
1133 * credit them with an implicit quiescent state.  Return 1 if this CPU
1134 * is in dynticks idle mode, which is an extended quiescent state.
1135 */
1136static int dyntick_save_progress_counter(struct rcu_data *rdp)
1137{
1138        rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
1139        if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
1140                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
1141                rcu_gpnum_ovf(rdp->mynode, rdp);
1142                return 1;
1143        }
1144        return 0;
1145}
1146
1147/*
1148 * Handler for the irq_work request posted when a grace period has
1149 * gone on for too long, but not yet long enough for an RCU CPU
1150 * stall warning.  Set state appropriately, but just complain if
1151 * there is unexpected state on entry.
1152 */
1153static void rcu_iw_handler(struct irq_work *iwp)
1154{
1155        struct rcu_data *rdp;
1156        struct rcu_node *rnp;
1157
1158        rdp = container_of(iwp, struct rcu_data, rcu_iw);
1159        rnp = rdp->mynode;
1160        raw_spin_lock_rcu_node(rnp);
1161        if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
1162                rdp->rcu_iw_gpnum = rnp->gpnum;
1163                rdp->rcu_iw_pending = false;
1164        }
1165        raw_spin_unlock_rcu_node(rnp);
1166}
1167
1168/*
1169 * Return true if the specified CPU has passed through a quiescent
 1170 * state by virtue of being in or having passed through a dynticks
1171 * idle state since the last call to dyntick_save_progress_counter()
1172 * for this same CPU, or by virtue of having been offline.
1173 */
1174static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
1175{
1176        unsigned long jtsq;
1177        bool *rnhqp;
1178        bool *ruqp;
1179        struct rcu_node *rnp = rdp->mynode;
1180
1181        /*
1182         * If the CPU passed through or entered a dynticks idle phase with
1183         * no active irq/NMI handlers, then we can safely pretend that the CPU
1184         * already acknowledged the request to pass through a quiescent
1185         * state.  Either way, that CPU cannot possibly be in an RCU
1186         * read-side critical section that started before the beginning
1187         * of the current RCU grace period.
1188         */
1189        if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
1190                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
1191                rdp->dynticks_fqs++;
1192                rcu_gpnum_ovf(rnp, rdp);
1193                return 1;
1194        }
1195
1196        /*
1197         * Has this CPU encountered a cond_resched() since the beginning
1198         * of the grace period?  For this to be the case, the CPU has to
1199         * have noticed the current grace period.  This might not be the
1200         * case for nohz_full CPUs looping in the kernel.
1201         */
1202        jtsq = jiffies_till_sched_qs;
1203        ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
1204        if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
1205            READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
1206            READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
1207                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
1208                rcu_gpnum_ovf(rnp, rdp);
1209                return 1;
1210        } else if (time_after(jiffies, rdp->rsp->gp_start + jtsq)) {
1211                /* Load rcu_qs_ctr before store to rcu_urgent_qs. */
1212                smp_store_release(ruqp, true);
1213        }
1214
1215        /* Check for the CPU being offline. */
1216        if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp))) {
1217                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
1218                rdp->offline_fqs++;
1219                rcu_gpnum_ovf(rnp, rdp);
1220                return 1;
1221        }
1222
1223        /*
1224         * A CPU running for an extended time within the kernel can
1225         * delay RCU grace periods.  When the CPU is in NO_HZ_FULL mode,
1226         * even context-switching back and forth between a pair of
1227         * in-kernel CPU-bound tasks cannot advance grace periods.
1228         * So if the grace period is old enough, make the CPU pay attention.
1229         * Note that the unsynchronized assignments to the per-CPU
1230         * rcu_need_heavy_qs variable are safe.  Yes, setting of
1231         * bits can be lost, but they will be set again on the next
1232         * force-quiescent-state pass.  So lost bit sets do not result
1233         * in incorrect behavior, merely in a grace period lasting
1234         * a few jiffies longer than it might otherwise.  Because
1235         * there are at most four threads involved, and because the
1236         * updates are only once every few jiffies, the probability of
1237         * lossage (and thus of slight grace-period extension) is
1238         * quite low.
1239         */
1240        rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
1241        if (!READ_ONCE(*rnhqp) &&
1242            (time_after(jiffies, rdp->rsp->gp_start + jtsq) ||
1243             time_after(jiffies, rdp->rsp->jiffies_resched))) {
1244                WRITE_ONCE(*rnhqp, true);
1245                /* Store rcu_need_heavy_qs before rcu_urgent_qs. */
1246                smp_store_release(ruqp, true);
1247                rdp->rsp->jiffies_resched += jtsq; /* Re-enable beating. */
1248        }
1249
1250        /*
1251         * If more than halfway to RCU CPU stall-warning time, do a
1252         * resched_cpu() to try to loosen things up a bit.  Also check to
1253         * see if the CPU is getting hammered with interrupts, but only
1254         * once per grace period, just to keep the IPIs down to a dull roar.
1255         */
1256        if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) {
1257                resched_cpu(rdp->cpu);
1258                if (IS_ENABLED(CONFIG_IRQ_WORK) &&
1259                    !rdp->rcu_iw_pending && rdp->rcu_iw_gpnum != rnp->gpnum &&
1260                    (rnp->ffmask & rdp->grpmask)) {
1261                        init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
1262                        rdp->rcu_iw_pending = true;
1263                        rdp->rcu_iw_gpnum = rnp->gpnum;
1264                        irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
1265                }
1266        }
1267
1268        return 0;
1269}
1270
1271static void record_gp_stall_check_time(struct rcu_state *rsp)
1272{
1273        unsigned long j = jiffies;
1274        unsigned long j1;
1275
1276        rsp->gp_start = j;
1277        smp_wmb(); /* Record start time before stall time. */
1278        j1 = rcu_jiffies_till_stall_check();
1279        WRITE_ONCE(rsp->jiffies_stall, j + j1);
1280        rsp->jiffies_resched = j + j1 / 2;
1281        rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
1282}
1283
1284/*
1285 * Convert a ->gp_state value to a character string.
1286 */
1287static const char *gp_state_getname(short gs)
1288{
1289        if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
1290                return "???";
1291        return gp_state_names[gs];
1292}
1293
1294/*
1295 * Complain about starvation of grace-period kthread.
1296 */
1297static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
1298{
1299        unsigned long gpa;
1300        unsigned long j;
1301
1302        j = jiffies;
1303        gpa = READ_ONCE(rsp->gp_activity);
1304        if (j - gpa > 2 * HZ) {
1305                pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
1306                       rsp->name, j - gpa,
1307                       rsp->gpnum, rsp->completed,
1308                       rsp->gp_flags,
1309                       gp_state_getname(rsp->gp_state), rsp->gp_state,
1310                       rsp->gp_kthread ? rsp->gp_kthread->state : ~0,
1311                       rsp->gp_kthread ? task_cpu(rsp->gp_kthread) : -1);
1312                if (rsp->gp_kthread) {
1313                        pr_err("RCU grace-period kthread stack dump:\n");
1314                        sched_show_task(rsp->gp_kthread);
1315                        wake_up_process(rsp->gp_kthread);
1316                }
1317        }
1318}
1319
1320/*
1321 * Dump stacks of all tasks running on stalled CPUs.  First try using
1322 * NMIs, but fall back to manual remote stack tracing on architectures
1323 * that don't support NMI-based stack dumps.  The NMI-triggered stack
1324 * traces are more accurate because they are printed by the target CPU.
1325 */
1326static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
1327{
1328        int cpu;
1329        unsigned long flags;
1330        struct rcu_node *rnp;
1331
1332        rcu_for_each_leaf_node(rsp, rnp) {
1333                raw_spin_lock_irqsave_rcu_node(rnp, flags);
1334                for_each_leaf_node_possible_cpu(rnp, cpu)
1335                        if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
1336                                if (!trigger_single_cpu_backtrace(cpu))
1337                                        dump_cpu_task(cpu);
1338                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1339        }
1340}
1341
1342/*
1343 * If too much time has passed in the current grace period, and if
1344 * so configured, go kick the relevant kthreads.
1345 */
1346static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
1347{
1348        unsigned long j;
1349
1350        if (!rcu_kick_kthreads)
1351                return;
1352        j = READ_ONCE(rsp->jiffies_kick_kthreads);
1353        if (time_after(jiffies, j) && rsp->gp_kthread &&
1354            (rcu_gp_in_progress(rsp) || READ_ONCE(rsp->gp_flags))) {
1355                WARN_ONCE(1, "Kicking %s grace-period kthread\n", rsp->name);
1356                rcu_ftrace_dump(DUMP_ALL);
1357                wake_up_process(rsp->gp_kthread);
1358                WRITE_ONCE(rsp->jiffies_kick_kthreads, j + HZ);
1359        }
1360}
1361
1362static inline void panic_on_rcu_stall(void)
1363{
1364        if (sysctl_panic_on_rcu_stall)
1365                panic("RCU Stall\n");
1366}
1367
1368static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
1369{
1370        int cpu;
1371        long delta;
1372        unsigned long flags;
1373        unsigned long gpa;
1374        unsigned long j;
1375        int ndetected = 0;
1376        struct rcu_node *rnp = rcu_get_root(rsp);
1377        long totqlen = 0;
1378
1379        /* Kick and suppress, if so configured. */
1380        rcu_stall_kick_kthreads(rsp);
1381        if (rcu_cpu_stall_suppress)
1382                return;
1383
1384        /* Only let one CPU complain about others per time interval. */
1385
1386        raw_spin_lock_irqsave_rcu_node(rnp, flags);
1387        delta = jiffies - READ_ONCE(rsp->jiffies_stall);
1388        if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
1389                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1390                return;
1391        }
1392        WRITE_ONCE(rsp->jiffies_stall,
1393                   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
1394        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1395
1396        /*
1397         * OK, time to rat on our buddy...
1398         * See Documentation/RCU/stallwarn.txt for info on how to debug
1399         * RCU CPU stall warnings.
1400         */
1401        pr_err("INFO: %s detected stalls on CPUs/tasks:",
1402               rsp->name);
1403        print_cpu_stall_info_begin();
1404        rcu_for_each_leaf_node(rsp, rnp) {
1405                raw_spin_lock_irqsave_rcu_node(rnp, flags);
1406                ndetected += rcu_print_task_stall(rnp);
1407                if (rnp->qsmask != 0) {
1408                        for_each_leaf_node_possible_cpu(rnp, cpu)
1409                                if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
1410                                        print_cpu_stall_info(rsp, cpu);
1411                                        ndetected++;
1412                                }
1413                }
1414                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1415        }
1416
1417        print_cpu_stall_info_end();
1418        for_each_possible_cpu(cpu)
1419                totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
1420                                                            cpu)->cblist);
1421        pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
1422               smp_processor_id(), (long)(jiffies - rsp->gp_start),
1423               (long)rsp->gpnum, (long)rsp->completed, totqlen);
1424        if (ndetected) {
1425                rcu_dump_cpu_stacks(rsp);
1426
1427                /* Complain about tasks blocking the grace period. */
1428                rcu_print_detail_task_stall(rsp);
1429        } else {
1430                if (READ_ONCE(rsp->gpnum) != gpnum ||
1431                    READ_ONCE(rsp->completed) == gpnum) {
1432                        pr_err("INFO: Stall ended before state dump start\n");
1433                } else {
1434                        j = jiffies;
1435                        gpa = READ_ONCE(rsp->gp_activity);
1436                        pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
1437                               rsp->name, j - gpa, j, gpa,
1438                               jiffies_till_next_fqs,
1439                               rcu_get_root(rsp)->qsmask);
1440                        /* In this case, the current CPU might be at fault. */
1441                        sched_show_task(current);
1442                }
1443        }
1444
1445        rcu_check_gp_kthread_starvation(rsp);
1446
1447        panic_on_rcu_stall();
1448
1449        force_quiescent_state(rsp);  /* Kick them all. */
1450}
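
/*
 * For reference, the pr_err()/pr_cont() calls above produce console
 * output shaped roughly as follows (values are illustrative, and the
 * per-CPU detail lines come from print_cpu_stall_info()):
 *
 *	INFO: rcu_sched detected stalls on CPUs/tasks:
 *	... one line per stalled CPU/task from print_cpu_stall_info() ...
 *	(detected by 3, t=21004 jiffies, g=1245, c=1244, q=54)
 */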
1451
1452static void print_cpu_stall(struct rcu_state *rsp)
1453{
1454        int cpu;
1455        unsigned long flags;
1456        struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1457        struct rcu_node *rnp = rcu_get_root(rsp);
1458        long totqlen = 0;
1459
1460        /* Kick and suppress, if so configured. */
1461        rcu_stall_kick_kthreads(rsp);
1462        if (rcu_cpu_stall_suppress)
1463                return;
1464
1465        /*
1466         * OK, time to rat on ourselves...
1467         * See Documentation/RCU/stallwarn.txt for info on how to debug
1468         * RCU CPU stall warnings.
1469         */
1470        pr_err("INFO: %s self-detected stall on CPU", rsp->name);
1471        print_cpu_stall_info_begin();
1472        raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
1473        print_cpu_stall_info(rsp, smp_processor_id());
1474        raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
1475        print_cpu_stall_info_end();
1476        for_each_possible_cpu(cpu)
1477                totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
1478                                                            cpu)->cblist);
1479        pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
1480                jiffies - rsp->gp_start,
1481                (long)rsp->gpnum, (long)rsp->completed, totqlen);
1482
1483        rcu_check_gp_kthread_starvation(rsp);
1484
1485        rcu_dump_cpu_stacks(rsp);
1486
1487        raw_spin_lock_irqsave_rcu_node(rnp, flags);
1488        if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
1489                WRITE_ONCE(rsp->jiffies_stall,
1490                           jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
1491        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1492
1493        panic_on_rcu_stall();
1494
1495        /*
1496         * Attempt to revive the RCU machinery by forcing a context switch.
1497         *
1498         * A context switch would normally allow the RCU state machine to make
1499         * progress and it could be we're stuck in kernel space without context
1500         * switches for an entirely unreasonable amount of time.
1501         */
1502        resched_cpu(smp_processor_id());
1503}
1504
1505static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
1506{
1507        unsigned long completed;
1508        unsigned long gpnum;
1509        unsigned long gps;
1510        unsigned long j;
1511        unsigned long js;
1512        struct rcu_node *rnp;
1513
1514        if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
1515            !rcu_gp_in_progress(rsp))
1516                return;
1517        rcu_stall_kick_kthreads(rsp);
1518        j = jiffies;
1519
1520        /*
1521         * Lots of memory barriers to reject false positives.
1522         *
1523         * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
1524         * then rsp->gp_start, and finally rsp->completed.  These values
1525         * are updated in the opposite order with memory barriers (or
1526         * equivalent) during grace-period initialization and cleanup.
1527         * Now, a false positive can occur if we get a new value of
1528         * rsp->gp_start and an old value of rsp->jiffies_stall.  But given
1529         * the memory barriers, the only way that this can happen is if one
1530         * grace period ends and another starts between these two fetches.
1531         * Detect this by comparing rsp->completed with the previous fetch
1532         * from rsp->gpnum.
1533         *
1534         * Given this check, comparisons of jiffies, rsp->jiffies_stall,
1535         * and rsp->gp_start suffice to forestall false positives.
1536         */
1537        gpnum = READ_ONCE(rsp->gpnum);
1538        smp_rmb(); /* Pick up ->gpnum first... */
1539        js = READ_ONCE(rsp->jiffies_stall);
1540        smp_rmb(); /* ...then ->jiffies_stall before the rest... */
1541        gps = READ_ONCE(rsp->gp_start);
1542        smp_rmb(); /* ...and finally ->gp_start before ->completed. */
1543        completed = READ_ONCE(rsp->completed);
1544        if (ULONG_CMP_GE(completed, gpnum) ||
1545            ULONG_CMP_LT(j, js) ||
1546            ULONG_CMP_GE(gps, js))
1547                return; /* No stall or GP completed since entering function. */
1548        rnp = rdp->mynode;
1549        if (rcu_gp_in_progress(rsp) &&
1550            (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {
1551
1552                /* We haven't checked in, so go dump stack. */
1553                print_cpu_stall(rsp);
1554
1555        } else if (rcu_gp_in_progress(rsp) &&
1556                   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
1557
1558                /* They had a few time units to dump stack, so complain. */
1559                print_other_cpu_stall(rsp, gpnum);
1560        }
1561}
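
/*
 * The ordered reads above pair with the updater side in grace-period
 * initialization and cleanup.  A condensed, illustration-only sketch
 * of the publication order follows (see rcu_gp_init() and
 * rcu_gp_cleanup() for the real code, which these lines merely
 * restate):
 */
#if 0	/* illustration only, not compiled */
static void demo_gp_publish(struct rcu_state *rsp)
{
	/* GP start: ->gp_start and ->jiffies_stall are recorded first... */
	record_gp_stall_check_time(rsp);
	/* ...then ->gpnum, with release semantics ordering the above. */
	smp_store_release(&rsp->gpnum, rsp->gpnum + 1);

	/* GP end: ->completed is published last of all. */
	WRITE_ONCE(rsp->completed, rsp->gpnum);
}
#endif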
1562
1563/**
1564 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
1565 *
1566 * Set the stall-warning timeout way off into the future, thus preventing
1567 * any RCU CPU stall-warning messages from appearing in the current set of
1568 * RCU grace periods.
1569 *
1570 * The caller must disable hard irqs.
1571 */
1572void rcu_cpu_stall_reset(void)
1573{
1574        struct rcu_state *rsp;
1575
1576        for_each_rcu_flavor(rsp)
1577                WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
1578}
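
/*
 * Hedged usage sketch: a caller that is about to stop normal execution
 * for a long time (a kernel debugger, for instance) invokes this with
 * hard irqs disabled, as required by the header comment.  The function
 * name below is hypothetical.
 */
#if 0	/* illustration only, not compiled */
static void demo_enter_long_stoppage(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_cpu_stall_reset();	/* No stall splats for this stoppage. */
	/* ... spend an arbitrarily long time here ... */
	local_irq_restore(flags);
}
#endif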
1579
1580/*
1581 * Determine the value that ->completed will have at the end of the
1582 * next subsequent grace period.  This is used to tag callbacks so that
1583 * a CPU can invoke callbacks in a timely fashion even if that CPU has
1584 * been dyntick-idle for an extended period with callbacks under the
1585 * influence of RCU_FAST_NO_HZ.
1586 *
1587 * The caller must hold rnp->lock with interrupts disabled.
1588 */
1589static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
1590                                       struct rcu_node *rnp)
1591{
1592        raw_lockdep_assert_held_rcu_node(rnp);
1593
1594        /*
1595         * If RCU is idle, we just wait for the next grace period.
1596         * But we can only be sure that RCU is idle if we are looking
1597         * at the root rcu_node structure -- otherwise, a new grace
1598         * period might have started, but just not yet gotten around
1599         * to initializing the current non-root rcu_node structure.
1600         */
1601        if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
1602                return rnp->completed + 1;
1603
1604        /*
1605         * If the current rcu_node structure believes that RCU is
1606         * idle, and if the rcu_state structure does not yet reflect
1607         * the start of a new grace period, then the next grace period
1608         * will suffice.  The memory barrier is needed to accurately
1609         * sample the rsp->gpnum, and pairs with the second lock
1610         * acquisition in rcu_gp_init(), which is augmented with
1611         * smp_mb__after_unlock_lock() for this purpose.
1612         */
1613        if (rnp->gpnum == rnp->completed) {
1614                smp_mb(); /* See above block comment. */
1615                if (READ_ONCE(rsp->gpnum) == rnp->completed)
1616                        return rnp->completed + 1;
1617        }
1618
1619        /*
1620         * Otherwise, wait for a possible partial grace period and
1621         * then the subsequent full grace period.
1622         */
1623        return rnp->completed + 2;
1624}
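
/*
 * Worked example with illustrative numbers: if the root rcu_node has
 * ->gpnum == ->completed == 100, RCU is provably idle, so a callback
 * queued now is ready once grace period 101 completes, and 101 is
 * returned.  At a non-root rcu_node showing 100/100 while rsp->gpnum
 * has already advanced to 101, the callback might have been queued
 * after grace period 101 started, so it must wait out that
 * possibly-partial grace period plus one full one: 102 is returned.
 */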
1625
1626/* Trace-event wrapper function for trace_rcu_future_grace_period.  */
1627static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1628                              unsigned long c, const char *s)
1629{
1630        trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
1631                                      rnp->completed, c, rnp->level,
1632                                      rnp->grplo, rnp->grphi, s);
1633}
1634
1635/*
1636 * Start the specified grace period, as needed to handle newly arrived
1637 * callbacks.  The required future grace periods are recorded in each
1638 * rcu_node structure's ->need_future_gp[] field.  Returns true if there
1639 * is reason to awaken the grace-period kthread.
1640 *
1641 * The caller must hold the specified rcu_node structure's ->lock, which
1642 * is why the caller is responsible for waking the grace-period kthread.
1643 */
1644static bool rcu_start_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1645                              unsigned long c)
1646{
1647        bool ret = false;
1648        struct rcu_state *rsp = rdp->rsp;
1649        struct rcu_node *rnp_root;
1650
1651        /*
1652         * Use funnel locking to either acquire the root rcu_node
1653         * structure's lock or bail out if the need for this grace period
1654         * has already been recorded -- or has already started.  If there
1655         * is already a grace period in progress in a non-leaf node, no
1656         * recording is needed because the end of the grace period will
1657         * scan the leaf rcu_node structures.  Note that rnp->lock must
1658         * not be released.
1659         */
1660        raw_lockdep_assert_held_rcu_node(rnp);
1661        trace_rcu_this_gp(rnp, rdp, c, TPS("Startleaf"));
1662        for (rnp_root = rnp; 1; rnp_root = rnp_root->parent) {
1663                if (rnp_root != rnp)
1664                        raw_spin_lock_rcu_node(rnp_root);
1665                WARN_ON_ONCE(ULONG_CMP_LT(rnp_root->gpnum +
1666                                          need_future_gp_mask(), c));
1667                if (need_future_gp_element(rnp_root, c) ||
1668                    ULONG_CMP_GE(rnp_root->gpnum, c) ||
1669                    (rnp != rnp_root &&
1670                     rnp_root->gpnum != rnp_root->completed)) {
1671                        trace_rcu_this_gp(rnp_root, rdp, c, TPS("Prestarted"));
1672                        goto unlock_out;
1673                }
1674                need_future_gp_element(rnp_root, c) = true;
1675                if (rnp_root != rnp && rnp_root->parent != NULL)
1676                        raw_spin_unlock_rcu_node(rnp_root);
1677                if (!rnp_root->parent)
1678                        break;  /* At root, and perhaps also leaf. */
1679        }
1680
1681        /* If GP already in progress, just leave, otherwise start one. */
1682        if (rnp_root->gpnum != rnp_root->completed) {
1683                trace_rcu_this_gp(rnp_root, rdp, c, TPS("Startedleafroot"));
1684                goto unlock_out;
1685        }
1686        trace_rcu_this_gp(rnp_root, rdp, c, TPS("Startedroot"));
1687        WRITE_ONCE(rsp->gp_flags, rsp->gp_flags | RCU_GP_FLAG_INIT);
1688        if (!rsp->gp_kthread) {
1689                trace_rcu_this_gp(rnp_root, rdp, c, TPS("NoGPkthread"));
1690                goto unlock_out;
1691        }
1692        trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum), TPS("newreq"));
1693        ret = true;  /* Caller must wake GP kthread. */
1694unlock_out:
1695        if (rnp != rnp_root)
1696                raw_spin_unlock_rcu_node(rnp_root);
1697        return ret;
1698}
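
/*
 * Minimal caller sketch (the demo_ name is hypothetical): the leaf
 * ->lock must be held across the call, and because waking the
 * grace-period kthread is unsafe while holding rnp->lock, the caller
 * performs the wakeup only after dropping that lock, exactly as
 * rcu_accelerate_cbs() and its callers do below.
 */
#if 0	/* illustration only, not compiled */
static void demo_request_gp(struct rcu_state *rsp, struct rcu_node *rnp,
			    struct rcu_data *rdp)
{
	unsigned long c;
	unsigned long flags;
	bool needwake;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	c = rcu_cbs_completed(rsp, rnp);
	needwake = rcu_start_this_gp(rnp, rdp, c);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	if (needwake)
		rcu_gp_kthread_wake(rsp);
}
#endif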
1699
1700/*
1701 * Clean up any old requests for the just-ended grace period.  Also return
1702 * whether any additional grace periods have been requested.
1703 */
1704static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
1705{
1706        unsigned long c = rnp->completed;
1707        bool needmore;
1708        struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
1709
1710        need_future_gp_element(rnp, c) = false;
1711        needmore = need_any_future_gp(rnp);
1712        trace_rcu_this_gp(rnp, rdp, c,
1713                          needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1714        return needmore;
1715}
1716
1717/*
1718 * Awaken the grace-period kthread for the specified flavor of RCU.
1719 * Don't do a self-awaken, and don't bother awakening when there is
1720 * nothing for the grace-period kthread to do (as in several CPUs
1721 * raced to awaken, and we lost), and finally don't try to awaken
1722 * a kthread that has not yet been created.
1723 */
1724static void rcu_gp_kthread_wake(struct rcu_state *rsp)
1725{
1726        if (current == rsp->gp_kthread ||
1727            !READ_ONCE(rsp->gp_flags) ||
1728            !rsp->gp_kthread)
1729                return;
1730        swake_up(&rsp->gp_wq);
1731}
1732
1733/*
1734 * If there is room, assign a ->completed number to any callbacks on
1735 * this CPU that have not already been assigned.  Also accelerate any
1736 * callbacks that were previously assigned a ->completed number that has
1737 * since proven to be too conservative, which can happen if callbacks get
1738 * assigned a ->completed number while RCU is idle, but with reference to
1739 * a non-root rcu_node structure.  This function is idempotent, so it does
1740 * not hurt to call it repeatedly.  Returns a flag saying whether we should
1741 * awaken the RCU grace-period kthread.
1742 *
1743 * The caller must hold rnp->lock with interrupts disabled.
1744 */
1745static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1746                               struct rcu_data *rdp)
1747{
1748        unsigned long c;
1749        bool ret = false;
1750
1751        raw_lockdep_assert_held_rcu_node(rnp);
1752
1753        /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1754        if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1755                return false;
1756
1757        /*
1758         * Callbacks are often registered with incomplete grace-period
1759         * information.  Something about the fact that getting exact
1760         * information requires acquiring a global lock...  RCU therefore
1761         * makes a conservative estimate of the grace period number at which
1762         * a given callback will become ready to invoke.  The following
1763         * code checks this estimate and improves it when possible, thus
1764         * accelerating callback invocation to an earlier grace-period
1765         * number.
1766         */
1767        c = rcu_cbs_completed(rsp, rnp);
1768        if (rcu_segcblist_accelerate(&rdp->cblist, c))
1769                ret = rcu_start_this_gp(rnp, rdp, c);
1770
1771        /* Trace depending on how much we were able to accelerate. */
1772        if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1773                trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
1774        else
1775                trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
1776        return ret;
1777}
1778
1779/*
1780 * Move any callbacks whose grace period has completed to the
1781 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1782 * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
1783 * sublist.  This function is idempotent, so it does not hurt to
1784 * invoke it repeatedly.  As long as it is not invoked -too- often...
1785 * Returns true if the RCU grace-period kthread needs to be awakened.
1786 *
1787 * The caller must hold rnp->lock with interrupts disabled.
1788 */
1789static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
1790                            struct rcu_data *rdp)
1791{
1792        raw_lockdep_assert_held_rcu_node(rnp);
1793
1794        /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1795        if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1796                return false;
1797
1798        /*
1799         * Find all callbacks whose ->completed numbers indicate that they
1800         * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1801         */
1802        rcu_segcblist_advance(&rdp->cblist, rnp->completed);
1803
1804        /* Classify any remaining callbacks. */
1805        return rcu_accelerate_cbs(rsp, rnp, rdp);
1806}
1807
1808/*
1809 * Update CPU-local rcu_data state to record the beginnings and ends of
1810 * grace periods.  The caller must hold the ->lock of the leaf rcu_node
1811 * structure corresponding to the current CPU, and must have irqs disabled.
1812 * Returns true if the grace-period kthread needs to be awakened.
1813 */
1814static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
1815                              struct rcu_data *rdp)
1816{
1817        bool ret;
1818        bool need_gp;
1819
1820        raw_lockdep_assert_held_rcu_node(rnp);
1821
1822        /* Handle the ends of any preceding grace periods first. */
1823        if (rdp->completed == rnp->completed &&
1824            !unlikely(READ_ONCE(rdp->gpwrap))) {
1825
1826                /* No grace period end, so just accelerate recent callbacks. */
1827                ret = rcu_accelerate_cbs(rsp, rnp, rdp);
1828
1829        } else {
1830
1831                /* Advance callbacks. */
1832                ret = rcu_advance_cbs(rsp, rnp, rdp);
1833
1834                /* Remember that we saw this grace-period completion. */
1835                rdp->completed = rnp->completed;
1836                trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
1837        }
1838
1839        if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
1840                /*
1841                 * If the current grace period is waiting for this CPU,
1842                 * set up to detect a quiescent state, otherwise don't
1843                 * go looking for one.
1844                 */
1845                rdp->gpnum = rnp->gpnum;
1846                trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
1847                need_gp = !!(rnp->qsmask & rdp->grpmask);
1848                rdp->cpu_no_qs.b.norm = need_gp;
1849                rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
1850                rdp->core_needs_qs = need_gp;
1851                zero_cpu_stall_ticks(rdp);
1852                WRITE_ONCE(rdp->gpwrap, false);
1853                rcu_gpnum_ovf(rnp, rdp);
1854        }
1855        return ret;
1856}
1857
1858static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
1859{
1860        unsigned long flags;
1861        bool needwake;
1862        struct rcu_node *rnp;
1863
1864        local_irq_save(flags);
1865        rnp = rdp->mynode;
1866        if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
1867             rdp->completed == READ_ONCE(rnp->completed) &&
1868             !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1869            !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1870                local_irq_restore(flags);
1871                return;
1872        }
1873        needwake = __note_gp_changes(rsp, rnp, rdp);
1874        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1875        if (needwake)
1876                rcu_gp_kthread_wake(rsp);
1877}
1878
1879static void rcu_gp_slow(struct rcu_state *rsp, int delay)
1880{
1881        if (delay > 0 &&
1882            !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1883                schedule_timeout_uninterruptible(delay);
1884}
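
/*
 * Worked example (illustrative values): with rcu_num_nodes == 3,
 * PER_RCU_NODE_PERIOD == 3, and delay == 5, only a grace period whose
 * number is a multiple of 3 * 3 * 5 == 45 sleeps here, and then for
 * just 5 jiffies, so debug delays perturb only a small fraction of
 * grace periods.
 */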
1885
1886/*
1887 * Initialize a new grace period.  Return false if no grace period required.
1888 */
1889static bool rcu_gp_init(struct rcu_state *rsp)
1890{
1891        unsigned long oldmask;
1892        struct rcu_data *rdp;
1893        struct rcu_node *rnp = rcu_get_root(rsp);
1894
1895        WRITE_ONCE(rsp->gp_activity, jiffies);
1896        raw_spin_lock_irq_rcu_node(rnp);
1897        if (!READ_ONCE(rsp->gp_flags)) {
1898                /* Spurious wakeup, tell caller to go back to sleep.  */
1899                raw_spin_unlock_irq_rcu_node(rnp);
1900                return false;
1901        }
1902        WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
1903
1904        if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
1905                /*
1906                 * Grace period already in progress, don't start another.
1907                 * Not supposed to be able to happen.
1908                 */
1909                raw_spin_unlock_irq_rcu_node(rnp);
1910                return false;
1911        }
1912
1913        /* Advance to a new grace period and initialize state. */
1914        record_gp_stall_check_time(rsp);
1915        /* Record GP times before starting GP, hence smp_store_release(). */
1916        smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
1917        trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
1918        raw_spin_unlock_irq_rcu_node(rnp);
1919
1920        /*
1921         * Apply per-leaf buffered online and offline operations to the
1922         * rcu_node tree.  Note that this new grace period need not wait
1923         * for subsequent online CPUs, and that quiescent-state forcing
1924         * will handle subsequent offline CPUs.
1925         */
1926        rcu_for_each_leaf_node(rsp, rnp) {
1927                rcu_gp_slow(rsp, gp_preinit_delay);
1928                raw_spin_lock_irq_rcu_node(rnp);
1929                if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1930                    !rnp->wait_blkd_tasks) {
1931                        /* Nothing to do on this leaf rcu_node structure. */
1932                        raw_spin_unlock_irq_rcu_node(rnp);
1933                        continue;
1934                }
1935
1936                /* Record old state, apply changes to ->qsmaskinit field. */
1937                oldmask = rnp->qsmaskinit;
1938                rnp->qsmaskinit = rnp->qsmaskinitnext;
1939
1940                /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1941                if (!oldmask != !rnp->qsmaskinit) {
1942                        if (!oldmask) /* First online CPU for this rcu_node. */
1943                                rcu_init_new_rnp(rnp);
1944                        else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */
1945                                rnp->wait_blkd_tasks = true;
1946                        else /* Last offline CPU and can propagate. */
1947                                rcu_cleanup_dead_rnp(rnp);
1948                }
1949
1950                /*
1951                 * If all waited-on tasks from prior grace period are
1952                 * done, and if all this rcu_node structure's CPUs are
1953                 * still offline, propagate up the rcu_node tree and
1954                 * clear ->wait_blkd_tasks.  Otherwise, if one of this
1955                 * rcu_node structure's CPUs has since come back online,
1956                 * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp()
1957                 * checks for this, so just call it unconditionally).
1958                 */
1959                if (rnp->wait_blkd_tasks &&
1960                    (!rcu_preempt_has_tasks(rnp) ||
1961                     rnp->qsmaskinit)) {
1962                        rnp->wait_blkd_tasks = false;
1963                        rcu_cleanup_dead_rnp(rnp);
1964                }
1965
1966                raw_spin_unlock_irq_rcu_node(rnp);
1967        }
1968
1969        /*
1970         * Set the quiescent-state-needed bits in all the rcu_node
1971         * structures for all currently online CPUs in breadth-first order,
1972         * starting from the root rcu_node structure, relying on the layout
1973         * of the tree within the rsp->node[] array.  Note that other CPUs
1974         * will access only the leaves of the hierarchy, thus seeing that no
1975         * grace period is in progress, at least until the corresponding
1976         * leaf node has been initialized.
1977         *
1978         * The grace period cannot complete until the initialization
1979         * process finishes, because this kthread handles both.
1980         */
1981        rcu_for_each_node_breadth_first(rsp, rnp) {
1982                rcu_gp_slow(rsp, gp_init_delay);
1983                raw_spin_lock_irq_rcu_node(rnp);
1984                rdp = this_cpu_ptr(rsp->rda);
1985                rcu_preempt_check_blocked_tasks(rnp);
1986                rnp->qsmask = rnp->qsmaskinit;
1987                WRITE_ONCE(rnp->gpnum, rsp->gpnum);
1988                if (WARN_ON_ONCE(rnp->completed != rsp->completed))
1989                        WRITE_ONCE(rnp->completed, rsp->completed);
1990                if (rnp == rdp->mynode)
1991                        (void)__note_gp_changes(rsp, rnp, rdp);
1992                rcu_preempt_boost_start_gp(rnp);
1993                trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
1994                                            rnp->level, rnp->grplo,
1995                                            rnp->grphi, rnp->qsmask);
1996                raw_spin_unlock_irq_rcu_node(rnp);
1997                cond_resched_tasks_rcu_qs();
1998                WRITE_ONCE(rsp->gp_activity, jiffies);
1999        }
2000
2001        return true;
2002}
2003
2004/*
2005 * Helper function for swait_event_idle() wakeup at force-quiescent-state
2006 * time.
2007 */
2008static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
2009{
2010        struct rcu_node *rnp = rcu_get_root(rsp);
2011
2012        /* Someone like call_rcu() requested a force-quiescent-state scan. */
2013        *gfp = READ_ONCE(rsp->gp_flags);
2014        if (*gfp & RCU_GP_FLAG_FQS)
2015                return true;
2016
2017        /* The current grace period has completed. */
2018        if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
2019                return true;
2020
2021        return false;
2022}
2023
2024/*
2025 * Do one round of quiescent-state forcing.
2026 */
2027static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
2028{
2029        struct rcu_node *rnp = rcu_get_root(rsp);
2030
2031        WRITE_ONCE(rsp->gp_activity, jiffies);
2032        rsp->n_force_qs++;
2033        if (first_time) {
2034                /* Collect dyntick-idle snapshots. */
2035                force_qs_rnp(rsp, dyntick_save_progress_counter);
2036        } else {
2037                /* Handle dyntick-idle and offline CPUs. */
2038                force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
2039        }
2040        /* Clear flag to prevent immediate re-entry. */
2041        if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
2042                raw_spin_lock_irq_rcu_node(rnp);
2043                WRITE_ONCE(rsp->gp_flags,
2044                           READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
2045                raw_spin_unlock_irq_rcu_node(rnp);
2046        }
2047}
2048
2049/*
2050 * Clean up after the old grace period.
2051 */
2052static void rcu_gp_cleanup(struct rcu_state *rsp)
2053{
2054        unsigned long gp_duration;
2055        bool needgp = false;
2056        struct rcu_data *rdp;
2057        struct rcu_node *rnp = rcu_get_root(rsp);
2058        struct swait_queue_head *sq;
2059
2060        WRITE_ONCE(rsp->gp_activity, jiffies);
2061        raw_spin_lock_irq_rcu_node(rnp);
2062        gp_duration = jiffies - rsp->gp_start;
2063        if (gp_duration > rsp->gp_max)
2064                rsp->gp_max = gp_duration;
2065
2066        /*
2067         * We know the grace period is complete, but to everyone else
2068         * it appears to still be ongoing.  But it is also the case
2069         * that to everyone else it looks like there is nothing that
2070         * they can do to advance the grace period.  It is therefore
2071         * safe for us to drop the lock in order to mark the grace
2072         * period as completed in all of the rcu_node structures.
2073         */
2074        raw_spin_unlock_irq_rcu_node(rnp);
2075
2076        /*
2077         * Propagate new ->completed value to rcu_node structures so
2078         * that other CPUs don't have to wait until the start of the next
2079         * grace period to process their callbacks.  This also avoids
2080         * some nasty RCU grace-period initialization races by forcing
2081         * the end of the current grace period to be completely recorded in
2082         * all of the rcu_node structures before the beginning of the next
2083         * grace period is recorded in any of the rcu_node structures.
2084         */
2085        rcu_for_each_node_breadth_first(rsp, rnp) {
2086                raw_spin_lock_irq_rcu_node(rnp);
2087                WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
2088                WARN_ON_ONCE(rnp->qsmask);
2089                WRITE_ONCE(rnp->completed, rsp->gpnum);
2090                rdp = this_cpu_ptr(rsp->rda);
2091                if (rnp == rdp->mynode)
2092                        needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
2093                /* smp_mb() provided by prior unlock-lock pair. */
2094                needgp = rcu_future_gp_cleanup(rsp, rnp) || needgp;
2095                sq = rcu_nocb_gp_get(rnp);
2096                raw_spin_unlock_irq_rcu_node(rnp);
2097                rcu_nocb_gp_cleanup(sq);
2098                cond_resched_tasks_rcu_qs();
2099                WRITE_ONCE(rsp->gp_activity, jiffies);
2100                rcu_gp_slow(rsp, gp_cleanup_delay);
2101        }
2102        rnp = rcu_get_root(rsp);
2103        raw_spin_lock_irq_rcu_node(rnp); /* Order GP before ->completed update. */
2104
2105        /* Declare grace period done. */
2106        WRITE_ONCE(rsp->completed, rsp->gpnum);
2107        trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
2108        rsp->gp_state = RCU_GP_IDLE;
2109        /* Check for GP requests since above loop. */
2110        rdp = this_cpu_ptr(rsp->rda);
2111        if (need_any_future_gp(rnp)) {
2112                trace_rcu_this_gp(rnp, rdp, rsp->completed - 1,
2113                                  TPS("CleanupMore"));
2114                needgp = true;
2115        }
2116        /* Advance CBs to reduce false positives below. */
2117        if (!rcu_accelerate_cbs(rsp, rnp, rdp) && needgp) {
2118                WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
2119                trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
2120                                       TPS("newreq"));
2121        }
2122        WRITE_ONCE(rsp->gp_flags, rsp->gp_flags & RCU_GP_FLAG_INIT);
2123        raw_spin_unlock_irq_rcu_node(rnp);
2124}
2125
2126/*
2127 * Body of kthread that handles grace periods.
2128 */
2129static int __noreturn rcu_gp_kthread(void *arg)
2130{
2131        bool first_gp_fqs;
2132        int gf;
2133        unsigned long j;
2134        int ret;
2135        struct rcu_state *rsp = arg;
2136        struct rcu_node *rnp = rcu_get_root(rsp);
2137
2138        rcu_bind_gp_kthread();
2139        for (;;) {
2140
2141                /* Handle grace-period start. */
2142                for (;;) {
2143                        trace_rcu_grace_period(rsp->name,
2144                                               READ_ONCE(rsp->gpnum),
2145                                               TPS("reqwait"));
2146                        rsp->gp_state = RCU_GP_WAIT_GPS;
2147                        swait_event_idle(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
2148                                                     RCU_GP_FLAG_INIT);
2149                        rsp->gp_state = RCU_GP_DONE_GPS;
2150                        /* Locking provides needed memory barrier. */
2151                        if (rcu_gp_init(rsp))
2152                                break;
2153                        cond_resched_tasks_rcu_qs();
2154                        WRITE_ONCE(rsp->gp_activity, jiffies);
2155                        WARN_ON(signal_pending(current));
2156                        trace_rcu_grace_period(rsp->name,
2157                                               READ_ONCE(rsp->gpnum),
2158                                               TPS("reqwaitsig"));
2159                }
2160
2161                /* Handle quiescent-state forcing. */
2162                first_gp_fqs = true;
2163                j = jiffies_till_first_fqs;
2164                if (j > HZ) {
2165                        j = HZ;
2166                        jiffies_till_first_fqs = HZ;
2167                }
2168                ret = 0;
2169                for (;;) {
2170                        if (!ret) {
2171                                rsp->jiffies_force_qs = jiffies + j;
2172                                WRITE_ONCE(rsp->jiffies_kick_kthreads,
2173                                           jiffies + 3 * j);
2174                        }
2175                        trace_rcu_grace_period(rsp->name,
2176                                               READ_ONCE(rsp->gpnum),
2177                                               TPS("fqswait"));
2178                        rsp->gp_state = RCU_GP_WAIT_FQS;
2179                        ret = swait_event_idle_timeout(rsp->gp_wq,
2180                                        rcu_gp_fqs_check_wake(rsp, &gf), j);
2181                        rsp->gp_state = RCU_GP_DOING_FQS;
2182                        /* Locking provides needed memory barriers. */
2183                        /* If grace period done, leave loop. */
2184                        if (!READ_ONCE(rnp->qsmask) &&
2185                            !rcu_preempt_blocked_readers_cgp(rnp))
2186                                break;
2187                        /* If time for quiescent-state forcing, do it. */
2188                        if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
2189                            (gf & RCU_GP_FLAG_FQS)) {
2190                                trace_rcu_grace_period(rsp->name,
2191                                                       READ_ONCE(rsp->gpnum),
2192                                                       TPS("fqsstart"));
2193                                rcu_gp_fqs(rsp, first_gp_fqs);
2194                                first_gp_fqs = false;
2195                                trace_rcu_grace_period(rsp->name,
2196                                                       READ_ONCE(rsp->gpnum),
2197                                                       TPS("fqsend"));
2198                                cond_resched_tasks_rcu_qs();
2199                                WRITE_ONCE(rsp->gp_activity, jiffies);
2200                                ret = 0; /* Force full wait till next FQS. */
2201                                j = jiffies_till_next_fqs;
2202                                if (j > HZ) {
2203                                        j = HZ;
2204                                        jiffies_till_next_fqs = HZ;
2205                                } else if (j < 1) {
2206                                        j = 1;
2207                                        jiffies_till_next_fqs = 1;
2208                                }
2209                        } else {
2210                                /* Deal with stray signal. */
2211                                cond_resched_tasks_rcu_qs();
2212                                WRITE_ONCE(rsp->gp_activity, jiffies);
2213                                WARN_ON(signal_pending(current));
2214                                trace_rcu_grace_period(rsp->name,
2215                                                       READ_ONCE(rsp->gpnum),
2216                                                       TPS("fqswaitsig"));
2217                                ret = 1; /* Keep old FQS timing. */
2218                                j = jiffies;
2219                                if (time_after(jiffies, rsp->jiffies_force_qs))
2220                                        j = 1;
2221                                else
2222                                        j = rsp->jiffies_force_qs - j;
2223                        }
2224                }
2225
2226                /* Handle grace-period end. */
2227                rsp->gp_state = RCU_GP_CLEANUP;
2228                rcu_gp_cleanup(rsp);
2229                rsp->gp_state = RCU_GP_CLEANED;
2230        }
2231}
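
/*
 * Condensed, illustration-only sketch of how the kthread above is
 * brought into being (the real code is rcu_spawn_gp_kthread(), which
 * additionally handles kthread priority and the locking needed to
 * publish ->gp_kthread):
 */
#if 0	/* illustration only, not compiled */
static int __init demo_spawn_gp_kthread(struct rcu_state *rsp)
{
	struct task_struct *t;

	t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
	if (WARN_ON_ONCE(IS_ERR(t)))
		return PTR_ERR(t);
	rsp->gp_kthread = t;
	wake_up_process(t);
	return 0;
}
#endif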
2232
2233/*
2234 * Report a full set of quiescent states to the specified rcu_state data
2235 * structure.  Invoke rcu_gp_kthread_wake() to awaken the grace-period
2236 * kthread if another grace period is required.  Whether we wake
2237 * the grace-period kthread or it awakens itself for the next round
2238 * of quiescent-state forcing, that kthread will clean up after the
2239 * just-completed grace period.  Note that the caller must hold rnp->lock,
2240 * which is released before return.
2241 */
2242static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
2243        __releases(rcu_get_root(rsp)->lock)
2244{
2245        raw_lockdep_assert_held_rcu_node(rcu_get_root(rsp));
2246        WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
2247        WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
2248        raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
2249        rcu_gp_kthread_wake(rsp);
2250}
2251
2252/*
2253 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2254 * Allows quiescent states for a group of CPUs to be reported at one go
2255 * to the specified rcu_node structure, though all the CPUs in the group
2256 * must be represented by the same rcu_node structure (which need not be a
2257 * leaf rcu_node structure, though it often will be).  The gps parameter
2258 * is the grace-period snapshot, which means that the quiescent states
2259 * are valid only if rnp->gpnum is equal to gps.  That structure's lock
2260 * must be held upon entry, and it is released before return.
2261 */
2262static void
2263rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
2264                  struct rcu_node *rnp, unsigned long gps, unsigned long flags)
2265        __releases(rnp->lock)
2266{
2267        unsigned long oldmask = 0;
2268        struct rcu_node *rnp_c;
2269
2270        raw_lockdep_assert_held_rcu_node(rnp);
2271
2272        /* Walk up the rcu_node hierarchy. */
2273        for (;;) {
2274                if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
2275
2276                        /*
2277                         * Our bit has already been cleared, or the
2278                         * relevant grace period is already over, so done.
2279                         */
2280                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2281                        return;
2282                }
2283                WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
2284                WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
2285                             rcu_preempt_blocked_readers_cgp(rnp));
2286                rnp->qsmask &= ~mask;
2287                trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
2288                                                 mask, rnp->qsmask, rnp->level,
2289                                                 rnp->grplo, rnp->grphi,
2290                                                 !!rnp->gp_tasks);
2291                if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2292
2293                        /* Other bits still set at this level, so done. */
2294                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2295                        return;
2296                }
2297                mask = rnp->grpmask;
2298                if (rnp->parent == NULL) {
2299
2300                        /* No more levels.  Exit loop holding root lock. */
2301
2302                        break;
2303                }
2304                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2305                rnp_c = rnp;
2306                rnp = rnp->parent;
2307                raw_spin_lock_irqsave_rcu_node(rnp, flags);
2308                oldmask = rnp_c->qsmask;
2309        }
2310
2311        /*
2312         * Get here if we are the last CPU to pass through a quiescent
2313         * state for this grace period.  Invoke rcu_report_qs_rsp()
2314         * to clean up and start the next grace period if one is needed.
2315         */
2316        rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
2317}
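
/*
 * Worked example (two-level tree, illustrative masks): suppose a leaf
 * has ->qsmask == 0x3 and a report arrives for mask 0x1.  Bit 0x2 is
 * still set afterward, so the walk stops at the leaf.  When the 0x2
 * report arrives, the leaf's ->qsmask reaches zero, mask is replaced
 * by the leaf's ->grpmask within its parent, and the loop repeats one
 * level up.  Should that in turn empty the root's ->qsmask, the final
 * iteration breaks out and rcu_report_qs_rsp() ends the grace period.
 */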
2318
2319/*
2320 * Record a quiescent state for all tasks that were previously queued
2321 * on the specified rcu_node structure and that were blocking the current
2322 * RCU grace period.  The caller must hold the specified rnp->lock with
2323 * irqs disabled, and this lock is released upon return, but irqs remain
2324 * disabled.
2325 */
2326static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
2327                                      struct rcu_node *rnp, unsigned long flags)
2328        __releases(rnp->lock)
2329{
2330        unsigned long gps;
2331        unsigned long mask;
2332        struct rcu_node *rnp_p;
2333
2334        raw_lockdep_assert_held_rcu_node(rnp);
2335        if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
2336            rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2337                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2338                return;  /* Still need more quiescent states! */
2339        }
2340
2341        rnp_p = rnp->parent;
2342        if (rnp_p == NULL) {
2343                /*
2344                 * Only one rcu_node structure in the tree, so don't
2345                 * try to report up to its nonexistent parent!
2346                 */
2347                rcu_report_qs_rsp(rsp, flags);
2348                return;
2349        }
2350
2351        /* Report up the rest of the hierarchy, tracking current ->gpnum. */
2352        gps = rnp->gpnum;
2353        mask = rnp->grpmask;
2354        raw_spin_unlock_rcu_node(rnp);  /* irqs remain disabled. */
2355        raw_spin_lock_rcu_node(rnp_p);  /* irqs already disabled. */
2356        rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
2357}
2358
2359/*
2360 * Record a quiescent state for the specified CPU to that CPU's rcu_data
2361 * structure.  This must be called from the specified CPU.
2362 */
2363static void
2364rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
2365{
2366        unsigned long flags;
2367        unsigned long mask;
2368        bool needwake;
2369        struct rcu_node *rnp;
2370
2371        rnp = rdp->mynode;
2372        raw_spin_lock_irqsave_rcu_node(rnp, flags);
2373        if (rdp->cpu_no_qs.b.norm || rdp->gpnum != rnp->gpnum ||
2374            rnp->completed == rnp->gpnum || rdp->gpwrap) {
2375
2376                /*
2377                 * The grace period in which this quiescent state was
2378                 * recorded has ended, so don't report it upwards.
2379                 * We will instead need a new quiescent state that lies
2380                 * within the current grace period.
2381                 */
2382                rdp->cpu_no_qs.b.norm = true;   /* need qs for new gp. */
2383                rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
2384                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2385                return;
2386        }
2387        mask = rdp->grpmask;
2388        if ((rnp->qsmask & mask) == 0) {
2389                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2390        } else {
2391                rdp->core_needs_qs = false;
2392
2393                /*
2394                 * This GP can't end until this CPU checks in, so all of our
2395                 * callbacks can be processed during the next GP.
2396                 */
2397                needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
2398
2399                rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
2400                /* ^^^ Released rnp->lock */
2401                if (needwake)
2402                        rcu_gp_kthread_wake(rsp);
2403        }
2404}
2405
2406/*
2407 * Check to see if there is a new grace period of which this CPU
2408 * is not yet aware, and if so, set up local rcu_data state for it.
2409 * Otherwise, see if this CPU has just passed through its first
2410 * quiescent state for this grace period, and record that fact if so.
2411 */
2412static void
2413rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
2414{
2415        /* Check for grace-period ends and beginnings. */
2416        note_gp_changes(rsp, rdp);
2417
2418        /*
2419         * Does this CPU still need to do its part for current grace period?
2420         * If no, return and let the other CPUs do their part as well.
2421         */
2422        if (!rdp->core_needs_qs)
2423                return;
2424
2425        /*
2426         * Was there a quiescent state since the beginning of the grace
2427         * period? If no, then exit and wait for the next call.
2428         */
2429        if (rdp->cpu_no_qs.b.norm)
2430                return;
2431
2432        /*
2433         * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2434         * judge of that).
2435         */
2436        rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
2437}
2438
2439/*
2440 * Trace the fact that this CPU is going offline.
2441 */
2442static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
2443{
2444        RCU_TRACE(unsigned long mask;)
2445        RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);)
2446        RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)
2447
2448        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2449                return;
2450
2451        RCU_TRACE(mask = rdp->grpmask;)
2452        trace_rcu_grace_period(rsp->name,
2453                               rnp->gpnum + 1 - !!(rnp->qsmask & mask),
2454                               TPS("cpuofl"));
2455}
2456
2457/*
2458 * All CPUs for the specified rcu_node structure have gone offline,
2459 * and all tasks that were preempted within an RCU read-side critical
2460 * section while running on one of those CPUs have since exited their RCU
2461 * read-side critical section.  Some other CPU is reporting this fact with
2462 * the specified rcu_node structure's ->lock held and interrupts disabled.
2463 * This function therefore goes up the tree of rcu_node structures,
2464 * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
2465 * the leaf rcu_node structure's ->qsmaskinit field has already been
2466 * updated.
2467 *
2468 * This function does check that the specified rcu_node structure has
2469 * all CPUs offline and no blocked tasks, so it is OK to invoke it
2470 * prematurely.  That said, invoking it after the fact will cost you
2471 * a needless lock acquisition.  So once it has done its work, don't
2472 * invoke it again.
2473 */
2474static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2475{
2476        long mask;
2477        struct rcu_node *rnp = rnp_leaf;
2478
2479        raw_lockdep_assert_held_rcu_node(rnp);
2480        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2481            rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
2482                return;
2483        for (;;) {
2484                mask = rnp->grpmask;
2485                rnp = rnp->parent;
2486                if (!rnp)
2487                        break;
2488                raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2489                rnp->qsmaskinit &= ~mask;
2490                rnp->qsmask &= ~mask;
2491                if (rnp->qsmaskinit) {
2492                        raw_spin_unlock_rcu_node(rnp);
2493                        /* irqs remain disabled. */
2494                        return;
2495                }
2496                raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2497        }
2498}
2499
2500/*
2501 * The CPU has been completely removed, and some other CPU is reporting
2502 * this fact from process context.  Do the remainder of the cleanup.
2503 * There can only be one CPU hotplug operation at a time, so no need for
2504 * explicit locking.
2505 */
2506static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
2507{
2508        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2509        struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
2510
2511        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2512                return;
2513
2514        /* Adjust any no-longer-needed kthreads. */
2515        rcu_boost_kthread_setaffinity(rnp, -1);
2516}
2517
2518/*
2519 * Invoke any RCU callbacks that have made it to the end of their grace
2520 * period.  Throttle as specified by rdp->blimit.
2521 */
2522static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
2523{
2524        unsigned long flags;
2525        struct rcu_head *rhp;
2526        struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2527        long bl, count;
2528
2529        /* If no callbacks are ready, just return. */
2530        if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2531                trace_rcu_batch_start(rsp->name,
2532                                      rcu_segcblist_n_lazy_cbs(&rdp->cblist),
2533                                      rcu_segcblist_n_cbs(&rdp->cblist), 0);
2534                trace_rcu_batch_end(rsp->name, 0,
2535                                    !rcu_segcblist_empty(&rdp->cblist),
2536                                    need_resched(), is_idle_task(current),
2537                                    rcu_is_callbacks_kthread());
2538                return;
2539        }
2540
2541        /*
2542         * Extract the list of ready callbacks, disabling to prevent
2543         * races with call_rcu() from interrupt handlers.  Leave the
2544         * callback counts, as rcu_barrier() needs to be conservative.
2545         */
2546        local_irq_save(flags);
2547        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2548        bl = rdp->blimit;
2549        trace_rcu_batch_start(rsp->name, rcu_segcblist_n_lazy_cbs(&rdp->cblist),
2550                              rcu_segcblist_n_cbs(&rdp->cblist), bl);
2551        rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2552        local_irq_restore(flags);
2553
2554        /* Invoke callbacks. */
2555        rhp = rcu_cblist_dequeue(&rcl);
2556        for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2557                debug_rcu_head_unqueue(rhp);
2558                if (__rcu_reclaim(rsp->name, rhp))
2559                        rcu_cblist_dequeued_lazy(&rcl);
2560                /*
2561                 * Stop only if limit reached and CPU has something to do.
2562                 * Note: The rcl structure counts down from zero.
2563                 */
2564                if (-rcl.len >= bl &&
2565                    (need_resched() ||
2566                     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2567                        break;
2568        }
2569
2570        local_irq_save(flags);
2571        count = -rcl.len;
2572        trace_rcu_batch_end(rsp->name, count, !!rcl.head, need_resched(),
2573                            is_idle_task(current), rcu_is_callbacks_kthread());
2574
2575        /* Update counts and requeue any remaining callbacks. */
2576        rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2577        smp_mb(); /* List handling before counting for rcu_barrier(). */
2578        rcu_segcblist_insert_count(&rdp->cblist, &rcl);
2579
2580        /* Reinstate batch limit if we have worked down the excess. */
2581        count = rcu_segcblist_n_cbs(&rdp->cblist);
2582        if (rdp->blimit == LONG_MAX && count <= qlowmark)
2583                rdp->blimit = blimit;
2584
2585        /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2586        if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2587                rdp->qlen_last_fqs_check = 0;
2588                rdp->n_force_qs_snap = rsp->n_force_qs;
2589        } else if (count < rdp->qlen_last_fqs_check - qhimark)
2590                rdp->qlen_last_fqs_check = count;
2591
2592        /*
2593         * The following usually indicates a double call_rcu().  To track
2594         * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2595         */
2596        WARN_ON_ONCE(rcu_segcblist_empty(&rdp->cblist) != (count == 0));
2597
2598        local_irq_restore(flags);
2599
2600        /* Re-invoke RCU core processing if there are callbacks remaining. */
2601        if (rcu_segcblist_ready_cbs(&rdp->cblist))
2602                invoke_rcu_core();
2603}
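
/*
 * Note on the counting convention used above: rcl.len starts at zero
 * and rcu_cblist_dequeue() decrements it, so after three callbacks
 * have been invoked rcl.len == -3 and -rcl.len == 3 (illustrative
 * numbers).  The "-rcl.len >= bl" test therefore stops after ->blimit
 * callbacks, "count = -rcl.len" recovers the number invoked, and
 * rcu_segcblist_insert_count() applies the (negative) rcl.len to the
 * main list's count.
 */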
2604
2605/*
2606 * Check to see if this CPU is in a non-context-switch quiescent state
2607 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
2608 * Also schedule RCU core processing.
2609 *
2610 * This function must be called from hardirq context.  It is normally
2611 * invoked from the scheduling-clock interrupt.
2612 */
2613void rcu_check_callbacks(int user)
2614{
2615        trace_rcu_utilization(TPS("Start scheduler-tick"));
2616        increment_cpu_stall_ticks();
2617        if (user || rcu_is_cpu_rrupt_from_idle()) {
2618
2619                /*
2620                 * Get here if this CPU took its interrupt from user
2621                 * mode or from the idle loop, and if this is not a
2622                 * nested interrupt.  In this case, the CPU is in
2623                 * a quiescent state, so note it.
2624                 *
2625                 * No memory barrier is required here because both
2626                 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
2627                 * variables that other CPUs neither access nor modify,
2628                 * at least not while the corresponding CPU is online.
2629                 */
2630
2631                rcu_sched_qs();
2632                rcu_bh_qs();
2633
2634        } else if (!in_softirq()) {
2635
2636                /*
2637                 * Get here if this CPU did not take its interrupt from
2638                 * softirq, in other words, if it is not interrupting
2639                 * an rcu_bh read-side critical section.  This is a _bh
2640                 * critical section, so note it.
2641                 */
2642
2643                rcu_bh_qs();
2644        }
2645        rcu_preempt_check_callbacks();
2646        if (rcu_pending())
2647                invoke_rcu_core();
2648        if (user)
2649                rcu_note_voluntary_context_switch(current);
2650        trace_rcu_utilization(TPS("End scheduler-tick"));
2651}
2652
2653/*
2654 * Scan the leaf rcu_node structures, processing dyntick state for any that
2655 * have not yet encountered a quiescent state, using the function specified.
2656 * Also initiate boosting for any threads blocked on the root rcu_node.
2657 *
2658 * The caller must have suppressed start of new grace periods.
2659 */
2660static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
2661{
2662        int cpu;
2663        unsigned long flags;
2664        unsigned long mask;
2665        struct rcu_node *rnp;
2666
2667        rcu_for_each_leaf_node(rsp, rnp) {
2668                cond_resched_tasks_rcu_qs();
2669                mask = 0;
2670                raw_spin_lock_irqsave_rcu_node(rnp, flags);
2671                if (rnp->qsmask == 0) {
2672                        if (rcu_state_p == &rcu_sched_state ||
2673                            rsp != rcu_state_p ||
2674                            rcu_preempt_blocked_readers_cgp(rnp)) {
2675                                /*
2676                                 * No point in scanning bits because they
2677                                 * are all zero.  But we might need to
2678                                 * priority-boost blocked readers.
2679                                 */
2680                                rcu_initiate_boost(rnp, flags);
2681                                /* rcu_initiate_boost() releases rnp->lock */
2682                                continue;
2683                        }
2684                        if (rnp->parent &&
2685                            (rnp->parent->qsmask & rnp->grpmask)) {
2686                                /*
2687                                 * Race between grace-period
2688                                 * initialization and task exiting RCU
2689                                 * read-side critical section: Report.
2690                                 */
2691                                rcu_report_unblock_qs_rnp(rsp, rnp, flags);
2692                                /* rcu_report_unblock_qs_rnp() releases ->lock */
2693                                continue;
2694                        }
2695                }
2696                for_each_leaf_node_possible_cpu(rnp, cpu) {
2697                        unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
2698                        if ((rnp->qsmask & bit) != 0) {
2699                                if (f(per_cpu_ptr(rsp->rda, cpu)))
2700                                        mask |= bit;
2701                        }
2702                }
2703                if (mask != 0) {
2704                        /* Idle/offline CPUs, report (releases rnp->lock). */
2705                        rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
2706                } else {
2707                        /* Nothing to do here, so just drop the lock. */
2708                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2709                }
2710        }
2711}
2712
2713/*
2714 * Force quiescent states on reluctant CPUs, and also detect which
2715 * CPUs are in dyntick-idle mode.
2716 */
2717static void force_quiescent_state(struct rcu_state *rsp)
2718{
2719        unsigned long flags;
2720        bool ret;
2721        struct rcu_node *rnp;
2722        struct rcu_node *rnp_old = NULL;
2723
2724        /* Funnel through hierarchy to reduce memory contention. */
2725        rnp = __this_cpu_read(rsp->rda->mynode);
2726        for (; rnp != NULL; rnp = rnp->parent) {
2727                ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
2728                      !raw_spin_trylock(&rnp->fqslock);
2729                if (rnp_old != NULL)
2730                        raw_spin_unlock(&rnp_old->fqslock);
2731                if (ret)
2732                        return;
2733                rnp_old = rnp;
2734        }
2735        /* rnp_old == rcu_get_root(rsp), rnp == NULL. */
2736
2737        /* Reached the root of the rcu_node tree, acquire lock. */
2738        raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2739        raw_spin_unlock(&rnp_old->fqslock);
2740        if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
2741                raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2742                return;  /* Someone beat us to it. */
2743        }
2744        WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
2745        raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2746        rcu_gp_kthread_wake(rsp);
2747}
2748
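/*
 * Editor's illustration (not part of the original source): the funnel
 * locking used by force_quiescent_state(), reduced to its essentials.
 * "struct funnel_node" and work_already_requested() are hypothetical.
 * Each contender climbs from its leaf toward the root, dropping out as
 * soon as it loses a trylock or sees the work already requested, so at
 * most one contender per subtree ever reaches the root's lock.
 */
#if 0
struct funnel_node {
        raw_spinlock_t lock;
        struct funnel_node *parent;     /* NULL at the root. */
};

static bool work_already_requested(void);       /* Hypothetical. */

static bool funnel_lock_to_root(struct funnel_node *node)
{
        struct funnel_node *held = NULL;

        for (; node != NULL; node = node->parent) {
                bool lost = work_already_requested() ||
                            !raw_spin_trylock(&node->lock);

                if (held)
                        raw_spin_unlock(&held->lock);
                if (lost)
                        return false;   /* The winner does our work. */
                held = node;
        }
        return true;    /* Caller now holds the root's lock. */
}
#endif
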
2749/*
2750 * This does the RCU core processing work for the specified rcu_state
2751 * and rcu_data structures.  This may be called only from the CPU to
2752 * which the rdp belongs.
2753 */
2754static void
2755__rcu_process_callbacks(struct rcu_state *rsp)
2756{
2757        unsigned long flags;
2758        bool needwake;
2759        struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
2760        struct rcu_node *rnp;
2761
2762        WARN_ON_ONCE(!rdp->beenonline);
2763
2764        /* Update RCU state based on any recent quiescent states. */
2765        rcu_check_quiescent_state(rsp, rdp);
2766
2767        /* No grace period and unregistered callbacks? */
2768        if (!rcu_gp_in_progress(rsp) &&
2769            rcu_segcblist_is_enabled(&rdp->cblist)) {
2770                local_irq_save(flags);
2771                if (rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) {
2772                        local_irq_restore(flags);
2773                } else {
2774                        rnp = rdp->mynode;
2775                        raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
2776                        needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
2777                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2778                        if (needwake)
2779                                rcu_gp_kthread_wake(rsp);
2780                }
2781        }
2782
2783        /* If there are callbacks ready, invoke them. */
2784        if (rcu_segcblist_ready_cbs(&rdp->cblist))
2785                invoke_rcu_callbacks(rsp, rdp);
2786
2787        /* Do any needed deferred wakeups of rcuo kthreads. */
2788        do_nocb_deferred_wakeup(rdp);
2789}
2790
2791/*
2792 * Do RCU core processing for the current CPU.
2793 */
2794static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
2795{
2796        struct rcu_state *rsp;
2797
2798        if (cpu_is_offline(smp_processor_id()))
2799                return;
2800        trace_rcu_utilization(TPS("Start RCU core"));
2801        for_each_rcu_flavor(rsp)
2802                __rcu_process_callbacks(rsp);
2803        trace_rcu_utilization(TPS("End RCU core"));
2804}
2805
2806/*
2807 * Schedule RCU callback invocation.  If the specified type of RCU
2808 * does not support RCU priority boosting, just do a direct call,
2809 * otherwise wake up the per-CPU kernel kthread.  Note that because we
2810 * are running on the current CPU with softirqs disabled, the
2811 * rcu_cpu_kthread_task cannot disappear out from under us.
2812 */
2813static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
2814{
2815        if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
2816                return;
2817        if (likely(!rsp->boost)) {
2818                rcu_do_batch(rsp, rdp);
2819                return;
2820        }
2821        invoke_rcu_callbacks_kthread();
2822}
2823
2824static void invoke_rcu_core(void)
2825{
2826        if (cpu_online(smp_processor_id()))
2827                raise_softirq(RCU_SOFTIRQ);
2828}
2829
2830/*
2831 * Handle any core-RCU processing required by a call_rcu() invocation.
2832 */
2833static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
2834                            struct rcu_head *head, unsigned long flags)
2835{
2836        bool needwake;
2837
2838        /*
2839         * If called from an extended quiescent state, invoke the RCU
2840         * core in order to force a re-evaluation of RCU's idleness.
2841         */
2842        if (!rcu_is_watching())
2843                invoke_rcu_core();
2844
2845        /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2846        if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2847                return;
2848
2849        /*
2850         * Force the grace period if too many callbacks or too long waiting.
2851         * Enforce hysteresis, and don't invoke force_quiescent_state()
2852         * if some other CPU has recently done so.  Also, don't bother
2853         * invoking force_quiescent_state() if the newly enqueued callback
2854         * is the only one waiting for a grace period to complete.
2855         */
2856        if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2857                     rdp->qlen_last_fqs_check + qhimark)) {
2858
2859                /* Are we ignoring a completed grace period? */
2860                note_gp_changes(rsp, rdp);
2861
2862                /* Start a new grace period if one not already started. */
2863                if (!rcu_gp_in_progress(rsp)) {
2864                        struct rcu_node *rnp = rdp->mynode;
2865
2866                        raw_spin_lock_rcu_node(rnp);
2867                        needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
2868                        raw_spin_unlock_rcu_node(rnp);
2869                        if (needwake)
2870                                rcu_gp_kthread_wake(rsp);
2871                } else {
2872                        /* Give the grace period a kick. */
2873                        rdp->blimit = LONG_MAX;
2874                        if (rsp->n_force_qs == rdp->n_force_qs_snap &&
2875                            rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2876                                force_quiescent_state(rsp);
2877                        rdp->n_force_qs_snap = rsp->n_force_qs;
2878                        rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2879                }
2880        }
2881}
2882
2883/*
2884 * RCU callback function to leak a callback.
2885 */
2886static void rcu_leak_callback(struct rcu_head *rhp)
2887{
2888}
2889
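/*
 * Editor's illustration (not part of the original source): the usage
 * bug that rcu_leak_callback() helps contain.  "struct foo" and
 * foo_release() are hypothetical.  Queueing the same rcu_head a second
 * time before the first grace period has elapsed would corrupt the
 * callback list; with debug objects enabled, __call_rcu() below instead
 * complains, leaks the callback, and points ->func at
 * rcu_leak_callback so that the head is never requeued.
 */
#if 0
struct foo {
        int data;
        struct rcu_head rcu;
};

static void foo_release(struct rcu_head *rhp)
{
        kfree(container_of(rhp, struct foo, rcu));
}

static void buggy_double_retire(struct foo *fp)
{
        call_rcu_sched(&fp->rcu, foo_release);
        call_rcu_sched(&fp->rcu, foo_release);  /* BUG: still queued! */
}
#endif
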
2890/*
2891 * Helper function for call_rcu() and friends.  The cpu argument will
2892 * normally be -1, indicating "currently running CPU".  It may specify
2893 * a CPU only if that CPU is a no-CBs CPU.  Currently, only _rcu_barrier()
2894 * is expected to specify a CPU.
2895 */
2896static void
2897__call_rcu(struct rcu_head *head, rcu_callback_t func,
2898           struct rcu_state *rsp, int cpu, bool lazy)
2899{
2900        unsigned long flags;
2901        struct rcu_data *rdp;
2902
2903        /* Misaligned rcu_head! */
2904        WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
2905
2906        if (debug_rcu_head_queue(head)) {
2907                /*
2908                 * Probable double call_rcu(), so leak the callback.
2909                 * Use rcu:rcu_callback trace event to find the previous
2910                 * time callback was passed to __call_rcu().
2911                 */
2912                WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pF()!!!\n",
2913                          head, head->func);
2914                WRITE_ONCE(head->func, rcu_leak_callback);
2915                return;
2916        }
2917        head->func = func;
2918        head->next = NULL;
2919        local_irq_save(flags);
2920        rdp = this_cpu_ptr(rsp->rda);
2921
2922        /* Add the callback to our list. */
2923        if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist)) || cpu != -1) {
2924                int offline;
2925
2926                if (cpu != -1)
2927                        rdp = per_cpu_ptr(rsp->rda, cpu);
2928                if (likely(rdp->mynode)) {
2929                        /* Post-boot, so this should be for a no-CBs CPU. */
2930                        offline = !__call_rcu_nocb(rdp, head, lazy, flags);
2931                        WARN_ON_ONCE(offline);
2932                        /* Offline CPU, __call_rcu() illegal, leak callback.  */
2933                        local_irq_restore(flags);
2934                        return;
2935                }
2936                /*
2937                 * Very early boot, before rcu_init().  Initialize if needed
2938                 * and then drop through to queue the callback.
2939                 */
2940                BUG_ON(cpu != -1);
2941                WARN_ON_ONCE(!rcu_is_watching());
2942                if (rcu_segcblist_empty(&rdp->cblist))
2943                        rcu_segcblist_init(&rdp->cblist);
2944        }
2945        rcu_segcblist_enqueue(&rdp->cblist, head, lazy);
2946        if (!lazy)
2947                rcu_idle_count_callbacks_posted();
2948
2949        if (__is_kfree_rcu_offset((unsigned long)func))
2950                trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
2951                                         rcu_segcblist_n_lazy_cbs(&rdp->cblist),
2952                                         rcu_segcblist_n_cbs(&rdp->cblist));
2953        else
2954                trace_rcu_callback(rsp->name, head,
2955                                   rcu_segcblist_n_lazy_cbs(&rdp->cblist),
2956                                   rcu_segcblist_n_cbs(&rdp->cblist));
2957
2958        /* Go handle any RCU core processing required. */
2959        __call_rcu_core(rsp, rdp, head, flags);
2960        local_irq_restore(flags);
2961}
2962
2963/**
2964 * call_rcu_sched() - Queue an RCU callback for invocation after sched grace period.
2965 * @head: structure to be used for queueing the RCU updates.
2966 * @func: actual callback function to be invoked after the grace period
2967 *
2968 * The callback function will be invoked some time after a full grace
2969 * period elapses, in other words after all currently executing RCU
2970 * read-side critical sections have completed. call_rcu_sched() assumes
2971 * that the read-side critical sections end on enabling of preemption
2972 * or on voluntary preemption.
2973 * RCU read-side critical sections are delimited by:
2974 *
2975 * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
2976 * - anything that disables preemption.
2977 *
2978 * These may be nested.
2979 *
2980 * See the description of call_rcu() for more detailed information on
2981 * memory ordering guarantees.
2982 */
2983void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
2984{
2985        __call_rcu(head, func, &rcu_sched_state, -1, 0);
2986}
2987EXPORT_SYMBOL_GPL(call_rcu_sched);
2988
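/*
 * Editor's illustration (not part of the original source): typical
 * call_rcu_sched() usage, reusing the hypothetical struct foo and
 * foo_release() from the sketch above.  The structure is freed only
 * after every pre-existing preempt-disabled region has completed.
 */
#if 0
static void foo_retire(struct foo *fp)
{
        /*
         * All RCU-sched readers (preempt-disabled regions, hardirq
         * and NMI handlers) that might still see fp complete before
         * foo_release() is invoked.
         */
        call_rcu_sched(&fp->rcu, foo_release);
}
#endif
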
2989/**
2990 * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
2991 * @head: structure to be used for queueing the RCU updates.
2992 * @func: actual callback function to be invoked after the grace period
2993 *
2994 * The callback function will be invoked some time after a full grace
2995 * period elapses, in other words after all currently executing RCU
2996 * read-side critical sections have completed. call_rcu_bh() assumes
2997 * that the read-side critical sections end on completion of a softirq
2998 * handler. This means that read-side critical sections in process
2999 * context must not be interrupted by softirqs. This interface is to be
3000 * used when most of the read-side critical sections are in softirq context.
3001 * RCU read-side critical sections are delimited by:
3002 *
3003 * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR
3004 * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
3005 *
3006 * These may be nested.
3007 *
3008 * See the description of call_rcu() for more detailed information on
3009 * memory ordering guarantees.
3010 */
3011void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
3012{
3013        __call_rcu(head, func, &rcu_bh_state, -1, 0);
3014}
3015EXPORT_SYMBOL_GPL(call_rcu_bh);
3016
3017/*
3018 * Queue an RCU callback for lazy invocation after a grace period.
3019 * This function will likely eventually be renamed to something like
3020 * "call_rcu_lazy()", but that change will require some way of tagging
3021 * the lazy RCU callbacks in the list of pending callbacks.  Until then,
3022 * this function may only be called from __kfree_rcu().
3023 */
3024void kfree_call_rcu(struct rcu_head *head,
3025                    rcu_callback_t func)
3026{
3027        __call_rcu(head, func, rcu_state_p, -1, 1);
3028}
3029EXPORT_SYMBOL_GPL(kfree_call_rcu);
3030
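/*
 * Editor's illustration (not part of the original source): callers
 * normally reach kfree_call_rcu() through the kfree_rcu() macro, which
 * encodes the offset of the rcu_head within the enclosing structure
 * (again the hypothetical struct foo from above).
 */
#if 0
static void foo_retire_lazily(struct foo *fp)
{
        kfree_rcu(fp, rcu);     /* Boils down to kfree_call_rcu(). */
}
#endif
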
3031/*
3032 * Because a context switch is a grace period for RCU-sched and RCU-bh,
3033 * any blocking grace-period wait automatically implies a grace period
3034 * if there is only one CPU online at any point in time during execution
3035 * of either synchronize_sched() or synchronize_rcu_bh().  It is OK to
3036 * occasionally incorrectly indicate that there are multiple CPUs online
3037 * when there was in fact only one the whole time, as this just adds
3038 * some overhead: RCU still operates correctly.
3039 */
3040static inline int rcu_blocking_is_gp(void)
3041{
3042        int ret;
3043
3044        might_sleep();  /* Check for RCU read-side critical section. */
3045        preempt_disable();
3046        ret = num_online_cpus() <= 1;
3047        preempt_enable();
3048        return ret;
3049}
3050
3051/**
3052 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
3053 *
3054 * Control will return to the caller some time after a full rcu-sched
3055 * grace period has elapsed, in other words after all currently executing
3056 * rcu-sched read-side critical sections have completed.   These read-side
3057 * critical sections are delimited by rcu_read_lock_sched() and
3058 * rcu_read_unlock_sched(), and may be nested.  Note that preempt_disable(),
3059 * local_irq_disable(), and so on may be used in place of
3060 * rcu_read_lock_sched().
3061 *
3062 * This means that all preempt_disable code sequences, including NMI and
3063 * non-threaded hardware-interrupt handlers, in progress on entry will
3064 * have completed before this primitive returns.  However, this does not
3065 * guarantee that softirq handlers will have completed, since in some
3066 * kernels, these handlers can run in process context, and can block.
3067 *
3068 * Note that this guarantee implies further memory-ordering guarantees.
3069 * On systems with more than one CPU, when synchronize_sched() returns,
3070 * each CPU is guaranteed to have executed a full memory barrier since the
3071 * end of its last RCU-sched read-side critical section whose beginning
3072 * preceded the call to synchronize_sched().  In addition, each CPU having
3073 * an RCU read-side critical section that extends beyond the return from
3074 * synchronize_sched() is guaranteed to have executed a full memory barrier
3075 * after the beginning of synchronize_sched() and before the beginning of
3076 * that RCU read-side critical section.  Note that these guarantees include
3077 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3078 * that are executing in the kernel.
3079 *
3080 * Furthermore, if CPU A invoked synchronize_sched(), which returned
3081 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3082 * to have executed a full memory barrier during the execution of
3083 * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
3084 * again only if the system has more than one CPU).
3085 */
3086void synchronize_sched(void)
3087{
3088        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3089                         lock_is_held(&rcu_lock_map) ||
3090                         lock_is_held(&rcu_sched_lock_map),
3091                         "Illegal synchronize_sched() in RCU-sched read-side critical section");
3092        if (rcu_blocking_is_gp())
3093                return;
3094        if (rcu_gp_is_expedited())
3095                synchronize_sched_expedited();
3096        else
3097                wait_rcu_gp(call_rcu_sched);
3098}
3099EXPORT_SYMBOL_GPL(synchronize_sched);
3100
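/*
 * Editor's illustration (not part of the original source): the classic
 * update-side pattern built on synchronize_sched().  "global_foo" and
 * "foo_mutex" are hypothetical; readers are assumed to access
 * global_foo only within preempt-disabled (RCU-sched) regions.
 */
#if 0
static struct foo __rcu *global_foo;
static DEFINE_MUTEX(foo_mutex);

static void foo_replace(struct foo *new_fp)
{
        struct foo *old_fp;

        mutex_lock(&foo_mutex);
        old_fp = rcu_dereference_protected(global_foo,
                                           lockdep_is_held(&foo_mutex));
        rcu_assign_pointer(global_foo, new_fp);
        mutex_unlock(&foo_mutex);
        synchronize_sched();    /* Wait for pre-existing readers. */
        kfree(old_fp);          /* No reader can still hold a reference. */
}
#endif
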
3101/**
3102 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
3103 *
3104 * Control will return to the caller some time after a full rcu_bh grace
3105 * period has elapsed, in other words after all currently executing rcu_bh
3106 * read-side critical sections have completed.  RCU read-side critical
3107 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
3108 * and may be nested.
3109 *
3110 * See the description of synchronize_sched() for more detailed information
3111 * on memory ordering guarantees.
3112 */
3113void synchronize_rcu_bh(void)
3114{
3115        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3116                         lock_is_held(&rcu_lock_map) ||
3117                         lock_is_held(&rcu_sched_lock_map),
3118                         "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
3119        if (rcu_blocking_is_gp())
3120                return;
3121        if (rcu_gp_is_expedited())
3122                synchronize_rcu_bh_expedited();
3123        else
3124                wait_rcu_gp(call_rcu_bh);
3125}
3126EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
3127
3128/**
3129 * get_state_synchronize_rcu - Snapshot current RCU state
3130 *
3131 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3132 * to determine whether or not a full grace period has elapsed in the
3133 * meantime.
3134 */
3135unsigned long get_state_synchronize_rcu(void)
3136{
3137        /*
3138         * Any prior manipulation of RCU-protected data must happen
3139         * before the load from ->gpnum.
3140         */
3141        smp_mb();  /* ^^^ */
3142
3143        /*
3144         * Make sure this load happens before the purportedly
3145         * time-consuming work between get_state_synchronize_rcu()
3146         * and cond_synchronize_rcu().
3147         */
3148        return smp_load_acquire(&rcu_state_p->gpnum);
3149}
3150EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3151
3152/**
3153 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3154 *
3155 * @oldstate: return value from earlier call to get_state_synchronize_rcu()
3156 *
3157 * If a full RCU grace period has elapsed since the earlier call to
3158 * get_state_synchronize_rcu(), just return.  Otherwise, invoke
3159 * synchronize_rcu() to wait for a full grace period.
3160 *
3161 * Yes, this function does not take counter wrap into account.  But
3162 * counter wrap is harmless.  If the counter wraps, we have waited for
3163 * more than 2 billion grace periods (and way more on a 64-bit system!),
3164 * so waiting for one additional grace period should be just fine.
3165 */
3166void cond_synchronize_rcu(unsigned long oldstate)
3167{
3168        unsigned long newstate;
3169
3170        /*
3171         * Ensure that this load happens before any RCU-destructive
3172         * actions the caller might carry out after we return.
3173         */
3174        newstate = smp_load_acquire(&rcu_state_p->completed);
3175        if (ULONG_CMP_GE(oldstate, newstate))
3176                synchronize_rcu();
3177}
3178EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
3179
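/*
 * Editor's illustration (not part of the original source): using the
 * cookie to overlap a grace period with other work.
 * do_lengthy_rework() is hypothetical.
 */
#if 0
static void overlap_gp_with_work(void)
{
        unsigned long gp_snap;

        gp_snap = get_state_synchronize_rcu(); /* Snapshot GP state. */
        do_lengthy_rework();                   /* GP may elapse meanwhile. */
        cond_synchronize_rcu(gp_snap);         /* Often an immediate no-op. */
}
#endif
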
3180/**
3181 * get_state_synchronize_sched - Snapshot current RCU-sched state
3182 *
3183 * Returns a cookie that is used by a later call to cond_synchronize_sched()
3184 * to determine whether or not a full grace period has elapsed in the
3185 * meantime.
3186 */
3187unsigned long get_state_synchronize_sched(void)
3188{
3189        /*
3190         * Any prior manipulation of RCU-protected data must happen
3191         * before the load from ->gpnum.
3192         */
3193        smp_mb();  /* ^^^ */
3194
3195        /*
3196         * Make sure this load happens before the purportedly
3197         * time-consuming work between get_state_synchronize_sched()
3198         * and cond_synchronize_sched().
3199         */
3200        return smp_load_acquire(&rcu_sched_state.gpnum);
3201}
3202EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
3203
3204/**
3205 * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period
3206 *
3207 * @oldstate: return value from earlier call to get_state_synchronize_sched()
3208 *
3209 * If a full RCU-sched grace period has elapsed since the earlier call to
3210 * get_state_synchronize_sched(), just return.  Otherwise, invoke
3211 * synchronize_sched() to wait for a full grace period.
3212 *
3213 * Yes, this function does not take counter wrap into account.  But
3214 * counter wrap is harmless.  If the counter wraps, we have waited for
3215 * more than 2 billion grace periods (and way more on a 64-bit system!),
3216 * so waiting for one additional grace period should be just fine.
3217 */
3218void cond_synchronize_sched(unsigned long oldstate)
3219{
3220        unsigned long newstate;
3221
3222        /*
3223         * Ensure that this load happens before any RCU-destructive
3224         * actions the caller might carry out after we return.
3225         */
3226        newstate = smp_load_acquire(&rcu_sched_state.completed);
3227        if (ULONG_CMP_GE(oldstate, newstate))
3228                synchronize_sched();
3229}
3230EXPORT_SYMBOL_GPL(cond_synchronize_sched);
3231
3232/*
3233 * Check to see if there is any immediate RCU-related work to be done
3234 * by the current CPU, for the specified type of RCU, returning 1 if so.
3235 * The checks are in order of increasing expense: checks that can be
3236 * carried out against CPU-local state are performed first.  However,
3237 * we must check for CPU stalls first, else we might not get a chance.
3238 */
3239static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
3240{
3241        struct rcu_node *rnp = rdp->mynode;
3242
3243        /* Check for CPU stalls, if enabled. */
3244        check_cpu_stall(rsp, rdp);
3245
3246        /* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
3247        if (rcu_nohz_full_cpu(rsp))
3248                return 0;
3249
3250        /* Is the RCU core waiting for a quiescent state from this CPU? */
3251        if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm)
3252                return 1;
3253
3254        /* Does this CPU have callbacks ready to invoke? */
3255        if (rcu_segcblist_ready_cbs(&rdp->cblist))
3256                return 1;
3257
3258        /* Has RCU gone idle with this CPU needing another grace period? */
3259        if (!rcu_gp_in_progress(rsp) &&
3260            rcu_segcblist_is_enabled(&rdp->cblist) &&
3261            !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3262                return 1;
3263
3264        /* Has another RCU grace period completed?  */
3265        if (READ_ONCE(rnp->completed) != rdp->completed) /* outside lock */
3266                return 1;
3267
3268        /* Has a new RCU grace period started? */
3269        if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
3270            unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3271                return 1;
3272
3273        /* Does this CPU need a deferred NOCB wakeup? */
3274        if (rcu_nocb_need_deferred_wakeup(rdp))
3275                return 1;
3276
3277        /* nothing to do */
3278        return 0;
3279}
3280
3281/*
3282 * Check to see if there is any immediate RCU-related work to be done
3283 * by the current CPU, returning 1 if so.  This function is part of the
3284 * RCU implementation; it is -not- an exported member of the RCU API.
3285 */
3286static int rcu_pending(void)
3287{
3288        struct rcu_state *rsp;
3289
3290        for_each_rcu_flavor(rsp)
3291                if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
3292                        return 1;
3293        return 0;
3294}
3295
3296/*
3297 * Return true if the specified CPU has any callback.  If all_lazy is
3298 * non-NULL, store an indication of whether all callbacks are lazy.
3299 * (If there are no callbacks, all of them are deemed to be lazy.)
3300 */
3301static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
3302{
3303        bool al = true;
3304        bool hc = false;
3305        struct rcu_data *rdp;
3306        struct rcu_state *rsp;
3307
3308        for_each_rcu_flavor(rsp) {
3309                rdp = this_cpu_ptr(rsp->rda);
3310                if (rcu_segcblist_empty(&rdp->cblist))
3311                        continue;
3312                hc = true;
3313                if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist) || !all_lazy) {
3314                        al = false;
3315                        break;
3316                }
3317        }
3318        if (all_lazy)
3319                *all_lazy = al;
3320        return hc;
3321}
3322
3323/*
3324 * Helper function for _rcu_barrier() tracing.  If tracing is disabled,
3325 * the compiler is expected to optimize this away.
3326 */
3327static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
3328                               int cpu, unsigned long done)
3329{
3330        trace_rcu_barrier(rsp->name, s, cpu,
3331                          atomic_read(&rsp->barrier_cpu_count), done);
3332}
3333
3334/*
3335 * RCU callback function for _rcu_barrier().  If we are last, wake
3336 * up the task executing _rcu_barrier().
3337 */
3338static void rcu_barrier_callback(struct rcu_head *rhp)
3339{
3340        struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
3341        struct rcu_state *rsp = rdp->rsp;
3342
3343        if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
3344                _rcu_barrier_trace(rsp, TPS("LastCB"), -1,
3345                                   rsp->barrier_sequence);
3346                complete(&rsp->barrier_completion);
3347        } else {
3348                _rcu_barrier_trace(rsp, TPS("CB"), -1, rsp->barrier_sequence);
3349        }
3350}
3351
3352/*
3353 * Called with preemption disabled, and from cross-cpu IRQ context.
3354 */
3355static void rcu_barrier_func(void *type)
3356{
3357        struct rcu_state *rsp = type;
3358        struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
3359
3360        _rcu_barrier_trace(rsp, TPS("IRQ"), -1, rsp->barrier_sequence);
3361        rdp->barrier_head.func = rcu_barrier_callback;
3362        debug_rcu_head_queue(&rdp->barrier_head);
3363        if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
3364                atomic_inc(&rsp->barrier_cpu_count);
3365        } else {
3366                debug_rcu_head_unqueue(&rdp->barrier_head);
3367                _rcu_barrier_trace(rsp, TPS("IRQNQ"), -1,
3368                                   rsp->barrier_sequence);
3369        }
3370}
3371
3372/*
3373 * Orchestrate the specified type of RCU barrier, waiting for all
3374 * RCU callbacks of the specified type to complete.
3375 */
3376static void _rcu_barrier(struct rcu_state *rsp)
3377{
3378        int cpu;
3379        struct rcu_data *rdp;
3380        unsigned long s = rcu_seq_snap(&rsp->barrier_sequence);
3381
3382        _rcu_barrier_trace(rsp, TPS("Begin"), -1, s);
3383
3384        /* Take mutex to serialize concurrent rcu_barrier() requests. */
3385        mutex_lock(&rsp->barrier_mutex);
3386
3387        /* Did someone else do our work for us? */
3388        if (rcu_seq_done(&rsp->barrier_sequence, s)) {
3389                _rcu_barrier_trace(rsp, TPS("EarlyExit"), -1,
3390                                   rsp->barrier_sequence);
3391                smp_mb(); /* caller's subsequent code after above check. */
3392                mutex_unlock(&rsp->barrier_mutex);
3393                return;
3394        }
3395
3396        /* Mark the start of the barrier operation. */
3397        rcu_seq_start(&rsp->barrier_sequence);
3398        _rcu_barrier_trace(rsp, TPS("Inc1"), -1, rsp->barrier_sequence);
3399
3400        /*
3401         * Initialize the count to one rather than to zero in order to
3402         * avoid a too-soon return to zero in case of a short grace period
3403         * (or preemption of this task).  Exclude CPU-hotplug operations
3404         * to ensure that no offline CPU has callbacks queued.
3405         */
3406        init_completion(&rsp->barrier_completion);
3407        atomic_set(&rsp->barrier_cpu_count, 1);
3408        get_online_cpus();
3409
3410        /*
3411         * Force each CPU with callbacks to register a new callback.
3412         * When that callback is invoked, we will know that all of the
3413         * corresponding CPU's preceding callbacks have been invoked.
3414         */
3415        for_each_possible_cpu(cpu) {
3416                if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
3417                        continue;
3418                rdp = per_cpu_ptr(rsp->rda, cpu);
3419                if (rcu_is_nocb_cpu(cpu)) {
3420                        if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
3421                                _rcu_barrier_trace(rsp, TPS("OfflineNoCB"), cpu,
3422                                                   rsp->barrier_sequence);
3423                        } else {
3424                                _rcu_barrier_trace(rsp, TPS("OnlineNoCB"), cpu,
3425                                                   rsp->barrier_sequence);
3426                                smp_mb__before_atomic();
3427                                atomic_inc(&rsp->barrier_cpu_count);
3428                                __call_rcu(&rdp->barrier_head,
3429                                           rcu_barrier_callback, rsp, cpu, 0);
3430                        }
3431                } else if (rcu_segcblist_n_cbs(&rdp->cblist)) {
3432                        _rcu_barrier_trace(rsp, TPS("OnlineQ"), cpu,
3433                                           rsp->barrier_sequence);
3434                        smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
3435                } else {
3436                        _rcu_barrier_trace(rsp, TPS("OnlineNQ"), cpu,
3437                                           rsp->barrier_sequence);
3438                }
3439        }
3440        put_online_cpus();
3441
3442        /*
3443         * Now that we have an rcu_barrier_callback() callback on each
3444         * CPU, each of which has been counted, remove the initial count.
3445         */
3446        if (atomic_dec_and_test(&rsp->barrier_cpu_count))
3447                complete(&rsp->barrier_completion);
3448
3449        /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
3450        wait_for_completion(&rsp->barrier_completion);
3451
3452        /* Mark the end of the barrier operation. */
3453        _rcu_barrier_trace(rsp, TPS("Inc2"), -1, rsp->barrier_sequence);
3454        rcu_seq_end(&rsp->barrier_sequence);
3455
3456        /* Other rcu_barrier() invocations can now safely proceed. */
3457        mutex_unlock(&rsp->barrier_mutex);
3458}
3459
3460/**
3461 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
3462 */
3463void rcu_barrier_bh(void)
3464{
3465        _rcu_barrier(&rcu_bh_state);
3466}
3467EXPORT_SYMBOL_GPL(rcu_barrier_bh);
3468
3469/**
3470 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
3471 */
3472void rcu_barrier_sched(void)
3473{
3474        _rcu_barrier(&rcu_sched_state);
3475}
3476EXPORT_SYMBOL_GPL(rcu_barrier_sched);
3477
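/*
 * Editor's illustration (not part of the original source): the usual
 * module-unload discipline.  A bare grace-period wait is not enough,
 * because already-queued callbacks could still run after the module's
 * code is unmapped.  stop_posting_new_callbacks() is hypothetical.
 */
#if 0
static void example_module_exit(void)
{
        stop_posting_new_callbacks();   /* No new call_rcu_sched() past here. */
        rcu_barrier_sched();            /* Wait for queued callbacks to run. */
        /* Only now may the module's callback functions disappear. */
}
#endif
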
3478/*
3479 * Propagate ->qsinitmask bits up the rcu_node tree to account for the
3480 * first CPU in a given leaf rcu_node structure coming online.  The caller
3481 * must hold the corresponding leaf rcu_node ->lock with interrupts
3482 * disabled.
3483 */
3484static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
3485{
3486        long mask;
3487        struct rcu_node *rnp = rnp_leaf;
3488
3489        raw_lockdep_assert_held_rcu_node(rnp);
3490        for (;;) {
3491                mask = rnp->grpmask;
3492                rnp = rnp->parent;
3493                if (rnp == NULL)
3494                        return;
3495                raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
3496                rnp->qsmaskinit |= mask;
3497                raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
3498        }
3499}
3500
3501/*
3502 * Do boot-time initialization of a CPU's per-CPU RCU data.
3503 */
3504static void __init
3505rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
3506{
3507        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3508
3509        /* Set up local state, ensuring consistent view of global state. */
3510        rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
3511        rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
3512        WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
3513        WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
3514        rdp->cpu = cpu;
3515        rdp->rsp = rsp;
3516        rcu_boot_init_nocb_percpu_data(rdp);
3517}
3518
3519/*
3520 * Initialize a CPU's per-CPU RCU data.  Note that only one online or
3521 * offline event can be happening at a given time.  Note also that we
3522 * can accept some slop in the rsp->completed access because this CPU
3523 * cannot possibly have any RCU callbacks in flight yet.
3524 */
3525static void
3526rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
3527{
3528        unsigned long flags;
3529        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3530        struct rcu_node *rnp = rcu_get_root(rsp);
3531
3532        /* Set up local state, ensuring consistent view of global state. */
3533        raw_spin_lock_irqsave_rcu_node(rnp, flags);
3534        rdp->qlen_last_fqs_check = 0;
3535        rdp->n_force_qs_snap = rsp->n_force_qs;
3536        rdp->blimit = blimit;
3537        if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
3538            !init_nocb_callback_list(rdp))
3539                rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
3540        rdp->dynticks->dynticks_nesting = 1;    /* CPU not up, no tearing. */
3541        rcu_dynticks_eqs_online();
3542        raw_spin_unlock_rcu_node(rnp);          /* irqs remain disabled. */
3543
3544        /*
3545         * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
3546         * propagation up the rcu_node tree will happen at the beginning
3547         * of the next grace period.
3548         */
3549        rnp = rdp->mynode;
3550        raw_spin_lock_rcu_node(rnp);            /* irqs already disabled. */
3551        rdp->beenonline = true;  /* We have now been online. */
3552        rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
3553        rdp->completed = rnp->completed;
3554        rdp->cpu_no_qs.b.norm = true;
3555        rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
3556        rdp->core_needs_qs = false;
3557        rdp->rcu_iw_pending = false;
3558        rdp->rcu_iw_gpnum = rnp->gpnum - 1;
3559        trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
3560        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3561}
3562
3563/*
3564 * Invoked early in the CPU-online process, when pretty much all
3565 * services are available.  The incoming CPU is not present.
3566 */
3567int rcutree_prepare_cpu(unsigned int cpu)
3568{
3569        struct rcu_state *rsp;
3570
3571        for_each_rcu_flavor(rsp)
3572                rcu_init_percpu_data(cpu, rsp);
3573
3574        rcu_prepare_kthreads(cpu);
3575        rcu_spawn_all_nocb_kthreads(cpu);
3576
3577        return 0;
3578}
3579
3580/*
3581 * Update RCU priority boot kthread affinity for CPU-hotplug changes.
3582 */
3583static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
3584{
3585        struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
3586
3587        rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
3588}
3589
3590/*
3591 * Near the end of the CPU-online process.  Pretty much all services
3592 * enabled, and the CPU is now very much alive.
3593 */
3594int rcutree_online_cpu(unsigned int cpu)
3595{
3596        unsigned long flags;
3597        struct rcu_data *rdp;
3598        struct rcu_node *rnp;
3599        struct rcu_state *rsp;
3600
3601        for_each_rcu_flavor(rsp) {
3602                rdp = per_cpu_ptr(rsp->rda, cpu);
3603                rnp = rdp->mynode;
3604                raw_spin_lock_irqsave_rcu_node(rnp, flags);
3605                rnp->ffmask |= rdp->grpmask;
3606                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3607        }
3608        if (IS_ENABLED(CONFIG_TREE_SRCU))
3609                srcu_online_cpu(cpu);
3610        if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
3611                return 0; /* Too early in boot for scheduler work. */
3612        sync_sched_exp_online_cleanup(cpu);
3613        rcutree_affinity_setting(cpu, -1);
3614        return 0;
3615}
3616
3617/*
3618 * Near the beginning of the CPU-offline process.  The CPU is still very much alive
3619 * with pretty much all services enabled.
3620 */
3621int rcutree_offline_cpu(unsigned int cpu)
3622{
3623        unsigned long flags;
3624        struct rcu_data *rdp;
3625        struct rcu_node *rnp;
3626        struct rcu_state *rsp;
3627
3628        for_each_rcu_flavor(rsp) {
3629                rdp = per_cpu_ptr(rsp->rda, cpu);
3630                rnp = rdp->mynode;
3631                raw_spin_lock_irqsave_rcu_node(rnp, flags);
3632                rnp->ffmask &= ~rdp->grpmask;
3633                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3634        }
3635
3636        rcutree_affinity_setting(cpu, cpu);
3637        if (IS_ENABLED(CONFIG_TREE_SRCU))
3638                srcu_offline_cpu(cpu);
3639        return 0;
3640}
3641
3642/*
3643 * Near the end of the offline process.  We do only tracing here.
3644 */
3645int rcutree_dying_cpu(unsigned int cpu)
3646{
3647        struct rcu_state *rsp;
3648
3649        for_each_rcu_flavor(rsp)
3650                rcu_cleanup_dying_cpu(rsp);
3651        return 0;
3652}
3653
3654/*
3655 * The outgoing CPU is gone and we are running elsewhere.
3656 */
3657int rcutree_dead_cpu(unsigned int cpu)
3658{
3659        struct rcu_state *rsp;
3660
3661        for_each_rcu_flavor(rsp) {
3662                rcu_cleanup_dead_cpu(cpu, rsp);
3663                do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
3664        }
3665        return 0;
3666}
3667
3668static DEFINE_PER_CPU(int, rcu_cpu_started);
3669
3670/*
3671 * Mark the specified CPU as being online so that subsequent grace periods
3672 * (both expedited and normal) will wait on it.  Note that this means that
3673 * incoming CPUs are not allowed to use RCU read-side critical sections
3674 * until this function is called.  Failing to observe this restriction
3675 * will result in lockdep splats.
3676 *
3677 * Note that this function is special in that it is invoked directly
3678 * from the incoming CPU rather than from the cpuhp_step mechanism.
3679 * This is because this function must be invoked at a precise location.
3680 */
3681void rcu_cpu_starting(unsigned int cpu)
3682{
3683        unsigned long flags;
3684        unsigned long mask;
3685        int nbits;
3686        unsigned long oldmask;
3687        struct rcu_data *rdp;
3688        struct rcu_node *rnp;
3689        struct rcu_state *rsp;
3690
3691        if (per_cpu(rcu_cpu_started, cpu))
3692                return;
3693
3694        per_cpu(rcu_cpu_started, cpu) = 1;
3695
3696        for_each_rcu_flavor(rsp) {
3697                rdp = per_cpu_ptr(rsp->rda, cpu);
3698                rnp = rdp->mynode;
3699                mask = rdp->grpmask;
3700                raw_spin_lock_irqsave_rcu_node(rnp, flags);
3701                rnp->qsmaskinitnext |= mask;
3702                oldmask = rnp->expmaskinitnext;
3703                rnp->expmaskinitnext |= mask;
3704                oldmask ^= rnp->expmaskinitnext;
3705                nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
3706                /* Allow lockless access for expedited grace periods. */
3707                smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */
3708                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3709        }
3710        smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
3711}
3712
3713#ifdef CONFIG_HOTPLUG_CPU
3714/*
3715 * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
3716 * function.  We now remove it from the rcu_node tree's ->qsmaskinit
3717 * bit masks.
3718 */
3719static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
3720{
3721        unsigned long flags;
3722        unsigned long mask;
3723        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3724        struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
3725
3726        /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
3727        mask = rdp->grpmask;
3728        raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
3729        rnp->qsmaskinitnext &= ~mask;
3730        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3731}
3732
3733/*
3734 * The outgoing CPU has no further need of RCU, so remove it from
3735 * the list of CPUs that RCU must track.
3736 *
3737 * Note that this function is special in that it is invoked directly
3738 * from the outgoing CPU rather than from the cpuhp_step mechanism.
3739 * This is because this function must be invoked at a precise location.
3740 */
3741void rcu_report_dead(unsigned int cpu)
3742{
3743        struct rcu_state *rsp;
3744
3745        /* QS for any half-done expedited RCU-sched GP. */
3746        preempt_disable();
3747        rcu_report_exp_rdp(&rcu_sched_state,
3748                           this_cpu_ptr(rcu_sched_state.rda), true);
3749        preempt_enable();
3750        for_each_rcu_flavor(rsp)
3751                rcu_cleanup_dying_idle_cpu(cpu, rsp);
3752
3753        per_cpu(rcu_cpu_started, cpu) = 0;
3754}
3755
3756/* Migrate the dead CPU's callbacks to the current CPU. */
3757static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
3758{
3759        unsigned long flags;
3760        struct rcu_data *my_rdp;
3761        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
3762        struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
3763        bool needwake;
3764
3765        if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist))
3766                return;  /* No callbacks to migrate. */
3767
3768        local_irq_save(flags);
3769        my_rdp = this_cpu_ptr(rsp->rda);
3770        if (rcu_nocb_adopt_orphan_cbs(my_rdp, rdp, flags)) {
3771                local_irq_restore(flags);
3772                return;
3773        }
3774        raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
3775        /* Leverage recent GPs and set GP for new callbacks. */
3776        needwake = rcu_advance_cbs(rsp, rnp_root, rdp) ||
3777                   rcu_advance_cbs(rsp, rnp_root, my_rdp);
3778        rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
3779        WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
3780                     !rcu_segcblist_n_cbs(&my_rdp->cblist));
3781        raw_spin_unlock_irqrestore_rcu_node(rnp_root, flags);
3782        if (needwake)
3783                rcu_gp_kthread_wake(rsp);
3784        WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
3785                  !rcu_segcblist_empty(&rdp->cblist),
3786                  "rcu_migrate_callbacks: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
3787                  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
3788                  rcu_segcblist_first_cb(&rdp->cblist));
3789}
3790
3791/*
3792 * The outgoing CPU has just passed through the dying-idle state,
3793 * and we are being invoked from the CPU that was IPIed to continue the
3794 * offline operation.  We need to migrate the outgoing CPU's callbacks.
3795 */
3796void rcutree_migrate_callbacks(int cpu)
3797{
3798        struct rcu_state *rsp;
3799
3800        for_each_rcu_flavor(rsp)
3801                rcu_migrate_callbacks(cpu, rsp);
3802}
3803#endif /* #ifdef CONFIG_HOTPLUG_CPU */
3804
3805/*
3806 * On non-huge systems, use expedited RCU grace periods to make suspend
3807 * and hibernation run faster.
3808 */
3809static int rcu_pm_notify(struct notifier_block *self,
3810                         unsigned long action, void *hcpu)
3811{
3812        switch (action) {
3813        case PM_HIBERNATION_PREPARE:
3814        case PM_SUSPEND_PREPARE:
3815                if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
3816                        rcu_expedite_gp();
3817                break;
3818        case PM_POST_HIBERNATION:
3819        case PM_POST_SUSPEND:
3820                if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
3821                        rcu_unexpedite_gp();
3822                break;
3823        default:
3824                break;
3825        }
3826        return NOTIFY_OK;
3827}
3828
3829/*
3830 * Spawn the kthreads that handle each RCU flavor's grace periods.
3831 */
3832static int __init rcu_spawn_gp_kthread(void)
3833{
3834        unsigned long flags;
3835        int kthread_prio_in = kthread_prio;
3836        struct rcu_node *rnp;
3837        struct rcu_state *rsp;
3838        struct sched_param sp;
3839        struct task_struct *t;
3840
3841        /* Force priority into range. */
3842        if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
3843                kthread_prio = 1;
3844        else if (kthread_prio < 0)
3845                kthread_prio = 0;
3846        else if (kthread_prio > 99)
3847                kthread_prio = 99;
3848        if (kthread_prio != kthread_prio_in)
3849                pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
3850                         kthread_prio, kthread_prio_in);
3851
3852        rcu_scheduler_fully_active = 1;
3853        for_each_rcu_flavor(rsp) {
3854                t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
3855                BUG_ON(IS_ERR(t));
3856                rnp = rcu_get_root(rsp);
3857                raw_spin_lock_irqsave_rcu_node(rnp, flags);
3858                rsp->gp_kthread = t;
3859                if (kthread_prio) {
3860                        sp.sched_priority = kthread_prio;
3861                        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
3862                }
3863                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3864                wake_up_process(t);
3865        }
3866        rcu_spawn_nocb_kthreads();
3867        rcu_spawn_boost_kthreads();
3868        return 0;
3869}
3870early_initcall(rcu_spawn_gp_kthread);
3871
3872/*
3873 * This function is invoked towards the end of the scheduler's
3874 * initialization process.  Before this is called, the idle task might
3875 * contain synchronous grace-period primitives (during which time, this idle
3876 * task is booting the system, and such primitives are no-ops).  After this
3877 * function is called, any synchronous grace-period primitives are run as
3878 * expedited, with the requesting task driving the grace period forward.
3879 * A later core_initcall() rcu_set_runtime_mode() will switch to full
3880 * runtime RCU functionality.
3881 */
3882void rcu_scheduler_starting(void)
3883{
3884        WARN_ON(num_online_cpus() != 1);
3885        WARN_ON(nr_context_switches() > 0);
3886        rcu_test_sync_prims();
3887        rcu_scheduler_active = RCU_SCHEDULER_INIT;
3888        rcu_test_sync_prims();
3889}
3890
3891/*
3892 * Helper function for rcu_init() that initializes one rcu_state structure.
3893 */
3894static void __init rcu_init_one(struct rcu_state *rsp)
3895{
3896        static const char * const buf[] = RCU_NODE_NAME_INIT;
3897        static const char * const fqs[] = RCU_FQS_NAME_INIT;
3898        static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
3899        static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
3900
3901        int levelspread[RCU_NUM_LVLS];          /* kids/node in each level. */
3902        int cpustride = 1;
3903        int i;
3904        int j;
3905        struct rcu_node *rnp;
3906
3907        BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
3908
3909        /* Silence gcc 4.8 false positive about array index out of range. */
3910        if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
3911                panic("rcu_init_one: rcu_num_lvls out of range");
3912
3913        /* Initialize the level-tracking arrays. */
3914
3915        for (i = 1; i < rcu_num_lvls; i++)
3916                rsp->level[i] = rsp->level[i - 1] + num_rcu_lvl[i - 1];
3917        rcu_init_levelspread(levelspread, num_rcu_lvl);
3918
3919        /* Initialize the elements themselves, starting from the leaves. */
3920
3921        for (i = rcu_num_lvls - 1; i >= 0; i--) {
3922                cpustride *= levelspread[i];
3923                rnp = rsp->level[i];
3924                for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
3925                        raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
3926                        lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
3927                                                   &rcu_node_class[i], buf[i]);
3928                        raw_spin_lock_init(&rnp->fqslock);
3929                        lockdep_set_class_and_name(&rnp->fqslock,
3930                                                   &rcu_fqs_class[i], fqs[i]);
3931                        rnp->gpnum = rsp->gpnum;
3932                        rnp->completed = rsp->completed;
3933                        rnp->qsmask = 0;
3934                        rnp->qsmaskinit = 0;
3935                        rnp->grplo = j * cpustride;
3936                        rnp->grphi = (j + 1) * cpustride - 1;
3937                        if (rnp->grphi >= nr_cpu_ids)
3938                                rnp->grphi = nr_cpu_ids - 1;
3939                        if (i == 0) {
3940                                rnp->grpnum = 0;
3941                                rnp->grpmask = 0;
3942                                rnp->parent = NULL;
3943                        } else {
3944                                rnp->grpnum = j % levelspread[i - 1];
3945                                rnp->grpmask = 1UL << rnp->grpnum;
3946                                rnp->parent = rsp->level[i - 1] +
3947                                              j / levelspread[i - 1];
3948                        }
3949                        rnp->level = i;
3950                        INIT_LIST_HEAD(&rnp->blkd_tasks);
3951                        rcu_init_one_nocb(rnp);
3952                        init_waitqueue_head(&rnp->exp_wq[0]);
3953                        init_waitqueue_head(&rnp->exp_wq[1]);
3954                        init_waitqueue_head(&rnp->exp_wq[2]);
3955                        init_waitqueue_head(&rnp->exp_wq[3]);
3956                        spin_lock_init(&rnp->exp_lock);
3957                }
3958        }
3959
3960        init_swait_queue_head(&rsp->gp_wq);
3961        init_swait_queue_head(&rsp->expedited_wq);
3962        rnp = rcu_first_leaf_node(rsp);
3963        for_each_possible_cpu(i) {
3964                while (i > rnp->grphi)
3965                        rnp++;
3966                per_cpu_ptr(rsp->rda, i)->mynode = rnp;
3967                rcu_boot_init_percpu_data(i, rsp);
3968        }
3969        list_add(&rsp->flavors, &rcu_struct_flavors);
3970}
3971
3972/*
3973 * Compute the rcu_node tree geometry from kernel parameters.  This cannot
3974 * replace the definitions in tree.h because those are needed to size
3975 * the ->node array in the rcu_state structure.
3976 */
3977static void __init rcu_init_geometry(void)
3978{
3979        ulong d;
3980        int i;
3981        int rcu_capacity[RCU_NUM_LVLS];
3982
3983        /*
3984         * Initialize any unspecified boot parameters.
3985         * The default values of jiffies_till_first_fqs and
3986         * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
3987         * value, which is a function of HZ, plus one for each
3988         * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
3989         */
3990        d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
3991        if (jiffies_till_first_fqs == ULONG_MAX)
3992                jiffies_till_first_fqs = d;
3993        if (jiffies_till_next_fqs == ULONG_MAX)
3994                jiffies_till_next_fqs = d;
3995
3996        /* If the compile-time values are accurate, just leave. */
3997        if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
3998            nr_cpu_ids == NR_CPUS)
3999                return;
4000        pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4001                rcu_fanout_leaf, nr_cpu_ids);
4002
4003        /*
4004         * The boot-time rcu_fanout_leaf parameter must be at least two
4005         * and cannot exceed the number of bits in the rcu_node masks.
4006         * Complain and fall back to the compile-time values if this
4007         * limit is exceeded.
4008         */
4009        if (rcu_fanout_leaf < 2 ||
4010            rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4011                rcu_fanout_leaf = RCU_FANOUT_LEAF;
4012                WARN_ON(1);
4013                return;
4014        }
4015
4016        /*
4017         * Compute number of nodes that can be handled by an rcu_node tree
4018         * with the given number of levels.
4019         */
4020        rcu_capacity[0] = rcu_fanout_leaf;
4021        for (i = 1; i < RCU_NUM_LVLS; i++)
4022                rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4023
4024        /*
4025         * The tree must be able to accommodate the configured number of CPUs.
4026         * If this limit is exceeded, fall back to the compile-time values.
4027         */
4028        if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4029                rcu_fanout_leaf = RCU_FANOUT_LEAF;
4030                WARN_ON(1);
4031                return;
4032        }
4033
4034        /* Calculate the number of levels in the tree. */
4035        for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4036        }
4037        rcu_num_lvls = i + 1;
4038
4039        /* Calculate the number of rcu_nodes at each level of the tree. */
4040        for (i = 0; i < rcu_num_lvls; i++) {
4041                int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4042                num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4043        }
4044
4045        /* Calculate the total number of rcu_node structures. */
4046        rcu_num_nodes = 0;
4047        for (i = 0; i < rcu_num_lvls; i++)
4048                rcu_num_nodes += num_rcu_lvl[i];
4049}
4050
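/*
 * Editor's worked example (not part of the original source), assuming
 * a 64-bit build with RCU_FANOUT = 64, rcu_fanout_leaf = 16, and
 * nr_cpu_ids = 96:
 *
 *      rcu_capacity[0] = 16, rcu_capacity[1] = 1024, so rcu_num_lvls = 2;
 *      num_rcu_lvl[0]  = DIV_ROUND_UP(96, 1024) = 1  (the root);
 *      num_rcu_lvl[1]  = DIV_ROUND_UP(96, 16)   = 6  (the leaves);
 *      rcu_num_nodes   = 1 + 6 = 7.
 */
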
4051/*
4052 * Dump out the structure of the rcu_node combining tree associated
4053 * with the rcu_state structure referenced by rsp.
4054 */
4055static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp)
4056{
4057        int level = 0;
4058        struct rcu_node *rnp;
4059
4060        pr_info("rcu_node tree layout dump\n");
4061        pr_info(" ");
4062        rcu_for_each_node_breadth_first(rsp, rnp) {
4063                if (rnp->level != level) {
4064                        pr_cont("\n");
4065                        pr_info(" ");
4066                        level = rnp->level;
4067                }
4068                pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
4069        }
4070        pr_cont("\n");
4071}
4072
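/*
 * Editor's illustration: hypothetical output of the dump above for the
 * 96-CPU, two-level geometry worked through earlier:
 *
 *      rcu_node tree layout dump
 *       0:95 ^0
 *       0:15 ^0  16:31 ^1  32:47 ^2  48:63 ^3  64:79 ^4  80:95 ^5
 */
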
4073struct workqueue_struct *rcu_gp_wq;
4074struct workqueue_struct *rcu_par_gp_wq;
4075
4076void __init rcu_init(void)
4077{
4078        int cpu;
4079
4080        rcu_early_boot_tests();
4081
4082        rcu_bootup_announce();
4083        rcu_init_geometry();
4084        rcu_init_one(&rcu_bh_state);
4085        rcu_init_one(&rcu_sched_state);
4086        if (dump_tree)
4087                rcu_dump_rcu_node_tree(&rcu_sched_state);
4088        __rcu_init_preempt();
4089        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
4090
4091        /*
4092         * We don't need protection against CPU-hotplug here because
4093         * this is called early in boot, before either interrupts
4094         * or the scheduler are operational.
4095         */
4096        pm_notifier(rcu_pm_notify, 0);
4097        for_each_online_cpu(cpu) {
4098                rcutree_prepare_cpu(cpu);
4099                rcu_cpu_starting(cpu);
4100                rcutree_online_cpu(cpu);
4101        }
4102
4103        /* Create workqueue for expedited GPs and for Tree SRCU. */
4104        rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
4105        WARN_ON(!rcu_gp_wq);
4106        rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
4107        WARN_ON(!rcu_par_gp_wq);
4108}
4109
4110#include "tree_exp.h"
4111#include "tree_plugin.h"
4112