linux/kernel/rcu/tree.c
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
   4 *
   5 * Copyright IBM Corporation, 2008
   6 *
   7 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
   8 *          Manfred Spraul <manfred@colorfullife.com>
   9 *          Paul E. McKenney <paulmck@linux.ibm.com>
  10 *
  11 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
  12 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  13 *
  14 * For detailed explanation of Read-Copy Update mechanism see -
  15 *      Documentation/RCU
  16 */
  17
  18#define pr_fmt(fmt) "rcu: " fmt
  19
  20#include <linux/types.h>
  21#include <linux/kernel.h>
  22#include <linux/init.h>
  23#include <linux/spinlock.h>
  24#include <linux/smp.h>
  25#include <linux/rcupdate_wait.h>
  26#include <linux/interrupt.h>
  27#include <linux/sched.h>
  28#include <linux/sched/debug.h>
  29#include <linux/nmi.h>
  30#include <linux/atomic.h>
  31#include <linux/bitops.h>
  32#include <linux/export.h>
  33#include <linux/completion.h>
  34#include <linux/moduleparam.h>
  35#include <linux/panic.h>
  36#include <linux/panic_notifier.h>
  37#include <linux/percpu.h>
  38#include <linux/notifier.h>
  39#include <linux/cpu.h>
  40#include <linux/mutex.h>
  41#include <linux/time.h>
  42#include <linux/kernel_stat.h>
  43#include <linux/wait.h>
  44#include <linux/kthread.h>
  45#include <uapi/linux/sched/types.h>
  46#include <linux/prefetch.h>
  47#include <linux/delay.h>
  48#include <linux/random.h>
  49#include <linux/trace_events.h>
  50#include <linux/suspend.h>
  51#include <linux/ftrace.h>
  52#include <linux/tick.h>
  53#include <linux/sysrq.h>
  54#include <linux/kprobes.h>
  55#include <linux/gfp.h>
  56#include <linux/oom.h>
  57#include <linux/smpboot.h>
  58#include <linux/jiffies.h>
  59#include <linux/slab.h>
  60#include <linux/sched/isolation.h>
  61#include <linux/sched/clock.h>
  62#include <linux/vmalloc.h>
  63#include <linux/mm.h>
  64#include <linux/kasan.h>
  65#include "../time/tick-internal.h"
  66
  67#include "tree.h"
  68#include "rcu.h"
  69
  70#ifdef MODULE_PARAM_PREFIX
  71#undef MODULE_PARAM_PREFIX
  72#endif
  73#define MODULE_PARAM_PREFIX "rcutree."
  74
  75/* Data structures. */
  76
  77/*
  78 * Steal a bit from the bottom of ->dynticks for idle entry/exit
  79 * control.  Initially this is for TLB flushing.
  80 */
  81#define RCU_DYNTICK_CTRL_MASK 0x1
  82#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
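/*
 * Editorial illustration (not part of the upstream file): a minimal sketch
 * of how a raw ->dynticks value decomposes under the two macros above.
 * Bit 0 (RCU_DYNTICK_CTRL_MASK) requests special action (for example a
 * TLB flush) on the next exit from an extended quiescent state, while the
 * remaining bits form a counter that advances in steps of
 * RCU_DYNTICK_CTRL_CTR.
 *
 *	static inline bool sketch_special_pending(int dynticks)
 *	{
 *		return dynticks & RCU_DYNTICK_CTRL_MASK;	/* bit 0 */
 *	}
 *
 *	static inline bool sketch_rcu_is_watching(int dynticks)
 *	{
 *		/* Mirrors rcu_dynticks_curr_cpu_in_eqs() below. */
 *		return dynticks & RCU_DYNTICK_CTRL_CTR;
 *	}
 *
 * For example, 0x2 means "watching, no special action pending" (the
 * boot-time value used for rcu_data below), while 0x5 means "in an
 * extended quiescent state with a special action pending".
 */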
  83
  84static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
  85        .dynticks_nesting = 1,
  86        .dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
  87        .dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
  88#ifdef CONFIG_RCU_NOCB_CPU
  89        .cblist.flags = SEGCBLIST_SOFTIRQ_ONLY,
  90#endif
  91};
  92static struct rcu_state rcu_state = {
  93        .level = { &rcu_state.node[0] },
  94        .gp_state = RCU_GP_IDLE,
  95        .gp_seq = (0UL - 300UL) << RCU_SEQ_CTR_SHIFT,
  96        .barrier_mutex = __MUTEX_INITIALIZER(rcu_state.barrier_mutex),
  97        .name = RCU_NAME,
  98        .abbr = RCU_ABBR,
  99        .exp_mutex = __MUTEX_INITIALIZER(rcu_state.exp_mutex),
 100        .exp_wake_mutex = __MUTEX_INITIALIZER(rcu_state.exp_wake_mutex),
 101        .ofl_lock = __RAW_SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
 102};
 103
 104/* Dump rcu_node combining tree at boot to verify correct setup. */
 105static bool dump_tree;
 106module_param(dump_tree, bool, 0444);
 107/* By default, use RCU_SOFTIRQ instead of rcuc kthreads. */
 108static bool use_softirq = !IS_ENABLED(CONFIG_PREEMPT_RT);
 109#ifndef CONFIG_PREEMPT_RT
 110module_param(use_softirq, bool, 0444);
 111#endif
 112/* Control rcu_node-tree auto-balancing at boot time. */
 113static bool rcu_fanout_exact;
 114module_param(rcu_fanout_exact, bool, 0444);
 115/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
 116static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
 117module_param(rcu_fanout_leaf, int, 0444);
 118int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
 119/* Number of rcu_nodes at specified level. */
 120int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
 121int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
 122
 123/*
 124 * The rcu_scheduler_active variable is initialized to the value
 125 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before the
 126 * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
 127 * RCU can assume that there is but one task, allowing RCU to (for example)
 128 * optimize synchronize_rcu() to a simple barrier().  When this variable
 129 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 130 * to detect real grace periods.  This variable is also used to suppress
 131 * boot-time false positives from lockdep-RCU error checking.  Finally, it
 132 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 133 * is fully initialized, including all of its kthreads having been spawned.
 134 */
 135int rcu_scheduler_active __read_mostly;
 136EXPORT_SYMBOL_GPL(rcu_scheduler_active);
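/*
 * Editorial sketch (not part of the upstream file): the optimization
 * described in the comment above amounts to an early-boot fast path of
 * roughly this shape, assuming a hypothetical caller that wants to wait
 * for a grace period:
 *
 *	void sketch_wait_for_gp(void)
 *	{
 *		if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) {
 *			barrier();	// Only one task, so a GP is a no-op.
 *			return;
 *		}
 *		synchronize_rcu();	// Real grace-period machinery.
 *	}
 *
 * The actual fast path lives inside the synchronize_rcu() implementation
 * itself; this sketch only illustrates what the state value means.
 */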
 137
 138/*
 139 * The rcu_scheduler_fully_active variable transitions from zero to one
 140 * during the early_initcall() processing, which is after the scheduler
 141 * is capable of creating new tasks.  So RCU processing (for example,
 142 * creating tasks for RCU priority boosting) must be delayed until after
 143 * rcu_scheduler_fully_active transitions from zero to one.  We also
 144 * currently delay invocation of any RCU callbacks until after this point.
 145 *
 146 * It might later prove better for people registering RCU callbacks during
 147 * early boot to take responsibility for these callbacks, but one step at
 148 * a time.
 149 */
 150static int rcu_scheduler_fully_active __read_mostly;
 151
 152static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
 153                              unsigned long gps, unsigned long flags);
 154static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
 155static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 156static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 157static void invoke_rcu_core(void);
 158static void rcu_report_exp_rdp(struct rcu_data *rdp);
 159static void sync_sched_exp_online_cleanup(int cpu);
 160static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp);
 161static bool rcu_rdp_is_offloaded(struct rcu_data *rdp);
 162
 163/* rcuc/rcub kthread realtime priority */
 164static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
 165module_param(kthread_prio, int, 0444);
 166
 167/* Delay in jiffies for grace-period initialization delays, debug only. */
 168
 169static int gp_preinit_delay;
 170module_param(gp_preinit_delay, int, 0444);
 171static int gp_init_delay;
 172module_param(gp_init_delay, int, 0444);
 173static int gp_cleanup_delay;
 174module_param(gp_cleanup_delay, int, 0444);
 175
 176// Add delay to rcu_read_unlock() for strict grace periods.
 177static int rcu_unlock_delay;
 178#ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
 179module_param(rcu_unlock_delay, int, 0444);
 180#endif
 181
 182/*
 183 * This rcu parameter is runtime-read-only. It reflects
 184 * a minimum allowed number of objects which can be cached
 185 * per-CPU. Object size is equal to one page. This value
 186 * can be changed at boot time.
 187 */
 188static int rcu_min_cached_objs = 5;
 189module_param(rcu_min_cached_objs, int, 0444);
 190
 191// A page shrinker can ask for pages to be freed to make them
 192// available for other parts of the system. This usually happens
 193// under low memory conditions, and in that case we should also
 194// defer page-cache filling for a short time period.
 195//
 196// The default value is 5 seconds, which is long enough to reduce
 197// interference with the shrinker while it asks other systems to
 198// drain their caches.
 199static int rcu_delay_page_cache_fill_msec = 5000;
 200module_param(rcu_delay_page_cache_fill_msec, int, 0444);
 201
 202/* Retrieve RCU kthreads priority for rcutorture */
 203int rcu_get_gp_kthreads_prio(void)
 204{
 205        return kthread_prio;
 206}
 207EXPORT_SYMBOL_GPL(rcu_get_gp_kthreads_prio);
 208
 209/*
 210 * Number of grace periods between delays, normalized by the duration of
 211 * the delay.  The longer the delay, the more the grace periods between
 212 * each delay.  The reason for this normalization is that it means that,
 213 * for non-zero delays, the overall slowdown of grace periods is constant
 214 * regardless of the duration of the delay.  This arrangement balances
 215 * the need for long delays to increase some race probabilities with the
 216 * need for fast grace periods to increase other race probabilities.
 217 */
 218#define PER_RCU_NODE_PERIOD 3   /* Number of grace periods between delays for debugging. */
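/*
 * Editorial worked example (not part of the upstream file): assuming a
 * debug delay of d jiffies is inserted once every PER_RCU_NODE_PERIOD * d
 * grace periods, the average delay added per grace period is
 *
 *	d / (PER_RCU_NODE_PERIOD * d) = 1 / PER_RCU_NODE_PERIOD jiffy,
 *
 * which is independent of d.  That is the normalization described above:
 * doubling the delay also doubles the number of grace periods between
 * delays, so the overall slowdown stays constant.
 */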
 219
 220/*
 221 * Compute the mask of online CPUs for the specified rcu_node structure.
 222 * This will not be stable unless the rcu_node structure's ->lock is
 223 * held, but the bit corresponding to the current CPU will be stable
 224 * in most contexts.
 225 */
 226static unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
 227{
 228        return READ_ONCE(rnp->qsmaskinitnext);
 229}
 230
 231/*
 232 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 233 * permit this function to be invoked without holding the root rcu_node
 234 * structure's ->lock, but of course results can be subject to change.
 235 */
 236static int rcu_gp_in_progress(void)
 237{
 238        return rcu_seq_state(rcu_seq_current(&rcu_state.gp_seq));
 239}
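/*
 * Editorial illustration (not part of the upstream file): ->gp_seq packs a
 * grace-period counter and a small state field, per the rcu_seq_*()
 * helpers in kernel/rcu/rcu.h (included above).  A minimal sketch,
 * assuming RCU_SEQ_CTR_SHIFT is 2:
 *
 *	unsigned long gp_seq = 0x15;				// example value
 *	unsigned long ctr   = gp_seq >> RCU_SEQ_CTR_SHIFT;	// 5 completed periods
 *	unsigned long state = gp_seq & RCU_SEQ_STATE_MASK;	// 1: GP in progress
 *
 * rcu_gp_in_progress() above returns true exactly when that low state
 * field is nonzero.
 */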
 240
 241/*
 242 * Return the number of callbacks queued on the specified CPU.
 243 * Handles both the nocbs and normal cases.
 244 */
 245static long rcu_get_n_cbs_cpu(int cpu)
 246{
 247        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 248
 249        if (rcu_segcblist_is_enabled(&rdp->cblist))
 250                return rcu_segcblist_n_cbs(&rdp->cblist);
 251        return 0;
 252}
 253
 254void rcu_softirq_qs(void)
 255{
 256        rcu_qs();
 257        rcu_preempt_deferred_qs(current);
 258        rcu_tasks_qs(current, false);
 259}
 260
 261/*
 262 * Record entry into an extended quiescent state.  This is only to be
 263 * called when not already in an extended quiescent state, that is,
 264 * RCU is watching prior to the call to this function and is no longer
 265 * watching upon return.
 266 */
 267static noinstr void rcu_dynticks_eqs_enter(void)
 268{
 269        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 270        int seq;
 271
 272        /*
 273         * CPUs seeing atomic_add_return() must see prior RCU read-side
 274         * critical sections, and we also must force ordering with the
 275         * next idle sojourn.
 276         */
 277        rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
 278        seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
 279        // RCU is no longer watching.  Better be in extended quiescent state!
 280        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 281                     (seq & RCU_DYNTICK_CTRL_CTR));
 282        /* Better not have special action (TLB flush) pending! */
 283        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 284                     (seq & RCU_DYNTICK_CTRL_MASK));
 285}
 286
 287/*
 288 * Record exit from an extended quiescent state.  This is only to be
 289 * called from an extended quiescent state, that is, RCU is not watching
 290 * prior to the call to this function and is watching upon return.
 291 */
 292static noinstr void rcu_dynticks_eqs_exit(void)
 293{
 294        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 295        int seq;
 296
 297        /*
 298         * CPUs seeing atomic_add_return() must see prior idle sojourns,
 299         * and we also must force ordering with the next RCU read-side
 300         * critical section.
 301         */
 302        seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
 303        // RCU is now watching.  Better not be in an extended quiescent state!
 304        rcu_dynticks_task_trace_exit();  // After ->dynticks update!
 305        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 306                     !(seq & RCU_DYNTICK_CTRL_CTR));
 307        if (seq & RCU_DYNTICK_CTRL_MASK) {
 308                arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
 309                smp_mb__after_atomic(); /* _exit after clearing mask. */
 310        }
 311}
 312
 313/*
 314 * Reset the current CPU's ->dynticks counter to indicate that the
 315 * newly onlined CPU is no longer in an extended quiescent state.
 316 * This will either leave the counter unchanged, or increment it
 317 * to the next non-quiescent value.
 318 *
 319 * The non-atomic test/increment sequence works because the upper bits
 320 * of the ->dynticks counter are manipulated only by the corresponding CPU,
 321 * or when the corresponding CPU is offline.
 322 */
 323static void rcu_dynticks_eqs_online(void)
 324{
 325        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 326
 327        if (atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR)
 328                return;
 329        atomic_add(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
 330}
 331
 332/*
 333 * Is the current CPU in an extended quiescent state?
 334 *
 335 * No ordering, as we are sampling CPU-local information.
 336 */
 337static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
 338{
 339        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 340
 341        return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
 342}
 343
 344/*
 345 * Snapshot the ->dynticks counter with full ordering so as to allow
 346 * stable comparison of this counter with past and future snapshots.
 347 */
 348static int rcu_dynticks_snap(struct rcu_data *rdp)
 349{
 350        int snap = atomic_add_return(0, &rdp->dynticks);
 351
 352        return snap & ~RCU_DYNTICK_CTRL_MASK;
 353}
 354
 355/*
 356 * Return true if the snapshot returned from rcu_dynticks_snap()
 357 * indicates that RCU is in an extended quiescent state.
 358 */
 359static bool rcu_dynticks_in_eqs(int snap)
 360{
 361        return !(snap & RCU_DYNTICK_CTRL_CTR);
 362}
 363
 364/* Return true if the specified CPU is currently idle from an RCU viewpoint.  */
 365bool rcu_is_idle_cpu(int cpu)
 366{
 367        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 368
 369        return rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
 370}
 371
 372/*
 373 * Return true if the CPU corresponding to the specified rcu_data
 374 * structure has spent some time in an extended quiescent state since
 375 * rcu_dynticks_snap() returned the specified snapshot.
 376 */
 377static bool rcu_dynticks_in_eqs_since(struct rcu_data *rdp, int snap)
 378{
 379        return snap != rcu_dynticks_snap(rdp);
 380}
 381
 382/*
 383 * Return true if the referenced integer is zero while the specified
 384 * CPU remains within a single extended quiescent state.
 385 */
 386bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
 387{
 388        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 389        int snap;
 390
 391        // If not quiescent, force back to earlier extended quiescent state.
 392        snap = atomic_read(&rdp->dynticks) & ~(RCU_DYNTICK_CTRL_MASK |
 393                                               RCU_DYNTICK_CTRL_CTR);
 394
 395        smp_rmb(); // Order ->dynticks and *vp reads.
 396        if (READ_ONCE(*vp))
 397                return false;  // Non-zero, so report failure;
 398        smp_rmb(); // Order *vp read and ->dynticks re-read.
 399
 400        // If still in the same extended quiescent state, we are good!
 401        return snap == (atomic_read(&rdp->dynticks) & ~RCU_DYNTICK_CTRL_MASK);
 402}
 403
 404/*
 405 * Set the special (bottom) bit of the specified CPU so that it
 406 * will take special action (such as flushing its TLB) on the
 407 * next exit from an extended quiescent state.  Returns true if
 408 * the bit was successfully set, or false if the CPU was not in
 409 * an extended quiescent state.
 410 */
 411bool rcu_eqs_special_set(int cpu)
 412{
 413        int old;
 414        int new;
 415        int new_old;
 416        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
 417
 418        new_old = atomic_read(&rdp->dynticks);
 419        do {
 420                old = new_old;
 421                if (old & RCU_DYNTICK_CTRL_CTR)
 422                        return false;
 423                new = old | RCU_DYNTICK_CTRL_MASK;
 424                new_old = atomic_cmpxchg(&rdp->dynticks, old, new);
 425        } while (new_old != old);
 426        return true;
 427}
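/*
 * Editorial usage sketch (not part of the upstream file): a hypothetical
 * architecture TLB-shootdown path could use the return value of
 * rcu_eqs_special_set() to decide whether an IPI is still required:
 *
 *	if (rcu_eqs_special_set(cpu)) {
 *		// CPU is idle; it will notice the bit and flush its TLB
 *		// on its next exit from the extended quiescent state.
 *	} else {
 *		// CPU is not idle, so tell it directly.  flush_fn() is a
 *		// hypothetical per-arch flush handler, not defined here.
 *		smp_call_function_single(cpu, flush_fn, NULL, 1);
 *	}
 */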
 428
 429/*
 430 * Let the RCU core know that this CPU has gone through the scheduler,
 431 * which is a quiescent state.  This is called when the need for a
 432 * quiescent state is urgent, so we burn an atomic operation and full
 433 * memory barriers to let the RCU core know about it, regardless of what
 434 * this CPU might (or might not) do in the near future.
 435 *
 436 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 437 *
 438 * The caller must have disabled interrupts and must not be idle.
 439 */
 440notrace void rcu_momentary_dyntick_idle(void)
 441{
 442        int special;
 443
 444        raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
 445        special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
 446                                    &this_cpu_ptr(&rcu_data)->dynticks);
 447        /* It is illegal to call this from idle state. */
 448        WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
 449        rcu_preempt_deferred_qs(current);
 450}
 451EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
 452
 453/**
 454 * rcu_is_cpu_rrupt_from_idle - see if 'interrupted' from idle
 455 *
 456 * If the current CPU is idle and running at a first-level (not nested)
 457 * interrupt, or directly from idle, return true.
 458 *
 459 * The caller must have at least disabled IRQs.
 460 */
 461static int rcu_is_cpu_rrupt_from_idle(void)
 462{
 463        long nesting;
 464
 465        /*
 466         * Usually called from the tick, but also used from smp_call_function()
 467         * for expedited grace periods. This latter can result in running from
 468         * the idle task, instead of an actual IPI.
 469         */
 470        lockdep_assert_irqs_disabled();
 471
 472        /* Check for counter underflows */
 473        RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) < 0,
 474                         "RCU dynticks_nesting counter underflow!");
 475        RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) <= 0,
 476                         "RCU dynticks_nmi_nesting counter underflow/zero!");
 477
 478        /* Are we at first interrupt nesting level? */
 479        nesting = __this_cpu_read(rcu_data.dynticks_nmi_nesting);
 480        if (nesting > 1)
 481                return false;
 482
 483        /*
 484         * If we're not in an interrupt, we must be in the idle task!
 485         */
 486        WARN_ON_ONCE(!nesting && !is_idle_task(current));
 487
 488        /* Does CPU appear to be idle from an RCU standpoint? */
 489        return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
 490}
 491
 492#define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
 493                                // Maximum callbacks per rcu_do_batch ...
 494#define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
 495static long blimit = DEFAULT_RCU_BLIMIT;
 496#define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
 497static long qhimark = DEFAULT_RCU_QHIMARK;
 498#define DEFAULT_RCU_QLOMARK 100   // Once only this many pending, use blimit.
 499static long qlowmark = DEFAULT_RCU_QLOMARK;
 500#define DEFAULT_RCU_QOVLD_MULT 2
 501#define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
 502static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
 503static long qovld_calc = -1;      // No pre-initialization lock acquisitions!
 504
 505module_param(blimit, long, 0444);
 506module_param(qhimark, long, 0444);
 507module_param(qlowmark, long, 0444);
 508module_param(qovld, long, 0444);
 509
 510static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
 511static ulong jiffies_till_next_fqs = ULONG_MAX;
 512static bool rcu_kick_kthreads;
 513static int rcu_divisor = 7;
 514module_param(rcu_divisor, int, 0644);
 515
 516/* Force an exit from rcu_do_batch() after 3 milliseconds. */
 517static long rcu_resched_ns = 3 * NSEC_PER_MSEC;
 518module_param(rcu_resched_ns, long, 0644);
 519
 520/*
 521 * How long the grace period must be before we start recruiting
 522 * quiescent-state help from rcu_note_context_switch().
 523 */
 524static ulong jiffies_till_sched_qs = ULONG_MAX;
 525module_param(jiffies_till_sched_qs, ulong, 0444);
 526static ulong jiffies_to_sched_qs; /* See adjust_jiffies_till_sched_qs(). */
 527module_param(jiffies_to_sched_qs, ulong, 0444); /* Display only! */
 528
 529/*
 530 * Make sure that we give the grace-period kthread time to detect any
 531 * idle CPUs before taking active measures to force quiescent states.
 532 * However, don't go below 100 milliseconds, adjusted upwards for really
 533 * large systems.
 534 */
 535static void adjust_jiffies_till_sched_qs(void)
 536{
 537        unsigned long j;
 538
 539        /* If jiffies_till_sched_qs was specified, respect the request. */
 540        if (jiffies_till_sched_qs != ULONG_MAX) {
 541                WRITE_ONCE(jiffies_to_sched_qs, jiffies_till_sched_qs);
 542                return;
 543        }
 544        /* Otherwise, set to third fqs scan, but bound below on large system. */
 545        j = READ_ONCE(jiffies_till_first_fqs) +
 546                      2 * READ_ONCE(jiffies_till_next_fqs);
 547        if (j < HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV)
 548                j = HZ / 10 + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
 549        pr_info("RCU calculated value of scheduler-enlistment delay is %ld jiffies.\n", j);
 550        WRITE_ONCE(jiffies_to_sched_qs, j);
 551}
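/*
 * Editorial worked example (not part of the upstream file): with HZ=1000,
 * a small machine (so nr_cpu_ids / RCU_JIFFIES_FQS_DIV rounds to 0), and
 * jiffies_till_first_fqs = jiffies_till_next_fqs = 1, the computation
 * above yields j = 1 + 2 * 1 = 3, which is then raised to the floor of
 * HZ / 10 + 0 = 100 jiffies, so jiffies_to_sched_qs becomes 100.
 */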
 552
 553static int param_set_first_fqs_jiffies(const char *val, const struct kernel_param *kp)
 554{
 555        ulong j;
 556        int ret = kstrtoul(val, 0, &j);
 557
 558        if (!ret) {
 559                WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : j);
 560                adjust_jiffies_till_sched_qs();
 561        }
 562        return ret;
 563}
 564
 565static int param_set_next_fqs_jiffies(const char *val, const struct kernel_param *kp)
 566{
 567        ulong j;
 568        int ret = kstrtoul(val, 0, &j);
 569
 570        if (!ret) {
 571                WRITE_ONCE(*(ulong *)kp->arg, (j > HZ) ? HZ : (j ?: 1));
 572                adjust_jiffies_till_sched_qs();
 573        }
 574        return ret;
 575}
 576
 577static const struct kernel_param_ops first_fqs_jiffies_ops = {
 578        .set = param_set_first_fqs_jiffies,
 579        .get = param_get_ulong,
 580};
 581
 582static const struct kernel_param_ops next_fqs_jiffies_ops = {
 583        .set = param_set_next_fqs_jiffies,
 584        .get = param_get_ulong,
 585};
 586
 587module_param_cb(jiffies_till_first_fqs, &first_fqs_jiffies_ops, &jiffies_till_first_fqs, 0644);
 588module_param_cb(jiffies_till_next_fqs, &next_fqs_jiffies_ops, &jiffies_till_next_fqs, 0644);
 589module_param(rcu_kick_kthreads, bool, 0644);
 590
 591static void force_qs_rnp(int (*f)(struct rcu_data *rdp));
 592static int rcu_pending(int user);
 593
 594/*
 595 * Return the number of RCU GPs completed thus far for debug & stats.
 596 */
 597unsigned long rcu_get_gp_seq(void)
 598{
 599        return READ_ONCE(rcu_state.gp_seq);
 600}
 601EXPORT_SYMBOL_GPL(rcu_get_gp_seq);
 602
 603/*
 604 * Return the number of RCU expedited batches completed thus far for
 605 * debug & stats.  Odd numbers mean that a batch is in progress, even
 606 * numbers mean idle.  The value returned will thus be roughly double
 607 * the cumulative batches since boot.
 608 */
 609unsigned long rcu_exp_batches_completed(void)
 610{
 611        return rcu_state.expedited_sequence;
 612}
 613EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
 614
 615/*
 616 * Return the root node of the rcu_state structure.
 617 */
 618static struct rcu_node *rcu_get_root(void)
 619{
 620        return &rcu_state.node[0];
 621}
 622
 623/*
 624 * Send along grace-period-related data for rcutorture diagnostics.
 625 */
 626void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
 627                            unsigned long *gp_seq)
 628{
 629        switch (test_type) {
 630        case RCU_FLAVOR:
 631                *flags = READ_ONCE(rcu_state.gp_flags);
 632                *gp_seq = rcu_seq_current(&rcu_state.gp_seq);
 633                break;
 634        default:
 635                break;
 636        }
 637}
 638EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
 639
 640/*
 641 * Enter an RCU extended quiescent state, which can be either the
 642 * idle loop or adaptive-tickless usermode execution.
 643 *
 644 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
 645 * the possibility of usermode upcalls having messed up our count
 646 * of interrupt nesting level during the prior busy period.
 647 */
 648static noinstr void rcu_eqs_enter(bool user)
 649{
 650        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 651
 652        WARN_ON_ONCE(rdp->dynticks_nmi_nesting != DYNTICK_IRQ_NONIDLE);
 653        WRITE_ONCE(rdp->dynticks_nmi_nesting, 0);
 654        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
 655                     rdp->dynticks_nesting == 0);
 656        if (rdp->dynticks_nesting != 1) {
 657                // RCU will still be watching, so just do accounting and leave.
 658                rdp->dynticks_nesting--;
 659                return;
 660        }
 661
 662        lockdep_assert_irqs_disabled();
 663        instrumentation_begin();
 664        trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
 665        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
 666        rcu_prepare_for_idle();
 667        rcu_preempt_deferred_qs(current);
 668
 669        // instrumentation for the noinstr rcu_dynticks_eqs_enter()
 670        instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
 671
 672        instrumentation_end();
 673        WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
 674        // RCU is watching here ...
 675        rcu_dynticks_eqs_enter();
 676        // ... but is no longer watching here.
 677        rcu_dynticks_task_enter();
 678}
 679
 680/**
 681 * rcu_idle_enter - inform RCU that current CPU is entering idle
 682 *
 683 * Enter idle mode, in other words, -leave- the mode in which RCU
 684 * read-side critical sections can occur.  (Though RCU read-side
 685 * critical sections can occur in irq handlers in idle, a possibility
 686 * handled by irq_enter() and irq_exit().)
 687 *
 688 * If you add or remove a call to rcu_idle_enter(), be sure to test with
 689 * CONFIG_RCU_EQS_DEBUG=y.
 690 */
 691void rcu_idle_enter(void)
 692{
 693        lockdep_assert_irqs_disabled();
 694        rcu_eqs_enter(false);
 695}
 696EXPORT_SYMBOL_GPL(rcu_idle_enter);
 697
 698#ifdef CONFIG_NO_HZ_FULL
 699
 700#if !defined(CONFIG_GENERIC_ENTRY) || !defined(CONFIG_KVM_XFER_TO_GUEST_WORK)
 701/*
 702 * An empty function that will trigger a reschedule on
 703 * IRQ tail once IRQs get re-enabled on userspace/guest resume.
 704 */
 705static void late_wakeup_func(struct irq_work *work)
 706{
 707}
 708
 709static DEFINE_PER_CPU(struct irq_work, late_wakeup_work) =
 710        IRQ_WORK_INIT(late_wakeup_func);
 711
 712/*
 713 * If either:
 714 *
 715 * 1) the task is about to enter guest mode and $ARCH doesn't support KVM generic work
 716 * 2) the task is about to enter user mode and $ARCH doesn't support generic entry.
 717 *
 718 * In these cases the late RCU wakeups aren't supported in the resched loops and our
 719 * last resort is to fire a local irq_work that will trigger a reschedule once IRQs
 720 * get re-enabled again.
 721 */
 722noinstr static void rcu_irq_work_resched(void)
 723{
 724        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 725
 726        if (IS_ENABLED(CONFIG_GENERIC_ENTRY) && !(current->flags & PF_VCPU))
 727                return;
 728
 729        if (IS_ENABLED(CONFIG_KVM_XFER_TO_GUEST_WORK) && (current->flags & PF_VCPU))
 730                return;
 731
 732        instrumentation_begin();
 733        if (do_nocb_deferred_wakeup(rdp) && need_resched()) {
 734                irq_work_queue(this_cpu_ptr(&late_wakeup_work));
 735        }
 736        instrumentation_end();
 737}
 738
 739#else
 740static inline void rcu_irq_work_resched(void) { }
 741#endif
 742
 743/**
 744 * rcu_user_enter - inform RCU that we are resuming userspace.
 745 *
 746 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 747 * is permitted between this call and rcu_user_exit(). This way the
 748 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 749 * when the CPU runs in userspace.
 750 *
 751 * If you add or remove a call to rcu_user_enter(), be sure to test with
 752 * CONFIG_RCU_EQS_DEBUG=y.
 753 */
 754noinstr void rcu_user_enter(void)
 755{
 756        lockdep_assert_irqs_disabled();
 757
 758        /*
 759         * Other than the generic entry implementation, we may be past the last
 760         * rescheduling opportunity in the entry code. Trigger a self IPI
 761         * that will fire and reschedule once we resume in user/guest mode.
 762         */
 763        rcu_irq_work_resched();
 764        rcu_eqs_enter(true);
 765}
 766
 767#endif /* CONFIG_NO_HZ_FULL */
 768
 769/**
 770 * rcu_nmi_exit - inform RCU of exit from NMI context
 771 *
 772 * If we are returning from the outermost NMI handler that interrupted an
 773 * RCU-idle period, update rdp->dynticks and rdp->dynticks_nmi_nesting
 774 * to let the RCU grace-period handling know that the CPU is back to
 775 * being RCU-idle.
 776 *
 777 * If you add or remove a call to rcu_nmi_exit(), be sure to test
 778 * with CONFIG_RCU_EQS_DEBUG=y.
 779 */
 780noinstr void rcu_nmi_exit(void)
 781{
 782        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 783
 784        instrumentation_begin();
 785        /*
 786         * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
 787         * (We are exiting an NMI handler, so RCU better be paying attention
 788         * to us!)
 789         */
 790        WARN_ON_ONCE(rdp->dynticks_nmi_nesting <= 0);
 791        WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());
 792
 793        /*
 794         * If the nesting level is not 1, the CPU wasn't RCU-idle, so
 795         * leave it in non-RCU-idle state.
 796         */
 797        if (rdp->dynticks_nmi_nesting != 1) {
 798                trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, rdp->dynticks_nmi_nesting - 2,
 799                                  atomic_read(&rdp->dynticks));
 800                WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
 801                           rdp->dynticks_nmi_nesting - 2);
 802                instrumentation_end();
 803                return;
 804        }
 805
 806        /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
 807        trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
 808        WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
 809
 810        if (!in_nmi())
 811                rcu_prepare_for_idle();
 812
 813        // instrumentation for the noinstr rcu_dynticks_eqs_enter()
 814        instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
 815        instrumentation_end();
 816
 817        // RCU is watching here ...
 818        rcu_dynticks_eqs_enter();
 819        // ... but is no longer watching here.
 820
 821        if (!in_nmi())
 822                rcu_dynticks_task_enter();
 823}
 824
 825/**
 826 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 827 *
 828 * Exit from an interrupt handler, which might possibly result in entering
 829 * idle mode, in other words, leaving the mode in which read-side critical
 830 * sections can occur.  The caller must have disabled interrupts.
 831 *
 832 * This code assumes that the idle loop never does anything that might
 833 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 834 * architecture's idle loop violates this assumption, RCU will give you what
 835 * you deserve, good and hard.  But very infrequently and irreproducibly.
 836 *
 837 * Use things like work queues to work around this limitation.
 838 *
 839 * You have been warned.
 840 *
 841 * If you add or remove a call to rcu_irq_exit(), be sure to test with
 842 * CONFIG_RCU_EQS_DEBUG=y.
 843 */
 844void noinstr rcu_irq_exit(void)
 845{
 846        lockdep_assert_irqs_disabled();
 847        rcu_nmi_exit();
 848}
 849
 850#ifdef CONFIG_PROVE_RCU
 851/**
 852 * rcu_irq_exit_check_preempt - Validate that scheduling is possible
 853 */
 854void rcu_irq_exit_check_preempt(void)
 855{
 856        lockdep_assert_irqs_disabled();
 857
 858        RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nesting) <= 0,
 859                         "RCU dynticks_nesting counter underflow/zero!");
 860        RCU_LOCKDEP_WARN(__this_cpu_read(rcu_data.dynticks_nmi_nesting) !=
 861                         DYNTICK_IRQ_NONIDLE,
 862                         "Bad RCU dynticks_nmi_nesting counter\n");
 863        RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
 864                         "RCU in extended quiescent state!");
 865}
 866#endif /* #ifdef CONFIG_PROVE_RCU */
 867
 868/*
 869 * Wrapper for rcu_irq_exit() where interrupts are enabled.
 870 *
 871 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
 872 * with CONFIG_RCU_EQS_DEBUG=y.
 873 */
 874void rcu_irq_exit_irqson(void)
 875{
 876        unsigned long flags;
 877
 878        local_irq_save(flags);
 879        rcu_irq_exit();
 880        local_irq_restore(flags);
 881}
 882
 883/*
 884 * Exit an RCU extended quiescent state, which can be either the
 885 * idle loop or adaptive-tickless usermode execution.
 886 *
 887 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
 888 * allow for the possibility of usermode upcalls messing up our count of
 889 * interrupt nesting level during the busy period that is just now starting.
 890 */
 891static void noinstr rcu_eqs_exit(bool user)
 892{
 893        struct rcu_data *rdp;
 894        long oldval;
 895
 896        lockdep_assert_irqs_disabled();
 897        rdp = this_cpu_ptr(&rcu_data);
 898        oldval = rdp->dynticks_nesting;
 899        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
 900        if (oldval) {
 901                // RCU was already watching, so just do accounting and leave.
 902                rdp->dynticks_nesting++;
 903                return;
 904        }
 905        rcu_dynticks_task_exit();
 906        // RCU is not watching here ...
 907        rcu_dynticks_eqs_exit();
 908        // ... but is watching here.
 909        instrumentation_begin();
 910
 911        // instrumentation for the noinstr rcu_dynticks_eqs_exit()
 912        instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
 913
 914        rcu_cleanup_after_idle();
 915        trace_rcu_dyntick(TPS("End"), rdp->dynticks_nesting, 1, atomic_read(&rdp->dynticks));
 916        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
 917        WRITE_ONCE(rdp->dynticks_nesting, 1);
 918        WARN_ON_ONCE(rdp->dynticks_nmi_nesting);
 919        WRITE_ONCE(rdp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
 920        instrumentation_end();
 921}
 922
 923/**
 924 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 925 *
 926 * Exit idle mode, in other words, -enter- the mode in which RCU
 927 * read-side critical sections can occur.
 928 *
 929 * If you add or remove a call to rcu_idle_exit(), be sure to test with
 930 * CONFIG_RCU_EQS_DEBUG=y.
 931 */
 932void rcu_idle_exit(void)
 933{
 934        unsigned long flags;
 935
 936        local_irq_save(flags);
 937        rcu_eqs_exit(false);
 938        local_irq_restore(flags);
 939}
 940EXPORT_SYMBOL_GPL(rcu_idle_exit);
 941
 942#ifdef CONFIG_NO_HZ_FULL
 943/**
 944 * rcu_user_exit - inform RCU that we are exiting userspace.
 945 *
 946 * Exit RCU idle mode while entering the kernel because it can
 947 * run a RCU read side critical section anytime.
 948 *
 949 * If you add or remove a call to rcu_user_exit(), be sure to test with
 950 * CONFIG_RCU_EQS_DEBUG=y.
 951 */
 952void noinstr rcu_user_exit(void)
 953{
 954        rcu_eqs_exit(true);
 955}
 956
 957/**
 958 * __rcu_irq_enter_check_tick - Enable scheduler tick on CPU if RCU needs it.
 959 *
 960 * The scheduler tick is not normally enabled when CPUs enter the kernel
 961 * from nohz_full userspace execution.  After all, nohz_full userspace
 962 * execution is an RCU quiescent state and the time executing in the kernel
 963 * is quite short.  Except of course when it isn't.  And it is not hard to
 964 * cause a large system to spend tens of seconds or even minutes looping
 965 * in the kernel, which can cause a number of problems, including RCU CPU
 966 * stall warnings.
 967 *
 968 * Therefore, if a nohz_full CPU fails to report a quiescent state
 969 * in a timely manner, the RCU grace-period kthread sets that CPU's
 970 * ->rcu_urgent_qs flag with the expectation that the next interrupt or
 971 * exception will invoke this function, which will turn on the scheduler
 972 * tick, which will enable RCU to detect that CPU's quiescent states,
 973 * for example, due to cond_resched() calls in CONFIG_PREEMPT=n kernels.
 974 * The tick will be disabled once a quiescent state is reported for
 975 * this CPU.
 976 *
 977 * Of course, in carefully tuned systems, there might never be an
 978 * interrupt or exception.  In that case, the RCU grace-period kthread
 979 * will eventually cause one to happen.  However, in less carefully
 980 * controlled environments, this function allows RCU to get what it
 981 * needs without creating otherwise useless interruptions.
 982 */
 983void __rcu_irq_enter_check_tick(void)
 984{
 985        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 986
 987        // If we're here from NMI there's nothing to do.
 988        if (in_nmi())
 989                return;
 990
 991        RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
 992                         "Illegal rcu_irq_enter_check_tick() from extended quiescent state");
 993
 994        if (!tick_nohz_full_cpu(rdp->cpu) ||
 995            !READ_ONCE(rdp->rcu_urgent_qs) ||
 996            READ_ONCE(rdp->rcu_forced_tick)) {
 997                // RCU doesn't need nohz_full help from this CPU, or it is
 998                // already getting that help.
 999                return;
1000        }
1001
1002        // We get here only when not in an extended quiescent state and
1003        // from interrupts (as opposed to NMIs).  Therefore, (1) RCU is
1004        // already watching and (2) The fact that we are in an interrupt
1005        // handler and that the rcu_node lock is an irq-disabled lock
1006        // prevents self-deadlock.  So we can safely recheck under the lock.
1007        // Note that the nohz_full state currently cannot change.
1008        raw_spin_lock_rcu_node(rdp->mynode);
1009        if (rdp->rcu_urgent_qs && !rdp->rcu_forced_tick) {
1010                // A nohz_full CPU is in the kernel and RCU needs a
1011                // quiescent state.  Turn on the tick!
1012                WRITE_ONCE(rdp->rcu_forced_tick, true);
1013                tick_dep_set_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
1014        }
1015        raw_spin_unlock_rcu_node(rdp->mynode);
1016}
1017#endif /* CONFIG_NO_HZ_FULL */
1018
1019/**
1020 * rcu_nmi_enter - inform RCU of entry to NMI context
1021 *
1022 * If the CPU was idle from RCU's viewpoint, update rdp->dynticks and
1023 * rdp->dynticks_nmi_nesting to let the RCU grace-period handling know
1024 * that the CPU is active.  This implementation permits nested NMIs, as
1025 * long as the nesting level does not overflow an int.  (You will probably
1026 * run out of stack space first.)
1027 *
1028 * If you add or remove a call to rcu_nmi_enter(), be sure to test
1029 * with CONFIG_RCU_EQS_DEBUG=y.
1030 */
1031noinstr void rcu_nmi_enter(void)
1032{
1033        long incby = 2;
1034        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1035
1036        /* Complain about underflow. */
1037        WARN_ON_ONCE(rdp->dynticks_nmi_nesting < 0);
1038
1039        /*
1040         * If idle from RCU viewpoint, atomically increment ->dynticks
1041         * to mark non-idle and increment ->dynticks_nmi_nesting by one.
1042         * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
1043         * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
1044         * to be in the outermost NMI handler that interrupted an RCU-idle
1045         * period (observation due to Andy Lutomirski).
1046         */
1047        if (rcu_dynticks_curr_cpu_in_eqs()) {
1048
1049                if (!in_nmi())
1050                        rcu_dynticks_task_exit();
1051
1052                // RCU is not watching here ...
1053                rcu_dynticks_eqs_exit();
1054                // ... but is watching here.
1055
1056                if (!in_nmi()) {
1057                        instrumentation_begin();
1058                        rcu_cleanup_after_idle();
1059                        instrumentation_end();
1060                }
1061
1062                instrumentation_begin();
1063                // instrumentation for the noinstr rcu_dynticks_curr_cpu_in_eqs()
1064                instrument_atomic_read(&rdp->dynticks, sizeof(rdp->dynticks));
1065                // instrumentation for the noinstr rcu_dynticks_eqs_exit()
1066                instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
1067
1068                incby = 1;
1069        } else if (!in_nmi()) {
1070                instrumentation_begin();
1071                rcu_irq_enter_check_tick();
1072        } else  {
1073                instrumentation_begin();
1074        }
1075
1076        trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
1077                          rdp->dynticks_nmi_nesting,
1078                          rdp->dynticks_nmi_nesting + incby, atomic_read(&rdp->dynticks));
1079        instrumentation_end();
1080        WRITE_ONCE(rdp->dynticks_nmi_nesting, /* Prevent store tearing. */
1081                   rdp->dynticks_nmi_nesting + incby);
1082        barrier();
1083}
1084
1085/**
1086 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
1087 *
1088 * Enter an interrupt handler, which might possibly result in exiting
1089 * idle mode, in other words, entering the mode in which read-side critical
1090 * sections can occur.  The caller must have disabled interrupts.
1091 *
1092 * Note that the Linux kernel is fully capable of entering an interrupt
1093 * handler that it never exits, for example when doing upcalls to user mode!
1094 * This code assumes that the idle loop never does upcalls to user mode.
1095 * If your architecture's idle loop does do upcalls to user mode (or does
1096 * anything else that results in unbalanced calls to the irq_enter() and
1097 * irq_exit() functions), RCU will give you what you deserve, good and hard.
1098 * But very infrequently and irreproducibly.
1099 *
1100 * Use things like work queues to work around this limitation.
1101 *
1102 * You have been warned.
1103 *
1104 * If you add or remove a call to rcu_irq_enter(), be sure to test with
1105 * CONFIG_RCU_EQS_DEBUG=y.
1106 */
1107noinstr void rcu_irq_enter(void)
1108{
1109        lockdep_assert_irqs_disabled();
1110        rcu_nmi_enter();
1111}
1112
1113/*
1114 * Wrapper for rcu_irq_enter() where interrupts are enabled.
1115 *
1116 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
1117 * with CONFIG_RCU_EQS_DEBUG=y.
1118 */
1119void rcu_irq_enter_irqson(void)
1120{
1121        unsigned long flags;
1122
1123        local_irq_save(flags);
1124        rcu_irq_enter();
1125        local_irq_restore(flags);
1126}
1127
1128/*
1129 * If any sort of urgency was applied to the current CPU (for example,
1130 * the scheduler-clock interrupt was enabled on a nohz_full CPU) in order
1131 * to get to a quiescent state, disable it.
1132 */
1133static void rcu_disable_urgency_upon_qs(struct rcu_data *rdp)
1134{
1135        raw_lockdep_assert_held_rcu_node(rdp->mynode);
1136        WRITE_ONCE(rdp->rcu_urgent_qs, false);
1137        WRITE_ONCE(rdp->rcu_need_heavy_qs, false);
1138        if (tick_nohz_full_cpu(rdp->cpu) && rdp->rcu_forced_tick) {
1139                tick_dep_clear_cpu(rdp->cpu, TICK_DEP_BIT_RCU);
1140                WRITE_ONCE(rdp->rcu_forced_tick, false);
1141        }
1142}
1143
1144/**
1145 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
1146 *
1147 * Return true if RCU is watching the running CPU, which means that this
1148 * CPU can safely enter RCU read-side critical sections.  In other words,
1149 * if the current CPU is not in its idle loop or is in an interrupt or
1150 * NMI handler, return true.
1151 *
1152 * Make notrace because it can be called by the internal functions of
1153 * ftrace, and making this notrace avoids unnecessary recursive calls.
1154 */
1155notrace bool rcu_is_watching(void)
1156{
1157        bool ret;
1158
1159        preempt_disable_notrace();
1160        ret = !rcu_dynticks_curr_cpu_in_eqs();
1161        preempt_enable_notrace();
1162        return ret;
1163}
1164EXPORT_SYMBOL_GPL(rcu_is_watching);
1165
1166/*
1167 * If a holdout task is actually running, request an urgent quiescent
1168 * state from its CPU.  This is unsynchronized, so migrations can cause
1169 * the request to go to the wrong CPU.  Which is OK, all that will happen
1170 * is that the CPU's next context switch will be a bit slower and next
1171 * time around this task will generate another request.
1172 */
1173void rcu_request_urgent_qs_task(struct task_struct *t)
1174{
1175        int cpu;
1176
1177        barrier();
1178        cpu = task_cpu(t);
1179        if (!task_curr(t))
1180                return; /* This task is not running on that CPU. */
1181        smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
1182}
1183
1184#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
1185
1186/*
1187 * Is the current CPU online as far as RCU is concerned?
1188 *
1189 * Disable preemption to avoid false positives that could otherwise
1190 * happen due to the current CPU number being sampled, this task being
1191 * preempted, its old CPU being taken offline, resuming on some other CPU,
1192 * then determining that its old CPU is now offline.
1193 *
1194 * Disable checking if in an NMI handler because we cannot safely
1195 * report errors from NMI handlers anyway.  In addition, it is OK to use
1196 * RCU on an offline processor during initial boot, hence the check for
1197 * rcu_scheduler_fully_active.
1198 */
1199bool rcu_lockdep_current_cpu_online(void)
1200{
1201        struct rcu_data *rdp;
1202        struct rcu_node *rnp;
1203        bool ret = false;
1204
1205        if (in_nmi() || !rcu_scheduler_fully_active)
1206                return true;
1207        preempt_disable_notrace();
1208        rdp = this_cpu_ptr(&rcu_data);
1209        rnp = rdp->mynode;
1210        if (rdp->grpmask & rcu_rnp_online_cpus(rnp) || READ_ONCE(rnp->ofl_seq) & 0x1)
1211                ret = true;
1212        preempt_enable_notrace();
1213        return ret;
1214}
1215EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
1216
1217#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
1218
1219/*
1220 * When trying to report a quiescent state on behalf of some other CPU,
1221 * it is our responsibility to check for and handle potential overflow
1222 * of the rcu_node ->gp_seq counter with respect to the rcu_data counters.
1223 * After all, the CPU might be in deep idle state, and thus executing no
1224 * code whatsoever.
1225 */
1226static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
1227{
1228        raw_lockdep_assert_held_rcu_node(rnp);
1229        if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
1230                         rnp->gp_seq))
1231                WRITE_ONCE(rdp->gpwrap, true);
1232        if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
1233                rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
1234}
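/*
 * Editorial toy example (not part of the upstream file): pretend the
 * counters were only 8 bits wide, so a "quarter of the counter space"
 * would be 63 instead of ULONG_MAX / 4.  If a long-idle CPU still has
 * rdp->gp_seq == 10 while rnp->gp_seq has advanced to 100, the CPU's view
 * lags by 90 > 63, so ->gpwrap is set and the CPU resynchronizes its
 * grace-period view instead of trusting the stale counter.  The second
 * statement above applies the same guard to ->rcu_iw_gp_seq.
 */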
1235
1236/*
1237 * Snapshot the specified CPU's dynticks counter so that we can later
1238 * credit them with an implicit quiescent state.  Return 1 if this CPU
1239 * is in dynticks idle mode, which is an extended quiescent state.
1240 */
1241static int dyntick_save_progress_counter(struct rcu_data *rdp)
1242{
1243        rdp->dynticks_snap = rcu_dynticks_snap(rdp);
1244        if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
1245                trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
1246                rcu_gpnum_ovf(rdp->mynode, rdp);
1247                return 1;
1248        }
1249        return 0;
1250}
1251
1252/*
1253 * Return true if the specified CPU has passed through a quiescent
1254 * state by virtue of being in or having passed through a dynticks
1255 * idle state since the last call to dyntick_save_progress_counter()
1256 * for this same CPU, or by virtue of having been offline.
1257 */
1258static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
1259{
1260        unsigned long jtsq;
1261        bool *rnhqp;
1262        bool *ruqp;
1263        struct rcu_node *rnp = rdp->mynode;
1264
1265        /*
1266         * If the CPU passed through or entered a dynticks idle phase with
1267         * no active irq/NMI handlers, then we can safely pretend that the CPU
1268         * already acknowledged the request to pass through a quiescent
1269         * state.  Either way, that CPU cannot possibly be in an RCU
1270         * read-side critical section that started before the beginning
1271         * of the current RCU grace period.
1272         */
1273        if (rcu_dynticks_in_eqs_since(rdp, rdp->dynticks_snap)) {
1274                trace_rcu_fqs(rcu_state.name, rdp->gp_seq, rdp->cpu, TPS("dti"));
1275                rcu_gpnum_ovf(rnp, rdp);
1276                return 1;
1277        }
1278
1279        /*
1280         * Complain if a CPU that is considered to be offline from RCU's
1281         * perspective has not yet reported a quiescent state.  After all,
1282         * the offline CPU should have reported a quiescent state during
1283         * the CPU-offline process, or, failing that, by rcu_gp_init()
1284         * if it ran concurrently with either the CPU going offline or the
1285         * last task on a leaf rcu_node structure exiting its RCU read-side
1286         * critical section while all CPUs corresponding to that structure
1287         * are offline.  This added warning detects bugs in any of these
1288         * code paths.
1289         *
1290         * The rcu_node structure's ->lock is held here, which excludes
1291         * the relevant portions of the CPU-hotplug code, the grace-period
1292         * initialization code, and the rcu_read_unlock() code paths.
1293         *
1294         * For more detail, please refer to the "Hotplug CPU" section
1295         * of RCU's Requirements documentation.
1296         */
1297        if (WARN_ON_ONCE(!(rdp->grpmask & rcu_rnp_online_cpus(rnp)))) {
1298                bool onl;
1299                struct rcu_node *rnp1;
1300
1301                pr_info("%s: grp: %d-%d level: %d ->gp_seq %ld ->completedqs %ld\n",
1302                        __func__, rnp->grplo, rnp->grphi, rnp->level,
1303                        (long)rnp->gp_seq, (long)rnp->completedqs);
1304                for (rnp1 = rnp; rnp1; rnp1 = rnp1->parent)
1305                        pr_info("%s: %d:%d ->qsmask %#lx ->qsmaskinit %#lx ->qsmaskinitnext %#lx ->rcu_gp_init_mask %#lx\n",
1306                                __func__, rnp1->grplo, rnp1->grphi, rnp1->qsmask, rnp1->qsmaskinit, rnp1->qsmaskinitnext, rnp1->rcu_gp_init_mask);
1307                onl = !!(rdp->grpmask & rcu_rnp_online_cpus(rnp));
1308                pr_info("%s %d: %c online: %ld(%d) offline: %ld(%d)\n",
1309                        __func__, rdp->cpu, ".o"[onl],
1310                        (long)rdp->rcu_onl_gp_seq, rdp->rcu_onl_gp_flags,
1311                        (long)rdp->rcu_ofl_gp_seq, rdp->rcu_ofl_gp_flags);
1312                return 1; /* Break things loose after complaining. */
1313        }
1314
1315        /*
1316         * A CPU running for an extended time within the kernel can
1317         * delay RCU grace periods: (1) At age jiffies_to_sched_qs,
1318         * set .rcu_urgent_qs, (2) At age 2*jiffies_to_sched_qs, set
1319         * both .rcu_need_heavy_qs and .rcu_urgent_qs.  Note that the
1320         * unsynchronized assignments to the per-CPU rcu_need_heavy_qs
1321         * variable are safe because the assignments are repeated if this
1322         * CPU failed to pass through a quiescent state.  This code
1323         * also checks .jiffies_resched in case jiffies_to_sched_qs
1324         * is set way high.
1325         */
1326        jtsq = READ_ONCE(jiffies_to_sched_qs);
1327        ruqp = per_cpu_ptr(&rcu_data.rcu_urgent_qs, rdp->cpu);
1328        rnhqp = &per_cpu(rcu_data.rcu_need_heavy_qs, rdp->cpu);
1329        if (!READ_ONCE(*rnhqp) &&
1330            (time_after(jiffies, rcu_state.gp_start + jtsq * 2) ||
1331             time_after(jiffies, rcu_state.jiffies_resched) ||
1332             rcu_state.cbovld)) {
1333                WRITE_ONCE(*rnhqp, true);
1334                /* Store rcu_need_heavy_qs before rcu_urgent_qs. */
1335                smp_store_release(ruqp, true);
1336        } else if (time_after(jiffies, rcu_state.gp_start + jtsq)) {
1337                WRITE_ONCE(*ruqp, true);
1338        }
1339
1340        /*
1341         * NO_HZ_FULL CPUs can run in-kernel without rcu_sched_clock_irq!
1342         * The above code handles this, but only for straight cond_resched().
1343         * And some in-kernel loops check need_resched() before calling
1344         * cond_resched(), which defeats the above code for CPUs that are
1345         * running in-kernel with scheduling-clock interrupts disabled.
1346         * So hit them over the head with the resched_cpu() hammer!
1347         */
1348        if (tick_nohz_full_cpu(rdp->cpu) &&
1349            (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
1350             rcu_state.cbovld)) {
1351                WRITE_ONCE(*ruqp, true);
1352                resched_cpu(rdp->cpu);
1353                WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1354        }
1355
1356        /*
1357         * If more than halfway to RCU CPU stall-warning time, invoke
1358         * resched_cpu() more frequently to try to loosen things up a bit.
1359         * Also check to see if the CPU is getting hammered with interrupts,
1360         * but only once per grace period, just to keep the IPIs down to
1361         * a dull roar.
1362         */
1363        if (time_after(jiffies, rcu_state.jiffies_resched)) {
1364                if (time_after(jiffies,
1365                               READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
1366                        resched_cpu(rdp->cpu);
1367                        WRITE_ONCE(rdp->last_fqs_resched, jiffies);
1368                }
1369                if (IS_ENABLED(CONFIG_IRQ_WORK) &&
1370                    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
1371                    (rnp->ffmask & rdp->grpmask)) {
1372                        rdp->rcu_iw_pending = true;
1373                        rdp->rcu_iw_gp_seq = rnp->gp_seq;
1374                        irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
1375                }
1376        }
1377
1378        return 0;
1379}
1380
1381/* Trace-event wrapper function for trace_rcu_future_grace_period.  */
1382static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
1383                              unsigned long gp_seq_req, const char *s)
1384{
1385        trace_rcu_future_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
1386                                      gp_seq_req, rnp->level,
1387                                      rnp->grplo, rnp->grphi, s);
1388}
1389
1390/*
1391 * rcu_start_this_gp - Request the start of a particular grace period
1392 * @rnp_start: The leaf node of the CPU from which to start.
1393 * @rdp: The rcu_data corresponding to the CPU from which to start.
1394 * @gp_seq_req: The gp_seq of the grace period to start.
1395 *
1396 * Start the specified grace period, as needed to handle newly arrived
1397 * callbacks.  The required future grace periods are recorded in each
1398 * rcu_node structure's ->gp_seq_needed field.  Returns true if there
1399 * is reason to awaken the grace-period kthread.
1400 *
1401 * The caller must hold the specified rcu_node structure's ->lock, which
1402 * is why the caller is responsible for waking the grace-period kthread.
1403 *
1404 * Returns true if the GP kthread needs to be awakened, else false.
1405 */
1406static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
1407                              unsigned long gp_seq_req)
1408{
1409        bool ret = false;
1410        struct rcu_node *rnp;
1411
1412        /*
1413         * Use funnel locking to either acquire the root rcu_node
1414         * structure's lock or bail out if the need for this grace period
1415         * has already been recorded -- or if that grace period has in
1416         * fact already started.  If there is already a grace period in
1417         * progress in a non-leaf node, no recording is needed because the
1418         * end of the grace period will scan the leaf rcu_node structures.
1419         * Note that rnp_start->lock must not be released.
1420         */
1421        raw_lockdep_assert_held_rcu_node(rnp_start);
1422        trace_rcu_this_gp(rnp_start, rdp, gp_seq_req, TPS("Startleaf"));
1423        for (rnp = rnp_start; 1; rnp = rnp->parent) {
1424                if (rnp != rnp_start)
1425                        raw_spin_lock_rcu_node(rnp);
1426                if (ULONG_CMP_GE(rnp->gp_seq_needed, gp_seq_req) ||
1427                    rcu_seq_started(&rnp->gp_seq, gp_seq_req) ||
1428                    (rnp != rnp_start &&
1429                     rcu_seq_state(rcu_seq_current(&rnp->gp_seq)))) {
1430                        trace_rcu_this_gp(rnp, rdp, gp_seq_req,
1431                                          TPS("Prestarted"));
1432                        goto unlock_out;
1433                }
1434                WRITE_ONCE(rnp->gp_seq_needed, gp_seq_req);
1435                if (rcu_seq_state(rcu_seq_current(&rnp->gp_seq))) {
1436                        /*
1437                         * We just marked the leaf or internal node, and a
1438                         * grace period is in progress, which means that
1439                         * rcu_gp_cleanup() will see the marking.  Bail to
1440                         * reduce contention.
1441                         */
1442                        trace_rcu_this_gp(rnp_start, rdp, gp_seq_req,
1443                                          TPS("Startedleaf"));
1444                        goto unlock_out;
1445                }
1446                if (rnp != rnp_start && rnp->parent != NULL)
1447                        raw_spin_unlock_rcu_node(rnp);
1448                if (!rnp->parent)
1449                        break;  /* At root, and perhaps also leaf. */
1450        }
1451
1452        /* If GP already in progress, just leave, otherwise start one. */
1453        if (rcu_gp_in_progress()) {
1454                trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedleafroot"));
1455                goto unlock_out;
1456        }
1457        trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("Startedroot"));
1458        WRITE_ONCE(rcu_state.gp_flags, rcu_state.gp_flags | RCU_GP_FLAG_INIT);
1459        WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
1460        if (!READ_ONCE(rcu_state.gp_kthread)) {
1461                trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
1462                goto unlock_out;
1463        }
1464        trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
1465        ret = true;  /* Caller must wake GP kthread. */
1466unlock_out:
1467        /* Push furthest requested GP to leaf node and rcu_data structure. */
1468        if (ULONG_CMP_LT(gp_seq_req, rnp->gp_seq_needed)) {
1469                WRITE_ONCE(rnp_start->gp_seq_needed, rnp->gp_seq_needed);
1470                WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1471        }
1472        if (rnp != rnp_start)
1473                raw_spin_unlock_rcu_node(rnp);
1474        return ret;
1475}
1476
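/*
 * In outline, the funnel locking above walks from the requesting leaf
 * toward the root, holding at most one non-leaf rcu_node ->lock at a
 * time in addition to the caller-held rnp_start->lock, roughly:
 *
 *	leaf (rnp_start, ->lock held by caller)
 *	  -> parent: lock, check/record ->gp_seq_needed, unlock
 *	    -> ...
 *	      -> root: lock retained, grace period started if needed
 *
 * Each level bails out early once the requested grace period has been
 * recorded or has already started, which bounds contention on the root
 * rcu_node structure's ->lock.
 */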
1477/*
1478 * Clean up any old requests for the just-ended grace period.  Also return
1479 * whether any additional grace periods have been requested.
1480 */
1481static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
1482{
1483        bool needmore;
1484        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
1485
1486        needmore = ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed);
1487        if (!needmore)
1488                rnp->gp_seq_needed = rnp->gp_seq; /* Avoid counter wrap. */
1489        trace_rcu_this_gp(rnp, rdp, rnp->gp_seq,
1490                          needmore ? TPS("CleanupMore") : TPS("Cleanup"));
1491        return needmore;
1492}
1493
1494/*
1495 * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
1496 * interrupt or softirq handler, in which case we just might immediately
1497 * sleep upon return, resulting in a grace-period hang), and don't bother
1498 * awakening when there is nothing for the grace-period kthread to do
1499 * (as in several CPUs raced to awaken, we lost), and finally don't try
1500 * to awaken a kthread that has not yet been created.  If all those checks
1501 * are passed, track some debug information and awaken.
1502 *
1503 * So why do the self-wakeup when in an interrupt or softirq handler
1504 * in the grace-period kthread's context?  Because the kthread might have
1505 * been interrupted just as it was going to sleep, and just after the final
1506 * pre-sleep check of the awaken condition.  In this case, a wakeup really
1507 * is required, and is therefore supplied.
1508 */
1509static void rcu_gp_kthread_wake(void)
1510{
1511        struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);
1512
1513        if ((current == t && !in_irq() && !in_serving_softirq()) ||
1514            !READ_ONCE(rcu_state.gp_flags) || !t)
1515                return;
1516        WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
1517        WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
1518        swake_up_one(&rcu_state.gp_wq);
1519}
1520
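/*
 * A typical caller therefore records the need for a wakeup while holding
 * the leaf rcu_node structure's ->lock and invokes rcu_gp_kthread_wake()
 * only after dropping that lock, for example (sketch, mirroring
 * rcu_accelerate_cbs_unlocked() below):
 *
 *	raw_spin_lock_rcu_node(rnp);
 *	needwake = rcu_accelerate_cbs(rnp, rdp);
 *	raw_spin_unlock_rcu_node(rnp);
 *	if (needwake)
 *		rcu_gp_kthread_wake();
 */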
1521/*
1522 * If there is room, assign a ->gp_seq number to any callbacks on this
1523 * CPU that have not already been assigned.  Also accelerate any callbacks
1524 * that were previously assigned a ->gp_seq number that has since proven
1525 * to be too conservative, which can happen if callbacks get assigned a
1526 * ->gp_seq number while RCU is idle, but with reference to a non-root
1527 * rcu_node structure.  This function is idempotent, so it does not hurt
1528 * to call it repeatedly.  Returns a flag indicating whether to awaken
1529 * the RCU grace-period kthread.
1530 *
1531 * The caller must hold rnp->lock with interrupts disabled.
1532 */
1533static bool rcu_accelerate_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1534{
1535        unsigned long gp_seq_req;
1536        bool ret = false;
1537
1538        rcu_lockdep_assert_cblist_protected(rdp);
1539        raw_lockdep_assert_held_rcu_node(rnp);
1540
1541        /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1542        if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1543                return false;
1544
1545        trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPreAcc"));
1546
1547        /*
1548         * Callbacks are often registered with incomplete grace-period
1549         * information.  Something about the fact that getting exact
1550         * information requires acquiring a global lock...  RCU therefore
1551         * makes a conservative estimate of the grace period number at which
1552         * a given callback will become ready to invoke.  The following
1553         * code checks this estimate and improves it when possible, thus
1554         * accelerating callback invocation to an earlier grace-period
1555         * number.
1556         */
1557        gp_seq_req = rcu_seq_snap(&rcu_state.gp_seq);
1558        if (rcu_segcblist_accelerate(&rdp->cblist, gp_seq_req))
1559                ret = rcu_start_this_gp(rnp, rdp, gp_seq_req);
1560
1561        /* Trace depending on how much we were able to accelerate. */
1562        if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
1563                trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccWaitCB"));
1564        else
1565                trace_rcu_grace_period(rcu_state.name, gp_seq_req, TPS("AccReadyCB"));
1566
1567        trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbPostAcc"));
1568
1569        return ret;
1570}
1571
1572/*
1573 * Similar to rcu_accelerate_cbs(), but does not require that the leaf
1574 * rcu_node structure's ->lock be held.  It consults the cached value
1575 * of ->gp_seq_needed in the rcu_data structure, and if that indicates
1576 * that a new grace-period request be made, invokes rcu_accelerate_cbs()
1577 * while holding the leaf rcu_node structure's ->lock.
1578 */
1579static void rcu_accelerate_cbs_unlocked(struct rcu_node *rnp,
1580                                        struct rcu_data *rdp)
1581{
1582        unsigned long c;
1583        bool needwake;
1584
1585        rcu_lockdep_assert_cblist_protected(rdp);
1586        c = rcu_seq_snap(&rcu_state.gp_seq);
1587        if (!READ_ONCE(rdp->gpwrap) && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
1588                /* Old request still live, so mark recent callbacks. */
1589                (void)rcu_segcblist_accelerate(&rdp->cblist, c);
1590                return;
1591        }
1592        raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
1593        needwake = rcu_accelerate_cbs(rnp, rdp);
1594        raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
1595        if (needwake)
1596                rcu_gp_kthread_wake();
1597}
1598
1599/*
1600 * Move any callbacks whose grace period has completed to the
1601 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
1602 * assign ->gp_seq numbers to any callbacks in the RCU_NEXT_TAIL
1603 * sublist.  This function is idempotent, so it does not hurt to
1604 * invoke it repeatedly.  As long as it is not invoked -too- often...
1605 * Returns true if the RCU grace-period kthread needs to be awakened.
1606 *
1607 * The caller must hold rnp->lock with interrupts disabled.
1608 */
1609static bool rcu_advance_cbs(struct rcu_node *rnp, struct rcu_data *rdp)
1610{
1611        rcu_lockdep_assert_cblist_protected(rdp);
1612        raw_lockdep_assert_held_rcu_node(rnp);
1613
1614        /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
1615        if (!rcu_segcblist_pend_cbs(&rdp->cblist))
1616                return false;
1617
1618        /*
1619         * Find all callbacks whose ->gp_seq numbers indicate that they
1620         * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
1621         */
1622        rcu_segcblist_advance(&rdp->cblist, rnp->gp_seq);
1623
1624        /* Classify any remaining callbacks. */
1625        return rcu_accelerate_cbs(rnp, rdp);
1626}
1627
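/*
 * Rough picture of the segmented callback list that rcu_accelerate_cbs()
 * and rcu_advance_cbs() operate on (see the rcu_segcblist definitions),
 * ordered by grace-period number:
 *
 *	RCU_DONE_TAIL		callbacks whose grace period has ended
 *	RCU_WAIT_TAIL		callbacks waiting on the current grace period
 *	RCU_NEXT_READY_TAIL	callbacks waiting on the next grace period
 *	RCU_NEXT_TAIL		callbacks not yet assigned a ->gp_seq number
 *
 * Advancing moves newly ready callbacks into RCU_DONE_TAIL, while
 * accelerating assigns ->gp_seq numbers to the unassigned callbacks.
 */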
1628/*
1629 * Move and classify callbacks, but only if doing so won't require
1630 * that the RCU grace-period kthread be awakened.
1631 */
1632static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
1633                                                  struct rcu_data *rdp)
1634{
1635        rcu_lockdep_assert_cblist_protected(rdp);
1636        if (!rcu_seq_state(rcu_seq_current(&rnp->gp_seq)) ||
1637            !raw_spin_trylock_rcu_node(rnp))
1638                return;
1639        WARN_ON_ONCE(rcu_advance_cbs(rnp, rdp));
1640        raw_spin_unlock_rcu_node(rnp);
1641}
1642
1643/*
1644 * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
1645 * quiescent state.  This is intended to be invoked when the CPU notices
1646 * a new grace period.
1647 */
1648static void rcu_strict_gp_check_qs(void)
1649{
1650        if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
1651                rcu_read_lock();
1652                rcu_read_unlock();
1653        }
1654}
1655
1656/*
1657 * Update CPU-local rcu_data state to record the beginnings and ends of
1658 * grace periods.  The caller must hold the ->lock of the leaf rcu_node
1659 * structure corresponding to the current CPU, and must have irqs disabled.
1660 * Returns true if the grace-period kthread needs to be awakened.
1661 */
1662static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
1663{
1664        bool ret = false;
1665        bool need_qs;
1666        const bool offloaded = rcu_rdp_is_offloaded(rdp);
1667
1668        raw_lockdep_assert_held_rcu_node(rnp);
1669
1670        if (rdp->gp_seq == rnp->gp_seq)
1671                return false; /* Nothing to do. */
1672
1673        /* Handle the ends of any preceding grace periods first. */
1674        if (rcu_seq_completed_gp(rdp->gp_seq, rnp->gp_seq) ||
1675            unlikely(READ_ONCE(rdp->gpwrap))) {
1676                if (!offloaded)
1677                        ret = rcu_advance_cbs(rnp, rdp); /* Advance CBs. */
1678                rdp->core_needs_qs = false;
1679                trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuend"));
1680        } else {
1681                if (!offloaded)
1682                        ret = rcu_accelerate_cbs(rnp, rdp); /* Recent CBs. */
1683                if (rdp->core_needs_qs)
1684                        rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
1685        }
1686
1687        /* Now handle the beginnings of any new-to-this-CPU grace periods. */
1688        if (rcu_seq_new_gp(rdp->gp_seq, rnp->gp_seq) ||
1689            unlikely(READ_ONCE(rdp->gpwrap))) {
1690                /*
1691                 * If the current grace period is waiting for this CPU,
1692                 * set up to detect a quiescent state, otherwise don't
1693                 * go looking for one.
1694                 */
1695                trace_rcu_grace_period(rcu_state.name, rnp->gp_seq, TPS("cpustart"));
1696                need_qs = !!(rnp->qsmask & rdp->grpmask);
1697                rdp->cpu_no_qs.b.norm = need_qs;
1698                rdp->core_needs_qs = need_qs;
1699                zero_cpu_stall_ticks(rdp);
1700        }
1701        rdp->gp_seq = rnp->gp_seq;  /* Remember new grace-period state. */
1702        if (ULONG_CMP_LT(rdp->gp_seq_needed, rnp->gp_seq_needed) || rdp->gpwrap)
1703                WRITE_ONCE(rdp->gp_seq_needed, rnp->gp_seq_needed);
1704        WRITE_ONCE(rdp->gpwrap, false);
1705        rcu_gpnum_ovf(rnp, rdp);
1706        return ret;
1707}
1708
1709static void note_gp_changes(struct rcu_data *rdp)
1710{
1711        unsigned long flags;
1712        bool needwake;
1713        struct rcu_node *rnp;
1714
1715        local_irq_save(flags);
1716        rnp = rdp->mynode;
1717        if ((rdp->gp_seq == rcu_seq_current(&rnp->gp_seq) &&
1718             !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
1719            !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
1720                local_irq_restore(flags);
1721                return;
1722        }
1723        needwake = __note_gp_changes(rnp, rdp);
1724        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
1725        rcu_strict_gp_check_qs();
1726        if (needwake)
1727                rcu_gp_kthread_wake();
1728}
1729
1730static void rcu_gp_slow(int delay)
1731{
1732        if (delay > 0 &&
1733            !(rcu_seq_ctr(rcu_state.gp_seq) %
1734              (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
1735                schedule_timeout_idle(delay);
1736}
1737
1738static unsigned long sleep_duration;
1739
1740/* Allow rcutorture to stall the grace-period kthread. */
1741void rcu_gp_set_torture_wait(int duration)
1742{
1743        if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST) && duration > 0)
1744                WRITE_ONCE(sleep_duration, duration);
1745}
1746EXPORT_SYMBOL_GPL(rcu_gp_set_torture_wait);
1747
1748/* Actually implement the aforementioned wait. */
1749static void rcu_gp_torture_wait(void)
1750{
1751        unsigned long duration;
1752
1753        if (!IS_ENABLED(CONFIG_RCU_TORTURE_TEST))
1754                return;
1755        duration = xchg(&sleep_duration, 0UL);
1756        if (duration > 0) {
1757                pr_alert("%s: Waiting %lu jiffies\n", __func__, duration);
1758                schedule_timeout_idle(duration);
1759                pr_alert("%s: Wait complete\n", __func__);
1760        }
1761}
1762
1763/*
1764 * Handler for on_each_cpu() to invoke the target CPU's RCU core
1765 * processing.
1766 */
1767static void rcu_strict_gp_boundary(void *unused)
1768{
1769        invoke_rcu_core();
1770}
1771
1772/*
1773 * Initialize a new grace period.  Return false if no grace period required.
1774 */
1775static bool rcu_gp_init(void)
1776{
1777        unsigned long firstseq;
1778        unsigned long flags;
1779        unsigned long oldmask;
1780        unsigned long mask;
1781        struct rcu_data *rdp;
1782        struct rcu_node *rnp = rcu_get_root();
1783
1784        WRITE_ONCE(rcu_state.gp_activity, jiffies);
1785        raw_spin_lock_irq_rcu_node(rnp);
1786        if (!READ_ONCE(rcu_state.gp_flags)) {
1787                /* Spurious wakeup, tell caller to go back to sleep.  */
1788                raw_spin_unlock_irq_rcu_node(rnp);
1789                return false;
1790        }
1791        WRITE_ONCE(rcu_state.gp_flags, 0); /* Clear all flags: New GP. */
1792
1793        if (WARN_ON_ONCE(rcu_gp_in_progress())) {
1794                /*
1795                 * Grace period already in progress, don't start another.
1796                 * Not supposed to be able to happen.
1797                 */
1798                raw_spin_unlock_irq_rcu_node(rnp);
1799                return false;
1800        }
1801
1802        /* Advance to a new grace period and initialize state. */
1803        record_gp_stall_check_time();
1804        /* Record GP times before starting GP, hence rcu_seq_start(). */
1805        rcu_seq_start(&rcu_state.gp_seq);
1806        ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
1807        trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
1808        raw_spin_unlock_irq_rcu_node(rnp);
1809
1810        /*
1811         * Apply per-leaf buffered online and offline operations to
1812         * the rcu_node tree. Note that this new grace period need not
1813         * wait for subsequent online CPUs, and that RCU hooks in the CPU
1814         * offlining path, when combined with checks in this function,
1815         * will handle CPUs that are currently going offline or that will
1816         * go offline later.  Please also refer to "Hotplug CPU" section
1817         * of RCU's Requirements documentation.
1818         */
1819        WRITE_ONCE(rcu_state.gp_state, RCU_GP_ONOFF);
1820        rcu_for_each_leaf_node(rnp) {
1821                smp_mb(); // Pair with barriers used when updating ->ofl_seq to odd values.
1822                firstseq = READ_ONCE(rnp->ofl_seq);
1823                if (firstseq & 0x1)
1824                        while (firstseq == READ_ONCE(rnp->ofl_seq))
1825                                schedule_timeout_idle(1);  // Can't wake unless RCU is watching.
1826                smp_mb(); // Pair with barriers used when updating ->ofl_seq to even values.
1827                raw_spin_lock(&rcu_state.ofl_lock);
1828                raw_spin_lock_irq_rcu_node(rnp);
1829                if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
1830                    !rnp->wait_blkd_tasks) {
1831                        /* Nothing to do on this leaf rcu_node structure. */
1832                        raw_spin_unlock_irq_rcu_node(rnp);
1833                        raw_spin_unlock(&rcu_state.ofl_lock);
1834                        continue;
1835                }
1836
1837                /* Record old state, apply changes to ->qsmaskinit field. */
1838                oldmask = rnp->qsmaskinit;
1839                rnp->qsmaskinit = rnp->qsmaskinitnext;
1840
1841                /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
1842                if (!oldmask != !rnp->qsmaskinit) {
1843                        if (!oldmask) { /* First online CPU for rcu_node. */
1844                                if (!rnp->wait_blkd_tasks) /* Ever offline? */
1845                                        rcu_init_new_rnp(rnp);
1846                        } else if (rcu_preempt_has_tasks(rnp)) {
1847                                rnp->wait_blkd_tasks = true; /* blocked tasks */
1848                        } else { /* Last offline CPU and can propagate. */
1849                                rcu_cleanup_dead_rnp(rnp);
1850                        }
1851                }
1852
1853                /*
1854                 * If all waited-on tasks from prior grace period are
1855                 * done, and if all this rcu_node structure's CPUs are
1856                 * still offline, propagate up the rcu_node tree and
1857                 * clear ->wait_blkd_tasks.  Otherwise, if one of this
1858                 * rcu_node structure's CPUs has since come back online,
1859                 * simply clear ->wait_blkd_tasks.
1860                 */
1861                if (rnp->wait_blkd_tasks &&
1862                    (!rcu_preempt_has_tasks(rnp) || rnp->qsmaskinit)) {
1863                        rnp->wait_blkd_tasks = false;
1864                        if (!rnp->qsmaskinit)
1865                                rcu_cleanup_dead_rnp(rnp);
1866                }
1867
1868                raw_spin_unlock_irq_rcu_node(rnp);
1869                raw_spin_unlock(&rcu_state.ofl_lock);
1870        }
1871        rcu_gp_slow(gp_preinit_delay); /* Races with CPU hotplug. */
1872
1873        /*
1874         * Set the quiescent-state-needed bits in all the rcu_node
1875         * structures for all currently online CPUs in breadth-first
1876         * order, starting from the root rcu_node structure, relying on the
1877         * layout of the tree within the rcu_state.node[] array.  Note that
1878         * other CPUs will access only the leaves of the hierarchy, thus
1879         * seeing that no grace period is in progress, at least until the
1880         * corresponding leaf node has been initialized.
1881         *
1882         * The grace period cannot complete until the initialization
1883         * process finishes, because this kthread handles both.
1884         */
1885        WRITE_ONCE(rcu_state.gp_state, RCU_GP_INIT);
1886        rcu_for_each_node_breadth_first(rnp) {
1887                rcu_gp_slow(gp_init_delay);
1888                raw_spin_lock_irqsave_rcu_node(rnp, flags);
1889                rdp = this_cpu_ptr(&rcu_data);
1890                rcu_preempt_check_blocked_tasks(rnp);
1891                rnp->qsmask = rnp->qsmaskinit;
1892                WRITE_ONCE(rnp->gp_seq, rcu_state.gp_seq);
1893                if (rnp == rdp->mynode)
1894                        (void)__note_gp_changes(rnp, rdp);
1895                rcu_preempt_boost_start_gp(rnp);
1896                trace_rcu_grace_period_init(rcu_state.name, rnp->gp_seq,
1897                                            rnp->level, rnp->grplo,
1898                                            rnp->grphi, rnp->qsmask);
1899                /* Quiescent states for tasks on any now-offline CPUs. */
1900                mask = rnp->qsmask & ~rnp->qsmaskinitnext;
1901                rnp->rcu_gp_init_mask = mask;
1902                if ((mask || rnp->wait_blkd_tasks) && rcu_is_leaf_node(rnp))
1903                        rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
1904                else
1905                        raw_spin_unlock_irq_rcu_node(rnp);
1906                cond_resched_tasks_rcu_qs();
1907                WRITE_ONCE(rcu_state.gp_activity, jiffies);
1908        }
1909
1910        // If strict, make all CPUs aware of new grace period.
1911        if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
1912                on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
1913
1914        return true;
1915}
1916
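/*
 * In short, rcu_gp_init() above runs in two phases: the first reconciles
 * each leaf's ->qsmaskinit with buffered CPU-hotplug transitions under
 * ->ofl_lock, and the second walks the tree breadth-first setting each
 * rcu_node structure's ->qsmask and ->gp_seq, so that a leaf is marked
 * as part of the new grace period only after all of its ancestors.
 */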
1917/*
1918 * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
1919 * time.
1920 */
1921static bool rcu_gp_fqs_check_wake(int *gfp)
1922{
1923        struct rcu_node *rnp = rcu_get_root();
1924
1925        // If under overload conditions, force an immediate FQS scan.
1926        if (*gfp & RCU_GP_FLAG_OVLD)
1927                return true;
1928
1929        // Someone like call_rcu() requested a force-quiescent-state scan.
1930        *gfp = READ_ONCE(rcu_state.gp_flags);
1931        if (*gfp & RCU_GP_FLAG_FQS)
1932                return true;
1933
1934        // The current grace period has completed.
1935        if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
1936                return true;
1937
1938        return false;
1939}
1940
1941/*
1942 * Do one round of quiescent-state forcing.
1943 */
1944static void rcu_gp_fqs(bool first_time)
1945{
1946        struct rcu_node *rnp = rcu_get_root();
1947
1948        WRITE_ONCE(rcu_state.gp_activity, jiffies);
1949        rcu_state.n_force_qs++;
1950        if (first_time) {
1951                /* Collect dyntick-idle snapshots. */
1952                force_qs_rnp(dyntick_save_progress_counter);
1953        } else {
1954                /* Handle dyntick-idle and offline CPUs. */
1955                force_qs_rnp(rcu_implicit_dynticks_qs);
1956        }
1957        /* Clear flag to prevent immediate re-entry. */
1958        if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
1959                raw_spin_lock_irq_rcu_node(rnp);
1960                WRITE_ONCE(rcu_state.gp_flags,
1961                           READ_ONCE(rcu_state.gp_flags) & ~RCU_GP_FLAG_FQS);
1962                raw_spin_unlock_irq_rcu_node(rnp);
1963        }
1964}
1965
1966/*
1967 * Loop doing repeated quiescent-state forcing until the grace period ends.
1968 */
1969static void rcu_gp_fqs_loop(void)
1970{
1971        bool first_gp_fqs;
1972        int gf = 0;
1973        unsigned long j;
1974        int ret;
1975        struct rcu_node *rnp = rcu_get_root();
1976
1977        first_gp_fqs = true;
1978        j = READ_ONCE(jiffies_till_first_fqs);
1979        if (rcu_state.cbovld)
1980                gf = RCU_GP_FLAG_OVLD;
1981        ret = 0;
1982        for (;;) {
1983                if (!ret) {
1984                        WRITE_ONCE(rcu_state.jiffies_force_qs, jiffies + j);
1985                        /*
1986                         * jiffies_force_qs before RCU_GP_WAIT_FQS state
1987                         * update; required for stall checks.
1988                         */
1989                        smp_wmb();
1990                        WRITE_ONCE(rcu_state.jiffies_kick_kthreads,
1991                                   jiffies + (j ? 3 * j : 2));
1992                }
1993                trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
1994                                       TPS("fqswait"));
1995                WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_FQS);
1996                ret = swait_event_idle_timeout_exclusive(
1997                                rcu_state.gp_wq, rcu_gp_fqs_check_wake(&gf), j);
1998                rcu_gp_torture_wait();
1999                WRITE_ONCE(rcu_state.gp_state, RCU_GP_DOING_FQS);
2000                /* Locking provides needed memory barriers. */
2001                /* If grace period done, leave loop. */
2002                if (!READ_ONCE(rnp->qsmask) &&
2003                    !rcu_preempt_blocked_readers_cgp(rnp))
2004                        break;
2005                /* If time for quiescent-state forcing, do it. */
2006                if (!time_after(rcu_state.jiffies_force_qs, jiffies) ||
2007                    (gf & (RCU_GP_FLAG_FQS | RCU_GP_FLAG_OVLD))) {
2008                        trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2009                                               TPS("fqsstart"));
2010                        rcu_gp_fqs(first_gp_fqs);
2011                        gf = 0;
2012                        if (first_gp_fqs) {
2013                                first_gp_fqs = false;
2014                                gf = rcu_state.cbovld ? RCU_GP_FLAG_OVLD : 0;
2015                        }
2016                        trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2017                                               TPS("fqsend"));
2018                        cond_resched_tasks_rcu_qs();
2019                        WRITE_ONCE(rcu_state.gp_activity, jiffies);
2020                        ret = 0; /* Force full wait till next FQS. */
2021                        j = READ_ONCE(jiffies_till_next_fqs);
2022                } else {
2023                        /* Deal with stray signal. */
2024                        cond_resched_tasks_rcu_qs();
2025                        WRITE_ONCE(rcu_state.gp_activity, jiffies);
2026                        WARN_ON(signal_pending(current));
2027                        trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2028                                               TPS("fqswaitsig"));
2029                        ret = 1; /* Keep old FQS timing. */
2030                        j = jiffies;
2031                        if (time_after(jiffies, rcu_state.jiffies_force_qs))
2032                                j = 1;
2033                        else
2034                                j = rcu_state.jiffies_force_qs - j;
2035                        gf = 0;
2036                }
2037        }
2038}
2039
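/*
 * Rough timeline of the loop above: the first wait lasts up to
 * jiffies_till_first_fqs, later waits up to jiffies_till_next_fqs, and a
 * wait ends early when rcu_gp_fqs_check_wake() observes RCU_GP_FLAG_FQS,
 * RCU_GP_FLAG_OVLD, or a fully quiescent root rcu_node structure.  A
 * wakeup satisfying none of these is treated as a stray signal and the
 * remainder of the previous FQS timeout is retained.
 */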
2040/*
2041 * Clean up after the old grace period.
2042 */
2043static noinline void rcu_gp_cleanup(void)
2044{
2045        int cpu;
2046        bool needgp = false;
2047        unsigned long gp_duration;
2048        unsigned long new_gp_seq;
2049        bool offloaded;
2050        struct rcu_data *rdp;
2051        struct rcu_node *rnp = rcu_get_root();
2052        struct swait_queue_head *sq;
2053
2054        WRITE_ONCE(rcu_state.gp_activity, jiffies);
2055        raw_spin_lock_irq_rcu_node(rnp);
2056        rcu_state.gp_end = jiffies;
2057        gp_duration = rcu_state.gp_end - rcu_state.gp_start;
2058        if (gp_duration > rcu_state.gp_max)
2059                rcu_state.gp_max = gp_duration;
2060
2061        /*
2062         * We know the grace period is complete, but to everyone else
2063         * it appears to still be ongoing.  But it is also the case
2064         * that to everyone else it looks like there is nothing that
2065         * they can do to advance the grace period.  It is therefore
2066         * safe for us to drop the lock in order to mark the grace
2067         * period as completed in all of the rcu_node structures.
2068         */
2069        raw_spin_unlock_irq_rcu_node(rnp);
2070
2071        /*
2072         * Propagate new ->gp_seq value to rcu_node structures so that
2073         * other CPUs don't have to wait until the start of the next grace
2074         * period to process their callbacks.  This also avoids some nasty
2075         * RCU grace-period initialization races by forcing the end of
2076         * the current grace period to be completely recorded in all of
2077         * the rcu_node structures before the beginning of the next grace
2078         * period is recorded in any of the rcu_node structures.
2079         */
2080        new_gp_seq = rcu_state.gp_seq;
2081        rcu_seq_end(&new_gp_seq);
2082        rcu_for_each_node_breadth_first(rnp) {
2083                raw_spin_lock_irq_rcu_node(rnp);
2084                if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
2085                        dump_blkd_tasks(rnp, 10);
2086                WARN_ON_ONCE(rnp->qsmask);
2087                WRITE_ONCE(rnp->gp_seq, new_gp_seq);
2088                rdp = this_cpu_ptr(&rcu_data);
2089                if (rnp == rdp->mynode)
2090                        needgp = __note_gp_changes(rnp, rdp) || needgp;
2091                /* smp_mb() provided by prior unlock-lock pair. */
2092                needgp = rcu_future_gp_cleanup(rnp) || needgp;
2093                // Reset overload indication for CPUs no longer overloaded
2094                if (rcu_is_leaf_node(rnp))
2095                        for_each_leaf_node_cpu_mask(rnp, cpu, rnp->cbovldmask) {
2096                                rdp = per_cpu_ptr(&rcu_data, cpu);
2097                                check_cb_ovld_locked(rdp, rnp);
2098                        }
2099                sq = rcu_nocb_gp_get(rnp);
2100                raw_spin_unlock_irq_rcu_node(rnp);
2101                rcu_nocb_gp_cleanup(sq);
2102                cond_resched_tasks_rcu_qs();
2103                WRITE_ONCE(rcu_state.gp_activity, jiffies);
2104                rcu_gp_slow(gp_cleanup_delay);
2105        }
2106        rnp = rcu_get_root();
2107        raw_spin_lock_irq_rcu_node(rnp); /* GP before ->gp_seq update. */
2108
2109        /* Declare grace period done, trace first to use old GP number. */
2110        trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
2111        rcu_seq_end(&rcu_state.gp_seq);
2112        ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
2113        WRITE_ONCE(rcu_state.gp_state, RCU_GP_IDLE);
2114        /* Check for GP requests since above loop. */
2115        rdp = this_cpu_ptr(&rcu_data);
2116        if (!needgp && ULONG_CMP_LT(rnp->gp_seq, rnp->gp_seq_needed)) {
2117                trace_rcu_this_gp(rnp, rdp, rnp->gp_seq_needed,
2118                                  TPS("CleanupMore"));
2119                needgp = true;
2120        }
2121        /* Advance CBs to reduce false positives below. */
2122        offloaded = rcu_rdp_is_offloaded(rdp);
2123        if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
2124                WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
2125                WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
2126                trace_rcu_grace_period(rcu_state.name,
2127                                       rcu_state.gp_seq,
2128                                       TPS("newreq"));
2129        } else {
2130                WRITE_ONCE(rcu_state.gp_flags,
2131                           rcu_state.gp_flags & RCU_GP_FLAG_INIT);
2132        }
2133        raw_spin_unlock_irq_rcu_node(rnp);
2134
2135        // If strict, make all CPUs aware of the end of the old grace period.
2136        if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2137                on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
2138}
2139
2140/*
2141 * Body of kthread that handles grace periods.
2142 */
2143static int __noreturn rcu_gp_kthread(void *unused)
2144{
2145        rcu_bind_gp_kthread();
2146        for (;;) {
2147
2148                /* Handle grace-period start. */
2149                for (;;) {
2150                        trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2151                                               TPS("reqwait"));
2152                        WRITE_ONCE(rcu_state.gp_state, RCU_GP_WAIT_GPS);
2153                        swait_event_idle_exclusive(rcu_state.gp_wq,
2154                                         READ_ONCE(rcu_state.gp_flags) &
2155                                         RCU_GP_FLAG_INIT);
2156                        rcu_gp_torture_wait();
2157                        WRITE_ONCE(rcu_state.gp_state, RCU_GP_DONE_GPS);
2158                        /* Locking provides needed memory barrier. */
2159                        if (rcu_gp_init())
2160                                break;
2161                        cond_resched_tasks_rcu_qs();
2162                        WRITE_ONCE(rcu_state.gp_activity, jiffies);
2163                        WARN_ON(signal_pending(current));
2164                        trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq,
2165                                               TPS("reqwaitsig"));
2166                }
2167
2168                /* Handle quiescent-state forcing. */
2169                rcu_gp_fqs_loop();
2170
2171                /* Handle grace-period end. */
2172                WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANUP);
2173                rcu_gp_cleanup();
2174                WRITE_ONCE(rcu_state.gp_state, RCU_GP_CLEANED);
2175        }
2176}
2177
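/*
 * Approximate state machine of the kthread above, as recorded in
 * rcu_state.gp_state:
 *
 *	RCU_GP_WAIT_GPS -> RCU_GP_DONE_GPS		(request received)
 *	  -> RCU_GP_ONOFF -> RCU_GP_INIT		(rcu_gp_init())
 *	  -> RCU_GP_WAIT_FQS <-> RCU_GP_DOING_FQS	(rcu_gp_fqs_loop())
 *	  -> RCU_GP_CLEANUP -> RCU_GP_CLEANED		(rcu_gp_cleanup())
 *
 * rcu_gp_cleanup() itself sets RCU_GP_IDLE once the just-ended grace
 * period has been recorded in every rcu_node structure.
 */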
2178/*
2179 * Report a full set of quiescent states to the rcu_state data structure.
2180 * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
2181 * another grace period is required.  Whether we wake the grace-period
2182 * kthread or it awakens itself for the next round of quiescent-state
2183 * forcing, that kthread will clean up after the just-completed grace
2184 * period.  Note that the caller must hold rnp->lock, which is released
2185 * before return.
2186 */
2187static void rcu_report_qs_rsp(unsigned long flags)
2188        __releases(rcu_get_root()->lock)
2189{
2190        raw_lockdep_assert_held_rcu_node(rcu_get_root());
2191        WARN_ON_ONCE(!rcu_gp_in_progress());
2192        WRITE_ONCE(rcu_state.gp_flags,
2193                   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2194        raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(), flags);
2195        rcu_gp_kthread_wake();
2196}
2197
2198/*
2199 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
2200 * Allows quiescent states for a group of CPUs to be reported at one go
2201 * to the specified rcu_node structure, though all the CPUs in the group
2202 * must be represented by the same rcu_node structure (which need not be a
2203 * leaf rcu_node structure, though it often will be).  The gps parameter
2204 * is the grace-period snapshot, which means that the quiescent states
2205 * are valid only if rnp->gp_seq is equal to gps.  That structure's lock
2206 * must be held upon entry, and it is released before return.
2207 *
2208 * As a special case, if mask is zero, the bit-already-cleared check is
2209 * disabled.  This allows propagating quiescent state due to resumed tasks
2210 * during grace-period initialization.
2211 */
2212static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
2213                              unsigned long gps, unsigned long flags)
2214        __releases(rnp->lock)
2215{
2216        unsigned long oldmask = 0;
2217        struct rcu_node *rnp_c;
2218
2219        raw_lockdep_assert_held_rcu_node(rnp);
2220
2221        /* Walk up the rcu_node hierarchy. */
2222        for (;;) {
2223                if ((!(rnp->qsmask & mask) && mask) || rnp->gp_seq != gps) {
2224
2225                        /*
2226                         * Our bit has already been cleared, or the
2227                         * relevant grace period is already over, so done.
2228                         */
2229                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2230                        return;
2231                }
2232                WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
2233                WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
2234                             rcu_preempt_blocked_readers_cgp(rnp));
2235                WRITE_ONCE(rnp->qsmask, rnp->qsmask & ~mask);
2236                trace_rcu_quiescent_state_report(rcu_state.name, rnp->gp_seq,
2237                                                 mask, rnp->qsmask, rnp->level,
2238                                                 rnp->grplo, rnp->grphi,
2239                                                 !!rnp->gp_tasks);
2240                if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
2241
2242                        /* Other bits still set at this level, so done. */
2243                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2244                        return;
2245                }
2246                rnp->completedqs = rnp->gp_seq;
2247                mask = rnp->grpmask;
2248                if (rnp->parent == NULL) {
2249
2250                        /* No more levels.  Exit loop holding root lock. */
2251
2252                        break;
2253                }
2254                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2255                rnp_c = rnp;
2256                rnp = rnp->parent;
2257                raw_spin_lock_irqsave_rcu_node(rnp, flags);
2258                oldmask = READ_ONCE(rnp_c->qsmask);
2259        }
2260
2261        /*
2262         * Get here if we are the last CPU to pass through a quiescent
2263         * state for this grace period.  Invoke rcu_report_qs_rsp()
2264         * to clean up and start the next grace period if one is needed.
2265         */
2266        rcu_report_qs_rsp(flags); /* releases rnp->lock. */
2267}
2268
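/*
 * Sketch of the upward propagation above: each level clears the reporting
 * group's bit in its ->qsmask, and the walk continues to the parent (using
 * this rcu_node structure's own ->grpmask) only once the level's ->qsmask
 * is zero and no readers block the current grace period.  When the root's
 * ->qsmask reaches zero, rcu_report_qs_rsp() wakes the grace-period
 * kthread to clean up after the now-complete grace period.
 */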
2269/*
2270 * Record a quiescent state for all tasks that were previously queued
2271 * on the specified rcu_node structure and that were blocking the current
2272 * RCU grace period.  The caller must hold the corresponding rnp->lock with
2273 * irqs disabled, and this lock is released upon return, but irqs remain
2274 * disabled.
2275 */
2276static void __maybe_unused
2277rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
2278        __releases(rnp->lock)
2279{
2280        unsigned long gps;
2281        unsigned long mask;
2282        struct rcu_node *rnp_p;
2283
2284        raw_lockdep_assert_held_rcu_node(rnp);
2285        if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RCU)) ||
2286            WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
2287            rnp->qsmask != 0) {
2288                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2289                return;  /* Still need more quiescent states! */
2290        }
2291
2292        rnp->completedqs = rnp->gp_seq;
2293        rnp_p = rnp->parent;
2294        if (rnp_p == NULL) {
2295                /*
2296                 * Only one rcu_node structure in the tree, so don't
2297                 * try to report up to its nonexistent parent!
2298                 */
2299                rcu_report_qs_rsp(flags);
2300                return;
2301        }
2302
2303        /* Report up the rest of the hierarchy, tracking current ->gp_seq. */
2304        gps = rnp->gp_seq;
2305        mask = rnp->grpmask;
2306        raw_spin_unlock_rcu_node(rnp);  /* irqs remain disabled. */
2307        raw_spin_lock_rcu_node(rnp_p);  /* irqs already disabled. */
2308        rcu_report_qs_rnp(mask, rnp_p, gps, flags);
2309}
2310
2311/*
2312 * Record a quiescent state for the specified CPU to that CPU's rcu_data
2313 * structure.  This must be called from the specified CPU.
2314 */
2315static void
2316rcu_report_qs_rdp(struct rcu_data *rdp)
2317{
2318        unsigned long flags;
2319        unsigned long mask;
2320        bool needwake = false;
2321        const bool offloaded = rcu_rdp_is_offloaded(rdp);
2322        struct rcu_node *rnp;
2323
2324        WARN_ON_ONCE(rdp->cpu != smp_processor_id());
2325        rnp = rdp->mynode;
2326        raw_spin_lock_irqsave_rcu_node(rnp, flags);
2327        if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
2328            rdp->gpwrap) {
2329
2330                /*
2331                 * The grace period in which this quiescent state was
2332                 * recorded has ended, so don't report it upwards.
2333                 * We will instead need a new quiescent state that lies
2334                 * within the current grace period.
2335                 */
2336                rdp->cpu_no_qs.b.norm = true;   /* need qs for new gp. */
2337                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2338                return;
2339        }
2340        mask = rdp->grpmask;
2341        rdp->core_needs_qs = false;
2342        if ((rnp->qsmask & mask) == 0) {
2343                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2344        } else {
2345                /*
2346                 * This GP can't end until this CPU checks in, so all of our
2347                 * callbacks can be processed during the next GP.
2348                 */
2349                if (!offloaded)
2350                        needwake = rcu_accelerate_cbs(rnp, rdp);
2351
2352                rcu_disable_urgency_upon_qs(rdp);
2353                rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2354                /* ^^^ Released rnp->lock */
2355                if (needwake)
2356                        rcu_gp_kthread_wake();
2357        }
2358}
2359
2360/*
2361 * Check to see if there is a new grace period of which this CPU
2362 * is not yet aware, and if so, set up local rcu_data state for it.
2363 * Otherwise, see if this CPU has just passed through its first
2364 * quiescent state for this grace period, and record that fact if so.
2365 */
2366static void
2367rcu_check_quiescent_state(struct rcu_data *rdp)
2368{
2369        /* Check for grace-period ends and beginnings. */
2370        note_gp_changes(rdp);
2371
2372        /*
2373         * Does this CPU still need to do its part for current grace period?
2374         * If no, return and let the other CPUs do their part as well.
2375         */
2376        if (!rdp->core_needs_qs)
2377                return;
2378
2379        /*
2380         * Was there a quiescent state since the beginning of the grace
2381         * period? If no, then exit and wait for the next call.
2382         */
2383        if (rdp->cpu_no_qs.b.norm)
2384                return;
2385
2386        /*
2387         * Tell RCU we are done (but rcu_report_qs_rdp() will be the
2388         * judge of that).
2389         */
2390        rcu_report_qs_rdp(rdp);
2391}
2392
2393/*
2394 * Near the end of the offline process.  Trace the fact that this CPU
2395 * is going offline.
2396 */
2397int rcutree_dying_cpu(unsigned int cpu)
2398{
2399        bool blkd;
2400        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
2401        struct rcu_node *rnp = rdp->mynode;
2402
2403        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2404                return 0;
2405
2406        blkd = !!(rnp->qsmask & rdp->grpmask);
2407        trace_rcu_grace_period(rcu_state.name, READ_ONCE(rnp->gp_seq),
2408                               blkd ? TPS("cpuofl-bgp") : TPS("cpuofl"));
2409        return 0;
2410}
2411
2412/*
2413 * All CPUs for the specified rcu_node structure have gone offline,
2414 * and all tasks that were preempted within an RCU read-side critical
2415 * section while running on one of those CPUs have since exited their RCU
2416 * read-side critical section.  Some other CPU is reporting this fact with
2417 * the specified rcu_node structure's ->lock held and interrupts disabled.
2418 * This function therefore goes up the tree of rcu_node structures,
2419 * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
2420 * the leaf rcu_node structure's ->qsmaskinit field has already been
2421 * updated.
2422 *
2423 * This function does check that the specified rcu_node structure has
2424 * all CPUs offline and no blocked tasks, so it is OK to invoke it
2425 * prematurely.  That said, invoking it after the fact will cost you
2426 * a needless lock acquisition.  So once it has done its work, don't
2427 * invoke it again.
2428 */
2429static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
2430{
2431        long mask;
2432        struct rcu_node *rnp = rnp_leaf;
2433
2434        raw_lockdep_assert_held_rcu_node(rnp_leaf);
2435        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
2436            WARN_ON_ONCE(rnp_leaf->qsmaskinit) ||
2437            WARN_ON_ONCE(rcu_preempt_has_tasks(rnp_leaf)))
2438                return;
2439        for (;;) {
2440                mask = rnp->grpmask;
2441                rnp = rnp->parent;
2442                if (!rnp)
2443                        break;
2444                raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
2445                rnp->qsmaskinit &= ~mask;
2446                /* Between grace periods, so better already be zero! */
2447                WARN_ON_ONCE(rnp->qsmask);
2448                if (rnp->qsmaskinit) {
2449                        raw_spin_unlock_rcu_node(rnp);
2450                        /* irqs remain disabled. */
2451                        return;
2452                }
2453                raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
2454        }
2455}
2456
2457/*
2458 * The CPU has been completely removed, and some other CPU is reporting
2459 * this fact from process context.  Do the remainder of the cleanup.
2460 * There can only be one CPU hotplug operation at a time, so no need for
2461 * explicit locking.
2462 */
2463int rcutree_dead_cpu(unsigned int cpu)
2464{
2465        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
2466        struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
2467
2468        if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
2469                return 0;
2470
2471        WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus - 1);
2472        /* Adjust any no-longer-needed kthreads. */
2473        rcu_boost_kthread_setaffinity(rnp, -1);
2474        /* Do any needed no-CB deferred wakeups from this CPU. */
2475        do_nocb_deferred_wakeup(per_cpu_ptr(&rcu_data, cpu));
2476
2477        // Stop-machine done, so allow nohz_full to disable tick.
2478        tick_dep_clear(TICK_DEP_BIT_RCU);
2479        return 0;
2480}
2481
2482/*
2483 * Invoke any RCU callbacks that have made it to the end of their grace
2484 * period.  Throttle as specified by rdp->blimit.
2485 */
2486static void rcu_do_batch(struct rcu_data *rdp)
2487{
2488        int div;
2489        bool __maybe_unused empty;
2490        unsigned long flags;
2491        const bool offloaded = rcu_rdp_is_offloaded(rdp);
2492        struct rcu_head *rhp;
2493        struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
2494        long bl, count = 0;
2495        long pending, tlimit = 0;
2496
2497        /* If no callbacks are ready, just return. */
2498        if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
2499                trace_rcu_batch_start(rcu_state.name,
2500                                      rcu_segcblist_n_cbs(&rdp->cblist), 0);
2501                trace_rcu_batch_end(rcu_state.name, 0,
2502                                    !rcu_segcblist_empty(&rdp->cblist),
2503                                    need_resched(), is_idle_task(current),
2504                                    rcu_is_callbacks_kthread());
2505                return;
2506        }
2507
2508        /*
2509         * Extract the list of ready callbacks, disabling interrupts to
2510         * prevent races with call_rcu() from interrupt handlers.  Leave the
2511         * callback counts, as rcu_barrier() needs to be conservative.
2512         */
2513        local_irq_save(flags);
2514        rcu_nocb_lock(rdp);
2515        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
2516        pending = rcu_segcblist_n_cbs(&rdp->cblist);
2517        div = READ_ONCE(rcu_divisor);
2518        div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
2519        bl = max(rdp->blimit, pending >> div);
2520        if (unlikely(bl > 100)) {
2521                long rrn = READ_ONCE(rcu_resched_ns);
2522
2523                rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
2524                tlimit = local_clock() + rrn;
2525        }
2526        trace_rcu_batch_start(rcu_state.name,
2527                              rcu_segcblist_n_cbs(&rdp->cblist), bl);
2528        rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
2529        if (offloaded)
2530                rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2531
2532        trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCbDequeued"));
2533        rcu_nocb_unlock_irqrestore(rdp, flags);
2534
2535        /* Invoke callbacks. */
2536        tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2537        rhp = rcu_cblist_dequeue(&rcl);
2538
2539        for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
2540                rcu_callback_t f;
2541
2542                count++;
2543                debug_rcu_head_unqueue(rhp);
2544
2545                rcu_lock_acquire(&rcu_callback_map);
2546                trace_rcu_invoke_callback(rcu_state.name, rhp);
2547
2548                f = rhp->func;
2549                WRITE_ONCE(rhp->func, (rcu_callback_t)0L);
2550                f(rhp);
2551
2552                rcu_lock_release(&rcu_callback_map);
2553
2554                /*
2555                 * Stop only if limit reached and CPU has something to do.
2556                 */
2557                if (count >= bl && !offloaded &&
2558                    (need_resched() ||
2559                     (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
2560                        break;
2561                if (unlikely(tlimit)) {
2562                        /* only call local_clock() every 32 callbacks */
2563                        if (likely((count & 31) || local_clock() < tlimit))
2564                                continue;
2565                        /* Exceeded the time limit, so leave. */
2566                        break;
2567                }
2568                if (!in_serving_softirq()) {
2569                        local_bh_enable();
2570                        lockdep_assert_irqs_enabled();
2571                        cond_resched_tasks_rcu_qs();
2572                        lockdep_assert_irqs_enabled();
2573                        local_bh_disable();
2574                }
2575        }
2576
2577        local_irq_save(flags);
2578        rcu_nocb_lock(rdp);
2579        rdp->n_cbs_invoked += count;
2580        trace_rcu_batch_end(rcu_state.name, count, !!rcl.head, need_resched(),
2581                            is_idle_task(current), rcu_is_callbacks_kthread());
2582
2583        /* Update counts and requeue any remaining callbacks. */
2584        rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
2585        rcu_segcblist_add_len(&rdp->cblist, -count);
2586
2587        /* Reinstate batch limit if we have worked down the excess. */
2588        count = rcu_segcblist_n_cbs(&rdp->cblist);
2589        if (rdp->blimit >= DEFAULT_MAX_RCU_BLIMIT && count <= qlowmark)
2590                rdp->blimit = blimit;
2591
2592        /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
2593        if (count == 0 && rdp->qlen_last_fqs_check != 0) {
2594                rdp->qlen_last_fqs_check = 0;
2595                rdp->n_force_qs_snap = rcu_state.n_force_qs;
2596        } else if (count < rdp->qlen_last_fqs_check - qhimark)
2597                rdp->qlen_last_fqs_check = count;
2598
2599        /*
2600         * The following usually indicates a double call_rcu().  To track
2601         * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
2602         */
2603        empty = rcu_segcblist_empty(&rdp->cblist);
2604        WARN_ON_ONCE(count == 0 && !empty);
2605        WARN_ON_ONCE(!IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
2606                     count != 0 && empty);
2607        WARN_ON_ONCE(count == 0 && rcu_segcblist_n_segment_cbs(&rdp->cblist) != 0);
2608        WARN_ON_ONCE(!empty && rcu_segcblist_n_segment_cbs(&rdp->cblist) == 0);
2609
2610        rcu_nocb_unlock_irqrestore(rdp, flags);
2611
2612        /* Re-invoke RCU core processing if there are callbacks remaining. */
2613        if (!offloaded && rcu_segcblist_ready_cbs(&rdp->cblist))
2614                invoke_rcu_core();
2615        tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2616}
2617
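/*
 * The throttling in rcu_do_batch() above is, roughly, a batch limit of
 * max(rdp->blimit, pending >> rcu_divisor), so large backlogs are drained
 * more aggressively, plus, for batch limits above 100 callbacks, a time
 * bound of rcu_resched_ns (clamped to between one millisecond and one
 * second) that is checked only once every 32 callbacks to limit
 * local_clock() overhead.
 */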
2618/*
2619 * This function is invoked from each scheduling-clock interrupt,
2620 * and checks to see if this CPU is in a non-context-switch quiescent
2621 * state, for example, user mode or idle loop.  It also schedules RCU
2622 * core processing.  If the current grace period has gone on too long,
2623 * it will ask the scheduler to manufacture a context switch for the sole
2624 * purpose of providing the needed quiescent state.
2625 */
2626void rcu_sched_clock_irq(int user)
2627{
2628        trace_rcu_utilization(TPS("Start scheduler-tick"));
2629        lockdep_assert_irqs_disabled();
2630        raw_cpu_inc(rcu_data.ticks_this_gp);
2631        /* The load-acquire pairs with the store-release setting to true. */
2632        if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
2633                /* Idle and userspace execution already are quiescent states. */
2634                if (!rcu_is_cpu_rrupt_from_idle() && !user) {
2635                        set_tsk_need_resched(current);
2636                        set_preempt_need_resched();
2637                }
2638                __this_cpu_write(rcu_data.rcu_urgent_qs, false);
2639        }
2640        rcu_flavor_sched_clock_irq(user);
2641        if (rcu_pending(user))
2642                invoke_rcu_core();
2643        lockdep_assert_irqs_disabled();
2644
2645        trace_rcu_utilization(TPS("End scheduler-tick"));
2646}
2647
2648/*
2649 * Scan the leaf rcu_node structures.  For each structure on which all
2650 * CPUs have reported a quiescent state and on which there are tasks
2651 * blocking the current grace period, initiate RCU priority boosting.
2652 * Otherwise, invoke the specified function to check dyntick state for
2653 * each CPU that has not yet reported a quiescent state.
2654 */
2655static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
2656{
2657        int cpu;
2658        unsigned long flags;
2659        unsigned long mask;
2660        struct rcu_data *rdp;
2661        struct rcu_node *rnp;
2662
2663        rcu_state.cbovld = rcu_state.cbovldnext;
2664        rcu_state.cbovldnext = false;
2665        rcu_for_each_leaf_node(rnp) {
2666                cond_resched_tasks_rcu_qs();
2667                mask = 0;
2668                raw_spin_lock_irqsave_rcu_node(rnp, flags);
2669                rcu_state.cbovldnext |= !!rnp->cbovldmask;
2670                if (rnp->qsmask == 0) {
2671                        if (rcu_preempt_blocked_readers_cgp(rnp)) {
2672                                /*
2673                                 * No point in scanning bits because they
2674                                 * are all zero.  But we might need to
2675                                 * priority-boost blocked readers.
2676                                 */
2677                                rcu_initiate_boost(rnp, flags);
2678                                /* rcu_initiate_boost() releases rnp->lock */
2679                                continue;
2680                        }
2681                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2682                        continue;
2683                }
2684                for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
2685                        rdp = per_cpu_ptr(&rcu_data, cpu);
2686                        if (f(rdp)) {
2687                                mask |= rdp->grpmask;
2688                                rcu_disable_urgency_upon_qs(rdp);
2689                        }
2690                }
2691                if (mask != 0) {
2692                        /* Idle/offline CPUs, report (releases rnp->lock). */
2693                        rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
2694                } else {
2695                        /* Nothing to do here, so just drop the lock. */
2696                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
2697                }
2698        }
2699}
2700
2701/*
2702 * Force quiescent states on reluctant CPUs, and also detect which
2703 * CPUs are in dyntick-idle mode.
2704 */
2705void rcu_force_quiescent_state(void)
2706{
2707        unsigned long flags;
2708        bool ret;
2709        struct rcu_node *rnp;
2710        struct rcu_node *rnp_old = NULL;
2711
2712        /* Funnel through hierarchy to reduce memory contention. */
2713        rnp = __this_cpu_read(rcu_data.mynode);
2714        for (; rnp != NULL; rnp = rnp->parent) {
2715                ret = (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) ||
2716                       !raw_spin_trylock(&rnp->fqslock);
2717                if (rnp_old != NULL)
2718                        raw_spin_unlock(&rnp_old->fqslock);
2719                if (ret)
2720                        return;
2721                rnp_old = rnp;
2722        }
2723        /* rnp_old == rcu_get_root(), rnp == NULL. */
2724
2725        /* Reached the root of the rcu_node tree, acquire lock. */
2726        raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
2727        raw_spin_unlock(&rnp_old->fqslock);
2728        if (READ_ONCE(rcu_state.gp_flags) & RCU_GP_FLAG_FQS) {
2729                raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2730                return;  /* Someone beat us to it. */
2731        }
2732        WRITE_ONCE(rcu_state.gp_flags,
2733                   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
2734        raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
2735        rcu_gp_kthread_wake();
2736}
2737EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
2738
2739// Workqueue handler for an RCU reader for kernels enforcing strict RCU
2740// grace periods.
2741static void strict_work_handler(struct work_struct *work)
2742{
2743        rcu_read_lock();
2744        rcu_read_unlock();
2745}
2746
2747/* Perform RCU core processing work for the current CPU.  */
2748static __latent_entropy void rcu_core(void)
2749{
2750        unsigned long flags;
2751        struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
2752        struct rcu_node *rnp = rdp->mynode;
2753        const bool do_batch = !rcu_segcblist_completely_offloaded(&rdp->cblist);
2754
2755        if (cpu_is_offline(smp_processor_id()))
2756                return;
2757        trace_rcu_utilization(TPS("Start RCU core"));
2758        WARN_ON_ONCE(!rdp->beenonline);
2759
2760        /* Report any deferred quiescent states if preemption enabled. */
2761        if (!(preempt_count() & PREEMPT_MASK)) {
2762                rcu_preempt_deferred_qs(current);
2763        } else if (rcu_preempt_need_deferred_qs(current)) {
2764                set_tsk_need_resched(current);
2765                set_preempt_need_resched();
2766        }
2767
2768        /* Update RCU state based on any recent quiescent states. */
2769        rcu_check_quiescent_state(rdp);
2770
2771        /* No grace period in progress and callbacks not yet assigned one? */
2772        if (!rcu_gp_in_progress() &&
2773            rcu_segcblist_is_enabled(&rdp->cblist) && do_batch) {
2774                rcu_nocb_lock_irqsave(rdp, flags);
2775                if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
2776                        rcu_accelerate_cbs_unlocked(rnp, rdp);
2777                rcu_nocb_unlock_irqrestore(rdp, flags);
2778        }
2779
2780        rcu_check_gp_start_stall(rnp, rdp, rcu_jiffies_till_stall_check());
2781
2782        /* If there are callbacks ready, invoke them. */
2783        if (do_batch && rcu_segcblist_ready_cbs(&rdp->cblist) &&
2784            likely(READ_ONCE(rcu_scheduler_fully_active)))
2785                rcu_do_batch(rdp);
2786
2787        /* Do any needed deferred wakeups of rcuo kthreads. */
2788        do_nocb_deferred_wakeup(rdp);
2789        trace_rcu_utilization(TPS("End RCU core"));
2790
2791        // If strict GPs, schedule an RCU reader in a clean environment.
2792        if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
2793                queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
2794}
2795
2796static void rcu_core_si(struct softirq_action *h)
2797{
2798        rcu_core();
2799}
2800
2801static void rcu_wake_cond(struct task_struct *t, int status)
2802{
2803        /*
2804         * If the thread is yielding, only wake it when this
2805         * is invoked from the idle task.
2806         */
2807        if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
2808                wake_up_process(t);
2809}
2810
2811static void invoke_rcu_core_kthread(void)
2812{
2813        struct task_struct *t;
2814        unsigned long flags;
2815
2816        local_irq_save(flags);
2817        __this_cpu_write(rcu_data.rcu_cpu_has_work, 1);
2818        t = __this_cpu_read(rcu_data.rcu_cpu_kthread_task);
2819        if (t != NULL && t != current)
2820                rcu_wake_cond(t, __this_cpu_read(rcu_data.rcu_cpu_kthread_status));
2821        local_irq_restore(flags);
2822}
2823
2824/*
2825 * Wake up this CPU's rcuc kthread to do RCU core processing.
2826 */
2827static void invoke_rcu_core(void)
2828{
2829        if (!cpu_online(smp_processor_id()))
2830                return;
2831        if (use_softirq)
2832                raise_softirq(RCU_SOFTIRQ);
2833        else
2834                invoke_rcu_core_kthread();
2835}
2836
2837static void rcu_cpu_kthread_park(unsigned int cpu)
2838{
2839        per_cpu(rcu_data.rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
2840}
2841
2842static int rcu_cpu_kthread_should_run(unsigned int cpu)
2843{
2844        return __this_cpu_read(rcu_data.rcu_cpu_has_work);
2845}
2846
2847/*
2848 * Per-CPU kernel thread that invokes RCU callbacks.  This replaces
2849 * the RCU softirq used in configurations of RCU that do not support RCU
2850 * priority boosting.
2851 */
2852static void rcu_cpu_kthread(unsigned int cpu)
2853{
2854        unsigned int *statusp = this_cpu_ptr(&rcu_data.rcu_cpu_kthread_status);
2855        char work, *workp = this_cpu_ptr(&rcu_data.rcu_cpu_has_work);
2856        int spincnt;
2857
2858        trace_rcu_utilization(TPS("Start CPU kthread@rcu_run"));
2859        for (spincnt = 0; spincnt < 10; spincnt++) {
2860                local_bh_disable();
2861                *statusp = RCU_KTHREAD_RUNNING;
2862                local_irq_disable();
2863                work = *workp;
2864                *workp = 0;
2865                local_irq_enable();
2866                if (work)
2867                        rcu_core();
2868                local_bh_enable();
2869                if (*workp == 0) {
2870                        trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
2871                        *statusp = RCU_KTHREAD_WAITING;
2872                        return;
2873                }
2874        }
2875        *statusp = RCU_KTHREAD_YIELDING;
2876        trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
2877        schedule_timeout_idle(2);
2878        trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
2879        *statusp = RCU_KTHREAD_WAITING;
2880}
2881
2882static struct smp_hotplug_thread rcu_cpu_thread_spec = {
2883        .store                  = &rcu_data.rcu_cpu_kthread_task,
2884        .thread_should_run      = rcu_cpu_kthread_should_run,
2885        .thread_fn              = rcu_cpu_kthread,
2886        .thread_comm            = "rcuc/%u",
2887        .setup                  = rcu_cpu_kthread_setup,
2888        .park                   = rcu_cpu_kthread_park,
2889};
2890
2891/*
2892 * Spawn per-CPU RCU core processing kthreads.
2893 */
2894static int __init rcu_spawn_core_kthreads(void)
2895{
2896        int cpu;
2897
2898        for_each_possible_cpu(cpu)
2899                per_cpu(rcu_data.rcu_cpu_has_work, cpu) = 0;
2900        if (!IS_ENABLED(CONFIG_RCU_BOOST) && use_softirq)
2901                return 0;
2902        WARN_ONCE(smpboot_register_percpu_thread(&rcu_cpu_thread_spec),
2903                  "%s: Could not start rcuc kthread, OOM is now expected behavior\n", __func__);
2904        return 0;
2905}
2906
2907/*
2908 * Handle any core-RCU processing required by a call_rcu() invocation.
2909 */
2910static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
2911                            unsigned long flags)
2912{
2913        /*
2914         * If called from an extended quiescent state, invoke the RCU
2915         * core in order to force a re-evaluation of RCU's idleness.
2916         */
2917        if (!rcu_is_watching())
2918                invoke_rcu_core();
2919
2920        /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
2921        if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
2922                return;
2923
2924        /*
2925         * Force the grace period if too many callbacks or too long waiting.
2926         * Enforce hysteresis, and don't invoke rcu_force_quiescent_state()
2927         * if some other CPU has recently done so.  Also, don't bother
2928         * invoking rcu_force_quiescent_state() if the newly enqueued callback
2929         * is the only one waiting for a grace period to complete.
2930         */
2931        if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
2932                     rdp->qlen_last_fqs_check + qhimark)) {
2933
2934                /* Are we ignoring a completed grace period? */
2935                note_gp_changes(rdp);
2936
2937                /* Start a new grace period if one not already started. */
2938                if (!rcu_gp_in_progress()) {
2939                        rcu_accelerate_cbs_unlocked(rdp->mynode, rdp);
2940                } else {
2941                        /* Give the grace period a kick. */
2942                        rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
2943                        if (rcu_state.n_force_qs == rdp->n_force_qs_snap &&
2944                            rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
2945                                rcu_force_quiescent_state();
2946                        rdp->n_force_qs_snap = rcu_state.n_force_qs;
2947                        rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
2948                }
2949        }
2950}
2951
2952/*
2953 * RCU callback function to leak a callback.
2954 */
2955static void rcu_leak_callback(struct rcu_head *rhp)
2956{
2957}
2958
2959/*
2960 * Check and if necessary update the leaf rcu_node structure's
2961 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2962 * number of queued RCU callbacks.  The caller must hold the leaf rcu_node
2963 * structure's ->lock.
2964 */
2965static void check_cb_ovld_locked(struct rcu_data *rdp, struct rcu_node *rnp)
2966{
2967        raw_lockdep_assert_held_rcu_node(rnp);
2968        if (qovld_calc <= 0)
2969                return; // Early boot and wildcard value set.
2970        if (rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc)
2971                WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask | rdp->grpmask);
2972        else
2973                WRITE_ONCE(rnp->cbovldmask, rnp->cbovldmask & ~rdp->grpmask);
2974}
2975
2976/*
2977 * Check and if necessary update the leaf rcu_node structure's
2978 * ->cbovldmask bit corresponding to the current CPU based on that CPU's
2979 * number of queued RCU callbacks.  No locks need be held, but the
2980 * caller must have disabled interrupts.
2981 *
2982 * Note that this function ignores the possibility that there are a lot
2983 * of callbacks all of which have already seen the end of their respective
2984 * grace periods.  This omission is due to the need for no-CBs CPUs to
2985 * be holding ->nocb_lock to do this check, which is too heavy for a
2986 * common-case operation.
2987 */
2988static void check_cb_ovld(struct rcu_data *rdp)
2989{
2990        struct rcu_node *const rnp = rdp->mynode;
2991
2992        if (qovld_calc <= 0 ||
2993            ((rcu_segcblist_n_cbs(&rdp->cblist) >= qovld_calc) ==
2994             !!(READ_ONCE(rnp->cbovldmask) & rdp->grpmask)))
2995                return; // Early boot wildcard value or already set correctly.
2996        raw_spin_lock_rcu_node(rnp);
2997        check_cb_ovld_locked(rdp, rnp);
2998        raw_spin_unlock_rcu_node(rnp);
2999}
3000
3001/* Helper function for call_rcu() and friends.  */
3002static void
3003__call_rcu(struct rcu_head *head, rcu_callback_t func)
3004{
3005        static atomic_t doublefrees;
3006        unsigned long flags;
3007        struct rcu_data *rdp;
3008        bool was_alldone;
3009
3010        /* Misaligned rcu_head! */
3011        WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
3012
3013        if (debug_rcu_head_queue(head)) {
3014                /*
3015                 * Probable double call_rcu(), so leak the callback.
3016                 * Use the rcu:rcu_callback trace event to find the previous
3017                 * time this callback was passed to __call_rcu().
3018                 */
3019                if (atomic_inc_return(&doublefrees) < 4) {
3020                        pr_err("%s(): Double-freed CB %p->%pS()!!!  ", __func__, head, head->func);
3021                        mem_dump_obj(head);
3022                }
3023                WRITE_ONCE(head->func, rcu_leak_callback);
3024                return;
3025        }
3026        head->func = func;
3027        head->next = NULL;
3028        local_irq_save(flags);
3029        kasan_record_aux_stack(head);
3030        rdp = this_cpu_ptr(&rcu_data);
3031
3032        /* Add the callback to our list. */
3033        if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist))) {
3034                // This can trigger due to call_rcu() from offline CPU:
3035                WARN_ON_ONCE(rcu_scheduler_active != RCU_SCHEDULER_INACTIVE);
3036                WARN_ON_ONCE(!rcu_is_watching());
3037                // Very early boot, before rcu_init().  Initialize if needed
3038                // and then drop through to queue the callback.
3039                if (rcu_segcblist_empty(&rdp->cblist))
3040                        rcu_segcblist_init(&rdp->cblist);
3041        }
3042
3043        check_cb_ovld(rdp);
3044        if (rcu_nocb_try_bypass(rdp, head, &was_alldone, flags))
3045                return; // Enqueued onto ->nocb_bypass, so just leave.
3046        // If no-CBs CPU gets here, rcu_nocb_try_bypass() acquired ->nocb_lock.
3047        rcu_segcblist_enqueue(&rdp->cblist, head);
3048        if (__is_kvfree_rcu_offset((unsigned long)func))
3049                trace_rcu_kvfree_callback(rcu_state.name, head,
3050                                         (unsigned long)func,
3051                                         rcu_segcblist_n_cbs(&rdp->cblist));
3052        else
3053                trace_rcu_callback(rcu_state.name, head,
3054                                   rcu_segcblist_n_cbs(&rdp->cblist));
3055
3056        trace_rcu_segcb_stats(&rdp->cblist, TPS("SegCBQueued"));
3057
3058        /* Go handle any RCU core processing required. */
3059        if (unlikely(rcu_rdp_is_offloaded(rdp))) {
3060                __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
3061        } else {
3062                __call_rcu_core(rdp, head, flags);
3063                local_irq_restore(flags);
3064        }
3065}
3066
3067/**
3068 * call_rcu() - Queue an RCU callback for invocation after a grace period.
3069 * @head: structure to be used for queueing the RCU updates.
3070 * @func: actual callback function to be invoked after the grace period
3071 *
3072 * The callback function will be invoked some time after a full grace
3073 * period elapses, in other words after all pre-existing RCU read-side
3074 * critical sections have completed.  However, the callback function
3075 * might well execute concurrently with RCU read-side critical sections
3076 * that started after call_rcu() was invoked.
3077 *
3078 * RCU read-side critical sections are delimited by rcu_read_lock()
3079 * and rcu_read_unlock(), and may be nested.  In addition, but only in
3080 * v5.0 and later, regions of code across which interrupts, preemption,
3081 * or softirqs have been disabled also serve as RCU read-side critical
3082 * sections.  This includes hardware interrupt handlers, softirq handlers,
3083 * and NMI handlers.
3084 *
3085 * Note that all CPUs must agree that the grace period extended beyond
3086 * all pre-existing RCU read-side critical sections.  On systems with more
3087 * than one CPU, this means that when "func()" is invoked, each CPU is
3088 * guaranteed to have executed a full memory barrier since the end of its
3089 * last RCU read-side critical section whose beginning preceded the call
3090 * to call_rcu().  It also means that each CPU executing an RCU read-side
3091 * critical section that continues beyond the start of "func()" must have
3092 * executed a memory barrier after the call_rcu() but before the beginning
3093 * of that RCU read-side critical section.  Note that these guarantees
3094 * include CPUs that are offline, idle, or executing in user mode, as
3095 * well as CPUs that are executing in the kernel.
3096 *
3097 * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
3098 * resulting RCU callback function "func()", then both CPU A and CPU B are
3099 * guaranteed to execute a full memory barrier during the time interval
3100 * between the call to call_rcu() and the invocation of "func()" -- even
3101 * if CPU A and CPU B are the same CPU (but again only if the system has
3102 * more than one CPU).
3103 *
3104 * Implementation of these memory-ordering guarantees is described here:
3105 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3106 */
3107void call_rcu(struct rcu_head *head, rcu_callback_t func)
3108{
3109        __call_rcu(head, func);
3110}
3111EXPORT_SYMBOL_GPL(call_rcu);
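
    /*
     * A minimal usage sketch of call_rcu(), loosely following the examples
     * in Documentation/RCU/whatisRCU.rst.  The names struct foo, gbl_foo,
     * old_fp, new_fp, and foo_reclaim() are illustrative assumptions, not
     * definitions from this file:
     *
     *   struct foo {
     *           int data;
     *           struct rcu_head rcu;
     *   };
     *
     *   static void foo_reclaim(struct rcu_head *rhp)
     *   {
     *           struct foo *fp = container_of(rhp, struct foo, rcu);
     *
     *           kfree(fp);
     *   }
     *
     *   // Updater: unpublish the old element, then defer its freeing
     *   // until all pre-existing readers have finished.
     *   rcu_assign_pointer(gbl_foo, new_fp);
     *   call_rcu(&old_fp->rcu, foo_reclaim);
     */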
3112
3113
3114/* Maximum number of jiffies to wait before draining a batch. */
3115#define KFREE_DRAIN_JIFFIES (HZ / 50)
3116#define KFREE_N_BATCHES 2
3117#define FREE_N_CHANNELS 2
3118
3119/**
3120 * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
3121 * @nr_records: Number of active pointers in the array
3122 * @next: Next bulk object in the block chain
3123 * @records: Array of the kvfree_rcu() pointers
3124 */
3125struct kvfree_rcu_bulk_data {
3126        unsigned long nr_records;
3127        struct kvfree_rcu_bulk_data *next;
3128        void *records[];
3129};
3130
3131/*
3132 * This macro defines how many entries the "records" array
3133 * will contain.  It is chosen so that the resulting
3134 * kvfree_rcu_bulk_data structure occupies exactly one page.
3135 */
3136#define KVFREE_BULK_MAX_ENTR \
3137        ((PAGE_SIZE - sizeof(struct kvfree_rcu_bulk_data)) / sizeof(void *))
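
    /*
     * As a worked example, assuming a 64-bit kernel with 4 KiB pages: the
     * kvfree_rcu_bulk_data header is sizeof(unsigned long) + sizeof(void *)
     * = 16 bytes, so KVFREE_BULK_MAX_ENTR = (4096 - 16) / 8 = 510 pointers,
     * making a fully populated block exactly one page in size.
     */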
3138
3139/**
3140 * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
3141 * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
3142 * @head_free: List of kfree_rcu() objects waiting for a grace period
3143 * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
3144 * @krcp: Pointer to @kfree_rcu_cpu structure
3145 */
3146
3147struct kfree_rcu_cpu_work {
3148        struct rcu_work rcu_work;
3149        struct rcu_head *head_free;
3150        struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS];
3151        struct kfree_rcu_cpu *krcp;
3152};
3153
3154/**
3155 * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
3156 * @head: List of kfree_rcu() objects not yet waiting for a grace period
3157 * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
3158 * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
3159 * @lock: Synchronize access to this structure
3160 * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
3161 * @monitor_todo: Tracks whether a @monitor_work delayed work is pending
3162 * @initialized: The @rcu_work fields have been initialized
3163 * @count: Number of objects for which a grace period has not yet started
3164 * @bkvcache:
3165 *      A simple cache list that contains objects for reuse.
3166 *      In order to save some per-CPU space the list is singly linked.
3167 *      Even though it is lockless, accesses to it have to be protected by
3168 *      the per-CPU lock.
3169 * @page_cache_work: A work to refill the cache when it is empty
3170 * @backoff_page_cache_fill: Delay cache refills
3171 * @work_in_progress: Indicates that page_cache_work is running
3172 * @hrtimer: A hrtimer for scheduling a page_cache_work
3173 * @nr_bkv_objs: number of allocated objects at @bkvcache.
3174 *
3175 * This is a per-CPU structure.  The reason that it is not included in
3176 * the rcu_data structure is to permit this code to be extracted from
3177 * the RCU files.  Such extraction could allow further optimization of
3178 * the interactions with the slab allocators.
3179 */
3180struct kfree_rcu_cpu {
3181        struct rcu_head *head;
3182        struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS];
3183        struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
3184        raw_spinlock_t lock;
3185        struct delayed_work monitor_work;
3186        bool monitor_todo;
3187        bool initialized;
3188        int count;
3189
3190        struct delayed_work page_cache_work;
3191        atomic_t backoff_page_cache_fill;
3192        atomic_t work_in_progress;
3193        struct hrtimer hrtimer;
3194
3195        struct llist_head bkvcache;
3196        int nr_bkv_objs;
3197};
3198
3199static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc) = {
3200        .lock = __RAW_SPIN_LOCK_UNLOCKED(krc.lock),
3201};
3202
3203static __always_inline void
3204debug_rcu_bhead_unqueue(struct kvfree_rcu_bulk_data *bhead)
3205{
3206#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
3207        int i;
3208
3209        for (i = 0; i < bhead->nr_records; i++)
3210                debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
3211#endif
3212}
3213
3214static inline struct kfree_rcu_cpu *
3215krc_this_cpu_lock(unsigned long *flags)
3216{
3217        struct kfree_rcu_cpu *krcp;
3218
3219        local_irq_save(*flags); // For safely calling this_cpu_ptr().
3220        krcp = this_cpu_ptr(&krc);
3221        raw_spin_lock(&krcp->lock);
3222
3223        return krcp;
3224}
3225
3226static inline void
3227krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags)
3228{
3229        raw_spin_unlock_irqrestore(&krcp->lock, flags);
3230}
3231
3232static inline struct kvfree_rcu_bulk_data *
3233get_cached_bnode(struct kfree_rcu_cpu *krcp)
3234{
3235        if (!krcp->nr_bkv_objs)
3236                return NULL;
3237
3238        WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1);
3239        return (struct kvfree_rcu_bulk_data *)
3240                llist_del_first(&krcp->bkvcache);
3241}
3242
3243static inline bool
3244put_cached_bnode(struct kfree_rcu_cpu *krcp,
3245        struct kvfree_rcu_bulk_data *bnode)
3246{
3247        // Check the limit.
3248        if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
3249                return false;
3250
3251        llist_add((struct llist_node *) bnode, &krcp->bkvcache);
3252        WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1);
3253        return true;
3254}
3255
3256static int
3257drain_page_cache(struct kfree_rcu_cpu *krcp)
3258{
3259        unsigned long flags;
3260        struct llist_node *page_list, *pos, *n;
3261        int freed = 0;
3262
3263        raw_spin_lock_irqsave(&krcp->lock, flags);
3264        page_list = llist_del_all(&krcp->bkvcache);
3265        WRITE_ONCE(krcp->nr_bkv_objs, 0);
3266        raw_spin_unlock_irqrestore(&krcp->lock, flags);
3267
3268        llist_for_each_safe(pos, n, page_list) {
3269                free_page((unsigned long)pos);
3270                freed++;
3271        }
3272
3273        return freed;
3274}
3275
3276/*
3277 * This function is invoked in workqueue context after a grace period.
3278 * It frees all the objects queued on ->bkvhead_free or ->head_free.
3279 */
3280static void kfree_rcu_work(struct work_struct *work)
3281{
3282        unsigned long flags;
3283        struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext;
3284        struct rcu_head *head, *next;
3285        struct kfree_rcu_cpu *krcp;
3286        struct kfree_rcu_cpu_work *krwp;
3287        int i, j;
3288
3289        krwp = container_of(to_rcu_work(work),
3290                            struct kfree_rcu_cpu_work, rcu_work);
3291        krcp = krwp->krcp;
3292
3293        raw_spin_lock_irqsave(&krcp->lock, flags);
3294        // Channels 1 and 2.
3295        for (i = 0; i < FREE_N_CHANNELS; i++) {
3296                bkvhead[i] = krwp->bkvhead_free[i];
3297                krwp->bkvhead_free[i] = NULL;
3298        }
3299
3300        // Channel 3.
3301        head = krwp->head_free;
3302        krwp->head_free = NULL;
3303        raw_spin_unlock_irqrestore(&krcp->lock, flags);
3304
3305        // Handle the first two channels.
3306        for (i = 0; i < FREE_N_CHANNELS; i++) {
3307                for (; bkvhead[i]; bkvhead[i] = bnext) {
3308                        bnext = bkvhead[i]->next;
3309                        debug_rcu_bhead_unqueue(bkvhead[i]);
3310
3311                        rcu_lock_acquire(&rcu_callback_map);
3312                        if (i == 0) { // kmalloc() / kfree().
3313                                trace_rcu_invoke_kfree_bulk_callback(
3314                                        rcu_state.name, bkvhead[i]->nr_records,
3315                                        bkvhead[i]->records);
3316
3317                                kfree_bulk(bkvhead[i]->nr_records,
3318                                        bkvhead[i]->records);
3319                        } else { // vmalloc() / vfree().
3320                                for (j = 0; j < bkvhead[i]->nr_records; j++) {
3321                                        trace_rcu_invoke_kvfree_callback(
3322                                                rcu_state.name,
3323                                                bkvhead[i]->records[j], 0);
3324
3325                                        vfree(bkvhead[i]->records[j]);
3326                                }
3327                        }
3328                        rcu_lock_release(&rcu_callback_map);
3329
3330                        raw_spin_lock_irqsave(&krcp->lock, flags);
3331                        if (put_cached_bnode(krcp, bkvhead[i]))
3332                                bkvhead[i] = NULL;
3333                        raw_spin_unlock_irqrestore(&krcp->lock, flags);
3334
3335                        if (bkvhead[i])
3336                                free_page((unsigned long) bkvhead[i]);
3337
3338                        cond_resched_tasks_rcu_qs();
3339                }
3340        }
3341
3342        /*
3343         * This is used when the "bulk" path cannot be used for the
3344         * double-argument variant of kvfree_rcu().  This happens when the
3345         * page-cache is empty, which means that objects are instead
3346         * queued on a linked list through their rcu_head structures.
3347         * This list is named "Channel 3".
3348         */
3349        for (; head; head = next) {
3350                unsigned long offset = (unsigned long)head->func;
3351                void *ptr = (void *)head - offset;
3352
3353                next = head->next;
3354                debug_rcu_head_unqueue((struct rcu_head *)ptr);
3355                rcu_lock_acquire(&rcu_callback_map);
3356                trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
3357
3358                if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
3359                        kvfree(ptr);
3360
3361                rcu_lock_release(&rcu_callback_map);
3362                cond_resched_tasks_rcu_qs();
3363        }
3364}
3365
3366/*
3367 * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
3368 */
3369static void kfree_rcu_monitor(struct work_struct *work)
3370{
3371        struct kfree_rcu_cpu *krcp = container_of(work,
3372                struct kfree_rcu_cpu, monitor_work.work);
3373        unsigned long flags;
3374        int i, j;
3375
3376        raw_spin_lock_irqsave(&krcp->lock, flags);
3377
3378        // Attempt to start a new batch.
3379        for (i = 0; i < KFREE_N_BATCHES; i++) {
3380                struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
3381
3382                // Try to detach bkvhead or head and attach it to the
3383                // corresponding free channel, if that channel is available.
3384                // A previous RCU batch may still be in progress, in which
3385                // case another one cannot be queued immediately, so the
3386                // monitor work is rearmed instead.
3387                if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
3388                        (krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
3389                                (krcp->head && !krwp->head_free)) {
3390                        // Channel 1 corresponds to the SLAB-pointer bulk path.
3391                        // Channel 2 corresponds to vmalloc-pointer bulk path.
3392                        for (j = 0; j < FREE_N_CHANNELS; j++) {
3393                                if (!krwp->bkvhead_free[j]) {
3394                                        krwp->bkvhead_free[j] = krcp->bkvhead[j];
3395                                        krcp->bkvhead[j] = NULL;
3396                                }
3397                        }
3398
3399                        // Channel 3 corresponds to both SLAB and vmalloc
3400                        // objects queued on the linked list.
3401                        if (!krwp->head_free) {
3402                                krwp->head_free = krcp->head;
3403                                krcp->head = NULL;
3404                        }
3405
3406                        WRITE_ONCE(krcp->count, 0);
3407
3408                        // There is one work item per batch, and each batch
3409                        // handles up to three "free channels".  The work may
3410                        // already be pending here if the channels were
3411                        // detached one after another, in which case
3412                        // queue_rcu_work() simply leaves it pending.
3413                        queue_rcu_work(system_wq, &krwp->rcu_work);
3414                }
3415        }
3416
3417        // If there is nothing left to detach, the job here is done.
3418        // If at least one of the channels is still busy because a previous
3419        // batch is still in progress, rearm the monitor work so that
3420        // another attempt is made later.
3422        if (!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head)
3423                krcp->monitor_todo = false;
3424        else
3425                schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3426
3427        raw_spin_unlock_irqrestore(&krcp->lock, flags);
3428}
3429
3430static enum hrtimer_restart
3431schedule_page_work_fn(struct hrtimer *t)
3432{
3433        struct kfree_rcu_cpu *krcp =
3434                container_of(t, struct kfree_rcu_cpu, hrtimer);
3435
3436        queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0);
3437        return HRTIMER_NORESTART;
3438}
3439
3440static void fill_page_cache_func(struct work_struct *work)
3441{
3442        struct kvfree_rcu_bulk_data *bnode;
3443        struct kfree_rcu_cpu *krcp =
3444                container_of(work, struct kfree_rcu_cpu,
3445                        page_cache_work.work);
3446        unsigned long flags;
3447        int nr_pages;
3448        bool pushed;
3449        int i;
3450
3451        nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ?
3452                1 : rcu_min_cached_objs;
3453
3454        for (i = 0; i < nr_pages; i++) {
3455                bnode = (struct kvfree_rcu_bulk_data *)
3456                        __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3457
3458                if (bnode) {
3459                        raw_spin_lock_irqsave(&krcp->lock, flags);
3460                        pushed = put_cached_bnode(krcp, bnode);
3461                        raw_spin_unlock_irqrestore(&krcp->lock, flags);
3462
3463                        if (!pushed) {
3464                                free_page((unsigned long) bnode);
3465                                break;
3466                        }
3467                }
3468        }
3469
3470        atomic_set(&krcp->work_in_progress, 0);
3471        atomic_set(&krcp->backoff_page_cache_fill, 0);
3472}
3473
3474static void
3475run_page_cache_worker(struct kfree_rcu_cpu *krcp)
3476{
3477        if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3478                        !atomic_xchg(&krcp->work_in_progress, 1)) {
3479                if (atomic_read(&krcp->backoff_page_cache_fill)) {
3480                        queue_delayed_work(system_wq,
3481                                &krcp->page_cache_work,
3482                                        msecs_to_jiffies(rcu_delay_page_cache_fill_msec));
3483                } else {
3484                        hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3485                        krcp->hrtimer.function = schedule_page_work_fn;
3486                        hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL);
3487                }
3488        }
3489}
3490
3491// Record ptr in a page managed by krcp, with the pre-krc_this_cpu_lock()
3492// state specified by flags.  If can_alloc is true, the caller must
3493// be schedulable and not be holding any locks or mutexes that might be
3494// acquired by the memory allocator or anything that it might invoke.
3495// Returns true if ptr was successfully recorded, else the caller must
3496// use a fallback.
3497static inline bool
3498add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
3499        unsigned long *flags, void *ptr, bool can_alloc)
3500{
3501        struct kvfree_rcu_bulk_data *bnode;
3502        int idx;
3503
3504        *krcp = krc_this_cpu_lock(flags);
3505        if (unlikely(!(*krcp)->initialized))
3506                return false;
3507
3508        idx = !!is_vmalloc_addr(ptr);
3509
3510        /* Check if a new block is required. */
3511        if (!(*krcp)->bkvhead[idx] ||
3512                        (*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
3513                bnode = get_cached_bnode(*krcp);
3514                if (!bnode && can_alloc) {
3515                        krc_this_cpu_unlock(*krcp, *flags);
3516
3517                        // __GFP_NORETRY - permits only lightweight direct reclaim,
3518                        // which keeps a miss on this allocation (and thus a trip
3519                        // through the fallback path) cheap.  It also forbids invoking
3520                        // the OOM killer, which is desirable given that memory is
3521                        // about to be released anyway.
3522                        //
3523                        // __GFP_NOMEMALLOC - prevents consuming all of the memory
3524                        // reserves.  Please note that there is a fallback path.
3525                        //
3526                        // __GFP_NOWARN - allocation failure is expected under low
3527                        // memory or high memory-pressure scenarios, so do not warn.
3528                        bnode = (struct kvfree_rcu_bulk_data *)
3529                                __get_free_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
3530                        *krcp = krc_this_cpu_lock(flags);
3531                }
3532
3533                if (!bnode)
3534                        return false;
3535
3536                /* Initialize the new block. */
3537                bnode->nr_records = 0;
3538                bnode->next = (*krcp)->bkvhead[idx];
3539
3540                /* Attach it to the head. */
3541                (*krcp)->bkvhead[idx] = bnode;
3542        }
3543
3544        /* Finally insert. */
3545        (*krcp)->bkvhead[idx]->records
3546                [(*krcp)->bkvhead[idx]->nr_records++] = ptr;
3547
3548        return true;
3549}
3550
3551/*
3552 * Queue a request for lazy invocation of the appropriate free routine
3553 * after a grace period.  Please note that three paths are maintained,
3554 * two for the common case using arrays of pointers and a third one that
3555 * is used only when the main paths cannot be used, for example, due to
3556 * memory pressure.
3557 *
3558 * Each kvfree_call_rcu() request is added to a batch.  The batch is drained
3559 * every KFREE_DRAIN_JIFFIES jiffies, and all objects in the batch are freed
3560 * in workqueue context.  Batching requests together in this way reduces the
3561 * number of grace periods needed during heavy kfree_rcu()/kvfree_rcu() load.
3562 */
3563void kvfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
3564{
3565        unsigned long flags;
3566        struct kfree_rcu_cpu *krcp;
3567        bool success;
3568        void *ptr;
3569
3570        if (head) {
3571                ptr = (void *) head - (unsigned long) func;
3572        } else {
3573                /*
3574                 * Please note there is a limitation for the head-less
3575                 * variant, which is why there is a clear rule for such
3576                 * objects: it may be used only from a context that is
3577                 * allowed to sleep.  Everywhere else, please embed an
3578                 * rcu_head in your data instead.
3579                 */
3580                might_sleep();
3581                ptr = (unsigned long *) func;
3582        }
3583
3584        // Queue the object but don't yet schedule the batch.
3585        if (debug_rcu_head_queue(ptr)) {
3586                // Probable double kfree_rcu(), just leak.
3587                WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
3588                          __func__, head);
3589
3590                // Nothing is queued in this case, just leave.
3591                return;
3592        }
3593
3594        kasan_record_aux_stack(ptr);
3595        success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head);
3596        if (!success) {
3597                run_page_cache_worker(krcp);
3598
3599                if (head == NULL)
3600                        // Fall back to inline freeing for a kvfree_rcu(one_arg) call.
3601                        goto unlock_return;
3602
3603                head->func = func;
3604                head->next = krcp->head;
3605                krcp->head = head;
3606                success = true;
3607        }
3608
3609        WRITE_ONCE(krcp->count, krcp->count + 1);
3610
3611        // Set timer to drain after KFREE_DRAIN_JIFFIES.
3612        if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
3613            !krcp->monitor_todo) {
3614                krcp->monitor_todo = true;
3615                schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
3616        }
3617
3618unlock_return:
3619        krc_this_cpu_unlock(krcp, flags);
3620
3621        /*
3622         * Fall back to an inline kvfree() after synchronize_rcu().
3623         * This is possible only from a context that can sleep, which
3624         * allows the current CPU to pass through a quiescent state.
3625         */
3626        if (!success) {
3627                debug_rcu_head_unqueue((struct rcu_head *) ptr);
3628                synchronize_rcu();
3629                kvfree(ptr);
3630        }
3631}
3632EXPORT_SYMBOL_GPL(kvfree_call_rcu);
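
    /*
     * A minimal usage sketch of the kvfree_rcu() wrappers that funnel into
     * kvfree_call_rcu() above.  The struct foo, fp, and ptr names are
     * illustrative assumptions:
     *
     *   struct foo {
     *           int data;
     *           struct rcu_head rcu;
     *   };
     *
     *   // Double-argument form: usable wherever call_rcu() is usable.
     *   kvfree_rcu(fp, rcu);
     *
     *   // Single-argument (head-less) form: may block on memory allocation
     *   // or even fall back to synchronize_rcu(), so it may be used only
     *   // from a context that is allowed to sleep.
     *   kvfree_rcu(ptr);
     */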
3633
3634static unsigned long
3635kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
3636{
3637        int cpu;
3638        unsigned long count = 0;
3639
3640        /* Snapshot count of all CPUs */
3641        for_each_possible_cpu(cpu) {
3642                struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3643
3644                count += READ_ONCE(krcp->count);
3645                count += READ_ONCE(krcp->nr_bkv_objs);
3646                atomic_set(&krcp->backoff_page_cache_fill, 1);
3647        }
3648
3649        return count;
3650}
3651
3652static unsigned long
3653kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
3654{
3655        int cpu, freed = 0;
3656
3657        for_each_possible_cpu(cpu) {
3658                int count;
3659                struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3660
3661                count = krcp->count;
3662                count += drain_page_cache(krcp);
3663                kfree_rcu_monitor(&krcp->monitor_work.work);
3664
3665                sc->nr_to_scan -= count;
3666                freed += count;
3667
3668                if (sc->nr_to_scan <= 0)
3669                        break;
3670        }
3671
3672        return freed == 0 ? SHRINK_STOP : freed;
3673}
3674
3675static struct shrinker kfree_rcu_shrinker = {
3676        .count_objects = kfree_rcu_shrink_count,
3677        .scan_objects = kfree_rcu_shrink_scan,
3678        .batch = 0,
3679        .seeks = DEFAULT_SEEKS,
3680};
3681
3682void __init kfree_rcu_scheduler_running(void)
3683{
3684        int cpu;
3685        unsigned long flags;
3686
3687        for_each_possible_cpu(cpu) {
3688                struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
3689
3690                raw_spin_lock_irqsave(&krcp->lock, flags);
3691                if ((!krcp->bkvhead[0] && !krcp->bkvhead[1] && !krcp->head) ||
3692                                krcp->monitor_todo) {
3693                        raw_spin_unlock_irqrestore(&krcp->lock, flags);
3694                        continue;
3695                }
3696                krcp->monitor_todo = true;
3697                schedule_delayed_work_on(cpu, &krcp->monitor_work,
3698                                         KFREE_DRAIN_JIFFIES);
3699                raw_spin_unlock_irqrestore(&krcp->lock, flags);
3700        }
3701}
3702
3703/*
3704 * During early boot, any blocking grace-period wait automatically
3705 * implies a grace period.  Later on, this is never the case for PREEMPTION kernels.
3706 *
3707 * However, because a context switch is a grace period for !PREEMPTION, any
3708 * blocking grace-period wait automatically implies a grace period if
3709 * there is only one CPU online at any point in time during execution of
3710 * either synchronize_rcu() or synchronize_rcu_expedited().  It is OK to
3711 * occasionally incorrectly indicate that there are multiple CPUs online
3712 * when there was in fact only one the whole time, as this just adds some
3713 * overhead: RCU still operates correctly.
3714 */
3715static int rcu_blocking_is_gp(void)
3716{
3717        int ret;
3718
3719        if (IS_ENABLED(CONFIG_PREEMPTION))
3720                return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
3721        might_sleep();  /* Check for RCU read-side critical section. */
3722        preempt_disable();
3723        /*
3724         * If the rcu_state.n_online_cpus counter is equal to one,
3725         * there is only one CPU, and that CPU sees all prior accesses
3726         * made by any CPU that was online at the time of its access.
3727         * Furthermore, if this counter is equal to one, its value cannot
3728         * change until after the preempt_enable() below.
3729         *
3730         * Furthermore, if rcu_state.n_online_cpus is equal to one here,
3731         * all later CPUs (both this one and any that come online later
3732         * on) are guaranteed to see all accesses prior to this point
3733         * in the code, without the need for additional memory barriers.
3734         * Those memory barriers are provided by CPU-hotplug code.
3735         */
3736        ret = READ_ONCE(rcu_state.n_online_cpus) <= 1;
3737        preempt_enable();
3738        return ret;
3739}
3740
3741/**
3742 * synchronize_rcu - wait until a grace period has elapsed.
3743 *
3744 * Control will return to the caller some time after a full grace
3745 * period has elapsed, in other words after all currently executing RCU
3746 * read-side critical sections have completed.  Note, however, that
3747 * upon return from synchronize_rcu(), the caller might well be executing
3748 * concurrently with new RCU read-side critical sections that began while
3749 * synchronize_rcu() was waiting.
3750 *
3751 * RCU read-side critical sections are delimited by rcu_read_lock()
3752 * and rcu_read_unlock(), and may be nested.  In addition, but only in
3753 * v5.0 and later, regions of code across which interrupts, preemption,
3754 * or softirqs have been disabled also serve as RCU read-side critical
3755 * sections.  This includes hardware interrupt handlers, softirq handlers,
3756 * and NMI handlers.
3757 *
3758 * Note that this guarantee implies further memory-ordering guarantees.
3759 * On systems with more than one CPU, when synchronize_rcu() returns,
3760 * each CPU is guaranteed to have executed a full memory barrier since
3761 * the end of its last RCU read-side critical section whose beginning
3762 * preceded the call to synchronize_rcu().  In addition, each CPU having
3763 * an RCU read-side critical section that extends beyond the return from
3764 * synchronize_rcu() is guaranteed to have executed a full memory barrier
3765 * after the beginning of synchronize_rcu() and before the beginning of
3766 * that RCU read-side critical section.  Note that these guarantees include
3767 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
3768 * that are executing in the kernel.
3769 *
3770 * Furthermore, if CPU A invoked synchronize_rcu(), which returned
3771 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
3772 * to have executed a full memory barrier during the execution of
3773 * synchronize_rcu() -- even if CPU A and CPU B are the same CPU (but
3774 * again only if the system has more than one CPU).
3775 *
3776 * Implementation of these memory-ordering guarantees is described here:
3777 * Documentation/RCU/Design/Memory-Ordering/Tree-RCU-Memory-Ordering.rst.
3778 */
3779void synchronize_rcu(void)
3780{
3781        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
3782                         lock_is_held(&rcu_lock_map) ||
3783                         lock_is_held(&rcu_sched_lock_map),
3784                         "Illegal synchronize_rcu() in RCU read-side critical section");
3785        if (rcu_blocking_is_gp())
3786                return;  // Context allows vacuous grace periods.
3787        if (rcu_gp_is_expedited())
3788                synchronize_rcu_expedited();
3789        else
3790                wait_rcu_gp(call_rcu);
3791}
3792EXPORT_SYMBOL_GPL(synchronize_rcu);
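
    /*
     * A minimal updater-side sketch using synchronize_rcu().  The list,
     * its lock, and the element p are illustrative assumptions:
     *
     *   spin_lock(&mylist_lock);
     *   list_del_rcu(&p->node);         // unpublish the element
     *   spin_unlock(&mylist_lock);
     *
     *   synchronize_rcu();              // wait for pre-existing readers
     *   kfree(p);                       // now no reader can still hold p
     */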
3793
3794/**
3795 * get_state_synchronize_rcu - Snapshot current RCU state
3796 *
3797 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3798 * or poll_state_synchronize_rcu() to determine whether or not a full
3799 * grace period has elapsed in the meantime.
3800 */
3801unsigned long get_state_synchronize_rcu(void)
3802{
3803        /*
3804         * Any prior manipulation of RCU-protected data must happen
3805         * before the load from ->gp_seq.
3806         */
3807        smp_mb();  /* ^^^ */
3808        return rcu_seq_snap(&rcu_state.gp_seq);
3809}
3810EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
3811
3812/**
3813 * start_poll_synchronize_rcu - Snapshot and start RCU grace period
3814 *
3815 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
3816 * or poll_state_synchronize_rcu() to determine whether or not a full
3817 * grace period has elapsed in the meantime.  If the needed grace period
3818 * is not already slated to start, notifies RCU core of the need for that
3819 * grace period.
3820 *
3821 * Interrupts must be enabled for the case where it is necessary to awaken
3822 * the grace-period kthread.
3823 */
3824unsigned long start_poll_synchronize_rcu(void)
3825{
3826        unsigned long flags;
3827        unsigned long gp_seq = get_state_synchronize_rcu();
3828        bool needwake;
3829        struct rcu_data *rdp;
3830        struct rcu_node *rnp;
3831
3832        lockdep_assert_irqs_enabled();
3833        local_irq_save(flags);
3834        rdp = this_cpu_ptr(&rcu_data);
3835        rnp = rdp->mynode;
3836        raw_spin_lock_rcu_node(rnp); // irqs already disabled.
3837        needwake = rcu_start_this_gp(rnp, rdp, gp_seq);
3838        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
3839        if (needwake)
3840                rcu_gp_kthread_wake();
3841        return gp_seq;
3842}
3843EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);
3844
3845/**
3846 * poll_state_synchronize_rcu - Conditionally wait for an RCU grace period
3847 *
3848 * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3849 *
3850 * If a full RCU grace period has elapsed since the earlier call from
3851 * which oldstate was obtained, return @true, otherwise return @false.
3852 * If @false is returned, it is the caller's responsibility to invoke this
3853 * function later on until it does return @true.  Alternatively, the caller
3854 * can explicitly wait for a grace period, for example, by passing @oldstate
3855 * to cond_synchronize_rcu() or by directly invoking synchronize_rcu().
3856 *
3857 * Yes, this function does not take counter wrap into account.
3858 * But counter wrap is harmless.  If the counter wraps, we have waited for
3859 * more than 2 billion grace periods (and way more on a 64-bit system!).
3860 * Those needing to keep oldstate values for very long time periods
3861 * (many hours even on 32-bit systems) should check them occasionally
3862 * and either refresh them or set a flag indicating that the grace period
3863 * has completed.
3864 *
3865 * This function provides the same memory-ordering guarantees that
3866 * would be provided by a synchronize_rcu() that was invoked at the call
3867 * to the function that provided @oldstate, and that returned at the end
3868 * of this function.
3869 */
3870bool poll_state_synchronize_rcu(unsigned long oldstate)
3871{
3872        if (rcu_seq_done(&rcu_state.gp_seq, oldstate)) {
3873                smp_mb(); /* Ensure GP ends before subsequent accesses. */
3874                return true;
3875        }
3876        return false;
3877}
3878EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
3879
3880/**
3881 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
3882 *
3883 * @oldstate: value from get_state_synchronize_rcu() or start_poll_synchronize_rcu()
3884 *
3885 * If a full RCU grace period has elapsed since the earlier call to
3886 * get_state_synchronize_rcu() or start_poll_synchronize_rcu(), just return.
3887 * Otherwise, invoke synchronize_rcu() to wait for a full grace period.
3888 *
3889 * Yes, this function does not take counter wrap into account.  But
3890 * counter wrap is harmless.  If the counter wraps, we have waited for
3891 * more than 2 billion grace periods (and way more on a 64-bit system!),
3892 * so waiting for one additional grace period should be just fine.
3893 *
3894 * This function provides the same memory-ordering guarantees that
3895 * would be provided by a synchronize_rcu() that was invoked at the call
3896 * to the function that provided @oldstate, and that returned at the end
3897 * of this function.
3898 */
3899void cond_synchronize_rcu(unsigned long oldstate)
3900{
3901        if (!poll_state_synchronize_rcu(oldstate))
3902                synchronize_rcu();
3903}
3904EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
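
    /*
     * A sketch of how the polling grace-period API above might be used to
     * batch deferred frees without ever blocking.  The struct my_obj type,
     * my_defer_list, and the (single-threaded or externally locked) reaper
     * are illustrative assumptions:
     *
     *   struct my_obj {
     *           struct list_head node;
     *           unsigned long gp_snap;
     *   };
     *
     *   static void my_retire(struct my_obj *p)
     *   {
     *           p->gp_snap = start_poll_synchronize_rcu();
     *           list_add_tail(&p->node, &my_defer_list);
     *   }
     *
     *   static void my_reap(void)        // invoked periodically
     *   {
     *           struct my_obj *p, *tmp;
     *
     *           list_for_each_entry_safe(p, tmp, &my_defer_list, node) {
     *                   if (!poll_state_synchronize_rcu(p->gp_snap))
     *                           continue;   // grace period not yet over
     *                   list_del(&p->node);
     *                   kfree(p);
     *           }
     *   }
     */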
3905
3906/*
3907 * Check to see if there is any immediate RCU-related work to be done by
3908 * the current CPU, returning 1 if so and zero otherwise.  The checks are
3909 * in order of increasing expense: checks that can be carried out against
3910 * CPU-local state are performed first.  However, we must check for CPU
3911 * stalls first, else we might not get a chance.
3912 */
3913static int rcu_pending(int user)
3914{
3915        bool gp_in_progress;
3916        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
3917        struct rcu_node *rnp = rdp->mynode;
3918
3919        lockdep_assert_irqs_disabled();
3920
3921        /* Check for CPU stalls, if enabled. */
3922        check_cpu_stall(rdp);
3923
3924        /* Does this CPU need a deferred NOCB wakeup? */
3925        if (rcu_nocb_need_deferred_wakeup(rdp, RCU_NOCB_WAKE))
3926                return 1;
3927
3928        /* Is this a nohz_full CPU in userspace or idle?  (Ignore RCU if so.) */
3929        if ((user || rcu_is_cpu_rrupt_from_idle()) && rcu_nohz_full_cpu())
3930                return 0;
3931
3932        /* Is the RCU core waiting for a quiescent state from this CPU? */
3933        gp_in_progress = rcu_gp_in_progress();
3934        if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm && gp_in_progress)
3935                return 1;
3936
3937        /* Does this CPU have callbacks ready to invoke? */
3938        if (!rcu_rdp_is_offloaded(rdp) &&
3939            rcu_segcblist_ready_cbs(&rdp->cblist))
3940                return 1;
3941
3942        /* Has RCU gone idle with this CPU needing another grace period? */
3943        if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
3944            !rcu_rdp_is_offloaded(rdp) &&
3945            !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
3946                return 1;
3947
3948        /* Have RCU grace period completed or started?  */
3949        if (rcu_seq_current(&rnp->gp_seq) != rdp->gp_seq ||
3950            unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
3951                return 1;
3952
3953        /* nothing to do */
3954        return 0;
3955}
3956
3957/*
3958 * Helper function for rcu_barrier() tracing.  If tracing is disabled,
3959 * the compiler is expected to optimize this away.
3960 */
3961static void rcu_barrier_trace(const char *s, int cpu, unsigned long done)
3962{
3963        trace_rcu_barrier(rcu_state.name, s, cpu,
3964                          atomic_read(&rcu_state.barrier_cpu_count), done);
3965}
3966
3967/*
3968 * RCU callback function for rcu_barrier().  If we are last, wake
3969 * up the task executing rcu_barrier().
3970 *
3971 * Note that the value of rcu_state.barrier_sequence must be captured
3972 * before the atomic_dec_and_test().  Otherwise, if this CPU is not last,
3973 * other CPUs might count the value down to zero before this CPU gets
3974 * around to invoking rcu_barrier_trace(), which might result in bogus
3975 * data from the next instance of rcu_barrier().
3976 */
3977static void rcu_barrier_callback(struct rcu_head *rhp)
3978{
3979        unsigned long __maybe_unused s = rcu_state.barrier_sequence;
3980
3981        if (atomic_dec_and_test(&rcu_state.barrier_cpu_count)) {
3982                rcu_barrier_trace(TPS("LastCB"), -1, s);
3983                complete(&rcu_state.barrier_completion);
3984        } else {
3985                rcu_barrier_trace(TPS("CB"), -1, s);
3986        }
3987}
3988
3989/*
3990 * Called with preemption disabled, and from cross-cpu IRQ context.
3991 */
3992static void rcu_barrier_func(void *cpu_in)
3993{
3994        uintptr_t cpu = (uintptr_t)cpu_in;
3995        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
3996
3997        rcu_barrier_trace(TPS("IRQ"), -1, rcu_state.barrier_sequence);
3998        rdp->barrier_head.func = rcu_barrier_callback;
3999        debug_rcu_head_queue(&rdp->barrier_head);
4000        rcu_nocb_lock(rdp);
4001        WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
4002        if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head)) {
4003                atomic_inc(&rcu_state.barrier_cpu_count);
4004        } else {
4005                debug_rcu_head_unqueue(&rdp->barrier_head);
4006                rcu_barrier_trace(TPS("IRQNQ"), -1,
4007                                  rcu_state.barrier_sequence);
4008        }
4009        rcu_nocb_unlock(rdp);
4010}
4011
4012/**
4013 * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
4014 *
4015 * Note that this primitive does not necessarily wait for an RCU grace period
4016 * to complete.  For example, if there are no RCU callbacks queued anywhere
4017 * in the system, then rcu_barrier() is within its rights to return
4018 * immediately, without waiting for anything, much less an RCU grace period.
4019 */
4020void rcu_barrier(void)
4021{
4022        uintptr_t cpu;
4023        struct rcu_data *rdp;
4024        unsigned long s = rcu_seq_snap(&rcu_state.barrier_sequence);
4025
4026        rcu_barrier_trace(TPS("Begin"), -1, s);
4027
4028        /* Take mutex to serialize concurrent rcu_barrier() requests. */
4029        mutex_lock(&rcu_state.barrier_mutex);
4030
4031        /* Did someone else do our work for us? */
4032        if (rcu_seq_done(&rcu_state.barrier_sequence, s)) {
4033                rcu_barrier_trace(TPS("EarlyExit"), -1,
4034                                  rcu_state.barrier_sequence);
4035                smp_mb(); /* caller's subsequent code after above check. */
4036                mutex_unlock(&rcu_state.barrier_mutex);
4037                return;
4038        }
4039
4040        /* Mark the start of the barrier operation. */
4041        rcu_seq_start(&rcu_state.barrier_sequence);
4042        rcu_barrier_trace(TPS("Inc1"), -1, rcu_state.barrier_sequence);
4043
4044        /*
4045         * Initialize the count to two rather than to zero in order
4046         * to avoid a too-soon return to zero in case of an immediate
4047         * invocation of the just-enqueued callback (or preemption of
4048         * this task).  Exclude CPU-hotplug operations to ensure that no
4049         * offline non-offloaded CPU has callbacks queued.
4050         */
4051        init_completion(&rcu_state.barrier_completion);
4052        atomic_set(&rcu_state.barrier_cpu_count, 2);
4053        get_online_cpus();
4054
4055        /*
4056         * Force each CPU with callbacks to register a new callback.
4057         * When that callback is invoked, we will know that all of the
4058         * corresponding CPU's preceding callbacks have been invoked.
4059         */
4060        for_each_possible_cpu(cpu) {
4061                rdp = per_cpu_ptr(&rcu_data, cpu);
4062                if (cpu_is_offline(cpu) &&
4063                    !rcu_rdp_is_offloaded(rdp))
4064                        continue;
4065                if (rcu_segcblist_n_cbs(&rdp->cblist) && cpu_online(cpu)) {
4066                        rcu_barrier_trace(TPS("OnlineQ"), cpu,
4067                                          rcu_state.barrier_sequence);
4068                        smp_call_function_single(cpu, rcu_barrier_func, (void *)cpu, 1);
4069                } else if (rcu_segcblist_n_cbs(&rdp->cblist) &&
4070                           cpu_is_offline(cpu)) {
4071                        rcu_barrier_trace(TPS("OfflineNoCBQ"), cpu,
4072                                          rcu_state.barrier_sequence);
4073                        local_irq_disable();
4074                        rcu_barrier_func((void *)cpu);
4075                        local_irq_enable();
4076                } else if (cpu_is_offline(cpu)) {
4077                        rcu_barrier_trace(TPS("OfflineNoCBNoQ"), cpu,
4078                                          rcu_state.barrier_sequence);
4079                } else {
4080                        rcu_barrier_trace(TPS("OnlineNQ"), cpu,
4081                                          rcu_state.barrier_sequence);
4082                }
4083        }
4084        put_online_cpus();
4085
4086        /*
4087         * Now that each CPU with callbacks has an rcu_barrier_callback()
4088         * callback queued and counted, remove the initial count.
4089         */
4090        if (atomic_sub_and_test(2, &rcu_state.barrier_cpu_count))
4091                complete(&rcu_state.barrier_completion);
4092
4093        /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
4094        wait_for_completion(&rcu_state.barrier_completion);
4095
4096        /* Mark the end of the barrier operation. */
4097        rcu_barrier_trace(TPS("Inc2"), -1, rcu_state.barrier_sequence);
4098        rcu_seq_end(&rcu_state.barrier_sequence);
4099
4100        /* Other rcu_barrier() invocations can now safely proceed. */
4101        mutex_unlock(&rcu_state.barrier_mutex);
4102}
4103EXPORT_SYMBOL_GPL(rcu_barrier);
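/*
 * Usage sketch (illustrative only): the classic rcu_barrier() use case is
 * module unload.  A hypothetical module that posted callbacks with
 * call_rcu() must wait for all of them to be invoked before its callback
 * functions and data disappear:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		foo_stop_queueing();	// hypothetical: no new call_rcu() callbacks
 *		rcu_barrier();		// wait for already-queued callbacks to run
 *		foo_teardown();		// hypothetical: callbacks can no longer run
 *	}
 */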
4104
4105/*
4106 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
4107 * first CPU in a given leaf rcu_node structure coming online.  The caller
4108 * must hold the corresponding leaf rcu_node ->lock with interrupts
4109 * disabled.
4110 */
4111static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
4112{
4113        long mask;
4114        long oldmask;
4115        struct rcu_node *rnp = rnp_leaf;
4116
4117        raw_lockdep_assert_held_rcu_node(rnp_leaf);
4118        WARN_ON_ONCE(rnp->wait_blkd_tasks);
4119        for (;;) {
4120                mask = rnp->grpmask;
4121                rnp = rnp->parent;
4122                if (rnp == NULL)
4123                        return;
4124                raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
4125                oldmask = rnp->qsmaskinit;
4126                rnp->qsmaskinit |= mask;
4127                raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
4128                if (oldmask)
4129                        return;
4130        }
4131}
4132
4133/*
4134 * Do boot-time initialization of a CPU's per-CPU RCU data.
4135 */
4136static void __init
4137rcu_boot_init_percpu_data(int cpu)
4138{
4139        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4140
4141        /* Set up local state, ensuring consistent view of global state. */
4142        rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
4143        INIT_WORK(&rdp->strict_work, strict_work_handler);
4144        WARN_ON_ONCE(rdp->dynticks_nesting != 1);
4145        WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
4146        rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
4147        rdp->rcu_ofl_gp_flags = RCU_GP_CLEANED;
4148        rdp->rcu_onl_gp_seq = rcu_state.gp_seq;
4149        rdp->rcu_onl_gp_flags = RCU_GP_CLEANED;
4150        rdp->cpu = cpu;
4151        rcu_boot_init_nocb_percpu_data(rdp);
4152}
4153
4154/*
4155 * Invoked early in the CPU-online process, when pretty much all services
4156 * are available.  The incoming CPU is not present.
4157 *
4158 * Initializes a CPU's per-CPU RCU data.  Note that only one online or
4159 * offline event can be happening at a given time.  Note also that we can
4160 * accept some slop in the rcu_state.gp_seq access due to the fact that this
4161 * CPU cannot possibly have any non-offloaded RCU callbacks in flight yet.
4162 * And any offloaded callbacks are being numbered elsewhere.
4163 */
4164int rcutree_prepare_cpu(unsigned int cpu)
4165{
4166        unsigned long flags;
4167        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4168        struct rcu_node *rnp = rcu_get_root();
4169
4170        /* Set up local state, ensuring consistent view of global state. */
4171        raw_spin_lock_irqsave_rcu_node(rnp, flags);
4172        rdp->qlen_last_fqs_check = 0;
4173        rdp->n_force_qs_snap = rcu_state.n_force_qs;
4174        rdp->blimit = blimit;
4175        rdp->dynticks_nesting = 1;      /* CPU not up, no tearing. */
4176        rcu_dynticks_eqs_online();
4177        raw_spin_unlock_rcu_node(rnp);          /* irqs remain disabled. */
4178
4179        /*
4180         * Only non-NOCB CPUs that didn't have early-boot callbacks need to be
4181         * (re-)initialized.
4182         */
4183        if (!rcu_segcblist_is_enabled(&rdp->cblist))
4184                rcu_segcblist_init(&rdp->cblist);  /* Re-enable callbacks. */
4185
4186        /*
4187         * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
4188         * propagation up the rcu_node tree will happen at the beginning
4189         * of the next grace period.
4190         */
4191        rnp = rdp->mynode;
4192        raw_spin_lock_rcu_node(rnp);            /* irqs already disabled. */
4193        rdp->beenonline = true;  /* We have now been online. */
4194        rdp->gp_seq = READ_ONCE(rnp->gp_seq);
4195        rdp->gp_seq_needed = rdp->gp_seq;
4196        rdp->cpu_no_qs.b.norm = true;
4197        rdp->core_needs_qs = false;
4198        rdp->rcu_iw_pending = false;
4199        rdp->rcu_iw = IRQ_WORK_INIT_HARD(rcu_iw_handler);
4200        rdp->rcu_iw_gp_seq = rdp->gp_seq - 1;
4201        trace_rcu_grace_period(rcu_state.name, rdp->gp_seq, TPS("cpuonl"));
4202        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4203        rcu_spawn_one_boost_kthread(rnp);
4204        rcu_spawn_cpu_nocb_kthread(cpu);
4205        WRITE_ONCE(rcu_state.n_online_cpus, rcu_state.n_online_cpus + 1);
4206
4207        return 0;
4208}
4209
4210/*
4211 * Update RCU priority boost kthread affinity for CPU-hotplug changes.
4212 */
4213static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
4214{
4215        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4216
4217        rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
4218}
4219
4220/*
4221 * Near the end of the CPU-online process.  Pretty much all services
4222 * enabled, and the CPU is now very much alive.
4223 */
4224int rcutree_online_cpu(unsigned int cpu)
4225{
4226        unsigned long flags;
4227        struct rcu_data *rdp;
4228        struct rcu_node *rnp;
4229
4230        rdp = per_cpu_ptr(&rcu_data, cpu);
4231        rnp = rdp->mynode;
4232        raw_spin_lock_irqsave_rcu_node(rnp, flags);
4233        rnp->ffmask |= rdp->grpmask;
4234        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4235        if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
4236                return 0; /* Too early in boot for scheduler work. */
4237        sync_sched_exp_online_cleanup(cpu);
4238        rcutree_affinity_setting(cpu, -1);
4239
4240        // Stop-machine done, so allow nohz_full to disable tick.
4241        tick_dep_clear(TICK_DEP_BIT_RCU);
4242        return 0;
4243}
4244
4245/*
4246 * Near the beginning of the CPU-offline process.  The CPU is still very much alive
4247 * with pretty much all services enabled.
4248 */
4249int rcutree_offline_cpu(unsigned int cpu)
4250{
4251        unsigned long flags;
4252        struct rcu_data *rdp;
4253        struct rcu_node *rnp;
4254
4255        rdp = per_cpu_ptr(&rcu_data, cpu);
4256        rnp = rdp->mynode;
4257        raw_spin_lock_irqsave_rcu_node(rnp, flags);
4258        rnp->ffmask &= ~rdp->grpmask;
4259        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4260
4261        rcutree_affinity_setting(cpu, cpu);
4262
4263        // nohz_full CPUs need the tick for stop-machine to work quickly
4264        tick_dep_set(TICK_DEP_BIT_RCU);
4265        return 0;
4266}
4267
4268/*
4269 * Mark the specified CPU as being online so that subsequent grace periods
4270 * (both expedited and normal) will wait on it.  Note that this means that
4271 * incoming CPUs are not allowed to use RCU read-side critical sections
4272 * until this function is called.  Failing to observe this restriction
4273 * will result in lockdep splats.
4274 *
4275 * Note that this function is special in that it is invoked directly
4276 * from the incoming CPU rather than from the cpuhp_step mechanism.
4277 * This is because this function must be invoked at a precise location.
4278 */
4279void rcu_cpu_starting(unsigned int cpu)
4280{
4281        unsigned long flags;
4282        unsigned long mask;
4283        struct rcu_data *rdp;
4284        struct rcu_node *rnp;
4285        bool newcpu;
4286
4287        rdp = per_cpu_ptr(&rcu_data, cpu);
4288        if (rdp->cpu_started)
4289                return;
4290        rdp->cpu_started = true;
4291
4292        rnp = rdp->mynode;
4293        mask = rdp->grpmask;
4294        WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4295        WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
4296        smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4297        raw_spin_lock_irqsave_rcu_node(rnp, flags);
4298        WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
4299        newcpu = !(rnp->expmaskinitnext & mask);
4300        rnp->expmaskinitnext |= mask;
4301        /* Allow lockless access for expedited grace periods. */
4302        smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
4303        ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
4304        rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
4305        rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4306        rdp->rcu_onl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4307
4308        /* An incoming CPU should never be blocking a grace period. */
4309        if (WARN_ON_ONCE(rnp->qsmask & mask)) { /* RCU waiting on incoming CPU? */
4310                rcu_disable_urgency_upon_qs(rdp);
4311                /* Report QS -after- changing ->qsmaskinitnext! */
4312                rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4313        } else {
4314                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4315        }
4316        smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4317        WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4318        WARN_ON_ONCE(rnp->ofl_seq & 0x1);
4319        smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
4320}
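/*
 * Ordering sketch (hypothetical bring-up code, illustrative only): per the
 * restriction noted above, the incoming CPU must invoke rcu_cpu_starting()
 * before its first RCU read-side critical section:
 *
 *	rcu_cpu_starting(raw_smp_processor_id());	// tell RCU this CPU is here
 *	rcu_read_lock();				// now legal, no lockdep splat
 *	...						// read-side accesses
 *	rcu_read_unlock();
 */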
4321
4322/*
4323 * The outgoing CPU has no further need of RCU, so remove it from
4324 * the rcu_node tree's ->qsmaskinitnext bit masks.
4325 *
4326 * Note that this function is special in that it is invoked directly
4327 * from the outgoing CPU rather than from the cpuhp_step mechanism.
4328 * This is because this function must be invoked at a precise location.
4329 */
4330void rcu_report_dead(unsigned int cpu)
4331{
4332        unsigned long flags;
4333        unsigned long mask;
4334        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4335        struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
4336
4337        // Do any dangling deferred wakeups.
4338        do_nocb_deferred_wakeup(rdp);
4339
4340        /* QS for any half-done expedited grace period. */
4341        preempt_disable();
4342        rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
4343        preempt_enable();
4344        rcu_preempt_deferred_qs(current);
4345
4346        /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
4347        mask = rdp->grpmask;
4348        WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4349        WARN_ON_ONCE(!(rnp->ofl_seq & 0x1));
4350        smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4351        raw_spin_lock(&rcu_state.ofl_lock);
4352        raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
4353        rdp->rcu_ofl_gp_seq = READ_ONCE(rcu_state.gp_seq);
4354        rdp->rcu_ofl_gp_flags = READ_ONCE(rcu_state.gp_flags);
4355        if (rnp->qsmask & mask) { /* RCU waiting on outgoing CPU? */
4356                /* Report quiescent state -before- changing ->qsmaskinitnext! */
4357                rcu_report_qs_rnp(mask, rnp, rnp->gp_seq, flags);
4358                raw_spin_lock_irqsave_rcu_node(rnp, flags);
4359        }
4360        WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext & ~mask);
4361        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4362        raw_spin_unlock(&rcu_state.ofl_lock);
4363        smp_mb(); // Pair with rcu_gp_cleanup()'s ->ofl_seq barrier().
4364        WRITE_ONCE(rnp->ofl_seq, rnp->ofl_seq + 1);
4365        WARN_ON_ONCE(rnp->ofl_seq & 0x1);
4366
4367        rdp->cpu_started = false;
4368}
4369
4370#ifdef CONFIG_HOTPLUG_CPU
4371/*
4372 * The outgoing CPU has just passed through the dying-idle state, and we
4373 * are being invoked from the CPU that was IPIed to continue the offline
4374 * operation.  Migrate the outgoing CPU's callbacks to the current CPU.
4375 */
4376void rcutree_migrate_callbacks(int cpu)
4377{
4378        unsigned long flags;
4379        struct rcu_data *my_rdp;
4380        struct rcu_node *my_rnp;
4381        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
4382        bool needwake;
4383
4384        if (rcu_rdp_is_offloaded(rdp) ||
4385            rcu_segcblist_empty(&rdp->cblist))
4386                return;  /* No callbacks to migrate. */
4387
4388        local_irq_save(flags);
4389        my_rdp = this_cpu_ptr(&rcu_data);
4390        my_rnp = my_rdp->mynode;
4391        rcu_nocb_lock(my_rdp); /* irqs already disabled. */
4392        WARN_ON_ONCE(!rcu_nocb_flush_bypass(my_rdp, NULL, jiffies));
4393        raw_spin_lock_rcu_node(my_rnp); /* irqs already disabled. */
4394        /* Leverage recent GPs and set GP for new callbacks. */
4395        needwake = rcu_advance_cbs(my_rnp, rdp) ||
4396                   rcu_advance_cbs(my_rnp, my_rdp);
4397        rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
4398        needwake = needwake || rcu_advance_cbs(my_rnp, my_rdp);
4399        rcu_segcblist_disable(&rdp->cblist);
4400        WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
4401                     !rcu_segcblist_n_cbs(&my_rdp->cblist));
4402        if (rcu_rdp_is_offloaded(my_rdp)) {
4403                raw_spin_unlock_rcu_node(my_rnp); /* irqs remain disabled. */
4404                __call_rcu_nocb_wake(my_rdp, true, flags);
4405        } else {
4406                rcu_nocb_unlock(my_rdp); /* irqs remain disabled. */
4407                raw_spin_unlock_irqrestore_rcu_node(my_rnp, flags);
4408        }
4409        if (needwake)
4410                rcu_gp_kthread_wake();
4411        lockdep_assert_irqs_enabled();
4412        WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
4413                  !rcu_segcblist_empty(&rdp->cblist),
4414                  "rcutree_migrate_callbacks: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
4415                  cpu, rcu_segcblist_n_cbs(&rdp->cblist),
4416                  rcu_segcblist_first_cb(&rdp->cblist));
4417}
4418#endif /* CONFIG_HOTPLUG_CPU */
4419
4420/*
4421 * Use expedited RCU grace periods to make suspend and hibernation
4422 * run faster.
4423 */
4424static int rcu_pm_notify(struct notifier_block *self,
4425                         unsigned long action, void *hcpu)
4426{
4427        switch (action) {
4428        case PM_HIBERNATION_PREPARE:
4429        case PM_SUSPEND_PREPARE:
4430                rcu_expedite_gp();
4431                break;
4432        case PM_POST_HIBERNATION:
4433        case PM_POST_SUSPEND:
4434                rcu_unexpedite_gp();
4435                break;
4436        default:
4437                break;
4438        }
4439        return NOTIFY_OK;
4440}
4441
4442/*
4443 * Spawn the kthreads that handle RCU's grace periods.
4444 */
4445static int __init rcu_spawn_gp_kthread(void)
4446{
4447        unsigned long flags;
4448        int kthread_prio_in = kthread_prio;
4449        struct rcu_node *rnp;
4450        struct sched_param sp;
4451        struct task_struct *t;
4452
4453        /* Force priority into range. */
4454        if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 2
4455            && IS_BUILTIN(CONFIG_RCU_TORTURE_TEST))
4456                kthread_prio = 2;
4457        else if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
4458                kthread_prio = 1;
4459        else if (kthread_prio < 0)
4460                kthread_prio = 0;
4461        else if (kthread_prio > 99)
4462                kthread_prio = 99;
4463
4464        if (kthread_prio != kthread_prio_in)
4465                pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
4466                         kthread_prio, kthread_prio_in);
4467
4468        rcu_scheduler_fully_active = 1;
4469        t = kthread_create(rcu_gp_kthread, NULL, "%s", rcu_state.name);
4470        if (WARN_ONCE(IS_ERR(t), "%s: Could not start grace-period kthread, OOM is now expected behavior\n", __func__))
4471                return 0;
4472        if (kthread_prio) {
4473                sp.sched_priority = kthread_prio;
4474                sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
4475        }
4476        rnp = rcu_get_root();
4477        raw_spin_lock_irqsave_rcu_node(rnp, flags);
4478        WRITE_ONCE(rcu_state.gp_activity, jiffies);
4479        WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
4480        // Reset .gp_activity and .gp_req_activity before setting .gp_kthread.
4481        smp_store_release(&rcu_state.gp_kthread, t);  /* ^^^ */
4482        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
4483        wake_up_process(t);
4484        rcu_spawn_nocb_kthreads();
4485        rcu_spawn_boost_kthreads();
4486        rcu_spawn_core_kthreads();
4487        return 0;
4488}
4489early_initcall(rcu_spawn_gp_kthread);
4490
4491/*
4492 * This function is invoked towards the end of the scheduler's
4493 * initialization process.  Before this is called, the idle task might
4494 * invoke synchronous grace-period primitives (during which time, this idle
4495 * task is booting the system, and such primitives are no-ops).  After this
4496 * function is called, any synchronous grace-period primitives are run as
4497 * expedited, with the requesting task driving the grace period forward.
4498 * A later core_initcall() rcu_set_runtime_mode() will switch to full
4499 * runtime RCU functionality.
4500 */
4501void rcu_scheduler_starting(void)
4502{
4503        WARN_ON(num_online_cpus() != 1);
4504        WARN_ON(nr_context_switches() > 0);
4505        rcu_test_sync_prims();
4506        rcu_scheduler_active = RCU_SCHEDULER_INIT;
4507        rcu_test_sync_prims();
4508}
4509
4510/*
4511 * Helper function for rcu_init() that initializes the rcu_state structure.
4512 */
4513static void __init rcu_init_one(void)
4514{
4515        static const char * const buf[] = RCU_NODE_NAME_INIT;
4516        static const char * const fqs[] = RCU_FQS_NAME_INIT;
4517        static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
4518        static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
4519
4520        int levelspread[RCU_NUM_LVLS];          /* kids/node in each level. */
4521        int cpustride = 1;
4522        int i;
4523        int j;
4524        struct rcu_node *rnp;
4525
4526        BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
4527
4528        /* Silence gcc 4.8 false positive about array index out of range. */
4529        if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
4530                panic("rcu_init_one: rcu_num_lvls out of range");
4531
4532        /* Initialize the level-tracking arrays. */
4533
4534        for (i = 1; i < rcu_num_lvls; i++)
4535                rcu_state.level[i] =
4536                        rcu_state.level[i - 1] + num_rcu_lvl[i - 1];
4537        rcu_init_levelspread(levelspread, num_rcu_lvl);
4538
4539        /* Initialize the elements themselves, starting from the leaves. */
4540
4541        for (i = rcu_num_lvls - 1; i >= 0; i--) {
4542                cpustride *= levelspread[i];
4543                rnp = rcu_state.level[i];
4544                for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
4545                        raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
4546                        lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
4547                                                   &rcu_node_class[i], buf[i]);
4548                        raw_spin_lock_init(&rnp->fqslock);
4549                        lockdep_set_class_and_name(&rnp->fqslock,
4550                                                   &rcu_fqs_class[i], fqs[i]);
4551                        rnp->gp_seq = rcu_state.gp_seq;
4552                        rnp->gp_seq_needed = rcu_state.gp_seq;
4553                        rnp->completedqs = rcu_state.gp_seq;
4554                        rnp->qsmask = 0;
4555                        rnp->qsmaskinit = 0;
4556                        rnp->grplo = j * cpustride;
4557                        rnp->grphi = (j + 1) * cpustride - 1;
4558                        if (rnp->grphi >= nr_cpu_ids)
4559                                rnp->grphi = nr_cpu_ids - 1;
4560                        if (i == 0) {
4561                                rnp->grpnum = 0;
4562                                rnp->grpmask = 0;
4563                                rnp->parent = NULL;
4564                        } else {
4565                                rnp->grpnum = j % levelspread[i - 1];
4566                                rnp->grpmask = BIT(rnp->grpnum);
4567                                rnp->parent = rcu_state.level[i - 1] +
4568                                              j / levelspread[i - 1];
4569                        }
4570                        rnp->level = i;
4571                        INIT_LIST_HEAD(&rnp->blkd_tasks);
4572                        rcu_init_one_nocb(rnp);
4573                        init_waitqueue_head(&rnp->exp_wq[0]);
4574                        init_waitqueue_head(&rnp->exp_wq[1]);
4575                        init_waitqueue_head(&rnp->exp_wq[2]);
4576                        init_waitqueue_head(&rnp->exp_wq[3]);
4577                        spin_lock_init(&rnp->exp_lock);
4578                }
4579        }
4580
4581        init_swait_queue_head(&rcu_state.gp_wq);
4582        init_swait_queue_head(&rcu_state.expedited_wq);
4583        rnp = rcu_first_leaf_node();
4584        for_each_possible_cpu(i) {
4585                while (i > rnp->grphi)
4586                        rnp++;
4587                per_cpu_ptr(&rcu_data, i)->mynode = rnp;
4588                rcu_boot_init_percpu_data(i);
4589        }
4590}
4591
4592/*
4593 * Compute the rcu_node tree geometry from kernel parameters.  This cannot
4594 * replace the definitions in tree.h because those are needed to size
4595 * the ->node array in the rcu_state structure.
4596 */
4597void rcu_init_geometry(void)
4598{
4599        ulong d;
4600        int i;
4601        static unsigned long old_nr_cpu_ids;
4602        int rcu_capacity[RCU_NUM_LVLS];
4603        static bool initialized;
4604
4605        if (initialized) {
4606                /*
4607                 * Warn if setup_nr_cpu_ids() had not yet been invoked,
4608                 * unless nr_cpu_ids == NR_CPUS, in which case who cares?
4609                 */
4610                WARN_ON_ONCE(old_nr_cpu_ids != nr_cpu_ids);
4611                return;
4612        }
4613
4614        old_nr_cpu_ids = nr_cpu_ids;
4615        initialized = true;
4616
4617        /*
4618         * Initialize any unspecified boot parameters.
4619         * The default values of jiffies_till_first_fqs and
4620         * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
4621         * value, which is a function of HZ, plus one for each
4622         * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
4623         */
4624        d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
4625        if (jiffies_till_first_fqs == ULONG_MAX)
4626                jiffies_till_first_fqs = d;
4627        if (jiffies_till_next_fqs == ULONG_MAX)
4628                jiffies_till_next_fqs = d;
4629        adjust_jiffies_till_sched_qs();
4630
4631        /* If the compile-time values are accurate, just leave. */
4632        if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
4633            nr_cpu_ids == NR_CPUS)
4634                return;
4635        pr_info("Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
4636                rcu_fanout_leaf, nr_cpu_ids);
4637
4638        /*
4639         * The boot-time rcu_fanout_leaf parameter must be at least two
4640         * and cannot exceed the number of bits in the rcu_node masks.
4641         * Complain and fall back to the compile-time values if this
4642         * limit is exceeded.
4643         */
4644        if (rcu_fanout_leaf < 2 ||
4645            rcu_fanout_leaf > sizeof(unsigned long) * 8) {
4646                rcu_fanout_leaf = RCU_FANOUT_LEAF;
4647                WARN_ON(1);
4648                return;
4649        }
4650
4651        /*
4652 * Compute the number of nodes that can be handled by an rcu_node tree
4653         * with the given number of levels.
4654         */
4655        rcu_capacity[0] = rcu_fanout_leaf;
4656        for (i = 1; i < RCU_NUM_LVLS; i++)
4657                rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
4658
4659        /*
4660         * The tree must be able to accommodate the configured number of CPUs.
4661         * If this limit is exceeded, fall back to the compile-time values.
4662         */
4663        if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
4664                rcu_fanout_leaf = RCU_FANOUT_LEAF;
4665                WARN_ON(1);
4666                return;
4667        }
4668
4669        /* Calculate the number of levels in the tree. */
4670        for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
4671        }
4672        rcu_num_lvls = i + 1;
4673
4674        /* Calculate the number of rcu_nodes at each level of the tree. */
4675        for (i = 0; i < rcu_num_lvls; i++) {
4676                int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
4677                num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
4678        }
4679
4680        /* Calculate the total number of rcu_node structures. */
4681        rcu_num_nodes = 0;
4682        for (i = 0; i < rcu_num_lvls; i++)
4683                rcu_num_nodes += num_rcu_lvl[i];
4684}
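/*
 * Worked example (assuming RCU_FANOUT = 64): with rcu_fanout_leaf = 16 and
 * nr_cpu_ids = 80, rcu_capacity[] begins {16, 1024, ...}, so the loops
 * above produce rcu_num_lvls = 2, num_rcu_lvl[] = {1, 5}, and
 * rcu_num_nodes = 6: a single root rcu_node fanning out to five leaf
 * rcu_node structures covering sixteen CPUs each.
 */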
4685
4686/*
4687 * Dump out the structure of the rcu_node combining tree associated
4688 * with the rcu_state structure.
4689 */
4690static void __init rcu_dump_rcu_node_tree(void)
4691{
4692        int level = 0;
4693        struct rcu_node *rnp;
4694
4695        pr_info("rcu_node tree layout dump\n");
4696        pr_info(" ");
4697        rcu_for_each_node_breadth_first(rnp) {
4698                if (rnp->level != level) {
4699                        pr_cont("\n");
4700                        pr_info(" ");
4701                        level = rnp->level;
4702                }
4703                pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
4704        }
4705        pr_cont("\n");
4706}
4707
4708struct workqueue_struct *rcu_gp_wq;
4709struct workqueue_struct *rcu_par_gp_wq;
4710
4711static void __init kfree_rcu_batch_init(void)
4712{
4713        int cpu;
4714        int i;
4715
4716        /* Clamp it to [0:100] seconds interval. */
4717        if (rcu_delay_page_cache_fill_msec < 0 ||
4718                rcu_delay_page_cache_fill_msec > 100 * MSEC_PER_SEC) {
4719
4720                rcu_delay_page_cache_fill_msec =
4721                        clamp(rcu_delay_page_cache_fill_msec, 0,
4722                                (int) (100 * MSEC_PER_SEC));
4723
4724                pr_info("Adjusting rcutree.rcu_delay_page_cache_fill_msec to %d ms.\n",
4725                        rcu_delay_page_cache_fill_msec);
4726        }
4727
4728        for_each_possible_cpu(cpu) {
4729                struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
4730
4731                for (i = 0; i < KFREE_N_BATCHES; i++) {
4732                        INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
4733                        krcp->krw_arr[i].krcp = krcp;
4734                }
4735
4736                INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
4737                INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
4738                krcp->initialized = true;
4739        }
4740        if (register_shrinker(&kfree_rcu_shrinker))
4741                pr_err("Failed to register kfree_rcu() shrinker!\n");
4742}
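/*
 * Usage sketch (illustrative only): the per-CPU batching state initialized
 * above backs kfree_rcu().  A hypothetical user embeds an rcu_head in its
 * structure and lets RCU free it after a grace period, with no callback
 * function of its own:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void foo_release(struct foo *p)
 *	{
 *		kfree_rcu(p, rh);	// batched here, kfree()d after a grace period
 *	}
 */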
4743
4744void __init rcu_init(void)
4745{
4746        int cpu;
4747
4748        rcu_early_boot_tests();
4749
4750        kfree_rcu_batch_init();
4751        rcu_bootup_announce();
4752        rcu_init_geometry();
4753        rcu_init_one();
4754        if (dump_tree)
4755                rcu_dump_rcu_node_tree();
4756        if (use_softirq)
4757                open_softirq(RCU_SOFTIRQ, rcu_core_si);
4758
4759        /*
4760         * We don't need protection against CPU-hotplug here because
4761         * this is called early in boot, before either interrupts
4762         * or the scheduler is operational.
4763         */
4764        pm_notifier(rcu_pm_notify, 0);
4765        for_each_online_cpu(cpu) {
4766                rcutree_prepare_cpu(cpu);
4767                rcu_cpu_starting(cpu);
4768                rcutree_online_cpu(cpu);
4769        }
4770
4771        /* Create workqueue for Tree SRCU and for expedited GPs. */
4772        rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
4773        WARN_ON(!rcu_gp_wq);
4774        rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
4775        WARN_ON(!rcu_par_gp_wq);
4776
4777        /* Fill in default value for rcutree.qovld boot parameter. */
4778        /* -After- the rcu_node ->lock fields are initialized! */
4779        if (qovld < 0)
4780                qovld_calc = DEFAULT_RCU_QOVLD_MULT * qhimark;
4781        else
4782                qovld_calc = qovld;
4783}
4784
4785#include "tree_stall.h"
4786#include "tree_exp.h"
4787#include "tree_plugin.h"
4788