// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Authors: Paul McKenney <paulmck@linux.ibm.com>
 *          Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *              Documentation/RCU/ *.txt
 *
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)                                           \
do {                                                                    \
        spin_lock(&ACCESS_PRIVATE(p, lock));                            \
        smp_mb__after_unlock_lock();                                    \
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)                                       \
do {                                                                    \
        spin_lock_irq(&ACCESS_PRIVATE(p, lock));                        \
        smp_mb__after_unlock_lock();                                    \
} while (0)

#define spin_unlock_irq_rcu_node(p)                                     \
        spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)                            \
do {                                                                    \
        spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);             \
        smp_mb__after_unlock_lock();                                    \
} while (0)

#define spin_unlock_irqrestore_rcu_node(p, flags)                       \
        spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them, so this function leaves
 * the per-CPU ->srcu_lock_count[] and ->srcu_unlock_count[] arrays
 * strictly alone.
 */
static void init_srcu_struct_nodes(struct srcu_struct *ssp)
{
        int cpu;
        int i;
        int level = 0;
        int levelspread[RCU_NUM_LVLS];
        struct srcu_data *sdp;
        struct srcu_node *snp;
        struct srcu_node *snp_first;

        /* Initialize geometry if it has not already been initialized. */
        rcu_init_geometry();

        /* Work out the overall tree geometry. */
        ssp->level[0] = &ssp->node[0];
        for (i = 1; i < rcu_num_lvls; i++)
                ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
        rcu_init_levelspread(levelspread, num_rcu_lvl);

        /* Each pass through this loop initializes one srcu_node structure. */
        srcu_for_each_node_breadth_first(ssp, snp) {
                spin_lock_init(&ACCESS_PRIVATE(snp, lock));
                WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
                             ARRAY_SIZE(snp->srcu_data_have_cbs));
                for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
                        snp->srcu_have_cbs[i] = 0;
                        snp->srcu_data_have_cbs[i] = 0;
                }
                snp->srcu_gp_seq_needed_exp = 0;
                snp->grplo = -1;
                snp->grphi = -1;
                if (snp == &ssp->node[0]) {
                        /* Root node, special case. */
                        snp->srcu_parent = NULL;
                        continue;
                }

                /* Non-root node. */
                if (snp == ssp->level[level + 1])
                        level++;
                snp->srcu_parent = ssp->level[level - 1] +
                                   (snp - ssp->level[level]) /
                                   levelspread[level - 1];
        }

        /*
         * Initialize the per-CPU srcu_data array, which feeds into the
         * leaves of the srcu_node tree.
         */
        WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
                     ARRAY_SIZE(sdp->srcu_unlock_count));
        level = rcu_num_lvls - 1;
        snp_first = ssp->level[level];
        for_each_possible_cpu(cpu) {
                sdp = per_cpu_ptr(ssp->sda, cpu);
                spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
                rcu_segcblist_init(&sdp->srcu_cblist);
                sdp->srcu_cblist_invoking = false;
                sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
                sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
                sdp->mynode = &snp_first[cpu / levelspread[level]];
                for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
                        if (snp->grplo < 0)
                                snp->grplo = cpu;
                        snp->grphi = cpu;
                }
                sdp->cpu = cpu;
                INIT_WORK(&sdp->work, srcu_invoke_callbacks);
                timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
                sdp->ssp = ssp;
                sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
        }
}
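
/*
 * Worked example of the geometry computed above (illustrative numbers,
 * not a description of any particular config): with RCU_FANOUT=64 and
 * RCU_FANOUT_LEAF=16 on a 128-CPU system, rcu_num_lvls is 2, ->level[0]
 * points to the lone root srcu_node, ->level[1] points to the first of
 * eight leaves, and levelspread[1] is 16, so the srcu_data structure
 * for CPU 37 (37 / 16 == 2) hangs off leaf ->level[1][2], whose
 * ->grplo/->grphi span CPUs 32-47.
 */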

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter tells us that ->sda has already been wired up to the
 * statically allocated srcu_data structures.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
        mutex_init(&ssp->srcu_cb_mutex);
        mutex_init(&ssp->srcu_gp_mutex);
        ssp->srcu_idx = 0;
        ssp->srcu_gp_seq = 0;
        ssp->srcu_barrier_seq = 0;
        mutex_init(&ssp->srcu_barrier_mutex);
        atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
        INIT_DELAYED_WORK(&ssp->work, process_srcu);
        if (!is_static)
                ssp->sda = alloc_percpu(struct srcu_data);
        if (!ssp->sda)
                return -ENOMEM;
        init_srcu_struct_nodes(ssp);
        ssp->srcu_gp_seq_needed_exp = 0;
        ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
        smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
        return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
                       struct lock_class_key *key)
{
        /* Don't re-initialize a lock while it is held. */
        debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
        lockdep_init_map(&ssp->dep_map, name, key, 0);
        spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
        return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
        spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
        return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
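
/*
 * Usage sketch (hypothetical caller code, not part of this file): a
 * dynamically allocated SRCU domain is bracketed by init_srcu_struct()
 * and cleanup_srcu_struct(), with srcu_barrier() run first if call_srcu()
 * was ever used:
 *
 *      struct my_obj {
 *              struct srcu_struct srcu;
 *      };
 *
 *      static int my_obj_setup(struct my_obj *mp)
 *      {
 *              return init_srcu_struct(&mp->srcu);
 *      }
 *
 *      static void my_obj_teardown(struct my_obj *mp)
 *      {
 *              srcu_barrier(&mp->srcu);
 *              cleanup_srcu_struct(&mp->srcu);
 *      }
 *
 * Statically allocated domains use DEFINE_SRCU() or DEFINE_STATIC_SRCU()
 * instead and need neither call.  The my_obj names are illustrative.
 */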

/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
        unsigned long flags;

        /* The smp_load_acquire() pairs with the smp_store_release(). */
        if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
                return; /* Already initialized. */
        spin_lock_irqsave_rcu_node(ssp, flags);
        if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
                spin_unlock_irqrestore_rcu_node(ssp, flags);
                return;
        }
        init_srcu_struct_fields(ssp, true);
        spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
        int cpu;
        unsigned long sum = 0;

        for_each_possible_cpu(cpu) {
                struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

                sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
        }
        return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
        int cpu;
        unsigned long sum = 0;

        for_each_possible_cpu(cpu) {
                struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

                sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
        }
        return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
        unsigned long unlocks;

        unlocks = srcu_readers_unlock_idx(ssp, idx);

        /*
         * Make sure that a lock is always counted if the corresponding
         * unlock is counted. Needs to be a smp_mb() as the read side may
         * contain a read from a variable that is written to before the
         * synchronize_srcu() in the write side. In this case smp_mb()s
         * A and B act like the store buffering pattern.
         *
         * This smp_mb() also pairs with smp_mb() C to prevent accesses
         * after the synchronize_srcu() from being executed before the
         * grace period ends.
         */
        smp_mb(); /* A */

        /*
         * If the locks are the same as the unlocks, then there must have
         * been no readers on this index at some time in between. This does
         * not mean that there are no more readers, as one could have read
         * the current index but not have incremented the lock counter yet.
         *
         * So suppose that the updater is preempted here for so long
         * that more than ULONG_MAX non-nested readers come and go in
         * the meantime.  It turns out that this cannot result in overflow
         * because if a reader modifies its unlock count after we read it
         * above, then that reader's next load of ->srcu_idx is guaranteed
         * to get the new value, which will cause it to operate on the
         * other bank of counters, where it cannot contribute to the
         * overflow of these counters.  This means that there is a maximum
         * of 2*NR_CPUS increments, which cannot overflow given current
         * systems, especially not on 64-bit systems.
         *
         * OK, how about nesting?  This does impose a limit on nesting
         * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
         * especially on 64-bit systems.
         */
        return srcu_readers_lock_idx(ssp, idx) == unlocks;
}
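
/*
 * Informal sketch of the store-buffering pattern mentioned above, with
 * X standing for a hypothetical SRCU-protected variable.  Each side
 * does a store (the reader's counter increment, the updater's write to
 * X) followed by a load of the other side's location:
 *
 *      reader                          updater
 *      ------                          -------
 *      __srcu_read_lock():             (before scanning the counters:)
 *        increment ->srcu_lock_count   WRITE_ONCE(X, 1);
 *        smp_mb(); // B                smp_mb(); // A
 *      r1 = READ_ONCE(X);              scan lock/unlock counts;
 *
 * Without smp_mb()s A and B, the reader could see X == 0 while the
 * updater's scan misses the reader's increment, ending the grace
 * period with the reader still active.  The barriers forbid that
 * outcome, just as in the classic SB litmus test.
 */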

/**
 * srcu_readers_active - returns true if there are readers, and false
 *                       otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
        int cpu;
        unsigned long sum = 0;

        for_each_possible_cpu(cpu) {
                struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

                sum += READ_ONCE(cpuc->srcu_lock_count[0]);
                sum += READ_ONCE(cpuc->srcu_lock_count[1]);
                sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
                sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
        }
        return sum;
}

#define SRCU_INTERVAL           1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
        if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
                         READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
                return 0;
        return SRCU_INTERVAL;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
        int cpu;

        if (WARN_ON(!srcu_get_delay(ssp)))
                return; /* Just leak it! */
        if (WARN_ON(srcu_readers_active(ssp)))
                return; /* Just leak it! */
        flush_delayed_work(&ssp->work);
        for_each_possible_cpu(cpu) {
                struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

                del_timer_sync(&sdp->delay_work);
                flush_work(&sdp->work);
                if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
                        return; /* Forgot srcu_barrier(), so just leak it! */
        }
        if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
            WARN_ON(srcu_readers_active(ssp))) {
                pr_info("%s: Active srcu_struct %p state: %d\n",
                        __func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
                return; /* Caller forgot to stop doing call_srcu()? */
        }
        free_percpu(ssp->sda);
        ssp->sda = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
        int idx;

        idx = READ_ONCE(ssp->srcu_idx) & 0x1;
        this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
        smp_mb(); /* B */  /* Avoid leaking the critical section. */
        return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
        smp_mb(); /* C */  /* Avoid leaking the critical section. */
        this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
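
/*
 * Reader-side usage sketch (hypothetical code, not part of this file),
 * using the srcu_read_lock()/srcu_read_unlock() wrappers from srcu.h
 * that call the functions above.  The returned index must be passed to
 * the matching srcu_read_unlock(), and the critical section may sleep:
 *
 *      int idx;
 *      struct foo *p;
 *
 *      idx = srcu_read_lock(&my_srcu);
 *      p = srcu_dereference(my_foo, &my_srcu);
 *      if (p)
 *              do_something_that_may_sleep(p);
 *      srcu_read_unlock(&my_srcu, idx);
 *
 * Here my_srcu, my_foo, struct foo, and do_something_that_may_sleep()
 * are illustrative assumptions.
 */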

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY          5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
        struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
        int state;

        lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
        WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
        spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
        rcu_segcblist_advance(&sdp->srcu_cblist,
                              rcu_seq_current(&ssp->srcu_gp_seq));
        (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
                                       rcu_seq_snap(&ssp->srcu_gp_seq));
        spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
        smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
        rcu_seq_start(&ssp->srcu_gp_seq);
        state = rcu_seq_state(ssp->srcu_gp_seq);
        WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}

static void srcu_delay_timer(struct timer_list *t)
{
        struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);

        queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
}

static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
                                       unsigned long delay)
{
        if (!delay) {
                queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
                return;
        }

        timer_reduce(&sdp->delay_work, jiffies + delay);
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
        srcu_queue_delayed_work_on(sdp, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
                                  unsigned long mask, unsigned long delay)
{
        int cpu;

        for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
                if (!(mask & (1 << (cpu - snp->grplo))))
                        continue;
                srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
        }
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *ssp)
{
        unsigned long cbdelay;
        bool cbs;
        bool last_lvl;
        int cpu;
        unsigned long flags;
        unsigned long gpseq;
        int idx;
        unsigned long mask;
        struct srcu_data *sdp;
        struct srcu_node *snp;

        /* Prevent more than one additional grace period. */
        mutex_lock(&ssp->srcu_cb_mutex);

        /* End the current grace period. */
        spin_lock_irq_rcu_node(ssp);
        idx = rcu_seq_state(ssp->srcu_gp_seq);
        WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
        cbdelay = srcu_get_delay(ssp);
        WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
        rcu_seq_end(&ssp->srcu_gp_seq);
        gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
        if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
                WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
        spin_unlock_irq_rcu_node(ssp);
        mutex_unlock(&ssp->srcu_gp_mutex);
        /* A new grace period can start at this point.  But only one. */

        /* Initiate callback invocation as needed. */
        idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
        srcu_for_each_node_breadth_first(ssp, snp) {
                spin_lock_irq_rcu_node(snp);
                cbs = false;
                last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
                if (last_lvl)
                        cbs = snp->srcu_have_cbs[idx] == gpseq;
                snp->srcu_have_cbs[idx] = gpseq;
                rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
                if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
                        WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
                mask = snp->srcu_data_have_cbs[idx];
                snp->srcu_data_have_cbs[idx] = 0;
                spin_unlock_irq_rcu_node(snp);
                if (cbs)
                        srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);

                /* Occasionally prevent srcu_data counter wrap. */
                if (!(gpseq & counter_wrap_check) && last_lvl)
                        for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
                                sdp = per_cpu_ptr(ssp->sda, cpu);
                                spin_lock_irqsave_rcu_node(sdp, flags);
                                if (ULONG_CMP_GE(gpseq,
                                                 sdp->srcu_gp_seq_needed + 100))
                                        sdp->srcu_gp_seq_needed = gpseq;
                                if (ULONG_CMP_GE(gpseq,
                                                 sdp->srcu_gp_seq_needed_exp + 100))
                                        sdp->srcu_gp_seq_needed_exp = gpseq;
                                spin_unlock_irqrestore_rcu_node(sdp, flags);
                        }
        }

        /* Callback initiation done, allow grace periods after next. */
        mutex_unlock(&ssp->srcu_cb_mutex);

        /* Start a new grace period if needed. */
        spin_lock_irq_rcu_node(ssp);
        gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
        if (!rcu_seq_state(gpseq) &&
            ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
                srcu_gp_start(ssp);
                spin_unlock_irq_rcu_node(ssp);
                srcu_reschedule(ssp, 0);
        } else {
                spin_unlock_irq_rcu_node(ssp);
        }
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
                                  unsigned long s)
{
        unsigned long flags;

        for (; snp != NULL; snp = snp->srcu_parent) {
                if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
                    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
                        return;
                spin_lock_irqsave_rcu_node(snp, flags);
                if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
                        spin_unlock_irqrestore_rcu_node(snp, flags);
                        return;
                }
                WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
                spin_unlock_irqrestore_rcu_node(snp, flags);
        }
        spin_lock_irqsave_rcu_node(ssp, flags);
        if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
                WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
        spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
                                 unsigned long s, bool do_norm)
{
        unsigned long flags;
        int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
        struct srcu_node *snp = sdp->mynode;
        unsigned long snp_seq;

        /* Each pass through the loop does one level of the srcu_node tree. */
        for (; snp != NULL; snp = snp->srcu_parent) {
                if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
                        return; /* GP already done and CBs recorded. */
                spin_lock_irqsave_rcu_node(snp, flags);
                if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
                        snp_seq = snp->srcu_have_cbs[idx];
                        if (snp == sdp->mynode && snp_seq == s)
                                snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
                        spin_unlock_irqrestore_rcu_node(snp, flags);
                        if (snp == sdp->mynode && snp_seq != s) {
                                srcu_schedule_cbs_sdp(sdp, do_norm
                                                           ? SRCU_INTERVAL
                                                           : 0);
                                return;
                        }
                        if (!do_norm)
                                srcu_funnel_exp_start(ssp, snp, s);
                        return;
                }
                snp->srcu_have_cbs[idx] = s;
                if (snp == sdp->mynode)
                        snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
                if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
                        WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
                spin_unlock_irqrestore_rcu_node(snp, flags);
        }

        /* Top of tree, must ensure the grace period will be started. */
        spin_lock_irqsave_rcu_node(ssp, flags);
        if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
                /*
                 * Record need for grace period s.  Pair with load
                 * acquire setting up for initialization.
                 */
                smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
        }
        if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
                WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);

        /* If grace period not already done and none in progress, start it. */
        if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
            rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
                WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
                srcu_gp_start(ssp);
                if (likely(srcu_init_done))
                        queue_delayed_work(rcu_gp_wq, &ssp->work,
                                           srcu_get_delay(ssp));
                else if (list_empty(&ssp->work.work.entry))
                        list_add(&ssp->work.work.entry, &srcu_boot_list);
        }
        spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
        for (;;) {
                if (srcu_readers_active_idx_check(ssp, idx))
                        return true;
                if (--trycount + !srcu_get_delay(ssp) <= 0)
                        return false;
                udelay(SRCU_RETRY_CHECK_DELAY);
        }
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *ssp)
{
        /*
         * Ensure that if this updater saw a given reader's increment
         * from __srcu_read_lock(), that reader was using an old value
         * of ->srcu_idx.  Also ensure that if a given reader sees the
         * new value of ->srcu_idx, this updater's earlier scans cannot
         * have seen that reader's increments (which is OK, because this
         * grace period need not wait on that reader).
         */
        smp_mb(); /* E */  /* Pairs with B and C. */

        WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

        /*
         * Ensure that if the updater misses an __srcu_read_unlock()
         * increment, that task's next __srcu_read_lock() will see the
         * above counter update.  Note that both this memory barrier
         * and the one in srcu_readers_active_idx_check() provide the
         * guarantee for __srcu_read_lock().
         */
        smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several current requests from idle for a new
 * grace period to specify expediting, because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
        unsigned long curseq;
        unsigned long flags;
        struct srcu_data *sdp;
        unsigned long t;
        unsigned long tlast;

        check_init_srcu_struct(ssp);
        /* If the local srcu_data structure has callbacks, not idle.  */
        sdp = raw_cpu_ptr(ssp->sda);
        spin_lock_irqsave_rcu_node(sdp, flags);
        if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
                spin_unlock_irqrestore_rcu_node(sdp, flags);
                return false; /* Callbacks already present, so not idle. */
        }
        spin_unlock_irqrestore_rcu_node(sdp, flags);

        /*
         * No local callbacks, so probabilistically probe global state.
         * Exact information would require acquiring locks, which would
         * kill scalability, hence the probabilistic nature of the probe.
         */

        /* First, see if enough time has passed since the last GP. */
        t = ktime_get_mono_fast_ns();
        tlast = READ_ONCE(ssp->srcu_last_gp_end);
        if (exp_holdoff == 0 ||
            time_in_range_open(t, tlast, tlast + exp_holdoff))
                return false; /* Too soon after last GP. */

        /* Next, check for probable idleness. */
        curseq = rcu_seq_current(&ssp->srcu_gp_seq);
        smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
        if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
                return false; /* Grace period in progress, so not idle. */
        smp_mb(); /* Order ->srcu_gp_seq with prior access. */
        if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
                return false; /* GP # changed, so not idle. */
        return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Start an SRCU grace period, and also queue the callback if non-NULL.
 */
static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
                                             struct rcu_head *rhp, bool do_norm)
{
        unsigned long flags;
        int idx;
        bool needexp = false;
        bool needgp = false;
        unsigned long s;
        struct srcu_data *sdp;

        check_init_srcu_struct(ssp);
        idx = srcu_read_lock(ssp);
        sdp = raw_cpu_ptr(ssp->sda);
        spin_lock_irqsave_rcu_node(sdp, flags);
        if (rhp)
                rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
        rcu_segcblist_advance(&sdp->srcu_cblist,
                              rcu_seq_current(&ssp->srcu_gp_seq));
        s = rcu_seq_snap(&ssp->srcu_gp_seq);
        (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
        if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
                sdp->srcu_gp_seq_needed = s;
                needgp = true;
        }
        if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
                sdp->srcu_gp_seq_needed_exp = s;
                needexp = true;
        }
        spin_unlock_irqrestore_rcu_node(sdp, flags);
        if (needgp)
                srcu_funnel_gp_start(ssp, sdp, s, do_norm);
        else if (needexp)
                srcu_funnel_exp_start(ssp, sdp->mynode, s);
        srcu_read_unlock(ssp, idx);
        return s;
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
                        rcu_callback_t func, bool do_norm)
{
        if (debug_rcu_head_queue(rhp)) {
                /* Probable double call_srcu(), so leak the callback. */
                WRITE_ONCE(rhp->func, srcu_leak_callback);
                WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
                return;
        }
        rhp->func = func;
        (void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @ssp: srcu_struct on which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
               rcu_callback_t func)
{
        __call_srcu(ssp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
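
/*
 * Usage sketch for call_srcu() (hypothetical caller code): embed the
 * rcu_head in the SRCU-protected structure and free the structure from
 * the callback once the grace period has elapsed:
 *
 *      struct foo {
 *              struct rcu_head rh;
 *              int data;
 *      };
 *
 *      static void foo_reclaim(struct rcu_head *rhp)
 *      {
 *              kfree(container_of(rhp, struct foo, rh));
 *      }
 *
 *      old = rcu_replace_pointer(my_foo, new, lockdep_is_held(&my_lock));
 *      if (old)
 *              call_srcu(&my_srcu, &old->rh, foo_reclaim);
 *
 * The struct foo, my_foo, my_srcu, and my_lock names are illustrative
 * assumptions, not part of the SRCU API.
 */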

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{
        struct rcu_synchronize rcu;

        RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
                         lock_is_held(&rcu_bh_lock_map) ||
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

        if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
                return;
        might_sleep();
        check_init_srcu_struct(ssp);
        init_completion(&rcu.completion);
        init_rcu_head_on_stack(&rcu.head);
        __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
        wait_for_completion(&rcu.completion);
        destroy_rcu_head_on_stack(&rcu.head);

        /*
         * Make sure that later code is ordered after the SRCU grace
         * period.  This pairs with the spin_lock_irq_rcu_node()
         * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
         * because the current CPU might have been totally uninvolved with
         * (and thus unordered against) that grace period.
         */
        smp_mb();
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
        __synchronize_srcu(ssp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both index ranks to drain to zero.  To avoid
 * possible starvation of synchronize_srcu(), it first waits for the
 * count of index ((->srcu_idx & 1) ^ 1) to drain to zero, then flips
 * ->srcu_idx and waits for the count of the other index to drain.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * Implementation of these memory-ordering guarantees is similar to
 * that of synchronize_rcu().
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
        if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
                synchronize_srcu_expedited(ssp);
        else
                __synchronize_srcu(ssp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
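
/*
 * Updater-side usage sketch for synchronize_srcu() (hypothetical code):
 * unpublish the pointer, wait out pre-existing readers, then reclaim:
 *
 *      struct foo *old;
 *
 *      spin_lock(&my_lock);
 *      old = rcu_dereference_protected(my_foo, lockdep_is_held(&my_lock));
 *      rcu_assign_pointer(my_foo, NULL);
 *      spin_unlock(&my_lock);
 *      synchronize_srcu(&my_srcu);
 *      kfree(old);
 *
 * Any reader that fetched the old pointer via srcu_dereference(my_foo,
 * &my_srcu) is guaranteed to have left its critical section before the
 * kfree().  The my_foo, my_lock, and my_srcu names are illustrative.
 */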

/**
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  It is the caller's responsibility
 * to make sure that grace period happens, for example, by invoking
 * call_srcu() after return from get_state_synchronize_srcu().
 */
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
{
        // Any prior manipulation of SRCU-protected data must happen
        // before the load from ->srcu_gp_seq.
        smp_mb();
        return rcu_seq_snap(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);

/**
 * start_poll_synchronize_srcu - Provide cookie and start grace period
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  Unlike get_state_synchronize_srcu(),
 * this function also ensures that any needed SRCU grace period will be
 * started.  This convenience does come at a cost in terms of CPU overhead.
 */
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{
        return srcu_gp_start_if_needed(ssp, NULL, true);
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);

/**
 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 * @ssp: srcu_struct to provide cookie for.
 * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
 *
 * This function takes the cookie that was returned from either
 * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
 * returns @true if an SRCU grace period elapsed since the time that the
 * cookie was created.
 *
 * Because cookies are finite in size, wrapping/overflow is possible.
 * This is more pronounced on 32-bit systems where cookies are 32 bits,
 * where in theory wrapping could happen in about 14 hours assuming
 * 25-microsecond expedited SRCU grace periods.  However, a more likely
 * overflow lower bound is on the order of 24 days in the case of
 * one-millisecond SRCU grace periods.  Of course, wrapping in a 64-bit
 * system requires geologic timespans, as in more than seven million years
 * even for expedited SRCU grace periods.
 *
 * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems
 * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU.  This uses
 * a 16-bit cookie, which rcutorture routinely wraps in a matter of a
 * few minutes.  If this proves to be a problem, this counter will be
 * expanded to the same size as for Tree SRCU.
 */
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
        if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))
                return false;
        // Ensure that the end of the SRCU grace period happens before
        // any subsequent code that the caller might execute.
        smp_mb(); // ^^^
        return true;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
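
/*
 * Polling usage sketch (hypothetical caller code): grab a cookie,
 * ensure a grace period gets started, then check for completion later
 * without ever blocking:
 *
 *      unsigned long cookie;
 *
 *      cookie = start_poll_synchronize_srcu(&my_srcu);
 *      ...
 *      if (poll_state_synchronize_srcu(&my_srcu, cookie))
 *              kfree(old);
 *      else
 *              defer_reclaim(old);
 *
 * A cookie from get_state_synchronize_srcu() is used the same way, but
 * the caller must then make the grace period happen, for example by
 * invoking call_srcu().  my_srcu, old, and defer_reclaim() are
 * illustrative assumptions.
 */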

/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
        struct srcu_data *sdp;
        struct srcu_struct *ssp;

        sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
        ssp = sdp->ssp;
        if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
                complete(&ssp->srcu_barrier_completion);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @ssp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *ssp)
{
        int cpu;
        struct srcu_data *sdp;
        unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);

        check_init_srcu_struct(ssp);
        mutex_lock(&ssp->srcu_barrier_mutex);
        if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
                smp_mb(); /* Force ordering following return. */
                mutex_unlock(&ssp->srcu_barrier_mutex);
                return; /* Someone else did our work for us. */
        }
        rcu_seq_start(&ssp->srcu_barrier_seq);
        init_completion(&ssp->srcu_barrier_completion);

        /* Initial count prevents reaching zero until all CBs are posted. */
        atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);

        /*
         * Each pass through this loop enqueues a callback, but only
         * on CPUs already having callbacks enqueued.  Note that if
         * a CPU already has callbacks enqueued, it must have already
         * registered the need for a future grace period, so all we
         * need do is enqueue a callback that will use the same
         * grace period as the last callback already in the queue.
         */
        for_each_possible_cpu(cpu) {
                sdp = per_cpu_ptr(ssp->sda, cpu);
                spin_lock_irq_rcu_node(sdp);
                atomic_inc(&ssp->srcu_barrier_cpu_cnt);
                sdp->srcu_barrier_head.func = srcu_barrier_cb;
                debug_rcu_head_queue(&sdp->srcu_barrier_head);
                if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
                                           &sdp->srcu_barrier_head)) {
                        debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
                        atomic_dec(&ssp->srcu_barrier_cpu_cnt);
                }
                spin_unlock_irq_rcu_node(sdp);
        }

        /* Remove the initial count, at which point reaching zero can happen. */
        if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
                complete(&ssp->srcu_barrier_completion);
        wait_for_completion(&ssp->srcu_barrier_completion);

        rcu_seq_end(&ssp->srcu_barrier_seq);
        mutex_unlock(&ssp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
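
/*
 * Usage sketch (hypothetical module exit path): srcu_barrier() is the
 * SRCU counterpart of rcu_barrier(), typically run after new call_srcu()
 * invocations have been stopped and before the domain is torn down:
 *
 *      static void __exit my_exit(void)
 *      {
 *              unregister_my_hooks();
 *              srcu_barrier(&my_srcu);
 *              cleanup_srcu_struct(&my_srcu);
 *      }
 *
 * Here my_exit(), unregister_my_hooks(), and my_srcu are illustrative
 * assumptions.
 */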

/**
 * srcu_batches_completed - return batches completed.
 * @ssp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *ssp)
{
        return READ_ONCE(ssp->srcu_idx);
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
 * completed in that state.
 */
static void srcu_advance_state(struct srcu_struct *ssp)
{
        int idx;

        mutex_lock(&ssp->srcu_gp_mutex);

        /*
         * Because readers might be delayed for an extended period after
         * fetching ->srcu_idx for their index, at any point in time there
         * might well be readers using both idx=0 and idx=1.  We therefore
         * need to wait for readers to clear from both index values before
         * invoking a callback.
         *
         * The load-acquire ensures that we see the accesses performed
         * by the prior grace period.
         */
        idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
        if (idx == SRCU_STATE_IDLE) {
                spin_lock_irq_rcu_node(ssp);
                if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
                        WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
                        spin_unlock_irq_rcu_node(ssp);
                        mutex_unlock(&ssp->srcu_gp_mutex);
                        return;
                }
                idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
                if (idx == SRCU_STATE_IDLE)
                        srcu_gp_start(ssp);
                spin_unlock_irq_rcu_node(ssp);
                if (idx != SRCU_STATE_IDLE) {
                        mutex_unlock(&ssp->srcu_gp_mutex);
                        return; /* Someone else started the grace period. */
                }
        }

        if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
                idx = 1 ^ (ssp->srcu_idx & 1);
                if (!try_check_zero(ssp, idx, 1)) {
                        mutex_unlock(&ssp->srcu_gp_mutex);
                        return; /* readers present, retry later. */
                }
                srcu_flip(ssp);
                spin_lock_irq_rcu_node(ssp);
                rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
                spin_unlock_irq_rcu_node(ssp);
        }

        if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

                /*
                 * SRCU read-side critical sections are normally short,
                 * so check at least twice in quick succession after a flip.
                 */
                idx = 1 ^ (ssp->srcu_idx & 1);
                if (!try_check_zero(ssp, idx, 2)) {
                        mutex_unlock(&ssp->srcu_gp_mutex);
                        return; /* readers present, retry later. */
                }
                srcu_gp_end(ssp);  /* Releases ->srcu_gp_mutex. */
        }
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
        long len;
        bool more;
        struct rcu_cblist ready_cbs;
        struct rcu_head *rhp;
        struct srcu_data *sdp;
        struct srcu_struct *ssp;

        sdp = container_of(work, struct srcu_data, work);

        ssp = sdp->ssp;
        rcu_cblist_init(&ready_cbs);
        spin_lock_irq_rcu_node(sdp);
        rcu_segcblist_advance(&sdp->srcu_cblist,
                              rcu_seq_current(&ssp->srcu_gp_seq));
        if (sdp->srcu_cblist_invoking ||
            !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
                spin_unlock_irq_rcu_node(sdp);
                return;  /* Someone else on the job or nothing to do. */
        }

        /* We are on the job!  Extract and invoke ready callbacks. */
        sdp->srcu_cblist_invoking = true;
        rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
        len = ready_cbs.len;
        spin_unlock_irq_rcu_node(sdp);
        rhp = rcu_cblist_dequeue(&ready_cbs);
        for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
                debug_rcu_head_unqueue(rhp);
                local_bh_disable();
                rhp->func(rhp);
                local_bh_enable();
        }
        WARN_ON_ONCE(ready_cbs.len);

        /*
         * Update counts, accelerate new callbacks, and if needed,
         * schedule another round of callback invocation.
         */
        spin_lock_irq_rcu_node(sdp);
        rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
        (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
                                       rcu_seq_snap(&ssp->srcu_gp_seq));
        sdp->srcu_cblist_invoking = false;
        more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
        spin_unlock_irq_rcu_node(sdp);
        if (more)
                srcu_schedule_cbs_sdp(sdp, 0);
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
{
        bool pushgp = true;

        spin_lock_irq_rcu_node(ssp);
        if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
                if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
                        /* All requests fulfilled, time to go idle. */
                        pushgp = false;
                }
        } else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
                /* Outstanding request and no GP.  Start one. */
                srcu_gp_start(ssp);
        }
        spin_unlock_irq_rcu_node(ssp);

        if (pushgp)
                queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
static void process_srcu(struct work_struct *work)
{
        struct srcu_struct *ssp;

        ssp = container_of(work, struct srcu_struct, work.work);

        srcu_advance_state(ssp);
        srcu_reschedule(ssp, srcu_get_delay(ssp));
}

void srcutorture_get_gp_data(enum rcutorture_type test_type,
                             struct srcu_struct *ssp, int *flags,
                             unsigned long *gp_seq)
{
        if (test_type != SRCU_FLAVOR)
                return;
        *flags = 0;
        *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);

void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
{
        int cpu;
        int idx;
        unsigned long s0 = 0, s1 = 0;

        idx = ssp->srcu_idx & 0x1;
        pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
                 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
        for_each_possible_cpu(cpu) {
                unsigned long l0, l1;
                unsigned long u0, u1;
                long c0, c1;
                struct srcu_data *sdp;

                sdp = per_cpu_ptr(ssp->sda, cpu);
                u0 = data_race(sdp->srcu_unlock_count[!idx]);
                u1 = data_race(sdp->srcu_unlock_count[idx]);

                /*
                 * Make sure that a lock is always counted if the corresponding
                 * unlock is counted.
                 */
                smp_rmb();

                l0 = data_race(sdp->srcu_lock_count[!idx]);
                l1 = data_race(sdp->srcu_lock_count[idx]);

                c0 = l0 - u0;
                c1 = l1 - u1;
                pr_cont(" %d(%ld,%ld %c)",
                        cpu, c0, c1,
                        "C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
                s0 += c0;
                s1 += c1;
        }
        pr_cont(" T(%ld,%ld)\n", s0, s1);
}
EXPORT_SYMBOL_GPL(srcu_torture_stats_print);

static int __init srcu_bootup_announce(void)
{
        pr_info("Hierarchical SRCU implementation.\n");
        if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
                pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
        return 0;
}
early_initcall(srcu_bootup_announce);

void __init srcu_init(void)
{
        struct srcu_struct *ssp;

        /*
         * Once srcu_init_done is set, call_srcu() can follow the normal
         * path and queue delayed work.  This must follow RCU workqueues
         * creation and timers initialization.
         */
        srcu_init_done = true;
        while (!list_empty(&srcu_boot_list)) {
                ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
                                       work.work.entry);
                list_del_init(&ssp->work.work.entry);
                queue_work(rcu_gp_wq, &ssp->work.work);
        }
}

#ifdef CONFIG_MODULES

/* Initialize any global-scope srcu_struct structures used by this module. */
static int srcu_module_coming(struct module *mod)
{
        int i;
        struct srcu_struct **sspp = mod->srcu_struct_ptrs;
        int ret;

        for (i = 0; i < mod->num_srcu_structs; i++) {
                ret = init_srcu_struct(*(sspp++));
                if (WARN_ON_ONCE(ret))
                        return ret;
        }
        return 0;
}

/* Clean up any global-scope srcu_struct structures used by this module. */
static void srcu_module_going(struct module *mod)
{
        int i;
        struct srcu_struct **sspp = mod->srcu_struct_ptrs;

        for (i = 0; i < mod->num_srcu_structs; i++)
                cleanup_srcu_struct(*(sspp++));
}

/* Handle one module, either coming or going. */
static int srcu_module_notify(struct notifier_block *self,
                              unsigned long val, void *data)
{
        struct module *mod = data;
        int ret = 0;

        switch (val) {
        case MODULE_STATE_COMING:
                ret = srcu_module_coming(mod);
                break;
        case MODULE_STATE_GOING:
                srcu_module_going(mod);
                break;
        default:
                break;
        }
        return ret;
}

static struct notifier_block srcu_module_nb = {
        .notifier_call = srcu_module_notify,
        .priority = 0,
};

static __init int init_srcu_module_notifier(void)
{
        int ret;

        ret = register_module_notifier(&srcu_module_nb);
        if (ret)
                pr_warn("Failed to register srcu module notifier\n");
        return ret;
}
late_initcall(init_srcu_module_notifier);

#endif /* #ifdef CONFIG_MODULES */