linux/kernel/rcu/srcutree.c
   1/*
   2 * Sleepable Read-Copy Update mechanism for mutual exclusion.
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License as published by
   6 * the Free Software Foundation; either version 2 of the License, or
   7 * (at your option) any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, you can access it online at
  16 * http://www.gnu.org/licenses/gpl-2.0.html.
  17 *
  18 * Copyright (C) IBM Corporation, 2006
  19 * Copyright (C) Fujitsu, 2012
  20 *
  21 * Author: Paul McKenney <paulmck@us.ibm.com>
  22 *         Lai Jiangshan <laijs@cn.fujitsu.com>
  23 *
  24 * For detailed explanation of Read-Copy Update mechanism see -
  25 *              Documentation/RCU/ *.txt
  26 *
  27 */
  28
  29#define pr_fmt(fmt) "rcu: " fmt
  30
  31#include <linux/export.h>
  32#include <linux/mutex.h>
  33#include <linux/percpu.h>
  34#include <linux/preempt.h>
  35#include <linux/rcupdate_wait.h>
  36#include <linux/sched.h>
  37#include <linux/smp.h>
  38#include <linux/delay.h>
  39#include <linux/module.h>
  40#include <linux/srcu.h>
  41
  42#include "rcu.h"
  43#include "rcu_segcblist.h"
  44
  45/* Holdoff in nanoseconds for auto-expediting. */
  46#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
  47static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
  48module_param(exp_holdoff, ulong, 0444);
  49
  50/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
  51static ulong counter_wrap_check = (ULONG_MAX >> 2);
  52module_param(counter_wrap_check, ulong, 0444);
  53
  54/* Early-boot callback-management, so early that no lock is required! */
  55static LIST_HEAD(srcu_boot_list);
  56static bool __read_mostly srcu_init_done;
  57
  58static void srcu_invoke_callbacks(struct work_struct *work);
  59static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
  60static void process_srcu(struct work_struct *work);
  61static void srcu_delay_timer(struct timer_list *t);
  62
  63/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
  64#define spin_lock_rcu_node(p)                                   \
  65do {                                                                    \
  66        spin_lock(&ACCESS_PRIVATE(p, lock));                    \
  67        smp_mb__after_unlock_lock();                                    \
  68} while (0)
  69
  70#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))
  71
  72#define spin_lock_irq_rcu_node(p)                                       \
  73do {                                                                    \
  74        spin_lock_irq(&ACCESS_PRIVATE(p, lock));                        \
  75        smp_mb__after_unlock_lock();                                    \
  76} while (0)
  77
  78#define spin_unlock_irq_rcu_node(p)                                     \
  79        spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
  80
  81#define spin_lock_irqsave_rcu_node(p, flags)                    \
  82do {                                                                    \
  83        spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);     \
  84        smp_mb__after_unlock_lock();                                    \
  85} while (0)
  86
  87#define spin_unlock_irqrestore_rcu_node(p, flags)                       \
  88        spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags) \
  89
  90/*
  91 * Initialize SRCU combining tree.  Note that statically allocated
  92 * srcu_struct structures might already have srcu_read_lock() and
  93 * srcu_read_unlock() running against them.  So if the is_static parameter
  94 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
  95 */
  96static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
  97{
  98        int cpu;
  99        int i;
 100        int level = 0;
 101        int levelspread[RCU_NUM_LVLS];
 102        struct srcu_data *sdp;
 103        struct srcu_node *snp;
 104        struct srcu_node *snp_first;
 105
 106        /* Work out the overall tree geometry. */
 107        ssp->level[0] = &ssp->node[0];
 108        for (i = 1; i < rcu_num_lvls; i++)
 109                ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
 110        rcu_init_levelspread(levelspread, num_rcu_lvl);
 111
 112        /* Each pass through this loop initializes one srcu_node structure. */
 113        srcu_for_each_node_breadth_first(ssp, snp) {
 114                spin_lock_init(&ACCESS_PRIVATE(snp, lock));
 115                WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
 116                             ARRAY_SIZE(snp->srcu_data_have_cbs));
 117                for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
 118                        snp->srcu_have_cbs[i] = 0;
 119                        snp->srcu_data_have_cbs[i] = 0;
 120                }
 121                snp->srcu_gp_seq_needed_exp = 0;
 122                snp->grplo = -1;
 123                snp->grphi = -1;
 124                if (snp == &ssp->node[0]) {
 125                        /* Root node, special case. */
 126                        snp->srcu_parent = NULL;
 127                        continue;
 128                }
 129
 130                /* Non-root node. */
 131                if (snp == ssp->level[level + 1])
 132                        level++;
 133                snp->srcu_parent = ssp->level[level - 1] +
 134                                   (snp - ssp->level[level]) /
 135                                   levelspread[level - 1];
 136        }
 137
 138        /*
 139         * Initialize the per-CPU srcu_data array, which feeds into the
 140         * leaves of the srcu_node tree.
 141         */
 142        WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
 143                     ARRAY_SIZE(sdp->srcu_unlock_count));
 144        level = rcu_num_lvls - 1;
 145        snp_first = ssp->level[level];
 146        for_each_possible_cpu(cpu) {
 147                sdp = per_cpu_ptr(ssp->sda, cpu);
 148                spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
 149                rcu_segcblist_init(&sdp->srcu_cblist);
 150                sdp->srcu_cblist_invoking = false;
 151                sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
 152                sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
 153                sdp->mynode = &snp_first[cpu / levelspread[level]];
 154                for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
 155                        if (snp->grplo < 0)
 156                                snp->grplo = cpu;
 157                        snp->grphi = cpu;
 158                }
 159                sdp->cpu = cpu;
 160                INIT_WORK(&sdp->work, srcu_invoke_callbacks);
 161                timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
 162                sdp->ssp = ssp;
 163                sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
 164                if (is_static)
 165                        continue;
 166
 167                /* Dynamically allocated, better be no srcu_read_locks()! */
 168                for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
 169                        sdp->srcu_lock_count[i] = 0;
 170                        sdp->srcu_unlock_count[i] = 0;
 171                }
 172        }
 173}
 174
 175/*
 176 * Initialize non-compile-time initialized fields, including the
 177 * associated srcu_node and srcu_data structures.  The is_static
 178 * parameter is passed through to init_srcu_struct_nodes(), and
 179 * also tells us that ->sda has already been wired up to srcu_data.
 180 */
 181static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
 182{
 183        mutex_init(&ssp->srcu_cb_mutex);
 184        mutex_init(&ssp->srcu_gp_mutex);
 185        ssp->srcu_idx = 0;
 186        ssp->srcu_gp_seq = 0;
 187        ssp->srcu_barrier_seq = 0;
 188        mutex_init(&ssp->srcu_barrier_mutex);
 189        atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
 190        INIT_DELAYED_WORK(&ssp->work, process_srcu);
 191        if (!is_static)
 192                ssp->sda = alloc_percpu(struct srcu_data);
 193        init_srcu_struct_nodes(ssp, is_static);
 194        ssp->srcu_gp_seq_needed_exp = 0;
 195        ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
 196        smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
 197        return ssp->sda ? 0 : -ENOMEM;
 198}
 199
 200#ifdef CONFIG_DEBUG_LOCK_ALLOC
 201
 202int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
 203                       struct lock_class_key *key)
 204{
 205        /* Don't re-initialize a lock while it is held. */
 206        debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
 207        lockdep_init_map(&ssp->dep_map, name, key, 0);
 208        spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
 209        return init_srcu_struct_fields(ssp, false);
 210}
 211EXPORT_SYMBOL_GPL(__init_srcu_struct);
 212
 213#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 214
 215/**
 216 * init_srcu_struct - initialize a sleep-RCU structure
 217 * @ssp: structure to initialize.
 218 *
 219 * Must invoke this on a given srcu_struct before passing that srcu_struct
 220 * to any other function.  Each srcu_struct represents a separate domain
 221 * of SRCU protection.
 222 */
 223int init_srcu_struct(struct srcu_struct *ssp)
 224{
 225        spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
 226        return init_srcu_struct_fields(ssp, false);
 227}
 228EXPORT_SYMBOL_GPL(init_srcu_struct);
 229
 230#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
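/*
 * Illustrative usage sketch (not part of this file): an srcu_struct can
 * either be defined statically via DEFINE_SRCU()/DEFINE_STATIC_SRCU(),
 * which handles all initialization at build time, or be embedded in a
 * dynamically allocated object and initialized at runtime.  The names
 * my_srcu, my_obj, and obj below are hypothetical:
 *
 *	DEFINE_STATIC_SRCU(my_srcu);		// static definition
 *
 *	struct my_obj {
 *		struct srcu_struct srcu;
 *		// ...
 *	};
 *
 *	ret = init_srcu_struct(&obj->srcu);	// dynamic initialization
 *	if (ret)
 *		return ret;			// -ENOMEM on allocation failure
 */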
 231
 232/*
 233 * First-use initialization of statically allocated srcu_struct
 234 * structure.  Wiring up the combining tree is more than can be
 235 * done with compile-time initialization, so this check is added
 236 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 237 * compile-time initialized, to resolve races involving multiple
 238 * CPUs trying to garner first-use privileges.
 239 */
 240static void check_init_srcu_struct(struct srcu_struct *ssp)
 241{
 242        unsigned long flags;
 243
 244        /* The smp_load_acquire() pairs with the smp_store_release(). */
 245        if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
 246                return; /* Already initialized. */
 247        spin_lock_irqsave_rcu_node(ssp, flags);
 248        if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
 249                spin_unlock_irqrestore_rcu_node(ssp, flags);
 250                return;
 251        }
 252        init_srcu_struct_fields(ssp, true);
 253        spin_unlock_irqrestore_rcu_node(ssp, flags);
 254}
 255
 256/*
 257 * Returns approximate total of the readers' ->srcu_lock_count[] values
 258 * for the rank of per-CPU counters specified by idx.
 259 */
 260static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
 261{
 262        int cpu;
 263        unsigned long sum = 0;
 264
 265        for_each_possible_cpu(cpu) {
 266                struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
 267
 268                sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
 269        }
 270        return sum;
 271}
 272
 273/*
 274 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 275 * for the rank of per-CPU counters specified by idx.
 276 */
 277static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
 278{
 279        int cpu;
 280        unsigned long sum = 0;
 281
 282        for_each_possible_cpu(cpu) {
 283                struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
 284
 285                sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
 286        }
 287        return sum;
 288}
 289
 290/*
 291 * Return true if the number of pre-existing readers is determined to
 292 * be zero.
 293 */
 294static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
 295{
 296        unsigned long unlocks;
 297
 298        unlocks = srcu_readers_unlock_idx(ssp, idx);
 299
 300        /*
 301         * Make sure that a lock is always counted if the corresponding
 302         * unlock is counted. Needs to be a smp_mb() as the read side may
 303         * contain a read from a variable that is written to before the
 304         * synchronize_srcu() in the write side. In this case smp_mb()s
 305         * A and B act like the store buffering pattern.
 306         *
 307         * This smp_mb() also pairs with smp_mb() C to prevent accesses
 308         * after the synchronize_srcu() from being executed before the
 309         * grace period ends.
 310         */
 311        smp_mb(); /* A */
 312
 313        /*
 314         * If the locks are the same as the unlocks, then there must have
 315         * been no readers on this index at some time in between. This does
 316         * not mean that there are no more readers, as one could have read
 317         * the current index but not have incremented the lock counter yet.
 318         *
 319         * So suppose that the updater is preempted here for so long
 320         * that more than ULONG_MAX non-nested readers come and go in
 321         * the meantime.  It turns out that this cannot result in overflow
 322         * because if a reader modifies its unlock count after we read it
 323         * above, then that reader's next load of ->srcu_idx is guaranteed
 324         * to get the new value, which will cause it to operate on the
 325         * other bank of counters, where it cannot contribute to the
 326         * overflow of these counters.  This means that there is a maximum
 327         * of 2*NR_CPUS increments, which cannot overflow given current
 328         * systems, especially not on 64-bit systems.
 329         *
 330         * OK, how about nesting?  This does impose a limit on nesting
 331         * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
 332         * especially on 64-bit systems.
 333         */
 334        return srcu_readers_lock_idx(ssp, idx) == unlocks;
 335}
 336
 337/**
  338 * srcu_readers_active - returns true if there are readers, and false
 339 *                       otherwise
 340 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 341 *
 342 * Note that this is not an atomic primitive, and can therefore suffer
 343 * severe errors when invoked on an active srcu_struct.  That said, it
 344 * can be useful as an error check at cleanup time.
 345 */
 346static bool srcu_readers_active(struct srcu_struct *ssp)
 347{
 348        int cpu;
 349        unsigned long sum = 0;
 350
 351        for_each_possible_cpu(cpu) {
 352                struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
 353
 354                sum += READ_ONCE(cpuc->srcu_lock_count[0]);
 355                sum += READ_ONCE(cpuc->srcu_lock_count[1]);
 356                sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
 357                sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
 358        }
 359        return sum;
 360}
 361
 362#define SRCU_INTERVAL           1
 363
 364/*
 365 * Return grace-period delay, zero if there are expedited grace
 366 * periods pending, SRCU_INTERVAL otherwise.
 367 */
 368static unsigned long srcu_get_delay(struct srcu_struct *ssp)
 369{
 370        if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
 371                         READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
 372                return 0;
 373        return SRCU_INTERVAL;
 374}
 375
 376/**
 377 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 378 * @ssp: structure to clean up.
 379 *
 380 * Must invoke this after you are finished using a given srcu_struct that
 381 * was initialized via init_srcu_struct(), else you leak memory.
 382 */
 383void cleanup_srcu_struct(struct srcu_struct *ssp)
 384{
 385        int cpu;
 386
 387        if (WARN_ON(!srcu_get_delay(ssp)))
 388                return; /* Just leak it! */
 389        if (WARN_ON(srcu_readers_active(ssp)))
 390                return; /* Just leak it! */
 391        flush_delayed_work(&ssp->work);
 392        for_each_possible_cpu(cpu) {
 393                struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
 394
 395                del_timer_sync(&sdp->delay_work);
 396                flush_work(&sdp->work);
 397                if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
 398                        return; /* Forgot srcu_barrier(), so just leak it! */
 399        }
 400        if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
 401            WARN_ON(srcu_readers_active(ssp))) {
 402                pr_info("%s: Active srcu_struct %p state: %d\n",
 403                        __func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
 404                return; /* Caller forgot to stop doing call_srcu()? */
 405        }
 406        free_percpu(ssp->sda);
 407        ssp->sda = NULL;
 408}
 409EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
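/*
 * Illustrative teardown sketch (not part of this file), using a
 * hypothetical my_srcu on which call_srcu() callbacks were posted:
 * all outstanding callbacks must be waited for with srcu_barrier()
 * before cleanup_srcu_struct() is invoked, otherwise the WARN_ON()
 * above fires and the srcu_struct is leaked.
 *
 *	// Stop posting new call_srcu() callbacks first, then:
 *	srcu_barrier(&my_srcu);
 *	cleanup_srcu_struct(&my_srcu);
 */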
 410
 411/*
 412 * Counts the new reader in the appropriate per-CPU element of the
 413 * srcu_struct.
 414 * Returns an index that must be passed to the matching srcu_read_unlock().
 415 */
 416int __srcu_read_lock(struct srcu_struct *ssp)
 417{
 418        int idx;
 419
 420        idx = READ_ONCE(ssp->srcu_idx) & 0x1;
 421        this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
 422        smp_mb(); /* B */  /* Avoid leaking the critical section. */
 423        return idx;
 424}
 425EXPORT_SYMBOL_GPL(__srcu_read_lock);
 426
 427/*
 428 * Removes the count for the old reader from the appropriate per-CPU
 429 * element of the srcu_struct.  Note that this may well be a different
 430 * CPU than that which was incremented by the corresponding srcu_read_lock().
 431 */
 432void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
 433{
 434        smp_mb(); /* C */  /* Avoid leaking the critical section. */
 435        this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
 436}
 437EXPORT_SYMBOL_GPL(__srcu_read_unlock);
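/*
 * Illustrative read-side sketch (not part of this file): readers normally
 * use the srcu_read_lock()/srcu_read_unlock() wrappers from
 * include/linux/srcu.h, which invoke the __ variants above.  The names
 * my_srcu, my_gp, struct foo, and do_something_with() are hypothetical:
 *
 *	int idx;
 *	struct foo *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(my_gp, &my_srcu);
 *	if (p)
 *		do_something_with(p);	// may sleep, unlike plain RCU readers
 *	srcu_read_unlock(&my_srcu, idx);
 */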
 438
 439/*
 440 * We use an adaptive strategy for synchronize_srcu() and especially for
 441 * synchronize_srcu_expedited().  We spin for a fixed time period
 442 * (defined below) to allow SRCU readers to exit their read-side critical
 443 * sections.  If there are still some readers after a few microseconds,
 444 * we repeatedly block for 1-millisecond time periods.
 445 */
 446#define SRCU_RETRY_CHECK_DELAY          5
 447
 448/*
 449 * Start an SRCU grace period.
 450 */
 451static void srcu_gp_start(struct srcu_struct *ssp)
 452{
 453        struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
 454        int state;
 455
 456        lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
 457        WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
 458        spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
 459        rcu_segcblist_advance(&sdp->srcu_cblist,
 460                              rcu_seq_current(&ssp->srcu_gp_seq));
 461        (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
 462                                       rcu_seq_snap(&ssp->srcu_gp_seq));
 463        spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
 464        smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
 465        rcu_seq_start(&ssp->srcu_gp_seq);
 466        state = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
 467        WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
 468}
 469
 470
 471static void srcu_delay_timer(struct timer_list *t)
 472{
 473        struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);
 474
 475        queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
 476}
 477
 478static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
 479                                       unsigned long delay)
 480{
 481        if (!delay) {
 482                queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
 483                return;
 484        }
 485
 486        timer_reduce(&sdp->delay_work, jiffies + delay);
 487}
 488
 489/*
 490 * Schedule callback invocation for the specified srcu_data structure,
 491 * if possible, on the corresponding CPU.
 492 */
 493static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
 494{
 495        srcu_queue_delayed_work_on(sdp, delay);
 496}
 497
 498/*
 499 * Schedule callback invocation for all srcu_data structures associated
 500 * with the specified srcu_node structure that have callbacks for the
 501 * just-completed grace period, the one corresponding to idx.  If possible,
 502 * schedule this invocation on the corresponding CPUs.
 503 */
 504static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
 505                                  unsigned long mask, unsigned long delay)
 506{
 507        int cpu;
 508
 509        for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
 510                if (!(mask & (1 << (cpu - snp->grplo))))
 511                        continue;
 512                srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
 513        }
 514}
 515
 516/*
 517 * Note the end of an SRCU grace period.  Initiates callback invocation
 518 * and starts a new grace period if needed.
 519 *
 520 * The ->srcu_cb_mutex acquisition does not protect any data, but
 521 * instead prevents more than one grace period from starting while we
 522 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 523 * array to have a finite number of elements.
 524 */
 525static void srcu_gp_end(struct srcu_struct *ssp)
 526{
 527        unsigned long cbdelay;
 528        bool cbs;
 529        bool last_lvl;
 530        int cpu;
 531        unsigned long flags;
 532        unsigned long gpseq;
 533        int idx;
 534        unsigned long mask;
 535        struct srcu_data *sdp;
 536        struct srcu_node *snp;
 537
 538        /* Prevent more than one additional grace period. */
 539        mutex_lock(&ssp->srcu_cb_mutex);
 540
 541        /* End the current grace period. */
 542        spin_lock_irq_rcu_node(ssp);
 543        idx = rcu_seq_state(ssp->srcu_gp_seq);
 544        WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
 545        cbdelay = srcu_get_delay(ssp);
 546        ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
 547        rcu_seq_end(&ssp->srcu_gp_seq);
 548        gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
 549        if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
 550                ssp->srcu_gp_seq_needed_exp = gpseq;
 551        spin_unlock_irq_rcu_node(ssp);
 552        mutex_unlock(&ssp->srcu_gp_mutex);
 553        /* A new grace period can start at this point.  But only one. */
 554
 555        /* Initiate callback invocation as needed. */
 556        idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
 557        srcu_for_each_node_breadth_first(ssp, snp) {
 558                spin_lock_irq_rcu_node(snp);
 559                cbs = false;
 560                last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
 561                if (last_lvl)
 562                        cbs = snp->srcu_have_cbs[idx] == gpseq;
 563                snp->srcu_have_cbs[idx] = gpseq;
 564                rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
 565                if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
 566                        snp->srcu_gp_seq_needed_exp = gpseq;
 567                mask = snp->srcu_data_have_cbs[idx];
 568                snp->srcu_data_have_cbs[idx] = 0;
 569                spin_unlock_irq_rcu_node(snp);
 570                if (cbs)
 571                        srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
 572
 573                /* Occasionally prevent srcu_data counter wrap. */
 574                if (!(gpseq & counter_wrap_check) && last_lvl)
 575                        for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
 576                                sdp = per_cpu_ptr(ssp->sda, cpu);
 577                                spin_lock_irqsave_rcu_node(sdp, flags);
 578                                if (ULONG_CMP_GE(gpseq,
 579                                                 sdp->srcu_gp_seq_needed + 100))
 580                                        sdp->srcu_gp_seq_needed = gpseq;
 581                                if (ULONG_CMP_GE(gpseq,
 582                                                 sdp->srcu_gp_seq_needed_exp + 100))
 583                                        sdp->srcu_gp_seq_needed_exp = gpseq;
 584                                spin_unlock_irqrestore_rcu_node(sdp, flags);
 585                        }
 586        }
 587
 588        /* Callback initiation done, allow grace periods after next. */
 589        mutex_unlock(&ssp->srcu_cb_mutex);
 590
 591        /* Start a new grace period if needed. */
 592        spin_lock_irq_rcu_node(ssp);
 593        gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
 594        if (!rcu_seq_state(gpseq) &&
 595            ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
 596                srcu_gp_start(ssp);
 597                spin_unlock_irq_rcu_node(ssp);
 598                srcu_reschedule(ssp, 0);
 599        } else {
 600                spin_unlock_irq_rcu_node(ssp);
 601        }
 602}
 603
 604/*
 605 * Funnel-locking scheme to scalably mediate many concurrent expedited
 606 * grace-period requests.  This function is invoked for the first known
 607 * expedited request for a grace period that has already been requested,
 608 * but without expediting.  To start a completely new grace period,
 609 * whether expedited or not, use srcu_funnel_gp_start() instead.
 610 */
 611static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
 612                                  unsigned long s)
 613{
 614        unsigned long flags;
 615
 616        for (; snp != NULL; snp = snp->srcu_parent) {
 617                if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
 618                    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
 619                        return;
 620                spin_lock_irqsave_rcu_node(snp, flags);
 621                if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
 622                        spin_unlock_irqrestore_rcu_node(snp, flags);
 623                        return;
 624                }
 625                WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
 626                spin_unlock_irqrestore_rcu_node(snp, flags);
 627        }
 628        spin_lock_irqsave_rcu_node(ssp, flags);
 629        if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
 630                ssp->srcu_gp_seq_needed_exp = s;
 631        spin_unlock_irqrestore_rcu_node(ssp, flags);
 632}
 633
 634/*
 635 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 636 * requests.  The winner has to do the work of actually starting grace
 637 * period s.  Losers must either ensure that their desired grace-period
 638 * number is recorded on at least their leaf srcu_node structure, or they
 639 * must take steps to invoke their own callbacks.
 640 *
 641 * Note that this function also does the work of srcu_funnel_exp_start(),
 642 * in some cases by directly invoking it.
 643 */
 644static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
 645                                 unsigned long s, bool do_norm)
 646{
 647        unsigned long flags;
 648        int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
 649        struct srcu_node *snp = sdp->mynode;
 650        unsigned long snp_seq;
 651
 652        /* Each pass through the loop does one level of the srcu_node tree. */
 653        for (; snp != NULL; snp = snp->srcu_parent) {
 654                if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
 655                        return; /* GP already done and CBs recorded. */
 656                spin_lock_irqsave_rcu_node(snp, flags);
 657                if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
 658                        snp_seq = snp->srcu_have_cbs[idx];
 659                        if (snp == sdp->mynode && snp_seq == s)
 660                                snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
 661                        spin_unlock_irqrestore_rcu_node(snp, flags);
 662                        if (snp == sdp->mynode && snp_seq != s) {
 663                                srcu_schedule_cbs_sdp(sdp, do_norm
 664                                                           ? SRCU_INTERVAL
 665                                                           : 0);
 666                                return;
 667                        }
 668                        if (!do_norm)
 669                                srcu_funnel_exp_start(ssp, snp, s);
 670                        return;
 671                }
 672                snp->srcu_have_cbs[idx] = s;
 673                if (snp == sdp->mynode)
 674                        snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
 675                if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
 676                        snp->srcu_gp_seq_needed_exp = s;
 677                spin_unlock_irqrestore_rcu_node(snp, flags);
 678        }
 679
 680        /* Top of tree, must ensure the grace period will be started. */
 681        spin_lock_irqsave_rcu_node(ssp, flags);
 682        if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
 683                /*
 684                 * Record need for grace period s.  Pair with load
 685                 * acquire setting up for initialization.
 686                 */
 687                smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
 688        }
 689        if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
 690                ssp->srcu_gp_seq_needed_exp = s;
 691
 692        /* If grace period not already done and none in progress, start it. */
 693        if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
 694            rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
 695                WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
 696                srcu_gp_start(ssp);
 697                if (likely(srcu_init_done))
 698                        queue_delayed_work(rcu_gp_wq, &ssp->work,
 699                                           srcu_get_delay(ssp));
 700                else if (list_empty(&ssp->work.work.entry))
 701                        list_add(&ssp->work.work.entry, &srcu_boot_list);
 702        }
 703        spin_unlock_irqrestore_rcu_node(ssp, flags);
 704}
 705
 706/*
 707 * Wait until all readers counted by array index idx complete, but
 708 * loop an additional time if there is an expedited grace period pending.
 709 * The caller must ensure that ->srcu_idx is not changed while checking.
 710 */
 711static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
 712{
 713        for (;;) {
 714                if (srcu_readers_active_idx_check(ssp, idx))
 715                        return true;
 716                if (--trycount + !srcu_get_delay(ssp) <= 0)
 717                        return false;
 718                udelay(SRCU_RETRY_CHECK_DELAY);
 719        }
 720}
 721
 722/*
 723 * Increment the ->srcu_idx counter so that future SRCU readers will
 724 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 725 * us to wait for pre-existing readers in a starvation-free manner.
 726 */
 727static void srcu_flip(struct srcu_struct *ssp)
 728{
 729        /*
 730         * Ensure that if this updater saw a given reader's increment
 731         * from __srcu_read_lock(), that reader was using an old value
 732         * of ->srcu_idx.  Also ensure that if a given reader sees the
 733         * new value of ->srcu_idx, this updater's earlier scans cannot
 734         * have seen that reader's increments (which is OK, because this
 735         * grace period need not wait on that reader).
 736         */
 737        smp_mb(); /* E */  /* Pairs with B and C. */
 738
 739        WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
 740
 741        /*
 742         * Ensure that if the updater misses an __srcu_read_unlock()
 743         * increment, that task's next __srcu_read_lock() will see the
 744         * above counter update.  Note that both this memory barrier
 745         * and the one in srcu_readers_active_idx_check() provide the
 746         * guarantee for __srcu_read_lock().
 747         */
 748        smp_mb(); /* D */  /* Pairs with C. */
 749}
 750
 751/*
 752 * If SRCU is likely idle, return true, otherwise return false.
 753 *
  754 * Note that it is OK for several concurrent from-idle requests for a
  755 * new grace period to specify expediting because they will all end
  756 * up requesting the same grace period anyhow.  So no loss.
 757 *
 758 * Note also that if any CPU (including the current one) is still invoking
 759 * callbacks, this function will nevertheless say "idle".  This is not
 760 * ideal, but the overhead of checking all CPUs' callback lists is even
 761 * less ideal, especially on large systems.  Furthermore, the wakeup
 762 * can happen before the callback is fully removed, so we have no choice
 763 * but to accept this type of error.
 764 *
 765 * This function is also subject to counter-wrap errors, but let's face
 766 * it, if this function was preempted for enough time for the counters
 767 * to wrap, it really doesn't matter whether or not we expedite the grace
 768 * period.  The extra overhead of a needlessly expedited grace period is
  769 * negligible when amortized over that time period, and the extra latency
 770 * of a needlessly non-expedited grace period is similarly negligible.
 771 */
 772static bool srcu_might_be_idle(struct srcu_struct *ssp)
 773{
 774        unsigned long curseq;
 775        unsigned long flags;
 776        struct srcu_data *sdp;
 777        unsigned long t;
 778
 779        /* If the local srcu_data structure has callbacks, not idle.  */
 780        local_irq_save(flags);
 781        sdp = this_cpu_ptr(ssp->sda);
 782        if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
 783                local_irq_restore(flags);
 784                return false; /* Callbacks already present, so not idle. */
 785        }
 786        local_irq_restore(flags);
 787
 788        /*
  789         * No local callbacks, so probabilistically probe global state.
  790         * Exact information would require acquiring locks, which would
  791         * kill scalability, hence the probabilistic nature of the probe.
 792         */
 793
 794        /* First, see if enough time has passed since the last GP. */
 795        t = ktime_get_mono_fast_ns();
 796        if (exp_holdoff == 0 ||
 797            time_in_range_open(t, ssp->srcu_last_gp_end,
 798                               ssp->srcu_last_gp_end + exp_holdoff))
 799                return false; /* Too soon after last GP. */
 800
 801        /* Next, check for probable idleness. */
 802        curseq = rcu_seq_current(&ssp->srcu_gp_seq);
 803        smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
 804        if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
 805                return false; /* Grace period in progress, so not idle. */
 806        smp_mb(); /* Order ->srcu_gp_seq with prior access. */
 807        if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
 808                return false; /* GP # changed, so not idle. */
 809        return true; /* With reasonable probability, idle! */
 810}
 811
 812/*
 813 * SRCU callback function to leak a callback.
 814 */
 815static void srcu_leak_callback(struct rcu_head *rhp)
 816{
 817}
 818
 819/*
 820 * Enqueue an SRCU callback on the srcu_data structure associated with
 821 * the current CPU and the specified srcu_struct structure, initiating
 822 * grace-period processing if it is not already running.
 823 *
 824 * Note that all CPUs must agree that the grace period extended beyond
  825 * all pre-existing SRCU read-side critical sections.  On systems with
 826 * more than one CPU, this means that when "func()" is invoked, each CPU
 827 * is guaranteed to have executed a full memory barrier since the end of
 828 * its last corresponding SRCU read-side critical section whose beginning
 829 * preceded the call to call_srcu().  It also means that each CPU executing
 830 * an SRCU read-side critical section that continues beyond the start of
 831 * "func()" must have executed a memory barrier after the call_srcu()
 832 * but before the beginning of that SRCU read-side critical section.
 833 * Note that these guarantees include CPUs that are offline, idle, or
 834 * executing in user mode, as well as CPUs that are executing in the kernel.
 835 *
 836 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 837 * resulting SRCU callback function "func()", then both CPU A and CPU
 838 * B are guaranteed to execute a full memory barrier during the time
 839 * interval between the call to call_srcu() and the invocation of "func()".
 840 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 841 * again only if the system has more than one CPU).
 842 *
 843 * Of course, these guarantees apply only for invocations of call_srcu(),
 844 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 845 * srcu_struct structure.
 846 */
 847static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
 848                        rcu_callback_t func, bool do_norm)
 849{
 850        unsigned long flags;
 851        int idx;
 852        bool needexp = false;
 853        bool needgp = false;
 854        unsigned long s;
 855        struct srcu_data *sdp;
 856
 857        check_init_srcu_struct(ssp);
 858        if (debug_rcu_head_queue(rhp)) {
 859                /* Probable double call_srcu(), so leak the callback. */
 860                WRITE_ONCE(rhp->func, srcu_leak_callback);
 861                WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
 862                return;
 863        }
 864        rhp->func = func;
 865        idx = srcu_read_lock(ssp);
 866        local_irq_save(flags);
 867        sdp = this_cpu_ptr(ssp->sda);
 868        spin_lock_rcu_node(sdp);
 869        rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
 870        rcu_segcblist_advance(&sdp->srcu_cblist,
 871                              rcu_seq_current(&ssp->srcu_gp_seq));
 872        s = rcu_seq_snap(&ssp->srcu_gp_seq);
 873        (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
 874        if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
 875                sdp->srcu_gp_seq_needed = s;
 876                needgp = true;
 877        }
 878        if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
 879                sdp->srcu_gp_seq_needed_exp = s;
 880                needexp = true;
 881        }
 882        spin_unlock_irqrestore_rcu_node(sdp, flags);
 883        if (needgp)
 884                srcu_funnel_gp_start(ssp, sdp, s, do_norm);
 885        else if (needexp)
 886                srcu_funnel_exp_start(ssp, sdp->mynode, s);
 887        srcu_read_unlock(ssp, idx);
 888}
 889
 890/**
 891 * call_srcu() - Queue a callback for invocation after an SRCU grace period
  892 * @ssp: srcu_struct on which to queue the callback
 893 * @rhp: structure to be used for queueing the SRCU callback.
 894 * @func: function to be invoked after the SRCU grace period
 895 *
 896 * The callback function will be invoked some time after a full SRCU
 897 * grace period elapses, in other words after all pre-existing SRCU
 898 * read-side critical sections have completed.  However, the callback
 899 * function might well execute concurrently with other SRCU read-side
 900 * critical sections that started after call_srcu() was invoked.  SRCU
 901 * read-side critical sections are delimited by srcu_read_lock() and
 902 * srcu_read_unlock(), and may be nested.
 903 *
 904 * The callback will be invoked from process context, but must nevertheless
 905 * be fast and must not block.
 906 */
 907void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
 908               rcu_callback_t func)
 909{
 910        __call_srcu(ssp, rhp, func, true);
 911}
 912EXPORT_SYMBOL_GPL(call_srcu);
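/*
 * Illustrative update-side sketch (not part of this file), with
 * hypothetical names (struct foo, my_srcu, my_gp, my_lock, my_free_cb):
 * the updater unpublishes the old structure and lets call_srcu() free it
 * once all pre-existing SRCU readers of my_srcu have finished.
 *
 *	static void my_free_cb(struct rcu_head *rhp)
 *	{
 *		struct foo *p = container_of(rhp, struct foo, rh);
 *
 *		kfree(p);
 *	}
 *
 *	old = rcu_dereference_protected(my_gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(my_gp, new);
 *	call_srcu(&my_srcu, &old->rh, my_free_cb);
 */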
 913
 914/*
 915 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 916 */
 917static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
 918{
 919        struct rcu_synchronize rcu;
 920
 921        RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) ||
 922                         lock_is_held(&rcu_bh_lock_map) ||
 923                         lock_is_held(&rcu_lock_map) ||
 924                         lock_is_held(&rcu_sched_lock_map),
 925                         "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");
 926
 927        if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
 928                return;
 929        might_sleep();
 930        check_init_srcu_struct(ssp);
 931        init_completion(&rcu.completion);
 932        init_rcu_head_on_stack(&rcu.head);
 933        __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
 934        wait_for_completion(&rcu.completion);
 935        destroy_rcu_head_on_stack(&rcu.head);
 936
 937        /*
 938         * Make sure that later code is ordered after the SRCU grace
 939         * period.  This pairs with the spin_lock_irq_rcu_node()
 940         * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
 941         * because the current CPU might have been totally uninvolved with
 942         * (and thus unordered against) that grace period.
 943         */
 944        smp_mb();
 945}
 946
 947/**
 948 * synchronize_srcu_expedited - Brute-force SRCU grace period
 949 * @ssp: srcu_struct with which to synchronize.
 950 *
 951 * Wait for an SRCU grace period to elapse, but be more aggressive about
 952 * spinning rather than blocking when waiting.
 953 *
 954 * Note that synchronize_srcu_expedited() has the same deadlock and
 955 * memory-ordering properties as does synchronize_srcu().
 956 */
 957void synchronize_srcu_expedited(struct srcu_struct *ssp)
 958{
 959        __synchronize_srcu(ssp, rcu_gp_is_normal());
 960}
 961EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
 962
 963/**
 964 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 965 * @ssp: srcu_struct with which to synchronize.
 966 *
  967 * Wait for the counts of both index ranks to drain to zero. To avoid
  968 * possible starvation of synchronize_srcu(), it first waits for the
  969 * count of index=((->srcu_idx & 1) ^ 1) to drain to zero,
  970 * and then flips ->srcu_idx and waits for the count of the other index.
 971 *
 972 * Can block; must be called from process context.
 973 *
 974 * Note that it is illegal to call synchronize_srcu() from the corresponding
 975 * SRCU read-side critical section; doing so will result in deadlock.
 976 * However, it is perfectly legal to call synchronize_srcu() on one
 977 * srcu_struct from some other srcu_struct's read-side critical section,
 978 * as long as the resulting graph of srcu_structs is acyclic.
 979 *
 980 * There are memory-ordering constraints implied by synchronize_srcu().
 981 * On systems with more than one CPU, when synchronize_srcu() returns,
 982 * each CPU is guaranteed to have executed a full memory barrier since
 983 * the end of its last corresponding SRCU read-side critical section
 984 * whose beginning preceded the call to synchronize_srcu().  In addition,
 985 * each CPU having an SRCU read-side critical section that extends beyond
 986 * the return from synchronize_srcu() is guaranteed to have executed a
 987 * full memory barrier after the beginning of synchronize_srcu() and before
 988 * the beginning of that SRCU read-side critical section.  Note that these
 989 * guarantees include CPUs that are offline, idle, or executing in user mode,
 990 * as well as CPUs that are executing in the kernel.
 991 *
 992 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 993 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 994 * to have executed a full memory barrier during the execution of
 995 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 996 * are the same CPU, but again only if the system has more than one CPU.
 997 *
 998 * Of course, these memory-ordering guarantees apply only when
 999 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
1000 * passed the same srcu_struct structure.
1001 *
1002 * If SRCU is likely idle, expedite the first request.  This semantic
1003 * was provided by Classic SRCU, and is relied upon by its users, so TREE
1004 * SRCU must also provide it.  Note that detecting idleness is heuristic
1005 * and subject to both false positives and negatives.
1006 */
1007void synchronize_srcu(struct srcu_struct *ssp)
1008{
1009        if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
1010                synchronize_srcu_expedited(ssp);
1011        else
1012                __synchronize_srcu(ssp, true);
1013}
1014EXPORT_SYMBOL_GPL(synchronize_srcu);
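/*
 * Illustrative synchronous update sketch (not part of this file), again
 * with hypothetical names: unpublish the element, wait for all
 * pre-existing SRCU readers, then free it directly instead of using
 * call_srcu().
 *
 *	spin_lock(&my_lock);
 *	old = rcu_dereference_protected(my_gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(my_gp, NULL);
 *	spin_unlock(&my_lock);
 *	synchronize_srcu(&my_srcu);	// may block; never from a my_srcu reader
 *	kfree(old);
 */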
1015
1016/*
1017 * Callback function for srcu_barrier() use.
1018 */
1019static void srcu_barrier_cb(struct rcu_head *rhp)
1020{
1021        struct srcu_data *sdp;
1022        struct srcu_struct *ssp;
1023
1024        sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
1025        ssp = sdp->ssp;
1026        if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
1027                complete(&ssp->srcu_barrier_completion);
1028}
1029
1030/**
1031 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
1032 * @ssp: srcu_struct on which to wait for in-flight callbacks.
1033 */
1034void srcu_barrier(struct srcu_struct *ssp)
1035{
1036        int cpu;
1037        struct srcu_data *sdp;
1038        unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);
1039
1040        check_init_srcu_struct(ssp);
1041        mutex_lock(&ssp->srcu_barrier_mutex);
1042        if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
1043                smp_mb(); /* Force ordering following return. */
1044                mutex_unlock(&ssp->srcu_barrier_mutex);
1045                return; /* Someone else did our work for us. */
1046        }
1047        rcu_seq_start(&ssp->srcu_barrier_seq);
1048        init_completion(&ssp->srcu_barrier_completion);
1049
1050        /* Initial count prevents reaching zero until all CBs are posted. */
1051        atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
1052
1053        /*
1054         * Each pass through this loop enqueues a callback, but only
1055         * on CPUs already having callbacks enqueued.  Note that if
 1056         * a CPU already has callbacks enqueued, it must have already
1057         * registered the need for a future grace period, so all we
1058         * need do is enqueue a callback that will use the same
1059         * grace period as the last callback already in the queue.
1060         */
1061        for_each_possible_cpu(cpu) {
1062                sdp = per_cpu_ptr(ssp->sda, cpu);
1063                spin_lock_irq_rcu_node(sdp);
1064                atomic_inc(&ssp->srcu_barrier_cpu_cnt);
1065                sdp->srcu_barrier_head.func = srcu_barrier_cb;
1066                debug_rcu_head_queue(&sdp->srcu_barrier_head);
1067                if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
1068                                           &sdp->srcu_barrier_head, 0)) {
1069                        debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
1070                        atomic_dec(&ssp->srcu_barrier_cpu_cnt);
1071                }
1072                spin_unlock_irq_rcu_node(sdp);
1073        }
1074
1075        /* Remove the initial count, at which point reaching zero can happen. */
1076        if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
1077                complete(&ssp->srcu_barrier_completion);
1078        wait_for_completion(&ssp->srcu_barrier_completion);
1079
1080        rcu_seq_end(&ssp->srcu_barrier_seq);
1081        mutex_unlock(&ssp->srcu_barrier_mutex);
1082}
1083EXPORT_SYMBOL_GPL(srcu_barrier);
1084
1085/**
1086 * srcu_batches_completed - return batches completed.
1087 * @ssp: srcu_struct on which to report batch completion.
1088 *
1089 * Report the number of batches, correlated with, but not necessarily
1090 * precisely the same as, the number of grace periods that have elapsed.
1091 */
1092unsigned long srcu_batches_completed(struct srcu_struct *ssp)
1093{
1094        return ssp->srcu_idx;
1095}
1096EXPORT_SYMBOL_GPL(srcu_batches_completed);
1097
1098/*
1099 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
1100 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
1101 * completed in that state.
1102 */
1103static void srcu_advance_state(struct srcu_struct *ssp)
1104{
1105        int idx;
1106
1107        mutex_lock(&ssp->srcu_gp_mutex);
1108
1109        /*
1110         * Because readers might be delayed for an extended period after
1111         * fetching ->srcu_idx for their index, at any point in time there
1112         * might well be readers using both idx=0 and idx=1.  We therefore
1113         * need to wait for readers to clear from both index values before
1114         * invoking a callback.
1115         *
1116         * The load-acquire ensures that we see the accesses performed
1117         * by the prior grace period.
1118         */
1119        idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
1120        if (idx == SRCU_STATE_IDLE) {
1121                spin_lock_irq_rcu_node(ssp);
1122                if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
1123                        WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
1124                        spin_unlock_irq_rcu_node(ssp);
1125                        mutex_unlock(&ssp->srcu_gp_mutex);
1126                        return;
1127                }
1128                idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
1129                if (idx == SRCU_STATE_IDLE)
1130                        srcu_gp_start(ssp);
1131                spin_unlock_irq_rcu_node(ssp);
1132                if (idx != SRCU_STATE_IDLE) {
1133                        mutex_unlock(&ssp->srcu_gp_mutex);
1134                        return; /* Someone else started the grace period. */
1135                }
1136        }
1137
1138        if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
1139                idx = 1 ^ (ssp->srcu_idx & 1);
1140                if (!try_check_zero(ssp, idx, 1)) {
1141                        mutex_unlock(&ssp->srcu_gp_mutex);
1142                        return; /* readers present, retry later. */
1143                }
1144                srcu_flip(ssp);
1145                rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
1146        }
1147
1148        if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
1149
1150                /*
1151                 * SRCU read-side critical sections are normally short,
1152                 * so check at least twice in quick succession after a flip.
1153                 */
1154                idx = 1 ^ (ssp->srcu_idx & 1);
1155                if (!try_check_zero(ssp, idx, 2)) {
1156                        mutex_unlock(&ssp->srcu_gp_mutex);
1157                        return; /* readers present, retry later. */
1158                }
1159                srcu_gp_end(ssp);  /* Releases ->srcu_gp_mutex. */
1160        }
1161}
1162
1163/*
1164 * Invoke a limited number of SRCU callbacks that have passed through
1165 * their grace period.  If there are more to do, SRCU will reschedule
1166 * the workqueue.  Note that needed memory barriers have been executed
1167 * in this task's context by srcu_readers_active_idx_check().
1168 */
1169static void srcu_invoke_callbacks(struct work_struct *work)
1170{
1171        bool more;
1172        struct rcu_cblist ready_cbs;
1173        struct rcu_head *rhp;
1174        struct srcu_data *sdp;
1175        struct srcu_struct *ssp;
1176
1177        sdp = container_of(work, struct srcu_data, work);
1178
1179        ssp = sdp->ssp;
1180        rcu_cblist_init(&ready_cbs);
1181        spin_lock_irq_rcu_node(sdp);
1182        rcu_segcblist_advance(&sdp->srcu_cblist,
1183                              rcu_seq_current(&ssp->srcu_gp_seq));
1184        if (sdp->srcu_cblist_invoking ||
1185            !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
1186                spin_unlock_irq_rcu_node(sdp);
1187                return;  /* Someone else on the job or nothing to do. */
1188        }
1189
1190        /* We are on the job!  Extract and invoke ready callbacks. */
1191        sdp->srcu_cblist_invoking = true;
1192        rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
1193        spin_unlock_irq_rcu_node(sdp);
1194        rhp = rcu_cblist_dequeue(&ready_cbs);
1195        for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
1196                debug_rcu_head_unqueue(rhp);
1197                local_bh_disable();
1198                rhp->func(rhp);
1199                local_bh_enable();
1200        }
1201
1202        /*
1203         * Update counts, accelerate new callbacks, and if needed,
1204         * schedule another round of callback invocation.
1205         */
1206        spin_lock_irq_rcu_node(sdp);
1207        rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
1208        (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
1209                                       rcu_seq_snap(&ssp->srcu_gp_seq));
1210        sdp->srcu_cblist_invoking = false;
1211        more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
1212        spin_unlock_irq_rcu_node(sdp);
1213        if (more)
1214                srcu_schedule_cbs_sdp(sdp, 0);
1215}
1216
1217/*
1218 * Finished one round of SRCU grace period.  Start another if there are
1219 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
1220 */
1221static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
1222{
1223        bool pushgp = true;
1224
1225        spin_lock_irq_rcu_node(ssp);
1226        if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
1227                if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
1228                        /* All requests fulfilled, time to go idle. */
1229                        pushgp = false;
1230                }
1231        } else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
1232                /* Outstanding request and no GP.  Start one. */
1233                srcu_gp_start(ssp);
1234        }
1235        spin_unlock_irq_rcu_node(ssp);
1236
1237        if (pushgp)
1238                queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
1239}
1240
1241/*
1242 * This is the work-queue function that handles SRCU grace periods.
1243 */
1244static void process_srcu(struct work_struct *work)
1245{
1246        struct srcu_struct *ssp;
1247
1248        ssp = container_of(work, struct srcu_struct, work.work);
1249
1250        srcu_advance_state(ssp);
1251        srcu_reschedule(ssp, srcu_get_delay(ssp));
1252}
1253
1254void srcutorture_get_gp_data(enum rcutorture_type test_type,
1255                             struct srcu_struct *ssp, int *flags,
1256                             unsigned long *gp_seq)
1257{
1258        if (test_type != SRCU_FLAVOR)
1259                return;
1260        *flags = 0;
1261        *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
1262}
1263EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
1264
1265void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
1266{
1267        int cpu;
1268        int idx;
1269        unsigned long s0 = 0, s1 = 0;
1270
1271        idx = ssp->srcu_idx & 0x1;
1272        pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
1273                 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
1274        for_each_possible_cpu(cpu) {
1275                unsigned long l0, l1;
1276                unsigned long u0, u1;
1277                long c0, c1;
1278                struct srcu_data *sdp;
1279
1280                sdp = per_cpu_ptr(ssp->sda, cpu);
1281                u0 = sdp->srcu_unlock_count[!idx];
1282                u1 = sdp->srcu_unlock_count[idx];
1283
1284                /*
1285                 * Make sure that a lock is always counted if the corresponding
1286                 * unlock is counted.
1287                 */
1288                smp_rmb();
1289
1290                l0 = sdp->srcu_lock_count[!idx];
1291                l1 = sdp->srcu_lock_count[idx];
1292
1293                c0 = l0 - u0;
1294                c1 = l1 - u1;
1295                pr_cont(" %d(%ld,%ld %1p)",
1296                        cpu, c0, c1, rcu_segcblist_head(&sdp->srcu_cblist));
1297                s0 += c0;
1298                s1 += c1;
1299        }
1300        pr_cont(" T(%ld,%ld)\n", s0, s1);
1301}
1302EXPORT_SYMBOL_GPL(srcu_torture_stats_print);
1303
1304static int __init srcu_bootup_announce(void)
1305{
1306        pr_info("Hierarchical SRCU implementation.\n");
1307        if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
1308                pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
1309        return 0;
1310}
1311early_initcall(srcu_bootup_announce);
1312
1313void __init srcu_init(void)
1314{
1315        struct srcu_struct *ssp;
1316
1317        srcu_init_done = true;
1318        while (!list_empty(&srcu_boot_list)) {
1319                ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
1320                                      work.work.entry);
1321                check_init_srcu_struct(ssp);
1322                list_del_init(&ssp->work.work.entry);
1323                queue_work(rcu_gp_wq, &ssp->work.work);
1324        }
1325}
1326