/*
 * linux/kernel/rcupdate.c
 *
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For a detailed explanation of the Read-Copy Update mechanism see:
 *              http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);

EXPORT_SYMBOL_GPL(rcu_lock_map);
#endif

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
        .cur = -300,
        .completed = -300,
        .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
        .cpumask = CPU_MASK_NONE,
};
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
        .cur = -300,
        .completed = -300,
        .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
        .cpumask = CPU_MASK_NONE,
};

DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };

/* Fake initialization required by compiler */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
static int blimit = 10;
static int qhimark = 10000;
static int qlowmark = 100;

static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;

#ifdef CONFIG_SMP
static void force_quiescent_state(struct rcu_data *rdp,
                        struct rcu_ctrlblk *rcp)
{
        int cpu;
        cpumask_t cpumask;
        set_need_resched();
        if (unlikely(!rcp->signaled)) {
                rcp->signaled = 1;
                /*
                 * Don't send IPI to itself. With irqs disabled,
                 * rdp->cpu is the current cpu.
                 */
                cpumask = rcp->cpumask;
                cpu_clear(rdp->cpu, cpumask);
                for_each_cpu_mask(cpu, cpumask)
                        smp_send_reschedule(cpu);
        }
}
#else
static inline void force_quiescent_state(struct rcu_data *rdp,
                        struct rcu_ctrlblk *rcp)
{
        set_need_resched();
}
#endif

/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void fastcall call_rcu(struct rcu_head *head,
                                void (*func)(struct rcu_head *rcu))
{
        unsigned long flags;
        struct rcu_data *rdp;

        head->func = func;
        head->next = NULL;
        local_irq_save(flags);
        rdp = &__get_cpu_var(rcu_data);
        *rdp->nxttail = head;
        rdp->nxttail = &head->next;
        if (unlikely(++rdp->qlen > qhimark)) {
                rdp->blimit = INT_MAX;
                force_quiescent_state(rdp, &rcu_ctrlblk);
        }
        local_irq_restore(flags);
}

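/*
 * Example usage of call_rcu() -- a minimal sketch for illustration
 * only; "struct foo" and foo_reclaim() are hypothetical names, not
 * part of this file:
 *
 *	struct foo {
 *		struct rcu_head rcu;
 *		int data;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		struct foo *fp = container_of(head, struct foo, rcu);
 *		kfree(fp);
 *	}
 *
 *	... after unlinking fp from every RCU-protected structure:
 *	call_rcu(&fp->rcu, foo_reclaim);
 *
 * foo_reclaim() runs only after all pre-existing rcu_read_lock()
 * sections have completed, so no reader can still hold a reference.
 */
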
/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by rcu_read_lock() and
 * rcu_read_unlock() if in interrupt context, or by rcu_read_lock_bh()
 * and rcu_read_unlock_bh() if in process context. These may be nested.
 */
void fastcall call_rcu_bh(struct rcu_head *head,
                                void (*func)(struct rcu_head *rcu))
{
        unsigned long flags;
        struct rcu_data *rdp;

        head->func = func;
        head->next = NULL;
        local_irq_save(flags);
        rdp = &__get_cpu_var(rcu_bh_data);
        *rdp->nxttail = head;
        rdp->nxttail = &head->next;

        if (unlikely(++rdp->qlen > qhimark)) {
                rdp->blimit = INT_MAX;
                force_quiescent_state(rdp, &rcu_bh_ctrlblk);
        }

        local_irq_restore(flags);
}

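/*
 * Reader-side sketch matching call_rcu_bh() -- illustrative only;
 * "foo_list" and do_something_with() are hypothetical:
 *
 *	rcu_read_lock_bh();
 *	list_for_each_entry_rcu(fp, &foo_list, list)
 *		do_something_with(fp);
 *	rcu_read_unlock_bh();
 *
 * Updaters pair such readers with call_rcu_bh() exactly as in the
 * call_rcu() example above.
 */
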
/*
 * Return the number of RCU batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed(void)
{
        return rcu_ctrlblk.completed;
}

/*
 * Return the number of RCU bh batches processed thus far.  Useful
 * for debug and statistics.
 */
long rcu_batches_completed_bh(void)
{
        return rcu_bh_ctrlblk.completed;
}

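/*
 * These counters can be used to poll for grace periods.  A sketch,
 * assuming the caller tolerates eventual counter wrap:
 *
 *	long snap = rcu_batches_completed();
 *	...
 *	done = (rcu_batches_completed() - snap >= 2);
 *
 * Two completed batches imply a full grace period since the snapshot,
 * because the batch current at snapshot time may already have been
 * under way.
 */
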
static void rcu_barrier_callback(struct rcu_head *notused)
{
        if (atomic_dec_and_test(&rcu_barrier_cpu_count))
                complete(&rcu_barrier_completion);
}

/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *notused)
{
        int cpu = smp_processor_id();
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
        struct rcu_head *head;

        head = &rdp->barrier;
        atomic_inc(&rcu_barrier_cpu_count);
        call_rcu(head, rcu_barrier_callback);
}

/**
 * rcu_barrier - Wait until all in-flight RCU callbacks are complete.
 */
void rcu_barrier(void)
{
        BUG_ON(in_interrupt());
        /* Take cpucontrol mutex to protect against CPU hotplug */
        mutex_lock(&rcu_barrier_mutex);
        init_completion(&rcu_barrier_completion);
        atomic_set(&rcu_barrier_cpu_count, 0);
        on_each_cpu(rcu_barrier_func, NULL, 0, 1);
        wait_for_completion(&rcu_barrier_completion);
        mutex_unlock(&rcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

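/*
 * The usual caller of rcu_barrier() is a module-unload path: a module
 * that has queued callbacks with call_rcu() must wait for all of them
 * to fire before its code and data can safely be freed.  A sketch
 * (foo_exit() is a hypothetical module exit function):
 *
 *	static void __exit foo_exit(void)
 *	{
 *		... unlink all foo structures and call_rcu() each ...
 *		rcu_barrier();
 *	}
 *
 * After rcu_barrier() returns, no callback queued by this module is
 * still outstanding on any cpu.
 */
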
/*
 * Invoke the completed RCU callbacks. They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct rcu_data *rdp)
{
        struct rcu_head *next, *list;
        int count = 0;

        list = rdp->donelist;
        while (list) {
                next = list->next;
                prefetch(next);
                list->func(list);
                list = next;
                if (++count >= rdp->blimit)
                        break;
        }
        rdp->donelist = list;

        local_irq_disable();
        rdp->qlen -= count;
        local_irq_enable();
        if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
                rdp->blimit = blimit;

        if (!rdp->donelist)
                rdp->donetail = &rdp->donelist;
        else
                tasklet_schedule(&per_cpu(rcu_tasklet, rdp->cpu));
}

/*
 * Grace period handling:
 * The grace period handling consists of two steps:
 * - A new grace period is started.
 *   This is done by rcu_start_batch. The start is not broadcast to
 *   all cpus, they must pick this up by comparing rcp->cur with
 *   rdp->quiescbatch. All cpus are recorded in the
 *   rcu_ctrlblk.cpumask bitmap.
 * - All cpus must go through a quiescent state.
 *   Since the start of the grace period is not broadcast, at least two
 *   calls to rcu_check_quiescent_state are required:
 *   The first call just notices that a new grace period is running. The
 *   following calls check if there was a quiescent state since the beginning
 *   of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
 *   the bitmap is empty, then the grace period is completed.
 *   rcu_check_quiescent_state calls rcu_start_batch() to start the next grace
 *   period (if necessary).
 */
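/*
 * Illustrative timeline of one grace period on two cpus (a sketch of
 * the protocol above, not code):
 *
 *	rcu_start_batch():   rcp->cur++, cpumask = { cpu0, cpu1 }
 *	cpu0: quiescbatch != rcp->cur      -> qs_pending = 1
 *	cpu0: context switch               -> passed_quiesc = 1
 *	cpu0: rcu_check_quiescent_state()  -> cpu_quiet(0), cpumask = { cpu1 }
 *	cpu1: same sequence                -> cpu_quiet(1), cpumask = { }
 *	cpumask empty: rcp->completed = rcp->cur, and callbacks queued
 *	before the batch started may now be invoked.
 */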
/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold rcu_ctrlblk.lock.
 */
static void rcu_start_batch(struct rcu_ctrlblk *rcp)
{
        if (rcp->next_pending &&
                        rcp->completed == rcp->cur) {
                rcp->next_pending = 0;
                /*
                 * next_pending == 0 must be visible in
                 * __rcu_process_callbacks() before it can see new value of cur.
                 */
                smp_wmb();
                rcp->cur++;

                /*
                 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
                 * barrier.  Otherwise it can cause tickless idle CPUs to be
                 * included in rcp->cpumask, which will extend grace periods
                 * unnecessarily.
                 */
                smp_mb();
                cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);

                rcp->signaled = 0;
        }
}

/*
 * cpu went through a quiescent state since the beginning of the grace period.
 * Clear it from the cpu mask and complete the grace period if it was the last
 * cpu. Start another grace period if someone has further entries pending.
 */
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
{
        cpu_clear(cpu, rcp->cpumask);
        if (cpus_empty(rcp->cpumask)) {
                /* batch completed! */
                rcp->completed = rcp->cur;
                rcu_start_batch(rcp);
        }
}

/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch). If so, and if it hasn't already done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
                                        struct rcu_data *rdp)
{
        if (rdp->quiescbatch != rcp->cur) {
                /* start new grace period: */
                rdp->qs_pending = 1;
                rdp->passed_quiesc = 0;
                rdp->quiescbatch = rcp->cur;
                return;
        }

        /* Grace period already completed for this cpu?
         * qs_pending is checked instead of the actual bitmap to avoid
         * cacheline thrashing.
         */
        if (!rdp->qs_pending)
                return;

        /*
         * Was there a quiescent state since the beginning of the grace
         * period? If no, then exit and wait for the next call.
         */
        if (!rdp->passed_quiesc)
                return;
        rdp->qs_pending = 0;

        spin_lock(&rcp->lock);
        /*
         * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
         * during cpu startup. Ignore the quiescent state.
         */
        if (likely(rdp->quiescbatch == rcp->cur))
                cpu_quiet(rdp->cpu, rcp);

        spin_unlock(&rcp->lock);
}

#ifdef CONFIG_HOTPLUG_CPU

/* Warning! Helper for rcu_offline_cpu(); do not use elsewhere without
 * reviewing the locking requirements: the list it is pulling from has
 * to belong to a cpu which is dead and hence not processing interrupts.
 */
static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
                                struct rcu_head **tail)
{
        local_irq_disable();
        *this_rdp->nxttail = list;
        if (list)
                this_rdp->nxttail = tail;
        local_irq_enable();
}

static void __rcu_offline_cpu(struct rcu_data *this_rdp,
                                struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
        /* If the cpu going offline owns the grace period we can block
         * indefinitely waiting for it, so flush it here.
         */
        spin_lock_bh(&rcp->lock);
        if (rcp->cur != rcp->completed)
                cpu_quiet(rdp->cpu, rcp);
        spin_unlock_bh(&rcp->lock);
        rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
        rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
        rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
}

static void rcu_offline_cpu(int cpu)
{
        struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
        struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);

        __rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
                                        &per_cpu(rcu_data, cpu));
        __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
                                        &per_cpu(rcu_bh_data, cpu));
        put_cpu_var(rcu_data);
        put_cpu_var(rcu_bh_data);
        tasklet_kill_immediate(&per_cpu(rcu_tasklet, cpu), cpu);
}

#else

static void rcu_offline_cpu(int cpu)
{
}

#endif

/*
 * This does the RCU processing work from tasklet context.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
                                        struct rcu_data *rdp)
{
        if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
                *rdp->donetail = rdp->curlist;
                rdp->donetail = rdp->curtail;
                rdp->curlist = NULL;
                rdp->curtail = &rdp->curlist;
        }

        if (rdp->nxtlist && !rdp->curlist) {
                local_irq_disable();
                rdp->curlist = rdp->nxtlist;
                rdp->curtail = rdp->nxttail;
                rdp->nxtlist = NULL;
                rdp->nxttail = &rdp->nxtlist;
                local_irq_enable();

                /*
                 * start the next batch of callbacks
                 */

                /* determine batch number */
                rdp->batch = rcp->cur + 1;
                /* see the comment and corresponding wmb() in
                 * rcu_start_batch()
                 */
                smp_rmb();

                if (!rcp->next_pending) {
                        /* and start it/schedule start if it's a new batch */
                        spin_lock(&rcp->lock);
                        rcp->next_pending = 1;
                        rcu_start_batch(rcp);
                        spin_unlock(&rcp->lock);
                }
        }

        rcu_check_quiescent_state(rcp, rdp);
        if (rdp->donelist)
                rcu_do_batch(rdp);
}

static void rcu_process_callbacks(unsigned long unused)
{
        __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
        __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
}

static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
        /* This cpu has pending rcu entries and the grace period
         * for them has completed.
         */
        if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
                return 1;

        /* This cpu has no pending entries, but there are new entries */
        if (!rdp->curlist && rdp->nxtlist)
                return 1;

        /* This cpu has finished callbacks to invoke */
        if (rdp->donelist)
                return 1;

        /* The rcu core waits for a quiescent state from the cpu */
        if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
                return 1;

        /* nothing to do */
        return 0;
}

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so.  This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
int rcu_pending(int cpu)
{
        return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
                __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
}

/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so.  This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 */
int rcu_needs_cpu(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
        struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);

        return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu));
}

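/*
 * Called from the scheduler tick.  A tick that interrupts user mode, or
 * the idle loop with no softirq active and no nested hardirq (only the
 * tick interrupt itself on the hardirq count), cannot be inside a
 * read-side critical section, so it counts as a quiescent state for
 * both rcu and rcu_bh.  A tick that merely finds softirqs inactive
 * qualifies for rcu_bh only.
 */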
void rcu_check_callbacks(int cpu, int user)
{
        if (user ||
            (idle_cpu(cpu) && !in_softirq() &&
                                hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
                rcu_qsctr_inc(cpu);
                rcu_bh_qsctr_inc(cpu);
        } else if (!in_softirq())
                rcu_bh_qsctr_inc(cpu);
        tasklet_schedule(&per_cpu(rcu_tasklet, cpu));
}

static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
                                                struct rcu_data *rdp)
{
        memset(rdp, 0, sizeof(*rdp));
        rdp->curtail = &rdp->curlist;
        rdp->nxttail = &rdp->nxtlist;
        rdp->donetail = &rdp->donelist;
        rdp->quiescbatch = rcp->completed;
        rdp->qs_pending = 0;
        rdp->cpu = cpu;
        rdp->blimit = blimit;
}

static void __cpuinit rcu_online_cpu(int cpu)
{
        struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
        struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);

        rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
        rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
        tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
}

static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                rcu_online_cpu(cpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                rcu_offline_cpu(cpu);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata rcu_nb = {
        .notifier_call  = rcu_cpu_notify,
};

/*
 * Initializes the RCU mechanism.  Assumed to be called early, that is
 * before the local timer (SMP) or the jiffies timer (uniprocessor) is
 * set up.  Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
 */
void __init rcu_init(void)
{
        rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
                        (void *)(long)smp_processor_id());
        /* Register notifier for non-boot CPUs */
        register_cpu_notifier(&rcu_nb);
}

struct rcu_synchronize {
        struct rcu_head head;
        struct completion completion;
};

/* Because of the FASTCALL declaration of complete(), we use this wrapper */
static void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}

/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 *
 * If your read-side code is not protected by rcu_read_lock(), do -not-
 * use synchronize_rcu().
 */
void synchronize_rcu(void)
{
        struct rcu_synchronize rcu;

        init_completion(&rcu.completion);
        /* Will wake me up after RCU finishes */
        call_rcu(&rcu.head, wakeme_after_rcu);

        /* Wait for it */
        wait_for_completion(&rcu.completion);
}

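/*
 * Typical updater-side use of synchronize_rcu() -- a sketch; "foo",
 * foo_lock and global_foo are hypothetical:
 *
 *	struct foo *new_fp, *old_fp;
 *
 *	new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
 *	spin_lock(&foo_lock);
 *	old_fp = global_foo;
 *	*new_fp = *old_fp;
 *	new_fp->a = NEW_A;
 *	rcu_assign_pointer(global_foo, new_fp);
 *	spin_unlock(&foo_lock);
 *	synchronize_rcu();
 *	kfree(old_fp);
 *
 * The copy/update/replace is what gives RCU its name: readers that
 * started before rcu_assign_pointer() keep using old_fp, and the
 * synchronize_rcu() call makes it safe to free old_fp afterwards.
 */
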
module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);
EXPORT_SYMBOL_GPL(rcu_batches_completed);
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
EXPORT_SYMBOL_GPL(call_rcu);
EXPORT_SYMBOL_GPL(call_rcu_bh);
EXPORT_SYMBOL_GPL(synchronize_rcu);