linux/include/linux/rcupdate.h
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *              http://lse.sourceforge.net/locking/rcupdate.html
 *
 */

#ifndef __LINUX_RCUPDATE_H
#define __LINUX_RCUPDATE_H

#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/seqlock.h>
#include <linux/lockdep.h>
#include <linux/completion.h>
#include <linux/debugobjects.h>
#include <linux/bug.h>
#include <linux/compiler.h>

#ifdef CONFIG_RCU_TORTURE_TEST
extern int rcutorture_runnable; /* for sysctl */
#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
extern void rcutorture_record_test_transition(void);
extern void rcutorture_record_progress(unsigned long vernum);
extern void do_trace_rcu_torture_read(char *rcutorturename,
                                      struct rcu_head *rhp);
#else
static inline void rcutorture_record_test_transition(void)
{
}
static inline void rcutorture_record_progress(unsigned long vernum)
{
}
#ifdef CONFIG_RCU_TRACE
extern void do_trace_rcu_torture_read(char *rcutorturename,
                                      struct rcu_head *rhp);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
#endif
#endif

#define UINT_CMP_GE(a, b)       (UINT_MAX / 2 >= (a) - (b))
#define UINT_CMP_LT(a, b)       (UINT_MAX / 2 < (a) - (b))
#define ULONG_CMP_GE(a, b)      (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)      (ULONG_MAX / 2 < (a) - (b))

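/*
 * These comparisons are wraparound-safe: the counters are treated as
 * modular quantities, so a value that has recently wrapped still compares
 * as "later".  A worked sketch (values chosen only for illustration):
 *
 *      ULONG_CMP_GE(0UL, ULONG_MAX) is true, because 0 - ULONG_MAX
 *      wraps to 1, which is within half the counter range, so 0 is
 *      treated as coming "after" ULONG_MAX.
 *
 *      ULONG_CMP_LT(ULONG_MAX, 0UL) is likewise true: ULONG_MAX - 0
 *      exceeds half the range, so ULONG_MAX is "before" 0.
 */
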
/* Exported common interfaces */

#ifdef CONFIG_PREEMPT_RCU

/**
 * call_rcu() - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all pre-existing RCU read-side
 * critical sections have completed.  However, the callback function
 * might well execute concurrently with RCU read-side critical sections
 * that started after call_rcu() was invoked.  RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
extern void call_rcu(struct rcu_head *head,
                              void (*func)(struct rcu_head *head));

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* In classic RCU, call_rcu() is just call_rcu_sched(). */
#define call_rcu        call_rcu_sched

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

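/*
 * A minimal call_rcu() usage sketch.  The struct foo and foo_reclaim()
 * names here are hypothetical, used only for illustration:
 *
 *      struct foo {
 *              int a;
 *              struct rcu_head rcu;
 *      };
 *
 *      static void foo_reclaim(struct rcu_head *head)
 *      {
 *              struct foo *fp = container_of(head, struct foo, rcu);
 *
 *              kfree(fp);
 *      }
 *
 * After removing a struct foo from all reader-visible structures, the
 * updater hands it to RCU instead of freeing it immediately:
 *
 *      call_rcu(&fp->rcu, foo_reclaim);
 *
 * Any readers still traversing the structure when it was unlinked can
 * then finish safely before the memory is actually freed.
 */
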
/**
 * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR
 *  - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
 * These may be nested.
 */
extern void call_rcu_bh(struct rcu_head *head,
                        void (*func)(struct rcu_head *head));

/**
 * call_rcu_sched() - Queue an RCU callback for invocation after a sched grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_sched() assumes
 * that the read-side critical sections end on enabling of preemption
 * or on voluntary preemption.
 * RCU read-side critical sections are delimited by:
 *  - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
 *  - anything that disables preemption.
 * These may be nested.
 */
extern void call_rcu_sched(struct rcu_head *head,
                           void (*func)(struct rcu_head *rcu));

extern void synchronize_sched(void);

#ifdef CONFIG_PREEMPT_RCU

extern void __rcu_read_lock(void);
extern void __rcu_read_unlock(void);
void synchronize_rcu(void);

/*
 * Defined as a macro as it is a very low level header included from
 * areas that don't even know about current.  This gives the rcu_read_lock()
 * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other
 * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
 */
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)

#else /* #ifdef CONFIG_PREEMPT_RCU */

static inline void __rcu_read_lock(void)
{
        preempt_disable();
}

static inline void __rcu_read_unlock(void)
{
        preempt_enable();
}

static inline void synchronize_rcu(void)
{
        synchronize_sched();
}

static inline int rcu_preempt_depth(void)
{
        return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/* Internal to kernel */
extern void rcu_sched_qs(int cpu);
extern void rcu_bh_qs(int cpu);
extern void rcu_check_callbacks(int cpu, int user);
struct notifier_block;
extern void rcu_idle_enter(void);
extern void rcu_idle_exit(void);
extern void rcu_irq_enter(void);
extern void rcu_irq_exit(void);

/**
 * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
 * @a: Code that RCU needs to pay attention to.
 *
 * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
 * in the inner idle loop, that is, between the rcu_idle_enter() and
 * the rcu_idle_exit() -- RCU will happily ignore any such read-side
 * critical sections.  However, things like powertop need tracepoints
 * in the inner idle loop.
 *
 * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
 * will tell RCU that it needs to pay attention, invoke its argument
 * (in this example, a call to the do_something_with_RCU() function),
 * and then tell RCU to go back to ignoring this CPU.  It is permissible
 * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
 * quite limited.  If deeper nesting is required, it will be necessary
 * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
 *
 * This macro may be used from process-level code only.
 */
#define RCU_NONIDLE(a) \
        do { \
                rcu_idle_exit(); \
                do { a; } while (0); \
                rcu_idle_enter(); \
        } while (0)

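/*
 * A usage sketch for RCU_NONIDLE(); the trace_cpu_idle_detail()
 * tracepoint named here is hypothetical:
 *
 *      RCU_NONIDLE(trace_cpu_idle_detail(state, cpu));
 *
 * This momentarily marks the CPU as non-idle so that any RCU read-side
 * critical sections inside the tracepoint handler are properly protected.
 */
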
/*
 * Infrastructure to implement the synchronize_() primitives in
 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
 */

typedef void call_rcu_func_t(struct rcu_head *head,
                             void (*func)(struct rcu_head *head));
void wait_rcu_gp(call_rcu_func_t crf);

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
#include <linux/rcutiny.h>
#else
#error "Unknown RCU implementation specified to kernel configuration"
#endif

/*
 * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
 * initialization and destruction of rcu_head on the stack. rcu_head structures
 * allocated dynamically in the heap or defined statically don't need any
 * initialization.
 */
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
extern void init_rcu_head_on_stack(struct rcu_head *head);
extern void destroy_rcu_head_on_stack(struct rcu_head *head);
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline void init_rcu_head_on_stack(struct rcu_head *head)
{
}

static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
{
}
#endif  /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
static inline bool rcu_lockdep_current_cpu_online(void)
{
        return true;
}
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

#ifdef CONFIG_PROVE_RCU
extern int rcu_is_cpu_idle(void);
#else /* !CONFIG_PROVE_RCU */
static inline int rcu_is_cpu_idle(void)
{
        return 0;
}
#endif /* else !CONFIG_PROVE_RCU */

static inline void rcu_lock_acquire(struct lockdep_map *map)
{
        lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
}

static inline void rcu_lock_release(struct lockdep_map *map)
{
        lock_release(map, 1, _THIS_IP_);
}

extern struct lockdep_map rcu_lock_map;
extern struct lockdep_map rcu_bh_lock_map;
extern struct lockdep_map rcu_sched_lock_map;
extern int debug_lockdep_rcu_enabled(void);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
static inline int rcu_read_lock_held(void)
{
        if (!debug_lockdep_rcu_enabled())
                return 1;
        if (rcu_is_cpu_idle())
                return 0;
        if (!rcu_lockdep_current_cpu_online())
                return 0;
        return lock_is_held(&rcu_lock_map);
}

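/*
 * rcu_read_lock_held() is typically consulted from debug assertions in
 * functions that require RCU protection.  A sketch, with a hypothetical
 * foo_lookup() function:
 *
 *      struct foo *foo_lookup(int key)
 *      {
 *              WARN_ON_ONCE(!rcu_read_lock_held());
 *              ...
 *      }
 *
 * Note the "might we be" phrasing above: absent lockdep information,
 * this function errs on the side of returning 1.
 */
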
/*
 * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
 * hell.
 */
extern int rcu_read_lock_bh_held(void);

/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of
 * view (i.e., we are in the section between rcu_idle_enter() and
 * rcu_idle_exit()) then rcu_read_lock_sched_held() returns false even
 * if the CPU did an rcu_read_lock_sched().  The reason for this is that
 * RCU ignores CPUs that are in such a section, considering these as in
 * extended quiescent state, so such a CPU is effectively never in an
 * RCU read-side critical section regardless of what RCU primitives it
 * invokes.  This state of affairs is required --- we need to keep an
 * RCU-free window in idle where the CPU may possibly enter into low
 * power mode. This way we can report an extended quiescent state to
 * other CPUs that started a grace period. Otherwise we would delay any
 * grace period as long as we run in the idle task.
 *
 * Similarly, we avoid claiming an RCU-sched read lock held if the current
 * CPU is offline.
 */
#ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void)
{
        int lockdep_opinion = 0;

        if (!debug_lockdep_rcu_enabled())
                return 1;
        if (rcu_is_cpu_idle())
                return 0;
        if (!rcu_lockdep_current_cpu_online())
                return 0;
        if (debug_locks)
                lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
        return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
        return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

# define rcu_lock_acquire(a)            do { } while (0)
# define rcu_lock_release(a)            do { } while (0)

static inline int rcu_read_lock_held(void)
{
        return 1;
}

static inline int rcu_read_lock_bh_held(void)
{
        return 1;
}

#ifdef CONFIG_PREEMPT_COUNT
static inline int rcu_read_lock_sched_held(void)
{
        return preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT_COUNT */
static inline int rcu_read_lock_sched_held(void)
{
        return 1;
}
#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_PROVE_RCU

extern int rcu_my_thread_group_empty(void);

/**
 * rcu_lockdep_assert - emit lockdep splat if specified condition not met
 * @c: condition to check
 * @s: informative message
 */
#define rcu_lockdep_assert(c, s)                                        \
        do {                                                            \
                static bool __section(.data.unlikely) __warned;         \
                if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \
                        __warned = true;                                \
                        lockdep_rcu_suspicious(__FILE__, __LINE__, s);  \
                }                                                       \
        } while (0)

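/*
 * For example, a function that must be called with either rcu_read_lock()
 * or a hypothetical foo_mutex held could assert exactly that:
 *
 *      rcu_lockdep_assert(rcu_read_lock_held() ||
 *                         lockdep_is_held(&foo_mutex),
 *                         "need rcu_read_lock() or foo_mutex");
 *
 * The static __warned flag above ensures each call site splats at most once.
 */
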
#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU)
static inline void rcu_preempt_sleep_check(void)
{
        rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
                           "Illegal context switch in RCU read-side "
                           "critical section");
}
#else /* #if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU) */
static inline void rcu_preempt_sleep_check(void)
{
}
#endif /* #else #if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU) */

#define rcu_sleep_check()                                               \
        do {                                                            \
                rcu_preempt_sleep_check();                              \
                rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),     \
                                   "Illegal context switch in RCU-bh"   \
                                   " read-side critical section");      \
                rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),  \
                                   "Illegal context switch in RCU-sched"\
                                   " read-side critical section");      \
        } while (0)

#else /* #ifdef CONFIG_PROVE_RCU */

#define rcu_lockdep_assert(c, s) do { } while (0)
#define rcu_sleep_check() do { } while (0)

#endif /* #else #ifdef CONFIG_PROVE_RCU */

/*
 * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
 * and rcu_assign_pointer().  Some of these could be folded into their
 * callers, but they are left separate in order to ease introduction of
 * multiple flavors of pointers to match the multiple flavors of RCU
 * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
 * the future.
 */

#ifdef __CHECKER__
#define rcu_dereference_sparse(p, space) \
        ((void)(((typeof(*p) space *)p) == p))
#else /* #ifdef __CHECKER__ */
#define rcu_dereference_sparse(p, space)
#endif /* #else #ifdef __CHECKER__ */

#define __rcu_access_pointer(p, space) \
        ({ \
                typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
                rcu_dereference_sparse(p, space); \
                ((typeof(*p) __force __kernel *)(_________p1)); \
        })
#define __rcu_dereference_check(p, c, space) \
        ({ \
                typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
                rcu_lockdep_assert(c, "suspicious rcu_dereference_check()" \
                                      " usage"); \
                rcu_dereference_sparse(p, space); \
                smp_read_barrier_depends(); \
                ((typeof(*p) __force __kernel *)(_________p1)); \
        })
#define __rcu_dereference_protected(p, c, space) \
        ({ \
                rcu_lockdep_assert(c, "suspicious rcu_dereference_protected()" \
                                      " usage"); \
                rcu_dereference_sparse(p, space); \
                ((typeof(*p) __force __kernel *)(p)); \
        })

#define __rcu_access_index(p, space) \
        ({ \
                typeof(p) _________p1 = ACCESS_ONCE(p); \
                rcu_dereference_sparse(p, space); \
                (_________p1); \
        })
#define __rcu_dereference_index_check(p, c) \
        ({ \
                typeof(p) _________p1 = ACCESS_ONCE(p); \
                rcu_lockdep_assert(c, \
                                   "suspicious rcu_dereference_index_check()" \
                                   " usage"); \
                smp_read_barrier_depends(); \
                (_________p1); \
        })
#define __rcu_assign_pointer(p, v, space) \
        ({ \
                smp_wmb(); \
                (p) = (typeof(*v) __force space *)(v); \
        })

/**
 * rcu_access_pointer() - fetch RCU pointer with no dereferencing
 * @p: The pointer to read
 *
 * Return the value of the specified RCU-protected pointer, but omit the
 * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
 * when the value of this pointer is accessed, but the pointer is not
 * dereferenced, for example, when testing an RCU-protected pointer against
 * NULL.  Although rcu_access_pointer() may also be used in cases where
 * update-side locks prevent the value of the pointer from changing, you
 * should instead use rcu_dereference_protected() for this use case.
 *
 * It is also permissible to use rcu_access_pointer() when read-side
 * access to the pointer was removed at least one grace period ago, as
 * is the case in the context of the RCU callback that is freeing up
 * the data, or after a synchronize_rcu() returns.  This can be useful
 * when tearing down multi-linked structures after a grace period
 * has elapsed.
 */
#define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu)

/**
 * rcu_dereference_check() - rcu_dereference with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Do an rcu_dereference(), but check that the conditions under which the
 * dereference will take place are correct.  Typically the conditions
 * indicate the various locking conditions that should be held at that
 * point.  The check should return true if the conditions are satisfied.
 * An implicit check for being in an RCU read-side critical section
 * (rcu_read_lock()) is included.
 *
 * For example:
 *
 *      bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock));
 *
 * could be used to indicate to lockdep that foo->bar may only be dereferenced
 * if either rcu_read_lock() is held, or that the lock required to replace
 * the bar struct at foo->bar is held.
 *
 * Note that the list of conditions may also include indications of when a lock
 * need not be held, for example during initialisation or destruction of the
 * target struct:
 *
 *      bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) ||
 *                                            atomic_read(&foo->usage) == 0);
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), prevents the compiler from refetching
 * (and from merging fetches), and, more importantly, documents exactly
 * which pointers are protected by RCU and checks that the pointer is
 * annotated as __rcu.
 */
#define rcu_dereference_check(p, c) \
        __rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu)

/**
 * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-bh counterpart to rcu_dereference_check().
 */
#define rcu_dereference_bh_check(p, c) \
        __rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu)

/**
 * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * This is the RCU-sched counterpart to rcu_dereference_check().
 */
#define rcu_dereference_sched_check(p, c) \
        __rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \
                                __rcu)

#define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/

/**
 * rcu_access_index() - fetch RCU index with no dereferencing
 * @p: The index to read
 *
 * Return the value of the specified RCU-protected index, but omit the
 * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
 * when the value of this index is accessed, but the index is not
 * dereferenced, for example, when testing an RCU-protected index against
 * -1.  Although rcu_access_index() may also be used in cases where
 * update-side locks prevent the value of the index from changing, you
 * should instead use rcu_dereference_index_protected() for this use case.
 */
#define rcu_access_index(p) __rcu_access_index((p), __rcu)

/**
 * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Similar to rcu_dereference_check(), but omits the sparse checking.
 * This allows rcu_dereference_index_check() to be used on integers,
 * which can then be used as array indices.  Attempting to use
 * rcu_dereference_check() on an integer will give compiler warnings
 * because the sparse address-space mechanism relies on dereferencing
 * the RCU-protected pointer.  Dereferencing integers is not something
 * that even gcc will put up with.
 *
 * Note that this function does not implicitly check for RCU read-side
 * critical sections.  If this function gains lots of uses, it might
 * make sense to provide versions for each flavor of RCU, but it does
 * not make sense as of early 2010.
 */
#define rcu_dereference_index_check(p, c) \
        __rcu_dereference_index_check((p), (c))

/**
 * rcu_dereference_protected() - fetch RCU pointer when updates prevented
 * @p: The pointer to read, prior to dereferencing
 * @c: The conditions under which the dereference will take place
 *
 * Return the value of the specified RCU-protected pointer, but omit
 * both the smp_read_barrier_depends() and the ACCESS_ONCE().  This
 * is useful in cases where update-side locks prevent the value of the
 * pointer from changing.  Please note that this primitive does -not-
 * prevent the compiler from repeating this reference or combining it
 * with other references, so it should not be used without protection
 * of appropriate locks.
 *
 * This function is only for update-side use.  Using this function
 * when protected only by rcu_read_lock() will result in infrequent
 * but very ugly failures.
 */
#define rcu_dereference_protected(p, c) \
        __rcu_dereference_protected((p), (c), __rcu)

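/*
 * A typical rcu_dereference_protected() use in an updater that holds the
 * lock guarding the pointer.  The gp pointer and gp_lock are hypothetical
 * names for an RCU-protected global pointer and its update-side lock:
 *
 *      spin_lock(&gp_lock);
 *      p = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *      rcu_assign_pointer(gp, NULL);
 *      spin_unlock(&gp_lock);
 *      synchronize_rcu();
 *      kfree(p);
 *
 * Because gp_lock excludes all other updaters, no ACCESS_ONCE() or memory
 * barrier is needed to fetch gp here.
 */
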

/**
 * rcu_dereference() - fetch RCU-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * This is a simple wrapper around rcu_dereference_check().
 */
#define rcu_dereference(p) rcu_dereference_check(p, 0)

/**
 * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0)

/**
 * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing
 * @p: The pointer to read, prior to dereferencing
 *
 * Makes rcu_dereference_check() do the dirty work.
 */
#define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0)

/**
 * rcu_read_lock() - mark the beginning of an RCU read-side critical section
 *
 * When synchronize_rcu() is invoked on one CPU while other CPUs
 * are within RCU read-side critical sections, then the
 * synchronize_rcu() is guaranteed to block until after all the other
 * CPUs exit their critical sections.  Similarly, if call_rcu() is invoked
 * on one CPU while other CPUs are within RCU read-side critical
 * sections, invocation of the corresponding RCU callback is deferred
 * until after all the other CPUs exit their critical sections.
 *
 * Note, however, that RCU callbacks are permitted to run concurrently
 * with new RCU read-side critical sections.  One way that this can happen
 * is via the following sequence of events: (1) CPU 0 enters an RCU
 * read-side critical section, (2) CPU 1 invokes call_rcu() to register
 * an RCU callback, (3) CPU 0 exits the RCU read-side critical section,
 * (4) CPU 2 enters an RCU read-side critical section, (5) the RCU
 * callback is invoked.  This is legal, because the RCU read-side critical
 * section that was running concurrently with the call_rcu() (and which
 * therefore might be referencing something that the corresponding RCU
 * callback would free up) has completed before the corresponding
 * RCU callback is invoked.
 *
 * RCU read-side critical sections may be nested.  Any deferred actions
 * will be deferred until the outermost RCU read-side critical section
 * completes.
 *
 * You can avoid reading and understanding the next paragraph by
 * following this rule: don't put anything in an rcu_read_lock() RCU
 * read-side critical section that would block in a !PREEMPT kernel.
 * But if you want the full story, read on!
 *
 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it
 * is illegal to block while in an RCU read-side critical section.  In
 * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
 * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
 * be preempted, but explicit blocking is illegal.  Finally, in preemptible
 * RCU implementations in real-time (CONFIG_PREEMPT_RT) kernel builds,
 * RCU read-side critical sections may be preempted and they may also
 * block, but only when acquiring spinlocks that are subject to priority
 * inheritance.
 */
static inline void rcu_read_lock(void)
{
        __rcu_read_lock();
        __acquire(RCU);
        rcu_lock_acquire(&rcu_lock_map);
        rcu_lockdep_assert(!rcu_is_cpu_idle(),
                           "rcu_read_lock() used illegally while idle");
}

/*
 * So where is rcu_write_lock()?  It does not exist, as there is no
 * way for writers to lock out RCU readers.  This is a feature, not
 * a bug -- this property is what provides RCU's performance benefits.
 * Of course, writers must coordinate with each other.  The normal
 * spinlock primitives work well for this, but any other technique may be
 * used as well.  RCU does not care how the writers keep out of each
 * others' way, as long as they do so.
 */

/**
 * rcu_read_unlock() - marks the end of an RCU read-side critical section.
 *
 * See rcu_read_lock() for more information.
 */
static inline void rcu_read_unlock(void)
{
        rcu_lockdep_assert(!rcu_is_cpu_idle(),
                           "rcu_read_unlock() used illegally while idle");
        rcu_lock_release(&rcu_lock_map);
        __release(RCU);
        __rcu_read_unlock();
}

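/*
 * The canonical reader pattern, using a hypothetical RCU-protected
 * global pointer gp:
 *
 *      rcu_read_lock();
 *      p = rcu_dereference(gp);
 *      if (p)
 *              do_something_with(p->a);
 *      rcu_read_unlock();
 *
 * Note that p must not be dereferenced after rcu_read_unlock() returns:
 * once the critical section ends, a grace period may elapse and the
 * pointed-to memory may be freed at any time.
 */
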
/**
 * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
 *
 * This is equivalent to rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
 * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
 * softirq handler to be a quiescent state, a process-context RCU
 * read-side critical section must be protected by disabling softirqs.
 * Read-side critical sections in interrupt context can use just
 * rcu_read_lock(), though this should at least be commented to avoid
 * confusing people reading the code.
 *
 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_bh() from one task if the matching rcu_read_lock_bh()
 * was invoked from some other task.
 */
static inline void rcu_read_lock_bh(void)
{
        local_bh_disable();
        __acquire(RCU_BH);
        rcu_lock_acquire(&rcu_bh_lock_map);
        rcu_lockdep_assert(!rcu_is_cpu_idle(),
                           "rcu_read_lock_bh() used illegally while idle");
}

/*
 * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
static inline void rcu_read_unlock_bh(void)
{
        rcu_lockdep_assert(!rcu_is_cpu_idle(),
                           "rcu_read_unlock_bh() used illegally while idle");
        rcu_lock_release(&rcu_bh_lock_map);
        __release(RCU_BH);
        local_bh_enable();
}

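/*
 * An RCU-bh reader looks just like an RCU reader, but pairs with
 * call_rcu_bh() and synchronize_rcu_bh() on the update side (gp is
 * again a hypothetical RCU-protected pointer):
 *
 *      rcu_read_lock_bh();
 *      p = rcu_dereference_bh(gp);
 *      if (p)
 *              do_something_with(p->a);
 *      rcu_read_unlock_bh();
 */
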
/**
 * rcu_read_lock_sched() - mark the beginning of an RCU-sched critical section
 *
 * This is equivalent to rcu_read_lock(), but to be used when updates
 * are being done using call_rcu_sched() or synchronize_rcu_sched().
 * Read-side critical sections can also be introduced by anything that
 * disables preemption, including local_irq_disable() and friends.
 *
 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
 * must occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock_sched() from process context if the matching
 * rcu_read_lock_sched() was invoked from an NMI handler.
 */
static inline void rcu_read_lock_sched(void)
{
        preempt_disable();
        __acquire(RCU_SCHED);
        rcu_lock_acquire(&rcu_sched_lock_map);
        rcu_lockdep_assert(!rcu_is_cpu_idle(),
                           "rcu_read_lock_sched() used illegally while idle");
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_lock_sched_notrace(void)
{
        preempt_disable_notrace();
        __acquire(RCU_SCHED);
}

/*
 * rcu_read_unlock_sched - marks the end of an RCU-sched critical section
 *
 * See rcu_read_lock_sched() for more information.
 */
static inline void rcu_read_unlock_sched(void)
{
        rcu_lockdep_assert(!rcu_is_cpu_idle(),
                           "rcu_read_unlock_sched() used illegally while idle");
        rcu_lock_release(&rcu_sched_lock_map);
        __release(RCU_SCHED);
        preempt_enable();
}

/* Used by lockdep and tracing: cannot be traced, cannot call lockdep. */
static inline notrace void rcu_read_unlock_sched_notrace(void)
{
        __release(RCU_SCHED);
        preempt_enable_notrace();
}

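/*
 * Likewise for RCU-sched, pairing with call_rcu_sched() and
 * synchronize_sched() on the update side (gp again hypothetical):
 *
 *      rcu_read_lock_sched();
 *      p = rcu_dereference_sched(gp);
 *      if (p)
 *              do_something_with(p->a);
 *      rcu_read_unlock_sched();
 */
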
/**
 * rcu_assign_pointer() - assign to RCU-protected pointer
 * @p: pointer to assign to
 * @v: value to assign (publish)
 *
 * Assigns the specified value to the specified RCU-protected
 * pointer, ensuring that any concurrent RCU readers will see
 * any prior initialization.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (which is most of them), and also prevents the compiler from
 * reordering the code that initializes the structure after the pointer
 * assignment.  More importantly, this call documents which pointers
 * will be dereferenced by RCU read-side code.
 *
 * In some special cases, you may use RCU_INIT_POINTER() instead
 * of rcu_assign_pointer().  RCU_INIT_POINTER() is a bit faster due
 * to the fact that it does not constrain either the CPU or the compiler.
 * That said, using RCU_INIT_POINTER() when you should have used
 * rcu_assign_pointer() is a very bad thing that results in
 * impossible-to-diagnose memory corruption.  So please be careful.
 * See the RCU_INIT_POINTER() comment header for details.
 */
#define rcu_assign_pointer(p, v) \
        __rcu_assign_pointer((p), (v), __rcu)

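/*
 * The matching publication pattern on the update side, with hypothetical
 * struct foo and global pointer gp:
 *
 *      p = kmalloc(sizeof(*p), GFP_KERNEL);
 *      if (p) {
 *              p->a = 1;
 *              p->b = 2;
 *              rcu_assign_pointer(gp, p);
 *      }
 *
 * The barrier implied by rcu_assign_pointer() guarantees that any reader
 * that sees the new value of gp also sees the initialized ->a and ->b.
 */
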
/**
 * RCU_INIT_POINTER() - initialize an RCU protected pointer
 *
 * Initialize an RCU-protected pointer in special cases where readers
 * do not need ordering constraints on the CPU or the compiler.  These
 * special cases are:
 *
 * 1.   This use of RCU_INIT_POINTER() is NULLing out the pointer -or-
 * 2.   The caller has taken whatever steps are required to prevent
 *      RCU readers from concurrently accessing this pointer -or-
 * 3.   The referenced data structure has already been exposed to
 *      readers either at compile time or via rcu_assign_pointer() -and-
 *      a.      You have not made -any- reader-visible changes to
 *              this structure since then -or-
 *      b.      It is OK for readers accessing this structure from its
 *              new location to see the old state of the structure.  (For
 *              example, the changes were to statistical counters or to
 *              other state where exact synchronization is not required.)
 *
 * Failure to follow these rules governing use of RCU_INIT_POINTER() will
 * result in impossible-to-diagnose memory corruption.  That is, the
 * structures will look OK in crash dumps, but any concurrent RCU readers
 * might see pre-initialized values of the referenced data structure.  So
 * please be very careful how you use RCU_INIT_POINTER()!!!
 *
 * If you are creating an RCU-protected linked structure that is accessed
 * by a single external-to-structure RCU-protected pointer, then you may
 * use RCU_INIT_POINTER() to initialize the internal RCU-protected
 * pointers, but you must use rcu_assign_pointer() to initialize the
 * external-to-structure pointer -after- you have completely initialized
 * the reader-accessible portions of the linked structure.
 */
#define RCU_INIT_POINTER(p, v) \
                p = (typeof(*v) __force __rcu *)(v)

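/*
 * For example, NULLing out a pointer requires no ordering, so case 1
 * above applies (gp is a hypothetical RCU-protected pointer):
 *
 *      RCU_INIT_POINTER(gp, NULL);
 *
 * Unlike a plain assignment, this still carries the __rcu sparse
 * annotation documenting gp as an RCU-protected pointer.
 */
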
static __always_inline bool __is_kfree_rcu_offset(unsigned long offset)
{
        return offset < 4096;
}

static __always_inline
void __kfree_rcu(struct rcu_head *head, unsigned long offset)
{
        typedef void (*rcu_callback)(struct rcu_head *);

        BUILD_BUG_ON(!__builtin_constant_p(offset));

        /* See the kfree_rcu() header comment. */
        BUILD_BUG_ON(!__is_kfree_rcu_offset(offset));

        kfree_call_rcu(head, (rcu_callback)offset);
}

/**
 * kfree_rcu() - kfree an object after a grace period.
 * @ptr:        pointer to kfree
 * @rcu_head:   the name of the struct rcu_head within the type of @ptr.
 *
 * Many RCU callback functions just call kfree() on the base structure.
 * These functions are trivial, but their size adds up, and furthermore
 * when they are used in a kernel module, that module must invoke the
 * high-latency rcu_barrier() function at module-unload time.
 *
 * The kfree_rcu() function handles this issue.  Rather than encoding a
 * function address in the embedded rcu_head structure, kfree_rcu() instead
 * encodes the offset of the rcu_head structure within the base structure.
 * Because the functions are not allowed in the low-order 4096 bytes of
 * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
 * If the offset is larger than 4095 bytes, a compile-time error will
 * be generated in __kfree_rcu().  If this error is triggered, you can
 * either fall back to use of call_rcu() or rearrange the structure to
 * position the rcu_head structure into the first 4096 bytes.
 *
 * Note that the allowable offset might decrease in the future, for example,
 * to allow something like kmem_cache_free_rcu().
 */
#define kfree_rcu(ptr, rcu_head)                                        \
        __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))

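/*
 * Continuing the earlier call_rcu() sketch: a callback that did nothing
 * but kfree() the enclosing (hypothetical) struct foo can be replaced
 * entirely, so that
 *
 *      call_rcu(&fp->rcu, foo_reclaim);
 *
 * becomes
 *
 *      kfree_rcu(fp, rcu);
 *
 * where "rcu" names the struct rcu_head field within struct foo.  No
 * callback function is needed, and a module using this form avoids the
 * rcu_barrier() otherwise required at unload time.
 */
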
#endif /* __LINUX_RCUPDATE_H */