linux/kernel/rcu/rcu.h
/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>
#ifdef CONFIG_RCU_TRACE
#define RCU_TRACE(stmt) stmt
#else /* #ifdef CONFIG_RCU_TRACE */
#define RCU_TRACE(stmt)
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/* Offset to allow for unmatched rcu_irq_{enter,exit}(). */
#define DYNTICK_IRQ_NONIDLE     ((LONG_MAX / 2) + 1)


/*
 * Grace-period counter management.
 */

#define RCU_SEQ_CTR_SHIFT       2
#define RCU_SEQ_STATE_MASK      ((1 << RCU_SEQ_CTR_SHIFT) - 1)

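/*
 * For example, with RCU_SEQ_CTR_SHIFT == 2 the two low-order bits of a
 * grace-period sequence number hold the state and the remaining bits
 * count grace periods: s == 0x9 (binary 1001) decodes to a counter of
 * 2 (via rcu_seq_ctr()) with a grace period in progress (state 1).
 */
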
/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
        return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
        return s & RCU_SEQ_STATE_MASK;
}

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
        WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
        WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
        WRITE_ONCE(*sp, *sp + 1);
        smp_mb(); /* Ensure update-side operation after counter increment. */
        WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-grace-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
        return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
        smp_mb(); /* Ensure update-side operation before counter increment. */
        WARN_ON_ONCE(!rcu_seq_state(*sp));
        WRITE_ONCE(*sp, rcu_seq_endval(sp));
}

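/*
 * Lifecycle example: starting from an idle sequence value of 0x8,
 * rcu_seq_start() advances it to 0x9 (state 1, grace period in
 * progress), and rcu_seq_end() then rounds it up to 0xc, the next
 * value whose state bits are all zero.
 */
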
/*
 * rcu_seq_snap - Take a snapshot of the update side's sequence number.
 *
 * This function returns the earliest value of the grace-period sequence number
 * that will indicate that a full grace period has elapsed since the current
 * time.  Once the grace-period sequence number has reached this value, it will
 * be safe to invoke all callbacks that have been registered prior to the
 * current time. This value is the current grace-period number plus two to the
 * power of the number of low-order bits reserved for state, then rounded up to
 * the next value in which the state bits are all zero.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
        unsigned long s;

        s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
        smp_mb(); /* Above access must not bleed into critical section. */
        return s;
}

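/*
 * Worked example of the arithmetic above: with RCU_SEQ_CTR_SHIFT == 2,
 * rcu_seq_snap() computes (*sp + 7) & ~0x3.  If *sp == 0x8 (counter 2,
 * idle), the snapshot is 0xc, so one full grace period must elapse.
 * If *sp == 0x9 (counter 2, grace period in progress), the snapshot is
 * 0x10, because the in-progress grace period might not cover the
 * caller, so a full subsequent grace period is also required.
 */
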
/* Return the current value of the update side's sequence number, no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
        return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not the
 * corresponding update-side operation has started.
 */
static inline bool rcu_seq_started(unsigned long *sp, unsigned long s)
{
        return ULONG_CMP_LT((s - 1) & ~RCU_SEQ_STATE_MASK, READ_ONCE(*sp));
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
        return ULONG_CMP_GE(READ_ONCE(*sp), s);
}

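/*
 * Continuing the worked example: given a snapshot s == 0xc taken while
 * *sp == 0x8, rcu_seq_started() reports true once *sp has advanced past
 * 0x8 (the needed grace period has begun), and rcu_seq_done() reports
 * true once *sp has reached 0xc (that grace period has ended).
 */
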
/*
 * Has a grace period completed since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_completed_gp(unsigned long old, unsigned long new)
{
        return ULONG_CMP_LT(old, new & ~RCU_SEQ_STATE_MASK);
}

/*
 * Has a grace period started since the time the old gp_seq was collected?
 */
static inline bool rcu_seq_new_gp(unsigned long old, unsigned long new)
{
        return ULONG_CMP_LT((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK,
                            new);
}

/*
 * Roughly how many full grace periods have elapsed between the collection
 * of the two specified grace periods?
 */
static inline unsigned long rcu_seq_diff(unsigned long new, unsigned long old)
{
        unsigned long rnd_diff;

        if (old == new)
                return 0;
        /*
         * Compute the number of grace periods (still shifted up), plus
         * one if either of new and old is not an exact grace period.
         */
        rnd_diff = (new & ~RCU_SEQ_STATE_MASK) -
                   ((old + RCU_SEQ_STATE_MASK) & ~RCU_SEQ_STATE_MASK) +
                   ((new & RCU_SEQ_STATE_MASK) || (old & RCU_SEQ_STATE_MASK));
        if (ULONG_CMP_GE(RCU_SEQ_STATE_MASK, rnd_diff))
                return 1; /* Definitely no grace period has elapsed. */
        return ((rnd_diff - RCU_SEQ_STATE_MASK - 1) >> RCU_SEQ_CTR_SHIFT) + 2;
}

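/*
 * For example, with RCU_SEQ_CTR_SHIFT == 2, rcu_seq_diff(0x10, 0x8)
 * computes rnd_diff == 8 and returns 3, while rcu_seq_diff(0x9, 0x8)
 * takes the early exit and returns 1.  As noted above, the result is
 * only a rough estimate, erring on the high side.
 */
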
/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part of the
 * RCU API.  They live in rcu.h because they are used by all RCU flavors.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY   0
# define STATE_RCU_HEAD_QUEUED  1

extern struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
        int r1;

        r1 = debug_object_activate(head, &rcuhead_debug_descr);
        debug_object_active_state(head, &rcuhead_debug_descr,
                                  STATE_RCU_HEAD_READY,
                                  STATE_RCU_HEAD_QUEUED);
        return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
        debug_object_active_state(head, &rcuhead_debug_descr,
                                  STATE_RCU_HEAD_QUEUED,
                                  STATE_RCU_HEAD_READY);
        debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else   /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
        return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif  /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

void kfree(const void *);

/*
 * Reclaim the specified callback, either by invoking it (non-lazy case)
 * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
 */
static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
{
        unsigned long offset = (unsigned long)head->func;

        rcu_lock_acquire(&rcu_callback_map);
        if (__is_kfree_rcu_offset(offset)) {
                RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset);)
                kfree((void *)head - offset);
                rcu_lock_release(&rcu_callback_map);
                return true;
        } else {
                RCU_TRACE(trace_rcu_invoke_callback(rn, head);)
                head->func(head);
                rcu_lock_release(&rcu_callback_map);
                return false;
        }
}

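/*
 * The offset-based dispatch above works because kfree_rcu(p, field)
 * stores offsetof(typeof(*p), field), rather than a function pointer,
 * in the rcu_head's ->func slot; __is_kfree_rcu_offset() treats any
 * value too small to be a function address as such an offset, and
 * subtracting that offset from the rcu_head pointer recovers the
 * original pointer for kfree().
 */
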
#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_suppress;
int rcu_jiffies_till_stall_check(void);

#define rcu_ftrace_dump_stall_suppress() \
do { \
        if (!rcu_cpu_stall_suppress) \
                rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
        if (rcu_cpu_stall_suppress == 3) \
                rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #ifdef CONFIG_RCU_STALL_COMMON */
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x)  tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
        static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
        \
        if (!atomic_read(&___rfd_beenhere) && \
            !atomic_xchg(&___rfd_beenhere, 1)) { \
                tracing_off(); \
                rcu_ftrace_dump_stall_suppress(); \
                ftrace_dump(oops_dump_mode); \
                rcu_ftrace_dump_stall_unsuppress(); \
        } \
} while (0)

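/*
 * Callers pass an ftrace_dump_mode value, for example
 * rcu_ftrace_dump(DUMP_ALL).  Because each expansion of the macro gets
 * its own static ___rfd_beenhere flag, the once-only limit applies per
 * callsite rather than globally.
 */
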
void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if defined(SRCU) || !defined(TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
        int i;

        if (rcu_fanout_exact) {
                levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
                for (i = rcu_num_lvls - 2; i >= 0; i--)
                        levelspread[i] = RCU_FANOUT;
        } else {
                int ccur;
                int cprv;

                cprv = nr_cpu_ids;
                for (i = rcu_num_lvls - 1; i >= 0; i--) {
                        ccur = levelcnt[i];
                        levelspread[i] = (cprv + ccur - 1) / ccur;
                        cprv = ccur;
                }
        }
}

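/*
 * Worked example of the balanced (non-exact) path, assuming a
 * hypothetical 96-CPU system with rcu_num_lvls == 2 and
 * levelcnt == {1, 6} (one root, six leaves): the loop computes
 * levelspread == {6, 16}, so the root fans out to the six leaf
 * rcu_node structures and each leaf covers sixteen CPUs.
 */
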
/* Returns first leaf rcu_node of the specified RCU flavor. */
#define rcu_first_leaf_node(rsp) ((rsp)->level[rcu_num_lvls - 1])

/* Is this rcu_node a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/* Is this rcu_node the last leaf? */
#define rcu_is_last_leaf_node(rsp, rnp) ((rnp) == &(rsp)->node[rcu_num_nodes - 1])

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
        for ((rnp) = &(rsp)->node[0]; \
             (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
        for ((rnp) = &(rsp)->node[0]; !rcu_is_leaf_node(rnp); (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
        for ((rnp) = rcu_first_leaf_node(rsp); \
             (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
        for ((cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
             (cpu) <= rnp->grphi; \
             (cpu) = cpumask_next((cpu), cpu_possible_mask))

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
        ((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
        for ((cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
             (cpu) <= rnp->grphi; \
             (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))

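/*
 * A minimal sketch of how these iterators combine, along the lines of
 * the quiescent-state scans in kernel/rcu/tree.c (rnp->qsmask is
 * assumed from tree.h):
 *
 *      rcu_for_each_leaf_node(rsp, rnp) {
 *              unsigned long mask = READ_ONCE(rnp->qsmask);
 *
 *              for_each_leaf_node_cpu_mask(rnp, cpu, mask)
 *                      (handle a CPU still blocking the grace period);
 *      }
 */
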
/*
 * Wrappers for the rcu_node::lock acquire and release.
 *
 * Because the rcu_nodes form a tree, the tree traversal locking will observe
 * different lock values; this in turn means that an UNLOCK of one level
 * followed by a LOCK of another level does not imply a full memory barrier,
 * and most importantly transitivity is lost.
 *
 * In order to restore full ordering between tree levels, augment the regular
 * lock acquire functions with smp_mb__after_unlock_lock().
 *
 * Because ->lock of struct rcu_node is a __private field, one should use
 * these wrappers rather than directly calling raw_spin_{lock,unlock}* on ->lock.
 */
#define raw_spin_lock_rcu_node(p)                                       \
do {                                                                    \
        raw_spin_lock(&ACCESS_PRIVATE(p, lock));                        \
        smp_mb__after_unlock_lock();                                    \
} while (0)

#define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irq_rcu_node(p)                                   \
do {                                                                    \
        raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock));                    \
        smp_mb__after_unlock_lock();                                    \
} while (0)

#define raw_spin_unlock_irq_rcu_node(p)                                 \
        raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irqsave_rcu_node(p, flags)                        \
do {                                                                    \
        raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
        smp_mb__after_unlock_lock();                                    \
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags)                   \
        raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

#define raw_spin_trylock_rcu_node(p)                                    \
({                                                                      \
        bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock));    \
                                                                        \
        if (___locked)                                                  \
                smp_mb__after_unlock_lock();                            \
        ___locked;                                                      \
})

#define raw_lockdep_assert_held_rcu_node(p)                             \
        lockdep_assert_held(&ACCESS_PRIVATE(p, lock))

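/*
 * A minimal usage sketch (rnp and flags supplied by the caller):
 *
 *      raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *      (update grace-period state protected by rnp->lock)
 *      raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 *
 * The smp_mb__after_unlock_lock() in the acquisition wrappers is what
 * makes an unlock of one rcu_node's ->lock followed by a lock of
 * another's behave as a full memory barrier.
 */
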
#endif /* #if defined(SRCU) || !defined(TINY_RCU) */

#ifdef CONFIG_TINY_RCU
/* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);     /* Internal RCU use. */
bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#define RCU_SCHEDULER_INACTIVE  0
#define RCU_SCHEDULER_INIT      1
#define RCU_SCHEDULER_RUNNING   2

enum rcutorture_type {
        RCU_FLAVOR,
        RCU_BH_FLAVOR,
        RCU_SCHED_FLAVOR,
        RCU_TASKS_FLAVOR,
        SRCU_FLAVOR,
        INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
                            unsigned long *gp_seq);
void rcutorture_record_progress(unsigned long vernum);
void do_trace_rcu_torture_read(const char *rcutorturename,
                               struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old,
                               unsigned long c);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
                                          int *flags, unsigned long *gp_seq)
{
        *flags = 0;
        *gp_seq = 0;
}
static inline void rcutorture_record_progress(unsigned long vernum) { }
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
                               struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old,
                               unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
        do { } while (0)
#endif
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
                                           struct srcu_struct *sp, int *flags,
                                           unsigned long *gp_seq)
{
        if (test_type != SRCU_FLAVOR)
                return;
        *flags = 0;
        *gp_seq = sp->srcu_idx;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
                             struct srcu_struct *sp, int *flags,
                             unsigned long *gp_seq);

#endif

#ifdef CONFIG_TINY_RCU
static inline unsigned long rcu_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_bh_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_sched_get_gp_seq(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed_sched(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline void rcu_bh_force_quiescent_state(void) { }
static inline void rcu_sched_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
#else /* #ifdef CONFIG_TINY_RCU */
unsigned long rcu_get_gp_seq(void);
unsigned long rcu_bh_get_gp_seq(void);
unsigned long rcu_sched_get_gp_seq(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long rcu_exp_batches_completed_sched(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
int rcu_get_gp_kthreads_prio(void);
void rcu_force_quiescent_state(void);
void rcu_bh_force_quiescent_state(void);
void rcu_sched_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
bool rcu_is_nocb_cpu(int cpu);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
#endif

#endif /* __LINUX_RCU_H */