linux/include/linux/rcutiny.h
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *              Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/cache.h>

static inline void rcu_barrier_bh(void)
{
        wait_rcu_gp(call_rcu_bh);
}

static inline void rcu_barrier_sched(void)
{
        wait_rcu_gp(call_rcu_sched);
}

static inline void synchronize_rcu_expedited(void)
{
        synchronize_sched();    /* Only one CPU, so pretty fast anyway!!! */
}

static inline void rcu_barrier(void)
{
        rcu_barrier_sched();  /* Only one CPU, so only one list of callbacks! */
}

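/*
 * Usage sketch (illustrative only, not part of this header): rcu_barrier()
 * is typically invoked on a module-unload path to wait for outstanding
 * call_rcu() callbacks to finish before module text and data vanish.
 * The foo_exit() function, unregister_foo(), and foo_cache are
 * hypothetical names.
 *
 *      static void __exit foo_exit(void)
 *      {
 *              unregister_foo();               // stop posting new callbacks
 *              rcu_barrier();                  // wait for in-flight callbacks
 *              kmem_cache_destroy(foo_cache);
 *      }
 */
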
/*
 * With only one CPU, the bh, sched, and expedited flavors of grace
 * period all reduce to synchronize_sched().
 */
static inline void synchronize_rcu_bh(void)
{
        synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
        synchronize_sched();
}

static inline void synchronize_sched_expedited(void)
{
        synchronize_sched();
}

/* Tiny RCU has no special kfree path, so queue the callback as usual. */
static inline void kfree_call_rcu(struct rcu_head *head,
                                  void (*func)(struct rcu_head *rcu))
{
        call_rcu(head, func);
}

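/*
 * Usage sketch (illustrative only, not part of this header): freeing an
 * RCU-protected structure once a grace period has elapsed.  The "foo"
 * structure, foo_reclaim(), and old_fp are hypothetical names.
 *
 *      struct foo {
 *              int data;
 *              struct rcu_head rcu;
 *      };
 *
 *      static void foo_reclaim(struct rcu_head *rcu)
 *      {
 *              kfree(container_of(rcu, struct foo, rcu));
 *      }
 *
 *      kfree_call_rcu(&old_fp->rcu, foo_reclaim);
 */
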
/*
 * Tiny RCU never needs to keep a CPU out of dyntick-idle mode on
 * RCU's behalf: report no pending work and an unbounded delay.
 */
static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
        *delta_jiffies = ULONG_MAX;
        return 0;
}

/* A context switch is a quiescent state for RCU-sched. */
static inline void rcu_note_context_switch(int cpu)
{
        rcu_sched_qs(cpu);
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
}

/*
 * Tiny RCU does not track grace-period numbers, so always report
 * zero completed grace periods.
 */
static inline long rcu_batches_completed(void)
{
        return 0;
}

/*
 * Likewise for bottom-half grace periods.
 */
static inline long rcu_batches_completed_bh(void)
{
        return 0;
}

/*
 * With only one CPU, quiescent states never need to be forced,
 * so these are no-ops.
 */
static inline void rcu_force_quiescent_state(void)
{
}

static inline void rcu_bh_force_quiescent_state(void)
{
}

static inline void rcu_sched_force_quiescent_state(void)
{
}

/* Tiny RCU issues no CPU stall warnings, so there is nothing to reset. */
static inline void rcu_cpu_stall_reset(void)
{
}

/* Non-preemptible RCU leaves no read-side state to clean up at task exit. */
static inline void exit_rcu(void)
{
}

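/*
 * Reader/updater sketch (illustrative only, not part of this header):
 * Tiny RCU uses the same API pattern as Tree RCU.  The foo_list pointer
 * and foo_lock are hypothetical names.
 *
 *      // Reader:
 *      rcu_read_lock();
 *      p = rcu_dereference(foo_list);
 *      if (p)
 *              do_something_with(p->data);
 *      rcu_read_unlock();
 *
 *      // Updater:
 *      spin_lock(&foo_lock);
 *      old = rcu_dereference_protected(foo_list,
 *                                      lockdep_is_held(&foo_lock));
 *      rcu_assign_pointer(foo_list, new);
 *      spin_unlock(&foo_lock);
 *      synchronize_rcu();              // wait for pre-existing readers
 *      kfree(old);
 */
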
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern int rcu_scheduler_active __read_mostly;
extern void rcu_scheduler_starting(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
static inline void rcu_scheduler_starting(void)
{
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#endif /* __LINUX_TINY_H */