/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/cache.h>

/* Tiny RCU requires no boot-time initialization. */
static inline void rcu_init(void)
{
}

/*
 * Waiting for a grace period suffices as a barrier on a single CPU:
 * callbacks are invoked in order, so waiting for a callback queued
 * now guarantees that all earlier callbacks have completed.
 */
static inline void rcu_barrier_bh(void)
{
	wait_rcu_gp(call_rcu_bh);
}

static inline void rcu_barrier_sched(void)
{
	wait_rcu_gp(call_rcu_sched);
}

#ifdef CONFIG_TINY_RCU

static inline void synchronize_rcu_expedited(void)
{
	synchronize_sched();	/* Only one CPU, so pretty fast anyway!!! */
}

static inline void rcu_barrier(void)
{
	rcu_barrier_sched();  /* Only one CPU, so only one list of callbacks! */
}

#else /* #ifdef CONFIG_TINY_RCU */

void synchronize_rcu_expedited(void);

static inline void rcu_barrier(void)
{
	wait_rcu_gp(call_rcu);
}

#endif /* #else #ifdef CONFIG_TINY_RCU */

static inline void synchronize_rcu_bh(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched();
}

static inline void synchronize_sched_expedited(void)
{
	synchronize_sched();
}

static inline void kfree_call_rcu(struct rcu_head *head,
				  void (*func)(struct rcu_head *rcu))
{
	call_rcu(head, func);
}

#ifdef CONFIG_TINY_RCU

static inline void rcu_preempt_note_context_switch(void)
{
}

/* Tiny RCU never needs the CPU to stay out of dyntick-idle mode. */
static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
	*delta_jiffies = ULONG_MAX;
	return 0;
}

#else /* #ifdef CONFIG_TINY_RCU */

void rcu_preempt_note_context_switch(void);
int rcu_preempt_needs_cpu(void);

static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
	*delta_jiffies = ULONG_MAX;
	return rcu_preempt_needs_cpu();
}

#endif /* #else #ifdef CONFIG_TINY_RCU */

static inline void rcu_note_context_switch(int cpu)
{
	rcu_sched_qs(cpu);
	rcu_preempt_note_context_switch();
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(int cpu)
{
}

/*
 * Return the number of grace periods.  Tiny RCU does not count them,
 * so zero is always returned.
 */
static inline long rcu_batches_completed(void)
{
	return 0;
}

/*
 * Return the number of bottom-half grace periods, likewise uncounted.
 */
static inline long rcu_batches_completed_bh(void)
{
	return 0;
}

/*
 * Quiescent states need not be forced on a uniprocessor, so the
 * following are no-ops.
 */
static inline void rcu_force_quiescent_state(void)
{
}

static inline void rcu_bh_force_quiescent_state(void)
{
}

static inline void rcu_sched_force_quiescent_state(void)
{
}

static inline void rcu_cpu_stall_reset(void)
{
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern int rcu_scheduler_active __read_mostly;
extern void rcu_scheduler_starting(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
static inline void rcu_scheduler_starting(void)
{
}
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#endif /* __LINUX_TINY_H */
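
/*
 * Editorial note: the sketch below is an illustrative, hypothetical usage
 * example, not part of this header.  It shows the classic RCU read/update
 * pattern that Tiny RCU backs on uniprocessor builds, where these
 * primitives are cheap precisely because there is only one CPU.  The
 * names "struct foo", "gp", "read_a()", and "update_a()" are made up for
 * illustration; the read-side primitives (rcu_read_lock(),
 * rcu_dereference(), ...) come from <linux/rcupdate.h>, not from this
 * file.
 */
#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int a;
};

static struct foo __rcu *gp;	/* hypothetical RCU-protected pointer */

/* Reader: nearly free under Tiny RCU, since there is only one CPU. */
static int read_a(void)
{
	struct foo *p;
	int ret = -1;

	rcu_read_lock();
	p = rcu_dereference(gp);
	if (p)
		ret = p->a;
	rcu_read_unlock();
	return ret;
}

/*
 * Writer: publish a new version, wait for pre-existing readers, then
 * free the old version.  Updates are assumed to be serialized by the
 * caller, hence the constant-true lockdep condition below.
 */
static int update_a(int a)
{
	struct foo *newp, *oldp;

	newp = kmalloc(sizeof(*newp), GFP_KERNEL);
	if (!newp)
		return -ENOMEM;
	newp->a = a;

	oldp = rcu_dereference_protected(gp, 1);
	rcu_assign_pointer(gp, newp);
	synchronize_rcu();	/* cheap here: only one CPU to wait for */
	kfree(oldp);
	return 0;
}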