linux/include/linux/spinlock_api_smp.h
#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

int in_lock_functions(unsigned long addr);

#define assert_raw_spin_locked(x)       BUG_ON(!raw_spin_is_locked(x))
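
/*
 * A minimal sketch (hypothetical structure, field and helper names) of
 * how the assertion above is used: a helper that must only be called
 * with the lock held can check that invariant up front.
 *
 *	static void foo_update_stats(struct foo *f)
 *	{
 *		assert_raw_spin_locked(&f->lock);
 *		f->nr_updates++;
 *	}
 */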

void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)            __acquires(lock);
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
                                                                __acquires(lock);
void __lockfunc _raw_spin_lock_bh_nested(raw_spinlock_t *lock, int subclass)
                                                                __acquires(lock);
void __lockfunc
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
                                                                __acquires(lock);
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)         __acquires(lock);
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
                                                                __acquires(lock);

unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
                                                                __acquires(lock);
unsigned long __lockfunc
_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
                                                                __acquires(lock);
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)          __releases(lock);
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)       __releases(lock);
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)      __releases(lock);
void __lockfunc
_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
                                                                __releases(lock);

#ifdef CONFIG_INLINE_SPIN_LOCK
#define _raw_spin_lock(lock) __raw_spin_lock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
#endif

#ifndef CONFIG_UNINLINE_SPIN_UNLOCK
#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
#endif

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
        preempt_disable();
        if (do_raw_spin_trylock(lock)) {
                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                return 1;
        }
        preempt_enable();
        return 0;
}
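
/*
 * A minimal sketch of the trylock pattern (queue and request names are
 * hypothetical); callers normally go through the spin_trylock() wrapper
 * rather than calling this directly. Note that preemption stays
 * disabled only if the lock was actually taken:
 *
 *	if (spin_trylock(&q->lock)) {
 *		list_add_tail(&req->entry, &q->list);
 *		spin_unlock(&q->lock);
 *	} else {
 *		... contended: back off or defer the work ...
 *	}
 */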

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        /*
         * With lockdep we don't want the hand-coded irq-enable of the
         * do_raw_spin_lock_flags() code, because lockdep assumes
         * that interrupts are not re-enabled during lock-acquire:
         */
#ifdef CONFIG_LOCKDEP
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#else
        do_raw_spin_lock_flags(lock, &flags);
#endif
        return flags;
}
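
/*
 * A minimal caller-side sketch (hypothetical device structure), via the
 * spin_lock_irqsave() wrapper: flags receives the interrupt state saved
 * above, and spin_unlock_irqrestore() puts it back, so the pair is safe
 * whether or not IRQs were already disabled:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&dev->lock, flags);
 *	dev->rx_count++;
 *	spin_unlock_irqrestore(&dev->lock, flags);
 */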

static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
        local_irq_disable();
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
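
/*
 * A minimal sketch (hypothetical names) of when the _bh variant fits:
 * data shared between process context and a softirq (e.g. a timer or
 * NAPI handler), where disabling bottom halves is sufficient and hard
 * IRQs can stay enabled:
 *
 *	spin_lock_bh(&stats->lock);
 *	stats->pkts += n;
 *	spin_unlock_bh(&stats->lock);
 */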

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
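
/*
 * All of the lock routines above share the same shape: disable
 * preemption (and possibly IRQs or bottom halves), tell lockdep the
 * lock is being acquired, then take it with LOCK_CONTENDED(). With
 * CONFIG_LOCK_STAT, LOCK_CONTENDED() tries do_raw_spin_trylock() first
 * so contention can be recorded before spinning in do_raw_spin_lock();
 * otherwise it reduces to a plain do_raw_spin_lock() call.
 */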

#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        do_raw_spin_unlock(lock);
        preempt_enable();
}

static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
                                            unsigned long flags)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        do_raw_spin_unlock(lock);
        local_irq_restore(flags);
        preempt_enable();
}

static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        do_raw_spin_unlock(lock);
        local_irq_enable();
        preempt_enable();
}

static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        do_raw_spin_unlock(lock);
        __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
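
/*
 * Note the unlock ordering above: the lockdep release annotation comes
 * first, then the lock word is dropped, and only then is preemption
 * (and, where applicable, IRQ or bottom-half state) re-enabled, so no
 * preemption point can open up while the lock is still held.
 */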

static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
        if (do_raw_spin_trylock(lock)) {
                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                return 1;
        }
        __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
        return 0;
}

#include <linux/rwlock_api_smp.h>

#endif /* __LINUX_SPINLOCK_API_SMP_H */