#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

int in_lock_functions(unsigned long addr);

#define assert_raw_spin_locked(x)	BUG_ON(!raw_spin_is_locked(x))

void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)		__acquires(lock);
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
								__acquires(lock);
void __lockfunc
_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
								__acquires(lock);
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)	__acquires(lock);
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
								__acquires(lock);

unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
								__acquires(lock);
unsigned long __lockfunc
_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
								__acquires(lock);
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)		__releases(lock);
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)	__releases(lock);
void __lockfunc
_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
								__releases(lock);
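
/*
 * Usage sketch (not part of this header): in kernels of this vintage,
 * the public API in <linux/spinlock.h> funnels into the _raw_* entry
 * points above, roughly like so:
 *
 *	static inline void spin_lock(spinlock_t *lock)
 *	{
 *		raw_spin_lock(&lock->rlock);
 *	}
 *
 * where raw_spin_lock() in turn expands to _raw_spin_lock().
 */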

#ifdef CONFIG_INLINE_SPIN_LOCK
#define _raw_spin_lock(lock) __raw_spin_lock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_BH
#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK
#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK
#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
#endif

#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
#endif
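
/*
 * Note (sketch): the CONFIG_INLINE_SPIN_* switches above are defined
 * in kernel/Kconfig.locks; an architecture opts in by selecting the
 * matching ARCH_INLINE_SPIN_* option. When set, callers get the
 * inline __raw_* bodies below instead of the out-of-line versions in
 * kernel/spinlock.c:
 *
 *	spin_lock(&lock);
 *	  -> raw_spin_lock(&lock.rlock);
 *	    -> _raw_spin_lock(...);	// now a macro for __raw_spin_lock()
 */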

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	preempt_disable();
	if (do_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable();
	return 0;
}
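
/*
 * Caller-side sketch (obj and its fields are hypothetical): a trylock
 * returns 1 on success and 0 if the lock is already held, so failure
 * must be handled explicitly:
 *
 *	if (spin_trylock(&obj->lock)) {
 *		obj->counter++;		// any critical-section work
 *		spin_unlock(&obj->lock);
 *	} else {
 *		// contended: defer the work or retry later
 *	}
 */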

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-enabled
 * spin-ops would do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * On lockdep we don't want the hand-coded irq-enable of
	 * do_raw_spin_lock_flags(), because lockdep assumes
	 * that interrupts are not re-enabled during lock-acquire:
	 */
#ifdef CONFIG_LOCKDEP
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#else
	do_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
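
/*
 * Caller-side sketch (dev is hypothetical): flags must be an
 * unsigned long in the caller's scope, because the saved interrupt
 * state travels through it:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&dev->lock, flags);
 *	// ... critical section, safe against local interrupts ...
 *	spin_unlock_irqrestore(&dev->lock, flags);
 */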

static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
{
	local_irq_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}

static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
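
/*
 * Sketch: the _bh variants are meant for data shared with softirq
 * (bottom-half) context; taking the lock also blocks local softirqs.
 * From process context (stats is hypothetical):
 *
 *	spin_lock_bh(&stats->lock);
 *	stats->rx_packets++;
 *	spin_unlock_bh(&stats->lock);
 */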

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
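
/*
 * Aside: LOCK_CONTENDED() is defined in <linux/lockdep.h>. With
 * CONFIG_LOCK_STAT it roughly expands to (sketch):
 *
 *	if (!do_raw_spin_trylock(lock)) {
 *		lock_contended(&lock->dep_map, _RET_IP_);
 *		do_raw_spin_lock(lock);
 *	}
 *	lock_acquired(&lock->dep_map, _RET_IP_);
 *
 * and collapses to a plain do_raw_spin_lock(lock) otherwise.
 */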

#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	preempt_enable();
}

static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
					    unsigned long flags)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	local_irq_restore(flags);
	preempt_enable();
}

static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	local_irq_enable();
	preempt_enable();
}

static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
	spin_release(&lock->dep_map, 1, _RET_IP_);
	do_raw_spin_unlock(lock);
	/*
	 * Skip the reschedule check here: local_bh_enable_ip() below
	 * performs its own, so a plain preempt_enable() would check twice.
	 */
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}

static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
{
	local_bh_disable();
	preempt_disable();
	if (do_raw_spin_trylock(lock)) {
		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		return 1;
	}
	preempt_enable_no_resched();
	local_bh_enable_ip((unsigned long)__builtin_return_address(0));
	return 0;
}

#include <linux/rwlock_api_smp.h>

#endif /* __LINUX_SPINLOCK_API_SMP_H */