#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the __raw_spin_*()/etc. low-level
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the __raw_spin_*()/etc. versions for UP
 *                        builds (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */
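
/*
 * Illustrative sketch (not part of the declarations below): whichever of
 * the layers above is selected, callers only ever use the final spin_*()
 * API built by this header. The names "my_lock" and "my_counter" are
 * hypothetical and used for the example only:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	static int my_counter;
 *
 *	static void bump_counter(void)
 *	{
 *		spin_lock(&my_lock);
 *		my_counter++;
 *		spin_unlock(&my_lock);
 *	}
 */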

#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>

#include <asm/system.h>

/*
 * Must define these before including other files; inline functions need them
 */
#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc fastcall __attribute__((section(".spinlock.text")))

/*
 * Pull the raw_spinlock_t and raw_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);

/*
 * Pull the __raw*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __spin_lock_init(spinlock_t *lock, const char *name,
                               struct lock_class_key *key);
# define spin_lock_init(lock)                                   \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __spin_lock_init((lock), #lock, &__key);                \
} while (0)

#else
# define spin_lock_init(lock)                                   \
        do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __rwlock_init(rwlock_t *lock, const char *name,
                            struct lock_class_key *key);
# define rwlock_init(lock)                                      \
do {                                                            \
        static struct lock_class_key __key;                     \
                                                                \
        __rwlock_init((lock), #lock, &__key);                   \
} while (0)
#else
# define rwlock_init(lock)                                      \
        do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
#endif
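
/*
 * Illustrative sketch: locks embedded in dynamically allocated objects
 * must be initialized at runtime with spin_lock_init()/rwlock_init()
 * (statically allocated locks can use the *_UNLOCKED / DEFINE_* forms
 * instead). "struct my_obj" below is a hypothetical example structure:
 *
 *	struct my_obj {
 *		spinlock_t lock;
 *		rwlock_t rwlock;
 *	};
 *
 *	struct my_obj *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	if (obj) {
 *		spin_lock_init(&obj->lock);
 *		rwlock_init(&obj->rwlock);
 *	}
 */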

#define spin_is_locked(lock)    __raw_spin_is_locked(&(lock)->raw_lock)

/**
 * spin_unlock_wait - wait until the spinlock gets unlocked
 * @lock: the spinlock in question.
 */
#define spin_unlock_wait(lock)  __raw_spin_unlock_wait(&(lock)->raw_lock)
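
/*
 * Illustrative sketch: spin_unlock_wait() only waits for a current holder
 * to drop the lock, it does not acquire it. A (simplified) use is teardown
 * code that wants any in-flight critical section to finish before the
 * object goes away; the names are hypothetical and real users need the
 * appropriate memory barriers around the flag:
 *
 *	obj->dying = 1;				// new lockers check this flag
 *	spin_unlock_wait(&obj->lock);		// let the current holder finish
 *	free_my_obj(obj);
 */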

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void _raw_spin_lock(spinlock_t *lock);
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
 extern int _raw_spin_trylock(spinlock_t *lock);
 extern void _raw_spin_unlock(spinlock_t *lock);
 extern void _raw_read_lock(rwlock_t *lock);
 extern int _raw_read_trylock(rwlock_t *lock);
 extern void _raw_read_unlock(rwlock_t *lock);
 extern void _raw_write_lock(rwlock_t *lock);
 extern int _raw_write_trylock(rwlock_t *lock);
 extern void _raw_write_unlock(rwlock_t *lock);
#else
# define _raw_spin_lock(lock)           __raw_spin_lock(&(lock)->raw_lock)
# define _raw_spin_lock_flags(lock, flags) \
                __raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
# define _raw_spin_trylock(lock)        __raw_spin_trylock(&(lock)->raw_lock)
# define _raw_spin_unlock(lock)         __raw_spin_unlock(&(lock)->raw_lock)
# define _raw_read_lock(rwlock)         __raw_read_lock(&(rwlock)->raw_lock)
# define _raw_read_trylock(rwlock)      __raw_read_trylock(&(rwlock)->raw_lock)
# define _raw_read_unlock(rwlock)       __raw_read_unlock(&(rwlock)->raw_lock)
# define _raw_write_lock(rwlock)        __raw_write_lock(&(rwlock)->raw_lock)
# define _raw_write_trylock(rwlock)     __raw_write_trylock(&(rwlock)->raw_lock)
# define _raw_write_unlock(rwlock)      __raw_write_unlock(&(rwlock)->raw_lock)
#endif

#define read_can_lock(rwlock)           __raw_read_can_lock(&(rwlock)->raw_lock)
#define write_can_lock(rwlock)          __raw_write_can_lock(&(rwlock)->raw_lock)

/*
 * Define the various spin_lock and rw_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
 * methods are defined as nops where they are not required.
 */
#define spin_trylock(lock)              __cond_lock(lock, _spin_trylock(lock))
#define read_trylock(lock)              __cond_lock(lock, _read_trylock(lock))
#define write_trylock(lock)             __cond_lock(lock, _write_trylock(lock))
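
/*
 * Illustrative sketch: the trylock variants return nonzero on success and
 * 0 if the lock is already held, so the caller must handle the failure
 * path itself (hypothetical names):
 *
 *	if (spin_trylock(&obj->lock)) {
 *		do_quick_update(obj);
 *		spin_unlock(&obj->lock);
 *	} else {
 *		schedule_deferred_update(obj);	// lock busy, defer the work
 *	}
 */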

#define spin_lock(lock)                 _spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
#else
# define spin_lock_nested(lock, subclass) _spin_lock(lock)
#endif
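
/*
 * Illustrative sketch: spin_lock_nested() tells lockdep that taking two
 * locks of the same lock class is intentional (e.g. parent/child objects
 * of the same type); the subclass argument only matters when
 * CONFIG_DEBUG_LOCK_ALLOC is enabled. The names below are hypothetical:
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	spin_unlock(&child->lock);
 *	spin_unlock(&parent->lock);
 */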

#define write_lock(lock)                _write_lock(lock)
#define read_lock(lock)                 _read_lock(lock)

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define spin_lock_irqsave(lock, flags)  flags = _spin_lock_irqsave(lock)
#define read_lock_irqsave(lock, flags)  flags = _read_lock_irqsave(lock)
#define write_lock_irqsave(lock, flags) flags = _write_lock_irqsave(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define spin_lock_irqsave_nested(lock, flags, subclass) \
        flags = _spin_lock_irqsave_nested(lock, subclass)
#else
#define spin_lock_irqsave_nested(lock, flags, subclass) \
        flags = _spin_lock_irqsave(lock)
#endif

#else

#define spin_lock_irqsave(lock, flags)  _spin_lock_irqsave(lock, flags)
#define read_lock_irqsave(lock, flags)  _read_lock_irqsave(lock, flags)
#define write_lock_irqsave(lock, flags) _write_lock_irqsave(lock, flags)
#define spin_lock_irqsave_nested(lock, flags, subclass) \
        spin_lock_irqsave(lock, flags)

#endif
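
/*
 * Illustrative sketch: because the *_irqsave() forms are macros that
 * assign to their second argument, "flags" is passed by name as a plain
 * unsigned long, not by address. Typical pattern for data shared with an
 * interrupt handler (hypothetical names):
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&dev->lock, flags);
 *	dev->pending++;
 *	spin_unlock_irqrestore(&dev->lock, flags);
 */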

#define spin_lock_irq(lock)             _spin_lock_irq(lock)
#define spin_lock_bh(lock)              _spin_lock_bh(lock)

#define read_lock_irq(lock)             _read_lock_irq(lock)
#define read_lock_bh(lock)              _read_lock_bh(lock)

#define write_lock_irq(lock)            _write_lock_irq(lock)
#define write_lock_bh(lock)             _write_lock_bh(lock)

/*
 * We inline the unlock functions in the nondebug case:
 */
#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
        !defined(CONFIG_SMP)
# define spin_unlock(lock)              _spin_unlock(lock)
# define read_unlock(lock)              _read_unlock(lock)
# define write_unlock(lock)             _write_unlock(lock)
# define spin_unlock_irq(lock)          _spin_unlock_irq(lock)
# define read_unlock_irq(lock)          _read_unlock_irq(lock)
# define write_unlock_irq(lock)         _write_unlock_irq(lock)
#else
# define spin_unlock(lock) \
    do {__raw_spin_unlock(&(lock)->raw_lock); __release(lock); } while (0)
# define read_unlock(lock) \
    do {__raw_read_unlock(&(lock)->raw_lock); __release(lock); } while (0)
# define write_unlock(lock) \
    do {__raw_write_unlock(&(lock)->raw_lock); __release(lock); } while (0)
# define spin_unlock_irq(lock)                  \
do {                                            \
        __raw_spin_unlock(&(lock)->raw_lock);   \
        __release(lock);                        \
        local_irq_enable();                     \
} while (0)
# define read_unlock_irq(lock)                  \
do {                                            \
        __raw_read_unlock(&(lock)->raw_lock);   \
        __release(lock);                        \
        local_irq_enable();                     \
} while (0)
# define write_unlock_irq(lock)                 \
do {                                            \
        __raw_write_unlock(&(lock)->raw_lock);  \
        __release(lock);                        \
        local_irq_enable();                     \
} while (0)
#endif

#define spin_unlock_irqrestore(lock, flags) \
                                        _spin_unlock_irqrestore(lock, flags)
#define spin_unlock_bh(lock)            _spin_unlock_bh(lock)

#define read_unlock_irqrestore(lock, flags) \
                                        _read_unlock_irqrestore(lock, flags)
#define read_unlock_bh(lock)            _read_unlock_bh(lock)

#define write_unlock_irqrestore(lock, flags) \
                                        _write_unlock_irqrestore(lock, flags)
#define write_unlock_bh(lock)           _write_unlock_bh(lock)

#define spin_trylock_bh(lock)   __cond_lock(lock, _spin_trylock_bh(lock))

#define spin_trylock_irq(lock) \
({ \
        local_irq_disable(); \
        spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0;  }); \
})

#define spin_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})

#define write_trylock_irqsave(lock, flags) \
({ \
        local_irq_save(flags); \
        write_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
})
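
/*
 * Illustrative sketch: the trylock variants with _irq, _irqsave and _bh
 * suffixes bundle the interrupt (or bottom-half) disabling with the
 * trylock attempt and undo it again on failure, so the caller sees an
 * all-or-nothing result (hypothetical names):
 *
 *	unsigned long flags;
 *
 *	if (spin_trylock_irqsave(&dev->lock, flags)) {
 *		dev->stats++;
 *		spin_unlock_irqrestore(&dev->lock, flags);
 *	}
 */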

/*
 * Locks two spinlocks l1 and l2.
 * l1_first indicates whether spinlock l1 should be taken first.
 */
static inline void double_spin_lock(spinlock_t *l1, spinlock_t *l2,
                                    bool l1_first)
        __acquires(l1)
        __acquires(l2)
{
        if (l1_first) {
                spin_lock(l1);
                spin_lock(l2);
        } else {
                spin_lock(l2);
                spin_lock(l1);
        }
}

/*
 * Unlocks two spinlocks l1 and l2.
 * l1_taken_first indicates whether spinlock l1 was taken first and therefore
 * should be released after spinlock l2.
 */
static inline void double_spin_unlock(spinlock_t *l1, spinlock_t *l2,
                                      bool l1_taken_first)
        __releases(l1)
        __releases(l2)
{
        if (l1_taken_first) {
                spin_unlock(l2);
                spin_unlock(l1);
        } else {
                spin_unlock(l1);
                spin_unlock(l2);
        }
}
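
/*
 * Illustrative sketch: everyone who takes the same pair of locks must
 * agree on the ordering (e.g. by address or by role), otherwise an AB-BA
 * deadlock is possible; the bool lets a single call site express either
 * order while keeping lock and unlock symmetric (hypothetical names,
 * including the ordering rule):
 *
 *	bool src_first = (src < dst);	// hypothetical ordering rule
 *
 *	double_spin_lock(&src->lock, &dst->lock, src_first);
 *	move_items(src, dst);
 *	double_spin_unlock(&src->lock, &dst->lock, src_first);
 */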

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs the above definitions)
 */
#include <asm/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
                __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
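
/*
 * Illustrative sketch: atomic_dec_and_lock() decrements the counter and
 * returns nonzero, with the lock held, only when the count reaches zero,
 * which is the classic "free on the last put" pattern (hypothetical names):
 *
 *	void my_obj_put(struct my_obj *obj)
 *	{
 *		if (atomic_dec_and_lock(&obj->refcount, &obj_list_lock)) {
 *			list_del(&obj->node);
 *			spin_unlock(&obj_list_lock);
 *			kfree(obj);
 *		}
 *	}
 */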

/**
 * spin_can_lock - would spin_trylock() succeed?
 * @lock: the spinlock in question.
 */
#define spin_can_lock(lock)     (!spin_is_locked(lock))

#endif /* __LINUX_SPINLOCK_H */