linux/include/linux/spinlock_api_smp.h
#ifndef __LINUX_SPINLOCK_API_SMP_H
#define __LINUX_SPINLOCK_API_SMP_H

#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_smp.h
 *
 * spinlock API declarations on SMP (and debug)
 * (implemented in kernel/spinlock.c)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

int in_lock_functions(unsigned long addr);

#define assert_spin_locked(x)   BUG_ON(!spin_is_locked(x))
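
/*
 * Usage sketch (editor's illustration, not part of the original
 * header): assert_spin_locked() documents a locking precondition in
 * helpers that must only be called with the lock already held:
 *
 *      static void update_counter_locked(struct foo *f)
 *      {
 *              assert_spin_locked(&f->lock);
 *              f->counter++;
 *      }
 *
 * (struct foo and update_counter_locked() are hypothetical names.)
 */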

void __lockfunc _spin_lock(spinlock_t *lock)            __acquires(lock);
void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
                                                        __acquires(lock);
void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map)
                                                        __acquires(lock);
void __lockfunc _read_lock(rwlock_t *lock)              __acquires(lock);
void __lockfunc _write_lock(rwlock_t *lock)             __acquires(lock);
void __lockfunc _spin_lock_bh(spinlock_t *lock)         __acquires(lock);
void __lockfunc _read_lock_bh(rwlock_t *lock)           __acquires(lock);
void __lockfunc _write_lock_bh(rwlock_t *lock)          __acquires(lock);
void __lockfunc _spin_lock_irq(spinlock_t *lock)        __acquires(lock);
void __lockfunc _read_lock_irq(rwlock_t *lock)          __acquires(lock);
void __lockfunc _write_lock_irq(rwlock_t *lock)         __acquires(lock);
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
                                                        __acquires(lock);
unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
                                                        __acquires(lock);
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
                                                        __acquires(lock);
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
                                                        __acquires(lock);
int __lockfunc _spin_trylock(spinlock_t *lock);
int __lockfunc _read_trylock(rwlock_t *lock);
int __lockfunc _write_trylock(rwlock_t *lock);
int __lockfunc _spin_trylock_bh(spinlock_t *lock);
void __lockfunc _spin_unlock(spinlock_t *lock)          __releases(lock);
void __lockfunc _read_unlock(rwlock_t *lock)            __releases(lock);
void __lockfunc _write_unlock(rwlock_t *lock)           __releases(lock);
void __lockfunc _spin_unlock_bh(spinlock_t *lock)       __releases(lock);
void __lockfunc _read_unlock_bh(rwlock_t *lock)         __releases(lock);
void __lockfunc _write_unlock_bh(rwlock_t *lock)        __releases(lock);
void __lockfunc _spin_unlock_irq(spinlock_t *lock)      __releases(lock);
void __lockfunc _read_unlock_irq(rwlock_t *lock)        __releases(lock);
void __lockfunc _write_unlock_irq(rwlock_t *lock)       __releases(lock);
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
                                                        __releases(lock);
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
                                                        __releases(lock);
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
                                                        __releases(lock);
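
/*
 * Editor's note: __acquires()/__releases() above are sparse context
 * annotations; with an ordinary compiler they expand to nothing. Under
 * "make C=1" sparse uses them to flag unbalanced lock contexts, e.g.
 * (hypothetical function, sketch only):
 *
 *      void broken(spinlock_t *lock) __acquires(lock)
 *      {
 *              if (!some_condition)
 *                      return;         // sparse: context imbalance
 *              _spin_lock(lock);
 *      }
 */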

/*
 * We inline the unlock functions in the nondebug case:
 */
#if !defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT)
#define __always_inline__spin_unlock
#define __always_inline__read_unlock
#define __always_inline__write_unlock
#define __always_inline__spin_unlock_irq
#define __always_inline__read_unlock_irq
#define __always_inline__write_unlock_irq
#endif
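
/*
 * Editor's note on the remapping below: whenever one of the
 * __always_inline__<op> markers is defined, the corresponding
 * out-of-line _<op>() declared above is shadowed by a macro expanding
 * to the inline __<op>() defined later in this file. For example,
 * with __always_inline__spin_unlock defined, a call site's
 *
 *      _spin_unlock(&lock);
 *
 * compiles to the inline __spin_unlock(&lock) rather than a call into
 * kernel/spinlock.c.
 */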

#ifndef CONFIG_DEBUG_SPINLOCK
#ifndef CONFIG_GENERIC_LOCKBREAK

#ifdef __always_inline__spin_lock
#define _spin_lock(lock) __spin_lock(lock)
#endif

#ifdef __always_inline__read_lock
#define _read_lock(lock) __read_lock(lock)
#endif

#ifdef __always_inline__write_lock
#define _write_lock(lock) __write_lock(lock)
#endif

#ifdef __always_inline__spin_lock_bh
#define _spin_lock_bh(lock) __spin_lock_bh(lock)
#endif

#ifdef __always_inline__read_lock_bh
#define _read_lock_bh(lock) __read_lock_bh(lock)
#endif

#ifdef __always_inline__write_lock_bh
#define _write_lock_bh(lock) __write_lock_bh(lock)
#endif

#ifdef __always_inline__spin_lock_irq
#define _spin_lock_irq(lock) __spin_lock_irq(lock)
#endif

#ifdef __always_inline__read_lock_irq
#define _read_lock_irq(lock) __read_lock_irq(lock)
#endif

#ifdef __always_inline__write_lock_irq
#define _write_lock_irq(lock) __write_lock_irq(lock)
#endif

#ifdef __always_inline__spin_lock_irqsave
#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock)
#endif

#ifdef __always_inline__read_lock_irqsave
#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
#endif

#ifdef __always_inline__write_lock_irqsave
#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
#endif

#endif /* !CONFIG_GENERIC_LOCKBREAK */

#ifdef __always_inline__spin_trylock
#define _spin_trylock(lock) __spin_trylock(lock)
#endif

#ifdef __always_inline__read_trylock
#define _read_trylock(lock) __read_trylock(lock)
#endif

#ifdef __always_inline__write_trylock
#define _write_trylock(lock) __write_trylock(lock)
#endif

#ifdef __always_inline__spin_trylock_bh
#define _spin_trylock_bh(lock) __spin_trylock_bh(lock)
#endif

#ifdef __always_inline__spin_unlock
#define _spin_unlock(lock) __spin_unlock(lock)
#endif

#ifdef __always_inline__read_unlock
#define _read_unlock(lock) __read_unlock(lock)
#endif

#ifdef __always_inline__write_unlock
#define _write_unlock(lock) __write_unlock(lock)
#endif

#ifdef __always_inline__spin_unlock_bh
#define _spin_unlock_bh(lock) __spin_unlock_bh(lock)
#endif

#ifdef __always_inline__read_unlock_bh
#define _read_unlock_bh(lock) __read_unlock_bh(lock)
#endif

#ifdef __always_inline__write_unlock_bh
#define _write_unlock_bh(lock) __write_unlock_bh(lock)
#endif

#ifdef __always_inline__spin_unlock_irq
#define _spin_unlock_irq(lock) __spin_unlock_irq(lock)
#endif

#ifdef __always_inline__read_unlock_irq
#define _read_unlock_irq(lock) __read_unlock_irq(lock)
#endif

#ifdef __always_inline__write_unlock_irq
#define _write_unlock_irq(lock) __write_unlock_irq(lock)
#endif

#ifdef __always_inline__spin_unlock_irqrestore
#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags)
#endif

#ifdef __always_inline__read_unlock_irqrestore
#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
#endif

#ifdef __always_inline__write_unlock_irqrestore
#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
#endif

#endif /* CONFIG_DEBUG_SPINLOCK */

static inline int __spin_trylock(spinlock_t *lock)
{
        preempt_disable();
        if (_raw_spin_trylock(lock)) {
                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                return 1;
        }
        preempt_enable();
        return 0;
}
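
/*
 * Editor's note: preempt_disable() precedes the trylock attempt so a
 * successful acquisition can never be preempted before the caller
 * observes it; the failure path restores the preempt count. A minimal
 * usage sketch, assuming the usual spin_trylock() wrapper maps onto
 * _spin_trylock() (obj is hypothetical):
 *
 *      if (spin_trylock(&obj->lock)) {
 *              ... critical section ...
 *              spin_unlock(&obj->lock);
 *      }
 */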

static inline int __read_trylock(rwlock_t *lock)
{
        preempt_disable();
        if (_raw_read_trylock(lock)) {
                rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
                return 1;
        }
        preempt_enable();
        return 0;
}

static inline int __write_trylock(rwlock_t *lock)
{
        preempt_disable();
        if (_raw_write_trylock(lock)) {
                rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                return 1;
        }
        preempt_enable();
        return 0;
}

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)

static inline void __read_lock(rwlock_t *lock)
{
        preempt_disable();
        rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}
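
/*
 * Editor's note: LOCK_CONTENDED() is a lockdep helper from
 * <linux/lockdep.h>. With CONFIG_LOCK_STAT enabled it tries the
 * trylock first so contention can be recorded before spinning,
 * roughly equivalent to:
 *
 *      if (!_raw_read_trylock(lock)) {
 *              lock_contended(&lock->dep_map, _RET_IP_);
 *              _raw_read_lock(lock);
 *      }
 *      lock_acquired(&lock->dep_map, _RET_IP_);
 *
 * Without lock statistics it reduces to a plain _raw_read_lock().
 */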

static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        /*
         * On lockdep we don't want the hand-coded irq-enable of
         * _raw_spin_lock_flags() code, because lockdep assumes
         * that interrupts are not re-enabled during lock-acquire:
         */
#ifdef CONFIG_LOCKDEP
        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
#else
        _raw_spin_lock_flags(lock, &flags);
#endif
        return flags;
}
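
/*
 * Editor's note: unlike the caller-facing spin_lock_irqsave() macro,
 * which assigns into a flags lvalue, this layer returns the saved
 * flags by value. The expected pairing at the _-API level is:
 *
 *      unsigned long flags;
 *
 *      flags = _spin_lock_irqsave(lock);
 *      ... critical section with IRQs off ...
 *      _spin_unlock_irqrestore(lock, flags);
 */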

static inline void __spin_lock_irq(spinlock_t *lock)
{
        local_irq_disable();
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

static inline void __spin_lock_bh(spinlock_t *lock)
{
        local_bh_disable();
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
                             _raw_read_lock_flags, &flags);
        return flags;
}

static inline void __read_lock_irq(rwlock_t *lock)
{
        local_irq_disable();
        preempt_disable();
        rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}

static inline void __read_lock_bh(rwlock_t *lock)
{
        local_bh_disable();
        preempt_disable();
        rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
}

static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
                             _raw_write_lock_flags, &flags);
        return flags;
}

static inline void __write_lock_irq(rwlock_t *lock)
{
        local_irq_disable();
        preempt_disable();
        rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}

static inline void __write_lock_bh(rwlock_t *lock)
{
        local_bh_disable();
        preempt_disable();
        rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}

static inline void __spin_lock(spinlock_t *lock)
{
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
}

static inline void __write_lock(rwlock_t *lock)
{
        preempt_disable();
        rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
}

#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */

static inline void __spin_unlock(spinlock_t *lock)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        _raw_spin_unlock(lock);
        preempt_enable();
}

static inline void __write_unlock(rwlock_t *lock)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_write_unlock(lock);
        preempt_enable();
}

static inline void __read_unlock(rwlock_t *lock)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_read_unlock(lock);
        preempt_enable();
}

static inline void __spin_unlock_irqrestore(spinlock_t *lock,
                                            unsigned long flags)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        _raw_spin_unlock(lock);
        local_irq_restore(flags);
        preempt_enable();
}

static inline void __spin_unlock_irq(spinlock_t *lock)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        _raw_spin_unlock(lock);
        local_irq_enable();
        preempt_enable();
}

static inline void __spin_unlock_bh(spinlock_t *lock)
{
        spin_release(&lock->dep_map, 1, _RET_IP_);
        _raw_spin_unlock(lock);
        preempt_enable_no_resched();
        local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
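
/*
 * Editor's note on the ordering above: preempt_enable_no_resched()
 * drops the preempt count without a reschedule check because
 * local_bh_enable_ip() performs its own preemption check after
 * re-enabling softirqs; checking twice would be redundant. Passing
 * __builtin_return_address(0) attributes the bh-enable event to the
 * real unlock call site for irq-flags tracing.
 */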

static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_read_unlock(lock);
        local_irq_restore(flags);
        preempt_enable();
}

static inline void __read_unlock_irq(rwlock_t *lock)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_read_unlock(lock);
        local_irq_enable();
        preempt_enable();
}

static inline void __read_unlock_bh(rwlock_t *lock)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_read_unlock(lock);
        preempt_enable_no_resched();
        local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}

static inline void __write_unlock_irqrestore(rwlock_t *lock,
                                             unsigned long flags)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_write_unlock(lock);
        local_irq_restore(flags);
        preempt_enable();
}

static inline void __write_unlock_irq(rwlock_t *lock)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_write_unlock(lock);
        local_irq_enable();
        preempt_enable();
}

static inline void __write_unlock_bh(rwlock_t *lock)
{
        rwlock_release(&lock->dep_map, 1, _RET_IP_);
        _raw_write_unlock(lock);
        preempt_enable_no_resched();
        local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}

static inline int __spin_trylock_bh(spinlock_t *lock)
{
        local_bh_disable();
        preempt_disable();
        if (_raw_spin_trylock(lock)) {
                spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
                return 1;
        }
        preempt_enable_no_resched();
        local_bh_enable_ip((unsigned long)__builtin_return_address(0));
        return 0;
}
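
/*
 * Editor's note: on success the caller returns with softirqs and
 * preemption disabled, exactly as after spin_lock_bh(); on failure
 * every side effect is unwound before returning 0. Sketch of typical
 * use, assuming the usual spin_trylock_bh() wrapper (obj is
 * hypothetical):
 *
 *      if (spin_trylock_bh(&obj->lock)) {
 *              ... softirq-safe critical section ...
 *              spin_unlock_bh(&obj->lock);
 *      }
 */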

#endif /* __LINUX_SPINLOCK_API_SMP_H */