linux/kernel/locking/spinlock.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP non-debug builds inline them.)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */

#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/export.h>

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
/*
 * The __lock_function inlines are taken from
 * spinlock : include/linux/spinlock_api_smp.h
 * rwlock   : include/linux/rwlock_api_smp.h
 */
#else

/*
 * Some architectures can relax in favour of the CPU owning the lock.
 */
#ifndef arch_read_relax
# define arch_read_relax(l)     cpu_relax()
#endif
#ifndef arch_write_relax
# define arch_write_relax(l)    cpu_relax()
#endif
#ifndef arch_spin_relax
# define arch_spin_relax(l)     cpu_relax()
#endif

/*
 * We build the __lock_function inlines here. They are too large for
 * inlining all over the place, but here there is only one user per
 * function, which embeds them into the calling _lock_function below.
 *
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptible if possible), and we also signal
 * to the CPU owning the lock that it should break the lock ASAP.
 */
#define BUILD_LOCK_OPS(op, locktype)                                    \
void __lockfunc __raw_##op##_lock(locktype##_t *lock)                   \
{                                                                       \
        for (;;) {                                                      \
                preempt_disable();                                      \
                if (likely(do_raw_##op##_trylock(lock)))                \
                        break;                                          \
                preempt_enable();                                       \
                                                                        \
                arch_##op##_relax(&lock->raw_lock);                     \
        }                                                               \
}                                                                       \
                                                                        \
unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)  \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        for (;;) {                                                      \
                preempt_disable();                                      \
                local_irq_save(flags);                                  \
                if (likely(do_raw_##op##_trylock(lock)))                \
                        break;                                          \
                local_irq_restore(flags);                               \
                preempt_enable();                                       \
                                                                        \
                arch_##op##_relax(&lock->raw_lock);                     \
        }                                                               \
                                                                        \
        return flags;                                                   \
}                                                                       \
                                                                        \
void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)               \
{                                                                       \
        _raw_##op##_lock_irqsave(lock);                                 \
}                                                                       \
                                                                        \
void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)                \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        /*                                                      */      \
        /* Careful: we must exclude softirqs too, hence the     */      \
        /* irq-disabling. We use the generic preemption-aware   */      \
        /* function:                                            */      \
        /**/                                                            \
        flags = _raw_##op##_lock_irqsave(lock);                         \
        local_bh_disable();                                             \
        local_irq_restore(flags);                                       \
}                                                                       \

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         __[spin|read|write]_lock()
 *         __[spin|read|write]_lock_irq()
 *         __[spin|read|write]_lock_irqsave()
 *         __[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, raw_spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
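
/*
 * For illustration: with op=spin and locktype=raw_spinlock, the first
 * function generated by BUILD_LOCK_OPS() above expands, roughly, to the
 * following (a readability sketch of the preprocessor output, not
 * additional compiled code):
 *
 *	void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
 *	{
 *		for (;;) {
 *			preempt_disable();
 *			if (likely(do_raw_spin_trylock(lock)))
 *				break;
 *			preempt_enable();
 *
 *			arch_spin_relax(&lock->raw_lock);
 *		}
 *	}
 */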

#endif

#ifndef CONFIG_INLINE_SPIN_TRYLOCK
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
        return __raw_spin_trylock(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock);
#endif
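
/*
 * Caller-side sketch (illustrative; "dev" and its lock are hypothetical):
 * the spin_trylock() wrapper from <linux/spinlock.h> ends up here when
 * CONFIG_INLINE_SPIN_TRYLOCK is not set. A non-blocking acquisition
 * attempt looks like:
 *
 *	if (spin_trylock(&dev->lock)) {
 *		...			// lock taken: do the work
 *		spin_unlock(&dev->lock);
 *	} else {
 *		...			// lock busy: fall back or retry later
 *	}
 */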

#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
{
        return __raw_spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock_bh);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
        __raw_spin_lock(lock);
}
EXPORT_SYMBOL(_raw_spin_lock);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
        return __raw_spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave);
#endif
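
/*
 * Usage sketch (illustrative; "my_lock" is hypothetical): callers reach
 * this via the spin_lock_irqsave() macro, which takes flags by name so
 * the saved interrupt state can be handed back for the unlock:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	...				// critical section, IRQs disabled
 *	spin_unlock_irqrestore(&my_lock, flags);
 */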

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
{
        __raw_spin_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_BH
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
        __raw_spin_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif

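/*
 * Note the inverted sense of the guard below: unlike the CONFIG_INLINE_*
 * options above, spin_unlock is inlined by default, and this out-of-line
 * copy is only built when CONFIG_UNINLINE_SPIN_UNLOCK is set.
 */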
#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
        __raw_spin_unlock(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
        __raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
{
        __raw_spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
        __raw_spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif

#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
        return __raw_read_trylock(lock);
}
EXPORT_SYMBOL(_raw_read_trylock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK
void __lockfunc _raw_read_lock(rwlock_t *lock)
{
        __raw_read_lock(lock);
}
EXPORT_SYMBOL(_raw_read_lock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
{
        return __raw_read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQ
void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
{
        __raw_read_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irq);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_BH
void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
{
        __raw_read_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_lock_bh);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK
void __lockfunc _raw_read_unlock(rwlock_t *lock)
{
        __raw_read_unlock(lock);
}
EXPORT_SYMBOL(_raw_read_unlock);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
        __raw_read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
{
        __raw_read_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_irq);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_BH
void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
{
        __raw_read_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_TRYLOCK
int __lockfunc _raw_write_trylock(rwlock_t *lock)
{
        return __raw_write_trylock(lock);
}
EXPORT_SYMBOL(_raw_write_trylock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK
void __lockfunc _raw_write_lock(rwlock_t *lock)
{
        __raw_write_lock(lock);
}
EXPORT_SYMBOL(_raw_write_lock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
{
        return __raw_write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
{
        __raw_write_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_BH
void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
{
        __raw_write_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_lock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK
void __lockfunc _raw_write_unlock(rwlock_t *lock)
{
        __raw_write_unlock(lock);
}
EXPORT_SYMBOL(_raw_write_unlock);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
        __raw_write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
{
        __raw_write_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
{
        __raw_write_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif
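
/*
 * Reader/writer usage sketch (illustrative; "my_rwlock" is hypothetical):
 * the rwlock wrappers above back the usual caller-level API. Several
 * readers may hold the lock at once, while writers are exclusive:
 *
 *	read_lock(&my_rwlock);
 *	...				// shared, read-only section
 *	read_unlock(&my_rwlock);
 *
 *	write_lock(&my_rwlock);
 *	...				// exclusive section
 *	write_unlock(&my_rwlock);
 */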

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nested);
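
/*
 * Usage sketch (illustrative; "parent"/"child" are hypothetical): the
 * _nested variant tells lockdep that taking two locks of the same lock
 * class in a fixed order is intentional rather than a deadlock:
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	spin_unlock(&child->lock);
 *	spin_unlock(&parent->lock);
 */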

unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
                                                   int subclass)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
                                do_raw_spin_lock_flags, &flags);
        return flags;
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);

void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
                                     struct lockdep_map *nest_lock)
{
        preempt_disable();
        spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nest_lock);

#endif

notrace int in_lock_functions(unsigned long addr)
{
        /* Linker adds these: start and end of __lockfunc functions */
        extern char __lock_text_start[], __lock_text_end[];

        return addr >= (unsigned long)__lock_text_start
        && addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);
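
/*
 * As the header comment notes, architecture profile_pc() implementations
 * use this helper to detect profiling hits that land in __lockfunc code,
 * so the sample can be attributed to the lock's caller instead (sketch
 * only; the actual unwinding is architecture-specific):
 *
 *	if (in_lock_functions(pc))
 *		pc = ...;		// recover the caller's PC
 */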