linux/kernel/locking/spinlock.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */

#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/export.h>

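/*
 * Roughly speaking: on architectures that select CONFIG_MMIOWB, the
 * generic mmiowb() tracking (see asm-generic/mmiowb.h) keeps per-CPU
 * state so that a pending MMIO write barrier is issued on spin_unlock()
 * only when MMIO writes actually happened while the lock was held.
 */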
#ifdef CONFIG_MMIOWB
#ifndef arch_mmiowb_state
DEFINE_PER_CPU(struct mmiowb_state, __mmiowb_state);
EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
#endif
#endif

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
/*
 * The __lock_function inlines are taken from
 * spinlock : include/linux/spinlock_api_smp.h
 * rwlock   : include/linux/rwlock_api_smp.h
 */
#else

/*
 * Some architectures can relax in favour of the CPU owning the lock.
 */
#ifndef arch_read_relax
# define arch_read_relax(l)     cpu_relax()
#endif
#ifndef arch_write_relax
# define arch_write_relax(l)    cpu_relax()
#endif
#ifndef arch_spin_relax
# define arch_spin_relax(l)     cpu_relax()
#endif

/*
 * We build the __lock_function inlines here. They are too large for
 * inlining all over the place, but there is only one user per function,
 * which embeds them into the calling _lock_function below.
 *
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptible if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 */
#define BUILD_LOCK_OPS(op, locktype)                                    \
void __lockfunc __raw_##op##_lock(locktype##_t *lock)                   \
{                                                                       \
        for (;;) {                                                      \
                preempt_disable();                                      \
                if (likely(do_raw_##op##_trylock(lock)))                \
                        break;                                          \
                preempt_enable();                                       \
                                                                        \
                arch_##op##_relax(&lock->raw_lock);                     \
        }                                                               \
}                                                                       \
                                                                        \
unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)  \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        for (;;) {                                                      \
                preempt_disable();                                      \
                local_irq_save(flags);                                  \
                if (likely(do_raw_##op##_trylock(lock)))                \
                        break;                                          \
                local_irq_restore(flags);                               \
                preempt_enable();                                       \
                                                                        \
                arch_##op##_relax(&lock->raw_lock);                     \
        }                                                               \
                                                                        \
        return flags;                                                   \
}                                                                       \
                                                                        \
void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)               \
{                                                                       \
        _raw_##op##_lock_irqsave(lock);                                 \
}                                                                       \
                                                                        \
void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)                \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        /*                                                      */      \
        /* Careful: we must exclude softirqs too, hence the     */      \
        /* irq-disabling. We use the generic preemption-aware   */      \
        /* function:                                            */      \
        /**/                                                            \
        flags = _raw_##op##_lock_irqsave(lock);                         \
        local_bh_disable();                                             \
        local_irq_restore(flags);                                       \
}                                                                       \

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         __[spin|read|write]_lock()
 *         __[spin|read|write]_lock_irq()
 *         __[spin|read|write]_lock_irqsave()
 *         __[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, raw_spinlock);
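
/*
 * For reference, a rough sketch of what BUILD_LOCK_OPS(spin, raw_spinlock)
 * above expands to for the plain lock case:
 *
 *	void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
 *	{
 *		for (;;) {
 *			preempt_disable();
 *			if (likely(do_raw_spin_trylock(lock)))
 *				break;
 *			preempt_enable();
 *			arch_spin_relax(&lock->raw_lock);
 *		}
 *	}
 *
 * i.e. the lock is only ever taken via trylock, with preemption
 * re-enabled between attempts so a contended CPU stays preemptible.
 */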

#ifndef CONFIG_PREEMPT_RT
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
#endif

#endif

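/*
 * Each out-of-line lock function below is only built when the matching
 * CONFIG_INLINE_* option is not set. Architectures opt in to inlining
 * per operation via the corresponding ARCH_INLINE_* options (see
 * kernel/Kconfig.locks). _raw_spin_unlock() is the odd one out: it is
 * inlined unless CONFIG_UNINLINE_SPIN_UNLOCK is set.
 */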
#ifndef CONFIG_INLINE_SPIN_TRYLOCK
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
        return __raw_spin_trylock(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock);
#endif
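
/*
 * Caller-side sketch of the trylock API via the spin_trylock() wrapper
 * (my_lock, do_work and defer_work are made-up placeholders): trylock
 * returns nonzero when the lock was acquired, so a contended caller
 * must be prepared to fall back instead of spinning:
 *
 *	if (spin_trylock(&my_lock)) {
 *		do_work();
 *		spin_unlock(&my_lock);
 *	} else {
 *		defer_work();
 *	}
 */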

#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
{
        return __raw_spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock_bh);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
        __raw_spin_lock(lock);
}
EXPORT_SYMBOL(_raw_spin_lock);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
        return __raw_spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave);
#endif
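
/*
 * Sketch of the usual caller-side pairing for the irqsave variant (the
 * spin_lock_irqsave() wrapper stores the returned flags into its second
 * argument; my_lock is a made-up placeholder):
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	... critical section, hardirqs off ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 */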

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
{
        __raw_spin_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_BH
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
        __raw_spin_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif

#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
        __raw_spin_unlock(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
        __raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
{
        __raw_spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
        __raw_spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif

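/*
 * On PREEMPT_RT, rwlock_t is a sleeping lock built on top of rtmutex
 * (see spinlock_rt.c and rwbase_rt.c), so the spinning rwlock ops below
 * are not built there.
 */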
#ifndef CONFIG_PREEMPT_RT

#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
        return __raw_read_trylock(lock);
}
EXPORT_SYMBOL(_raw_read_trylock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK
void __lockfunc _raw_read_lock(rwlock_t *lock)
{
        __raw_read_lock(lock);
}
EXPORT_SYMBOL(_raw_read_lock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
{
        return __raw_read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQ
void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
{
        __raw_read_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irq);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_BH
void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
{
        __raw_read_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_lock_bh);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK
void __lockfunc _raw_read_unlock(rwlock_t *lock)
{
        __raw_read_unlock(lock);
}
EXPORT_SYMBOL(_raw_read_unlock);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
        __raw_read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
{
        __raw_read_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_irq);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_BH
void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
{
        __raw_read_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_TRYLOCK
int __lockfunc _raw_write_trylock(rwlock_t *lock)
{
        return __raw_write_trylock(lock);
}
EXPORT_SYMBOL(_raw_write_trylock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK
void __lockfunc _raw_write_lock(rwlock_t *lock)
{
        __raw_write_lock(lock);
}
EXPORT_SYMBOL(_raw_write_lock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
{
        return __raw_write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
{
        __raw_write_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_BH
void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
{
        __raw_write_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_lock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK
void __lockfunc _raw_write_unlock(rwlock_t *lock)
{
        __raw_write_unlock(lock);
}
EXPORT_SYMBOL(_raw_write_unlock);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
        __raw_write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
{
        __raw_write_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
{
        __raw_write_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif

#endif /* !CONFIG_PREEMPT_RT */

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nested);
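
/*
 * Sketch of how the _nested variant is typically reached, via the
 * spin_lock_nested() wrapper, when taking two locks of the same lockdep
 * class (parent/child are made-up placeholders):
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *
 * The subclass tells lockdep that the second acquisition is a deliberate
 * recursion within the class rather than a potential deadlock.
 */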

unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
                                                   int subclass)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
                                do_raw_spin_lock_flags, &flags);
        return flags;
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);

void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
                                     struct lockdep_map *nest_lock)
{
        preempt_disable();
        spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nest_lock);

#endif

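/*
 * in_lock_functions() reports whether an address falls inside the
 * out-of-line __lockfunc text. Profiling code (e.g. profile_pc() in the
 * architecture code, as noted in the header comment above) uses this to
 * skip over lock functions and charge a sample to the real caller.
 */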
notrace int in_lock_functions(unsigned long addr)
{
        /* Linker adds these: start and end of __lockfunc functions */
        extern char __lock_text_start[], __lock_text_end[];

        return addr >= (unsigned long)__lock_text_start &&
               addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);