linux/kernel/locking/spinlock.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (2004) Linus Torvalds
 *
 * Author: Zwane Mwaikambo <zwane@fsmlabs.com>
 *
 * Copyright (2004, 2005) Ingo Molnar
 *
 * This file contains the spinlock/rwlock implementations for the
 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
 *
 * Note that some architectures have special knowledge about the
 * stack frames of these functions in their profile_pc. If you
 * change anything significant here that could change the stack
 * frame, contact the architecture maintainers.
 */

#include <linux/linkage.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/export.h>

#ifdef CONFIG_MMIOWB
#ifndef arch_mmiowb_state
DEFINE_PER_CPU(struct mmiowb_state, __mmiowb_state);
EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
#endif
#endif
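/*
 * For illustration only: an architecture that already keeps suitable
 * per-CPU state in its own structures can avoid this generic per-CPU
 * variable by defining arch_mmiowb_state() in its asm/mmiowb.h, along
 * the lines of (hypothetical helper, not defined here):
 *
 *	#define arch_mmiowb_state()	(&my_arch_percpu_area()->mmiowb_state)
 *
 * in which case asm-generic/mmiowb.h uses that instead of __mmiowb_state.
 */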

/*
 * If lockdep is enabled then we use the non-preemption spin-ops
 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
 */
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
/*
 * The __lock_function inlines are taken from
 * spinlock : include/linux/spinlock_api_smp.h
 * rwlock   : include/linux/rwlock_api_smp.h
 */
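/*
 * As a rough sketch of what those header inlines look like (see
 * include/linux/spinlock_api_smp.h for the authoritative version),
 * __raw_spin_lock() there is approximately:
 *
 *	static inline void __raw_spin_lock(raw_spinlock_t *lock)
 *	{
 *		preempt_disable();
 *		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 *	}
 */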
#else

/*
 * Some architectures can relax in favour of the CPU owning the lock.
 */
#ifndef arch_read_relax
# define arch_read_relax(l)     cpu_relax()
#endif
#ifndef arch_write_relax
# define arch_write_relax(l)    cpu_relax()
#endif
#ifndef arch_spin_relax
# define arch_spin_relax(l)     cpu_relax()
#endif
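/*
 * Illustrative only: a virtualized architecture could override one of
 * these in its asm/spinlock.h to yield to the lock holder's vCPU rather
 * than busy-wait, e.g. (hypothetical helper name):
 *
 *	#define arch_spin_relax(l)	yield_to_lock_owner(l)
 */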

/*
 * We build the __lock_function inlines here. They are too large for
 * inlining all over the place, but here there is only one caller per
 * function, which embeds them into the calling _lock_function below.
 *
 * This could be a long-held lock. We both prepare to spin for a long
 * time (making _this_ CPU preemptable if possible), and we also signal
 * towards that other CPU that it should break the lock ASAP.
 */
#define BUILD_LOCK_OPS(op, locktype)                                    \
void __lockfunc __raw_##op##_lock(locktype##_t *lock)                   \
{                                                                       \
        for (;;) {                                                      \
                preempt_disable();                                      \
                if (likely(do_raw_##op##_trylock(lock)))                \
                        break;                                          \
                preempt_enable();                                       \
                                                                        \
                arch_##op##_relax(&lock->raw_lock);                     \
        }                                                               \
}                                                                       \
                                                                        \
unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock)  \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        for (;;) {                                                      \
                preempt_disable();                                      \
                local_irq_save(flags);                                  \
                if (likely(do_raw_##op##_trylock(lock)))                \
                        break;                                          \
                local_irq_restore(flags);                               \
                preempt_enable();                                       \
                                                                        \
                arch_##op##_relax(&lock->raw_lock);                     \
        }                                                               \
                                                                        \
        return flags;                                                   \
}                                                                       \
                                                                        \
void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock)               \
{                                                                       \
        _raw_##op##_lock_irqsave(lock);                                 \
}                                                                       \
                                                                        \
void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock)                \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        /*                                                      */      \
        /* Careful: we must exclude softirqs too, hence the     */      \
        /* irq-disabling. We use the generic preemption-aware   */      \
        /* function:                                            */      \
        /**/                                                            \
        flags = _raw_##op##_lock_irqsave(lock);                         \
        local_bh_disable();                                             \
        local_irq_restore(flags);                                       \
}                                                                       \

/*
 * Build preemption-friendly versions of the following
 * lock-spinning functions:
 *
 *         __[spin|read|write]_lock()
 *         __[spin|read|write]_lock_irq()
 *         __[spin|read|write]_lock_irqsave()
 *         __[spin|read|write]_lock_bh()
 */
BUILD_LOCK_OPS(spin, raw_spinlock);
BUILD_LOCK_OPS(read, rwlock);
BUILD_LOCK_OPS(write, rwlock);
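/*
 * For reference, BUILD_LOCK_OPS(spin, raw_spinlock) expands the first
 * function above to roughly:
 *
 *	void __lockfunc __raw_spin_lock(raw_spinlock_t *lock)
 *	{
 *		for (;;) {
 *			preempt_disable();
 *			if (likely(do_raw_spin_trylock(lock)))
 *				break;
 *			preempt_enable();
 *
 *			arch_spin_relax(&lock->raw_lock);
 *		}
 *	}
 */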

#endif

#ifndef CONFIG_INLINE_SPIN_TRYLOCK
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
{
        return __raw_spin_trylock(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock);
#endif

#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
{
        return __raw_spin_trylock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_trylock_bh);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
        __raw_spin_lock(lock);
}
EXPORT_SYMBOL(_raw_spin_lock);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
        return __raw_spin_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave);
#endif
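/*
 * Illustrative caller-side pattern for the irqsave variant (reached via
 * the spin_lock_irqsave() wrapper), safe against both other CPUs and
 * local interrupt handlers; "mydev" is a made-up example structure, not
 * something defined here:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&mydev->lock, flags);
 *	...critical section, IRQs off locally...
 *	spin_unlock_irqrestore(&mydev->lock, flags);
 */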

#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
{
        __raw_spin_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_LOCK_BH
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
{
        __raw_spin_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_lock_bh);
#endif

#ifdef CONFIG_UNINLINE_SPIN_UNLOCK
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
{
        __raw_spin_unlock(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
{
        __raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
{
        __raw_spin_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_irq);
#endif

#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
{
        __raw_spin_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif
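/*
 * Illustrative caller-side pattern for the _bh variants (reached via
 * spin_lock_bh()/spin_unlock_bh()), used when data is shared between
 * process context and softirq context; "stats" is a hypothetical
 * structure for illustration:
 *
 *	spin_lock_bh(&stats->lock);	(softirqs now disabled locally)
 *	stats->packets++;
 *	spin_unlock_bh(&stats->lock);
 */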

#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
        return __raw_read_trylock(lock);
}
EXPORT_SYMBOL(_raw_read_trylock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK
void __lockfunc _raw_read_lock(rwlock_t *lock)
{
        __raw_read_lock(lock);
}
EXPORT_SYMBOL(_raw_read_lock);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
{
        return __raw_read_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_IRQ
void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
{
        __raw_read_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_lock_irq);
#endif

#ifndef CONFIG_INLINE_READ_LOCK_BH
void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
{
        __raw_read_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_lock_bh);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK
void __lockfunc _raw_read_unlock(rwlock_t *lock)
{
        __raw_read_unlock(lock);
}
EXPORT_SYMBOL(_raw_read_unlock);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
        __raw_read_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
{
        __raw_read_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_irq);
#endif

#ifndef CONFIG_INLINE_READ_UNLOCK_BH
void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
{
        __raw_read_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_read_unlock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_TRYLOCK
int __lockfunc _raw_write_trylock(rwlock_t *lock)
{
        return __raw_write_trylock(lock);
}
EXPORT_SYMBOL(_raw_write_trylock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK
void __lockfunc _raw_write_lock(rwlock_t *lock)
{
        __raw_write_lock(lock);
}
EXPORT_SYMBOL(_raw_write_lock);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
{
        return __raw_write_lock_irqsave(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irqsave);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
{
        __raw_write_lock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_lock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_LOCK_BH
void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
{
        __raw_write_lock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_lock_bh);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK
void __lockfunc _raw_write_unlock(rwlock_t *lock)
{
        __raw_write_unlock(lock);
}
EXPORT_SYMBOL(_raw_write_unlock);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
        __raw_write_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
{
        __raw_write_unlock_irq(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_irq);
#endif

#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
{
        __raw_write_unlock_bh(lock);
}
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC

void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
{
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nested);
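/*
 * Callers reach this through spin_lock_nested(). A typical use is taking
 * two locks of the same lockdep class in a guaranteed order, so lockdep
 * does not flag a false deadlock; for example (illustrative):
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */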

unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
                                                   int subclass)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
        LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
                                do_raw_spin_lock_flags, &flags);
        return flags;
}
EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);

void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
                                     struct lockdep_map *nest_lock)
{
        preempt_disable();
        spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
}
EXPORT_SYMBOL(_raw_spin_lock_nest_lock);

#endif

notrace int in_lock_functions(unsigned long addr)
{
        /* Linker adds these: start and end of __lockfunc functions */
        extern char __lock_text_start[], __lock_text_end[];

        return addr >= (unsigned long)__lock_text_start
        && addr < (unsigned long)__lock_text_end;
}
EXPORT_SYMBOL(in_lock_functions);
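/*
 * Illustrative use, in the spirit of the profile_pc() note in the header
 * comment: an architecture's profiling code can test whether a sampled
 * program counter landed in one of these lock functions and, if so,
 * attribute the sample to the caller instead. A sketch, not any
 * particular architecture's actual code; caller_address() is a
 * hypothetical helper:
 *
 *	unsigned long profile_pc(struct pt_regs *regs)
 *	{
 *		unsigned long pc = instruction_pointer(regs);
 *
 *		if (in_lock_functions(pc))
 *			return caller_address(regs);
 *		return pc;
 *	}
 */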