linux/arch/arm64/include/asm/spinlock.h
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>

/*
 * Spinlock implementation.
 *
 * The old value is read exclusively and the new one, if unlocked, is written
 * exclusively. In case of failure, the loop is restarted.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 *
 * Unlocked value: 0
 * Locked value: 1
 */

#define arch_spin_is_locked(x)          ((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
        do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned int tmp;

        asm volatile(
        /* Set the local event flag so that the first wfe falls through. */
        "       sevl\n"
        /* Wait for an event; the unlocker's store clears our exclusive
           monitor and generates one. */
        "1:     wfe\n"
        /* Load-acquire the current lock value exclusively. */
        "2:     ldaxr   %w0, %1\n"
        /* Non-zero means locked: go back to sleep. */
        "       cbnz    %w0, 1b\n"
        /* Try to store 1; %w0 becomes 0 on success. */
        "       stxr    %w0, %w2, %1\n"
        /* Lost the exclusive monitor: retry without waiting. */
        "       cbnz    %w0, 2b\n"
        : "=&r" (tmp), "+Q" (lock->lock)
        : "r" (1)
        : "cc", "memory");
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned int tmp;

        asm volatile(
        /* Load-acquire the lock value; non-zero means it is already held. */
        "       ldaxr   %w0, %1\n"
        "       cbnz    %w0, 1f\n"
        /* Attempt the store; %w0 is 0 on success, 1 if we lost exclusivity. */
        "       stxr    %w0, %w2, %1\n"
        "1:\n"
        : "=&r" (tmp), "+Q" (lock->lock)
        : "r" (1)
        : "cc", "memory");

        return !tmp;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        asm volatile(
        /* Store-release of 0 publishes the critical section and wakes
           any wfe waiters. */
        "       stlr    %w1, %0\n"
        : "=Q" (lock->lock) : "r" (0) : "memory");
}

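/*
 * Illustrative sketch only, not part of this header's API: roughly the same
 * acquire/release semantics expressed with GCC __atomic builtins instead of
 * hand-written LL/SC assembly. The example_* names are invented for this
 * sketch and nothing below is used by the kernel; the real implementation
 * above additionally uses sevl/wfe so that waiters sleep in a low-power
 * state instead of spinning on a plain compare-and-swap loop.
 */
static inline void example_spin_lock_c(arch_spinlock_t *lock)
{
        unsigned int expected;

        while (1) {
                expected = 0;
                /* ACQUIRE on success mirrors the ldaxr/stxr pair above. */
                if (__atomic_compare_exchange_n(&lock->lock, &expected, 1, 0,
                                                __ATOMIC_ACQUIRE,
                                                __ATOMIC_RELAXED))
                        return;
                cpu_relax();
        }
}

static inline void example_spin_unlock_c(arch_spinlock_t *lock)
{
        /* RELEASE store mirrors the stlr in arch_spin_unlock(). */
        __atomic_store_n(&lock->lock, 0, __ATOMIC_RELEASE);
}
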
/*
 * Write lock implementation.
 *
 * Write locks set bit 31. Unlocking is done by writing 0, since at that
 * point the lock is exclusively held by the writer.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned int tmp;

        asm volatile(
        /* As in arch_spin_lock(): sleep in wfe until the lock word reads 0,
           i.e. no readers and no writer. */
        "       sevl\n"
        "1:     wfe\n"
        "2:     ldaxr   %w0, %1\n"
        "       cbnz    %w0, 1b\n"
        /* Claim the lock by storing 0x80000000; retry if exclusivity was lost. */
        "       stxr    %w0, %w2, %1\n"
        "       cbnz    %w0, 2b\n"
        : "=&r" (tmp), "+Q" (rw->lock)
        : "r" (0x80000000)
        : "cc", "memory");
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned int tmp;

        asm volatile(
        "       ldaxr   %w0, %1\n"
        "       cbnz    %w0, 1f\n"
        "       stxr    %w0, %w2, %1\n"
        "1:\n"
        : "=&r" (tmp), "+Q" (rw->lock)
        : "r" (0x80000000)
        : "cc", "memory");

        return !tmp;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        asm volatile(
        "       stlr    %w1, %0\n"
        : "=Q" (rw->lock) : "r" (0) : "memory");
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)          ((x)->lock == 0)

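/*
 * Illustrative sketch only: the write-lock fast path above amounts to a
 * single compare-and-swap of 0 -> 0x80000000 with acquire semantics (the
 * blocking variant simply retries under wfe). The helper name is invented
 * for this example and is not used anywhere in the kernel.
 */
static inline int example_write_trylock_c(arch_rwlock_t *rw)
{
        unsigned int expected = 0;

        /* Succeeds only when neither readers nor a writer are present. */
        return __atomic_compare_exchange_n(&rw->lock, &expected, 0x80000000,
                                           0, __ATOMIC_ACQUIRE,
                                           __ATOMIC_RELAXED);
}
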
/*
 * Read lock implementation.
 *
 * It exclusively loads the lock value, increments it and stores the new value
 * back if positive and the CPU still exclusively owns the location. If the
 * value is negative (bit 31 set), a writer holds the lock and the reader must
 * wait.
 *
 * During unlocking there may be multiple active read locks but no write lock.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned int tmp, tmp2;

        asm volatile(
        "       sevl\n"
        "1:     wfe\n"
        "2:     ldaxr   %w0, %2\n"
        /* Increment the reader count. */
        "       add     %w0, %w0, #1\n"
        /* Bit 31 set in the result means a writer is present: back to wfe. */
        "       tbnz    %w0, #31, 1b\n"
        /* Publish the new count; %w1 is non-zero if exclusivity was lost. */
        "       stxr    %w1, %w0, %2\n"
        "       cbnz    %w1, 2b\n"
        : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
        :
        : "cc", "memory");
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int tmp, tmp2;

        asm volatile(
        /* Decrement the reader count, with release semantics from stlxr. */
        "1:     ldxr    %w0, %2\n"
        "       sub     %w0, %w0, #1\n"
        "       stlxr   %w1, %w0, %2\n"
        "       cbnz    %w1, 1b\n"
        : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
        :
        : "cc", "memory");
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned int tmp, tmp2 = 1;

        asm volatile(
        "       ldaxr   %w0, %2\n"
        "       add     %w0, %w0, #1\n"
        /* Bit 31 set after the increment: skip the store, leaving
           tmp2 == 1 (failure). */
        "       tbnz    %w0, #31, 1f\n"
        /* tmp2 becomes 0 if the exclusive store succeeds. */
        "       stxr    %w1, %w0, %2\n"
        "1:\n"
        : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
        :
        : "cc", "memory");

        return !tmp2;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)           ((x)->lock < 0x80000000)

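/*
 * Illustrative sketch only: the reader fast path above is effectively a
 * conditional increment that fails whenever the incremented value would set
 * bit 31 (a writer is present, or the reader count would overflow). The
 * helper name is invented for this example; unlike the LL/SC version it
 * re-reads the lock word and then uses a compare-and-swap.
 */
static inline int example_read_trylock_c(arch_rwlock_t *rw)
{
        unsigned int old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);

        /* Incremented value must not set bit 31 (writer held / overflow). */
        if ((old + 1) & 0x80000000)
                return 0;

        /* Acquire semantics on success mirror the ldaxr/stxr pair above. */
        return __atomic_compare_exchange_n(&rw->lock, &old, old + 1, 0,
                                           __ATOMIC_ACQUIRE,
                                           __ATOMIC_RELAXED);
}
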
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)   cpu_relax()
#define arch_read_relax(lock)   cpu_relax()
#define arch_write_relax(lock)  cpu_relax()

#endif /* __ASM_SPINLOCK_H */