linux/arch/xtensa/include/asm/spinlock.h
/*
 * arch/xtensa/include/asm/spinlock.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_SPINLOCK_H
#define _XTENSA_SPINLOCK_H

#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * spinlock
 *
 * A spinlock has at most one owner.  Unlike rwlocks (see below), there
 * is only one kind of spinlock owner.
 *
 * When trying to obtain a spinlock, the lock function busy-waits
 * ("spins") until the lock is obtained, on the assumption that the
 * current owner will soon release it.  Use the trylock function to
 * avoid spinning indefinitely.
 *
 * Possible values:
 *
 *    0         nobody owns the spinlock
 *    1         somebody owns the spinlock
 */
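
/*
 * Illustrative usage sketch (hypothetical lock variable and critical
 * section, not part of this header):
 */
#if 0
static arch_spinlock_t demo_lock = { .slock = 0 };

static void demo_critical_section(void)
{
        arch_spin_lock(&demo_lock);     /* slock: 0 -> 1, or spin */
        /* ... exclusive access here ... */
        arch_spin_unlock(&demo_lock);   /* slock: 1 -> 0 */
}
#endif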

#define arch_spin_is_locked(x) ((x)->slock != 0)

/* Wait until the lock is observed free; the load has acquire ordering. */
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        smp_cond_load_acquire(&lock->slock, !VAL);
}

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned long tmp;

        /*
         * SCOMPARE1 = 0, so S32C1I stores 1 only if the lock word still
         * reads 0, and returns the old value in %0; spin while the old
         * value is non-zero (somebody else holds the lock).
         */
        __asm__ __volatile__(
                        "       movi    %0, 0\n"
                        "       wsr     %0, scompare1\n"
                        "1:     movi    %0, 1\n"
                        "       s32c1i  %0, %1, 0\n"
                        "       bnez    %0, 1b\n"
                        : "=&a" (tmp)
                        : "a" (&lock->slock)
                        : "memory");
}
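
/*
 * A minimal C sketch of the S32C1I loop above, using the GCC
 * __sync_val_compare_and_swap() builtin as a stand-in for the
 * SCOMPARE1/S32C1I pair; illustrative only, not used by the kernel.
 */
#if 0
static inline void example_spin_lock(volatile unsigned int *slock)
{
        /* Retry the 0 -> 1 transition until the old value reads back 0. */
        while (__sync_val_compare_and_swap(slock, 0, 1) != 0)
                ;
}
#endif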

/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned long tmp;

        /* A single S32C1I attempt at the 0 -> 1 transition, no retry loop. */
        __asm__ __volatile__(
                        "       movi    %0, 0\n"
                        "       wsr     %0, scompare1\n"
                        "       movi    %0, 1\n"
                        "       s32c1i  %0, %1, 0\n"
                        : "=&a" (tmp)
                        : "a" (&lock->slock)
                        : "memory");

        return tmp == 0;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        unsigned long tmp;

        /* S32RI: clear the lock word with release semantics. */
        __asm__ __volatile__(
                        "       movi    %0, 0\n"
                        "       s32ri   %0, %1, 0\n"
                        : "=&a" (tmp)
                        : "a" (&lock->slock)
                        : "memory");
}
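
/*
 * S32RI above is a 32-bit store with release semantics.  A C11-style
 * equivalent of this unlock (illustrative only) would be:
 */
#if 0
static inline void example_spin_unlock(volatile unsigned int *slock)
{
        /* All prior accesses complete before the lock word is cleared. */
        __atomic_store_n(slock, 0, __ATOMIC_RELEASE);
}
#endif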

/*
 * rwlock
 *
 * Read-write locks are a more flexible kind of spinlock.  They allow
 * multiple readers but only a single writer.  Write ownership is
 * exclusive (i.e., all other readers and writers are blocked from
 * ownership while there is a write owner).  These rwlocks are unfair
 * to writers: readers can starve a writer for an indefinite time.
 *
 * Possible values:
 *
 *   0          nobody owns the rwlock
 *  >0          one or more readers own the rwlock
 *                (the positive value is the actual number of readers)
 *  0x80000000  one writer owns the rwlock, no other writers, no readers
 */
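
/*
 * Illustrative sketch of the encoding above (hypothetical helpers, not
 * part of this header): bit 31 is the writer bit, and any smaller
 * positive value is a reader count.
 */
#if 0
#define EXAMPLE_WRITER_BIT      0x80000000u

static inline int example_write_locked(unsigned int lock)
{
        return lock == EXAMPLE_WRITER_BIT;
}

static inline unsigned int example_readers(unsigned int lock)
{
        return (lock & EXAMPLE_WRITER_BIT) ? 0 : lock;
}
#endif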

#define arch_write_can_lock(x)  ((x)->lock == 0)

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned long tmp;

        /*
         * Spin on the 0 -> 0x80000000 transition: the store succeeds
         * only while there are no readers and no writer.
         */
        __asm__ __volatile__(
                        "       movi    %0, 0\n"
                        "       wsr     %0, scompare1\n"
                        "1:     movi    %0, 1\n"
                        "       slli    %0, %0, 31\n"
                        "       s32c1i  %0, %1, 0\n"
                        "       bnez    %0, 1b\n"
                        : "=&a" (tmp)
                        : "a" (&rw->lock)
                        : "memory");
}

/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned long tmp;

        /* A single attempt at the 0 -> 0x80000000 transition, no retry loop. */
        __asm__ __volatile__(
                        "       movi    %0, 0\n"
                        "       wsr     %0, scompare1\n"
                        "       movi    %0, 1\n"
                        "       slli    %0, %0, 31\n"
                        "       s32c1i  %0, %1, 0\n"
                        : "=&a" (tmp)
                        : "a" (&rw->lock)
                        : "memory");

        return tmp == 0;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        unsigned long tmp;

        /* S32RI: clear the writer bit with release semantics. */
        __asm__ __volatile__(
                        "       movi    %0, 0\n"
                        "       s32ri   %0, %1, 0\n"
                        : "=&a" (tmp)
                        : "a" (&rw->lock)
                        : "memory");
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned long tmp;
        unsigned long result;

        /*
         * Wait until no writer holds the lock (the value is
         * non-negative), then try to increment the reader count;
         * restart from the load if the S32C1I loses a race.
         */
        __asm__ __volatile__(
                        "1:     l32i    %1, %2, 0\n"
                        "       bltz    %1, 1b\n"
                        "       wsr     %1, scompare1\n"
                        "       addi    %0, %1, 1\n"
                        "       s32c1i  %0, %2, 0\n"
                        "       bne     %0, %1, 1b\n"
                        : "=&a" (result), "=&a" (tmp)
                        : "a" (&rw->lock)
                        : "memory");
}
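
/*
 * A minimal C sketch of the read-lock loop above (illustrative only):
 * wait while a writer holds the lock (the value is negative when read
 * as signed), then bump the reader count with a compare-and-swap.
 */
#if 0
static inline void example_read_lock(volatile unsigned int *lock)
{
        unsigned int old;

        do {
                do {
                        old = *lock;
                } while ((int)old < 0);         /* writer bit set: wait */
        } while (__sync_val_compare_and_swap(lock, old, old + 1) != old);
}
#endif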

/* Returns 1 if the lock is obtained, 0 otherwise. */

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned long result;
        unsigned long tmp;

        /*
         * Fail at once if a writer holds the lock or the incremented
         * count would overflow into the writer bit; otherwise make a
         * single attempt to increment the reader count.  On success
         * S32C1I returns the old value, so "result" ends up zero.
         */
        __asm__ __volatile__(
                        "       l32i    %1, %2, 0\n"
                        "       addi    %0, %1, 1\n"
                        "       bltz    %0, 1f\n"
                        "       wsr     %1, scompare1\n"
                        "       s32c1i  %0, %2, 0\n"
                        "       sub     %0, %0, %1\n"
                        "1:\n"
                        : "=&a" (result), "=&a" (tmp)
                        : "a" (&rw->lock)
                        : "memory");

        return result == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long tmp1, tmp2;

        /* Atomically decrement the reader count; retry on any race. */
        __asm__ __volatile__(
                        "1:     l32i    %1, %2, 0\n"
                        "       addi    %0, %1, -1\n"
                        "       wsr     %1, scompare1\n"
                        "       s32c1i  %0, %2, 0\n"
                        "       bne     %0, %1, 1b\n"
                        : "=&a" (tmp1), "=&a" (tmp2)
                        : "a" (&rw->lock)
                        : "memory");
}

#define arch_read_lock_flags(lock, flags)       arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)      arch_write_lock(lock)

#endif  /* _XTENSA_SPINLOCK_H */