linux/arch/sparc/include/asm/spinlock_32.h
/* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC_SPINLOCK_H
#define __SPARC_SPINLOCK_H

#ifndef __ASSEMBLY__

#include <asm/psr.h>
#include <asm/barrier.h>
#include <asm/processor.h> /* for cpu_relax */

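/* The lock is a single byte: zero means free, non-zero means held. */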
#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)

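/*
 * Busy-wait until the current holder drops the lock: spin on an
 * acquire load of the lock byte until it reads zero.
 */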
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	smp_cond_load_acquire(&lock->lock, !VAL);
}

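/*
 * Acquire: "ldstub" atomically loads the lock byte and stores 0xff back,
 * so a zero result means we took the lock.  On contention we branch to a
 * slow path placed out of line in .subsection 2, spin with plain ldub
 * reads until the byte clears, then retry the ldstub at 1:.
 */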
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
	"\n1:\n\t"
	"ldstub	[%0], %%g2\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2f\n\t"
	" ldub	[%0], %%g2\n\t"
	".subsection	2\n"
	"2:\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2b\n\t"
	" ldub	[%0], %%g2\n\t"
	"b,a	1b\n\t"
	".previous\n"
	: /* no outputs */
	: "r" (lock)
	: "g2", "memory", "cc");
}

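/*
 * Trylock: a single ldstub attempt.  Returns 1 if the byte was clear
 * (lock acquired), 0 if somebody else already holds it.
 */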
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int result;
	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (result)
			     : "r" (lock)
			     : "memory");
	return (result == 0);
}

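/*
 * Release: a plain byte store of zero.  The "memory" clobber keeps the
 * compiler from moving protected accesses past the store.
 */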
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}

/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 *
 * Sort of like atomic_t's on Sparc, but even more clever.
 *
 *	------------------------------------
 *	| 24-bit counter           | wlock |  arch_rwlock_t
 *	------------------------------------
 *	 31                       8 7     0
 *
 * wlock signifies that the one writer is in, or that somebody is
 * updating the counter. For a writer: if he successfully acquires the
 * wlock but the counter is non-zero, he has to release the lock and
 * wait until both counter and wlock are zero.
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
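/*
 * Reader fast path.  "ldstub [%g1 + 3]" grabs the wlock byte (offset 3
 * is the low-order byte on big-endian SPARC), then the out-of-line
 * helper ___rw_read_enter (arch/sparc/lib/locks.S) spins if a writer
 * held it, bumps the 24-bit reader counter and drops the wlock byte
 * again.  %o7 is stashed in %g4 because the bare "call" clobbers it;
 * the helper restores it on return.
 */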
static inline void __arch_read_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

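/*
 * Readers run with interrupts off: the counter update above transiently
 * holds the wlock byte, so a reader (or writer) in an interrupt handler
 * on this CPU could otherwise deadlock spinning on it.
 */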
#define arch_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_lock(lock); \
	local_irq_restore(flags); \
} while(0)

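/*
 * Reader exit: same wlock handshake, but the helper ___rw_read_exit
 * decrements the counter instead.
 */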
static inline void __arch_read_unlock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_exit\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define arch_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__arch_read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)

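/*
 * Writer acquire: grab the wlock byte, then let ___rw_write_enter wait
 * for the reader counter to drain (releasing and retrying the wlock if
 * readers are still in, as described above).  Once it returns we own
 * the lock; setting every bit keeps both arch_read_can_lock() and
 * arch_write_can_lock() failing while we hold it.
 */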
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_write_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	*(volatile __u32 *)&lp->lock = ~0U;
}

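/*
 * Writer release: a single full-word store of zero clears the counter
 * bits and the wlock byte at once.
 */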
static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
"	st	%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

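/*
 * Writer trylock: grab the wlock byte with ldstub; if readers hold the
 * lock (counter bits set) back out by clearing the byte, otherwise
 * claim the whole word.  Returns 1 on success.
 */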
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int val;

	__asm__ __volatile__("ldstub [%1 + 3], %0"
			     : "=r" (val)
			     : "r" (&rw->lock)
			     : "memory");

	if (val == 0) {
		val = rw->lock & ~0xff;
		if (val)
			((volatile u8*)&rw->lock)[3] = 0;
		else
			*(volatile u32*)&rw->lock = ~0U;
	}

	return (val == 0);
}

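/*
 * Reader trylock: ___rw_read_try returns its result in %o0 (non-zero on
 * success), hence the explicit register binding below.
 */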
static inline int __arch_read_trylock(arch_rwlock_t *rw)
{
	register arch_rwlock_t *lp asm("g1");
	register int res asm("o0");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_try\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: "=r" (res)
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	return res;
}

#define arch_read_trylock(lock) \
({	unsigned long flags; \
	int res; \
	local_irq_save(flags); \
	res = __arch_read_trylock(lock); \
	local_irq_restore(flags); \
	res; \
})

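/* The _flags variants ignore the saved flags and just spin. */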
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_read_lock_flags(rw, flags)   arch_read_lock(rw)
#define arch_write_lock_flags(rw, flags)  arch_write_lock(rw)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

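/*
 * A read can succeed when no writer holds (or is updating) the wlock
 * byte; a write can succeed only when the entire word - counter and
 * wlock - is zero.
 */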
#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
#define arch_write_can_lock(rw) (!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* __SPARC_SPINLOCK_H */