linux/arch/cris/include/arch-v32/arch/spinlock.h
#ifndef __ASM_ARCH_SPINLOCK_H
#define __ASM_ARCH_SPINLOCK_H

#include <linux/spinlock_types.h>

#define RW_LOCK_BIAS 0x01000000

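/*
 * Low-level lock primitives, implemented out of line elsewhere in the
 * arch-v32 code.  This header only builds the generic raw lock
 * operations on top of them.
 */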
extern void cris_spin_unlock(void *l, int val);
extern void cris_spin_lock(void *l);
extern int cris_spin_trylock(void *l);

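/*
 * A slock value of 1 means the lock is free; zero or a negative value
 * means it is held (the unlock path below stores 1).
 */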
static inline int __raw_spin_is_locked(raw_spinlock_t *x)
{
        return *(volatile signed char *)(&(x)->slock) <= 0;
}

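/*
 * Release by storing 1 into lock->slock; the "memory" clobber keeps
 * the compiler from reordering preceding accesses past the store.
 */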
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
        __asm__ volatile ("move.d %1,%0"
                          : "=m" (lock->slock)
                          : "r" (1)
                          : "memory");
}

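/*
 * Spin until the lock is released, without trying to acquire it.
 */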
static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
        while (__raw_spin_is_locked(lock))
                cpu_relax();
}

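/*
 * The acquire paths are handled entirely by the out-of-line cris_*
 * helpers declared above.
 */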
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
        return cris_spin_trylock((void *)&lock->slock);
}

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
        cris_spin_lock((void *)&lock->slock);
}

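/*
 * The saved IRQ flags are not used here; just take the lock.
 */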
static inline void
__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
        __raw_spin_lock(lock);
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

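/*
 * Implementation note: rw->lock starts at RW_LOCK_BIAS.  Each reader
 * takes one count (rw->lock--); a writer may proceed only when the
 * full bias is present, and then sets rw->lock to 0.  Every update of
 * rw->lock is serialised by the ordinary spinlock rw->slock.
 *
 * Illustrative sketch of the "mixed" usage described above, written
 * against the generic rwlock API rather than these raw helpers
 * (my_rwlock is a hypothetical lock, not defined here):
 *
 *      unsigned long flags;
 *
 *      write_lock_irqsave(&my_rwlock, flags);          (writer: irq-safe)
 *      ...
 *      write_unlock_irqrestore(&my_rwlock, flags);
 *
 *      read_lock(&my_rwlock);                          (reader: no irq disabling)
 *      ...
 *      read_unlock(&my_rwlock);
 */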
static inline int __raw_read_can_lock(raw_rwlock_t *x)
{
        return (int)(x)->lock > 0;
}

static inline int __raw_write_can_lock(raw_rwlock_t *x)
{
        return (x)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
        __raw_spin_lock(&rw->slock);
        /* Wait for a writer to finish; drop the guard lock while
         * spinning so the writer can take it to unlock. */
        while (rw->lock == 0) {
                __raw_spin_unlock(&rw->slock);
                cpu_relax();
                __raw_spin_lock(&rw->slock);
        }
        rw->lock--;
        __raw_spin_unlock(&rw->slock);
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
        __raw_spin_lock(&rw->slock);
        /* Wait for all readers to drain; drop the guard lock while
         * spinning so they can take it to unlock. */
        while (rw->lock != RW_LOCK_BIAS) {
                __raw_spin_unlock(&rw->slock);
                cpu_relax();
                __raw_spin_lock(&rw->slock);
        }
        rw->lock = 0;
        __raw_spin_unlock(&rw->slock);
}

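/*
 * Give the reader's count back; the guard spinlock serialises the update.
 */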
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
        __raw_spin_lock(&rw->slock);
        rw->lock++;
        __raw_spin_unlock(&rw->slock);
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
        __raw_spin_lock(&rw->slock);
        /* The writer owns the lock (rw->lock == 0); just restore the bias. */
        rw->lock = RW_LOCK_BIAS;
        __raw_spin_unlock(&rw->slock);
}

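/*
 * Trylock variants: take the guard spinlock, grant the lock only if it
 * is immediately available, and report whether that succeeded.
 */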
static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
        int ret = 0;
        __raw_spin_lock(&rw->slock);
        if (rw->lock != 0) {
                rw->lock--;
                ret = 1;
        }
        __raw_spin_unlock(&rw->slock);
        return ret;
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
        int ret = 0;
        __raw_spin_lock(&rw->slock);
        if (rw->lock == RW_LOCK_BIAS) {
                rw->lock = 0;
                ret = 1;
        }
        __raw_spin_unlock(&rw->slock);
        return ret;
}

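/* The IRQ flags are unused by this implementation. */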
#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)

#define _raw_spin_relax(lock)   cpu_relax()
#define _raw_read_relax(lock)   cpu_relax()
#define _raw_write_relax(lock)  cpu_relax()

#endif /* __ASM_ARCH_SPINLOCK_H */