#ifndef __ASM_ARCH_SPINLOCK_H
#define __ASM_ARCH_SPINLOCK_H

#include <linux/spinlock_types.h>

#define RW_LOCK_BIAS 0x01000000

/* Out-of-line primitives, implemented elsewhere in the arch code. */
extern void cris_spin_unlock(void *l, int val);
extern void cris_spin_lock(void *l);
extern int cris_spin_trylock(void *l);

static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
	/* The lock byte is 1 when free, <= 0 when held. */
	return *(volatile signed char *)(&(x)->slock) <= 0;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/* Release by storing the "free" value (1) back into the lock. */
	__asm__ volatile ("move.d %1,%0"
			  : "=m" (lock->slock)
			  : "r" (1)
			  : "memory");
}

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return cris_spin_trylock((void *)&lock->slock);
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	cris_spin_lock((void *)&lock->slock);
}

static inline void
arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	arch_spin_lock(lock);
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * The counter starts at RW_LOCK_BIAS: each reader subtracts 1,
 * and a writer claims the whole bias by setting it to 0.  (A
 * stand-alone sketch of this counter protocol follows after the
 * include guard at the end of this file.)
 */

static inline int arch_read_can_lock(arch_rwlock_t *x)
{
	return (int)(x)->lock > 0;
}

static inline int arch_write_can_lock(arch_rwlock_t *x)
{
	return (x)->lock == RW_LOCK_BIAS;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	arch_spin_lock(&rw->slock);
	while (rw->lock == 0) {
		/*
		 * A writer holds the lock; it needs slock to release it,
		 * so drop slock while we wait instead of spinning with
		 * it held (which would deadlock).
		 */
		arch_spin_unlock(&rw->slock);
		cpu_relax();
		arch_spin_lock(&rw->slock);
	}
	rw->lock--;
	arch_spin_unlock(&rw->slock);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	arch_spin_lock(&rw->slock);
	while (rw->lock != RW_LOCK_BIAS) {
		/*
		 * Readers or a writer are active; they need slock to
		 * release, so drop it while we wait.
		 */
		arch_spin_unlock(&rw->slock);
		cpu_relax();
		arch_spin_lock(&rw->slock);
	}
	rw->lock = 0;
	arch_spin_unlock(&rw->slock);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	arch_spin_lock(&rw->slock);
	rw->lock++;
	arch_spin_unlock(&rw->slock);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	/*
	 * We hold the write lock, so nobody else can change the count
	 * under us; simply restore the bias.  (Waiting here for the
	 * count to reach RW_LOCK_BIAS would spin forever.)
	 */
	arch_spin_lock(&rw->slock);
	rw->lock = RW_LOCK_BIAS;
	arch_spin_unlock(&rw->slock);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	arch_spin_lock(&rw->slock);
	if (rw->lock != 0) {
		rw->lock--;
		ret = 1;
	}
	arch_spin_unlock(&rw->slock);
	return ret;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	int ret = 0;
	arch_spin_lock(&rw->slock);
	if (rw->lock == RW_LOCK_BIAS) {
		rw->lock = 0;
		ret = 1;
	}
	arch_spin_unlock(&rw->slock);
	return ret;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()

#endif /* __ASM_ARCH_SPINLOCK_H */
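
/*
 * Illustrative sketch, not part of the kernel interface: a minimal
 * user-space model of the RW_LOCK_BIAS counter protocol used above.
 * Everything in this block is hypothetical - the
 * SPINLOCK_COUNTER_SKETCH guard, the model_* helpers and the demo
 * main() exist only to show the accounting: each reader subtracts 1
 * from the bias, a writer claims the whole bias by setting the
 * counter to 0, and each unlock reverses its step.  The slock
 * serialization is omitted; only the counter arithmetic is modeled.
 * Compile this tail stand-alone with -DSPINLOCK_COUNTER_SKETCH to
 * run it.
 */
#ifdef SPINLOCK_COUNTER_SKETCH
#include <assert.h>
#include <stdio.h>

#define MODEL_BIAS 0x01000000

static unsigned int model_lock = MODEL_BIAS;	/* free: full bias */

static int model_read_trylock(void)
{
	if (model_lock == 0)		/* a writer holds the lock */
		return 0;
	model_lock--;			/* one more reader */
	return 1;
}

static void model_read_unlock(void)
{
	model_lock++;			/* one reader leaves */
}

static int model_write_trylock(void)
{
	if (model_lock != MODEL_BIAS)	/* readers or a writer active */
		return 0;
	model_lock = 0;			/* claim the whole bias */
	return 1;
}

static void model_write_unlock(void)
{
	model_lock = MODEL_BIAS;	/* restore the bias */
}

int main(void)				/* hypothetical demo, not kernel code */
{
	assert(model_read_trylock());	/* first reader gets in */
	assert(model_read_trylock());	/* readers can share */
	assert(!model_write_trylock());	/* writer must wait for readers */
	model_read_unlock();
	model_read_unlock();
	assert(model_write_trylock());	/* sole writer succeeds */
	assert(!model_read_trylock());	/* readers excluded meanwhile */
	model_write_unlock();
	printf("counter protocol behaves as described\n");
	return 0;
}
#endif /* SPINLOCK_COUNTER_SKETCH */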