1
2
3
4
5
6
7
8
9
10
11#ifndef _ASM_SPINLOCK_H
12#define _ASM_SPINLOCK_H
13
14#include <linux/atomic.h>
15#include <asm/barrier.h>
16#include <asm/processor.h>
17#include <asm/rwlock.h>
18#include <asm/page.h>
19
20
21
22
23
24
25
26
/* The lock is held while the low byte of ->slock reads non-zero. */
#define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) != 0)
28
/*
 * Release the lock by clearing the lock bit in ->slock with a single
 * "bclr" read-modify-write.  The "memory" clobber keeps the compiler
 * from moving memory accesses out of the critical section past the
 * unlock.  NOTE(review): relies on bclr being atomic on this CPU —
 * confirm against the MN10300 instruction manual.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
 asm volatile(
 " bclr 1,(0,%0) \n"
 :
 : "a"(&lock->slock)
 : "memory", "cc");
}
37
/*
 * Try once to take the lock, without spinning.
 *
 * "bset" sets the lock bit and leaves the bit's previous state in the
 * condition flags; the "bne" is taken when the lock was already held
 * (so %0 keeps its initial value 1), otherwise %0 is cleared.  The
 * final "xor 1" inverts that flag:
 *
 *   lock was free -> %0 = 0 -> xor -> 1  (acquired)
 *   lock was held -> %0 = 1 -> xor -> 0  (failed)
 *
 * Returns 1 if the lock was acquired, 0 otherwise.
 */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
 int ret;

 asm volatile(
 " mov 1,%0 \n"
 " bset %0,(%1) \n"
 " bne 1f \n"
 " clr %0 \n"
 "1: xor 1,%0 \n"
 : "=d"(ret)
 : "a"(&lock->slock)
 : "memory", "cc");

 return ret;
}
54
/*
 * Acquire the lock: repeatedly "bset" the lock bit, looping ("bne 1b")
 * for as long as the bit was already set by another owner.  This is a
 * pure busy-wait with interrupts left as-is; see
 * arch_spin_lock_flags() for the variant that re-enables interrupts
 * while spinning.
 */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
 asm volatile(
 "1: bset 1,(0,%0) \n"
 " bne 1b \n"
 :
 : "a"(&lock->slock)
 : "memory", "cc");
}
64
/*
 * Acquire the lock, but restore the caller's saved EPSW (@flags) while
 * busy-waiting so that interrupts are not kept masked for the whole
 * spin.
 *
 * 1: attempt the lock with "bset"; "beq 3f" exits when the bit was
 *    previously clear (lock acquired).
 * On failure: restore @flags into EPSW (re-enabling interrupts as the
 * caller had them), poll the lock word at label 2 until it reads zero,
 * then write EPSW_IE | MN10300_CLI_LEVEL to mask interrupts back to
 * the kernel's CLI level before retrying at 1.  The two "nop"s appear
 * to cover the pipeline delay after writing EPSW — NOTE(review):
 * confirm the required delay slots against the MN10300 manual.
 */
static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 unsigned long flags)
{
 int temp;

 asm volatile(
 "1: bset 1,(0,%2) \n"
 " beq 3f \n"
 " mov %1,epsw \n"
 "2: mov (0,%2),%0 \n"
 " or %0,%0 \n"
 " bne 2b \n"
 " mov %3,%0 \n"
 " mov %0,epsw \n"
 " nop \n"
 " nop \n"
 " bra 1b\n"
 "3: \n"
 : "=&d" (temp)
 : "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
 : "memory", "cc");
}
#define arch_spin_lock_flags arch_spin_lock_flags
88
89#ifdef __KERNEL__
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106static inline void arch_read_lock(arch_rwlock_t *rw)
107{
108#if 0
109 __build_read_lock(rw, "__read_lock_failed");
110#else
111 {
112 atomic_t *count = (atomic_t *)rw;
113 while (atomic_dec_return(count) < 0)
114 atomic_inc(count);
115 }
116#endif
117}
118
119static inline void arch_write_lock(arch_rwlock_t *rw)
120{
121#if 0
122 __build_write_lock(rw, "__write_lock_failed");
123#else
124 {
125 atomic_t *count = (atomic_t *)rw;
126 while (!atomic_sub_and_test(RW_LOCK_BIAS, count))
127 atomic_add(RW_LOCK_BIAS, count);
128 }
129#endif
130}
131
132static inline void arch_read_unlock(arch_rwlock_t *rw)
133{
134#if 0
135 __build_read_unlock(rw);
136#else
137 {
138 atomic_t *count = (atomic_t *)rw;
139 atomic_inc(count);
140 }
141#endif
142}
143
144static inline void arch_write_unlock(arch_rwlock_t *rw)
145{
146#if 0
147 __build_write_unlock(rw);
148#else
149 {
150 atomic_t *count = (atomic_t *)rw;
151 atomic_add(RW_LOCK_BIAS, count);
152 }
153#endif
154}
155
156static inline int arch_read_trylock(arch_rwlock_t *lock)
157{
158 atomic_t *count = (atomic_t *)lock;
159 atomic_dec(count);
160 if (atomic_read(count) >= 0)
161 return 1;
162 atomic_inc(count);
163 return 0;
164}
165
166static inline int arch_write_trylock(arch_rwlock_t *lock)
167{
168 atomic_t *count = (atomic_t *)lock;
169 if (atomic_sub_and_test(RW_LOCK_BIAS, count))
170 return 1;
171 atomic_add(RW_LOCK_BIAS, count);
172 return 0;
173}
174
/* Relaxation hints invoked by the generic lock spin-wait slow paths. */
#define _raw_spin_relax(lock) cpu_relax()
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
178
179#endif
180#endif
181