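/*
 * Xtensa spinlock and read/write lock primitives, built on the S32C1I
 * conditional-store instruction (the SCOMPARE1 special register holds
 * the value that the store is conditional on).
 */
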
#ifndef _XTENSA_SPINLOCK_H
#define _XTENSA_SPINLOCK_H
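
/*
 * spinlock
 *
 * At most one owner at a time.  arch_spin_lock() busy-waits until the
 * lock is released by its current owner; use arch_spin_trylock() to
 * avoid spinning forever.
 *
 * Lock values:
 *
 *    0          nobody owns the spinlock
 *    1          somebody owns the spinlock
 */
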
#define arch_spin_is_locked(x) ((x)->slock != 0)
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	/*
	 * Spin on a compare-and-swap of 0 -> 1: SCOMPARE1 holds the
	 * expected value (0), and S32C1I leaves the old lock value in
	 * %0, so BNEZ retries while the lock was already taken.
	 */
	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       wsr     %0, scompare1\n"
			"1:     movi    %0, 1\n"
			"       s32c1i  %0, %1, 0\n"
			"       bnez    %0, 1b\n"
			: "=&a" (tmp)
			: "a" (&lock->slock)
			: "memory");
}
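
/* Returns 1 if the lock was obtained, 0 otherwise. */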
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	/* Single compare-and-swap attempt; %0 ends up holding the old
	 * lock value, which is 0 iff we got the lock. */
	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       wsr     %0, scompare1\n"
			"       movi    %0, 1\n"
			"       s32c1i  %0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&lock->slock)
			: "memory");

	return tmp == 0 ? 1 : 0;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	/* S32RI stores 0 with release semantics: all prior memory
	 * accesses complete before the lock is seen as free. */
	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       s32ri   %0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&lock->slock)
			: "memory");
}
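
/*
 * rwlock
 *
 * Read-write locks allow multiple simultaneous readers but only one
 * writer.  While a writer holds the lock, all readers and other
 * writers are blocked.  These rwlocks are unfair to writers: a steady
 * stream of readers can starve a writer indefinitely.
 *
 * Lock values:
 *
 *    0           nobody owns the rwlock
 *    > 0         one or more readers own the rwlock
 *                  (the value is the number of readers)
 *    0x80000000  one writer owns the rwlock; no readers, no other writers
 */
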
#define arch_write_can_lock(x) ((x)->lock == 0)

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	/* Spin on a compare-and-swap of 0 -> 0x80000000 (writer bit). */
	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       wsr     %0, scompare1\n"
			"1:     movi    %0, 1\n"
			"       slli    %0, %0, 31\n"
			"       s32c1i  %0, %1, 0\n"
			"       bnez    %0, 1b\n"
			: "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");
}
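
/* Returns 1 if the lock was obtained, 0 otherwise. */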
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	/* Single attempt to swap 0 -> 0x80000000. */
	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       wsr     %0, scompare1\n"
			"       movi    %0, 1\n"
			"       slli    %0, %0, 31\n"
			"       s32c1i  %0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");

	return tmp == 0 ? 1 : 0;
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	/* Release store of 0 drops the writer bit. */
	__asm__ __volatile__(
			"       movi    %0, 0\n"
			"       s32ri   %0, %1, 0\n"
			: "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;
	unsigned long result;

	/*
	 * Wait until no writer holds the lock (value is non-negative),
	 * then try to increment the reader count with a compare-and-swap;
	 * retry from the top if the lock changed under us.
	 */
	__asm__ __volatile__(
			"1:     l32i    %1, %2, 0\n"
			"       bltz    %1, 1b\n"
			"       wsr     %1, scompare1\n"
			"       addi    %0, %1, 1\n"
			"       s32c1i  %0, %2, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");
}
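
/* Returns 1 if the lock was obtained, 0 otherwise. */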
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long result;
	unsigned long tmp;

	/*
	 * Bail out if a writer holds the lock (the incremented value is
	 * negative); otherwise make one compare-and-swap attempt, which
	 * leaves %0 == 0 on success.
	 */
	__asm__ __volatile__(
			"       l32i    %1, %2, 0\n"
			"       addi    %0, %1, 1\n"
			"       bltz    %0, 1f\n"
			"       wsr     %1, scompare1\n"
			"       s32c1i  %0, %2, 0\n"
			"       sub     %0, %0, %1\n"
			"1:\n"
			: "=&a" (result), "=&a" (tmp)
			: "a" (&rw->lock)
			: "memory");

	return result == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp1, tmp2;

	/* Decrement the reader count with a compare-and-swap loop. */
	__asm__ __volatile__(
			"1:     l32i    %1, %2, 0\n"
			"       addi    %0, %1, -1\n"
			"       wsr     %1, scompare1\n"
			"       s32c1i  %0, %2, 0\n"
			"       bne     %0, %1, 1b\n"
			: "=&a" (tmp1), "=&a" (tmp2)
			: "a" (&rw->lock)
			: "memory");
}

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#endif	/* _XTENSA_SPINLOCK_H */