/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/lse.h>
#include <asm/spinlock_types.h>
#include <asm/processor.h>

/*
 * Spinlock implementation.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval, newval;

	asm volatile(
	/* Atomically increment the next ticket. */
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
"	prfm	pstl1strm, %3\n"
"1:	ldaxr	%w0, %3\n"
"	add	%w1, %w0, %w5\n"
"	stxr	%w2, %w1, %3\n"
"	cbnz	%w2, 1b\n",
	/* LSE atomics */
"	mov	%w2, %w5\n"
"	ldadda	%w2, %w0, %3\n"
"	nop\n"
"	nop\n"
"	nop\n"
	)

	/* Did we get the lock? */
"	eor	%w1, %w0, %w0, ror #16\n"
"	cbz	%w1, 3f\n"
	/*
	 * No: spin on the owner. Send a local event to avoid missing an
	 * unlock before the exclusive load.
	 */
"	sevl\n"
"2:	wfe\n"
"	ldaxrh	%w2, %4\n"
"	eor	%w1, %w2, %w0, lsr #16\n"
"	cbnz	%w1, 2b\n"
	/* We got the lock. Critical section starts here. */
"3:"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
	: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
	: "memory");
}
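/* Try once to take the next ticket; returns non-zero if the lock was acquired. */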
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp;
	arch_spinlock_t lockval;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	prfm	pstl1strm, %2\n"
	"1:	ldaxr	%w0, %2\n"
	"	eor	%w1, %w0, %w0, ror #16\n"
	"	cbnz	%w1, 2f\n"
	"	add	%w0, %w0, %3\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	"2:",
	/* LSE atomics */
	"	ldr	%w0, %2\n"
	"	eor	%w1, %w0, %w0, ror #16\n"
	"	cbnz	%w1, 1f\n"
	"	add	%w1, %w0, %3\n"
	"	casa	%w0, %w1, %2\n"
	"	and	%w1, %w1, #0xffff\n"
	"	eor	%w1, %w1, %w0, lsr #16\n"
	"1:")
	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
	: "I" (1 << TICKET_SHIFT)
	: "memory");

	return !tmp;
}
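/* Release the lock by bumping the owner ticket with release semantics. */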
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	ldrh	%w1, %0\n"
	"	add	%w1, %w1, #1\n"
	"	stlrh	%w1, %0",
	/* LSE atomics */
	"	mov	%w1, #1\n"
	"	nop\n"
	"	staddlh	%w1, %0")
	: "=Q" (lock->owner), "=&r" (tmp)
	:
	: "memory");
}
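/* Lock status helpers: the lock is free when the owner and next tickets match. */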
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.owner == lock.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return !arch_spin_value_unlocked(READ_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	arch_spinlock_t lockval = READ_ONCE(*lock);
	return (lockval.next - lockval.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended
/*
 * Write lock implementation.
 *
 * Write locks set bit 31. Unlocking is done by writing 0, since the lock is
 * exclusively held.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	sevl\n"
	"1:	wfe\n"
	"2:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 1b\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 2b\n"
	"	nop",
	/* LSE atomics */
	"1:	mov	%w0, wzr\n"
	"2:	casa	%w0, %w2, %1\n"
	"	cbz	%w0, 3f\n"
	"	ldxr	%w0, %1\n"
	"	cbz	%w0, 2b\n"
	"	wfe\n"
	"	b	1b\n"
	"3:")
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");
}
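/* Try once to set the writer bit; returns non-zero if the write lock was taken. */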
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	ldaxr	%w0, %1\n"
	"	cbnz	%w0, 2f\n"
	"	stxr	%w0, %w2, %1\n"
	"	cbnz	%w0, 1b\n"
	"2:",
	/* LSE atomics */
	"	mov	%w0, wzr\n"
	"	casa	%w0, %w2, %1\n"
	"	nop\n"
	"	nop")
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
	: "memory");

	return !tmp;
}
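/* Drop the write lock by clearing the whole word with release semantics. */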
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(ARM64_LSE_ATOMIC_INSN(
	"	stlr	wzr, %0",
	"	swpl	wzr, wzr, %0")
	: "=Q" (rw->lock) :: "memory");
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)
/*
 * Read lock handling.
 *
 * It exclusively loads the lock value, increments it and stores the new
 * value back if positive and the CPU still exclusively owns the location.
 * If the value is negative, the lock is already write-held.
 *
 * During unlocking there may be multiple active read locks but no write
 * lock.
 *
 * The memory barriers are implicit with the load-acquire and store-release
 * instructions.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(
	"	sevl\n"
	ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	wfe\n"
	"2:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 1b\n"
	"	stxr	%w1, %w0, %2\n"
	"	nop\n"
	"	cbnz	%w1, 2b",
	/* LSE atomics */
	"1:	wfe\n"
	"2:	ldxr	%w0, %2\n"
	"	adds	%w1, %w0, #1\n"
	"	tbnz	%w1, #31, 1b\n"
	"	casa	%w0, %w1, %2\n"
	"	sbc	%w0, %w1, %w0\n"
	"	cbnz	%w0, 2b")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");
}
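/* Drop one reader: decrement the count with release semantics. */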
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"1:	ldxr	%w0, %2\n"
	"	sub	%w0, %w0, #1\n"
	"	stlxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b",
	/* LSE atomics */
	"	movn	%w0, #0\n"
	"	nop\n"
	"	nop\n"
	"	staddl	%w0, %2")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "memory");
}
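/* Try once to take a reader reference; returns non-zero on success. */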
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned int tmp, tmp2;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	mov	%w1, #1\n"
	"1:	ldaxr	%w0, %2\n"
	"	add	%w0, %w0, #1\n"
	"	tbnz	%w0, #31, 2f\n"
	"	stxr	%w1, %w0, %2\n"
	"	cbnz	%w1, 1b\n"
	"2:",
	/* LSE atomics */
	"	ldr	%w0, %2\n"
	"	adds	%w1, %w0, #1\n"
	"	tbnz	%w1, #31, 1f\n"
	"	casa	%w0, %w1, %2\n"
	"	sbc	%w1, %w1, %w0\n"
	"	nop\n"
	"1:")
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
	: "cc", "memory");

	return !tmp2;
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)

#define arch_read_lock_flags(lock, flags)	arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)	arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */