/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

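/*
 * Each operation below is built with ARM64_LSE_ATOMIC_INSN(llsc, lse): the
 * first sequence branches out of line to the LL/SC fallback (via
 * __LL_SC_CALL) and the second is the inline ARMv8.1 LSE form; the
 * alternatives framework patches in the LSE sequence at boot on CPUs that
 * support it.  Both sequences must be the same size, hence the padding
 * nops.  w30/x30 is used as a scratch register since the out-of-line call
 * clobbers it anyway (it is part of __LL_SC_CLOBBERS).
 */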
#define __LL_SC_ATOMIC(op)	__LL_SC_CALL(atomic_##op)

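/*
 * The value-discarding operations map directly onto the LSE "ST" forms:
 * STCLR clears the bits set in the operand (so andnot is a single
 * instruction), STSET ors them in, STEOR exclusive-ors them and STADD adds.
 */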
static inline void atomic_andnot(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(andnot),
	"	stclr	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

static inline void atomic_or(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(or),
	"	stset	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

static inline void atomic_xor(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(xor),
	"	steor	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

static inline void atomic_add(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add),
	"	stadd	%w[i], %[v]\n")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

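/*
 * The *_return variants use LDADD, which writes the old value of the
 * counter into w30; adding the increment back gives the new value to
 * return.  The "mb" argument selects the acquire ("a"), release ("l"),
 * full ("al") or relaxed ("") form of the instruction.
 */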
#define ATOMIC_OP_ADD_RETURN(name, mb, cl...)				\
static inline int atomic_add_return##name(int i, atomic_t *v)		\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	nop\n"							\
	__LL_SC_ATOMIC(add_return##name),				\
	/* LSE atomics */						\
	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
	"	add	%w[i], %w[i], w30")				\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

ATOMIC_OP_ADD_RETURN(_relaxed,   )
ATOMIC_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC_OP_ADD_RETURN

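/*
 * There is no LSE instruction that performs a plain AND in memory, so
 * atomic_and() inverts the operand and uses STCLR (clear the bits that
 * are set in the register) instead.
 */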
static inline void atomic_and(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC(and),
	/* LSE atomics */
	"	mvn	%w[i], %w[i]\n"
	"	stclr	%w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

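/*
 * Similarly there is no atomic subtract: negate the operand and use
 * STADD/LDADD instead.
 */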
static inline void atomic_sub(int i, atomic_t *v)
{
	register int w0 asm ("w0") = i;
	register atomic_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC(sub),
	/* LSE atomics */
	"	neg	%w[i], %w[i]\n"
	"	stadd	%w[i], %[v]")
	: [i] "+r" (w0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

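/*
 * sub_return: LDADD of the negated operand returns the old value in w30;
 * adding the (negated) operand back yields the new value.  The extra nop
 * keeps the LL/SC alternative the same size as the three-instruction LSE
 * sequence.
 */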
#define ATOMIC_OP_SUB_RETURN(name, mb, cl...)				\
static inline int atomic_sub_return##name(int i, atomic_t *v)		\
{									\
	register int w0 asm ("w0") = i;					\
	register atomic_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	nop\n"							\
	__LL_SC_ATOMIC(sub_return##name)				\
	"	nop",							\
	/* LSE atomics */						\
	"	neg	%w[i], %w[i]\n"					\
	"	ldadd" #mb "	%w[i], w30, %[v]\n"			\
	"	add	%w[i], %w[i], w30")				\
	: [i] "+r" (w0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return w0;							\
}

ATOMIC_OP_SUB_RETURN(_relaxed,   )
ATOMIC_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC_OP_SUB_RETURN
#undef __LL_SC_ATOMIC

#define __LL_SC_ATOMIC64(op)	__LL_SC_CALL(atomic64_##op)

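/*
 * The atomic64_t operations below mirror the 32-bit ones exactly, but use
 * the full 64-bit registers (x0, x30) and the 64-bit forms of the LSE
 * instructions.
 */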
static inline void atomic64_andnot(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(andnot),
	"	stclr	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

static inline void atomic64_or(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(or),
	"	stset	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

static inline void atomic64_xor(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(xor),
	"	steor	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

static inline void atomic64_add(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(add),
	"	stadd	%[i], %[v]\n")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)				\
static inline long atomic64_add_return##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	nop\n"							\
	__LL_SC_ATOMIC64(add_return##name),				\
	/* LSE atomics */						\
	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
	"	add	%[i], %[i], x30")				\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

ATOMIC64_OP_ADD_RETURN(_relaxed,   )
ATOMIC64_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC64_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC64_OP_ADD_RETURN

static inline void atomic64_and(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(and),
	/* LSE atomics */
	"	mvn	%[i], %[i]\n"
	"	stclr	%[i], %[v]")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

static inline void atomic64_sub(long i, atomic64_t *v)
{
	register long x0 asm ("x0") = i;
	register atomic64_t *x1 asm ("x1") = v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(sub),
	/* LSE atomics */
	"	neg	%[i], %[i]\n"
	"	stadd	%[i], %[v]")
	: [i] "+r" (x0), [v] "+Q" (v->counter)
	: "r" (x1)
	: __LL_SC_CLOBBERS);
}

#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)				\
static inline long atomic64_sub_return##name(long i, atomic64_t *v)	\
{									\
	register long x0 asm ("x0") = i;				\
	register atomic64_t *x1 asm ("x1") = v;				\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	nop\n"							\
	__LL_SC_ATOMIC64(sub_return##name)				\
	"	nop",							\
	/* LSE atomics */						\
	"	neg	%[i], %[i]\n"					\
	"	ldadd" #mb "	%[i], x30, %[v]\n"			\
	"	add	%[i], %[i], x30")				\
	: [i] "+r" (x0), [v] "+Q" (v->counter)				\
	: "r" (x1)							\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

ATOMIC64_OP_SUB_RETURN(_relaxed,   )
ATOMIC64_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC64_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC64_OP_SUB_RETURN

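/*
 * atomic64_dec_if_positive() has no single-instruction LSE equivalent, so
 * the LSE path is a CASAL loop: load the counter, compute counter - 1,
 * bail out if the result is negative, then try to swap it in.  The two
 * trailing subs compare the value CASAL observed with the value originally
 * loaded; if they differ another CPU won the race and we retry.  The
 * LL/SC alternative is padded with nops to the same length.
 */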
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	register long x0 asm ("x0") = (long)v;

	asm volatile(ARM64_LSE_ATOMIC_INSN(
	/* LL/SC */
	"	nop\n"
	__LL_SC_ATOMIC64(dec_if_positive)
	"	nop\n"
	"	nop\n"
	"	nop\n"
	"	nop\n"
	"	nop",
	/* LSE atomics */
	"1:	ldr	x30, %[v]\n"
	"	subs	%[ret], x30, #1\n"
	"	b.lt	2f\n"
	"	casal	x30, %[ret], %[v]\n"
	"	sub	x30, x30, #1\n"
	"	sub	x30, x30, %[ret]\n"
	"	cbnz	x30, 1b\n"
	"2:")
	: [ret] "+&r" (x0), [v] "+Q" (v->counter)
	:
	: __LL_SC_CLOBBERS, "cc", "memory");

	return x0;
}

#undef __LL_SC_ATOMIC64

#define __LL_SC_CMPXCHG(op)	__LL_SC_CALL(__cmpxchg_case_##op)

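/*
 * __cmpxchg_case_*: the LSE path moves the expected value into w30/x30,
 * issues CAS with the requested size (#sz: b, h or none) and ordering
 * (#mb) suffixes, and returns whatever value was found in memory (the
 * caller compares it against "old" to detect success).
 */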
#define __CMPXCHG_CASE(w, sz, name, mb, cl...)				\
static inline unsigned long __cmpxchg_case_##name(volatile void *ptr,	\
						  unsigned long old,	\
						  unsigned long new)	\
{									\
	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
	register unsigned long x1 asm ("x1") = old;			\
	register unsigned long x2 asm ("x2") = new;			\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	nop\n"							\
	__LL_SC_CMPXCHG(name)						\
	"	nop",							\
	/* LSE atomics */						\
	"	mov	" #w "30, %" #w "[old]\n"			\
	"	cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n"		\
	"	mov	%" #w "[ret], " #w "30")			\
	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr)		\
	: [old] "r" (x1), [new] "r" (x2)				\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

__CMPXCHG_CASE(w, b,     1,   )
__CMPXCHG_CASE(w, h,     2,   )
__CMPXCHG_CASE(w,  ,     4,   )
__CMPXCHG_CASE(x,  ,     8,   )
__CMPXCHG_CASE(w, b, acq_1,  a, "memory")
__CMPXCHG_CASE(w, h, acq_2,  a, "memory")
__CMPXCHG_CASE(w,  , acq_4,  a, "memory")
__CMPXCHG_CASE(x,  , acq_8,  a, "memory")
__CMPXCHG_CASE(w, b, rel_1,  l, "memory")
__CMPXCHG_CASE(w, h, rel_2,  l, "memory")
__CMPXCHG_CASE(w,  , rel_4,  l, "memory")
__CMPXCHG_CASE(x,  , rel_8,  l, "memory")
__CMPXCHG_CASE(w, b,  mb_1, al, "memory")
__CMPXCHG_CASE(w, h,  mb_2, al, "memory")
__CMPXCHG_CASE(w,  ,  mb_4, al, "memory")
__CMPXCHG_CASE(x,  ,  mb_8, al, "memory")

#undef __LL_SC_CMPXCHG
#undef __CMPXCHG_CASE

#define __LL_SC_CMPXCHG_DBL(op)	__LL_SC_CALL(__cmpxchg_double##op)

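/*
 * __cmpxchg_double*: the LSE path uses CASP to compare-and-swap the pair
 * of words at ptr in one instruction.  CASP writes the values it found
 * back into x0/x1, so the two EORs and the ORR reduce the result to zero
 * when both words matched (success) and non-zero otherwise.
 */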
#define __CMPXCHG_DBL(name, mb, cl...)					\
static inline long __cmpxchg_double##name(unsigned long old1,		\
					 unsigned long old2,		\
					 unsigned long new1,		\
					 unsigned long new2,		\
					 volatile void *ptr)		\
{									\
	unsigned long oldval1 = old1;					\
	unsigned long oldval2 = old2;					\
	register unsigned long x0 asm ("x0") = old1;			\
	register unsigned long x1 asm ("x1") = old2;			\
	register unsigned long x2 asm ("x2") = new1;			\
	register unsigned long x3 asm ("x3") = new2;			\
	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
									\
	asm volatile(ARM64_LSE_ATOMIC_INSN(				\
	/* LL/SC */							\
	"	nop\n"							\
	"	nop\n"							\
	"	nop\n"							\
	__LL_SC_CMPXCHG_DBL(name),					\
	/* LSE atomics */						\
	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
	"	eor	%[old1], %[old1], %[oldval1]\n"			\
	"	eor	%[old2], %[old2], %[oldval2]\n"			\
	"	orr	%[old1], %[old1], %[old2]")			\
	: [old1] "+r" (x0), [old2] "+r" (x1),				\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
	: __LL_SC_CLOBBERS, ##cl);					\
									\
	return x0;							\
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __LL_SC_CMPXCHG_DBL
#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LSE_H */