#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif
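
/*
 * AArch64 atomic operations built from LL/SC primitives: each operation
 * is a load-exclusive / modify / store-exclusive (ldxr/stxr) loop that
 * retries until the store-exclusive succeeds.
 */
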
#define ATOMIC_OP(op, asm_op) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v)) \
{ \
	unsigned long tmp; \
	int result; \
 \
	asm volatile("// atomic_" #op "\n" \
"	prfm	pstl1strm, %2\n" \
"1:	ldxr	%w0, %2\n" \
"	" #asm_op "	%w0, %w0, %w3\n" \
"	stxr	%w1, %w0, %2\n" \
"	cbnz	%w1, 1b" \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i)); \
} \
__LL_SC_EXPORT(arch_atomic_##op);

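/*
 * ATOMIC_OP_RETURN() generates arch_atomic_<op>_return<name>(), which
 * returns the new value of the counter.  The mb/acq/rel/cl arguments
 * select the trailing barrier, the load-acquire/store-release flavour
 * and the clobber list used by the ordering variants instantiated below.
 */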
#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE int \
__LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v)) \
{ \
	unsigned long tmp; \
	int result; \
 \
	asm volatile("// atomic_" #op "_return" #name "\n" \
"	prfm	pstl1strm, %2\n" \
"1:	ld" #acq "xr	%w0, %2\n" \
"	" #asm_op "	%w0, %w0, %w3\n" \
"	st" #rel "xr	%w1, %w0, %2\n" \
"	cbnz	%w1, 1b\n" \
"	" #mb \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i) \
	: cl); \
 \
	return result; \
} \
__LL_SC_EXPORT(arch_atomic_##op##_return##name);

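/*
 * ATOMIC_FETCH_OP() generates arch_atomic_fetch_<op><name>(), which
 * returns the value the counter held before the operation was applied.
 */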
#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE int \
__LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v)) \
{ \
	unsigned long tmp; \
	int val, result; \
 \
	asm volatile("// atomic_fetch_" #op #name "\n" \
"	prfm	pstl1strm, %3\n" \
"1:	ld" #acq "xr	%w0, %3\n" \
"	" #asm_op "	%w1, %w0, %w4\n" \
"	st" #rel "xr	%w2, %w1, %3\n" \
"	cbnz	%w2, 1b\n" \
"	" #mb \
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i) \
	: cl); \
 \
	return result; \
} \
__LL_SC_EXPORT(arch_atomic_fetch_##op##name);

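/*
 * Instantiate the full set of orderings for each op: fully ordered
 * (store-release plus a trailing dmb ish), _relaxed, _acquire
 * (load-acquire) and _release (store-release).
 */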
#define ATOMIC_OPS(...) \
	ATOMIC_OP(__VA_ARGS__) \
	ATOMIC_OP_RETURN( , dmb ish, , l, "memory", __VA_ARGS__) \
	ATOMIC_OP_RETURN(_relaxed, , , , , __VA_ARGS__) \
	ATOMIC_OP_RETURN(_acquire, , a, , "memory", __VA_ARGS__) \
	ATOMIC_OP_RETURN(_release, , , l, "memory", __VA_ARGS__) \
	ATOMIC_FETCH_OP ( , dmb ish, , l, "memory", __VA_ARGS__) \
	ATOMIC_FETCH_OP (_relaxed, , , , , __VA_ARGS__) \
	ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__) \
	ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)

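/*
 * The bitwise ops only provide the plain and _fetch forms; there are no
 * arch_atomic_{and,andnot,or,xor}_return() variants.
 */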
#undef ATOMIC_OPS
#define ATOMIC_OPS(...) \
	ATOMIC_OP(__VA_ARGS__) \
	ATOMIC_FETCH_OP ( , dmb ish, , l, "memory", __VA_ARGS__) \
	ATOMIC_FETCH_OP (_relaxed, , , , , __VA_ARGS__) \
	ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__) \
	ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, orr)
ATOMIC_OPS(xor, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

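/*
 * 64-bit counterparts: the ATOMIC64_* macros mirror the 32-bit versions
 * above, operating on atomic64_t with full X registers (only the
 * store-exclusive status flag stays in a W register).
 */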
#define ATOMIC64_OP(op, asm_op) \
__LL_SC_INLINE void \
__LL_SC_PREFIX(arch_atomic64_##op(long i, atomic64_t *v)) \
{ \
	long result; \
	unsigned long tmp; \
 \
	asm volatile("// atomic64_" #op "\n" \
"	prfm	pstl1strm, %2\n" \
"1:	ldxr	%0, %2\n" \
"	" #asm_op "	%0, %0, %3\n" \
"	stxr	%w1, %0, %2\n" \
"	cbnz	%w1, 1b" \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i)); \
} \
__LL_SC_EXPORT(arch_atomic64_##op);

#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE long \
__LL_SC_PREFIX(arch_atomic64_##op##_return##name(long i, atomic64_t *v))\
{ \
	long result; \
	unsigned long tmp; \
 \
	asm volatile("// atomic64_" #op "_return" #name "\n" \
"	prfm	pstl1strm, %2\n" \
"1:	ld" #acq "xr	%0, %2\n" \
"	" #asm_op "	%0, %0, %3\n" \
"	st" #rel "xr	%w1, %0, %2\n" \
"	cbnz	%w1, 1b\n" \
"	" #mb \
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i) \
	: cl); \
 \
	return result; \
} \
__LL_SC_EXPORT(arch_atomic64_##op##_return##name);

#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \
__LL_SC_INLINE long \
__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(long i, atomic64_t *v)) \
{ \
	long result, val; \
	unsigned long tmp; \
 \
	asm volatile("// atomic64_fetch_" #op #name "\n" \
"	prfm	pstl1strm, %3\n" \
"1:	ld" #acq "xr	%0, %3\n" \
"	" #asm_op "	%1, %0, %4\n" \
"	st" #rel "xr	%w2, %1, %3\n" \
"	cbnz	%w2, 1b\n" \
"	" #mb \
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
	: "Ir" (i) \
	: cl); \
 \
	return result; \
} \
__LL_SC_EXPORT(arch_atomic64_fetch_##op##name);

#define ATOMIC64_OPS(...) \
	ATOMIC64_OP(__VA_ARGS__) \
	ATOMIC64_OP_RETURN(, dmb ish, , l, "memory", __VA_ARGS__) \
	ATOMIC64_OP_RETURN(_relaxed,, , , , __VA_ARGS__) \
	ATOMIC64_OP_RETURN(_acquire,, a, , "memory", __VA_ARGS__) \
	ATOMIC64_OP_RETURN(_release,, , l, "memory", __VA_ARGS__) \
	ATOMIC64_FETCH_OP (, dmb ish, , l, "memory", __VA_ARGS__) \
	ATOMIC64_FETCH_OP (_relaxed,, , , , __VA_ARGS__) \
	ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
	ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)

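/*
 * As with the 32-bit ops, the 64-bit bitwise operations only provide the
 * plain and _fetch forms.
 */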
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(...) \
	ATOMIC64_OP(__VA_ARGS__) \
	ATOMIC64_FETCH_OP (, dmb ish, , l, "memory", __VA_ARGS__) \
	ATOMIC64_FETCH_OP (_relaxed,, , , , __VA_ARGS__) \
	ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
	ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(andnot, bic)
ATOMIC64_OPS(or, orr)
ATOMIC64_OPS(xor, eor)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

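/*
 * arch_atomic64_dec_if_positive() only performs the decrement when the
 * result would not go negative; the computed value (possibly negative)
 * is returned either way so the caller can tell whether it happened.
 */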
__LL_SC_INLINE long
__LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v))
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"	prfm	pstl1strm, %2\n"
"1:	ldxr	%0, %2\n"
"	subs	%0, %0, #1\n"
"	b.lt	2f\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b\n"
"	dmb	ish\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}
__LL_SC_EXPORT(arch_atomic64_dec_if_positive);

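/*
 * __CMPXCHG_CASE() generates __cmpxchg_case_<name><size>() for 8-, 16-,
 * 32- and 64-bit compare-and-exchange.  The value observed at ptr is
 * returned; the exchange only takes place if it matched 'old'.
 */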
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \
__LL_SC_INLINE u##sz \
__LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \
					 unsigned long old, \
					 u##sz new)) \
{ \
	unsigned long tmp; \
	u##sz oldval; \
 \
	/* \
	 * Sub-word sizes require explicit casting so that the compare \
	 * part of the cmpxchg doesn't end up interpreting non-zero \
	 * upper bits of the register containing "old". \
	 */ \
	if (sz < 32) \
		old = (u##sz)old; \
 \
	asm volatile( \
	"	prfm	pstl1strm, %[v]\n" \
	"1:	ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \
	"	eor	%" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
	"	cbnz	%" #w "[tmp], 2f\n" \
	"	st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \
	"	cbnz	%w[tmp], 1b\n" \
	"	" #mb "\n" \
	"2:" \
	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
	  [v] "+Q" (*(u##sz *)ptr) \
	: [old] "Kr" (old), [new] "r" (new) \
	: cl); \
 \
	return oldval; \
} \
__LL_SC_EXPORT(__cmpxchg_case_##name##sz);

__CMPXCHG_CASE(w, b, , 8, , , , )
__CMPXCHG_CASE(w, h, , 16, , , , )
__CMPXCHG_CASE(w, , , 32, , , , )
__CMPXCHG_CASE( , , , 64, , , , )
__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory")
__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory")
__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory")
__CMPXCHG_CASE( , , acq_, 64, , a, , "memory")
__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory")
__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory")
__CMPXCHG_CASE( , , rel_, 64, , , l, "memory")
__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory")
__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory")
__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory")
__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory")

#undef __CMPXCHG_CASE

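/*
 * __CMPXCHG_DBL() generates a double-word compare-and-exchange on a pair
 * of adjacent 64-bit words using ldxp/stxp.  It returns zero when the
 * exchange took place and non-zero otherwise.
 */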
#define __CMPXCHG_DBL(name, mb, rel, cl) \
__LL_SC_INLINE long \
__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1, \
				      unsigned long old2, \
				      unsigned long new1, \
				      unsigned long new2, \
				      volatile void *ptr)) \
{ \
	unsigned long tmp, ret; \
 \
	asm volatile("// __cmpxchg_double" #name "\n" \
	"	prfm	pstl1strm, %2\n" \
	"1:	ldxp	%0, %1, %2\n" \
	"	eor	%0, %0, %3\n" \
	"	eor	%1, %1, %4\n" \
	"	orr	%1, %0, %1\n" \
	"	cbnz	%1, 2f\n" \
	"	st" #rel "xp	%w0, %5, %6, %2\n" \
	"	cbnz	%w0, 1b\n" \
	"	" #mb "\n" \
	"2:" \
	: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
	: "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
	: cl); \
 \
	return ret; \
} \
__LL_SC_EXPORT(__cmpxchg_double##name);

__CMPXCHG_DBL( , , , )
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")

#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LL_SC_H */