#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H

#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define __atomic_acquire_fence()					\
	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory")
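
/*
 * Illustrative sketch (not part of this header): the generic fallbacks in
 * <linux/atomic.h> combine these fences with the _relaxed operations defined
 * below to build the acquire/release forms, roughly as:
 *
 *	ret = atomic_fetch_add_relaxed(i, v);
 *	__atomic_acquire_fence();		// acquire variant
 *
 *	__atomic_release_fence();
 *	ret = atomic_fetch_add_relaxed(i, v);	// release variant
 */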

static __always_inline int atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i)	{ (i) }
static __always_inline s64 atomic64_read(const atomic64_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void atomic64_set(atomic64_t *v, s64 i)
{
	WRITE_ONCE(v->counter, i);
}
#endif
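
/*
 * Illustrative sketch (not part of this header): atomic_read()/atomic_set()
 * are plain single-copy-atomic accesses with no ordering beyond
 * READ_ONCE()/WRITE_ONCE(); example_refs and do_cleanup() below are
 * hypothetical:
 *
 *	static atomic_t example_refs = ATOMIC_INIT(1);
 *
 *	atomic_set(&example_refs, 0);
 *	if (atomic_read(&example_refs) == 0)
 *		do_cleanup();
 */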

/*
 * First come the atomic ops that do not return a value and therefore carry
 * no ordering constraints: each is a single AMO instruction that is atomic
 * with respect to other accesses to the same variable but is not ordered
 * against other memory accesses.
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix)		\
static __always_inline							\
void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v)		\
{									\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " zero, %1, %0"	\
		: "+A" (v->counter)					\
		: "r" (I)						\
		: "memory");						\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_OP (op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_OP (op, asm_op, I, w, int,   )				\
	ATOMIC_OP (op, asm_op, I, d, s64, 64)
#endif

ATOMIC_OPS(add, add,  i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and,  i)
ATOMIC_OPS( or,  or,  i)
ATOMIC_OPS(xor, xor,  i)

#undef ATOMIC_OP
#undef ATOMIC_OPS
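
/*
 * Illustrative sketch (not part of this header): ATOMIC_OPS(add, add, i)
 * above expands (with an empty prefix) to roughly
 *
 *	static __always_inline void atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__ (
 *			"	amoadd.w zero, %1, %0"
 *			: "+A" (v->counter) : "r" (i) : "memory");
 *	}
 *
 * plus an atomic64_add() using amoadd.d when XLEN >= 64; atomic_sub() is the
 * same AMO applied to -i.
 */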

/*
 * Next, the atomic ops that return a value.  These come in fully ordered and
 * _relaxed variants here; acquire/release forms are generated from the
 * _relaxed ones by the generic fallbacks.  The arithmetic ops have both
 * fetch_ and _return versions, while the bitwise ops only have fetch_
 * versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type atomic##prefix##_fetch_##op##_relaxed(c_type i,			\
					     atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type " %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}									\
static __always_inline							\
c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v)	\
{									\
	register c_type ret;						\
	__asm__ __volatile__ (						\
		"	amo" #asm_op "." #asm_type ".aqrl %1, %2, %0"	\
		: "+A" (v->counter), "=r" (ret)				\
		: "r" (I)						\
		: "memory");						\
	return ret;							\
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix)	\
static __always_inline							\
c_type atomic##prefix##_##op##_return_relaxed(c_type i,			\
					      atomic##prefix##_t *v)	\
{									\
	return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I;	\
}									\
static __always_inline							\
c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v)	\
{									\
	return atomic##prefix##_fetch_##op(i, v) c_op I;		\
}
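
/*
 * Illustrative sketch (not part of this header): the AMO returns the value
 * held in memory *before* the operation, so the _return variants simply
 * re-apply the operation to that old value, e.g.
 *
 *	atomic_add_return(i, v) == atomic_fetch_add(i, v) + i
 *	atomic_sub_return(i, v) == atomic_fetch_sub(i, v) - i
 *
 * which is why ATOMIC_OPS(sub, ...) below passes c_op = '+' and I = -i.
 */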

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
	ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )		\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I)					\
	ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   )		\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )		\
	ATOMIC_FETCH_OP( op, asm_op,       I, d, s64, 64)		\
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64)
#endif

ATOMIC_OPS(add, add, +,  i)
ATOMIC_OPS(sub, add, +, -i)

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_add_return		atomic_add_return
#define atomic_sub_return		atomic_sub_return

#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
#define atomic_fetch_add		atomic_fetch_add
#define atomic_fetch_sub		atomic_fetch_sub
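
/*
 * Illustrative sketch (not part of this header): defining each op to itself
 * tells the generic fallbacks in <linux/atomic.h> that the architecture
 * already provides it.  A fully ordered atomic_add_return(i, v) then compiles
 * to roughly (register allocation is hypothetical):
 *
 *	amoadd.w.aqrl	t0, a0, (a1)	# t0 = old value, fully ordered AMO
 *	addw		a0, t0, a0	# return old + i
 */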

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_add_return		atomic64_add_return
#define atomic64_sub_return		atomic64_sub_return

#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
#define atomic64_fetch_add		atomic64_fetch_add
#define atomic64_fetch_sub		atomic64_fetch_sub
#endif

#undef ATOMIC_OPS

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I)					\
	ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )			\
	ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64)
#endif

ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or,  or, i)
ATOMIC_OPS(xor, xor, i)

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
#define atomic_fetch_and		atomic_fetch_and
#define atomic_fetch_or			atomic_fetch_or
#define atomic_fetch_xor		atomic_fetch_xor
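
/*
 * Illustrative sketch (not part of this header): the bitwise ops only come in
 * fetch_ form; the returned old value lets a caller implement e.g. a simple
 * test-and-set of a flag word (EXAMPLE_BUSY and state are hypothetical):
 *
 *	if (atomic_fetch_or(EXAMPLE_BUSY, &state) & EXAMPLE_BUSY)
 *		return -EBUSY;		// bit was already set
 */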

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
#define atomic64_fetch_and		atomic64_fetch_and
#define atomic64_fetch_or		atomic64_fetch_or
#define atomic64_fetch_xor		atomic64_fetch_xor
#endif

#undef ATOMIC_OPS

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

/* This is required to provide a full barrier on success. */
static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
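
/*
 * Illustrative sketch (not part of this header): the classic use of
 * atomic_fetch_add_unless() is a "take a reference unless it already dropped
 * to zero" helper, as the generic atomic_inc_not_zero() does:
 *
 *	static bool example_tryget(atomic_t *ref)	// hypothetical helper
 *	{
 *		return atomic_fetch_add_unless(ref, 1, 0) != 0;
 *	}
 */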

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 prev;
	long rc;

	__asm__ __volatile__ (
		"0:	lr.d     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif

/*
 * atomic_t/atomic64_t xchg and cmpxchg: thin wrappers around the __xchg and
 * __cmpxchg helpers from <asm/cmpxchg.h>, in relaxed, acquire, release and
 * fully ordered flavors.
 */
#define ATOMIC_OP(c_t, prefix, size)					\
static __always_inline							\
c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_relaxed(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_acquire(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n)		\
{									\
	return __xchg_release(&(v->counter), n, size);			\
}									\
static __always_inline							\
c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n)			\
{									\
	return __xchg(&(v->counter), n, size);				\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_relaxed(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_acquire(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v,		\
				     c_t o, c_t n)			\
{									\
	return __cmpxchg_release(&(v->counter), o, n, size);		\
}									\
static __always_inline							\
c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n)	\
{									\
	return __cmpxchg(&(v->counter), o, n, size);			\
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS()							\
	ATOMIC_OP(int,   , 4)
#else
#define ATOMIC_OPS()							\
	ATOMIC_OP(int,   , 4)						\
	ATOMIC_OP(s64, 64, 8)
#endif

ATOMIC_OPS()

#define atomic_xchg_relaxed		atomic_xchg_relaxed
#define atomic_xchg_acquire		atomic_xchg_acquire
#define atomic_xchg_release		atomic_xchg_release
#define atomic_xchg			atomic_xchg
#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
#define atomic_cmpxchg_acquire		atomic_cmpxchg_acquire
#define atomic_cmpxchg_release		atomic_cmpxchg_release
#define atomic_cmpxchg			atomic_cmpxchg

#undef ATOMIC_OPS
#undef ATOMIC_OP
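
/*
 * Illustrative sketch (not part of this header): a typical cmpxchg loop built
 * on the wrappers above, e.g. incrementing a counter but saturating it at a
 * hypothetical EXAMPLE_MAX instead of overflowing:
 *
 *	int old, prev = atomic_read(v);
 *
 *	do {
 *		if (prev == EXAMPLE_MAX)
 *			break;
 *		old = prev;
 *		prev = atomic_cmpxchg(v, old, old + 1);
 *	} while (prev != old);
 */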

static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w     %[p],  %[c]\n"
		"	sub      %[rc], %[p], %[o]\n"
		"	bltz     %[rc], 1f\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}

#define atomic_dec_if_positive(v)	atomic_sub_if_positive(v, 1)
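
/*
 * Illustrative sketch (not part of this header): atomic_dec_if_positive()
 * returns the decremented value and only stores it when the result does not
 * go negative, e.g. for a hypothetical token bucket:
 *
 *	if (atomic_dec_if_positive(&tokens) < 0)
 *		return -EAGAIN;		// no token was available
 */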

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
{
	s64 prev;
	long rc;

	__asm__ __volatile__ (
		"0:	lr.d     %[p],  %[c]\n"
		"	sub      %[rc], %[p], %[o]\n"
		"	bltz     %[rc], 1f\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}

#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(v, 1)
#endif

#endif /* _ASM_RISCV_ATOMIC_H */