#ifndef _ASM_RISCV_ATOMIC_H
#define _ASM_RISCV_ATOMIC_H

#ifdef CONFIG_GENERIC_ATOMIC64
# include <asm-generic/atomic64.h>
#else
# if (__riscv_xlen < 64)
#  error "64-bit atomics require XLEN to be at least 64"
# endif
#endif

#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)	{ (i) }

#define __atomic_op_acquire(op, args...) \
({ \
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
	__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory"); \
	__ret; \
})

#define __atomic_op_release(op, args...) \
({ \
	__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory"); \
	op##_relaxed(args); \
})
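
/*
 * For illustration only: the generic atomic code derives the _acquire and
 * _release forms from the _relaxed ones via the helpers above.  Assuming the
 * RISCV_ACQUIRE_BARRIER string from <asm/fence.h> ("fence r, rw"), a call
 * such as
 *
 *	atomic_fetch_add_acquire(i, v)
 *
 * is expected to expand to roughly
 *
 *	({
 *		int __ret = atomic_fetch_add_relaxed(i, v);
 *		__asm__ __volatile__("fence r, rw" ::: "memory");
 *		__ret;
 *	})
 *
 * i.e. the relaxed AMO followed by an acquire fence.
 */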

static __always_inline int atomic_read(const atomic_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

#ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i)	{ (i) }
static __always_inline long atomic64_read(const atomic64_t *v)
{
	return READ_ONCE(v->counter);
}
static __always_inline void atomic64_set(atomic64_t *v, long i)
{
	WRITE_ONCE(v->counter, i);
}
#endif

/*
 * First, the atomic ops that have no ordering constraints and therefore don't
 * have the AQ or RL bits set.  These don't return anything, so there's only
 * one version to worry about.
 */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
{ \
	__asm__ __volatile__ ( \
		"	amo" #asm_op "." #asm_type " zero, %1, %0" \
		: "+A" (v->counter) \
		: "r" (I) \
		: "memory"); \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I) \
	ATOMIC_OP (op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I) \
	ATOMIC_OP (op, asm_op, I, w, int,   ) \
	ATOMIC_OP (op, asm_op, I, d, long, 64)
#endif

ATOMIC_OPS(add, add,  i)
ATOMIC_OPS(sub, add, -i)
ATOMIC_OPS(and, and,  i)
ATOMIC_OPS( or,  or,  i)
ATOMIC_OPS(xor, xor,  i)

#undef ATOMIC_OP
#undef ATOMIC_OPS
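
/*
 * A sketch of what one instantiation above generates (32-bit "w" variant);
 * this is not compiled here, just a reference for readers:
 *
 *	static __always_inline void atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__(
 *			"	amoadd.w zero, %1, %0"
 *			: "+A" (v->counter)
 *			: "r" (i)
 *			: "memory");
 *	}
 *
 * The AMO writes its result to the zero register, so the old value is
 * discarded and no ordering bits are requested.
 */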

/*
 * Atomic ops that have ordered, relaxed, acquire, and release variants.
 * There are two flavors of these: the arithmetic ops have both fetch and
 * return versions, while the logical ops only have fetch versions.
 */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \
c_type atomic##prefix##_fetch_##op##_relaxed(c_type i, \
					     atomic##prefix##_t *v) \
{ \
	register c_type ret; \
	__asm__ __volatile__ ( \
		"	amo" #asm_op "." #asm_type " %1, %2, %0" \
		: "+A" (v->counter), "=r" (ret) \
		: "r" (I) \
		: "memory"); \
	return ret; \
} \
static __always_inline \
c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
{ \
	register c_type ret; \
	__asm__ __volatile__ ( \
		"	amo" #asm_op "." #asm_type ".aqrl %1, %2, %0" \
		: "+A" (v->counter), "=r" (ret) \
		: "r" (I) \
		: "memory"); \
	return ret; \
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \
static __always_inline \
c_type atomic##prefix##_##op##_return_relaxed(c_type i, \
					      atomic##prefix##_t *v) \
{ \
	return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
} \
static __always_inline \
c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
{ \
	return atomic##prefix##_fetch_##op(i, v) c_op I; \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I) \
	ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   ) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I) \
	ATOMIC_FETCH_OP( op, asm_op,       I, w, int,   ) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int,   ) \
	ATOMIC_FETCH_OP( op, asm_op,       I, d, long, 64) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, long, 64)
#endif

ATOMIC_OPS(add, add, +,  i)
ATOMIC_OPS(sub, add, +, -i)
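
/*
 * As a sketch, ATOMIC_OPS(sub, add, +, -i) above produces (for the 32-bit
 * case) something equivalent to:
 *
 *	int atomic_fetch_sub(int i, atomic_t *v)
 *	{
 *		int ret;
 *		__asm__ __volatile__(
 *			"	amoadd.w.aqrl %1, %2, %0"
 *			: "+A" (v->counter), "=r" (ret)
 *			: "r" (-i)
 *			: "memory");
 *		return ret;
 *	}
 *
 *	int atomic_sub_return(int i, atomic_t *v)
 *	{
 *		return atomic_fetch_sub(i, v) + -i;
 *	}
 *
 * i.e. subtraction is implemented by adding the negated operand, and the
 * *_return form is derived from the fetch form in C.
 */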

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_add_return		atomic_add_return
#define atomic_sub_return		atomic_sub_return

#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
#define atomic_fetch_add		atomic_fetch_add
#define atomic_fetch_sub		atomic_fetch_sub

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_add_return		atomic64_add_return
#define atomic64_sub_return		atomic64_sub_return

#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
#define atomic64_fetch_add		atomic64_fetch_add
#define atomic64_fetch_sub		atomic64_fetch_sub
#endif

#undef ATOMIC_OPS

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, I) \
	ATOMIC_FETCH_OP(op, asm_op, I, w, int,   )
#else
#define ATOMIC_OPS(op, asm_op, I) \
	ATOMIC_FETCH_OP(op, asm_op, I, w, int,   ) \
	ATOMIC_FETCH_OP(op, asm_op, I, d, long, 64)
#endif

ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or,  or, i)
ATOMIC_OPS(xor, xor, i)

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
#define atomic_fetch_and		atomic_fetch_and
#define atomic_fetch_or			atomic_fetch_or
#define atomic_fetch_xor		atomic_fetch_xor

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
#define atomic64_fetch_and		atomic64_fetch_and
#define atomic64_fetch_or		atomic64_fetch_or
#define atomic64_fetch_xor		atomic64_fetch_xor
#endif

#undef ATOMIC_OPS

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

/*
 * The extra atomic operations that are constructed from one of the core
 * AMO-based operations above.  These are required to provide a full barrier,
 * which they get for free because the *_return() ops they build on are
 * fully ordered.
 */
#define ATOMIC_OP(op, func_op, comp_op, I, c_type, prefix) \
static __always_inline \
bool atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
{ \
	return atomic##prefix##_##func_op##_return(i, v) comp_op I; \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, func_op, comp_op, I) \
	ATOMIC_OP(op, func_op, comp_op, I, int,   )
#else
#define ATOMIC_OPS(op, func_op, comp_op, I) \
	ATOMIC_OP(op, func_op, comp_op, I, int,   ) \
	ATOMIC_OP(op, func_op, comp_op, I, long, 64)
#endif

ATOMIC_OPS(add_and_test, add, ==, 0)
ATOMIC_OPS(sub_and_test, sub, ==, 0)
ATOMIC_OPS(add_negative, add,  <, 0)

#undef ATOMIC_OP
#undef ATOMIC_OPS

#define ATOMIC_OP(op, func_op, I, c_type, prefix) \
static __always_inline \
void atomic##prefix##_##op(atomic##prefix##_t *v) \
{ \
	atomic##prefix##_##func_op(I, v); \
}

#define ATOMIC_FETCH_OP(op, func_op, I, c_type, prefix) \
static __always_inline \
c_type atomic##prefix##_fetch_##op##_relaxed(atomic##prefix##_t *v) \
{ \
	return atomic##prefix##_fetch_##func_op##_relaxed(I, v); \
} \
static __always_inline \
c_type atomic##prefix##_fetch_##op(atomic##prefix##_t *v) \
{ \
	return atomic##prefix##_fetch_##func_op(I, v); \
}

#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, c_type, prefix) \
static __always_inline \
c_type atomic##prefix##_##op##_return_relaxed(atomic##prefix##_t *v) \
{ \
	return atomic##prefix##_fetch_##op##_relaxed(v) c_op I; \
} \
static __always_inline \
c_type atomic##prefix##_##op##_return(atomic##prefix##_t *v) \
{ \
	return atomic##prefix##_fetch_##op(v) c_op I; \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS(op, asm_op, c_op, I) \
	ATOMIC_OP(       op, asm_op,       I, int,   ) \
	ATOMIC_FETCH_OP( op, asm_op,       I, int,   ) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, int,   )
#else
#define ATOMIC_OPS(op, asm_op, c_op, I) \
	ATOMIC_OP(       op, asm_op,       I, int,   ) \
	ATOMIC_FETCH_OP( op, asm_op,       I, int,   ) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, int,   ) \
	ATOMIC_OP(       op, asm_op,       I, long, 64) \
	ATOMIC_FETCH_OP( op, asm_op,       I, long, 64) \
	ATOMIC_OP_RETURN(op, asm_op, c_op, I, long, 64)
#endif

ATOMIC_OPS(inc, add, +,  1)
ATOMIC_OPS(dec, add, +, -1)
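
/*
 * For reference, the inc/dec family is pure sugar over the add-based ops
 * generated earlier, e.g. (32-bit case, sketch only):
 *
 *	void atomic_inc(atomic_t *v)        { atomic_add(1, v); }
 *	int  atomic_fetch_dec(atomic_t *v)  { return atomic_fetch_add(-1, v); }
 *	int  atomic_dec_return(atomic_t *v) { return atomic_fetch_dec(v) + -1; }
 */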

#define atomic_inc_return_relaxed	atomic_inc_return_relaxed
#define atomic_dec_return_relaxed	atomic_dec_return_relaxed
#define atomic_inc_return		atomic_inc_return
#define atomic_dec_return		atomic_dec_return

#define atomic_fetch_inc_relaxed	atomic_fetch_inc_relaxed
#define atomic_fetch_dec_relaxed	atomic_fetch_dec_relaxed
#define atomic_fetch_inc		atomic_fetch_inc
#define atomic_fetch_dec		atomic_fetch_dec

#ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_inc_return_relaxed	atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed	atomic64_dec_return_relaxed
#define atomic64_inc_return		atomic64_inc_return
#define atomic64_dec_return		atomic64_dec_return

#define atomic64_fetch_inc_relaxed	atomic64_fetch_inc_relaxed
#define atomic64_fetch_dec_relaxed	atomic64_fetch_dec_relaxed
#define atomic64_fetch_inc		atomic64_fetch_inc
#define atomic64_fetch_dec		atomic64_fetch_dec
#endif

#undef ATOMIC_OPS
#undef ATOMIC_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

#define ATOMIC_OP(op, func_op, comp_op, I, prefix) \
static __always_inline \
bool atomic##prefix##_##op(atomic##prefix##_t *v) \
{ \
	return atomic##prefix##_##func_op##_return(v) comp_op I; \
}

ATOMIC_OP(inc_and_test, inc, ==, 0,   )
ATOMIC_OP(dec_and_test, dec, ==, 0,   )
#ifndef CONFIG_GENERIC_ATOMIC64
ATOMIC_OP(inc_and_test, inc, ==, 0, 64)
ATOMIC_OP(dec_and_test, dec, ==, 0, 64)
#endif

#undef ATOMIC_OP

/* This is required to provide a full barrier on success. */
static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}
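
/*
 * A usage sketch (the actual wrapper lives in the generic atomic headers, so
 * treat this only as an illustration of the intended semantics):
 *
 *	// add @a to @v unless it currently equals @u; true if the add happened
 *	static inline int atomic_add_unless(atomic_t *v, int a, int u)
 *	{
 *		return __atomic_add_unless(v, a, u) != u;
 *	}
 */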

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long __atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long prev, rc;

	__asm__ __volatile__ (
		"0:	lr.d     %[p],  %[c]\n"
		"	beq      %[p],  %[u], 1f\n"
		"	add      %[rc], %[p], %[a]\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [a]"r" (a), [u]"r" (u)
		: "memory");
	return prev;
}

static __always_inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	return __atomic64_add_unless(v, a, u) != u;
}
#endif

/*
 * The inc_not_zero() helpers below are thin wrappers around the add_unless
 * primitives above and inherit their full-barrier-on-success semantics.
 */
static __always_inline int atomic_inc_not_zero(atomic_t *v)
{
	return __atomic_add_unless(v, 1, 0);
}

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long atomic64_inc_not_zero(atomic64_t *v)
{
	return atomic64_add_unless(v, 1, 0);
}
#endif

/*
 * atomic_{cmp,}xchg() and friends simply forward to the __{cmp,}xchg*()
 * helpers from <asm/cmpxchg.h>, so they provide exactly the same ordering
 * semantics as the bare {cmp,}xchg() operations.
 */
#define ATOMIC_OP(c_t, prefix, size) \
static __always_inline \
c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \
{ \
	return __xchg_relaxed(&(v->counter), n, size); \
} \
static __always_inline \
c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \
{ \
	return __xchg_acquire(&(v->counter), n, size); \
} \
static __always_inline \
c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \
{ \
	return __xchg_release(&(v->counter), n, size); \
} \
static __always_inline \
c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \
{ \
	return __xchg(&(v->counter), n, size); \
} \
static __always_inline \
c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \
				     c_t o, c_t n) \
{ \
	return __cmpxchg_relaxed(&(v->counter), o, n, size); \
} \
static __always_inline \
c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \
				     c_t o, c_t n) \
{ \
	return __cmpxchg_acquire(&(v->counter), o, n, size); \
} \
static __always_inline \
c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \
				     c_t o, c_t n) \
{ \
	return __cmpxchg_release(&(v->counter), o, n, size); \
} \
static __always_inline \
c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
{ \
	return __cmpxchg(&(v->counter), o, n, size); \
}

#ifdef CONFIG_GENERIC_ATOMIC64
#define ATOMIC_OPS() \
	ATOMIC_OP(int,   , 4)
#else
#define ATOMIC_OPS() \
	ATOMIC_OP(int,   , 4) \
	ATOMIC_OP(long, 64, 8)
#endif

ATOMIC_OPS()
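
/*
 * Usage sketch for the wrappers generated above (illustration only): a
 * typical compare-and-swap retry loop over an atomic_t counter.
 * example_saturating_inc() is a hypothetical helper, not part of this header.
 *
 *	static inline int example_saturating_inc(atomic_t *v, int max)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old < max) {
 *			int seen = atomic_cmpxchg(v, old, old + 1);
 *			if (seen == old)
 *				return 1;	// we performed the increment
 *			old = seen;		// somebody raced us; retry
 *		}
 *		return 0;			// already saturated
 *	}
 */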

#undef ATOMIC_OPS
#undef ATOMIC_OP

static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
{
	int prev, rc;

	__asm__ __volatile__ (
		"0:	lr.w     %[p],  %[c]\n"
		"	sub      %[rc], %[p], %[o]\n"
		"	bltz     %[rc], 1f\n"
		"	sc.w.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}

#define atomic_dec_if_positive(v)	atomic_sub_if_positive(v, 1)

#ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline long atomic64_sub_if_positive(atomic64_t *v, int offset)
{
	long prev, rc;

	__asm__ __volatile__ (
		"0:	lr.d     %[p],  %[c]\n"
		"	sub      %[rc], %[p], %[o]\n"
		"	bltz     %[rc], 1f\n"
		"	sc.d.rl  %[rc], %[rc], %[c]\n"
		"	bnez     %[rc], 0b\n"
		"	fence    rw, rw\n"
		"1:\n"
		: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
		: [o]"r" (offset)
		: "memory");
	return prev - offset;
}

#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(v, 1)
#endif

#endif /* _ASM_RISCV_ATOMIC_H */