/*
 * Simple interface for atomic operations: compiler and memory
 * barriers plus atomic accessors, using the C11-style __atomic
 * builtins where available and the legacy __sync builtins otherwise.
 */
#ifndef __QEMU_ATOMIC_H
#define __QEMU_ATOMIC_H 1

#include "qemu/compiler.h"

/* Compiler barrier: keeps the compiler from reordering memory accesses
 * across this point.  It emits no fence instruction, so it constrains
 * the compiler only, not the CPU.
 */
#define barrier() ({ asm volatile("" ::: "memory"); (void)0; })

#ifndef __ATOMIC_RELAXED

/*
 * Pre-C11 compiler: provide hand-written barriers where the generic
 * fallbacks below are wrong or wasteful.  GCC releases before 4.4 do
 * not emit a hardware fence for __sync_synchronize() on x86, so
 * smp_mb() needs explicit code there.
 */
#if defined(__i386__) || defined(__x86_64__)
#if !QEMU_GNUC_PREREQ(4, 4)
#if defined __x86_64__
#define smp_mb() ({ asm volatile("mfence" ::: "memory"); (void)0; })
#else
#define smp_mb() ({ asm volatile("lock; addl $0,0(%%esp)" ::: "memory"); (void)0; })
#endif
#endif
#endif

/* Alpha can reorder even dependent loads, so data-dependent reads need
 * a real barrier there.
 */
#ifdef __alpha__
#define smp_read_barrier_depends() asm volatile("mb" ::: "memory")
#endif

#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)

/*
 * These hosts are strongly ordered: ordinary loads are not reordered
 * with other loads, and ordinary stores are not reordered with other
 * stores, so smp_wmb() and smp_rmb() only need to stop the compiler.
 */
#define smp_wmb() barrier()
#define smp_rmb() barrier()

/*
 * __sync_lock_test_and_set() is documented as an acquire barrier only,
 * but the underlying exchange instruction is a full barrier on these
 * hosts; prepend a compiler barrier so it is a full barrier at the
 * compiler level too.
 */
#define atomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i))

/* Store with sequentially consistent semantics: the exchange doubles
 * as the trailing full barrier.
 */
#define atomic_mb_set(ptr, i) ((void)atomic_xchg(ptr, i))

#elif defined(_ARCH_PPC)

/*
 * eieio orders cacheable stores against each other, which is enough
 * for smp_wmb() as long as QEMU does not access non-cacheable (e.g.
 * write-combining) memory from C code.  For smp_rmb(), the lighter
 * lwsync suffices on 64-bit implementations; 32-bit hosts fall back
 * to the full sync.
 */
#define smp_wmb() ({ asm volatile("eieio" ::: "memory"); (void)0; })
#if defined(__powerpc64__)
#define smp_rmb() ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#else
#define smp_rmb() ({ asm volatile("sync" ::: "memory"); (void)0; })
#endif
#define smp_mb() ({ asm volatile("sync" ::: "memory"); (void)0; })

#endif /* _ARCH_PPC */

#endif /* !__ATOMIC_RELAXED */

/*
 * For hosts without explicit definitions above, fall back to the GCC
 * builtins.  __sync_synchronize() emits a full barrier, which is
 * always safe, if sometimes stronger than strictly needed.
 */
#ifndef smp_mb
#define smp_mb() __sync_synchronize()
#endif

#ifndef smp_wmb
#ifdef __ATOMIC_RELEASE
/*
 * __atomic_thread_fence does not include a compiler barrier, so wrap
 * it in one on each side; the same applies to smp_rmb() and
 * smp_read_barrier_depends() below.  Without it the compiler could
 * reorder accesses around a fence that is a hardware no-op.
 */
#define smp_wmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); barrier(); })
#else
#define smp_wmb() __sync_synchronize()
#endif
#endif

#ifndef smp_rmb
#ifdef __ATOMIC_ACQUIRE
#define smp_rmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); barrier(); })
#else
#define smp_rmb() __sync_synchronize()
#endif
#endif

#ifndef smp_read_barrier_depends
#ifdef __ATOMIC_CONSUME
#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); barrier(); })
#else
#define smp_read_barrier_depends() barrier()
#endif
#endif

/* Relaxed read/write through a volatile cast: the compiler must emit
 * exactly one access and cannot reorder it with other volatile
 * accesses, but no hardware ordering is implied.
 */
#ifndef atomic_read
#define atomic_read(ptr) (*(__typeof__(*ptr) volatile*) (ptr))
#endif

#ifndef atomic_set
#define atomic_set(ptr, i) ((*(__typeof__(*ptr) volatile*) (ptr)) = (i))
#endif

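/*
 * Pairing sketch (illustrative only; "payload" and "flag" are
 * hypothetical globals, not part of this header):
 *
 *     // writer
 *     payload = 42;
 *     smp_wmb();                // order payload store before flag store
 *     atomic_set(&flag, 1);
 *
 *     // reader
 *     if (atomic_read(&flag)) {
 *         smp_rmb();            // order flag load before payload load
 *         assert(payload == 42);
 *     }
 */
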
/*
 * atomic_rcu_read - read an RCU-protected pointer
 *
 * Loads the pointer with the ordering required to dereference it
 * safely afterwards: a consume fence with C11-style builtins, or a
 * relaxed load plus smp_read_barrier_depends() otherwise (a real
 * barrier only on Alpha, which can reorder dependent loads).  Pairs
 * with atomic_rcu_set().
 */
#ifndef atomic_rcu_read
#ifdef __ATOMIC_CONSUME
#define atomic_rcu_read(ptr) ({ \
    typeof(*ptr) _val; \
    __atomic_load(ptr, &_val, __ATOMIC_CONSUME); \
    _val; \
})
#else
#define atomic_rcu_read(ptr) ({ \
    typeof(*ptr) _val = atomic_read(ptr); \
    smp_read_barrier_depends(); \
    _val; \
})
#endif
#endif

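/*
 * Reader sketch (illustrative only; "struct foo", "global_foo" and
 * use_value() are hypothetical, not part of this header):
 *
 *     struct foo { int a; };
 *     struct foo *global_foo;
 *
 *     struct foo *p = atomic_rcu_read(&global_foo);
 *     if (p) {
 *         use_value(p->a);  // safe: the pointer load is ordered
 *                           // before this dereference
 *     }
 */
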
/*
 * atomic_rcu_set - publish an RCU-protected pointer
 *
 * Stores the pointer with release semantics (or an explicit smp_wmb()
 * on pre-C11 compilers), so that readers using atomic_rcu_read() see
 * fully initialized contents behind the new pointer.
 */
#ifndef atomic_rcu_set
#ifdef __ATOMIC_RELEASE
#define atomic_rcu_set(ptr, i) do { \
    typeof(*ptr) _val = (i); \
    __atomic_store(ptr, &_val, __ATOMIC_RELEASE); \
} while (0)
#else
#define atomic_rcu_set(ptr, i) do { \
    smp_wmb(); \
    atomic_set(ptr, i); \
} while (0)
#endif
#endif

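/*
 * Matching writer sketch (same hypothetical names as the reader
 * sketch above; g_new0() is GLib's zeroing allocator):
 *
 *     struct foo *p = g_new0(struct foo, 1);
 *     p->a = 42;                      // initialize before publishing
 *     atomic_rcu_set(&global_foo, p); // readers that see p also see
 *                                     // p->a == 42
 */
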
/*
 * atomic_mb_read/atomic_mb_set - sequentially consistent load/store
 *
 * These have "Java volatile" semantics: used together, each load is
 * ordered against later accesses and each store against both earlier
 * and later ones.  The generic atomic_mb_read() is a relaxed load
 * followed by smp_rmb(); atomic_mb_set() below is smp_wmb(), a
 * relaxed store, then a full smp_mb().
 */
#ifndef atomic_mb_read
#define atomic_mb_read(ptr) ({ \
    typeof(*ptr) _val = atomic_read(ptr); \
    smp_rmb(); \
    _val; \
})
#endif

#ifndef atomic_mb_set
#define atomic_mb_set(ptr, i) do { \
    smp_wmb(); \
    atomic_set(ptr, i); \
    smp_mb(); \
} while (0)
#endif

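/*
 * Usage sketch (illustrative only; "done" and "result" are
 * hypothetical globals, not part of this header):
 *
 *     // thread A
 *     result = compute();
 *     atomic_mb_set(&done, 1);        // wmb + store + full mb
 *
 *     // thread B
 *     if (atomic_mb_read(&done)) {    // load + rmb
 *         consume(result);            // guaranteed to see A's result
 *     }
 */
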
#ifndef atomic_xchg
#if defined(__clang__)
#define atomic_xchg(ptr, i) __sync_swap(ptr, i)
#elif defined(__ATOMIC_SEQ_CST)
#define atomic_xchg(ptr, i) ({ \
    typeof(*ptr) _new = (i), _old; \
    __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \
    _old; \
})
#else
/* __sync_lock_test_and_set() is only guaranteed to be an acquire
 * barrier; prepend a full barrier to keep sequentially consistent
 * semantics.
 */
#define atomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i))
#endif
#endif

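/*
 * Sketch of a test-and-set lock built on atomic_xchg() (illustrative
 * only; QEMU's real locks live elsewhere):
 *
 *     static int locked;
 *
 *     void spin_lock(void)
 *     {
 *         while (atomic_xchg(&locked, 1)) {
 *             // busy-wait; atomic_xchg() is a full barrier, so the
 *             // critical section cannot move above the acquisition
 *         }
 *     }
 *
 *     void spin_unlock(void)
 *     {
 *         atomic_mb_set(&locked, 0);  // barrier first, so critical-
 *                                     // section stores stay inside
 *     }
 */
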
/* Shorter names for GCC's atomic builtins; these return the value the
 * location held before the operation.
 */
#define atomic_fetch_inc(ptr) __sync_fetch_and_add(ptr, 1)
#define atomic_fetch_dec(ptr) __sync_fetch_and_add(ptr, -1)
#define atomic_fetch_add __sync_fetch_and_add
#define atomic_fetch_sub __sync_fetch_and_sub
#define atomic_fetch_and __sync_fetch_and_and
#define atomic_fetch_or __sync_fetch_and_or
#define atomic_cmpxchg __sync_val_compare_and_swap

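/*
 * Sketch of a compare-and-swap retry loop (illustrative only;
 * "counter" is a hypothetical global):
 *
 *     static int counter;
 *
 *     void add_saturating(int n, int max)
 *     {
 *         int old, new;
 *         do {
 *             old = atomic_read(&counter);
 *             new = old + n > max ? max : old + n;
 *         } while (atomic_cmpxchg(&counter, old, new) != old);
 *     }
 *
 * atomic_cmpxchg() returns the previous value, so the loop retries
 * whenever another thread modified the location in between.
 */
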
/* The same operations, with the return value discarded. */
#define atomic_inc(ptr) ((void) __sync_fetch_and_add(ptr, 1))
#define atomic_dec(ptr) ((void) __sync_fetch_and_add(ptr, -1))
#define atomic_add(ptr, n) ((void) __sync_fetch_and_add(ptr, n))
#define atomic_sub(ptr, n) ((void) __sync_fetch_and_sub(ptr, n))
#define atomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n))
#define atomic_or(ptr, n) ((void) __sync_fetch_and_or(ptr, n))

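/*
 * Reference-counting sketch (illustrative only; "struct obj" and
 * obj_free() are hypothetical):
 *
 *     void obj_ref(struct obj *o)
 *     {
 *         atomic_inc(&o->refcnt);     // old value not needed
 *     }
 *
 *     void obj_unref(struct obj *o)
 *     {
 *         // atomic_fetch_dec() returns the pre-decrement value, so
 *         // exactly one caller observes 1 and frees the object
 *         if (atomic_fetch_dec(&o->refcnt) == 1) {
 *             obj_free(o);
 *         }
 *     }
 */
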
#endif /* __QEMU_ATOMIC_H */