/*
 * Simple interface for atomic operations.
 */
#ifndef __QEMU_ATOMIC_H
#define __QEMU_ATOMIC_H 1

/* Compiler barrier: keeps the compiler from reordering or caching memory
 * accesses across this point; it emits no processor instruction.
 */
#define barrier() ({ asm volatile("" ::: "memory"); (void)0; })

/* If the compiler provides the C11-style __atomic builtins (detected through
 * __ATOMIC_RELAXED), build everything on top of them.  barrier() is placed on
 * both sides of __atomic_thread_fence() so that the compiler cannot move
 * plain loads and stores across the fence.
 */
#ifdef __ATOMIC_RELAXED

#define smp_mb() ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); barrier(); })
#define smp_wmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); barrier(); })
#define smp_rmb() ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); barrier(); })

#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); barrier(); })
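
/* Illustrative pairing sketch (not part of the original header): smp_wmb()
 * in a producer pairs with smp_rmb() in a consumer.  With hypothetical
 * shared variables "data" and "ready" (atomic_read()/atomic_set() are
 * defined below):
 *
 *     producer:                        consumer:
 *         data = 42;                       if (atomic_read(&ready)) {
 *         smp_wmb();                           smp_rmb();
 *         atomic_set(&ready, 1);               use(data);
 *                                          }
 */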

/* Weak atomic access and manual memory barriers.
 *
 * atomic_read() and atomic_set() keep the compiler from moving other loads
 * and stores past the access, but they emit no processor barrier.  The
 * QEMU_BUILD_BUG_ON rejects types wider than a pointer, which could not be
 * accessed atomically.
 */
#define atomic_read(ptr)                              \
    ({                                                \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
    typeof(*ptr) _val;                                \
    __atomic_load(ptr, &_val, __ATOMIC_RELAXED);      \
    _val;                                             \
    })

#define atomic_set(ptr, i)  do {                      \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
    typeof(*ptr) _val = (i);                          \
    __atomic_store(ptr, &_val, __ATOMIC_RELAXED);     \
} while(0)
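
/* Illustrative sketch (not part of the original header): atomic_read() and
 * atomic_set() only guarantee that the access itself is not torn; they
 * impose no ordering.  A hypothetical statistics counter written by a
 * single thread and sampled by another could look like:
 *
 *     static int req_count;
 *     // writer:  atomic_set(&req_count, atomic_read(&req_count) + 1);
 *     // reader:  printf("requests: %d\n", atomic_read(&req_count));
 *
 * The writer above is not a single atomic read-modify-write; if several
 * threads update the counter, use atomic_inc() instead.
 */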

/* RCU accessors: atomic_rcu_read() reads a pointer that a writer publishes
 * with atomic_rcu_set().  The consume load orders the read before any
 * dereference of the pointer; the release store orders the initialization
 * of the pointed-to data before its publication.
 */
#define atomic_rcu_read(ptr)                          \
    ({                                                \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
    typeof(*ptr) _val;                                \
    __atomic_load(ptr, &_val, __ATOMIC_CONSUME);      \
    _val;                                             \
    })

#define atomic_rcu_set(ptr, i)  do {                  \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
    typeof(*ptr) _val = (i);                          \
    __atomic_store(ptr, &_val, __ATOMIC_RELEASE);     \
} while(0)
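
/* Illustrative sketch (not part of the original header): a writer fully
 * initializes a structure before publishing it, a reader loads the pointer
 * once before dereferencing it (struct config, global_config and use() are
 * hypothetical):
 *
 *     // writer
 *     struct config *c = g_new0(struct config, 1);
 *     c->value = 42;                      // initialize first ...
 *     atomic_rcu_set(&global_config, c);  // ... then publish
 *
 *     // reader
 *     struct config *c = atomic_rcu_read(&global_config);
 *     if (c) {
 *         use(c->value);
 *     }
 */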

/* atomic_mb_read() and atomic_mb_set() give a load/store pair roughly the
 * semantics of a Java volatile access.  On most hosts they map directly to
 * sequentially consistent __atomic accesses; on POWER, where those are
 * expensive, they are built from relaxed accesses plus explicit barriers.
 */
#if defined(_ARCH_PPC)
#define atomic_mb_read(ptr)                           \
    ({                                                \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
    typeof(*ptr) _val;                                \
    __atomic_load(ptr, &_val, __ATOMIC_RELAXED);      \
    smp_rmb();                                        \
    _val;                                             \
    })

#define atomic_mb_set(ptr, i)  do {                   \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
    typeof(*ptr) _val = (i);                          \
    smp_wmb();                                        \
    __atomic_store(ptr, &_val, __ATOMIC_RELAXED);     \
    smp_mb();                                         \
} while(0)
#else
#define atomic_mb_read(ptr)                           \
    ({                                                \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
    typeof(*ptr) _val;                                \
    __atomic_load(ptr, &_val, __ATOMIC_SEQ_CST);      \
    _val;                                             \
    })

#define atomic_mb_set(ptr, i)  do {                   \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
    typeof(*ptr) _val = (i);                          \
    __atomic_store(ptr, &_val, __ATOMIC_SEQ_CST);     \
} while(0)
#endif
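
/* Illustrative sketch (not part of the original header): atomic_mb_set()
 * pairs with atomic_mb_read().  With a hypothetical request flag:
 *
 *     requester:                        responder:
 *         req.arg = x;                      if (atomic_mb_read(&req_pending)) {
 *         atomic_mb_set(&req_pending, 1);       handle(req.arg);
 *                                               atomic_mb_set(&req_pending, 0);
 *                                           }
 */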

/* The remaining operations are fully sequentially consistent. */

#define atomic_xchg(ptr, i)    ({                     \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
    typeof(*ptr) _new = (i), _old;                    \
    __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \
    _old;                                             \
})

/* Returns the value that was actually found in memory, whether or not the
 * exchange succeeded.
 */
#define atomic_cmpxchg(ptr, old, new)                 \
    ({                                                \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
    typeof(*ptr) _old = (old), _new = (new);          \
    __atomic_compare_exchange(ptr, &_old, &_new, false,               \
                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);    \
    _old;                                             \
    })
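
/* Illustrative sketch (not part of the original header): because
 * atomic_cmpxchg() returns the observed value, the classic CAS loop is
 * (counter and MAX are hypothetical):
 *
 *     int old, new;
 *     do {
 *         old = atomic_read(&counter);
 *         new = old < MAX ? old + 1 : old;
 *     } while (atomic_cmpxchg(&counter, old, new) != old);
 */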

/* Shorter names for the __atomic read-modify-write builtins; these return
 * the old value.
 */
#define atomic_fetch_inc(ptr) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_dec(ptr) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_or(ptr, n) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)

/* And even shorter names that return void. */
#define atomic_inc(ptr) ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_dec(ptr) ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_or(ptr, n) ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
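
/* Illustrative sketch (not part of the original header): the void-returning
 * forms are for when only the side effect matters, e.g. a hypothetical
 * reference count:
 *
 *     atomic_inc(&obj->refcnt);                     // take a reference
 *     if (atomic_fetch_dec(&obj->refcnt) == 1) {    // dropped the last one
 *         free_object(obj);
 *     }
 */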

#else  /* !__ATOMIC_RELAXED */

/* Fallbacks for compilers without the C11 __atomic builtins: the older
 * __sync builtins plus hand-written, architecture-specific barriers.
 */

/* On x86 with GCC older than 4.4, do not rely on __sync_synchronize();
 * emit an explicit full barrier instead.
 */
#if defined(__i386__) || defined(__x86_64__)
#if !QEMU_GNUC_PREREQ(4, 4)
#if defined __x86_64__
#define smp_mb() ({ asm volatile("mfence" ::: "memory"); (void)0; })
#else
#define smp_mb() ({ asm volatile("lock; addl $0,0(%%esp) " ::: "memory"); (void)0; })
#endif
#endif
#endif

/* Alpha can reorder dependent loads, so smp_read_barrier_depends() must be
 * a real barrier there.
 */
#ifdef __alpha__
#define smp_read_barrier_depends() asm volatile("mb":::"memory")
#endif

#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)

/* These hosts have a strongly ordered memory model, so smp_wmb() and
 * smp_rmb() only need to stop compiler reordering (QEMU does not use
 * write-combining memory or non-temporal accesses from C code).
 */
#define smp_wmb() barrier()
#define smp_rmb() barrier()

/* __sync_lock_test_and_set() is documented as an acquire barrier only, but
 * it behaves as a full barrier on these hosts; the leading barrier() keeps
 * the compiler from moving earlier stores past the exchange.
 */
#define atomic_xchg(ptr, i) (barrier(), __sync_lock_test_and_set(ptr, i))

/* The exchange above is already a full barrier, so it can implement the
 * sequentially consistent store directly.
 */
#define atomic_mb_set(ptr, i) ((void)atomic_xchg(ptr, i))

#elif defined(_ARCH_PPC)

/* On POWER, "eieio" is sufficient for smp_wmb() as long as cacheable and
 * non-cacheable stores do not have to be ordered against each other.
 * 64-bit processors can use the lighter "lwsync" for smp_rmb(); 32-bit
 * ones fall back to a full "sync".
 */
#define smp_wmb() ({ asm volatile("eieio" ::: "memory"); (void)0; })
#if defined(__powerpc64__)
#define smp_rmb() ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#else
#define smp_rmb() ({ asm volatile("sync" ::: "memory"); (void)0; })
#endif
#define smp_mb() ({ asm volatile("sync" ::: "memory"); (void)0; })

#endif

/* For hosts without an explicit barrier definition above, fall back to the
 * GCC __sync_synchronize() primitive, which generates a full barrier.  That
 * is safe everywhere, although it may be stronger than necessary for
 * smp_wmb() and smp_rmb().
 */
#ifndef smp_mb
#define smp_mb() __sync_synchronize()
#endif

#ifndef smp_wmb
#define smp_wmb() __sync_synchronize()
#endif

#ifndef smp_rmb
#define smp_rmb() __sync_synchronize()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends() barrier()
#endif

/* These are only atomic if the processor performs the load or store in a
 * single memory operation.
 */
#define atomic_read(ptr) (*(__typeof__(*ptr) volatile*) (ptr))
#define atomic_set(ptr, i) ((*(__typeof__(*ptr) volatile*) (ptr)) = (i))

/**
 * atomic_rcu_read - read an RCU-protected pointer
 *
 * Copies the pointer into a local variable exactly once, so that it stays
 * invariant for the whole read-side critical section, and inserts the
 * dependency barrier needed on Alpha before the pointer is dereferenced.
 *
 * Should be paired with atomic_rcu_set(), atomic_xchg() or atomic_cmpxchg()
 * on the writer side.
 */
#define atomic_rcu_read(ptr)    ({                \
    typeof(*ptr) _val = atomic_read(ptr);         \
    smp_read_barrier_depends();                   \
    _val;                                         \
})

/**
 * atomic_rcu_set - publish an RCU-protected pointer
 *
 * Makes the stores that initialize the pointed-to data globally visible
 * before the pointer itself, so readers that use atomic_rcu_read() never
 * see a partially initialized structure.
 */
#define atomic_rcu_set(ptr, i)  do {              \
    smp_wmb();                                    \
    atomic_set(ptr, i);                           \
} while (0)

/* atomic_mb_read() and atomic_mb_set() give roughly the semantics of Java
 * volatile accesses: the store is preceded by a write barrier and followed
 * by a full barrier, the load is followed by a read barrier.  They are
 * meant to be used as a pair.
 */
#define atomic_mb_read(ptr)    ({                 \
    typeof(*ptr) _val = atomic_read(ptr);         \
    smp_rmb();                                    \
    _val;                                         \
})

#ifndef atomic_mb_set
#define atomic_mb_set(ptr, i)  do {               \
    smp_wmb();                                    \
    atomic_set(ptr, i);                           \
    smp_mb();                                     \
} while (0)
#endif

#ifndef atomic_xchg
#if defined(__clang__)
#define atomic_xchg(ptr, i) __sync_swap(ptr, i)
#else
/* __sync_lock_test_and_set() is documented to be an acquire barrier only. */
#define atomic_xchg(ptr, i) (smp_mb(), __sync_lock_test_and_set(ptr, i))
#endif
#endif

/* Shorter names for the __sync read-modify-write builtins; these return the
 * old value.
 */
#define atomic_fetch_inc(ptr)  __sync_fetch_and_add(ptr, 1)
#define atomic_fetch_dec(ptr)  __sync_fetch_and_add(ptr, -1)
#define atomic_fetch_add       __sync_fetch_and_add
#define atomic_fetch_sub       __sync_fetch_and_sub
#define atomic_fetch_and       __sync_fetch_and_and
#define atomic_fetch_or        __sync_fetch_and_or
#define atomic_cmpxchg         __sync_val_compare_and_swap

/* And even shorter names that return void. */
#define atomic_inc(ptr) ((void) __sync_fetch_and_add(ptr, 1))
#define atomic_dec(ptr) ((void) __sync_fetch_and_add(ptr, -1))
#define atomic_add(ptr, n) ((void) __sync_fetch_and_add(ptr, n))
#define atomic_sub(ptr, n) ((void) __sync_fetch_and_sub(ptr, n))
#define atomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n))
#define atomic_or(ptr, n) ((void) __sync_fetch_and_or(ptr, n))

#endif /* !__ATOMIC_RELAXED */
#endif /* __QEMU_ATOMIC_H */