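/*
 * Simple interface for atomic operations.
 */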
#ifndef QEMU_ATOMIC_H
#define QEMU_ATOMIC_H

/* Compiler barrier */
#define barrier()   ({ asm volatile("" ::: "memory"); (void)0; })
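
/* The variable that receives the old value of an atomically-accessed
 * variable must be non-qualified, because atomic builtins return the
 * value through a pointer-type argument as in __atomic_load(&var, &old, MODEL).
 *
 * This macro has to handle types smaller than int manually, because of
 * implicit promotion.  int and larger types, as well as pointers, can be
 * converted to a non-qualified type just by applying a binary operator.
 */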
#define typeof_strip_qual(expr)                                                    \
  typeof(                                                                          \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), bool) ||                          \
        __builtin_types_compatible_p(typeof(expr), const bool) ||                  \
        __builtin_types_compatible_p(typeof(expr), volatile bool) ||               \
        __builtin_types_compatible_p(typeof(expr), const volatile bool),           \
        (bool)1,                                                                   \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), signed char) ||                   \
        __builtin_types_compatible_p(typeof(expr), const signed char) ||           \
        __builtin_types_compatible_p(typeof(expr), volatile signed char) ||        \
        __builtin_types_compatible_p(typeof(expr), const volatile signed char),    \
        (signed char)1,                                                            \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), unsigned char) ||                 \
        __builtin_types_compatible_p(typeof(expr), const unsigned char) ||         \
        __builtin_types_compatible_p(typeof(expr), volatile unsigned char) ||      \
        __builtin_types_compatible_p(typeof(expr), const volatile unsigned char),  \
        (unsigned char)1,                                                          \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), signed short) ||                  \
        __builtin_types_compatible_p(typeof(expr), const signed short) ||          \
        __builtin_types_compatible_p(typeof(expr), volatile signed short) ||       \
        __builtin_types_compatible_p(typeof(expr), const volatile signed short),   \
        (signed short)1,                                                           \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), unsigned short) ||                \
        __builtin_types_compatible_p(typeof(expr), const unsigned short) ||        \
        __builtin_types_compatible_p(typeof(expr), volatile unsigned short) ||     \
        __builtin_types_compatible_p(typeof(expr), const volatile unsigned short), \
        (unsigned short)1,                                                         \
      (expr)+0))))))

#ifdef __ATOMIC_RELAXED
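
/* For C11 atomic ops */

/* Manual memory barriers
 *
 * __atomic_thread_fence is not documented to include a compiler barrier,
 * so one is added explicitly via barrier().  Without it, if a fence
 * compiled to no processor instruction, the compiler would remain free
 * to reorder memory accesses across it.
 */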
#define smp_mb()          ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); })
#define smp_mb_release()  ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); })
#define smp_mb_acquire()  ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); })
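
/* Most compilers currently treat ACQUIRE and CONSUME the same, but really
 * no processors except Alpha need a barrier here.  Leave it in if
 * using Thread Sanitizer to avoid warnings, otherwise optimize it away.
 */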
#if defined(__SANITIZE_THREAD__)
#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); })
#elif defined(__alpha__)
#define smp_read_barrier_depends()   asm volatile("mb":::"memory")
#else
#define smp_read_barrier_depends()   barrier()
#endif
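
/* Weak atomic operations prevent the compiler moving other
 * loads/stores past the atomic operation load/store.  However there is
 * no explicit memory barrier for the processor.
 *
 * The C11 memory model says that variables that are accessed from
 * different threads should at least be done with __ATOMIC_RELAXED
 * primitives or the result is undefined.  Generally this has little to
 * no effect on the generated code, but not using the atomic primitives
 * will get flagged by sanitizers as a violation.
 */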
#define atomic_read__nocheck(ptr) \
    __atomic_load_n(ptr, __ATOMIC_RELAXED)

#define atomic_read(ptr)                              \
    ({                                                \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
    atomic_read__nocheck(ptr);                        \
    })

#define atomic_set__nocheck(ptr, i) \
    __atomic_store_n(ptr, i, __ATOMIC_RELAXED)

#define atomic_set(ptr, i)  do {                      \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
    atomic_set__nocheck(ptr, i);                      \
} while(0)
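
/* See above: most compilers currently treat consume and acquire the
 * same, but this slows down atomic_rcu_read unnecessarily.
 */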
#ifdef __SANITIZE_THREAD__
#define atomic_rcu_read__nocheck(ptr, valptr)         \
    __atomic_load(ptr, valptr, __ATOMIC_CONSUME);
#else
#define atomic_rcu_read__nocheck(ptr, valptr)         \
    __atomic_load(ptr, valptr, __ATOMIC_RELAXED);     \
    smp_read_barrier_depends();
#endif

#define atomic_rcu_read(ptr)                          \
    ({                                                \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
    typeof_strip_qual(*ptr) _val;                     \
    atomic_rcu_read__nocheck(ptr, &_val);             \
    _val;                                             \
    })

#define atomic_rcu_set(ptr, i) do {                   \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);       \
} while(0)

#define atomic_load_acquire(ptr)                      \
    ({                                                \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
    typeof_strip_qual(*ptr) _val;                     \
    __atomic_load(ptr, &_val, __ATOMIC_ACQUIRE);      \
    _val;                                             \
    })

#define atomic_store_release(ptr, i)  do {            \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);       \
} while(0)
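
/* All the remaining operations are fully sequentially consistent */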
#define atomic_xchg__nocheck(ptr, i)    ({                  \
    __atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST);        \
})

#define atomic_xchg(ptr, i)    ({                           \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *));       \
    atomic_xchg__nocheck(ptr, i);                           \
})

/* Returns the eventual value, failed or not */
#define atomic_cmpxchg__nocheck(ptr, old, new)    ({                    \
    typeof_strip_qual(*ptr) _old = (old);                               \
    __atomic_compare_exchange_n(ptr, &_old, new, false,                 \
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);    \
    _old;                                                               \
})

#define atomic_cmpxchg(ptr, old, new)    ({                             \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *));                   \
    atomic_cmpxchg__nocheck(ptr, old, new);                             \
})
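
/* Illustrative use of atomic_cmpxchg() above: a classic compare-and-swap
 * retry loop.  ("counter" here is a hypothetical caller-side variable,
 * not part of this header.)
 *
 *     int old, new;
 *     do {
 *         old = atomic_read(&counter);
 *         new = old + 1;
 *     } while (atomic_cmpxchg(&counter, old, new) != old);
 */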

/* Provide shorter names for GCC atomic builtins, return old value */
#define atomic_fetch_inc(ptr)    __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_dec(ptr)    __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_or(ptr, n)  __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)

/* ...and the same operations returning the new value instead */
#define atomic_inc_fetch(ptr)    __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_dec_fetch(ptr)    __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_or_fetch(ptr, n)  __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)

/* And even shorter names that return void.  */
#define atomic_inc(ptr)    ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_dec(ptr)    ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_or(ptr, n)  ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_xor(ptr, n) ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))
#else /* __ATOMIC_RELAXED */
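
/* Fallbacks for compilers without the C11-style __atomic builtins: use the
 * older GCC __sync builtins plus hand-coded barriers for the hosts that
 * need them.
 */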

/* On x86, use a hand-coded smp_mb() when the compiler is too old to
 * provide a dependable __sync_synchronize().
 */
#if defined(__i386__) || defined(__x86_64__)
#if !QEMU_GNUC_PREREQ(4, 4)
#if defined __x86_64__
#define smp_mb()    ({ asm volatile("mfence" ::: "memory"); (void)0; })
#else
#define smp_mb()    ({ asm volatile("lock; addl $0,0(%%esp) " ::: "memory"); (void)0; })
#endif
#endif
#endif

/* Alpha does not order dependent loads in hardware, so a real barrier
 * is required between them.
 */
#ifdef __alpha__
#define smp_read_barrier_depends()   asm volatile("mb":::"memory")
#endif

#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
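
/* Because of the strongly ordered storage model, wmb() and rmb() are nops
 * here (a compiler barrier only).  QEMU doesn't do accesses to write-combining
 * memory or non-temporal load/stores from C code, so this is sufficient.
 */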
#define smp_mb_release()   barrier()
#define smp_mb_acquire()   barrier()

/* __sync_lock_test_and_set() is documented to be an acquire barrier only,
 * but the underlying exchange is a full barrier on these hosts.
 */
#define atomic_xchg(ptr, i)    (barrier(), __sync_lock_test_and_set(ptr, i))

#elif defined(_ARCH_PPC)

/*
 * We use an eieio() for wmb() on powerpc.  This assumes we don't
 * need to order cacheable and non-cacheable stores with respect to
 * each other.
 *
 * smp_mb_release() and smp_mb_acquire() use the lighter lwsync only
 * on 64-bit POWER, where it is guaranteed to be available.
 */
#define smp_wmb()   ({ asm volatile("eieio" ::: "memory"); (void)0; })
#if defined(__powerpc64__)
#define smp_mb_release()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#define smp_mb_acquire()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#else
#define smp_mb_release()   ({ asm volatile("sync" ::: "memory"); (void)0; })
#define smp_mb_acquire()   ({ asm volatile("sync" ::: "memory"); (void)0; })
#endif
#define smp_mb()    ({ asm volatile("sync" ::: "memory"); (void)0; })

#endif /* _ARCH_PPC */

/*
 * For (host) platforms we don't have explicit barrier definitions
 * for, we use the gcc __sync_synchronize() primitive to generate a
 * full barrier.  This should be safe on all platforms, though it may
 * be overkill for smp_mb_acquire() and smp_mb_release().
 */
#ifndef smp_mb
#define smp_mb()           __sync_synchronize()
#endif

#ifndef smp_mb_acquire
#define smp_mb_acquire()   __sync_synchronize()
#endif

#ifndef smp_mb_release
#define smp_mb_release()   __sync_synchronize()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends()   barrier()
#endif

/* These will only be atomic if the processor does the fetch or store
 * in a single issue memory operation
 */
#define atomic_read__nocheck(p)   (*(__typeof__(*(p)) volatile*) (p))
#define atomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i))

#define atomic_read(ptr)       atomic_read__nocheck(ptr)
#define atomic_set(ptr, i)     atomic_set__nocheck(ptr, i)

/**
 * atomic_rcu_read - reads a RCU-protected pointer to a local variable
 * inside a RCU read-side critical section.  The pointer can later be
 * safely dereferenced within the critical section.
 *
 * This ensures that the pointer copy is invariant throughout the whole
 * critical section.
 *
 * Inserts memory barriers on architectures that require them (currently only
 * Alpha) and documents which pointers are protected by RCU.
 *
 * atomic_rcu_read also includes a compiler barrier to ensure that
 * value-speculative optimizations (e.g. VSS: Value Speculation
 * Scheduling) do not perform the data read before the pointer read
 * by speculating the value of the pointer.
 *
 * Should match atomic_rcu_set(), atomic_xchg() and atomic_cmpxchg().
 */
#define atomic_rcu_read(ptr)    ({                \
    typeof(*ptr) _val = atomic_read(ptr);         \
    smp_read_barrier_depends();                   \
    _val;                                         \
})

/**
 * atomic_rcu_set - assigns (publicizes) a pointer to a new data structure
 * meant to be read by RCU read-side critical sections.
 *
 * Documents which pointers will be dereferenced by RCU read-side critical
 * sections and adds the required memory barriers on architectures requiring
 * them.  It also makes sure the compiler does not reorder code initializing
 * the data structure before its publication.
 *
 * Should match atomic_rcu_read().
 */
#define atomic_rcu_set(ptr, i)  do {              \
    smp_wmb();                                    \
    atomic_set(ptr, i);                           \
} while (0)

#define atomic_load_acquire(ptr)    ({      \
    typeof(*ptr) _val = atomic_read(ptr);   \
    smp_mb_acquire();                       \
    _val;                                   \
})

#define atomic_store_release(ptr, i)  do {  \
    smp_mb_release();                       \
    atomic_set(ptr, i);                     \
} while (0)

#ifndef atomic_xchg
#if defined(__clang__)
#define atomic_xchg(ptr, i)    __sync_swap(ptr, i)
#else
/* __sync_lock_test_and_set() is documented to be an acquire barrier only,
 * so precede it with a full barrier.
 */
#define atomic_xchg(ptr, i)    (smp_mb(), __sync_lock_test_and_set(ptr, i))
#endif
#endif
#define atomic_xchg__nocheck  atomic_xchg

/* Provide shorter names for GCC atomic builtins.  */
#define atomic_fetch_inc(ptr)    __sync_fetch_and_add(ptr, 1)
#define atomic_fetch_dec(ptr)    __sync_fetch_and_add(ptr, -1)
#define atomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n)
#define atomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n)
#define atomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n)
#define atomic_fetch_or(ptr, n)  __sync_fetch_and_or(ptr, n)
#define atomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n)

#define atomic_inc_fetch(ptr)    __sync_add_and_fetch(ptr, 1)
#define atomic_dec_fetch(ptr)    __sync_add_and_fetch(ptr, -1)
#define atomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n)
#define atomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n)
#define atomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n)
#define atomic_or_fetch(ptr, n)  __sync_or_and_fetch(ptr, n)
#define atomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n)

#define atomic_cmpxchg(ptr, old, new) __sync_val_compare_and_swap(ptr, old, new)
#define atomic_cmpxchg__nocheck(ptr, old, new)  atomic_cmpxchg(ptr, old, new)

/* And even shorter names that return void.  */
#define atomic_inc(ptr)        ((void) __sync_fetch_and_add(ptr, 1))
#define atomic_dec(ptr)        ((void) __sync_fetch_and_add(ptr, -1))
#define atomic_add(ptr, n)     ((void) __sync_fetch_and_add(ptr, n))
#define atomic_sub(ptr, n)     ((void) __sync_fetch_and_sub(ptr, n))
#define atomic_and(ptr, n)     ((void) __sync_fetch_and_and(ptr, n))
#define atomic_or(ptr, n)      ((void) __sync_fetch_and_or(ptr, n))
#define atomic_xor(ptr, n)     ((void) __sync_fetch_and_xor(ptr, n))
#endif /* __ATOMIC_RELAXED */

/* When no lighter definition was provided above, fall back to the release
 * and acquire fences, which are at least as strong as smp_wmb() and
 * smp_rmb() require.
 */
#ifndef smp_wmb
#define smp_wmb()   smp_mb_release()
#endif
#ifndef smp_rmb
#define smp_rmb()   smp_mb_acquire()
#endif

/* This is more efficient than a store plus a fence.  */
#if !defined(__SANITIZE_THREAD__)
#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
#define atomic_mb_set(ptr, i)  ((void)atomic_xchg(ptr, i))
#endif
#endif

/* atomic_mb_read/set semantics map Java volatile variables.  They are
 * less expensive on some platforms (notably POWER) than fully
 * sequentially consistent operations.
 *
 * As long as they are used as paired operations they are safe to
 * use.  See docs/atomic.txt for more discussion.
 */
#ifndef atomic_mb_read
#define atomic_mb_read(ptr)                             \
    atomic_load_acquire(ptr)
#endif

#ifndef atomic_mb_set
#define atomic_mb_set(ptr, i)  do {                     \
    atomic_store_release(ptr, i);                       \
    smp_mb();                                           \
} while(0)
#endif

#endif /* QEMU_ATOMIC_H */