/*
 * Simple interface for atomic operations.
 *
 * See docs/devel/atomics.txt for discussion about the guarantees each
 * atomic primitive is meant to provide.
 */
#ifndef QEMU_ATOMIC_H
#define QEMU_ATOMIC_H

#include "qemu/compiler.h"   /* QEMU_BUILD_BUG_ON, QEMU_GNUC_PREREQ */

/* Compiler barrier */
#define barrier() ({ asm volatile("" ::: "memory"); (void)0; })

/* The variable that receives the old value of an atomically-accessed
 * variable must be non-qualified, because atomic builtins return values
 * through a pointer-type argument as in __atomic_load(&var, &old, MODEL).
 *
 * This macro has to handle types smaller than int manually, because of
 * implicit promotion.  int and larger types, as well as pointers, can be
 * converted to a non-qualified type just by applying a binary operator.
 */
#define typeof_strip_qual(expr)                                                    \
  typeof(                                                                          \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), bool) ||                          \
        __builtin_types_compatible_p(typeof(expr), const bool) ||                  \
        __builtin_types_compatible_p(typeof(expr), volatile bool) ||               \
        __builtin_types_compatible_p(typeof(expr), const volatile bool),           \
        (bool)1,                                                                   \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), signed char) ||                   \
        __builtin_types_compatible_p(typeof(expr), const signed char) ||           \
        __builtin_types_compatible_p(typeof(expr), volatile signed char) ||        \
        __builtin_types_compatible_p(typeof(expr), const volatile signed char),    \
        (signed char)1,                                                            \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), unsigned char) ||                 \
        __builtin_types_compatible_p(typeof(expr), const unsigned char) ||         \
        __builtin_types_compatible_p(typeof(expr), volatile unsigned char) ||      \
        __builtin_types_compatible_p(typeof(expr), const volatile unsigned char),  \
        (unsigned char)1,                                                          \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), signed short) ||                  \
        __builtin_types_compatible_p(typeof(expr), const signed short) ||          \
        __builtin_types_compatible_p(typeof(expr), volatile signed short) ||       \
        __builtin_types_compatible_p(typeof(expr), const volatile signed short),   \
        (signed short)1,                                                           \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), unsigned short) ||                \
        __builtin_types_compatible_p(typeof(expr), const unsigned short) ||        \
        __builtin_types_compatible_p(typeof(expr), volatile unsigned short) ||     \
        __builtin_types_compatible_p(typeof(expr), const volatile unsigned short), \
        (unsigned short)1,                                                         \
      (expr)+0))))))
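
/* Illustration (not upstream text): typeof_strip_qual() lets the helpers
 * below declare a plain temporary even when the operand is qualified, e.g.:
 *
 *     volatile bool flag;
 *     typeof_strip_qual(flag) old;   // plain bool, so &old is usable
 *     __atomic_load(&flag, &old, __ATOMIC_RELAXED);
 */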

#ifdef __ATOMIC_RELAXED

/* Manual memory barriers.
 *
 * __atomic_thread_fence does not include a compiler barrier; instead,
 * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
 * semantics.  If smp_wmb() is a no-op, absence of the barrier means that
 * the compiler is free to reorder stores on each side of the barrier.
 * Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
 */

#define smp_mb() ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); })
#define smp_mb_release() ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); })
#define smp_mb_acquire() ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); })

/* Most compilers currently treat consume and acquire as equivalent.
 * smp_read_barrier_depends() only needs to order dependent loads:
 * ThreadSanitizer has to see an explicit consume fence to understand
 * the ordering, Alpha is the one host that needs a real hardware
 * barrier here, and everywhere else a compiler barrier suffices.
 */
#if defined(__SANITIZE_THREAD__)
#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); })
#elif defined(__alpha__)
#define smp_read_barrier_depends() asm volatile("mb":::"memory")
#else
#define smp_read_barrier_depends() barrier()
#endif

/*
 * A signal barrier forces all pending local memory ops to be observed
 * before a signal is delivered to the same thread.  In practice this is
 * exactly the same as barrier(), but since we have the correct builtin,
 * use it.
 */
#define signal_barrier() __atomic_signal_fence(__ATOMIC_SEQ_CST)

/* Sanity check that the size of an atomic operation isn't "overly large".
 * Despite the fact that e.g. i686 has 64-bit atomic operations, we do not
 * want to use them because we ought not need them, and this lets us do a
 * bit of sanity checking that other 32-bit hosts might build.
 *
 * That said, we have a problem on 64-bit ILP32 hosts in that in order to
 * sync with TCG_OVERSIZED_GUEST, this must match TCG_TARGET_REG_BITS.
 * We'd prefer not to pull in everything else TCG related, so handle those
 * few cases by hand.
 *
 * Note that x32 is fully detected with __x86_64__ + _ILP32, and that for
 * Sparc we always force the use of sparcv9 in configure.  MIPS n32 (ILP32)
 * and n64 (LP64) ABIs are both detected using __mips64.
 */
#if defined(__x86_64__) || defined(__sparc__) || defined(__mips64)
# define ATOMIC_REG_SIZE  8
#else
# define ATOMIC_REG_SIZE  sizeof(void *)
#endif

/* Weak atomic operations prevent the compiler moving other
 * loads/stores past the atomic operation load/store.  However there is
 * no explicit memory barrier for the processor.
 *
 * The C11 memory model says that variables that are accessed from
 * different threads should at least be done with __ATOMIC_RELAXED
 * primitives or the result is undefined.  Generally this has little to
 * no effect on the generated code but not using the atomic primitives
 * will get flagged by sanitizers as a violation.
 */
#define atomic_read__nocheck(ptr) \
    __atomic_load_n(ptr, __ATOMIC_RELAXED)

#define atomic_read(ptr)                                  \
    ({                                                    \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);    \
    atomic_read__nocheck(ptr);                            \
    })

#define atomic_set__nocheck(ptr, i) \
    __atomic_store_n(ptr, i, __ATOMIC_RELAXED)

#define atomic_set(ptr, i)  do {                          \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);    \
    atomic_set__nocheck(ptr, i);                          \
} while(0)
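
/* Usage sketch (illustrative; 'stats.packets' is a hypothetical field
 * written by one thread and sampled by another):
 *
 *     atomic_set(&stats.packets, n);              // writer: torn-free store
 *     unsigned m = atomic_read(&stats.packets);   // reader: no ordering
 *
 * Neither call orders surrounding accesses; use the acquire/release or
 * sequentially consistent helpers below when ordering matters.
 */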

/* ThreadSanitizer has to see a real consume load to understand the
 * dependency ordering; everywhere else a relaxed load followed by
 * smp_read_barrier_depends() generates the same or better code than
 * __ATOMIC_CONSUME on current compilers.
 */
#ifdef __SANITIZE_THREAD__
#define atomic_rcu_read__nocheck(ptr, valptr) \
    __atomic_load(ptr, valptr, __ATOMIC_CONSUME);
#else
#define atomic_rcu_read__nocheck(ptr, valptr) \
    __atomic_load(ptr, valptr, __ATOMIC_RELAXED); \
    smp_read_barrier_depends();
#endif

#define atomic_rcu_read(ptr)                              \
    ({                                                    \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);    \
    typeof_strip_qual(*ptr) _val;                         \
    atomic_rcu_read__nocheck(ptr, &_val);                 \
    _val;                                                 \
    })

#define atomic_rcu_set(ptr, i)  do {                      \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);    \
    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);           \
} while(0)
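
/* Typical RCU pointer handoff (illustrative; 'global_cfg' and 'Config'
 * are hypothetical):
 *
 *     Config *c = g_new0(Config, 1);
 *     c->value = 42;
 *     atomic_rcu_set(&global_cfg, c);      // publish after initialization
 *
 *     Config *seen = atomic_rcu_read(&global_cfg);   // reader side
 *     use(seen->value);    // dependent load ordered after the pointer load
 */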

#define atomic_load_acquire(ptr)                          \
    ({                                                    \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);    \
    typeof_strip_qual(*ptr) _val;                         \
    __atomic_load(ptr, &_val, __ATOMIC_ACQUIRE);          \
    _val;                                                 \
    })

#define atomic_store_release(ptr, i)  do {                \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);    \
    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);           \
} while(0)
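
/* Message-passing pattern (illustrative; 'msg' and 'msg_ready' are
 * hypothetical):
 *
 *     msg = compute();                          // producer
 *     atomic_store_release(&msg_ready, true);   // publish
 *
 *     if (atomic_load_acquire(&msg_ready)) {    // consumer
 *         consume(msg);   // guaranteed to see the producer's 'msg'
 *     }
 */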

/* All the remaining operations are fully sequentially consistent */

#define atomic_xchg__nocheck(ptr, i)    ({                  \
    __atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST);        \
})

#define atomic_xchg(ptr, i)    ({                           \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);      \
    atomic_xchg__nocheck(ptr, i);                           \
})

/* Returns the eventual value, failed or not */
#define atomic_cmpxchg__nocheck(ptr, old, new)    ({        \
    typeof_strip_qual(*ptr) _old = (old);                   \
    (void)__atomic_compare_exchange_n(ptr, &_old, new, false,       \
                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);  \
    _old;                                                   \
})

#define atomic_cmpxchg(ptr, old, new)    ({                 \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);      \
    atomic_cmpxchg__nocheck(ptr, old, new);                 \
})
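
/* A typical cmpxchg retry loop (illustrative): saturating increment of a
 * hypothetical counter 'hits' capped at UINT_MAX.
 *
 *     unsigned old = atomic_read(&hits);
 *     while (old != UINT_MAX) {
 *         unsigned seen = atomic_cmpxchg(&hits, old, old + 1);
 *         if (seen == old) {
 *             break;       // our increment won
 *         }
 *         old = seen;      // somebody else changed it; retry
 *     }
 */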

/* Provide shorter names for GCC atomic builtins, return old value */
#define atomic_fetch_inc(ptr)    __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_dec(ptr)    __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_or(ptr, n)  __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)

#define atomic_inc_fetch(ptr)    __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_dec_fetch(ptr)    __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_or_fetch(ptr, n)  __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)

/* And even shorter names that return void.  */
#define atomic_inc(ptr)    ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_dec(ptr)    ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_or(ptr, n)  ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_xor(ptr, n) ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))
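
/* Reference-count sketch using the void/fetch forms (illustrative;
 * 'obj' and obj_free() are hypothetical):
 *
 *     atomic_inc(&obj->refcnt);                  // take a reference
 *     if (atomic_fetch_dec(&obj->refcnt) == 1) {
 *         obj_free(obj);   // we dropped the last reference
 *     }
 */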

#else /* !__ATOMIC_RELAXED */

/* Fallback for compilers without the C11 __atomic builtins: use the
 * older GCC __sync builtins plus hand-written barriers for the hosts
 * we care about.
 */

#if defined(__i386__) || defined(__x86_64__)
#if !QEMU_GNUC_PREREQ(4, 4)
/* __sync_synchronize() expanded to nothing on x86 before GCC 4.4,
 * so emit the fence by hand there.
 */
#if defined __x86_64__
#define smp_mb() ({ asm volatile("mfence" ::: "memory"); (void)0; })
#else
#define smp_mb() ({ asm volatile("lock; addl $0,0(%%esp) " ::: "memory"); (void)0; })
#endif
#endif
#endif

/* Alpha needs a real barrier even before dependent loads.  */
#ifdef __alpha__
#define smp_read_barrier_depends() asm volatile("mb":::"memory")
#endif

#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)

/*
 * Because of the strongly ordered storage model, wmb() and rmb() are nops
 * here (a compiler barrier only).  QEMU doesn't do accesses to write-combining
 * memory or non-temporal load/stores from C code.
 */
#define smp_mb_release() barrier()
#define smp_mb_acquire() barrier()

/*
 * __sync_lock_test_and_set() is documented to be an acquire barrier only,
 * but it is a full barrier on these hosts; the leading compiler barrier
 * keeps the compiler from moving earlier accesses past the exchange.
 */
#define atomic_xchg(ptr, i)    (barrier(), __sync_lock_test_and_set(ptr, i))

#elif defined(_ARCH_PPC)

/*
 * We use an eieio() for wmb() on powerpc.  This assumes we don't
 * need to order cacheable and non-cacheable stores with respect to
 * each other.
 *
 * smp_mb_release()/smp_mb_acquire() can use the cheaper lwsync on
 * 64-bit; 32-bit falls back to a full sync, as does smp_mb().
 */
#define smp_wmb() ({ asm volatile("eieio" ::: "memory"); (void)0; })

#if defined(__powerpc64__)
#define smp_mb_release() ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#define smp_mb_acquire() ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#else
#define smp_mb_release() ({ asm volatile("sync" ::: "memory"); (void)0; })
#define smp_mb_acquire() ({ asm volatile("sync" ::: "memory"); (void)0; })
#endif
#define smp_mb() ({ asm volatile("sync" ::: "memory"); (void)0; })

#endif /* _ARCH_PPC */

/*
 * For (host) platforms we don't have explicit barrier definitions
 * for, we use the gcc __sync_synchronize() primitive to generate a
 * full barrier.  This should be safe on all platforms, though it may
 * be overkill for smp_mb_acquire() and smp_mb_release().
 */
#ifndef smp_mb
#define smp_mb() __sync_synchronize()
#endif

#ifndef smp_mb_acquire
#define smp_mb_acquire() __sync_synchronize()
#endif

#ifndef smp_mb_release
#define smp_mb_release() __sync_synchronize()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends() barrier()
#endif

#ifndef signal_barrier
#define signal_barrier() barrier()
#endif

/* These will only be atomic if the processor does the fetch or store
 * in a single issue memory operation
 */
#define atomic_read__nocheck(p)   (*(__typeof__(*(p)) volatile*) (p))
#define atomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i))

#define atomic_read(ptr)       atomic_read__nocheck(ptr)
#define atomic_set(ptr, i)     atomic_set__nocheck(ptr, i)

/**
 * atomic_rcu_read - reads a RCU-protected pointer to a local variable
 * into a RCU read-side critical section.  The pointer can later be safely
 * dereferenced within the critical section.
 *
 * This ensures that the pointer copy is invariant throughout the whole
 * critical section.
 *
 * Inserts memory barriers on architectures that require them (currently only
 * Alpha) and documents which pointers are protected by RCU.
 *
 * atomic_rcu_read also includes a compiler barrier to ensure that
 * value-speculative optimizations (e.g. VSS: Value Speculation
 * Scheduling) do not perform the data read before the pointer read
 * by speculating the value of the pointer.
 *
 * Should match atomic_rcu_set(), atomic_xchg(), atomic_cmpxchg().
 */
#define atomic_rcu_read(ptr)    ({               \
    typeof(*ptr) _val = atomic_read(ptr);        \
    smp_read_barrier_depends();                  \
    _val;                                        \
})

/**
 * atomic_rcu_set - assigns (publicizes) a pointer to a new data structure
 * meant to be read by RCU read-side critical sections.
 *
 * Documents which pointers will be dereferenced by RCU read-side critical
 * sections and adds the required memory barriers on architectures requiring
 * them.  It also makes sure the compiler does not reorder code initializing
 * the data structure before its publication.
 *
 * Should match atomic_rcu_read().
 */
#define atomic_rcu_set(ptr, i)  do {             \
    smp_wmb();                                   \
    atomic_set(ptr, i);                          \
} while (0)

#define atomic_load_acquire(ptr)    ({           \
    typeof(*ptr) _val = atomic_read(ptr);        \
    smp_mb_acquire();                            \
    _val;                                        \
})

#define atomic_store_release(ptr, i)  do {       \
    smp_mb_release();                            \
    atomic_set(ptr, i);                          \
} while (0)

#ifndef atomic_xchg
#if defined(__clang__)
#define atomic_xchg(ptr, i)    __sync_swap(ptr, i)
#else
/* __sync_lock_test_and_set() is documented to be an acquire barrier only.  */
#define atomic_xchg(ptr, i)    (smp_mb(), __sync_lock_test_and_set(ptr, i))
#endif
#endif
#define atomic_xchg__nocheck  atomic_xchg

/* Provide shorter names for GCC atomic builtins.  */
#define atomic_fetch_inc(ptr)    __sync_fetch_and_add(ptr, 1)
#define atomic_fetch_dec(ptr)    __sync_fetch_and_add(ptr, -1)
#define atomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n)
#define atomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n)
#define atomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n)
#define atomic_fetch_or(ptr, n)  __sync_fetch_and_or(ptr, n)
#define atomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n)

#define atomic_inc_fetch(ptr)    __sync_add_and_fetch(ptr, 1)
#define atomic_dec_fetch(ptr)    __sync_add_and_fetch(ptr, -1)
#define atomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n)
#define atomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n)
#define atomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n)
#define atomic_or_fetch(ptr, n)  __sync_or_and_fetch(ptr, n)
#define atomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n)

#define atomic_cmpxchg(ptr, old, new) __sync_val_compare_and_swap(ptr, old, new)
#define atomic_cmpxchg__nocheck(ptr, old, new)  atomic_cmpxchg(ptr, old, new)

/* And even shorter names that return void.  */
#define atomic_inc(ptr)        ((void) __sync_fetch_and_add(ptr, 1))
#define atomic_dec(ptr)        ((void) __sync_fetch_and_add(ptr, -1))
#define atomic_add(ptr, n)     ((void) __sync_fetch_and_add(ptr, n))
#define atomic_sub(ptr, n)     ((void) __sync_fetch_and_sub(ptr, n))
#define atomic_and(ptr, n)     ((void) __sync_fetch_and_and(ptr, n))
#define atomic_or(ptr, n)      ((void) __sync_fetch_and_or(ptr, n))
#define atomic_xor(ptr, n)     ((void) __sync_fetch_and_xor(ptr, n))

#endif /* !__ATOMIC_RELAXED */

/* smp_wmb() and smp_rmb() fall back to the release/acquire barriers
 * unless an architecture above provided something cheaper.
 */
#ifndef smp_wmb
#define smp_wmb()   smp_mb_release()
#endif
#ifndef smp_rmb
#define smp_rmb()   smp_mb_acquire()
#endif
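
/* smp_wmb()/smp_rmb() must be used in pairs (illustrative; 'data' and
 * 'flag' are hypothetical):
 *
 *     atomic_set(&data, 1);        // producer
 *     smp_wmb();
 *     atomic_set(&flag, 1);
 *
 *     if (atomic_read(&flag)) {    // consumer
 *         smp_rmb();
 *         assert(atomic_read(&data) == 1);
 *     }
 */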

/* This is more efficient than a store plus a fence.  */
#if !defined(__SANITIZE_THREAD__)
#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
#define atomic_mb_set(ptr, i)  ((void)atomic_xchg(ptr, i))
#endif
#endif

/* atomic_mb_read/set semantics map Java volatile variables.  They are
 * less expensive on some platforms (notably POWER) than fully
 * sequentially consistent operations.
 *
 * As long as they are used as paired operations they are safe to
 * use.  See docs/devel/atomics.txt for more discussion.
 */
#ifndef atomic_mb_read
#define atomic_mb_read(ptr) \
    atomic_load_acquire(ptr)
#endif

#ifndef atomic_mb_set
#define atomic_mb_set(ptr, i)  do { \
    atomic_store_release(ptr, i); \
    smp_mb(); \
} while(0)
#endif
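
/* Paired usage (illustrative; 'halt_requested' is a hypothetical flag):
 *
 *     atomic_mb_set(&halt_requested, true);    // requesting thread
 *
 *     if (atomic_mb_read(&halt_requested)) {   // CPU loop
 *         do_halt();
 *     }
 */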

/* Increment @ptr unless it is currently zero; returns the old value.
 * Useful for taking a reference only while the count is still nonzero.
 */
#define atomic_fetch_inc_nonzero(ptr) ({                                \
    typeof_strip_qual(*ptr) _oldn = atomic_read(ptr);                   \
    while (_oldn && atomic_cmpxchg(ptr, _oldn, _oldn + 1) != _oldn) {   \
        _oldn = atomic_read(ptr);                                       \
    }                                                                   \
    _oldn;                                                              \
})
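
/* Conditional reference acquisition (illustrative; 'obj' is
 * hypothetical): only take a reference if the object is still alive.
 *
 *     if (atomic_fetch_inc_nonzero(&obj->refcnt) == 0) {
 *         obj = NULL;   // already being freed; do not touch
 *     }
 */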

/* Abstractions to access atomically (i.e. "once") i64/u64 variables */
#ifdef CONFIG_ATOMIC64
static inline int64_t atomic_read_i64(const int64_t *ptr)
{
    /* use __nocheck because sizeof(void *) might be < sizeof(u64) */
    return atomic_read__nocheck(ptr);
}

static inline uint64_t atomic_read_u64(const uint64_t *ptr)
{
    return atomic_read__nocheck(ptr);
}

static inline void atomic_set_i64(int64_t *ptr, int64_t val)
{
    atomic_set__nocheck(ptr, val);
}

static inline void atomic_set_u64(uint64_t *ptr, uint64_t val)
{
    atomic_set__nocheck(ptr, val);
}

static inline void atomic64_init(void)
{
}
#else /* !CONFIG_ATOMIC64: provided out of line */
int64_t atomic_read_i64(const int64_t *ptr);
uint64_t atomic_read_u64(const uint64_t *ptr);
void atomic_set_i64(int64_t *ptr, int64_t val);
void atomic_set_u64(uint64_t *ptr, uint64_t val);
void atomic64_init(void);
#endif /* !CONFIG_ATOMIC64 */
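
/* Example (illustrative; 'dirty_pages' is a hypothetical counter): on a
 * 32-bit host a plain atomic_read() of a 64-bit variable would trip the
 * QEMU_BUILD_BUG_ON size check, so use the helpers instead:
 *
 *     uint64_t n = atomic_read_u64(&dirty_pages);
 *     atomic_set_u64(&dirty_pages, 0);
 */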

#endif /* QEMU_ATOMIC_H */