/*
 * Simple interface for atomic operations.
 *
 * See docs/devel/atomics.txt for a discussion of the guarantees each
 * atomic primitive is meant to provide.
 */
#ifndef QEMU_ATOMIC_H
#define QEMU_ATOMIC_H

/* Compiler barrier: keeps the compiler from reordering or caching memory
 * accesses across this point.  It emits no CPU instruction.
 */
#define barrier()   ({ asm volatile("" ::: "memory"); (void)0; })

/* The variable that receives the old value of an atomically-accessed
 * variable must be non-qualified, because atomic builtins return values
 * through a pointer-type argument as in __atomic_load(&var, &old, MODEL).
 *
 * This macro has to handle types smaller than int manually, because of
 * implicit promotion.  int and larger types, as well as pointers, can be
 * converted to a non-qualified type just by applying a binary operator.
 */
#define typeof_strip_qual(expr)                                                    \
  typeof(                                                                          \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), bool) ||                          \
        __builtin_types_compatible_p(typeof(expr), const bool) ||                  \
        __builtin_types_compatible_p(typeof(expr), volatile bool) ||               \
        __builtin_types_compatible_p(typeof(expr), const volatile bool),           \
        (bool)1,                                                                   \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), signed char) ||                   \
        __builtin_types_compatible_p(typeof(expr), const signed char) ||           \
        __builtin_types_compatible_p(typeof(expr), volatile signed char) ||        \
        __builtin_types_compatible_p(typeof(expr), const volatile signed char),    \
        (signed char)1,                                                            \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), unsigned char) ||                 \
        __builtin_types_compatible_p(typeof(expr), const unsigned char) ||         \
        __builtin_types_compatible_p(typeof(expr), volatile unsigned char) ||      \
        __builtin_types_compatible_p(typeof(expr), const volatile unsigned char),  \
        (unsigned char)1,                                                          \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), signed short) ||                  \
        __builtin_types_compatible_p(typeof(expr), const signed short) ||          \
        __builtin_types_compatible_p(typeof(expr), volatile signed short) ||       \
        __builtin_types_compatible_p(typeof(expr), const volatile signed short),   \
        (signed short)1,                                                           \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), unsigned short) ||                \
        __builtin_types_compatible_p(typeof(expr), const unsigned short) ||        \
        __builtin_types_compatible_p(typeof(expr), volatile unsigned short) ||     \
        __builtin_types_compatible_p(typeof(expr), const volatile unsigned short), \
        (unsigned short)1,                                                         \
      (expr)+0))))))
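
/* Example (illustrative, not part of the original header): given
 *
 *     volatile bool flag;
 *
 * the declaration
 *
 *     typeof_strip_qual(flag) tmp;
 *
 * gives tmp the unqualified type bool, so &tmp can be passed as the value
 * pointer of __atomic_load(&flag, &tmp, ...), as the builtin requires.
 */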

#ifdef __ATOMIC_RELAXED
/* For C11-style __atomic builtins */

/* Manual memory barriers.
 *
 * __atomic_thread_fence does not include a compiler barrier, so pair it
 * with an explicit one to keep the compiler from reordering accesses
 * around the fence.
 */

#define smp_mb()          ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); })
#define smp_mb_release()  ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); })
#define smp_mb_acquire()  ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); })

/* Dependent-load ("consume") barrier.  Most compilers treat consume as
 * acquire, and only Alpha needs a real barrier for dependent loads.  Keep
 * the consume fence under ThreadSanitizer so it can track the ordering.
 */
#if defined(__SANITIZE_THREAD__)
#define smp_read_barrier_depends()   ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); })
#elif defined(__alpha__)
#define smp_read_barrier_depends()   asm volatile("mb":::"memory")
#else
#define smp_read_barrier_depends()   barrier()
#endif

/* Sanity-check the size of atomically accessed variables: accesses wider
 * than the host's native register size cannot be assumed to be atomic.
 * x86-64 (including the ILP32 x32 ABI) always has 64-bit registers, and
 * SPARC hosts are built as 64-bit sparcv9; everywhere else use the
 * pointer size.
 */
#if defined(__x86_64__) || defined(__sparc__)
# define ATOMIC_REG_SIZE  8
#else
# define ATOMIC_REG_SIZE  sizeof(void *)
#endif

/* Relaxed ("weak") atomic accesses: the load or store itself is atomic,
 * but no ordering against other memory accesses is implied.  The checked
 * variants reject operands wider than ATOMIC_REG_SIZE at build time.
 */
#define atomic_read__nocheck(ptr) \
    __atomic_load_n(ptr, __ATOMIC_RELAXED)

#define atomic_read(ptr)                               \
    ({                                                 \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
    atomic_read__nocheck(ptr);                         \
    })

#define atomic_set__nocheck(ptr, i) \
    __atomic_store_n(ptr, i, __ATOMIC_RELAXED)

#define atomic_set(ptr, i)  do {                       \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
    atomic_set__nocheck(ptr, i);                       \
} while(0)

/* RCU-style accessors: atomic_rcu_read() pairs with atomic_rcu_set() so
 * that a reader which observes the new pointer also observes the stores
 * that initialized the pointed-to object.  Consume ordering is enough on
 * the read side, but most compilers upgrade consume to acquire; use a
 * relaxed load plus smp_read_barrier_depends() instead, except under
 * ThreadSanitizer, which needs the real consume fence to track ordering.
 */
#ifdef __SANITIZE_THREAD__
#define atomic_rcu_read__nocheck(ptr, valptr)            \
    __atomic_load(ptr, valptr, __ATOMIC_CONSUME);
#else
#define atomic_rcu_read__nocheck(ptr, valptr)            \
    __atomic_load(ptr, valptr, __ATOMIC_RELAXED);        \
    smp_read_barrier_depends();
#endif

#define atomic_rcu_read(ptr)                           \
    ({                                                 \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
    typeof_strip_qual(*ptr) _val;                      \
    atomic_rcu_read__nocheck(ptr, &_val);              \
    _val;                                              \
    })

#define atomic_rcu_set(ptr, i) do {                    \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);        \
} while(0)
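
/* Example usage (illustrative sketch, not part of the original header):
 * publish a fully initialized object and read it from another thread.
 *
 *     struct Foo { int a; };
 *     static struct Foo *global_foo;
 *
 *     // writer thread
 *     struct Foo *f = g_new0(struct Foo, 1);
 *     f->a = 42;
 *     atomic_rcu_set(&global_foo, f);   // release: readers see a == 42
 *
 *     // reader thread
 *     struct Foo *p = atomic_rcu_read(&global_foo);
 *     if (p) {
 *         use(p->a);                    // use() is a placeholder
 *     }
 */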

#define atomic_load_acquire(ptr)                       \
    ({                                                 \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
    typeof_strip_qual(*ptr) _val;                      \
    __atomic_load(ptr, &_val, __ATOMIC_ACQUIRE);       \
    _val;                                              \
    })

#define atomic_store_release(ptr, i)  do {             \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);        \
} while(0)
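
/* Example usage (illustrative, not part of the original header): a release
 * store pairs with an acquire load to hand data off between threads.
 *
 *     static int data;
 *     static bool ready;
 *
 *     // producer thread
 *     data = 123;
 *     atomic_store_release(&ready, true);
 *
 *     // consumer thread
 *     if (atomic_load_acquire(&ready)) {
 *         assert(data == 123);   // guaranteed to see the producer's store
 *     }
 */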

/* All the remaining operations are fully sequentially consistent. */

#define atomic_xchg__nocheck(ptr, i)    ({             \
    __atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST);   \
})

#define atomic_xchg(ptr, i)    ({                      \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
    atomic_xchg__nocheck(ptr, i);                      \
})

/* Returns the value that was in *ptr before the operation, whether or not
 * the exchange succeeded.
 */
#define atomic_cmpxchg__nocheck(ptr, old, new)    ({                       \
    typeof_strip_qual(*ptr) _old = (old);                                  \
    (void)__atomic_compare_exchange_n(ptr, &_old, new, false,              \
                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
    _old;                                                                  \
})

#define atomic_cmpxchg(ptr, old, new)    ({            \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
    atomic_cmpxchg__nocheck(ptr, old, new);            \
})
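
/* Example usage (illustrative, not part of the original header): the
 * classic compare-and-swap retry loop, here incrementing a hypothetical
 * counter only while it is below a hypothetical MAX_COUNT.
 *
 *     int old = atomic_read(&counter);
 *     while (old < MAX_COUNT) {
 *         int seen = atomic_cmpxchg(&counter, old, old + 1);
 *         if (seen == old) {
 *             break;         // our increment was applied
 *         }
 *         old = seen;        // lost the race; retry with the new value
 *     }
 */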

/* Shorter names for the sequentially consistent builtins, returning the
 * value the variable had before the operation.
 */
#define atomic_fetch_inc(ptr)    __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_dec(ptr)    __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_or(ptr, n)  __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)

/* The _fetch variants return the value after the operation. */
#define atomic_inc_fetch(ptr)    __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_dec_fetch(ptr)    __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_or_fetch(ptr, n)  __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)

/* And even shorter names that return void. */
#define atomic_inc(ptr)    ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_dec(ptr)    ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_or(ptr, n)  ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_xor(ptr, n) ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))

#else /* !__ATOMIC_RELAXED: fall back to the older __sync builtins */

/* On x86, prefer an explicit instruction for the full barrier: GCC versions
 * before 4.4 did not reliably emit one for __sync_synchronize() on 32-bit
 * x86.
 */
#if defined(__i386__) || defined(__x86_64__)
#if !QEMU_GNUC_PREREQ(4, 4)
#if defined __x86_64__
#define smp_mb()    ({ asm volatile("mfence" ::: "memory"); (void)0; })
#else
#define smp_mb()    ({ asm volatile("lock; addl $0,0(%%esp) " ::: "memory"); (void)0; })
#endif
#endif
#endif

#ifdef __alpha__
#define smp_read_barrier_depends()   asm volatile("mb":::"memory")
#endif

#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)

/* Because of the strongly ordered storage model, smp_mb_release() and
 * smp_mb_acquire() only need to be compiler barriers here.
 */
#define smp_mb_release()   barrier()
#define smp_mb_acquire()   barrier()

/* __sync_lock_test_and_set() is documented to be an acquire barrier only,
 * but it is a full barrier on these hosts, so a compiler barrier in front
 * of it is all that is needed.
 */
#define atomic_xchg(ptr, i)    (barrier(), __sync_lock_test_and_set(ptr, i))

#elif defined(_ARCH_PPC)

/* eieio is enough for smp_wmb() because only cacheable memory is ordered
 * from C code here.  Acquire/release barriers can use the lighter lwsync
 * on 64-bit PowerPC; 32-bit hosts and the full barrier use sync.
 */
#define smp_wmb()   ({ asm volatile("eieio" ::: "memory"); (void)0; })

#if defined(__powerpc64__)
#define smp_mb_release()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#define smp_mb_acquire()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#else
#define smp_mb_release()   ({ asm volatile("sync" ::: "memory"); (void)0; })
#define smp_mb_acquire()   ({ asm volatile("sync" ::: "memory"); (void)0; })
#endif
#define smp_mb()    ({ asm volatile("sync" ::: "memory"); (void)0; })

#endif /* _ARCH_PPC */

/* For any host not handled above, fall back to __sync_synchronize() for the
 * memory barriers; it is a full barrier everywhere, if possibly stronger
 * than necessary for smp_mb_acquire()/smp_mb_release().
 */
#ifndef smp_mb
#define smp_mb()           __sync_synchronize()
#endif

#ifndef smp_mb_acquire
#define smp_mb_acquire()   __sync_synchronize()
#endif

#ifndef smp_mb_release
#define smp_mb_release()   __sync_synchronize()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends()   barrier()
#endif

/* These will only be atomic if the processor does the fetch or store in a
 * single issue memory operation.
 */
#define atomic_read__nocheck(p)   (*(__typeof__(*(p)) volatile*) (p))
#define atomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i))

#define atomic_read(ptr)       atomic_read__nocheck(ptr)
#define atomic_set(ptr, i)     atomic_set__nocheck(ptr,i)

/* atomic_rcu_read() is similar to the Linux kernel's rcu_dereference(): it
 * loads a pointer atomically and orders subsequent loads of the pointed-to
 * data after it, relying on smp_read_barrier_depends() for the (Alpha-only)
 * dependency barrier.
 */
#define atomic_rcu_read(ptr)    ({                \
    typeof(*ptr) _val = atomic_read(ptr);         \
    smp_read_barrier_depends();                   \
    _val;                                         \
})

/* atomic_rcu_set() is similar to the Linux kernel's rcu_assign_pointer():
 * the write barrier makes sure the stores that initialize the new object
 * are visible before the pointer to it is published.
 */
#define atomic_rcu_set(ptr, i) do {               \
    smp_wmb();                                    \
    atomic_set(ptr, i);                           \
} while (0)

#define atomic_load_acquire(ptr)    ({            \
    typeof(*ptr) _val = atomic_read(ptr);         \
    smp_mb_acquire();                             \
    _val;                                         \
})

#define atomic_store_release(ptr, i)  do {        \
    smp_mb_release();                             \
    atomic_set(ptr, i);                           \
} while (0)

#ifndef atomic_xchg
#if defined(__clang__)
#define atomic_xchg(ptr, i)    __sync_swap(ptr, i)
#else
/* __sync_lock_test_and_set() is documented to be an acquire barrier only,
 * so issue a full barrier before it.
 */
#define atomic_xchg(ptr, i)    (smp_mb(), __sync_lock_test_and_set(ptr, i))
#endif
#endif
#define atomic_xchg__nocheck  atomic_xchg

/* Shorter names for the __sync builtins, returning the value the variable
 * had before the operation.
 */
#define atomic_fetch_inc(ptr)    __sync_fetch_and_add(ptr, 1)
#define atomic_fetch_dec(ptr)    __sync_fetch_and_add(ptr, -1)
#define atomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n)
#define atomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n)
#define atomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n)
#define atomic_fetch_or(ptr, n)  __sync_fetch_and_or(ptr, n)
#define atomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n)

/* The _fetch variants return the value after the operation. */
#define atomic_inc_fetch(ptr)    __sync_add_and_fetch(ptr, 1)
#define atomic_dec_fetch(ptr)    __sync_add_and_fetch(ptr, -1)
#define atomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n)
#define atomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n)
#define atomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n)
#define atomic_or_fetch(ptr, n)  __sync_or_and_fetch(ptr, n)
#define atomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n)

#define atomic_cmpxchg(ptr, old, new)          __sync_val_compare_and_swap(ptr, old, new)
#define atomic_cmpxchg__nocheck(ptr, old, new) atomic_cmpxchg(ptr, old, new)

/* And even shorter names that return void. */
#define atomic_inc(ptr)    ((void) __sync_fetch_and_add(ptr, 1))
#define atomic_dec(ptr)    ((void) __sync_fetch_and_add(ptr, -1))
#define atomic_add(ptr, n) ((void) __sync_fetch_and_add(ptr, n))
#define atomic_sub(ptr, n) ((void) __sync_fetch_and_sub(ptr, n))
#define atomic_and(ptr, n) ((void) __sync_fetch_and_and(ptr, n))
#define atomic_or(ptr, n)  ((void) __sync_fetch_and_or(ptr, n))
#define atomic_xor(ptr, n) ((void) __sync_fetch_and_xor(ptr, n))

#endif /* __ATOMIC_RELAXED */

#ifndef smp_wmb
#define smp_wmb()   smp_mb_release()
#endif
#ifndef smp_rmb
#define smp_rmb()   smp_mb_acquire()
#endif

/* On these hosts an atomic exchange is cheaper than a store followed by a
 * full fence, so use it for atomic_mb_set(); skip this shortcut under
 * ThreadSanitizer.
 */
#if !defined(__SANITIZE_THREAD__)
#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
#define atomic_mb_set(ptr, i)  ((void)atomic_xchg(ptr, i))
#endif
#endif

/* atomic_mb_read() and atomic_mb_set() are sequentially consistent as long
 * as they are always used in pairs on a given variable; they are cheaper
 * than fully sequentially consistent operations on some hosts.
 */
#ifndef atomic_mb_read
#define atomic_mb_read(ptr)                             \
    atomic_load_acquire(ptr)
#endif

#ifndef atomic_mb_set
#define atomic_mb_set(ptr, i)  do {                     \
    atomic_store_release(ptr, i);                       \
    smp_mb();                                           \
} while(0)
#endif

/* Increment *ptr only if it is non-zero; returns the pre-increment value
 * (zero means the increment was not performed).
 */
#define atomic_fetch_inc_nonzero(ptr) ({                                \
    typeof_strip_qual(*ptr) _oldn = atomic_read(ptr);                   \
    while (_oldn && atomic_cmpxchg(ptr, _oldn, _oldn + 1) != _oldn) {   \
        _oldn = atomic_read(ptr);                                       \
    }                                                                   \
    _oldn;                                                              \
})
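
/* Example usage (illustrative, not part of the original header): taking a
 * reference only while an object is still live, as in a hypothetical
 * object_try_ref():
 *
 *     static bool object_try_ref(Object *obj)
 *     {
 *         return atomic_fetch_inc_nonzero(&obj->refcount) != 0;
 *     }
 *
 * A zero return means the refcount had already dropped to zero and the
 * object must not be revived.
 */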

/* Accessors that read or write an int64_t/uint64_t atomically ("once"),
 * even on 32-bit hosts.  Without CONFIG_ATOMIC64 they are provided out of
 * line and must be set up with atomic64_init().
 */
#ifdef CONFIG_ATOMIC64
static inline int64_t atomic_read_i64(const int64_t *ptr)
{
    /* use __nocheck because sizeof(void *) might be < sizeof(u64) */
    return atomic_read__nocheck(ptr);
}

static inline uint64_t atomic_read_u64(const uint64_t *ptr)
{
    return atomic_read__nocheck(ptr);
}

static inline void atomic_set_i64(int64_t *ptr, int64_t val)
{
    atomic_set__nocheck(ptr, val);
}

static inline void atomic_set_u64(uint64_t *ptr, uint64_t val)
{
    atomic_set__nocheck(ptr, val);
}

static inline void atomic64_init(void)
{
}
#else  /* !CONFIG_ATOMIC64 */
int64_t atomic_read_i64(const int64_t *ptr);
uint64_t atomic_read_u64(const uint64_t *ptr);
void atomic_set_i64(int64_t *ptr, int64_t val);
void atomic_set_u64(uint64_t *ptr, uint64_t val);
void atomic64_init(void);
#endif /* !CONFIG_ATOMIC64 */

#endif /* QEMU_ATOMIC_H */
488