qemu/include/qemu/atomic.h
/*
 * Simple interface for atomic operations.
 *
 * Copyright (C) 2013 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 * See docs/devel/atomics.txt for discussion about the guarantees each
 * atomic primitive is meant to provide.
 */

#ifndef QEMU_ATOMIC_H
#define QEMU_ATOMIC_H

/* Compiler barrier */
#define barrier()   ({ asm volatile("" ::: "memory"); (void)0; })

/* The variable that receives the old value of an atomically-accessed
 * variable must be non-qualified, because atomic builtins return values
 * through a pointer-type argument as in __atomic_load(&var, &old, MODEL).
 *
 * This macro has to handle types smaller than int manually, because of
 * implicit promotion.  int and larger types, as well as pointers, can be
 * converted to a non-qualified type just by applying a binary operator.
 */
#define typeof_strip_qual(expr)                                                    \
  typeof(                                                                          \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), bool) ||                          \
        __builtin_types_compatible_p(typeof(expr), const bool) ||                  \
        __builtin_types_compatible_p(typeof(expr), volatile bool) ||               \
        __builtin_types_compatible_p(typeof(expr), const volatile bool),           \
        (bool)1,                                                                   \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), signed char) ||                   \
        __builtin_types_compatible_p(typeof(expr), const signed char) ||           \
        __builtin_types_compatible_p(typeof(expr), volatile signed char) ||        \
        __builtin_types_compatible_p(typeof(expr), const volatile signed char),    \
        (signed char)1,                                                            \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), unsigned char) ||                 \
        __builtin_types_compatible_p(typeof(expr), const unsigned char) ||         \
        __builtin_types_compatible_p(typeof(expr), volatile unsigned char) ||      \
        __builtin_types_compatible_p(typeof(expr), const volatile unsigned char),  \
        (unsigned char)1,                                                          \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), signed short) ||                  \
        __builtin_types_compatible_p(typeof(expr), const signed short) ||          \
        __builtin_types_compatible_p(typeof(expr), volatile signed short) ||       \
        __builtin_types_compatible_p(typeof(expr), const volatile signed short),   \
        (signed short)1,                                                           \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), unsigned short) ||                \
        __builtin_types_compatible_p(typeof(expr), const unsigned short) ||        \
        __builtin_types_compatible_p(typeof(expr), volatile unsigned short) ||     \
        __builtin_types_compatible_p(typeof(expr), const volatile unsigned short), \
        (unsigned short)1,                                                         \
      (expr)+0))))))
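
/* A sketch of what typeof_strip_qual buys us (illustrative only; "flag"
 * is a hypothetical variable, not part of this header):
 *
 *     volatile bool flag;
 *     typeof_strip_qual(flag) val;                   // plain bool
 *     __atomic_load(&flag, &val, __ATOMIC_RELAXED);  // OK
 *
 * Without the qualifier stripped, &val would be a pointer to a qualified
 * type, which the atomic builtins do not accept (see the comment above).
 */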

#ifdef __ATOMIC_RELAXED
/* For C11 atomic ops */

/* Manual memory barriers
 *
 * __atomic_thread_fence does not include a compiler barrier; instead,
 * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
 * semantics. If smp_wmb() is a no-op, absence of the barrier means that
 * the compiler is free to reorder stores on each side of the barrier.
 * Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
 */

#define smp_mb()                     ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); })
#define smp_mb_release()             ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); })
#define smp_mb_acquire()             ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); })

/* Most compilers currently treat consume and acquire the same, but really
 * no processors except Alpha need a barrier here.  Leave it in if
 * using Thread Sanitizer to avoid warnings, otherwise optimize it away.
 */
#if defined(__SANITIZE_THREAD__)
#define smp_read_barrier_depends()   ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); })
#elif defined(__alpha__)
#define smp_read_barrier_depends()   asm volatile("mb":::"memory")
#else
#define smp_read_barrier_depends()   barrier()
#endif

/* Sanity check that the size of an atomic operation isn't "overly large".
 * Despite the fact that e.g. i686 has 64-bit atomic operations, we do not
 * want to use them because we ought not need them, and this lets us do a
 * bit of sanity checking that other 32-bit hosts still build.
 *
 * That said, we have a problem on 64-bit ILP32 hosts in that in order to
 * sync with TCG_OVERSIZED_GUEST, this must match TCG_TARGET_REG_BITS.
 * We'd prefer not to pull in everything else TCG related, so handle
 * those few cases by hand.
 *
 * Note that x32 is fully detected with __x86_64__ + _ILP32, and that for
 * Sparc we always force the use of sparcv9 in configure.
 */
#if defined(__x86_64__) || defined(__sparc__)
# define ATOMIC_REG_SIZE  8
#else
# define ATOMIC_REG_SIZE  sizeof(void *)
#endif

/* Weak atomic operations prevent the compiler from moving other
 * loads/stores past the atomic operation load/store. However there is
 * no explicit memory barrier for the processor.
 *
 * The C11 memory model says that accesses to variables shared between
 * threads should at least use __ATOMIC_RELAXED primitives or the result
 * is undefined. Generally this has little to no effect on the generated
 * code, but not using the atomic primitives will get flagged by
 * sanitizers as a violation.
 */
#define atomic_read__nocheck(ptr) \
    __atomic_load_n(ptr, __ATOMIC_RELAXED)

#define atomic_read(ptr)                              \
    ({                                                \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
    atomic_read__nocheck(ptr);                        \
    })

#define atomic_set__nocheck(ptr, i) \
    __atomic_store_n(ptr, i, __ATOMIC_RELAXED)

#define atomic_set(ptr, i)  do {                      \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
    atomic_set__nocheck(ptr, i);                      \
} while(0)
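
/* Usage sketch (illustrative; "running" is a hypothetical flag shared
 * between threads):
 *
 *     bool running;
 *
 *     atomic_set(&running, true);           // updater thread
 *
 *     while (atomic_read(&running)) {       // polling thread
 *         ...
 *     }
 *
 * These are relaxed accesses: they satisfy the sanitizers and constrain
 * the compiler as described above, but provide no processor-level
 * ordering of the surrounding loads and stores.
 */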

/* See above: most compilers currently treat consume and acquire the
 * same, but this slows down atomic_rcu_read unnecessarily.
 */
#ifdef __SANITIZE_THREAD__
#define atomic_rcu_read__nocheck(ptr, valptr)           \
    __atomic_load(ptr, valptr, __ATOMIC_CONSUME);
#else
#define atomic_rcu_read__nocheck(ptr, valptr)           \
    __atomic_load(ptr, valptr, __ATOMIC_RELAXED);       \
    smp_read_barrier_depends();
#endif

#define atomic_rcu_read(ptr)                          \
    ({                                                \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
    typeof_strip_qual(*ptr) _val;                     \
    atomic_rcu_read__nocheck(ptr, &_val);             \
    _val;                                             \
    })

#define atomic_rcu_set(ptr, i) do {                   \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);       \
} while(0)
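
/* Typical publish/read pairing (illustrative; "global_conf" and
 * "struct conf" are hypothetical):
 *
 *     struct conf *c = g_new0(struct conf, 1);
 *     c->timeout = 10;                      // initialize first...
 *     atomic_rcu_set(&global_conf, c);      // ...then publish (release)
 *
 *     struct conf *seen = atomic_rcu_read(&global_conf);
 *     use(seen->timeout);                   // initialization is visible
 */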

#define atomic_load_acquire(ptr)                        \
    ({                                                  \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);  \
    typeof_strip_qual(*ptr) _val;                       \
    __atomic_load(ptr, &_val, __ATOMIC_ACQUIRE);        \
    _val;                                               \
    })

#define atomic_store_release(ptr, i)  do {              \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);  \
    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);         \
} while(0)

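/* Release/acquire message passing (illustrative; "msg" and "ready" are
 * hypothetical):
 *
 *     msg = compute();                      // producer: plain store
 *     atomic_store_release(&ready, true);   // orders the msg store first
 *
 *     if (atomic_load_acquire(&ready)) {    // consumer
 *         consume(msg);                     // guaranteed to see compute()
 *     }
 */
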
/* All the remaining operations are fully sequentially consistent */

#define atomic_xchg__nocheck(ptr, i)    ({                  \
    __atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST);        \
})

#define atomic_xchg(ptr, i)    ({                           \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);      \
    atomic_xchg__nocheck(ptr, i);                           \
})

/* Returns the old value of *ptr, whether or not the exchange succeeded */
#define atomic_cmpxchg__nocheck(ptr, old, new)    ({                    \
    typeof_strip_qual(*ptr) _old = (old);                               \
    (void)__atomic_compare_exchange_n(ptr, &_old, new, false,           \
                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);      \
    _old;                                                               \
})

#define atomic_cmpxchg(ptr, old, new)    ({                             \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);                  \
    atomic_cmpxchg__nocheck(ptr, old, new);                             \
})
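
/* Typical retry loop (illustrative): a saturating increment built on
 * atomic_cmpxchg.  "counter" is hypothetical; note how the returned
 * value both signals success and feeds the next attempt:
 *
 *     int old = atomic_read(&counter);
 *     while (old != INT_MAX) {
 *         int seen = atomic_cmpxchg(&counter, old, old + 1);
 *         if (seen == old) {
 *             break;                        // exchange succeeded
 *         }
 *         old = seen;                       // lost a race; retry
 *     }
 */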

/* Provide shorter names for GCC atomic builtins, return old value */
#define atomic_fetch_inc(ptr)  __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_dec(ptr)  __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_or(ptr, n)  __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)

#define atomic_inc_fetch(ptr)    __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_dec_fetch(ptr)    __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_or_fetch(ptr, n)  __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)

/* And even shorter names that return void.  */
#define atomic_inc(ptr)    ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_dec(ptr)    ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_or(ptr, n)  ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_xor(ptr, n) ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))
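
/* Example (illustrative; "obj" and its "refcnt" field are hypothetical):
 * the fetch variants return the pre-operation value, which is what a
 * reference count needs to detect the final release:
 *
 *     atomic_inc(&obj->refcnt);             // take a reference
 *
 *     if (atomic_fetch_dec(&obj->refcnt) == 1) {
 *         object_free(obj);                 // we dropped the last one
 *     }
 */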

#else /* __ATOMIC_RELAXED */

/*
 * We use the GCC builtin if it's available, as that can use mfence on
 * 32-bit as well, e.g. if built with -march=pentium-m. However, on
 * i386 the spec is buggy, and the implementation followed it until
 * GCC 4.3 (http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36793).
 */
#if defined(__i386__) || defined(__x86_64__)
#if !QEMU_GNUC_PREREQ(4, 4)
#if defined __x86_64__
#define smp_mb()    ({ asm volatile("mfence" ::: "memory"); (void)0; })
#else
#define smp_mb()    ({ asm volatile("lock; addl $0,0(%%esp)" ::: "memory"); (void)0; })
#endif
#endif
#endif


#ifdef __alpha__
#define smp_read_barrier_depends()   asm volatile("mb":::"memory")
#endif

#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)

/*
 * Because of the strongly ordered storage model, wmb() and rmb() are nops
 * here (a compiler barrier only).  QEMU doesn't do accesses to write-combining
 * memory or non-temporal load/stores from C code.
 */
#define smp_mb_release()   barrier()
#define smp_mb_acquire()   barrier()

/*
 * __sync_lock_test_and_set() is documented to be an acquire barrier only,
 * but it is a full barrier at the hardware level.  Add a compiler barrier
 * to make it a full barrier also at the compiler level.
 */
#define atomic_xchg(ptr, i)    (barrier(), __sync_lock_test_and_set(ptr, i))

#elif defined(_ARCH_PPC)

/*
 * We use an eieio() for wmb() on powerpc.  This assumes we don't
 * need to order cacheable and non-cacheable stores with respect to
 * each other.
 *
 * smp_mb has the same problem as on x86 for not-very-new GCC
 * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011).
 */
#define smp_wmb()          ({ asm volatile("eieio" ::: "memory"); (void)0; })
#if defined(__powerpc64__)
#define smp_mb_release()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#define smp_mb_acquire()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#else
#define smp_mb_release()   ({ asm volatile("sync" ::: "memory"); (void)0; })
#define smp_mb_acquire()   ({ asm volatile("sync" ::: "memory"); (void)0; })
#endif
#define smp_mb()           ({ asm volatile("sync" ::: "memory"); (void)0; })

#endif /* _ARCH_PPC */

/*
 * For (host) platforms we don't have explicit barrier definitions
 * for, we use the gcc __sync_synchronize() primitive to generate a
 * full barrier.  This should be safe on all platforms, though it may
 * be overkill for smp_mb_acquire() and smp_mb_release().
 */
#ifndef smp_mb
#define smp_mb()           __sync_synchronize()
#endif

#ifndef smp_mb_acquire
#define smp_mb_acquire()   __sync_synchronize()
#endif

#ifndef smp_mb_release
#define smp_mb_release()   __sync_synchronize()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends()   barrier()
#endif

/* These will only be atomic if the processor does the fetch or store
 * in a single memory operation.
 */
#define atomic_read__nocheck(p)   (*(__typeof__(*(p)) volatile*) (p))
#define atomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i))

#define atomic_read(ptr)       atomic_read__nocheck(ptr)
#define atomic_set(ptr, i)     atomic_set__nocheck(ptr, i)

/**
 * atomic_rcu_read - reads an RCU-protected pointer into a local variable
 * inside an RCU read-side critical section. The pointer can later be safely
 * dereferenced within the critical section.
 *
 * This ensures that the pointer copy is invariant throughout the whole
 * critical section.
 *
 * Inserts memory barriers on architectures that require them (currently only
 * Alpha) and documents which pointers are protected by RCU.
 *
 * atomic_rcu_read also includes a compiler barrier to ensure that
 * value-speculative optimizations (e.g. VSS: Value Speculation
 * Scheduling) do not perform the data read before the pointer read
 * by speculating the value of the pointer.
 *
 * Should match atomic_rcu_set(), atomic_xchg(), atomic_cmpxchg().
 */
#define atomic_rcu_read(ptr)    ({                \
    typeof(*ptr) _val = atomic_read(ptr);         \
    smp_read_barrier_depends();                   \
    _val;                                         \
})

/**
 * atomic_rcu_set - assigns (publishes) a pointer to a new data structure
 * meant to be read by RCU read-side critical sections.
 *
 * Documents which pointers will be dereferenced by RCU read-side critical
 * sections and adds the required memory barriers on architectures requiring
 * them. It also makes sure the compiler does not reorder the code that
 * initializes the data structure past its publication.
 *
 * Should match atomic_rcu_read().
 */
#define atomic_rcu_set(ptr, i)  do {              \
    smp_wmb();                                    \
    atomic_set(ptr, i);                           \
} while (0)

#define atomic_load_acquire(ptr)    ({      \
    typeof(*ptr) _val = atomic_read(ptr);   \
    smp_mb_acquire();                       \
    _val;                                   \
})

#define atomic_store_release(ptr, i)  do {  \
    smp_mb_release();                       \
    atomic_set(ptr, i);                     \
} while (0)

#ifndef atomic_xchg
#if defined(__clang__)
#define atomic_xchg(ptr, i)    __sync_swap(ptr, i)
#else
/* __sync_lock_test_and_set() is documented to be an acquire barrier only.  */
#define atomic_xchg(ptr, i)    (smp_mb(), __sync_lock_test_and_set(ptr, i))
#endif
#endif
#define atomic_xchg__nocheck  atomic_xchg

/* Provide shorter names for GCC atomic builtins.  */
#define atomic_fetch_inc(ptr)    __sync_fetch_and_add(ptr, 1)
#define atomic_fetch_dec(ptr)    __sync_fetch_and_add(ptr, -1)
#define atomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n)
#define atomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n)
#define atomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n)
#define atomic_fetch_or(ptr, n)  __sync_fetch_and_or(ptr, n)
#define atomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n)

#define atomic_inc_fetch(ptr)    __sync_add_and_fetch(ptr, 1)
#define atomic_dec_fetch(ptr)    __sync_add_and_fetch(ptr, -1)
#define atomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n)
#define atomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n)
#define atomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n)
#define atomic_or_fetch(ptr, n)  __sync_or_and_fetch(ptr, n)
#define atomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n)

#define atomic_cmpxchg(ptr, old, new) __sync_val_compare_and_swap(ptr, old, new)
#define atomic_cmpxchg__nocheck(ptr, old, new)  atomic_cmpxchg(ptr, old, new)

/* And even shorter names that return void.  */
#define atomic_inc(ptr)        ((void) __sync_fetch_and_add(ptr, 1))
#define atomic_dec(ptr)        ((void) __sync_fetch_and_add(ptr, -1))
#define atomic_add(ptr, n)     ((void) __sync_fetch_and_add(ptr, n))
#define atomic_sub(ptr, n)     ((void) __sync_fetch_and_sub(ptr, n))
#define atomic_and(ptr, n)     ((void) __sync_fetch_and_and(ptr, n))
#define atomic_or(ptr, n)      ((void) __sync_fetch_and_or(ptr, n))
#define atomic_xor(ptr, n)     ((void) __sync_fetch_and_xor(ptr, n))

#endif /* __ATOMIC_RELAXED */

#ifndef smp_wmb
#define smp_wmb()   smp_mb_release()
#endif
#ifndef smp_rmb
#define smp_rmb()   smp_mb_acquire()
#endif
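
/* smp_wmb() and smp_rmb() are meant to be used in pairs (illustrative
 * sketch; "data" and "flag" are hypothetical):
 *
 *     // producer                     // consumer
 *     data = compute();               if (atomic_read(&flag)) {
 *     smp_wmb();                          smp_rmb();
 *     atomic_set(&flag, true);            consume(data);
 *                                     }
 */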

/* This is more efficient than a store plus a fence.  */
#if !defined(__SANITIZE_THREAD__)
#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
#define atomic_mb_set(ptr, i)  ((void)atomic_xchg(ptr, i))
#endif
#endif

/* atomic_mb_read/set semantics map to those of Java volatile variables.
 * They are less expensive on some platforms (notably POWER) than fully
 * sequentially consistent operations.
 *
 * As long as they are used as paired operations they are safe to
 * use. See docs/devel/atomics.txt for more discussion.
 */

#ifndef atomic_mb_read
#define atomic_mb_read(ptr)                             \
    atomic_load_acquire(ptr)
#endif

#ifndef atomic_mb_set
#define atomic_mb_set(ptr, i)  do {                     \
    atomic_store_release(ptr, i);                       \
    smp_mb();                                           \
} while(0)
#endif

#define atomic_fetch_inc_nonzero(ptr) ({                                \
    typeof_strip_qual(*ptr) _oldn = atomic_read(ptr);                   \
    while (_oldn && atomic_cmpxchg(ptr, _oldn, _oldn + 1) != _oldn) {   \
        _oldn = atomic_read(ptr);                                       \
    }                                                                   \
    _oldn;                                                              \
})
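
/* Useful for "take a reference only if the object is still alive"
 * (illustrative; "obj" and "refcnt" are hypothetical):
 *
 *     if (atomic_fetch_inc_nonzero(&obj->refcnt) == 0) {
 *         return NULL;                      // already freed; don't revive
 *     }
 */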

/* Abstractions to access atomically (i.e. "once") i64/u64 variables */
#ifdef CONFIG_ATOMIC64
static inline int64_t atomic_read_i64(const int64_t *ptr)
{
    /* use __nocheck because sizeof(void *) might be < sizeof(u64) */
    return atomic_read__nocheck(ptr);
}

static inline uint64_t atomic_read_u64(const uint64_t *ptr)
{
    return atomic_read__nocheck(ptr);
}

static inline void atomic_set_i64(int64_t *ptr, int64_t val)
{
    atomic_set__nocheck(ptr, val);
}

static inline void atomic_set_u64(uint64_t *ptr, uint64_t val)
{
    atomic_set__nocheck(ptr, val);
}

static inline void atomic64_init(void)
{
}
#else /* !CONFIG_ATOMIC64 */
int64_t  atomic_read_i64(const int64_t *ptr);
uint64_t atomic_read_u64(const uint64_t *ptr);
void atomic_set_i64(int64_t *ptr, int64_t val);
void atomic_set_u64(uint64_t *ptr, uint64_t val);
void atomic64_init(void);
#endif /* !CONFIG_ATOMIC64 */
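
/* Usage sketch (illustrative; "stats" is hypothetical): sample a 64-bit
 * counter that another thread updates.  On 32-bit hosts a plain load of
 * an int64_t may tear; these helpers never do:
 *
 *     uint64_t bytes = atomic_read_u64(&stats.tx_bytes);
 */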

#endif /* QEMU_ATOMIC_H */