qemu/include/qemu/atomic.h
/*
 * Simple interface for atomic operations.
 *
 * Copyright (C) 2013 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 * See docs/devel/atomics.rst for discussion about the guarantees each
 * atomic primitive is meant to provide.
 */

#ifndef QEMU_ATOMIC_H
#define QEMU_ATOMIC_H

/* Compiler barrier */
#define barrier()   ({ asm volatile("" ::: "memory"); (void)0; })

/* The variable that receives the old value of an atomically-accessed
 * variable must be non-qualified, because atomic builtins return values
 * through a pointer-type argument as in __atomic_load(&var, &old, MODEL).
 *
 * This macro has to handle types smaller than int manually, because of
 * implicit promotion.  int and larger types, as well as pointers, can be
 * converted to a non-qualified type just by applying a binary operator.
 */
#define typeof_strip_qual(expr)                                                    \
  typeof(                                                                          \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), bool) ||                          \
        __builtin_types_compatible_p(typeof(expr), const bool) ||                  \
        __builtin_types_compatible_p(typeof(expr), volatile bool) ||               \
        __builtin_types_compatible_p(typeof(expr), const volatile bool),           \
        (bool)1,                                                                   \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), signed char) ||                   \
        __builtin_types_compatible_p(typeof(expr), const signed char) ||           \
        __builtin_types_compatible_p(typeof(expr), volatile signed char) ||        \
        __builtin_types_compatible_p(typeof(expr), const volatile signed char),    \
        (signed char)1,                                                            \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), unsigned char) ||                 \
        __builtin_types_compatible_p(typeof(expr), const unsigned char) ||         \
        __builtin_types_compatible_p(typeof(expr), volatile unsigned char) ||      \
        __builtin_types_compatible_p(typeof(expr), const volatile unsigned char),  \
        (unsigned char)1,                                                          \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), signed short) ||                  \
        __builtin_types_compatible_p(typeof(expr), const signed short) ||          \
        __builtin_types_compatible_p(typeof(expr), volatile signed short) ||       \
        __builtin_types_compatible_p(typeof(expr), const volatile signed short),   \
        (signed short)1,                                                           \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), unsigned short) ||                \
        __builtin_types_compatible_p(typeof(expr), const unsigned short) ||        \
        __builtin_types_compatible_p(typeof(expr), volatile unsigned short) ||     \
        __builtin_types_compatible_p(typeof(expr), const volatile unsigned short), \
        (unsigned short)1,                                                         \
      (expr)+0))))))
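
/*
 * For example (illustrative, arbitrary variable names): given
 *
 *   volatile unsigned short vus;
 *   const int *cip;
 *
 * typeof_strip_qual(vus) is "unsigned short" and typeof_strip_qual(*cip)
 * is plain "int", so both can be used to declare the unqualified
 * temporary that __atomic_load() writes its result through.
 */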

#ifndef __ATOMIC_RELAXED
#error "Expecting C11 atomic ops"
#endif

/* Manual memory barriers
 *
 * __atomic_thread_fence does not include a compiler barrier; instead,
 * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
 * semantics. If smp_wmb() is a no-op, absence of the barrier means that
 * the compiler is free to reorder stores on each side of the barrier.
 * Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
 */

#define smp_mb()                     ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); })
#define smp_mb_release()             ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); })
#define smp_mb_acquire()             ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); })
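
/*
 * Illustrative sketch (hypothetical flags): a full barrier is needed when a
 * store must be ordered before a later load, e.g. a Dekker-style handshake;
 * smp_mb_release()/smp_mb_acquire() do not order a store against a
 * subsequent load.
 *
 *   // thread 1                        // thread 2
 *   qatomic_set(&flag1, 1);            qatomic_set(&flag2, 1);
 *   smp_mb();                          smp_mb();
 *   if (!qatomic_read(&flag2)) {       if (!qatomic_read(&flag1)) {
 *       ...                                ...
 *   }                                  }
 *
 * At most one of the two "if" bodies can run; without the full barriers,
 * both loads could be reordered before the stores and both could see 0.
 */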

/* Most compilers currently treat consume and acquire the same, but really
 * no processors except Alpha need a barrier here.  Leave it in if
 * using Thread Sanitizer to avoid warnings, otherwise optimize it away.
 */
#if defined(__SANITIZE_THREAD__)
#define smp_read_barrier_depends()   ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); })
#elif defined(__alpha__)
#define smp_read_barrier_depends()   asm volatile("mb":::"memory")
#else
#define smp_read_barrier_depends()   barrier()
#endif

/*
 * A signal barrier forces all pending local memory ops to be observed before
 * a SIGSEGV is delivered to the *same* thread.  In practice this is exactly
 * the same as barrier(), but since we have the correct builtin, use it.
 */
#define signal_barrier()    __atomic_signal_fence(__ATOMIC_SEQ_CST)

/* Sanity check that the size of an atomic operation isn't "overly large".
 * Despite the fact that e.g. i686 has 64-bit atomic operations, we do not
 * want to use them because we ought not need them, and avoiding them here
 * gives us a bit of sanity checking that the code would still build on
 * other 32-bit hosts.
 *
 * That said, we have a problem on 64-bit ILP32 hosts in that in order to
 * sync with TCG_OVERSIZED_GUEST, this must match TCG_TARGET_REG_BITS.
 * We'd prefer not to pull in everything else TCG related, so handle
 * those few cases by hand.
 *
 * Note that x32 is fully detected with __x86_64__ + _ILP32, and that for
 * Sparc we always force the use of sparcv9 in configure. MIPS n32 (ILP32) &
 * n64 (LP64) ABIs are both detected using __mips64.
 */
#if defined(__x86_64__) || defined(__sparc__) || defined(__mips64)
# define ATOMIC_REG_SIZE  8
#else
# define ATOMIC_REG_SIZE  sizeof(void *)
#endif

/* Weak atomic operations prevent the compiler from moving other
 * loads/stores past the atomic operation load/store. However there is
 * no explicit memory barrier for the processor.
 *
 * The C11 memory model says that variables that are accessed from
 * different threads should at least be done with __ATOMIC_RELAXED
 * primitives or the result is undefined. Generally this has little to
 * no effect on the generated code but not using the atomic primitives
 * will get flagged by sanitizers as a violation.
 */
#define qatomic_read__nocheck(ptr) \
    __atomic_load_n(ptr, __ATOMIC_RELAXED)

#define qatomic_read(ptr)                              \
    ({                                                 \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
    qatomic_read__nocheck(ptr);                        \
    })

#define qatomic_set__nocheck(ptr, i) \
    __atomic_store_n(ptr, i, __ATOMIC_RELAXED)

#define qatomic_set(ptr, i)  do {                      \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
    qatomic_set__nocheck(ptr, i);                      \
} while(0)
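
/*
 * Illustrative usage (hypothetical field): a variable that another thread
 * may update concurrently should be accessed with these macros rather than
 * with a plain load/store, even when no ordering is required, e.g.
 *
 *   while (!qatomic_read(&job->cancelled)) {
 *       ... do one unit of work ...
 *   }
 *
 * with the cancelling thread doing qatomic_set(&job->cancelled, true).
 */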

/* See above: most compilers currently treat consume and acquire the
 * same, but this slows down qatomic_rcu_read unnecessarily.
 */
#ifdef __SANITIZE_THREAD__
#define qatomic_rcu_read__nocheck(ptr, valptr)           \
    __atomic_load(ptr, valptr, __ATOMIC_CONSUME);
#else
#define qatomic_rcu_read__nocheck(ptr, valptr)           \
    __atomic_load(ptr, valptr, __ATOMIC_RELAXED);        \
    smp_read_barrier_depends();
#endif

#define qatomic_rcu_read(ptr)                          \
    ({                                                 \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
    typeof_strip_qual(*ptr) _val;                      \
    qatomic_rcu_read__nocheck(ptr, &_val);             \
    _val;                                              \
    })

#define qatomic_rcu_set(ptr, i) do {                   \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);        \
} while(0)
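
/*
 * Illustrative pairing (hypothetical Foo type and global_foo pointer):
 * a writer fully initializes an object before publishing it with
 * qatomic_rcu_set(); readers that fetch the pointer with qatomic_rcu_read()
 * are then guaranteed to see the initialized contents when dereferencing it.
 *
 *   // writer                           // reader
 *   Foo *p = g_new0(Foo, 1);            Foo *p = qatomic_rcu_read(&global_foo);
 *   p->a = 1;                           if (p) {
 *   p->b = 2;                               use(p->a, p->b);
 *   qatomic_rcu_set(&global_foo, p);    }
 */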

#define qatomic_load_acquire(ptr)                       \
    ({                                                  \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);  \
    typeof_strip_qual(*ptr) _val;                       \
    __atomic_load(ptr, &_val, __ATOMIC_ACQUIRE);        \
    _val;                                               \
    })

#define qatomic_store_release(ptr, i)  do {             \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);  \
    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);         \
} while(0)
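
/*
 * Illustrative pairing (hypothetical names): qatomic_store_release() in the
 * producer pairs with qatomic_load_acquire() in the consumer, so that once
 * the consumer observes the flag it also observes everything the producer
 * wrote before the release store.
 *
 *   // producer                             // consumer
 *   data = compute();                       if (qatomic_load_acquire(&ready)) {
 *   qatomic_store_release(&ready, true);        consume(data);
 *                                           }
 */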


/* All the remaining operations are fully sequentially consistent */

#define qatomic_xchg__nocheck(ptr, i)    ({                 \
    __atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST);        \
})

#define qatomic_xchg(ptr, i)    ({                          \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);      \
    qatomic_xchg__nocheck(ptr, i);                          \
})

/* Returns the eventual value, failed or not */
#define qatomic_cmpxchg__nocheck(ptr, old, new)    ({                   \
    typeof_strip_qual(*ptr) _old = (old);                               \
    (void)__atomic_compare_exchange_n(ptr, &_old, new, false,           \
                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);      \
    _old;                                                               \
})

#define qatomic_cmpxchg(ptr, old, new)    ({                            \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);                  \
    qatomic_cmpxchg__nocheck(ptr, old, new);                            \
})
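
/*
 * Illustrative sketch (hypothetical counter): qatomic_cmpxchg() returns the
 * value found in *ptr whether or not the exchange happened, so a typical
 * retry loop compares that return value with the expected one:
 *
 *   int old = qatomic_read(&counter);
 *   for (;;) {
 *       int expected = old;
 *       old = qatomic_cmpxchg(&counter, expected, expected * 2);
 *       if (old == expected) {
 *           break;                  // the doubled value was stored
 *       }
 *       // otherwise "old" holds the current value; recompute and retry
 *   }
 */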

/* Provide shorter names for GCC atomic builtins, return old value */
#define qatomic_fetch_inc(ptr)  __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
#define qatomic_fetch_dec(ptr)  __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)

#define qatomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_fetch_or(ptr, n)  __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)

#define qatomic_inc_fetch(ptr)    __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define qatomic_dec_fetch(ptr)    __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define qatomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_or_fetch(ptr, n)  __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)
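
/*
 * Illustrative note: the qatomic_fetch_* forms return the value *before*
 * the update, the qatomic_*_fetch forms the value *after* it.  E.g. with
 * x == 5:
 *
 *   qatomic_fetch_inc(&x);   // returns 5, x is now 6
 *   qatomic_inc_fetch(&x);   // returns 7, x is now 7
 */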

/* And even shorter names that return void.  */
#define qatomic_inc(ptr) \
    ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
#define qatomic_dec(ptr) \
    ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
#define qatomic_add(ptr, n) \
    ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
#define qatomic_sub(ptr, n) \
    ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
#define qatomic_and(ptr, n) \
    ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
#define qatomic_or(ptr, n) \
    ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
#define qatomic_xor(ptr, n) \
    ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))

#define smp_wmb()   smp_mb_release()
#define smp_rmb()   smp_mb_acquire()

/* qatomic_mb_read/set semantics map to Java volatile variables. They are
 * less expensive on some platforms (notably POWER) than fully
 * sequentially consistent operations.
 *
 * As long as they are used as paired operations they are safe to
 * use. See docs/devel/atomics.rst for more discussion.
 */

#define qatomic_mb_read(ptr)                             \
    qatomic_load_acquire(ptr)

#if !defined(__SANITIZE_THREAD__) && \
    (defined(__i386__) || defined(__x86_64__) || defined(__s390x__))
/* This is more efficient than a store plus a fence.  */
# define qatomic_mb_set(ptr, i)  ((void)qatomic_xchg(ptr, i))
#else
# define qatomic_mb_set(ptr, i) \
   ({ qatomic_store_release(ptr, i); smp_mb(); })
#endif
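
/*
 * Illustrative pairing (hypothetical flags), equivalent to the smp_mb()
 * sketch above but expressed with the paired accessors:
 *
 *   // thread 1                        // thread 2
 *   qatomic_mb_set(&a, 1);             qatomic_mb_set(&b, 1);
 *   if (qatomic_mb_read(&b) == 0) {    if (qatomic_mb_read(&a) == 0) {
 *       ...                                ...
 *   }                                  }
 *
 * As with Java volatile variables, at most one of the two "if" bodies
 * can run.
 */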

#define qatomic_fetch_inc_nonzero(ptr) ({                               \
    typeof_strip_qual(*ptr) _oldn = qatomic_read(ptr);                  \
    while (_oldn && qatomic_cmpxchg(ptr, _oldn, _oldn + 1) != _oldn) {  \
        _oldn = qatomic_read(ptr);                                      \
    }                                                                   \
    _oldn;                                                              \
})
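
/*
 * Illustrative use (hypothetical reference count): take a new reference
 * only if the object still has at least one, i.e. is not already being
 * torn down:
 *
 *   if (qatomic_fetch_inc_nonzero(&obj->refcnt) == 0) {
 *       return NULL;            // too late, the object is going away
 *   }
 *   return obj;
 */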

/*
 * Abstractions to access atomically (i.e. "once") i64/u64 variables.
 *
 * The i386 abi is odd in that by default members are only aligned to
 * 4 bytes, which means that 8-byte types can wind up mis-aligned.
 * Clang will then warn about this, and emit a call into libatomic.
 *
 * Use of these types in structures when they will be used with atomic
 * operations can avoid this.
 */
typedef int64_t aligned_int64_t __attribute__((aligned(8)));
typedef uint64_t aligned_uint64_t __attribute__((aligned(8)));
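
/*
 * Illustrative example (hypothetical structure): declaring a field as
 * aligned_uint64_t forces 8-byte alignment for the member (and for the
 * containing struct), so qatomic_read_u64()/qatomic_set_u64() below remain
 * inline even on hosts such as i386 instead of calling into libatomic:
 *
 *   typedef struct Stats {
 *       uint32_t         flags;
 *       aligned_uint64_t bytes_transferred;
 *   } Stats;
 *
 *   uint64_t n = qatomic_read_u64(&stats->bytes_transferred);
 */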

#ifdef CONFIG_ATOMIC64
/* Use __nocheck because sizeof(void *) might be < sizeof(u64) */
#define qatomic_read_i64(P) \
    _Generic(*(P), int64_t: qatomic_read__nocheck(P))
#define qatomic_read_u64(P) \
    _Generic(*(P), uint64_t: qatomic_read__nocheck(P))
#define qatomic_set_i64(P, V) \
    _Generic(*(P), int64_t: qatomic_set__nocheck(P, V))
#define qatomic_set_u64(P, V) \
    _Generic(*(P), uint64_t: qatomic_set__nocheck(P, V))

static inline void qatomic64_init(void)
{
}
#else /* !CONFIG_ATOMIC64 */
int64_t  qatomic_read_i64(const int64_t *ptr);
uint64_t qatomic_read_u64(const uint64_t *ptr);
void qatomic_set_i64(int64_t *ptr, int64_t val);
void qatomic_set_u64(uint64_t *ptr, uint64_t val);
void qatomic64_init(void);
#endif /* !CONFIG_ATOMIC64 */

#endif /* QEMU_ATOMIC_H */