qemu/include/qemu/atomic.h
/*
 * Simple interface for atomic operations.
 *
 * Copyright (C) 2013 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 * See docs/devel/atomics.rst for discussion about the guarantees each
 * atomic primitive is meant to provide.
 */

#ifndef QEMU_ATOMIC_H
#define QEMU_ATOMIC_H

#include "compiler.h"

/* Compiler barrier */
#define barrier()   ({ asm volatile("" ::: "memory"); (void)0; })

/* The variable that receives the old value of an atomically-accessed
 * variable must be non-qualified, because atomic builtins return values
 * through a pointer-type argument as in __atomic_load(&var, &old, MODEL).
 *
 * This macro has to handle types smaller than int manually, because of
 * implicit promotion.  int and larger types, as well as pointers, can be
 * converted to a non-qualified type just by applying a binary operator.
 */
#define typeof_strip_qual(expr)                                                    \
  typeof(                                                                          \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), bool) ||                          \
        __builtin_types_compatible_p(typeof(expr), const bool) ||                  \
        __builtin_types_compatible_p(typeof(expr), volatile bool) ||               \
        __builtin_types_compatible_p(typeof(expr), const volatile bool),           \
        (bool)1,                                                                   \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), signed char) ||                   \
        __builtin_types_compatible_p(typeof(expr), const signed char) ||           \
        __builtin_types_compatible_p(typeof(expr), volatile signed char) ||        \
        __builtin_types_compatible_p(typeof(expr), const volatile signed char),    \
        (signed char)1,                                                            \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), unsigned char) ||                 \
        __builtin_types_compatible_p(typeof(expr), const unsigned char) ||         \
        __builtin_types_compatible_p(typeof(expr), volatile unsigned char) ||      \
        __builtin_types_compatible_p(typeof(expr), const volatile unsigned char),  \
        (unsigned char)1,                                                          \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), signed short) ||                  \
        __builtin_types_compatible_p(typeof(expr), const signed short) ||          \
        __builtin_types_compatible_p(typeof(expr), volatile signed short) ||       \
        __builtin_types_compatible_p(typeof(expr), const volatile signed short),   \
        (signed short)1,                                                           \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), unsigned short) ||                \
        __builtin_types_compatible_p(typeof(expr), const unsigned short) ||        \
        __builtin_types_compatible_p(typeof(expr), volatile unsigned short) ||     \
        __builtin_types_compatible_p(typeof(expr), const volatile unsigned short), \
        (unsigned short)1,                                                         \
      (expr)+0))))))
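
/*
 * Illustrative sketch (not part of the original header): typeof_strip_qual()
 * lets the macros below declare a non-qualified temporary even when the
 * argument is const/volatile qualified, e.g. with a hypothetical flag:
 *
 *     volatile bool flag;
 *     typeof_strip_qual(flag) val;                    // val has type bool
 *     __atomic_load(&flag, &val, __ATOMIC_RELAXED);   // old value via pointer
 *
 * Without the stripping, "typeof(flag) val" would declare a volatile-qualified
 * temporary, which is exactly what the comment above says the builtins must
 * not be asked to return into.
 */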

#ifndef __ATOMIC_RELAXED
#error "Expecting C11 atomic ops"
#endif

/* Manual memory barriers
 *
 * __atomic_thread_fence does not include a compiler barrier; instead,
 * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
 * semantics. If smp_wmb() is a no-op, absence of the barrier means that
 * the compiler is free to reorder stores on each side of the barrier.
 * Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
 */

#define smp_mb()                     ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); })
#define smp_mb_release()             ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); })
#define smp_mb_acquire()             ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); })

/* Most compilers currently treat consume and acquire the same, but really
 * no processors except Alpha need a barrier here.  Leave it in if
 * using Thread Sanitizer to avoid warnings, otherwise optimize it away.
 */
#ifdef QEMU_SANITIZE_THREAD
#define smp_read_barrier_depends()   ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); })
#elif defined(__alpha__)
#define smp_read_barrier_depends()   asm volatile("mb":::"memory")
#else
#define smp_read_barrier_depends()   barrier()
#endif
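
/*
 * Illustrative sketch (not part of the original header): the release and
 * acquire fences pair up across threads around the accesses they order.
 * With hypothetical variables "data" and "ready":
 *
 *     // producer                            // consumer
 *     data = 42;                             while (!qatomic_read(&ready)) {
 *     smp_mb_release();                      }
 *     qatomic_set(&ready, 1);                smp_mb_acquire();
 *                                            use(data);   // guaranteed to see 42
 *
 * The fences only order the surrounding accesses; the flag itself is still
 * accessed with qatomic_read()/qatomic_set() (defined below) so that the
 * concurrent access is not a data race under the C11 model.
 */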

/*
 * A signal barrier forces all pending local memory ops to be observed before
 * a SIGSEGV is delivered to the *same* thread.  In practice this is exactly
 * the same as barrier(), but since we have the correct builtin, use it.
 */
#define signal_barrier()    __atomic_signal_fence(__ATOMIC_SEQ_CST)

/* Sanity check that the size of an atomic operation isn't "overly large".
 * Despite the fact that e.g. i686 has 64-bit atomic operations, we do not
 * want to use them because we ought not need them, and this gives us a
 * bit of sanity checking that the code would still build on other 32-bit
 * hosts.
 *
 * That said, we have a problem on 64-bit ILP32 hosts: in order to
 * sync with TCG_OVERSIZED_GUEST, this must match TCG_TARGET_REG_BITS.
 * We'd prefer not to pull in everything else TCG related, so handle
 * those few cases by hand.
 *
 * Note that x32 is fully detected with __x86_64__ + _ILP32, and that for
 * Sparc we always force the use of sparcv9 in configure. MIPS n32 (ILP32) &
 * n64 (LP64) ABIs are both detected using __mips64.
 */
#if defined(__x86_64__) || defined(__sparc__) || defined(__mips64)
# define ATOMIC_REG_SIZE  8
#else
# define ATOMIC_REG_SIZE  sizeof(void *)
#endif

/* Weak atomic operations prevent the compiler from moving other
 * loads/stores past the atomic operation load/store. However there is
 * no explicit memory barrier for the processor.
 *
 * The C11 memory model says that accesses to variables shared between
 * threads should at least be done with __ATOMIC_RELAXED
 * primitives or the result is undefined. Generally this has little to
 * no effect on the generated code, but not using the atomic primitives
 * will get flagged by sanitizers as a violation.
 */
#define qatomic_read__nocheck(ptr) \
    __atomic_load_n(ptr, __ATOMIC_RELAXED)

#define qatomic_read(ptr)                              \
    ({                                                 \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
    qatomic_read__nocheck(ptr);                        \
    })

#define qatomic_set__nocheck(ptr, i) \
    __atomic_store_n(ptr, i, __ATOMIC_RELAXED)

#define qatomic_set(ptr, i)  do {                      \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
    qatomic_set__nocheck(ptr, i);                      \
} while(0)
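
/*
 * Illustrative sketch (not part of the original header): a shutdown flag that
 * is written by one thread and polled by another needs no ordering, only an
 * atomic access, so relaxed read/set is enough (hypothetical "exit_request"):
 *
 *     // signalling thread                   // worker loop
 *     qatomic_set(&exit_request, true);      if (qatomic_read(&exit_request)) {
 *                                                break;
 *                                            }
 *
 * A plain read/write of exit_request would be a data race under C11 and is
 * reported by ThreadSanitizer, even if the generated code is the same.
 */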

/* See above: most compilers currently treat consume and acquire the
 * same, but this slows down qatomic_rcu_read unnecessarily.
 */
#ifdef QEMU_SANITIZE_THREAD
#define qatomic_rcu_read__nocheck(ptr, valptr)           \
    __atomic_load(ptr, valptr, __ATOMIC_CONSUME);
#else
#define qatomic_rcu_read__nocheck(ptr, valptr)           \
    __atomic_load(ptr, valptr, __ATOMIC_RELAXED);        \
    smp_read_barrier_depends();
#endif

#define qatomic_rcu_read(ptr)                          \
    ({                                                 \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
    typeof_strip_qual(*ptr) _val;                      \
    qatomic_rcu_read__nocheck(ptr, &_val);             \
    _val;                                              \
    })

#define qatomic_rcu_set(ptr, i) do {                   \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE); \
    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);        \
} while(0)
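
/*
 * Illustrative sketch (not part of the original header): the rcu_set/rcu_read
 * pair is intended for pointer publication.  With a hypothetical global
 * "struct conf *global_conf":
 *
 *     // updater
 *     struct conf *c = g_new0(struct conf, 1);
 *     c->value = 42;
 *     qatomic_rcu_set(&global_conf, c);           // release: init before publish
 *
 *     // reader
 *     struct conf *p = qatomic_rcu_read(&global_conf);
 *     if (p) {
 *         use(p->value);                          // dependency-ordered, sees 42
 *     }
 */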

#define qatomic_load_acquire(ptr)                       \
    ({                                                  \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);  \
    typeof_strip_qual(*ptr) _val;                       \
    __atomic_load(ptr, &_val, __ATOMIC_ACQUIRE);        \
    _val;                                               \
    })

#define qatomic_store_release(ptr, i)  do {             \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);  \
    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);         \
} while(0)
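
/*
 * Illustrative sketch (not part of the original header): the same
 * producer/consumer pattern as shown for the fences above, but with the
 * ordering attached to the flag access itself (hypothetical "data"/"ready"):
 *
 *     // producer                            // consumer
 *     data = 42;                             if (qatomic_load_acquire(&ready)) {
 *     qatomic_store_release(&ready, true);       use(data);   // sees 42
 *                                            }
 */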


/* All the remaining operations are fully sequentially consistent */

#define qatomic_xchg__nocheck(ptr, i)    ({                 \
    __atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST);        \
})

#define qatomic_xchg(ptr, i)    ({                          \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);      \
    qatomic_xchg__nocheck(ptr, i);                          \
})
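
/*
 * Illustrative sketch (not part of the original header): qatomic_xchg() is
 * handy for "claim once" flags, e.g. making sure some clean-up runs exactly
 * once (hypothetical "cleanup_done" flag):
 *
 *     if (qatomic_xchg(&cleanup_done, 1) == 0) {
 *         do_cleanup();                      // only the first caller gets here
 *     }
 */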

/* Returns the value that was in *ptr, whether or not the exchange succeeded */
#define qatomic_cmpxchg__nocheck(ptr, old, new)    ({                   \
    typeof_strip_qual(*ptr) _old = (old);                               \
    (void)__atomic_compare_exchange_n(ptr, &_old, new, false,           \
                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);      \
    _old;                                                               \
})

#define qatomic_cmpxchg(ptr, old, new)    ({                            \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);                  \
    qatomic_cmpxchg__nocheck(ptr, old, new);                            \
})
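
/*
 * Illustrative sketch (not part of the original header): a typical
 * compare-and-swap loop, here incrementing a hypothetical counter only up to
 * some maximum:
 *
 *     static int incr_capped(int *ptr, int max)
 *     {
 *         int old = qatomic_read(ptr);
 *
 *         for (;;) {
 *             int cur;
 *
 *             if (old >= max) {
 *                 return old;                // already at the cap
 *             }
 *             cur = qatomic_cmpxchg(ptr, old, old + 1);
 *             if (cur == old) {
 *                 return old + 1;            // exchange succeeded
 *             }
 *             old = cur;                     // raced with another thread, retry
 *         }
 *     }
 */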

/* Provide shorter names for GCC atomic builtins, return old value */
#define qatomic_fetch_inc(ptr)  __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
#define qatomic_fetch_dec(ptr)  __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)

#define qatomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_fetch_or(ptr, n)  __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)

#define qatomic_inc_fetch(ptr)    __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define qatomic_dec_fetch(ptr)    __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define qatomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_or_fetch(ptr, n)  __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define qatomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)

/* And even shorter names that return void.  */
#define qatomic_inc(ptr) \
    ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
#define qatomic_dec(ptr) \
    ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
#define qatomic_add(ptr, n) \
    ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
#define qatomic_sub(ptr, n) \
    ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
#define qatomic_and(ptr, n) \
    ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
#define qatomic_or(ptr, n) \
    ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
#define qatomic_xor(ptr, n) \
    ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))
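
/*
 * Illustrative sketch (not part of the original header): a simple reference
 * count on a hypothetical "Obj" type.  qatomic_dec_fetch() returns the value
 * after the decrement, so the thread that drops the count to zero frees:
 *
 *     void obj_ref(Obj *obj)
 *     {
 *         qatomic_inc(&obj->refcnt);
 *     }
 *
 *     void obj_unref(Obj *obj)
 *     {
 *         if (qatomic_dec_fetch(&obj->refcnt) == 0) {
 *             obj_free(obj);                 // last reference dropped
 *         }
 *     }
 */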

#define smp_wmb()   smp_mb_release()
#define smp_rmb()   smp_mb_acquire()

/* qatomic_mb_read/set semantics map to those of Java volatile variables.
 * They are less expensive on some platforms (notably POWER) than fully
 * sequentially consistent operations.
 *
 * As long as they are used as paired operations they are safe to
 * use. See docs/devel/atomics.rst for more discussion.
 */

#define qatomic_mb_read(ptr)                             \
    qatomic_load_acquire(ptr)

#if !defined(QEMU_SANITIZE_THREAD) && \
    (defined(__i386__) || defined(__x86_64__) || defined(__s390x__))
/* This is more efficient than a store plus a fence.  */
# define qatomic_mb_set(ptr, i)  ((void)qatomic_xchg(ptr, i))
#else
# define qatomic_mb_set(ptr, i) \
   ({ qatomic_store_release(ptr, i); smp_mb(); })
#endif
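
/*
 * Illustrative sketch (not part of the original header): the canonical paired
 * use is the store-buffering pattern, with hypothetical flags "a" and "b"
 * both initially zero:
 *
 *     // thread 1                            // thread 2
 *     qatomic_mb_set(&a, 1);                 qatomic_mb_set(&b, 1);
 *     if (qatomic_mb_read(&b) == 0) {        if (qatomic_mb_read(&a) == 0) {
 *         ... thread 2 not yet here ...          ... thread 1 not yet here ...
 *     }                                      }
 *
 * The full barrier implied by qatomic_mb_set() ensures that at most one of
 * the two threads can read 0; a plain release store plus acquire load would
 * allow both to do so.  See docs/devel/atomics.rst.
 */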

#define qatomic_fetch_inc_nonzero(ptr) ({                               \
    typeof_strip_qual(*ptr) _oldn = qatomic_read(ptr);                  \
    while (_oldn && qatomic_cmpxchg(ptr, _oldn, _oldn + 1) != _oldn) {  \
        _oldn = qatomic_read(ptr);                                      \
    }                                                                   \
    _oldn;                                                              \
})
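
/*
 * Illustrative sketch (not part of the original header):
 * qatomic_fetch_inc_nonzero() implements the "take a reference only if one
 * still exists" idiom, again on a hypothetical "Obj" type:
 *
 *     bool obj_try_ref(Obj *obj)
 *     {
 *         return qatomic_fetch_inc_nonzero(&obj->refcnt) != 0;
 *     }
 *
 * If the count has already dropped to zero, no increment is performed and
 * the caller must not use the object.
 */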

/*
 * Abstractions to access atomically (i.e. "once") i64/u64 variables.
 *
 * The i386 ABI is odd in that by default members are only aligned to
 * 4 bytes, which means that 8-byte types can wind up mis-aligned.
 * Clang will then warn about this, and emit a call into libatomic.
 *
 * Using these types for structure members that will be accessed with
 * atomic operations avoids this.
 */
typedef int64_t aligned_int64_t __attribute__((aligned(8)));
typedef uint64_t aligned_uint64_t __attribute__((aligned(8)));
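
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * structure whose 64-bit counter is accessed with qatomic_read_u64() and
 * qatomic_set_u64() (defined below) should use the aligned typedef, so the
 * member stays 8-byte aligned even when the struct is built for i386:
 *
 *     typedef struct Stats {
 *         uint32_t          requests;
 *         aligned_uint64_t  bytes;           // safe for 8-byte atomic access
 *     } Stats;
 */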

#ifdef CONFIG_ATOMIC64
/* Use __nocheck because sizeof(void *) might be < sizeof(u64) */
#define qatomic_read_i64(P) \
    _Generic(*(P), int64_t: qatomic_read__nocheck(P))
#define qatomic_read_u64(P) \
    _Generic(*(P), uint64_t: qatomic_read__nocheck(P))
#define qatomic_set_i64(P, V) \
    _Generic(*(P), int64_t: qatomic_set__nocheck(P, V))
#define qatomic_set_u64(P, V) \
    _Generic(*(P), uint64_t: qatomic_set__nocheck(P, V))

static inline void qatomic64_init(void)
{
}
#else /* !CONFIG_ATOMIC64 */
int64_t  qatomic_read_i64(const int64_t *ptr);
uint64_t qatomic_read_u64(const uint64_t *ptr);
void qatomic_set_i64(int64_t *ptr, int64_t val);
void qatomic_set_u64(uint64_t *ptr, uint64_t val);
void qatomic64_init(void);
#endif /* !CONFIG_ATOMIC64 */

#endif /* QEMU_ATOMIC_H */