qemu/include/qemu/atomic.h
/*
 * Simple interface for atomic operations.
 *
 * Copyright (C) 2013 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 * See docs/atomics.txt for discussion about the guarantees each
 * atomic primitive is meant to provide.
 */

#ifndef __QEMU_ATOMIC_H
#define __QEMU_ATOMIC_H 1


/* Compiler barrier */
#define barrier()   ({ asm volatile("" ::: "memory"); (void)0; })

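/* A minimal illustration (not part of the original header): barrier()
 * only constrains the compiler, not the processor.  For example, in
 *
 *     a = 1;
 *     barrier();
 *     b = 1;
 *
 * the compiler may not move either store across the barrier, but a
 * weakly ordered CPU may still make them visible out of order; use the
 * smp_*() barriers below when hardware ordering is needed as well.
 */
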
#ifdef __ATOMIC_RELAXED
/* For C11 atomic ops */

/* Manual memory barriers
 *
 * __atomic_thread_fence does not include a compiler barrier; instead,
 * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
 * semantics. If smp_wmb() is a no-op, absence of the barrier means that
 * the compiler is free to reorder stores on each side of the barrier.
 * Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
 */

#define smp_mb()    ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); barrier(); })
#define smp_wmb()   ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); barrier(); })
#define smp_rmb()   ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); barrier(); })

#define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); barrier(); })

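/* A minimal pairing sketch (not part of the original header), using a
 * hypothetical "data" slot and "ready" flag shared between two threads:
 *
 *     // producer
 *     data = compute();
 *     smp_wmb();
 *     atomic_set(&ready, 1);
 *
 *     // consumer
 *     if (atomic_read(&ready)) {
 *         smp_rmb();
 *         use(data);
 *     }
 *
 * The smp_wmb() orders the data store before the flag store, and the
 * smp_rmb() orders the flag load before the data load, so a consumer
 * that sees ready == 1 also sees the data.
 */
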
/* Weak atomic operations prevent the compiler from moving other
 * loads/stores past the atomic operation's load/store.  However, there
 * is no explicit memory barrier for the processor.
 */
#define atomic_read(ptr)                              \
    ({                                                \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
    typeof(*ptr) _val;                                \
    __atomic_load(ptr, &_val, __ATOMIC_RELAXED);      \
    _val;                                             \
    })

#define atomic_set(ptr, i)  do {                      \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
    typeof(*ptr) _val = (i);                          \
    __atomic_store(ptr, &_val, __ATOMIC_RELAXED);     \
} while (0)

/* Atomic RCU operations imply weak memory barriers */

#define atomic_rcu_read(ptr)                          \
    ({                                                \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
    typeof(*ptr) _val;                                \
    __atomic_load(ptr, &_val, __ATOMIC_CONSUME);      \
    _val;                                             \
    })

#define atomic_rcu_set(ptr, i) do {                   \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *)); \
    typeof(*ptr) _val = (i);                          \
    __atomic_store(ptr, &_val, __ATOMIC_RELEASE);     \
} while (0)

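/* A minimal usage sketch (not part of the original header), assuming a
 * hypothetical RCU-protected global "struct foo *foo_ptr":
 *
 *     // writer: fully initialize the object, then publish the pointer
 *     struct foo *new_foo = g_new0(struct foo, 1);
 *     new_foo->field = 42;
 *     atomic_rcu_set(&foo_ptr, new_foo);
 *
 *     // reader, inside an RCU read-side critical section
 *     struct foo *p = atomic_rcu_read(&foo_ptr);
 *     if (p) {
 *         use(p->field);
 *     }
 *
 * atomic_rcu_set() orders the initialization before the pointer store;
 * atomic_rcu_read() orders the pointer load before the dereference on
 * architectures (such as Alpha) that require it.
 */
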
/* atomic_mb_read/set have the same semantics as Java volatile variables.
 * They are less expensive on some platforms (notably POWER and ARMv7)
 * than fully sequentially consistent operations.
 *
 * As long as they are used as paired operations they are safe to
 * use.  See docs/atomics.txt for more discussion.
 */

#if defined(_ARCH_PPC)
#define atomic_mb_read(ptr)                             \
    ({                                                  \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *));   \
    typeof(*ptr) _val;                                  \
    __atomic_load(ptr, &_val, __ATOMIC_RELAXED);        \
    smp_rmb();                                          \
    _val;                                               \
    })

#define atomic_mb_set(ptr, i)  do {                     \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *));   \
    typeof(*ptr) _val = (i);                            \
    smp_wmb();                                          \
    __atomic_store(ptr, &_val, __ATOMIC_RELAXED);       \
    smp_mb();                                           \
} while (0)
#else
#define atomic_mb_read(ptr)                             \
    ({                                                  \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *));   \
    typeof(*ptr) _val;                                  \
    __atomic_load(ptr, &_val, __ATOMIC_SEQ_CST);        \
    _val;                                               \
    })

#define atomic_mb_set(ptr, i)  do {                     \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *));   \
    typeof(*ptr) _val = (i);                            \
    __atomic_store(ptr, &_val, __ATOMIC_SEQ_CST);       \
} while (0)
#endif


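/* A minimal pairing sketch (not part of the original header): one thread
 * publishes a flag with atomic_mb_set() and another polls it with
 * atomic_mb_read(); "request" is a hypothetical shared int.
 *
 *     // thread A
 *     atomic_mb_set(&request, 1);
 *
 *     // thread B
 *     if (atomic_mb_read(&request)) {
 *         handle_request();
 *     }
 *
 * Used as a matched pair like this, no further explicit barriers are
 * needed around the flag accesses.
 */
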
/* All the remaining operations are fully sequentially consistent */

#define atomic_xchg(ptr, i)    ({                           \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *));       \
    typeof(*ptr) _new = (i), _old;                          \
    __atomic_exchange(ptr, &_new, &_old, __ATOMIC_SEQ_CST); \
    _old;                                                   \
})

/* Returns the value found in memory, whether or not the exchange succeeded */
#define atomic_cmpxchg(ptr, old, new)                                   \
    ({                                                                  \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > sizeof(void *));                   \
    typeof(*ptr) _old = (old), _new = (new);                            \
    __atomic_compare_exchange(ptr, &_old, &_new, false,                 \
                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);      \
    _old;                                                               \
    })

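/* A minimal usage sketch (not part of the original header): a lock-free
 * increment on a hypothetical "int counter", written as a classic
 * compare-and-swap loop.  Because atomic_cmpxchg() returns the value it
 * found in memory, the loop retries until that value matches "old".
 *
 *     int old, seen;
 *     do {
 *         old = atomic_read(&counter);
 *         seen = atomic_cmpxchg(&counter, old, old + 1);
 *     } while (seen != old);
 */
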
/* Provide shorter names for GCC atomic builtins, return old value */
#define atomic_fetch_inc(ptr)  __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_dec(ptr)  __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_or(ptr, n)  __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)

/* And even shorter names that return void.  */
#define atomic_inc(ptr)    ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_dec(ptr)    ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_or(ptr, n)  ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))

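/* A minimal usage sketch (not part of the original header): a reference
 * count on a hypothetical object with an "int refcount" field.  The
 * fetch variants return the value *before* the update, so the holder
 * of the last reference is the one that observed 1.
 *
 *     void obj_unref(struct obj *o)
 *     {
 *         if (atomic_fetch_dec(&o->refcount) == 1) {
 *             obj_free(o);
 *         }
 *     }
 */
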
#else /* __ATOMIC_RELAXED */

/*
 * We use the GCC __sync_synchronize() builtin if it's available, as it
 * can emit mfence on 32-bit x86 as well, e.g. if built with
 * -march=pentium-m.  However, on i386 the builtin's specification is
 * buggy, and the implementation followed it until GCC 4.3
 * (http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36793).
 */
#if defined(__i386__) || defined(__x86_64__)
#if !QEMU_GNUC_PREREQ(4, 4)
#if defined __x86_64__
#define smp_mb()    ({ asm volatile("mfence" ::: "memory"); (void)0; })
#else
#define smp_mb()    ({ asm volatile("lock; addl $0,0(%%esp) " ::: "memory"); (void)0; })
#endif
#endif
#endif


#ifdef __alpha__
#define smp_read_barrier_depends()   asm volatile("mb":::"memory")
#endif

#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)

/*
 * Because of the strongly ordered storage model, wmb() and rmb() are nops
 * here (a compiler barrier only).  QEMU doesn't do accesses to
 * write-combining memory or non-temporal load/stores from C code.
 */
#define smp_wmb()   barrier()
#define smp_rmb()   barrier()

/*
 * __sync_lock_test_and_set() is documented to be an acquire barrier only,
 * but it is a full barrier at the hardware level.  Add a compiler barrier
 * to make it a full barrier also at the compiler level.
 */
#define atomic_xchg(ptr, i)    (barrier(), __sync_lock_test_and_set(ptr, i))

/*
 * Load/store with Java volatile semantics.
 */
#define atomic_mb_set(ptr, i)  ((void)atomic_xchg(ptr, i))

#elif defined(_ARCH_PPC)

/*
 * We use an eieio() for wmb() on powerpc.  This assumes we don't
 * need to order cacheable and non-cacheable stores with respect to
 * each other.
 *
 * smp_mb has the same problem as on x86 for not-very-new GCC
 * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011).
 */
#define smp_wmb()   ({ asm volatile("eieio" ::: "memory"); (void)0; })
#if defined(__powerpc64__)
#define smp_rmb()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#else
#define smp_rmb()   ({ asm volatile("sync" ::: "memory"); (void)0; })
#endif
#define smp_mb()    ({ asm volatile("sync" ::: "memory"); (void)0; })

#endif /* _ARCH_PPC */

/*
 * For (host) platforms we don't have explicit barrier definitions
 * for, we use the gcc __sync_synchronize() primitive to generate a
 * full barrier.  This should be safe on all platforms, though it may
 * be overkill for smp_wmb() and smp_rmb().
 */
#ifndef smp_mb
#define smp_mb()    __sync_synchronize()
#endif

#ifndef smp_wmb
#define smp_wmb()   __sync_synchronize()
#endif

#ifndef smp_rmb
#define smp_rmb()   __sync_synchronize()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends()   barrier()
#endif

/* These will only be atomic if the processor does the fetch or store
 * in a single memory operation, i.e. without tearing it into multiple
 * accesses.
 */
#define atomic_read(ptr)       (*(__typeof__(*ptr) volatile*) (ptr))
#define atomic_set(ptr, i)     ((*(__typeof__(*ptr) volatile*) (ptr)) = (i))

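/* A minimal illustration (not part of the original header): these are
 * plain volatile accesses, so they are only atomic for types that the
 * host can load or store with one instruction, e.g.
 *
 *     int flags;
 *     atomic_set(&flags, 0);          // fine: single 32-bit store
 *
 * A value wider than the native word (say, a 64-bit counter on a
 * 32-bit host) may be torn into two accesses and is not safe here.
 */
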
/**
 * atomic_rcu_read - reads an RCU-protected pointer into a local variable,
 * inside an RCU read-side critical section.  The pointer can later be
 * safely dereferenced within the critical section.
 *
 * This ensures that the pointer copy is invariant throughout the whole
 * critical section.
 *
 * Inserts memory barriers on architectures that require them (currently only
 * Alpha) and documents which pointers are protected by RCU.
 *
 * atomic_rcu_read also includes a compiler barrier to ensure that
 * value-speculative optimizations (e.g. VSS: Value Speculation
 * Scheduling) do not perform the data read before the pointer read
 * by speculating the value of the pointer.
 *
 * Should match atomic_rcu_set(), atomic_xchg(), atomic_cmpxchg().
 */
#define atomic_rcu_read(ptr)    ({                \
    typeof(*ptr) _val = atomic_read(ptr);         \
    smp_read_barrier_depends();                   \
    _val;                                         \
})

/**
 * atomic_rcu_set - assigns (publishes) a pointer to a new data structure
 * meant to be read by RCU read-side critical sections.
 *
 * Documents which pointers will be dereferenced by RCU read-side critical
 * sections and adds the required memory barriers on architectures requiring
 * them.  It also makes sure the compiler does not reorder the publication
 * ahead of the code initializing the data structure.
 *
 * Should match atomic_rcu_read().
 */
#define atomic_rcu_set(ptr, i)  do {              \
    smp_wmb();                                    \
    atomic_set(ptr, i);                           \
} while (0)

/* These have the same semantics as Java volatile variables.
 * See http://gee.cs.oswego.edu/dl/jmm/cookbook.html:
 * "1. Issue a StoreStore barrier (wmb) before each volatile store.
 *  2. Issue a StoreLoad barrier after each volatile store.
 *     Note that you could instead issue one before each volatile load, but
 *     this would be slower for typical programs using volatiles in which
 *     reads greatly outnumber writes. Alternatively, if available, you
 *     can implement volatile store as an atomic instruction (for example
 *     XCHG on x86) and omit the barrier. This may be more efficient if
 *     atomic instructions are cheaper than StoreLoad barriers.
 *  3. Issue LoadLoad and LoadStore barriers after each volatile load."
 *
 * If you prefer to think in terms of "pairing" of memory barriers,
 * an atomic_mb_read pairs with an atomic_mb_set.
 *
 * And for the few ia64 lovers that exist, an atomic_mb_read is a ld.acq,
 * while an atomic_mb_set is a st.rel followed by a memory barrier.
 *
 * These are a bit weaker than __atomic_load/store with __ATOMIC_SEQ_CST
 * (see docs/atomics.txt), and I'm not sure that __ATOMIC_ACQ_REL is enough.
 * Just always use the barriers manually by the rules above.
 */
#define atomic_mb_read(ptr)    ({           \
    typeof(*ptr) _val = atomic_read(ptr);   \
    smp_rmb();                              \
    _val;                                   \
})

#ifndef atomic_mb_set
#define atomic_mb_set(ptr, i)  do {         \
    smp_wmb();                              \
    atomic_set(ptr, i);                     \
    smp_mb();                               \
} while (0)
#endif
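
/* A minimal sketch (not part of the original header) of how the cookbook
 * rules above map onto these generic definitions:
 *
 *     atomic_mb_set(&flag, 1);        // smp_wmb(); store; smp_mb();
 *     ...
 *     if (atomic_mb_read(&flag)) {    // load; smp_rmb();
 *         ...
 *     }
 *
 * The smp_wmb() is rule 1, the trailing smp_mb() is rule 2, and the
 * smp_rmb() after the load covers rule 3.  On x86/s390x the set side is
 * instead an XCHG (see atomic_mb_set earlier), as the cookbook allows.
 */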

#ifndef atomic_xchg
#if defined(__clang__)
#define atomic_xchg(ptr, i)    __sync_swap(ptr, i)
#else
/* __sync_lock_test_and_set() is documented to be an acquire barrier only.  */
#define atomic_xchg(ptr, i)    (smp_mb(), __sync_lock_test_and_set(ptr, i))
#endif
#endif

/* Provide shorter names for GCC atomic builtins.  */
#define atomic_fetch_inc(ptr)  __sync_fetch_and_add(ptr, 1)
#define atomic_fetch_dec(ptr)  __sync_fetch_and_add(ptr, -1)
#define atomic_fetch_add       __sync_fetch_and_add
#define atomic_fetch_sub       __sync_fetch_and_sub
#define atomic_fetch_and       __sync_fetch_and_and
#define atomic_fetch_or        __sync_fetch_and_or
#define atomic_cmpxchg         __sync_val_compare_and_swap

/* And even shorter names that return void.  */
#define atomic_inc(ptr)        ((void) __sync_fetch_and_add(ptr, 1))
#define atomic_dec(ptr)        ((void) __sync_fetch_and_add(ptr, -1))
#define atomic_add(ptr, n)     ((void) __sync_fetch_and_add(ptr, n))
#define atomic_sub(ptr, n)     ((void) __sync_fetch_and_sub(ptr, n))
#define atomic_and(ptr, n)     ((void) __sync_fetch_and_and(ptr, n))
#define atomic_or(ptr, n)      ((void) __sync_fetch_and_or(ptr, n))

#endif /* __ATOMIC_RELAXED */
#endif /* __QEMU_ATOMIC_H */