/* linux/include/linux/atomic.h */
/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
#include <asm/barrier.h>

/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */

#ifndef atomic_read_acquire
#define  atomic_read_acquire(v)         smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic_set_release
#define  atomic_set_release(v, i)       smp_store_release(&(v)->counter, (i))
#endif

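/*
 * Illustrative sketch (editorial example, not part of the kernel API; the
 * example_* names are made up): a minimal message-passing pairing built on
 * the two primitives above.  The _release store orders the payload write
 * before the flag update, and the _acquire load orders the flag check
 * before the payload read, so a consumer that sees the flag set also sees
 * the payload.
 */
#if 0	/* example only, never compiled */
static int example_payload;
static atomic_t example_ready = ATOMIC_INIT(0);

static void example_publish(int data)
{
        example_payload = data;                 /* plain store of the payload */
        atomic_set_release(&example_ready, 1);  /* ordered after the store above */
}

static int example_consume(int *data)
{
        if (!atomic_read_acquire(&example_ready))
                return 0;                       /* nothing published yet */
        *data = example_payload;                /* ordered after the acquire load */
        return 1;
}
#endif
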
/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 *
 * If an architecture overrides __atomic_op_acquire() it will probably want
 * to define smp_mb__after_spinlock().
 */
#define __atomic_op_acquire(op, args...)                                \
({                                                                      \
        typeof(op##_relaxed(args)) __ret  = op##_relaxed(args);         \
        smp_mb__after_atomic();                                         \
        __ret;                                                          \
})

#define __atomic_op_release(op, args...)                                \
({                                                                      \
        smp_mb__before_atomic();                                        \
        op##_relaxed(args);                                             \
})

#define __atomic_op_fence(op, args...)                                  \
({                                                                      \
        typeof(op##_relaxed(args)) __ret;                               \
        smp_mb__before_atomic();                                        \
        __ret = op##_relaxed(args);                                     \
        smp_mb__after_atomic();                                         \
        __ret;                                                          \
})

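/*
 * Worked expansion (editorial example): with the fallbacks below, an
 * architecture that provides only atomic_add_return_relaxed() still gets
 * the ordered forms.  atomic_add_return_acquire(i, v) then expands roughly
 * to
 *
 *	({
 *		int __ret = atomic_add_return_relaxed(i, v);
 *		smp_mb__after_atomic();
 *		__ret;
 *	})
 *
 * i.e. a full barrier after the relaxed op, which is stronger than strictly
 * needed for ACQUIRE but always correct; architectures with cheaper native
 * acquire/release instructions override these fallbacks instead.
 */
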
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
#define  atomic_add_return_relaxed      atomic_add_return
#define  atomic_add_return_acquire      atomic_add_return
#define  atomic_add_return_release      atomic_add_return

#else /* atomic_add_return_relaxed */

#ifndef atomic_add_return_acquire
#define  atomic_add_return_acquire(...)                                 \
        __atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return_release
#define  atomic_add_return_release(...)                                 \
        __atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return
#define  atomic_add_return(...)                                         \
        __atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
#endif /* atomic_add_return_relaxed */

/* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_relaxed
#define  atomic_sub_return_relaxed      atomic_sub_return
#define  atomic_sub_return_acquire      atomic_sub_return
#define  atomic_sub_return_release      atomic_sub_return

#else /* atomic_sub_return_relaxed */

#ifndef atomic_sub_return_acquire
#define  atomic_sub_return_acquire(...)                                 \
        __atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return_release
#define  atomic_sub_return_release(...)                                 \
        __atomic_op_release(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return
#define  atomic_sub_return(...)                                         \
        __atomic_op_fence(atomic_sub_return, __VA_ARGS__)
#endif
#endif /* atomic_sub_return_relaxed */

/* atomic_xchg_relaxed */
#ifndef atomic_xchg_relaxed
#define  atomic_xchg_relaxed            atomic_xchg
#define  atomic_xchg_acquire            atomic_xchg
#define  atomic_xchg_release            atomic_xchg

#else /* atomic_xchg_relaxed */

#ifndef atomic_xchg_acquire
#define  atomic_xchg_acquire(...)                                       \
        __atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg_release
#define  atomic_xchg_release(...)                                       \
        __atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg
#define  atomic_xchg(...)                                               \
        __atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
#endif /* atomic_xchg_relaxed */

/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define  atomic_cmpxchg_relaxed         atomic_cmpxchg
#define  atomic_cmpxchg_acquire         atomic_cmpxchg
#define  atomic_cmpxchg_release         atomic_cmpxchg

#else /* atomic_cmpxchg_relaxed */

#ifndef atomic_cmpxchg_acquire
#define  atomic_cmpxchg_acquire(...)                                    \
        __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg_release
#define  atomic_cmpxchg_release(...)                                    \
        __atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg
#define  atomic_cmpxchg(...)                                            \
        __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic_cmpxchg_relaxed */

#ifndef atomic64_read_acquire
#define  atomic64_read_acquire(v)       smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic64_set_release
#define  atomic64_set_release(v, i)     smp_store_release(&(v)->counter, (i))
#endif

/* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_relaxed
#define  atomic64_add_return_relaxed    atomic64_add_return
#define  atomic64_add_return_acquire    atomic64_add_return
#define  atomic64_add_return_release    atomic64_add_return

#else /* atomic64_add_return_relaxed */

#ifndef atomic64_add_return_acquire
#define  atomic64_add_return_acquire(...)                               \
        __atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return_release
#define  atomic64_add_return_release(...)                               \
        __atomic_op_release(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return
#define  atomic64_add_return(...)                                       \
        __atomic_op_fence(atomic64_add_return, __VA_ARGS__)
#endif
#endif /* atomic64_add_return_relaxed */

/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_relaxed
#define  atomic64_sub_return_relaxed    atomic64_sub_return
#define  atomic64_sub_return_acquire    atomic64_sub_return
#define  atomic64_sub_return_release    atomic64_sub_return

#else /* atomic64_sub_return_relaxed */

#ifndef atomic64_sub_return_acquire
#define  atomic64_sub_return_acquire(...)                               \
        __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return_release
#define  atomic64_sub_return_release(...)                               \
        __atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return
#define  atomic64_sub_return(...)                                       \
        __atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif /* atomic64_sub_return_relaxed */

/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_relaxed
#define  atomic64_xchg_relaxed          atomic64_xchg
#define  atomic64_xchg_acquire          atomic64_xchg
#define  atomic64_xchg_release          atomic64_xchg

#else /* atomic64_xchg_relaxed */

#ifndef atomic64_xchg_acquire
#define  atomic64_xchg_acquire(...)                                     \
        __atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg_release
#define  atomic64_xchg_release(...)                                     \
        __atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg
#define  atomic64_xchg(...)                                             \
        __atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif /* atomic64_xchg_relaxed */

/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_relaxed
#define  atomic64_cmpxchg_relaxed       atomic64_cmpxchg
#define  atomic64_cmpxchg_acquire       atomic64_cmpxchg
#define  atomic64_cmpxchg_release       atomic64_cmpxchg

#else /* atomic64_cmpxchg_relaxed */

#ifndef atomic64_cmpxchg_acquire
#define  atomic64_cmpxchg_acquire(...)                                  \
        __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg_release
#define  atomic64_cmpxchg_release(...)                                  \
        __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg
#define  atomic64_cmpxchg(...)                                          \
        __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic64_cmpxchg_relaxed */

/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define  cmpxchg_relaxed                cmpxchg
#define  cmpxchg_acquire                cmpxchg
#define  cmpxchg_release                cmpxchg

#else /* cmpxchg_relaxed */

#ifndef cmpxchg_acquire
#define  cmpxchg_acquire(...)                                           \
        __atomic_op_acquire(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg_release
#define  cmpxchg_release(...)                                           \
        __atomic_op_release(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg
#define  cmpxchg(...)                                                   \
        __atomic_op_fence(cmpxchg, __VA_ARGS__)
#endif
#endif /* cmpxchg_relaxed */

/* cmpxchg64_relaxed */
#ifndef cmpxchg64_relaxed
#define  cmpxchg64_relaxed              cmpxchg64
#define  cmpxchg64_acquire              cmpxchg64
#define  cmpxchg64_release              cmpxchg64

#else /* cmpxchg64_relaxed */

#ifndef cmpxchg64_acquire
#define  cmpxchg64_acquire(...)                                         \
        __atomic_op_acquire(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64_release
#define  cmpxchg64_release(...)                                         \
        __atomic_op_release(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64
#define  cmpxchg64(...)                                                 \
        __atomic_op_fence(cmpxchg64, __VA_ARGS__)
#endif
#endif /* cmpxchg64_relaxed */

/* xchg_relaxed */
#ifndef xchg_relaxed
#define  xchg_relaxed                   xchg
#define  xchg_acquire                   xchg
#define  xchg_release                   xchg

#else /* xchg_relaxed */

#ifndef xchg_acquire
#define  xchg_acquire(...)              __atomic_op_acquire(xchg, __VA_ARGS__)
#endif

#ifndef xchg_release
#define  xchg_release(...)              __atomic_op_release(xchg, __VA_ARGS__)
#endif

#ifndef xchg
#define  xchg(...)                      __atomic_op_fence(xchg, __VA_ARGS__)
#endif
#endif /* xchg_relaxed */

/*
 * Provide __deprecated wrappers for the old barrier names on top of the new
 * interface, avoiding flag day changes.
 * We need the ugly external functions to break header recursion hell.
 */
#ifndef smp_mb__before_atomic_inc
static inline void __deprecated smp_mb__before_atomic_inc(void)
{
        extern void __smp_mb__before_atomic(void);
        __smp_mb__before_atomic();
}
#endif

#ifndef smp_mb__after_atomic_inc
static inline void __deprecated smp_mb__after_atomic_inc(void)
{
        extern void __smp_mb__after_atomic(void);
        __smp_mb__after_atomic();
}
#endif

#ifndef smp_mb__before_atomic_dec
static inline void __deprecated smp_mb__before_atomic_dec(void)
{
        extern void __smp_mb__before_atomic(void);
        __smp_mb__before_atomic();
}
#endif

#ifndef smp_mb__after_atomic_dec
static inline void __deprecated smp_mb__after_atomic_dec(void)
{
        extern void __smp_mb__after_atomic(void);
        __smp_mb__after_atomic();
}
#endif

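/*
 * Illustrative note (editorial, not from the original header): callers are
 * expected to migrate from the old per-operation barrier names to the
 * unified ones, e.g.
 *
 *	smp_mb__before_atomic_inc();	(deprecated)
 *	atomic_inc(&v);
 *
 * becomes
 *
 *	smp_mb__before_atomic();	(preferred)
 *	atomic_inc(&v);
 */
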
/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
        return __atomic_add_unless(v, a, u) != u;
}

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v)          atomic_add_unless((v), 1, 0)
#endif

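/*
 * Illustrative sketch (editorial example; struct example_obj and
 * example_obj_get() are made up): the usual "take a reference only while
 * the object is still live" pattern built on atomic_inc_not_zero().
 */
#if 0	/* example only, never compiled */
struct example_obj {
        atomic_t refcount;
};

/* Returns the object with an extra reference held, or NULL if it is dying. */
static struct example_obj *example_obj_get(struct example_obj *obj)
{
        if (!obj || !atomic_inc_not_zero(&obj->refcount))
                return NULL;    /* refcount already dropped to zero */
        return obj;
}
#endif
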
/**
 * atomic_inc_not_zero_hint - increment if not zero
 * @v: pointer of type atomic_t
 * @hint: probable value of the atomic before the increment
 *
 * This version of atomic_inc_not_zero() gives a hint of the probable
 * value of the atomic. This helps the processor avoid reading the memory
 * location before doing the atomic read/modify/write cycle, lowering the
 * number of bus transactions on some arches.
 *
 * Returns: 0 if the increment was not done, 1 otherwise.
 */
#ifndef atomic_inc_not_zero_hint
static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
{
        int val, c = hint;

        /* sanity test, should be removed by the compiler if hint is a constant */
        if (!hint)
                return atomic_inc_not_zero(v);

        do {
                val = atomic_cmpxchg(v, c, c + 1);
                if (val == c)
                        return 1;
                c = val;
        } while (c);

        return 0;
}
#endif

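/*
 * Illustrative sketch (editorial example; example_get_cached() is made up):
 * when most live objects are expected to sit at a known count, passing that
 * count as the hint lets the first cmpxchg() attempt succeed without a
 * preliminary read.
 */
#if 0	/* example only, never compiled */
static int example_get_cached(atomic_t *refcount)
{
        /* Cached-but-idle objects are assumed to sit at a refcount of 1. */
        return atomic_inc_not_zero_hint(refcount, 1);
}
#endif
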
/**
 * atomic_inc_unless_negative - increment unless the number is negative
 * @p: pointer of type atomic_t
 *
 * Atomically increments @p by 1, so long as @p is not negative.
 * Returns 1 if the increment was done, and 0 otherwise.
 */
#ifndef atomic_inc_unless_negative
static inline int atomic_inc_unless_negative(atomic_t *p)
{
        int v, v1;
        for (v = 0; v >= 0; v = v1) {
                v1 = atomic_cmpxchg(p, v, v + 1);
                if (likely(v1 == v))
                        return 1;
        }
        return 0;
}
#endif

/**
 * atomic_dec_unless_positive - decrement unless the number is positive
 * @p: pointer of type atomic_t
 *
 * Atomically decrements @p by 1, so long as @p is not positive.
 * Returns 1 if the decrement was done, and 0 otherwise.
 */
#ifndef atomic_dec_unless_positive
static inline int atomic_dec_unless_positive(atomic_t *p)
{
        int v, v1;
        for (v = 0; v <= 0; v = v1) {
                v1 = atomic_cmpxchg(p, v, v - 1);
                if (likely(v1 == v))
                        return 1;
        }
        return 0;
}
#endif

/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
#ifndef atomic_dec_if_positive
static inline int atomic_dec_if_positive(atomic_t *v)
{
        int c, old, dec;
        c = atomic_read(v);
        for (;;) {
                dec = c - 1;
                if (unlikely(dec < 0))
                        break;
                old = atomic_cmpxchg((v), c, dec);
                if (likely(old == c))
                        break;
                c = old;
        }
        return dec;
}
#endif

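/*
 * Illustrative sketch (editorial example; example_take_token() is made up):
 * atomic_dec_if_positive() suits "consume one token if any remain" logic,
 * since a negative return means nothing was taken and the counter is never
 * pushed below zero.
 */
#if 0	/* example only, never compiled */
static int example_take_token(atomic_t *tokens)
{
        /* Old value minus one is returned; negative means no token was taken. */
        return atomic_dec_if_positive(tokens) >= 0;
}
#endif
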
/*
 * Generic fallback: implement atomic_or() with a cmpxchg() loop for
 * architectures that do not provide a native version.
 */
#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
static inline void atomic_or(int i, atomic_t *v)
{
        int old;
        int new;

        do {
                old = atomic_read(v);
                new = old | i;
        } while (atomic_cmpxchg(v, old, new) != old);
}
#endif /* #ifndef CONFIG_ARCH_HAS_ATOMIC_OR */

#include <asm-generic/atomic-long.h>
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
#endif /* _LINUX_ATOMIC_H */