linux/arch/tile/include/asm/atomic_64.h
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * Do not include directly; use <linux/atomic.h>.
 */

#ifndef _ASM_TILE_ATOMIC_64_H
#define _ASM_TILE_ATOMIC_64_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <arch/spr_def.h>

/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */

#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))

/*
 * The smp_mb() operations throughout exist because Linux requires
 * a full memory barrier before and after any routine that updates
 * memory and returns a value.
 */

/*
 * Note a subtlety of the locking here.  We are required to provide a
 * full memory barrier before and after the operation.  However, we
 * only provide an explicit mb before the operation.  After the
 * operation, we use barrier() to get a full mb for free, because:
 *
 * (1) The barrier directive to the compiler prohibits any instructions
 * from being statically hoisted before the barrier;
 * (2) the microarchitecture will not issue any further instructions
 * until the fetchadd result is available for the "+ i" add instruction;
 * (3) the smp_mb() before the fetchadd ensures that no other memory
 * operations are in flight at this point.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
        int val;
        smp_mb();  /* barrier for proper semantics */
        val = __insn_fetchadd4((void *)&v->counter, i) + i;
        barrier();  /* equivalent to smp_mb(); see block comment above */
        return val;
}
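
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * helper showing how atomic_add_return() is typically used.  The return
 * value is the counter *after* the addition, with full barrier semantics
 * on both sides as described in the comment above.
 */
static inline int example_refcount_inc(atomic_t *refcount)
{
        /* Bump the count and return the new value. */
        return atomic_add_return(1, refcount);
}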

#define ATOMIC_OPS(op)                                                  \
static inline int atomic_fetch_##op(int i, atomic_t *v)                 \
{                                                                       \
        int val;                                                        \
        smp_mb();                                                       \
        val = __insn_fetch##op##4((void *)&v->counter, i);              \
        smp_mb();                                                       \
        return val;                                                     \
}                                                                       \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        __insn_fetch##op##4((void *)&v->counter, i);                    \
}

ATOMIC_OPS(add)
ATOMIC_OPS(and)
ATOMIC_OPS(or)

#undef ATOMIC_OPS
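
/*
 * For reference (not in the original header), ATOMIC_OPS(add) above
 * expands to roughly the following pair of functions:
 *
 *      static inline int atomic_fetch_add(int i, atomic_t *v)
 *      {
 *              int val;
 *              smp_mb();
 *              val = __insn_fetchadd4((void *)&v->counter, i);
 *              smp_mb();
 *              return val;
 *      }
 *      static inline void atomic_add(int i, atomic_t *v)
 *      {
 *              __insn_fetchadd4((void *)&v->counter, i);
 *      }
 */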

static inline int atomic_fetch_xor(int i, atomic_t *v)
{
        int guess, oldval = v->counter;
        smp_mb();
        do {
                guess = oldval;
                __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
                oldval = __insn_cmpexch4(&v->counter, guess ^ i);
        } while (guess != oldval);
        smp_mb();
        return oldval;
}

static inline void atomic_xor(int i, atomic_t *v)
{
        int guess, oldval = v->counter;
        do {
                guess = oldval;
                __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
                oldval = __insn_cmpexch4(&v->counter, guess ^ i);
        } while (guess != oldval);
}
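
/*
 * Illustrative sketch, not part of the original header: the two-step
 * cmpexch protocol used in the xor loops above, written out as a
 * hypothetical 32-bit compare-and-swap helper.  SPR_CMPEXCH_VALUE
 * holds the expected value; __insn_cmpexch4 then stores the new value
 * only if the word in memory still matches it, and returns the prior
 * contents either way (barriers omitted for brevity).
 */
static inline int example_cmpxchg4(int *ptr, int expected, int newval)
{
        __insn_mtspr(SPR_CMPEXCH_VALUE, expected);
        return __insn_cmpexch4(ptr, newval);
}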

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int guess, oldval = v->counter;
        do {
                if (oldval == u)
                        break;
                guess = oldval;
                oldval = cmpxchg(&v->counter, guess, guess + a);
        } while (guess != oldval);
        return oldval;
}
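
/*
 * Illustrative usage, not part of the original header: __atomic_add_unless()
 * returns the value the counter held before any update, so a hypothetical
 * "take a reference only if one is still held" helper could be written as:
 */
static inline int example_get_if_live(atomic_t *refcount)
{
        /* Nonzero iff the old count was nonzero, i.e. the increment happened. */
        return __atomic_add_unless(refcount, 1, 0) != 0;
}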

/* Now the true 64-bit operations. */

#define ATOMIC64_INIT(i)        { (i) }

#define atomic64_read(v)        READ_ONCE((v)->counter)
#define atomic64_set(v, i)      WRITE_ONCE((v)->counter, (i))

static inline long atomic64_add_return(long i, atomic64_t *v)
{
        long val;
        smp_mb();  /* barrier for proper semantics */
        val = __insn_fetchadd((void *)&v->counter, i) + i;
        barrier();  /* equivalent to smp_mb; see atomic_add_return() */
        return val;
}

#define ATOMIC64_OPS(op)                                                \
static inline long atomic64_fetch_##op(long i, atomic64_t *v)           \
{                                                                       \
        long val;                                                       \
        smp_mb();                                                       \
        val = __insn_fetch##op((void *)&v->counter, i);                 \
        smp_mb();                                                       \
        return val;                                                     \
}                                                                       \
static inline void atomic64_##op(long i, atomic64_t *v)                 \
{                                                                       \
        __insn_fetch##op((void *)&v->counter, i);                       \
}

ATOMIC64_OPS(add)
ATOMIC64_OPS(and)
ATOMIC64_OPS(or)

#undef ATOMIC64_OPS

static inline long atomic64_fetch_xor(long i, atomic64_t *v)
{
        long guess, oldval = v->counter;
        smp_mb();
        do {
                guess = oldval;
                __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
                oldval = __insn_cmpexch(&v->counter, guess ^ i);
        } while (guess != oldval);
        smp_mb();
        return oldval;
}

static inline void atomic64_xor(long i, atomic64_t *v)
{
        long guess, oldval = v->counter;
        do {
                guess = oldval;
                __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
                oldval = __insn_cmpexch(&v->counter, guess ^ i);
        } while (guess != oldval);
}

static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
{
        long guess, oldval = v->counter;
        do {
                if (oldval == u)
                        break;
                guess = oldval;
                oldval = cmpxchg(&v->counter, guess, guess + a);
        } while (guess != oldval);
        return oldval != u;
}

#define atomic64_sub_return(i, v)       atomic64_add_return(-(i), (v))
#define atomic64_fetch_sub(i, v)        atomic64_fetch_add(-(i), (v))
#define atomic64_sub(i, v)              atomic64_add(-(i), (v))
#define atomic64_inc_return(v)          atomic64_add_return(1, (v))
#define atomic64_dec_return(v)          atomic64_sub_return(1, (v))
#define atomic64_inc(v)                 atomic64_add(1, (v))
#define atomic64_dec(v)                 atomic64_sub(1, (v))

#define atomic64_inc_and_test(v)        (atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)        (atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i, v)     (atomic64_sub_return((i), (v)) == 0)
#define atomic64_add_negative(i, v)     (atomic64_add_return((i), (v)) < 0)

#define atomic64_inc_not_zero(v)        atomic64_add_unless((v), 1, 0)
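
/*
 * Illustrative usage, not part of the original header: the classic
 * reference-drop pattern built on atomic64_dec_and_test(), with a
 * hypothetical release callback supplied by the caller.
 */
static inline void example_put_ref64(atomic64_t *refcount,
                                     void (*release)(atomic64_t *))
{
        /* dec_and_test() is true only for the caller that reaches zero. */
        if (atomic64_dec_and_test(refcount))
                release(refcount);
}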

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_64_H */