linux/arch/s390/include/asm/atomic.h
/*
 * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *            Denis Joseph Barrow,
 *            Arnd Bergmann <arndb@de.ibm.com>
 *
 * Atomic operations that C can't guarantee for us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in an SMP environment.
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)  { (i) }

#define __ATOMIC_NO_BARRIER	"\n"

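/*
 * Two implementations are selected at compile time:
 *
 * - CONFIG_HAVE_MARCH_Z196_FEATURES: the z196 interlocked-access
 *   facility provides load-and-{add,and,or,xor} instructions that
 *   atomically update the storage operand and return its old value,
 *   so no retry loop is needed.
 *
 * - Older machines: fall back to a classic compare-and-swap (CS)
 *   retry loop.
 *
 * Ops that return a value (e.g. atomic_add_return) are fully ordered
 * and therefore pass __ATOMIC_BARRIER; void ops (e.g. atomic_add)
 * pass __ATOMIC_NO_BARRIER.
 */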
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC_OR	"lao"
#define __ATOMIC_AND	"lan"
#define __ATOMIC_ADD	"laa"
#define __ATOMIC_XOR	"lax"
#define __ATOMIC_BARRIER "bcr	14,0\n"

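/*
 * "bcr 14,0" is the fast serialization introduced with z196 and acts
 * as a full memory barrier. Each load-and-* instruction fetches the
 * old value and applies the operation to storage in one interlocked
 * update, so e.g. __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER)
 * boils down to roughly (an illustrative sketch only; register
 * numbers are made up):
 *
 *	bcr	14,0			# full barrier
 *	laa	%r2,%r3,0(%r1)		# %r2 = v->counter; v->counter += %r3
 *	bcr	14,0			# full barrier
 */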
#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
	int old_val;							\
									\
	typecheck(atomic_t *, ptr);					\
	asm volatile(							\
		__barrier						\
		op_string "	%0,%2,%1\n"				\
		__barrier						\
		: "=d" (old_val), "+Q" ((ptr)->counter)			\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC_OR	"or"
#define __ATOMIC_AND	"nr"
#define __ATOMIC_ADD	"ar"
#define __ATOMIC_XOR	"xr"
#define __ATOMIC_BARRIER "\n"

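/*
 * No explicit barrier instruction is needed here: CS itself performs
 * serialization before and after its storage access. The loop below
 * loads the current value, computes the new one, and retries via
 * "jl 0b" (CS sets condition code 1 on mismatch) whenever another CPU
 * changed the counter in the meantime; on failure CS reloads %0 with
 * the fresh value, so the retry starts from up-to-date data.
 */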
#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
	int old_val, new_val;						\
									\
	typecheck(atomic_t *, ptr);					\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

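/*
 * Naturally aligned 4-byte loads and stores are atomic on s390, so a
 * plain L/ST pair suffices for read and set; the inline asm merely
 * guarantees a single access instruction is emitted.
 */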
static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

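/*
 * Returns the new value: the loop yields the pre-add value, so "i" is
 * added once more. Fully ordered via __ATOMIC_BARRIER.
 */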
static inline int atomic_add_return(int i, atomic_t *v)
{
	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
}

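/*
 * For a compile-time constant in [-128, 127] on z196, ASI (add signed
 * 8-bit immediate to storage) updates the counter in one interlocked
 * instruction, avoiding both the loop and the operand register setup.
 */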
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"asi	%0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
		return;
	}
#endif
	__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
}

#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

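/*
 * Typical resource-counting usage of the helpers above (a minimal
 * sketch, not part of this header; "obj", "refcount" and "free_obj"
 * are hypothetical):
 *
 *	atomic_inc(&obj->refcount);		take a reference
 *	...
 *	if (atomic_dec_and_test(&obj->refcount))
 *		free_obj(obj);			last reference is gone
 */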
#define ATOMIC_OP(op, OP)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER);	\
}

ATOMIC_OP(and, AND)
ATOMIC_OP(or, OR)
ATOMIC_OP(xor, XOR)

#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
}

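/*
 * Add "a" to "v" unless "v" equals "u"; returns the value observed
 * before the add, so the caller can detect success by comparing the
 * result against "u".
 */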
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

#undef __ATOMIC_LOOP

#define ATOMIC64_INIT(i)  { (i) }

#define __ATOMIC64_NO_BARRIER	"\n"

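/*
 * The 64-bit variants mirror the 32-bit code above, using the 64-bit
 * instruction forms: laag/lang/laog/laxg with the interlocked-access
 * facility, or an lg/csg retry loop on older machines.
 */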
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC64_OR	"laog"
#define __ATOMIC64_AND	"lang"
#define __ATOMIC64_ADD	"laag"
#define __ATOMIC64_XOR	"laxg"
#define __ATOMIC64_BARRIER "bcr	14,0\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
	long long old_val;						\
									\
	typecheck(atomic64_t *, ptr);					\
	asm volatile(							\
		__barrier						\
		op_string "	%0,%2,%1\n"				\
		__barrier						\
		: "=d" (old_val), "+Q" ((ptr)->counter)			\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC64_OR	"ogr"
#define __ATOMIC64_AND	"ngr"
#define __ATOMIC64_ADD	"agr"
#define __ATOMIC64_XOR	"xgr"
#define __ATOMIC64_BARRIER "\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
	long long old_val, new_val;					\
									\
	typecheck(atomic64_t *, ptr);					\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline long long atomic64_read(const atomic64_t *v)
{
	long long c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
}

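/*
 * AGSI is the 64-bit analog of ASI above: for a compile-time constant
 * in [-128, 127] it adds the immediate to storage in one interlocked
 * instruction.
 */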
static inline void atomic64_add(long long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"agsi	%0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
		return;
	}
#endif
	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
}

#define ATOMIC64_OP(op, OP)						\
static inline void atomic64_##op(long i, atomic64_t *v)		\
{									\
	__ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER);	\
}

ATOMIC64_OP(and, AND)
ATOMIC64_OP(or, OR)
ATOMIC64_OP(xor, XOR)

#undef ATOMIC64_OP
#undef __ATOMIC64_LOOP

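/*
 * Add "i" to "v" unless "v" equals "u"; returns nonzero iff the add
 * was actually performed.
 */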
static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + i);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

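/*
 * Decrement "v" unless the result would drop below zero; returns the
 * decremented value. A negative return value means the counter was
 * left untouched.
 */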
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg(v, c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#endif /* __ARCH_S390_ATOMIC__ */