/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)  { (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)  READ_ONCE((v)->counter)
#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))

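/*
 * Illustrative only, not part of this header: a minimal sketch of how an
 * atomic_t is typically declared and accessed.  The names used here are
 * hypothetical.
 *
 *      static atomic_t example_count = ATOMIC_INIT(0);
 *
 *      static void example_reset(void)
 *      {
 *              atomic_set(&example_count, 0);
 *      }
 *
 *      static int example_value(void)
 *      {
 *              return atomic_read(&example_count);
 *      }
 */
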
#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
        prefetchw(&v->counter);                                         \
        __asm__ __volatile__("@ atomic_" #op "\n"                       \
"1:     ldrex   %0, [%3]\n"                                             \
"       " #asm_op "     %0, %0, %4\n"                                   \
"       strex   %1, %0, [%3]\n"                                         \
"       teq     %1, #0\n"                                               \
"       bne     1b"                                                     \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
        : "r" (&v->counter), "Ir" (i)                                   \
        : "cc");                                                        \
}                                                                       \

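/*
 * For reference: ATOMIC_OP(add, +=, add) further down expands (modulo
 * whitespace) to the function below, a plain ldrex/strex retry loop with
 * no implied memory barriers.
 *
 *      static inline void atomic_add(int i, atomic_t *v)
 *      {
 *              unsigned long tmp;
 *              int result;
 *
 *              prefetchw(&v->counter);
 *              __asm__ __volatile__("@ atomic_add\n"
 *      "1:     ldrex   %0, [%3]\n"
 *      "       add     %0, %0, %4\n"
 *      "       strex   %1, %0, [%3]\n"
 *      "       teq     %1, #0\n"
 *      "       bne     1b"
 *              : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 *              : "r" (&v->counter), "Ir" (i)
 *              : "cc");
 *      }
 */
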
#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)      \
{                                                                       \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
        prefetchw(&v->counter);                                         \
                                                                        \
        __asm__ __volatile__("@ atomic_" #op "_return\n"                \
"1:     ldrex   %0, [%3]\n"                                             \
"       " #asm_op "     %0, %0, %4\n"                                   \
"       strex   %1, %0, [%3]\n"                                         \
"       teq     %1, #0\n"                                               \
"       bne     1b"                                                     \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
        : "r" (&v->counter), "Ir" (i)                                   \
        : "cc");                                                        \
                                                                        \
        return result;                                                  \
}

#define atomic_add_return_relaxed       atomic_add_return_relaxed
#define atomic_sub_return_relaxed       atomic_sub_return_relaxed

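/*
 * Only the _relaxed return variants are defined here.  In kernels of this
 * vintage the generic layer in <linux/atomic.h> derives the fully ordered
 * atomic_add_return()/atomic_sub_return() from them, roughly:
 *
 *      #define atomic_add_return(i, v)                         \
 *      ({                                                      \
 *              int __ret;                                      \
 *              smp_mb__before_atomic();                        \
 *              __ret = atomic_add_return_relaxed(i, v);        \
 *              smp_mb__after_atomic();                         \
 *              __ret;                                          \
 *      })
 *
 * The two #defines above advertise that this architecture supplies its own
 * relaxed implementations.
 */
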
static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
        int oldval;
        unsigned long res;

        prefetchw(&ptr->counter);

        do {
                __asm__ __volatile__("@ atomic_cmpxchg\n"
                "ldrex  %1, [%3]\n"
                "mov    %0, #0\n"
                "teq    %1, %4\n"
                "strexeq %0, %5, [%3]\n"
                    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                    : "r" (&ptr->counter), "Ir" (old), "r" (new)
                    : "cc");
        } while (res);

        return oldval;
}
#define atomic_cmpxchg_relaxed          atomic_cmpxchg_relaxed

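/*
 * Illustrative only: a typical compare-and-swap retry loop built on the
 * cmpxchg above (atomic_cmpxchg() being the ordered form the generic layer
 * derives from atomic_cmpxchg_relaxed()).  The function name and the
 * "increment but saturate at limit" policy are hypothetical.
 *
 *      static int example_inc_below(atomic_t *v, int limit)
 *      {
 *              int cur, old;
 *
 *              cur = atomic_read(v);
 *              for (;;) {
 *                      if (cur >= limit)
 *                              return cur;
 *                      old = atomic_cmpxchg(v, cur, cur + 1);
 *                      if (old == cur)
 *                              return cur + 1;
 *                      cur = old;
 *              }
 *      }
 */
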
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int oldval, newval;
        unsigned long tmp;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__ ("@ atomic_add_unless\n"
"1:     ldrex   %0, [%4]\n"
"       teq     %0, %5\n"
"       beq     2f\n"
"       add     %1, %0, %6\n"
"       strex   %2, %1, [%4]\n"
"       teq     %2, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (oldval != u)
                smp_mb();

        return oldval;
}

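/*
 * __atomic_add_unless() returns the value *v held before the call; the add
 * is performed only if that value was not 'u', and the trailing barrier is
 * skipped when no update took place.  The generic atomic_add_unless() and
 * atomic_inc_not_zero() helpers are layered on top of it.  Illustrative
 * sketch (hypothetical name): take a reference only if the count has not
 * already dropped to zero.
 *
 *      static bool example_ref_get(atomic_t *refcount)
 *      {
 *              return __atomic_add_unless(refcount, 1, 0) != 0;
 *      }
 */
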
#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void atomic_##op(int i, atomic_t *v)                      \
{                                                                       \
        unsigned long flags;                                            \
                                                                        \
        raw_local_irq_save(flags);                                      \
        v->counter c_op i;                                              \
        raw_local_irq_restore(flags);                                   \
}                                                                       \

#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int atomic_##op##_return(int i, atomic_t *v)              \
{                                                                       \
        unsigned long flags;                                            \
        int val;                                                        \
                                                                        \
        raw_local_irq_save(flags);                                      \
        v->counter c_op i;                                              \
        val = v->counter;                                               \
        raw_local_irq_restore(flags);                                   \
                                                                        \
        return val;                                                     \
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        raw_local_irq_save(flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;
        raw_local_irq_restore(flags);

        return ret;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
                c = old;
        return c;
}

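/*
 * Pre-ARMv6 CPUs lack ldrex/strex, so the fallbacks above simply disable
 * interrupts around a plain read-modify-write.  That is sufficient only
 * because SMP is ruled out by the #error above: the sole source of
 * concurrency is an interrupt on the same CPU.  For example,
 * ATOMIC_OP(add, +=, add), invoked further down, reduces in this branch
 * to roughly:
 *
 *      static inline void atomic_add(int i, atomic_t *v)
 *      {
 *              unsigned long flags;
 *
 *              raw_local_irq_save(flags);
 *              v->counter += i;
 *              raw_local_irq_restore(flags);
 *      }
 */
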
#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op)                                    \
        ATOMIC_OP(op, c_op, asm_op)                                     \
        ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

ATOMIC_OP(and, &=, and)
ATOMIC_OP(andnot, &= ~, bic)
ATOMIC_OP(or,  |=, orr)
ATOMIC_OP(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

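/*
 * Taken together, the ATOMIC_OPS()/ATOMIC_OP() invocations above generate,
 * in both the ARMv6+ and the pre-ARMv6 branches:
 *
 *      void atomic_add(int i, atomic_t *v);
 *      void atomic_sub(int i, atomic_t *v);
 *      void atomic_and(int i, atomic_t *v);
 *      void atomic_andnot(int i, atomic_t *v);
 *      void atomic_or(int i, atomic_t *v);
 *      void atomic_xor(int i, atomic_t *v);
 *
 * plus atomic_add_return_relaxed() and atomic_sub_return_relaxed() on
 * ARMv6+, or the fully ordered atomic_add_return()/atomic_sub_return()
 * on earlier CPUs.
 */
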
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_inc(v)           atomic_add(1, v)
#define atomic_dec(v)           atomic_sub(1, v)

#define atomic_inc_and_test(v)  (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)  (atomic_sub_return(1, v) == 0)
#define atomic_inc_return_relaxed(v)    (atomic_add_return_relaxed(1, v))
#define atomic_dec_return_relaxed(v)    (atomic_sub_return_relaxed(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

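/*
 * Illustrative only: the classic reference-counting pattern the helpers
 * above support.  The structure and function names are hypothetical.
 *
 *      struct example_obj {
 *              atomic_t refcnt;
 *      };
 *
 *      static void example_get(struct example_obj *obj)
 *      {
 *              atomic_inc(&obj->refcnt);
 *      }
 *
 *      static void example_put(struct example_obj *obj)
 *      {
 *              if (atomic_dec_and_test(&obj->refcnt))
 *                      kfree(obj);
 *      }
 */
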
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
        long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
        long long result;

        __asm__ __volatile__("@ atomic64_read\n"
"       ldrd    %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter), "Qo" (v->counter)
        );

        return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
        __asm__ __volatile__("@ atomic64_set\n"
"       strd    %2, %H2, [%1]"
        : "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        );
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
        long long result;

        __asm__ __volatile__("@ atomic64_read\n"
"       ldrexd  %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter), "Qo" (v->counter)
        );

        return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
        long long tmp;

        prefetchw(&v->counter);
        __asm__ __volatile__("@ atomic64_set\n"
"1:     ldrexd  %0, %H0, [%2]\n"
"       strexd  %0, %3, %H3, [%2]\n"
"       teq     %0, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}
#endif

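/*
 * Note on the two variants above: with LPAE, aligned 64-bit ldrd/strd
 * accesses are single-copy atomic, so a plain doubleword load or store is
 * enough.  Without LPAE they are not, which is why the non-LPAE
 * atomic64_set() must loop on ldrexd/strexd even for a simple store, and
 * atomic64_read() uses ldrexd to get an atomic 64-bit load.
 */
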
#define ATOMIC64_OP(op, op1, op2)                                       \
static inline void atomic64_##op(long long i, atomic64_t *v)            \
{                                                                       \
        long long result;                                               \
        unsigned long tmp;                                              \
                                                                        \
        prefetchw(&v->counter);                                         \
        __asm__ __volatile__("@ atomic64_" #op "\n"                     \
"1:     ldrexd  %0, %H0, [%3]\n"                                        \
"       " #op1 " %Q0, %Q0, %Q4\n"                                       \
"       " #op2 " %R0, %R0, %R4\n"                                       \
"       strexd  %1, %0, %H0, [%3]\n"                                    \
"       teq     %1, #0\n"                                               \
"       bne     1b"                                                     \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
        : "r" (&v->counter), "r" (i)                                    \
        : "cc");                                                        \
}                                                                       \

#define ATOMIC64_OP_RETURN(op, op1, op2)                                \
static inline long long                                                 \
atomic64_##op##_return_relaxed(long long i, atomic64_t *v)              \
{                                                                       \
        long long result;                                               \
        unsigned long tmp;                                              \
                                                                        \
        prefetchw(&v->counter);                                         \
                                                                        \
        __asm__ __volatile__("@ atomic64_" #op "_return\n"              \
"1:     ldrexd  %0, %H0, [%3]\n"                                        \
"       " #op1 " %Q0, %Q0, %Q4\n"                                       \
"       " #op2 " %R0, %R0, %R4\n"                                       \
"       strexd  %1, %0, %H0, [%3]\n"                                    \
"       teq     %1, #0\n"                                               \
"       bne     1b"                                                     \
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)               \
        : "r" (&v->counter), "r" (i)                                    \
        : "cc");                                                        \
                                                                        \
        return result;                                                  \
}

#define ATOMIC64_OPS(op, op1, op2)                                      \
        ATOMIC64_OP(op, op1, op2)                                       \
        ATOMIC64_OP_RETURN(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define atomic64_add_return_relaxed     atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed     atomic64_sub_return_relaxed

#define atomic64_andnot atomic64_andnot

ATOMIC64_OP(and, and, and)
ATOMIC64_OP(andnot, bic, bic)
ATOMIC64_OP(or,  orr, orr)
ATOMIC64_OP(xor, eor, eor)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

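/*
 * As with the 32-bit macros, the invocations above generate
 * atomic64_{add,sub,and,andnot,or,xor}() together with
 * atomic64_add_return_relaxed() and atomic64_sub_return_relaxed().  Each
 * operation is a two-instruction pair (e.g. adds/adc) acting on the low
 * and high words: the %Q and %R operand modifiers select the registers
 * holding the low and high 32 bits of the 64-bit value, and %H names the
 * partner register of the pair as required by ldrexd/strexd.
 */
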
static inline long long
atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
{
        long long oldval;
        unsigned long res;

        prefetchw(&ptr->counter);

        do {
                __asm__ __volatile__("@ atomic64_cmpxchg\n"
                "ldrexd         %1, %H1, [%3]\n"
                "mov            %0, #0\n"
                "teq            %1, %4\n"
                "teqeq          %H1, %H4\n"
                "strexdeq       %0, %5, %H5, [%3]"
                : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                : "r" (&ptr->counter), "r" (old), "r" (new)
                : "cc");
        } while (res);

        return oldval;
}
#define atomic64_cmpxchg_relaxed        atomic64_cmpxchg_relaxed

static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
{
        long long result;
        unsigned long tmp;

        prefetchw(&ptr->counter);

        __asm__ __volatile__("@ atomic64_xchg\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       strexd  %1, %4, %H4, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
        : "r" (&ptr->counter), "r" (new)
        : "cc");

        return result;
}
#define atomic64_xchg_relaxed           atomic64_xchg_relaxed

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
        long long result;
        unsigned long tmp;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       subs    %Q0, %Q0, #1\n"
"       sbc     %R0, %R0, #0\n"
"       teq     %R0, #0\n"
"       bmi     2f\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter)
        : "cc");

        smp_mb();

        return result;
}

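/*
 * atomic64_dec_if_positive() decrements the counter only if the result
 * stays non-negative, and returns the decremented value either way (it is
 * simply not stored when it would be negative).  Illustrative sketch with
 * a hypothetical semaphore-like counter:
 *
 *      static bool example_try_take(atomic64_t *available)
 *      {
 *              return atomic64_dec_if_positive(available) >= 0;
 *      }
 */
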
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
        long long val;
        unsigned long tmp;
        int ret = 1;

        smp_mb();
        prefetchw(&v->counter);

        __asm__ __volatile__("@ atomic64_add_unless\n"
"1:     ldrexd  %0, %H0, [%4]\n"
"       teq     %0, %5\n"
"       teqeq   %H0, %H5\n"
"       moveq   %1, #0\n"
"       beq     2f\n"
"       adds    %Q0, %Q0, %Q6\n"
"       adc     %R0, %R0, %R6\n"
"       strexd  %2, %0, %H0, [%4]\n"
"       teq     %2, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (ret)
                smp_mb();

        return ret;
}

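/*
 * Note the return convention: unlike __atomic_add_unless() above, which
 * returns the old 32-bit value, atomic64_add_unless() returns non-zero if
 * the addition was performed and zero if *v already equalled 'u'.  That is
 * what lets atomic64_inc_not_zero() below be a direct alias.  Illustrative
 * sketch (hypothetical name):
 *
 *      static bool example_ref_get64(atomic64_t *refcount)
 *      {
 *              return atomic64_inc_not_zero(refcount);
 *      }
 */
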
#define atomic64_add_negative(a, v)     (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)                 atomic64_add(1LL, (v))
#define atomic64_inc_return_relaxed(v)  atomic64_add_return_relaxed(1LL, (v))
#define atomic64_inc_and_test(v)        (atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)     (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)                 atomic64_sub(1LL, (v))
#define atomic64_dec_return_relaxed(v)  atomic64_sub_return_relaxed(1LL, (v))
#define atomic64_dec_and_test(v)        (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)        atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */