linux/arch/arm/include/asm/atomic.h
/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)  { (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)  (*(volatile int *)&(v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
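/*
 * Illustrative sketch only (not kernel API): each ll/sc routine below
 * is the assembly form of a C-style retry loop along the lines of
 *
 *      do {
 *              old = load_exclusive(&v->counter);
 *      } while (store_exclusive(&v->counter, old + i) != 0);
 *
 * where the exclusive store fails, and the loop retries, if the
 * exclusive monitor was cleared between the two accesses.
 */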
static inline void atomic_add(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        __asm__ __volatile__("@ atomic_add\n"
"1:     ldrex   %0, [%3]\n"
"       add     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
}

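/*
 * Same ll/sc loop as atomic_add(), but the updated counter value is
 * returned and the smp_mb() calls on either side make the operation
 * fully ordered, as expected of the value-returning atomics.
 */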
static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        smp_mb();

        __asm__ __volatile__("@ atomic_add_return\n"
"1:     ldrex   %0, [%3]\n"
"       add     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");

        smp_mb();

        return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        __asm__ __volatile__("@ atomic_sub\n"
"1:     ldrex   %0, [%3]\n"
"       sub     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        smp_mb();

        __asm__ __volatile__("@ atomic_sub_return\n"
"1:     ldrex   %0, [%3]\n"
"       sub     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "Ir" (i)
        : "cc");

        smp_mb();

        return result;
}

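/*
 * Compare the counter with 'old' and, only if they match, replace it
 * with 'new'.  The value actually observed is returned, so the caller
 * can tell whether the exchange took place.
 */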
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
        unsigned long oldval, res;

        smp_mb();

        do {
                __asm__ __volatile__("@ atomic_cmpxchg\n"
                "ldrex  %1, [%3]\n"
                "mov    %0, #0\n"
                "teq    %1, %4\n"
                "strexeq %0, %5, [%3]\n"
                    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                    : "r" (&ptr->counter), "Ir" (old), "r" (new)
                    : "cc");
        } while (res);

        smp_mb();

        return oldval;
}

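/*
 * Atomically clear the bits set in 'mask' at '*addr', using the same
 * ldrex/strex retry loop as the arithmetic helpers above.
 */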
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__("@ atomic_clear_mask\n"
"1:     ldrex   %0, [%3]\n"
"       bic     %0, %0, %4\n"
"       strex   %1, %0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
        : "r" (addr), "Ir" (mask)
        : "cc");
}

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

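/*
 * Pre-ARMv6 CPUs have no ldrex/strex, so these UP-only fallbacks make
 * each read-modify-write atomic by disabling interrupts around it.
 */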
static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long flags;
        int val;

        raw_local_irq_save(flags);
        val = v->counter;
        v->counter = val += i;
        raw_local_irq_restore(flags);

        return val;
}
#define atomic_add(i, v)        (void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long flags;
        int val;

        raw_local_irq_save(flags);
        val = v->counter;
        v->counter = val -= i;
        raw_local_irq_restore(flags);

        return val;
}
#define atomic_sub(i, v)        (void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        raw_local_irq_save(flags);
        ret = v->counter;
        if (likely(ret == old))
                v->counter = new;
        raw_local_irq_restore(flags);

        return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
        unsigned long flags;

        raw_local_irq_save(flags);
        *addr &= ~mask;
        raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

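/*
 * Add 'a' to the counter unless it currently holds 'u'.  The value
 * observed before any addition is returned.  Built on atomic_cmpxchg(),
 * so it works with both the ll/sc and the IRQ-disabling variants above.
 */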
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
                c = old;
        return c;
}

#define atomic_inc(v)           atomic_add(1, v)
#define atomic_dec(v)           atomic_sub(1, v)

#define atomic_inc_and_test(v)  (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)  (atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
        u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

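/*
 * The 64-bit variants follow the same pattern as the 32-bit ones but
 * use the ldrexd/strexd instructions, which operate on a register pair
 * and need the doubleword alignment requested by __aligned(8) above.
 */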
static inline u64 atomic64_read(const atomic64_t *v)
{
        u64 result;

        __asm__ __volatile__("@ atomic64_read\n"
"       ldrexd  %0, %H0, [%1]"
        : "=&r" (result)
        : "r" (&v->counter), "Qo" (v->counter)
        );

        return result;
}

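/*
 * A plain 64-bit store is not guaranteed to be single-copy atomic here,
 * so atomic64_set() also uses a strexd loop; the leading ldrexd exists
 * only to claim the exclusive monitor for the store.
 */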
static inline void atomic64_set(atomic64_t *v, u64 i)
{
        u64 tmp;

        __asm__ __volatile__("@ atomic64_set\n"
"1:     ldrexd  %0, %H0, [%2]\n"
"       strexd  %0, %3, %H3, [%2]\n"
"       teq     %0, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}

static inline void atomic64_add(u64 i, atomic64_t *v)
{
        u64 result;
        unsigned long tmp;

        __asm__ __volatile__("@ atomic64_add\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       adds    %0, %0, %4\n"
"       adc     %H0, %H0, %H4\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}

static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
        u64 result;
        unsigned long tmp;

        smp_mb();

        __asm__ __volatile__("@ atomic64_add_return\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       adds    %0, %0, %4\n"
"       adc     %H0, %H0, %H4\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");

        smp_mb();

        return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
        u64 result;
        unsigned long tmp;

        __asm__ __volatile__("@ atomic64_sub\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       subs    %0, %0, %4\n"
"       sbc     %H0, %H0, %H4\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");
}

static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
{
        u64 result;
        unsigned long tmp;

        smp_mb();

        __asm__ __volatile__("@ atomic64_sub_return\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       subs    %0, %0, %4\n"
"       sbc     %H0, %H0, %H4\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (i)
        : "cc");

        smp_mb();

        return result;
}

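/*
 * 64-bit compare-and-exchange: both halves of the counter must match
 * 'old' before strexdeq stores 'new'; the observed value is returned
 * either way.
 */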
static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
{
        u64 oldval;
        unsigned long res;

        smp_mb();

        do {
                __asm__ __volatile__("@ atomic64_cmpxchg\n"
                "ldrexd         %1, %H1, [%3]\n"
                "mov            %0, #0\n"
                "teq            %1, %4\n"
                "teqeq          %H1, %H4\n"
                "strexdeq       %0, %5, %H5, [%3]"
                : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
                : "r" (&ptr->counter), "r" (old), "r" (new)
                : "cc");
        } while (res);

        smp_mb();

        return oldval;
}

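/*
 * Unconditionally swap in 'new' and return the previous 64-bit value,
 * with full barriers on either side.
 */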
static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
{
        u64 result;
        unsigned long tmp;

        smp_mb();

        __asm__ __volatile__("@ atomic64_xchg\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       strexd  %1, %4, %H4, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
        : "r" (&ptr->counter), "r" (new)
        : "cc");

        smp_mb();

        return result;
}

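/*
 * Decrement the counter only if the result would not be negative.  The
 * decremented value is returned either way, so a negative return tells
 * the caller the counter was left untouched.
 */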
static inline u64 atomic64_dec_if_positive(atomic64_t *v)
{
        u64 result;
        unsigned long tmp;

        smp_mb();

        __asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:     ldrexd  %0, %H0, [%3]\n"
"       subs    %0, %0, #1\n"
"       sbc     %H0, %H0, #0\n"
"       teq     %H0, #0\n"
"       bmi     2f\n"
"       strexd  %1, %0, %H0, [%3]\n"
"       teq     %1, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter)
        : "cc");

        smp_mb();

        return result;
}

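/*
 * Add 'a' to the counter unless it currently holds 'u'.  Returns
 * non-zero if the addition was performed and zero if the counter
 * already held 'u'.
 */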
static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
        u64 val;
        unsigned long tmp;
        int ret = 1;

        smp_mb();

        __asm__ __volatile__("@ atomic64_add_unless\n"
"1:     ldrexd  %0, %H0, [%4]\n"
"       teq     %0, %5\n"
"       teqeq   %H0, %H5\n"
"       moveq   %1, #0\n"
"       beq     2f\n"
"       adds    %0, %0, %6\n"
"       adc     %H0, %H0, %H6\n"
"       strexd  %2, %0, %H0, [%4]\n"
"       teq     %2, #0\n"
"       bne     1b\n"
"2:"
        : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
        : "r" (&v->counter), "r" (u), "r" (a)
        : "cc");

        if (ret)
                smp_mb();

        return ret;
}

#define atomic64_add_negative(a, v)     (atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)                 atomic64_add(1LL, (v))
#define atomic64_inc_return(v)          atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)        (atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)     (atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)                 atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)          atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)        (atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)        atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */