linux/arch/metag/include/asm/atomic_lock1.h
#ifndef __ASM_METAG_ATOMIC_LOCK1_H
#define __ASM_METAG_ATOMIC_LOCK1_H

#define ATOMIC_INIT(i)  { (i) }

#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/global_lock.h>

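/*
 * With the exception of atomic_read(), each operation below takes global
 * lock 1 (see asm/global_lock.h) around a plain read-modify-write of
 * v->counter, so all of them serialize against one another.
 * atomic_xchg() is the odd one out and simply defers to xchg().
 */
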
static inline int atomic_read(const atomic_t *v)
{
        return (v)->counter;
}

/*
 * atomic_set needs to take the lock to protect atomic_add_unless from a
 * possible race, as it reads the counter twice:
 *
 *  CPU0                               CPU1
 *  atomic_add_unless(1, 0)
 *    ret = v->counter (non-zero)
 *    if (ret != u)                    v->counter = 0
 *      v->counter += 1 (counter set to 1)
 *
 * Making atomic_set take the lock ensures that ordering and logical
 * consistency are preserved.
 */
static inline int atomic_set(atomic_t *v, int i)
{
        unsigned long flags;

        __global_lock1(flags);
        fence();
        v->counter = i;
        __global_unlock1(flags);
        return i;
}

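/*
 * atomic_add() and atomic_sub() modify the counter without returning a
 * value; like the rest of the file they are serialized by global lock 1.
 */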
static inline void atomic_add(int i, atomic_t *v)
{
        unsigned long flags;

        __global_lock1(flags);
        fence();
        v->counter += i;
        __global_unlock1(flags);
}

static inline void atomic_sub(int i, atomic_t *v)
{
        unsigned long flags;

        __global_lock1(flags);
        fence();
        v->counter -= i;
        __global_unlock1(flags);
}

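/*
 * atomic_add_return() and atomic_sub_return() perform the same update but
 * return the new value of the counter.
 */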
static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long result;
        unsigned long flags;

        __global_lock1(flags);
        result = v->counter;
        result += i;
        fence();
        v->counter = result;
        __global_unlock1(flags);

        return result;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long result;
        unsigned long flags;

        __global_lock1(flags);
        result = v->counter;
        result -= i;
        fence();
        v->counter = result;
        __global_unlock1(flags);

        return result;
}

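/*
 * atomic_clear_mask() and atomic_set_mask() atomically clear (AND with
 * ~mask) or set (OR with mask) bits in the counter.
 */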
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
        unsigned long flags;

        __global_lock1(flags);
        fence();
        v->counter &= ~mask;
        __global_unlock1(flags);
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
        unsigned long flags;

        __global_lock1(flags);
        fence();
        v->counter |= mask;
        __global_unlock1(flags);
}

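/*
 * atomic_cmpxchg() stores new only if the counter currently equals old.
 * It returns the value that was read, so callers compare the return value
 * with old to tell whether the exchange actually happened.
 */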
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        int ret;
        unsigned long flags;

        __global_lock1(flags);
        ret = v->counter;
        if (ret == old) {
                fence();
                v->counter = new;
        }
        __global_unlock1(flags);

        return ret;
}
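
/*
 * Illustrative sketch only, not part of this header: the usual retry loop
 * built around atomic_cmpxchg().  The atomic_max() helper is a hypothetical
 * name, used here just to show how a compound read-modify-write ("store the
 * larger value") can be composed from cmpxchg:
 *
 *      static inline void atomic_max(atomic_t *v, int new)
 *      {
 *              int old = atomic_read(v);
 *
 *              while (old < new) {
 *                      int prev = atomic_cmpxchg(v, old, new);
 *
 *                      if (prev == old)
 *                              break;
 *                      old = prev;
 *              }
 *      }
 *
 * The loop retries with the freshly observed value whenever another thread
 * changed the counter between the read and the cmpxchg.
 */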

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

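/*
 * __atomic_add_unless() adds a to the counter unless it currently equals u.
 * It returns the value observed before any addition was made.
 */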
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int ret;
        unsigned long flags;

        __global_lock1(flags);
        ret = v->counter;
        if (ret != u) {
                fence();
                v->counter += a;
        }
        __global_unlock1(flags);

        return ret;
}
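
/*
 * Illustrative sketch only, not part of this header: the common "take a
 * reference unless it has already dropped to zero" pattern built on
 * __atomic_add_unless().  The my_obj_get() helper is a hypothetical name
 * used purely for the example:
 *
 *      static inline int my_obj_get(atomic_t *refcount)
 *      {
 *              return __atomic_add_unless(refcount, 1, 0) != 0;
 *      }
 *
 * A non-zero return means the old count was not zero and the reference was
 * successfully taken.
 */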

/*
 * atomic_sub_if_positive() subtracts i from the counter only when the
 * result would remain non-negative; the computed result is returned either
 * way, so a negative return means the counter was left untouched.
 */
static inline int atomic_sub_if_positive(int i, atomic_t *v)
{
        int ret;
        unsigned long flags;

        __global_lock1(flags);
        ret = v->counter - i;
        if (ret >= 0) {
                fence();
                v->counter = ret;
        }
        __global_unlock1(flags);

        return ret;
}

#endif /* __ASM_METAG_ATOMIC_LOCK1_H */