linux/arch/m68k/include/asm/atomic.h
#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */
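
/*
 * Example of typical use for resource counting (an illustrative sketch
 * only; "obj_refcnt" and release_obj() are hypothetical names, not part
 * of this header):
 *
 *	static atomic_t obj_refcnt = ATOMIC_INIT(1);
 *
 *	atomic_inc(&obj_refcnt);		take a reference
 *	...
 *	if (atomic_dec_and_test(&obj_refcnt))	last reference dropped
 *		release_obj();
 */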

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define ATOMIC_INIT(i)  { (i) }

#define atomic_read(v)          (*(volatile int *)&(v)->counter)
#define atomic_set(v, i)        (((v)->counter) = (i))

/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define ASM_DI  "d"
#else
#define ASM_DI  "di"
#endif

static inline void atomic_add(int i, atomic_t *v)
{
        __asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_sub(int i, atomic_t *v)
{
        __asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
        __asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

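/*
 * atomic_dec_and_test - decrement @v and test the result for zero
 *
 * Atomically subtracts 1 from @v and returns true (non-zero) if the
 * result is 0; the "seq" instruction captures the Z flag set by subql.
 */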
static inline int atomic_dec_and_test(atomic_t *v)
{
        char c;
        __asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
        return c != 0;
}

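/*
 * atomic_inc_and_test - increment @v and test the result for zero
 *
 * Atomically adds 1 to @v and returns true if the result is 0.
 */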
static inline int atomic_inc_and_test(atomic_t *v)
{
        char c;
        __asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
        return c != 0;
}

#ifdef CONFIG_RMW_INSNS

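/*
 * With CONFIG_RMW_INSNS the CPU provides the "cas" (compare-and-swap)
 * instruction (68020 and later), so the value-returning operations are
 * built as an optimistic read-modify-write loop: read the old value,
 * compute the new one, and retry the cas until no other context has
 * modified the counter in between.
 */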
static inline int atomic_add_return(int i, atomic_t *v)
{
        int t, tmp;

        __asm__ __volatile__(
                        "1:     movel %2,%1\n"
                        "       addl %3,%1\n"
                        "       casl %2,%1,%0\n"
                        "       jne 1b"
                        : "+m" (*v), "=&d" (t), "=&d" (tmp)
                        : "g" (i), "2" (atomic_read(v)));
        return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        int t, tmp;

        __asm__ __volatile__(
                        "1:     movel %2,%1\n"
                        "       subl %3,%1\n"
                        "       casl %2,%1,%0\n"
                        "       jne 1b"
                        : "+m" (*v), "=&d" (t), "=&d" (tmp)
                        : "g" (i), "2" (atomic_read(v)));
        return t;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#else /* !CONFIG_RMW_INSNS */

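/*
 * Without the "cas" instruction the read-modify-write sequences below
 * are made atomic by briefly disabling local interrupts.  That is
 * sufficient here because, as noted above, there are no SMP m68k
 * systems, so interrupts are the only source of concurrency.
 */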
static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long flags;
        int t;

        local_irq_save(flags);
        t = atomic_read(v);
        t += i;
        atomic_set(v, t);
        local_irq_restore(flags);

        return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long flags;
        int t;

        local_irq_save(flags);
        t = atomic_read(v);
        t -= i;
        atomic_set(v, t);
        local_irq_restore(flags);

        return t;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        unsigned long flags;
        int prev;

        local_irq_save(flags);
        prev = atomic_read(v);
        if (prev == old)
                atomic_set(v, new);
        local_irq_restore(flags);
        return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
        unsigned long flags;
        int prev;

        local_irq_save(flags);
        prev = atomic_read(v);
        atomic_set(v, new);
        local_irq_restore(flags);
        return prev;
}

#endif /* !CONFIG_RMW_INSNS */

#define atomic_dec_return(v)    atomic_sub_return(1, (v))
#define atomic_inc_return(v)    atomic_add_return(1, (v))

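/*
 * atomic_sub_and_test - subtract @i from @v and test the result for zero
 *
 * Atomically subtracts @i from @v and returns true if the result is 0.
 */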
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
        char c;
        __asm__ __volatile__("subl %2,%1; seq %0"
                             : "=d" (c), "+m" (*v)
                             : ASM_DI (i));
        return c != 0;
}

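/*
 * atomic_add_negative - add @i to @v and test for a negative result
 *
 * Atomically adds @i to @v and returns true if the result is negative;
 * the "smi" instruction captures the N flag set by addl.
 */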
static inline int atomic_add_negative(int i, atomic_t *v)
{
        char c;
        __asm__ __volatile__("addl %2,%1; smi %0"
                             : "=d" (c), "+m" (*v)
                             : ASM_DI (i));
        return c != 0;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
        __asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
}

static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
        __asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
}

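/*
 * atomic_add_unless - add @a to @v, unless @v already holds @u
 *
 * Atomically adds @a to @v so long as @v was not already @u, retrying
 * the cmpxchg if another context changed the counter in the meantime.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */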
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;
        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != (u);
}


#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
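
/*
 * Typical use of atomic_inc_not_zero() is to take a reference only if
 * the object is still live (an illustrative sketch; "struct obj" and
 * its "refcnt" field are hypothetical, not part of this header):
 *
 *	static struct obj *obj_get(struct obj *p)
 *	{
 *		return atomic_inc_not_zero(&p->refcnt) ? p : NULL;
 *	}
 */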

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()     barrier()
#define smp_mb__after_atomic_dec()      barrier()
#define smp_mb__before_atomic_inc()     barrier()
#define smp_mb__after_atomic_inc()      barrier()

#include <asm-generic/atomic-long.h>
#include <asm-generic/atomic64.h>
#endif /* __ARCH_M68K_ATOMIC__ */