linux/arch/m68k/include/asm/atomic.h
#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define ATOMIC_INIT(i)  { (i) }

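/*
 * atomic_read() forces a fresh load by going through a volatile pointer,
 * and atomic_set() is a plain store.  Each compiles to a single move
 * instruction, and interrupts can only be taken between instructions,
 * so nothing more is needed on a non-SMP machine.
 */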
#define atomic_read(v)          (*(volatile int *)&(v)->counter)
#define atomic_set(v, i)        (((v)->counter) = (i))

/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define ASM_DI  "d"
#else
#define ASM_DI  "di"
#endif

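/*
 * The non-returning operations below are a single read-modify-write
 * instruction to memory (addl/subl/addql/subql), which cannot be
 * interrupted part-way through, so they are atomic without any locking.
 */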
static inline void atomic_add(int i, atomic_t *v)
{
        __asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_sub(int i, atomic_t *v)
{
        __asm__ __volatile__("subl %1,%0" : "+m" (*v) : ASM_DI (i));
}

static inline void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
        __asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

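/*
 * The *_and_test() variants use the condition codes left behind by the
 * memory operation: "seq" fills the byte register with ones when the
 * result was zero, so "c != 0" reports whether the counter hit zero.
 */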
static inline int atomic_dec_and_test(atomic_t *v)
{
        char c;
        __asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
        return c != 0;
}

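/*
 * Like atomic_dec_and_test(), but "slt" tests the sign instead: returns
 * true when the decremented value is below zero.
 */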
static inline int atomic_dec_and_test_lt(atomic_t *v)
{
        char c;
        __asm__ __volatile__(
                "subql #1,%1; slt %0"
                : "=d" (c), "+m" (*v));
        return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
{
        char c;
        __asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
        return c != 0;
}

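/*
 * With CONFIG_RMW_INSNS the CPU can use the "cas" (compare-and-swap)
 * instruction, so the value-returning operations are built as a cas
 * retry loop.  Without it (e.g. 68000 or ColdFire cores) we fall back
 * to briefly disabling interrupts, which is enough on a non-SMP system.
 */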
#ifdef CONFIG_RMW_INSNS

static inline int atomic_add_return(int i, atomic_t *v)
{
        int t, tmp;

        __asm__ __volatile__(
                        "1:     movel %2,%1\n"
                        "       addl %3,%1\n"
                        "       casl %2,%1,%0\n"
                        "       jne 1b"
                        : "+m" (*v), "=&d" (t), "=&d" (tmp)
                        : "g" (i), "2" (atomic_read(v)));
        return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        int t, tmp;

        __asm__ __volatile__(
                        "1:     movel %2,%1\n"
                        "       subl %3,%1\n"
                        "       casl %2,%1,%0\n"
                        "       jne 1b"
                        : "+m" (*v), "=&d" (t), "=&d" (tmp)
                        : "g" (i), "2" (atomic_read(v)));
        return t;
}

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#else /* !CONFIG_RMW_INSNS */

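/*
 * No "cas" instruction here, so make each read-modify-write sequence
 * atomic by disabling local interrupts around it.
 */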
static inline int atomic_add_return(int i, atomic_t *v)
{
        unsigned long flags;
        int t;

        local_irq_save(flags);
        t = atomic_read(v);
        t += i;
        atomic_set(v, t);
        local_irq_restore(flags);

        return t;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        unsigned long flags;
        int t;

        local_irq_save(flags);
        t = atomic_read(v);
        t -= i;
        atomic_set(v, t);
        local_irq_restore(flags);

        return t;
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        unsigned long flags;
        int prev;

        local_irq_save(flags);
        prev = atomic_read(v);
        if (prev == old)
                atomic_set(v, new);
        local_irq_restore(flags);
        return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
        unsigned long flags;
        int prev;

        local_irq_save(flags);
        prev = atomic_read(v);
        atomic_set(v, new);
        local_irq_restore(flags);
        return prev;
}

#endif /* !CONFIG_RMW_INSNS */

#define atomic_dec_return(v)    atomic_sub_return(1, (v))
#define atomic_inc_return(v)    atomic_add_return(1, (v))

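/*
 * As with the inc/dec tests above, these latch the condition codes set
 * by the arithmetic: "seq" reports a zero result, "smi" a negative one.
 */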
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
        char c;
        __asm__ __volatile__("subl %2,%1; seq %0"
                             : "=d" (c), "+m" (*v)
                             : ASM_DI (i));
        return c != 0;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
        char c;
        __asm__ __volatile__("addl %2,%1; smi %0"
                             : "=d" (c), "+m" (*v)
                             : ASM_DI (i));
        return c != 0;
}

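/*
 * Clear or set bits in *v with a single andl/orl to memory.  Note that
 * atomic_clear_mask() takes the mask of bits to clear, not its complement.
 */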
static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
        __asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
}

static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
        __asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
}

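/*
 * __atomic_add_unless() atomically adds "a" to the counter, unless the
 * counter already holds "u", and returns the value that was found there.
 * It is built on a cmpxchg retry loop: reread and retry whenever another
 * path changed the counter between the read and the cmpxchg.
 *
 * Hypothetical usage sketch (names are illustrative only): take a
 * reference only while the count has not already dropped to zero, e.g.
 *
 *	if (__atomic_add_unless(&obj->refcount, 1, 0) == 0)
 *		return NULL;
 */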
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;
        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c;
}

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()     barrier()
#define smp_mb__after_atomic_dec()      barrier()
#define smp_mb__before_atomic_inc()     barrier()
#define smp_mb__after_atomic_inc()      barrier()

#endif /* __ARCH_M68K_ATOMIC__ */