linux/arch/m68k/include/asm/atomic.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C cannot guarantee us.  Useful for
 * resource counting, etc.
 */

/*
 * There are no SMP m68k systems, so we do not have to deal with SMP
 * ordering here; interrupts are the only source of concurrency.
 */

#define arch_atomic_read(v)     READ_ONCE((v)->counter)
#define arch_atomic_set(v, i)   WRITE_ONCE(((v)->counter), (i))
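
/*
 * Illustrative sketch (not part of this header): callers normally reach
 * these through the generic <linux/atomic.h> wrappers, e.g.
 *
 *      atomic_t refs = ATOMIC_INIT(1);
 *
 *      atomic_set(&refs, 2);
 *      pr_info("refs = %d\n", atomic_read(&refs));
 *
 * READ_ONCE()/WRITE_ONCE() guarantee only single-copy atomicity of the
 * load/store; they imply no ordering against other memory accesses.
 */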

/*
 * The ColdFire parts cannot do some immediate-to-memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define ASM_DI  "d"
#else
#define ASM_DI  "di"
#endif
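
/*
 * For illustration only: with ASM_DI the source-operand constraint is
 * either "d" (data register only, ColdFire) or "di" (data register or
 * immediate), so for arch_atomic_add(1, v) the compiler may emit
 *
 *      addl #1,(addr)          | classic m68k: immediate source allowed
 *      addl %d1,(addr)         | ColdFire: source forced into a register
 *
 * where (addr) stands for the memory operand referencing v->counter.
 */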

#define ATOMIC_OP(op, c_op, asm_op)                                     \
static inline void arch_atomic_##op(int i, atomic_t *v)                 \
{                                                                       \
        __asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
}

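/*
 * Sketch of the expansion, for reference only: ATOMIC_OP(add, +=, add)
 * produces
 *
 *      static inline void arch_atomic_add(int i, atomic_t *v)
 *      {
 *              __asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
 *      }
 *
 * i.e. a single read-modify-write instruction on v->counter, which is
 * atomic with respect to interrupts on uniprocessor m68k.
 */
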
#ifdef CONFIG_RMW_INSNS

/*
 * Build the operations around the 68020+ "casl" (compare-and-swap)
 * instruction: fetch the old value, compute the new one, and retry
 * until the swap succeeds without interference.
 */
#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int arch_atomic_##op##_return(int i, atomic_t *v)        \
{                                                                       \
        int t, tmp;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
                        "1:     movel %2,%1\n"  /* t = old value */     \
                        "       " #asm_op "l %3,%1\n" /* t op= i  */    \
                        "       casl %2,%1,%0\n" /* swap if unchanged */\
                        "       jne 1b"         /* raced; retry */      \
                        : "+m" (*v), "=&d" (t), "=&d" (tmp)             \
                        : "di" (i), "2" (arch_atomic_read(v)));         \
        return t;                               /* the new value */     \
}

/* Same "casl" loop, but return the value fetched before the operation. */
#define ATOMIC_FETCH_OP(op, c_op, asm_op)                               \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)            \
{                                                                       \
        int t, tmp;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
                        "1:     movel %2,%1\n"                          \
                        "       " #asm_op "l %3,%1\n"                   \
                        "       casl %2,%1,%0\n"                        \
                        "       jne 1b"                                 \
                        : "+m" (*v), "=&d" (t), "=&d" (tmp)             \
                        : "di" (i), "2" (arch_atomic_read(v)));         \
        return tmp;                             /* the old value */     \
}

#else

/*
 * No "casl" on 68000/68010 and ColdFire: fall back to disabling
 * interrupts around a plain read-modify-write, which is sufficient
 * on a uniprocessor.
 */
#define ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
static inline int arch_atomic_##op##_return(int i, atomic_t *v)        \
{                                                                       \
        unsigned long flags;                                            \
        int t;                                                          \
                                                                        \
        local_irq_save(flags);                                          \
        t = (v->counter c_op i);                                        \
        local_irq_restore(flags);                                       \
                                                                        \
        return t;                                                       \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)                               \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)           \
{                                                                       \
        unsigned long flags;                                            \
        int t;                                                          \
                                                                        \
        local_irq_save(flags);                                          \
        t = v->counter;                         /* fetch old value */   \
        v->counter c_op i;                      /* apply operation */   \
        local_irq_restore(flags);                                       \
                                                                        \
        return t;                                                       \
}

#endif /* CONFIG_RMW_INSNS */
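
/*
 * For reference, an illustrative contrast of the two return
 * conventions, assuming v->counter == 5:
 *
 *      arch_atomic_add_return(3, &v);  // returns 8, the new value
 *      arch_atomic_fetch_add(3, &v);   // returns 5, the old value
 *
 * Both leave v->counter == 8.
 */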

#define ATOMIC_OPS(op, c_op, asm_op)                                    \
        ATOMIC_OP(op, c_op, asm_op)                                     \
        ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
        ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)                                    \
        ATOMIC_OP(op, c_op, asm_op)                                     \
        ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
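
/*
 * For orientation, the instantiations above generate:
 *
 *      arch_atomic_add(), arch_atomic_add_return(), arch_atomic_fetch_add()
 *      arch_atomic_sub(), arch_atomic_sub_return(), arch_atomic_fetch_sub()
 *      arch_atomic_and(), arch_atomic_fetch_and()
 *      arch_atomic_or(),  arch_atomic_fetch_or()
 *      arch_atomic_xor(), arch_atomic_fetch_xor()
 *
 * Note the bitwise ops have no _return variant, matching the generic
 * atomic API.
 */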

static inline void arch_atomic_inc(atomic_t *v)
{
        __asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
#define arch_atomic_inc arch_atomic_inc

static inline void arch_atomic_dec(atomic_t *v)
{
        __asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
#define arch_atomic_dec arch_atomic_dec

static inline int arch_atomic_dec_and_test(atomic_t *v)
{
        char c;

        /* "seq" sets c to 0xff if the decrement produced zero. */
        __asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
        return c != 0;
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test
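
/*
 * Typical (hypothetical) use for reference counting, via the generic
 * atomic_dec_and_test() wrapper:
 *
 *      if (atomic_dec_and_test(&obj->refcnt))
 *              free_object(obj);       // obj and free_object() are
 *                                      // illustrative names only
 */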

static inline int arch_atomic_dec_and_test_lt(atomic_t *v)
{
        char c;

        /* "slt" sets c if the decremented result went negative (signed). */
        __asm__ __volatile__(
                "subql #1,%1; slt %0"
                : "=d" (c), "=m" (*v)
                : "m" (*v));
        return c != 0;
}

static inline int arch_atomic_inc_and_test(atomic_t *v)
{
        char c;
        __asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
        return c != 0;
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test

#ifdef CONFIG_RMW_INSNS

#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))

#else /* !CONFIG_RMW_INSNS */

/* Emulate cmpxchg/xchg by briefly disabling interrupts (UP only). */
static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
        unsigned long flags;
        int prev;

        local_irq_save(flags);
        prev = arch_atomic_read(v);
        if (prev == old)
                arch_atomic_set(v, new);
        local_irq_restore(flags);
        return prev;
}

static inline int arch_atomic_xchg(atomic_t *v, int new)
{
        unsigned long flags;
        int prev;

        local_irq_save(flags);
        prev = arch_atomic_read(v);
        arch_atomic_set(v, new);
        local_irq_restore(flags);
        return prev;
}

#endif /* !CONFIG_RMW_INSNS */
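
/*
 * Illustrative only: a typical cmpxchg loop built on this primitive
 * (the function name is hypothetical, not part of this header):
 *
 *      static inline int atomic_add_unless_sketch(atomic_t *v, int a, int u)
 *      {
 *              int c = arch_atomic_read(v);
 *
 *              while (c != u) {
 *                      int old = arch_atomic_cmpxchg(v, c, c + a);
 *                      if (old == c)
 *                              return 1;       // swap succeeded
 *                      c = old;                // lost a race; retry
 *              }
 *              return 0;
 *      }
 */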

static inline int arch_atomic_sub_and_test(int i, atomic_t *v)
{
        char c;
        __asm__ __volatile__("subl %2,%1; seq %0"
                             : "=d" (c), "+m" (*v)
                             : ASM_DI (i));
        return c != 0;
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test

static inline int arch_atomic_add_negative(int i, atomic_t *v)
{
        char c;

        /* "smi" sets c if the addition left the N (negative) flag set. */
        __asm__ __volatile__("addl %2,%1; smi %0"
                             : "=d" (c), "+m" (*v)
                             : ASM_DI (i));
        return c != 0;
}
#define arch_atomic_add_negative arch_atomic_add_negative
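
/*
 * Hypothetical caller, for orientation only: add_negative() is handy
 * for detecting when a counter drops below zero, e.g.
 *
 *      if (atomic_add_negative(-1, &page_refs))
 *              handle_underflow();     // illustrative names only
 */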

#endif /* __ARCH_M68K_ATOMIC__ */