linux/arch/arc/include/asm/atomic-spinlock.h
/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef _ASM_ARC_ATOMIC_SPLOCK_H
#define _ASM_ARC_ATOMIC_SPLOCK_H

/*
 * Non-hardware-assisted atomic R-M-W
 * Locking is irq-disabling only (UP) or spinlock based (SMP)
 */

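/*
 * Illustrative sketch only: atomic_ops_lock()/atomic_ops_unlock() are
 * assumed to come from <asm/smp.h> and to expand roughly as below (the
 * SMP lock name here is hypothetical), i.e. every emulated RMW runs with
 * IRQs disabled and, on SMP, additionally under one global spinlock:
 *
 *	#ifdef CONFIG_SMP
 *	#define atomic_ops_lock(flags)		do {		\
 *		local_irq_save(flags);				\
 *		arch_spin_lock(&smp_atomic_ops_lock);		\
 *	} while (0)
 *	#define atomic_ops_unlock(flags)	do {		\
 *		arch_spin_unlock(&smp_atomic_ops_lock);		\
 *		local_irq_restore(flags);			\
 *	} while (0)
 *	#else
 *	#define atomic_ops_lock(flags)		local_irq_save(flags)
 *	#define atomic_ops_unlock(flags)	local_irq_restore(flags)
 *	#endif
 */
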
static inline void arch_atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence.
	 *
	 * Thus atomic_set(), despite being a single insn (and seemingly
	 * atomic), still requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}
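
/*
 * For reference, an instantiation such as ATOMIC_OP(add, +=, add) expands
 * (modulo whitespace) to the function below; note that the asm_op argument
 * is not used by this spinlock-based implementation:
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *
 *		atomic_ops_lock(flags);
 *		v->counter += i;
 *		atomic_ops_unlock(flags);
 *	}
 */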

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	unsigned int temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned int orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
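
/*
 * The two instantiations above provide arch_atomic_add()/arch_atomic_sub(),
 * arch_atomic_add_return()/arch_atomic_sub_return() and
 * arch_atomic_fetch_add()/arch_atomic_fetch_sub(); the *_return() and
 * fetch_*() forms are fully ordered courtesy of the smp_mb() implied by
 * spin lock/unlock, as noted in the macros above.
 */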

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
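
/*
 * The bitwise group deliberately omits the *_return() variants: the generic
 * atomic API only requires the fetch_*() forms for these ops. The asm_op
 * argument (and, bic, or, xor) is unused by this emulated implementation;
 * it is presumably kept so these instantiation lines mirror the
 * hardware-assisted (LLSC) variant of this header.
 */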

#define arch_atomic_andnot		arch_atomic_andnot
#define arch_atomic_fetch_andnot	arch_atomic_fetch_andnot
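
/*
 * andnot is an optional op: the two defines above tell the generic atomic
 * fallback machinery that this architecture provides arch_atomic_andnot()
 * and arch_atomic_fetch_andnot(), so no fallback (and + not) versions are
 * generated for them.
 */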

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif