linux/arch/arc/include/asm/atomic64-arcv2.h
/* SPDX-License-Identifier: GPL-2.0-only */

/*
 * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
 *  - The address HAS to be 64-bit aligned
 */

#ifndef _ASM_ARC_ATOMIC64_ARCV2_H
#define _ASM_ARC_ATOMIC64_ARCV2_H

typedef struct {
	s64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(a)	{ (a) }

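/*
 * Illustrative only: a counter declared with this type picks up the 8-byte
 * alignment that LLOCKD/SCONDD require (the variable name is made up):
 *
 *	static atomic64_t nr_bytes = ATOMIC64_INIT(0);
 */
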
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 val;

	__asm__ __volatile__(
	"	ldd   %0, [%1]	\n"
	: "=r"(val)
	: "r"(&v->counter));

	return val;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 a)
{
	/*
	 * This could have been a simple assignment in "C", but it would need
	 * an explicit volatile qualifier; otherwise gcc's optimizers could
	 * elide the store, which broke the atomic64 self-test.
	 * The inline asm version needs the "memory" clobber for the exact
	 * same reason: to tell gcc about the store.
	 *
	 * This is not needed for the sibling atomic64_add() etc., since both
	 * the load and the store are done explicitly in inline asm. As long
	 * as the API is used for every access, gcc has no way to optimize
	 * away any load/store.
	 */
	__asm__ __volatile__(
	"	std   %0, [%1]	\n"
	:
	: "r"(a), "r"(&v->counter)
	: "memory");
}
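
/*
 * For reference only: the "simple assignment with explicit volatile"
 * alternative mentioned above would look roughly like the line below. It
 * also keeps gcc from eliding the store, but unlike the std above it does
 * not guarantee a single 64-bit store:
 *
 *	*(volatile s64 *)&v->counter = a;
 */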

#define ATOMIC64_OP(op, op1, op2)					\
static inline void arch_atomic64_##op(s64 a, atomic64_t *v)		\
{									\
	s64 val;							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(val)							\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline s64 arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)	\
{									\
	s64 val;							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd   %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: [val] "=&r"(val)						\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* relaxed variant: no smp_mb(), hence no "memory" clobber needed here */	\
									\
	return val;							\
}

#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline s64 arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)	\
{									\
	s64 val, orig;							\
									\
	__asm__ __volatile__(						\
	"1:				\n"				\
	"	llockd   %0, [%2]	\n"				\
	"	" #op1 " %L1, %L0, %L3	\n"				\
	"	" #op2 " %H1, %H0, %H3	\n"				\
	"	scondd   %1, [%2]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(orig), "=&r"(val)					\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* relaxed variant: no smp_mb(), hence no "memory" clobber needed here */	\
									\
	return orig;							\
}

#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed

#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_andnot_relaxed	arch_atomic64_fetch_andnot_relaxed
#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed

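/*
 * Note: only the relaxed forms are provided above. The fully ordered
 * atomic64_add_return() etc. are generated by the generic atomic fallbacks,
 * which wrap the relaxed op in full barriers, roughly (illustrative):
 *
 *	smp_mb();
 *	ret = arch_atomic64_add_return_relaxed(a, v);
 *	smp_mb();
 */
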
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)

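/*
 * Illustrative: with ATOMIC64_OPS(add, add.f, adc) the 64-bit sum is formed
 * by the carry chain
 *
 *	add.f	%L0, %L0, %L2		; low words, ".f" sets carry
 *	adc	%H0, %H0, %H2		; high words plus carry
 *
 * e.g. 0x00000000ffffffff + 1: the low add wraps to 0 and sets carry, adc
 * then bumps the high word to 1. sub.f/sbc do the same with borrow.
 */
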
#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)

#define arch_atomic64_andnot		arch_atomic64_andnot

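/*
 * Callers normally reach these through the generic atomic64_*() wrappers
 * rather than the arch_ entry points, e.g. (illustrative; 'state' and the
 * bit index are made up):
 *
 *	atomic64_andnot(BIT_ULL(5), &state);	// atomically clear bit 5
 */
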
#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64
arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
{
	s64 prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	brne    %L0, %L2, 2f	\n"
	"	brne    %H0, %H2, 2f	\n"
	"	scondd  %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "ir"(expected), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

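/*
 * Typical caller pattern (illustrative sketch, via the generic
 * atomic64_cmpxchg() wrapper; 'cnt' and LIMIT are made up): retry until the
 * value 'new' was computed from is still the current one.
 *
 *	s64 old = atomic64_read(&cnt);
 *
 *	for (;;) {
 *		s64 new = min_t(s64, old + 1, LIMIT);
 *		s64 seen = atomic64_cmpxchg(&cnt, old, new);
 *
 *		if (seen == old)
 *			break;
 *		old = seen;
 *	}
 */
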
static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
{
	s64 prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	scondd  %2, [%1]	\n"
	"	bnz     1b		\n"
	: "=&r"(prev)
	: "r"(ptr), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return prev;
}

/**
 * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * Returns the old value of @v minus 1, even if @v was not decremented.
 */
static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	sub.f   %L0, %L0, 1	# low word - 1, sets C on borrow\n"
	"	sub.c   %H0, %H0, 1	# if C set, high word - 1\n"
	"	brlt    %H0, 0, 2f	\n"
	"	scondd  %0, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(val)
	: "r"(&v->counter)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return val;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
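
/*
 * Illustrative use (via the generic atomic64_dec_if_positive() wrapper;
 * 'budget' and the error path are made up): consume one unit, but never
 * drive the counter negative:
 *
 *	if (atomic64_dec_if_positive(&budget) < 0)
 *		return -EBUSY;
 */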

/**
 * arch_atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to @v...
 * @u: ...unless @v is equal to @u.
 *
 * Atomically adds @a to @v, if @v was not @u.
 * Returns the old value of @v.
 */
static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 old, temp;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%2]	\n"
	"	brne    %L0, %L4, 2f	# continue to add since v != u \n"
	"	breq.d  %H0, %H4, 3f	# return since v == u \n"
	"2:				\n"
	"	add.f   %L1, %L0, %L3	\n"
	"	adc     %H1, %H0, %H3	\n"
	"	scondd  %1, [%2]	\n"
	"	bnz     1b		\n"
	"3:				\n"
	: "=&r"(old), "=&r"(temp)
	: "r"(&v->counter), "r"(a), "r"(u)
	: "cc");	/* memory clobber comes from smp_mb() */

	smp_mb();

	return old;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
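
/*
 * The generic atomic fallbacks build the more familiar helpers on top of
 * this one, roughly (illustrative):
 *
 *	atomic64_add_unless(v, a, u)	=> atomic64_fetch_add_unless(v, a, u) != u
 *	atomic64_inc_not_zero(v)	=> atomic64_add_unless(v, 1, 0)
 */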

#endif	/* _ASM_ARC_ATOMIC64_ARCV2_H */