linux/arch/arm64/include/asm/atomic_ll_sc.h
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#ifndef __ARM64_IN_ATOMIC_IMPL
#error "please don't include this file directly"
#endif

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 *
 * NOTE: these functions do *not* follow the PCS and must explicitly
 * save any clobbered registers other than x0 (regardless of return
 * value).  This is achieved through -fcall-saved-* compiler flags for
 * this file, which unfortunately don't work on a per-function basis
 * (the optimize attribute silently ignores these options).
 */

#define ATOMIC_OP(op, asm_op)                                           \
__LL_SC_INLINE void                                                     \
__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v))                         \
{                                                                       \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
        asm volatile("// atomic_" #op "\n"                              \
"       prfm    pstl1strm, %2\n"                                        \
"1:     ldxr    %w0, %2\n"                                              \
"       " #asm_op "     %w0, %w0, %w3\n"                                \
"       stxr    %w1, %w0, %2\n"                                         \
"       cbnz    %w1, 1b"                                                \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i));                                                    \
}                                                                       \
__LL_SC_EXPORT(atomic_##op);
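
/*
 * Illustrative sketch: modulo the __LL_SC_INLINE/__LL_SC_PREFIX/
 * __LL_SC_EXPORT decoration, ATOMIC_OP(add, add) expands to roughly
 *
 *      void atomic_add(int i, atomic_t *v)
 *      {
 *              unsigned long tmp;
 *              int result;
 *
 *              asm volatile("// atomic_add\n"
 *      "       prfm    pstl1strm, %2\n"        // prefetch for store
 *      "1:     ldxr    %w0, %2\n"              // load-exclusive v->counter
 *      "       add     %w0, %w0, %w3\n"        // result += i
 *      "       stxr    %w1, %w0, %2\n"         // try store-exclusive
 *      "       cbnz    %w1, 1b"                // retry if exclusivity was lost
 *              : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 *              : "Ir" (i));
 *      }
 *
 * i.e. a load-exclusive/store-exclusive loop that retries until the
 * update is performed atomically.
 */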

#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)            \
__LL_SC_INLINE int                                                      \
__LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v))          \
{                                                                       \
        unsigned long tmp;                                              \
        int result;                                                     \
                                                                        \
        asm volatile("// atomic_" #op "_return" #name "\n"              \
"       prfm    pstl1strm, %2\n"                                        \
"1:     ld" #acq "xr    %w0, %2\n"                                      \
"       " #asm_op "     %w0, %w0, %w3\n"                                \
"       st" #rel "xr    %w1, %w0, %2\n"                                 \
"       cbnz    %w1, 1b\n"                                              \
"       " #mb                                                           \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i)                                                      \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
}                                                                       \
__LL_SC_EXPORT(atomic_##op##_return##name);

#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)             \
__LL_SC_INLINE int                                                      \
__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v))             \
{                                                                       \
        unsigned long tmp;                                              \
        int val, result;                                                \
                                                                        \
        asm volatile("// atomic_fetch_" #op #name "\n"                  \
"       prfm    pstl1strm, %3\n"                                        \
"1:     ld" #acq "xr    %w0, %3\n"                                      \
"       " #asm_op "     %w1, %w0, %w4\n"                                \
"       st" #rel "xr    %w2, %w1, %3\n"                                 \
"       cbnz    %w2, 1b\n"                                              \
"       " #mb                                                           \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)   \
        : "Ir" (i)                                                      \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
}                                                                       \
__LL_SC_EXPORT(atomic_fetch_##op##name);

#define ATOMIC_OPS(...)                                                 \
        ATOMIC_OP(__VA_ARGS__)                                          \
        ATOMIC_OP_RETURN(        , dmb ish,  , l, "memory", __VA_ARGS__)\
        ATOMIC_OP_RETURN(_relaxed,        ,  ,  ,         , __VA_ARGS__)\
        ATOMIC_OP_RETURN(_acquire,        , a,  , "memory", __VA_ARGS__)\
        ATOMIC_OP_RETURN(_release,        ,  , l, "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
        ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)
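
/*
 * Each ATOMIC_OPS(op, asm_op) invocation instantiates a whole family for
 * one operation: void atomic_<op>(), four atomic_<op>_return* flavours and
 * four atomic_fetch_<op>* flavours.  The parameter columns above are
 * (name suffix, trailing barrier, acquire flag on the load-exclusive,
 * release flag on the store-exclusive, asm clobber list):
 *
 *      ""              stlxr + dmb ish         fully ordered
 *      "_relaxed"      no barriers             no ordering guarantees
 *      "_acquire"      ldaxr                   acquire semantics
 *      "_release"      stlxr                   release semantics
 */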

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(...)                                                 \
        ATOMIC_OP(__VA_ARGS__)                                          \
        ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
        ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)
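
/*
 * The bitwise operations below have no *_return form, so this
 * redefinition of ATOMIC_OPS only emits the void and fetch variants.
 */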

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, orr)
ATOMIC_OPS(xor, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define ATOMIC64_OP(op, asm_op)                                         \
__LL_SC_INLINE void                                                     \
__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v))                    \
{                                                                       \
        long result;                                                    \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_" #op "\n"                            \
"       prfm    pstl1strm, %2\n"                                        \
"1:     ldxr    %0, %2\n"                                               \
"       " #asm_op "     %0, %0, %3\n"                                   \
"       stxr    %w1, %0, %2\n"                                          \
"       cbnz    %w1, 1b"                                                \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i));                                                    \
}                                                                       \
__LL_SC_EXPORT(atomic64_##op);

#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op)          \
__LL_SC_INLINE long                                                     \
__LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v))     \
{                                                                       \
        long result;                                                    \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_" #op "_return" #name "\n"            \
"       prfm    pstl1strm, %2\n"                                        \
"1:     ld" #acq "xr    %0, %2\n"                                       \
"       " #asm_op "     %0, %0, %3\n"                                   \
"       st" #rel "xr    %w1, %0, %2\n"                                  \
"       cbnz    %w1, 1b\n"                                              \
"       " #mb                                                           \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)                \
        : "Ir" (i)                                                      \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
}                                                                       \
__LL_SC_EXPORT(atomic64_##op##_return##name);

#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)           \
__LL_SC_INLINE long                                                     \
__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v))        \
{                                                                       \
        long result, val;                                               \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile("// atomic64_fetch_" #op #name "\n"                \
"       prfm    pstl1strm, %3\n"                                        \
"1:     ld" #acq "xr    %0, %3\n"                                       \
"       " #asm_op "     %1, %0, %4\n"                                   \
"       st" #rel "xr    %w2, %1, %3\n"                                  \
"       cbnz    %w2, 1b\n"                                              \
"       " #mb                                                           \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)   \
        : "Ir" (i)                                                      \
        : cl);                                                          \
                                                                        \
        return result;                                                  \
}                                                                       \
__LL_SC_EXPORT(atomic64_fetch_##op##name);

#define ATOMIC64_OPS(...)                                               \
        ATOMIC64_OP(__VA_ARGS__)                                        \
        ATOMIC64_OP_RETURN(, dmb ish,  , l, "memory", __VA_ARGS__)      \
        ATOMIC64_OP_RETURN(_relaxed,,  ,  ,         , __VA_ARGS__)      \
        ATOMIC64_OP_RETURN(_acquire,, a,  , "memory", __VA_ARGS__)      \
        ATOMIC64_OP_RETURN(_release,,  , l, "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, sub)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(...)                                               \
        ATOMIC64_OP(__VA_ARGS__)                                        \
        ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)      \
        ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(andnot, bic)
ATOMIC64_OPS(or, orr)
ATOMIC64_OPS(xor, eor)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

__LL_SC_INLINE long
__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
{
        long result;
        unsigned long tmp;

        asm volatile("// atomic64_dec_if_positive\n"
"       prfm    pstl1strm, %2\n"
"1:     ldxr    %0, %2\n"
"       subs    %0, %0, #1\n"
"       b.lt    2f\n"
"       stlxr   %w1, %0, %2\n"
"       cbnz    %w1, 1b\n"
"       dmb     ish\n"
"2:"
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        :
        : "cc", "memory");

        return result;
}
__LL_SC_EXPORT(atomic64_dec_if_positive);
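
/*
 * atomic64_dec_if_positive() only writes back the decremented value when
 * the old value was strictly positive; otherwise the b.lt branch skips
 * the store-exclusive and the (negative) result is returned unchanged.
 * Illustrative caller sketch:
 *
 *      atomic64_t v;
 *
 *      if (atomic64_dec_if_positive(&v) < 0)
 *              ;       // v was already <= 0 and was left untouched
 */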

#define __CMPXCHG_CASE(w, sz, name, mb, acq, rel, cl)                   \
__LL_SC_INLINE unsigned long                                            \
__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr,                \
                                     unsigned long old,                 \
                                     unsigned long new))                \
{                                                                       \
        unsigned long tmp, oldval;                                      \
                                                                        \
        asm volatile(                                                   \
        "       prfm    pstl1strm, %[v]\n"                              \
        "1:     ld" #acq "xr" #sz "\t%" #w "[oldval], %[v]\n"           \
        "       eor     %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"  \
        "       cbnz    %" #w "[tmp], 2f\n"                             \
        "       st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n"     \
        "       cbnz    %w[tmp], 1b\n"                                  \
        "       " #mb "\n"                                              \
        "2:"                                                            \
        : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),                   \
          [v] "+Q" (*(unsigned long *)ptr)                              \
        : [old] "Lr" (old), [new] "r" (new)                             \
        : cl);                                                          \
                                                                        \
        return oldval;                                                  \
}                                                                       \
__LL_SC_EXPORT(__cmpxchg_case_##name);

__CMPXCHG_CASE(w, b,     1,        ,  ,  ,         )
__CMPXCHG_CASE(w, h,     2,        ,  ,  ,         )
__CMPXCHG_CASE(w,  ,     4,        ,  ,  ,         )
__CMPXCHG_CASE( ,  ,     8,        ,  ,  ,         )
__CMPXCHG_CASE(w, b, acq_1,        , a,  , "memory")
__CMPXCHG_CASE(w, h, acq_2,        , a,  , "memory")
__CMPXCHG_CASE(w,  , acq_4,        , a,  , "memory")
__CMPXCHG_CASE( ,  , acq_8,        , a,  , "memory")
__CMPXCHG_CASE(w, b, rel_1,        ,  , l, "memory")
__CMPXCHG_CASE(w, h, rel_2,        ,  , l, "memory")
__CMPXCHG_CASE(w,  , rel_4,        ,  , l, "memory")
__CMPXCHG_CASE( ,  , rel_8,        ,  , l, "memory")
__CMPXCHG_CASE(w, b,  mb_1, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w, h,  mb_2, dmb ish,  , l, "memory")
__CMPXCHG_CASE(w,  ,  mb_4, dmb ish,  , l, "memory")
__CMPXCHG_CASE( ,  ,  mb_8, dmb ish,  , l, "memory")
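
/*
 * Naming scheme for the instantiations above: the numeric suffix is the
 * operand size in bytes (1/2/4/8) and the prefix selects the ordering
 * (none = relaxed, acq_ = acquire, rel_ = release, mb_ = fully ordered).
 * For example, __cmpxchg_case_mb_4 is a fully ordered 32-bit
 * compare-and-exchange that returns the value found at *ptr.
 */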

#undef __CMPXCHG_CASE

#define __CMPXCHG_DBL(name, mb, rel, cl)                                \
__LL_SC_INLINE long                                                     \
__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,               \
                                      unsigned long old2,               \
                                      unsigned long new1,               \
                                      unsigned long new2,               \
                                      volatile void *ptr))              \
{                                                                       \
        unsigned long tmp, ret;                                         \
                                                                        \
        asm volatile("// __cmpxchg_double" #name "\n"                   \
        "       prfm    pstl1strm, %2\n"                                \
        "1:     ldxp    %0, %1, %2\n"                                   \
        "       eor     %0, %0, %3\n"                                   \
        "       eor     %1, %1, %4\n"                                   \
        "       orr     %1, %0, %1\n"                                   \
        "       cbnz    %1, 2f\n"                                       \
        "       st" #rel "xp    %w0, %5, %6, %2\n"                      \
        "       cbnz    %w0, 1b\n"                                      \
        "       " #mb "\n"                                              \
        "2:"                                                            \
        : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)        \
        : "r" (old1), "r" (old2), "r" (new1), "r" (new2)                \
        : cl);                                                          \
                                                                        \
        return ret;                                                     \
}                                                                       \
__LL_SC_EXPORT(__cmpxchg_double##name);

__CMPXCHG_DBL(   ,        ,  ,         )
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")
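
/*
 * Both __cmpxchg_double variants operate on a pair of adjacent 64-bit
 * words via ldxp/stxp: they return 0 on success (both old values matched
 * and the store-exclusive pair succeeded) and non-zero if either
 * comparison failed.
 */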

#undef __CMPXCHG_DBL

#endif  /* __ASM_ATOMIC_LL_SC_H */