linux/arch/arm64/include/asm/cmpxchg.h
/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/build_bug.h>
#include <linux/compiler.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/lse.h>

/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl)	\
static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr)	\
{										\
	u##sz ret;								\
	unsigned long tmp;							\
										\
	asm volatile(ARM64_LSE_ATOMIC_INSN(					\
	/* LL/SC */								\
	"	prfm	pstl1strm, %2\n"					\
	"1:	ld" #acq "xr" #sfx "\t%" #w "0, %2\n"				\
	"	st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n"			\
	"	cbnz	%w1, 1b\n"						\
	"	" #mb,								\
	/* LSE atomics */							\
	"	swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n"		\
		__nops(3)							\
	"	" #nop_lse)							\
	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr)			\
	: "r" (x)								\
	: cl);									\
										\
	return ret;								\
}

__XCHG_CASE(w, b,     ,  8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     , 16,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     , 32,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     , 64,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_,  8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_, 16,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_, 32,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_, 64,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_,  8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_, 16,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_, 32,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_, 64,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_,  8, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_, 16, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_, 32, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_, 64, dmb ish, nop,  , a, l, "memory")

#undef __XCHG_CASE
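
/*
 * Illustrative note (not part of the original header): each __XCHG_CASE()
 * line above stamps out one sized exchange helper.  For example, the "mb_"
 * 32-bit case expands to a function with the signature
 *
 *	static inline u32 __xchg_case_mb_32(u32 x, volatile void *ptr);
 *
 * whose LL/SC path ends in "dmb ish" and whose LSE path uses "swpal", which
 * is what distinguishes the fully ordered variant from the relaxed one.
 * The __XCHG_GEN()-generated dispatchers below select one of these helpers
 * by operand size.
 */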

#define __XCHG_GEN(sfx)							\
static inline unsigned long __xchg##sfx(unsigned long x,		\
					volatile void *ptr,		\
					int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __xchg_case##sfx##_8(x, ptr);			\
	case 2:								\
		return __xchg_case##sfx##_16(x, ptr);			\
	case 4:								\
		return __xchg_case##sfx##_32(x, ptr);			\
	case 8:								\
		return __xchg_case##sfx##_64(x, ptr);			\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#undef __XCHG_GEN

#define __xchg_wrapper(sfx, ptr, x)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr)));	\
	__ret;								\
})

/* xchg */
#define xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
#define xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
#define xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
#define xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
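
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * wrapper casts the result back to the pointee type, so a caller can
 * exchange a plain word without naming the sized helpers directly.  The
 * function and variable names below are hypothetical.
 */
static inline unsigned long __example_xchg_usage(unsigned long *flag)
{
	/* Atomically store 1 and return the old value, fully ordered. */
	return xchg(flag, 1UL);
}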

#define __CMPXCHG_GEN(sfx)						\
static inline unsigned long __cmpxchg##sfx(volatile void *ptr,		\
					   unsigned long old,		\
					   unsigned long new,		\
					   int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpxchg_case##sfx##_8(ptr, old, new);		\
	case 2:								\
		return __cmpxchg_case##sfx##_16(ptr, old, new);		\
	case 4:								\
		return __cmpxchg_case##sfx##_32(ptr, old, new);		\
	case 8:								\
		return __cmpxchg_case##sfx##_64(ptr, old, new);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#undef __CMPXCHG_GEN

#define __cmpxchg_wrapper(sfx, ptr, o, n)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__cmpxchg##sfx((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})

/* cmpxchg */
#define cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
#define cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
#define cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
#define cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
#define cmpxchg_local		cmpxchg_relaxed

/* cmpxchg64 */
#define cmpxchg64_relaxed	cmpxchg_relaxed
#define cmpxchg64_acquire	cmpxchg_acquire
#define cmpxchg64_release	cmpxchg_release
#define cmpxchg64		cmpxchg
#define cmpxchg64_local		cmpxchg_local
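
/*
 * Usage sketch (illustrative only, not part of the original header): a
 * classic compare-and-swap retry loop built on cmpxchg().  cmpxchg()
 * returns the value actually found at *ptr, so the update succeeded iff
 * that value equals the expected one.  The names below are hypothetical.
 */
static inline void __example_cmpxchg_increment(unsigned long *counter)
{
	unsigned long old, new;

	do {
		old = READ_ONCE(*counter);
		new = old + 1;
	} while (cmpxchg(counter, old, new) != old);
}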

/* cmpxchg_double */
#define system_has_cmpxchg_double()	1

#define __cmpxchg_double_check(ptr1, ptr2)					\
({										\
	if (sizeof(*(ptr1)) != 8)						\
		BUILD_BUG();							\
	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);	\
})

#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2)				\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2),	\
				     (unsigned long)(n1), (unsigned long)(n2),	\
				     ptr1);					\
	__ret;									\
})

#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2)			\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2),	\
				  (unsigned long)(n1), (unsigned long)(n2),	\
				  ptr1);					\
	__ret;									\
})
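
/*
 * Usage sketch (illustrative only, not part of the original header):
 * cmpxchg_double() operates on two adjacent 64-bit words (the check above
 * enforces that ptr2 immediately follows ptr1) and returns nonzero on
 * success.  The structure and function names below are hypothetical.
 */
struct __example_pair {
	unsigned long first;
	unsigned long second;
};

static inline int __example_cmpxchg_double(struct __example_pair *p,
					   unsigned long old1, unsigned long old2,
					   unsigned long new1, unsigned long new2)
{
	/* Atomically replace both words iff both still hold the old values. */
	return cmpxchg_double(&p->first, &p->second, old1, old2, new1, new2);
}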

#define __CMPWAIT_CASE(w, sfx, sz)					\
static inline void __cmpwait_case_##sz(volatile void *ptr,		\
				       unsigned long val)		\
{									\
	unsigned long tmp;						\
									\
	asm volatile(							\
	"	sevl\n"							\
	"	wfe\n"							\
	"	ldxr" #sfx "\t%" #w "[tmp], %[v]\n"			\
	"	eor	%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"	\
	"	cbnz	%" #w "[tmp], 1f\n"				\
	"	wfe\n"							\
	"1:"								\
	: [tmp] "=&r" (tmp), [v] "+Q" (*(unsigned long *)ptr)		\
	: [val] "r" (val));						\
}

__CMPWAIT_CASE(w, b, 8);
__CMPWAIT_CASE(w, h, 16);
__CMPWAIT_CASE(w,  , 32);
__CMPWAIT_CASE( ,  , 64);

#undef __CMPWAIT_CASE

#define __CMPWAIT_GEN(sfx)						\
static inline void __cmpwait##sfx(volatile void *ptr,			\
				  unsigned long val,			\
				  int size)				\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpwait_case##sfx##_8(ptr, (u8)val);		\
	case 2:								\
		return __cmpwait_case##sfx##_16(ptr, (u16)val);		\
	case 4:								\
		return __cmpwait_case##sfx##_32(ptr, val);		\
	case 8:								\
		return __cmpwait_case##sfx##_64(ptr, val);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPWAIT_GEN()

#undef __CMPWAIT_GEN

#define __cmpwait_relaxed(ptr, val) \
	__cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))
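
/*
 * Usage sketch (illustrative only, not part of the original header):
 * __cmpwait_relaxed() parks the CPU in WFE until the exclusive monitor armed
 * by the ldxr is cleared (typically by another CPU writing the watched
 * location), or returns immediately if the value already differs.  A caller
 * still has to re-check the value, as below; the arm64 smp_cond_load_*()
 * helpers are built around this pattern.  The function name is hypothetical.
 */
static inline void __example_wait_until_changed(unsigned long *ptr,
						unsigned long old)
{
	while (READ_ONCE(*ptr) == old)
		__cmpwait_relaxed(ptr, old);
}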

#endif	/* __ASM_CMPXCHG_H */