linux/arch/arm64/include/asm/cmpxchg.h
/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bug.h>

#include <asm/barrier.h>

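/*
 * __xchg - atomically exchange a 1-, 2-, 4- or 8-byte value with memory.
 *
 * Built on an ARMv8 load-exclusive/store-exclusive loop: the old value is
 * read with LDXR, the new value is written with STLXR (release semantics),
 * and CBNZ retries until the exclusive store succeeds.  The release store
 * plus the smp_mb() after the loop make the exchange fully ordered.  Any
 * other operand size is rejected at build time by BUILD_BUG().
 */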
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
        unsigned long ret, tmp;

        switch (size) {
        case 1:
                asm volatile("//        __xchg1\n"
                "1:     ldxrb   %w0, %2\n"
                "       stlxrb  %w1, %w3, %2\n"
                "       cbnz    %w1, 1b\n"
                        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
                        : "r" (x)
                        : "memory");
                break;
        case 2:
                asm volatile("//        __xchg2\n"
                "1:     ldxrh   %w0, %2\n"
                "       stlxrh  %w1, %w3, %2\n"
                "       cbnz    %w1, 1b\n"
                        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
                        : "r" (x)
                        : "memory");
                break;
        case 4:
                asm volatile("//        __xchg4\n"
                "1:     ldxr    %w0, %2\n"
                "       stlxr   %w1, %w3, %2\n"
                "       cbnz    %w1, 1b\n"
                        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
                        : "r" (x)
                        : "memory");
                break;
        case 8:
                asm volatile("//        __xchg8\n"
                "1:     ldxr    %0, %2\n"
                "       stlxr   %w1, %3, %2\n"
                "       cbnz    %w1, 1b\n"
                        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
                        : "r" (x)
                        : "memory");
                break;
        default:
                BUILD_BUG();
        }

        smp_mb();
        return ret;
}

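/*
 * xchg - type-generic wrapper around __xchg: the operand size comes from
 * sizeof(*(ptr)) and the result is cast back to the pointed-to type, so
 * the same macro works for bytes, halfwords, words, doublewords and
 * pointers.  The previous value of *ptr is returned.
 *
 * Illustrative use only (the variables below are hypothetical, not part
 * of this header):
 *
 *	static unsigned long pending_flags;
 *	unsigned long old = xchg(&pending_flags, 0);	// fetch and clear
 */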
#define xchg(ptr,x) \
({ \
        __typeof__(*(ptr)) __ret; \
        __ret = (__typeof__(*(ptr))) \
                __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
        __ret; \
})

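/*
 * __cmpxchg - compare-and-swap without any implied memory barriers.
 *
 * Each size variant loads *ptr exclusively, compares it with 'old', and
 * only attempts the exclusive store of 'new' when they match.  On a
 * mismatch the store is skipped and the status value stays zero, so the
 * do/while loop exits immediately; the loop only retries when the
 * exclusive store itself fails.  The value observed in memory is
 * returned, and callers detect success by comparing it against 'old'.
 */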
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        unsigned long oldval = 0, res;

        switch (size) {
        case 1:
                do {
                        asm volatile("// __cmpxchg1\n"
                        "       ldxrb   %w1, %2\n"
                        "       mov     %w0, #0\n"
                        "       cmp     %w1, %w3\n"
                        "       b.ne    1f\n"
                        "       stxrb   %w0, %w4, %2\n"
                        "1:\n"
                                : "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
                                : "Ir" (old), "r" (new)
                                : "cc");
                } while (res);
                break;

        case 2:
                do {
                        asm volatile("// __cmpxchg2\n"
                        "       ldxrh   %w1, %2\n"
                        "       mov     %w0, #0\n"
                        "       cmp     %w1, %w3\n"
                        "       b.ne    1f\n"
                        "       stxrh   %w0, %w4, %2\n"
                        "1:\n"
                                : "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
                                : "Ir" (old), "r" (new)
                                : "cc");
                } while (res);
                break;

        case 4:
                do {
                        asm volatile("// __cmpxchg4\n"
                        "       ldxr    %w1, %2\n"
                        "       mov     %w0, #0\n"
                        "       cmp     %w1, %w3\n"
                        "       b.ne    1f\n"
                        "       stxr    %w0, %w4, %2\n"
                        "1:\n"
                                : "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
                                : "Ir" (old), "r" (new)
                                : "cc");
                } while (res);
                break;

        case 8:
                do {
                        asm volatile("// __cmpxchg8\n"
                        "       ldxr    %1, %2\n"
                        "       mov     %w0, #0\n"
                        "       cmp     %1, %3\n"
                        "       b.ne    1f\n"
                        "       stxr    %w0, %4, %2\n"
                        "1:\n"
                                : "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
                                : "Ir" (old), "r" (new)
                                : "cc");
                } while (res);
                break;

        default:
                BUILD_BUG();
        }

        return oldval;
}

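/*
 * __cmpxchg_mb - fully ordered compare-and-swap: __cmpxchg() bracketed by
 * smp_mb() on both sides.  This is what backs the plain cmpxchg() macro
 * below.
 */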
static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
                                         unsigned long new, int size)
{
        unsigned long ret;

        smp_mb();
        ret = __cmpxchg(ptr, old, new, size);
        smp_mb();

        return ret;
}

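/*
 * cmpxchg - type-generic, fully ordered compare-and-swap.  Returns the
 * value found at *ptr; the swap took place if and only if that value
 * equals 'o'.
 *
 * Illustrative use only ('owner' is a hypothetical variable, not part of
 * this header):
 *
 *	static unsigned int owner;
 *	if (cmpxchg(&owner, 0, 1) == 0)
 *		pr_debug("claimed ownership\n");
 */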
#define cmpxchg(ptr, o, n) \
({ \
        __typeof__(*(ptr)) __ret; \
        __ret = (__typeof__(*(ptr))) \
                __cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \
                             sizeof(*(ptr))); \
        __ret; \
})

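/*
 * cmpxchg_local - the same compare-and-swap without the surrounding
 * smp_mb() barriers, for callers that need the atomic update itself but
 * no ordering against other memory accesses.
 */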
#define cmpxchg_local(ptr, o, n) \
({ \
        __typeof__(*(ptr)) __ret; \
        __ret = (__typeof__(*(ptr))) \
                __cmpxchg((ptr), (unsigned long)(o), \
                          (unsigned long)(n), sizeof(*(ptr))); \
        __ret; \
})

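/*
 * 64-bit variants: cmpxchg() already handles 8-byte operands on arm64, so
 * cmpxchg64() is a straight alias, while cmpxchg64_local() and
 * cmpxchg64_relaxed() map to the barrier-free cmpxchg_local().
 */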
#define cmpxchg64(ptr,o,n)              cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n)        cmpxchg_local((ptr),(o),(n))

#define cmpxchg64_relaxed(ptr,o,n)      cmpxchg_local((ptr),(o),(n))

#endif  /* __ASM_CMPXCHG_H */