linux/arch/x86/include/asm/cmpxchg_32.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use set_64bit(), __cmpxchg64(), or their variants,
 *       you need to test for the feature in boot_cpu_data.
 */

/*
 * CMPXCHG8B only writes to the target if EDX:EAX already holds the
 * target's current value; otherwise it acts as a read and leaves the
 * "new previous" value in EDX:EAX.  That is why there is a loop.
 * Preloading EDX:EAX is a performance optimization: in the common
 * case it means we need only one locked operation.
 *
 * A SIMD/3DNow!/MMX/FPU 64-bit store here would require at the very
 * least an FPU save and/or %cr0.ts manipulation.
 *
 * cmpxchg8b must be used with the lock prefix here so that the
 * instruction executes atomically and the reader side sees a
 * coherent 64-bit value.
 */
static inline void set_64bit(volatile u64 *ptr, u64 value)
{
        u32 low  = value;
        u32 high = value >> 32;
        u64 prev = *ptr;

        asm volatile("\n1:\t"
                     LOCK_PREFIX "cmpxchg8b %0\n\t"
                     "jnz 1b"
                     : "=m" (*ptr), "+A" (prev)
                     : "b" (low), "c" (high)
                     : "memory");
}
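
/*
 * Usage sketch (not part of this header; "shared_stamp" and
 * "publish_stamp" are hypothetical, shown only for illustration).
 * A writer publishing a 64-bit value that concurrent readers must
 * never observe half-written might do:
 *
 *	static volatile u64 shared_stamp;
 *
 *	static void publish_stamp(u64 stamp)
 *	{
 *		set_64bit(&shared_stamp, stamp);
 *	}
 *
 * A pair of plain 32-bit stores could be seen torn by a reader on
 * another CPU; the locked cmpxchg8b loop above cannot.
 */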

#ifdef CONFIG_X86_CMPXCHG64
#define arch_cmpxchg64(ptr, o, n)                                       \
        ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
                                         (unsigned long long)(n)))
#define arch_cmpxchg64_local(ptr, o, n)                                 \
        ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
                                               (unsigned long long)(n)))
#endif

static inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
        u64 prev;
        asm volatile(LOCK_PREFIX "cmpxchg8b %1"
                     : "=A" (prev),
                       "+m" (*ptr)
                     : "b" ((u32)new),
                       "c" ((u32)(new >> 32)),
                       "0" (old)
                     : "memory");
        return prev;
}
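
/*
 * Sketch of the usual compare-and-swap retry loop built on top of
 * arch_cmpxchg64() ("counter64" and "add64" are hypothetical, for
 * illustration only): re-read the current value, compute the update,
 * and retry until no other CPU modified the location in between.
 *
 *	static u64 counter64;
 *
 *	static void add64(u64 delta)
 *	{
 *		u64 old, new;
 *
 *		do {
 *			old = counter64;
 *			new = old + delta;
 *		} while (arch_cmpxchg64(&counter64, old, new) != old);
 *	}
 */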

static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
        u64 prev;
        asm volatile("cmpxchg8b %1"
                     : "=A" (prev),
                       "+m" (*ptr)
                     : "b" ((u32)new),
                       "c" ((u32)(new >> 32)),
                       "0" (old)
                     : "memory");
        return prev;
}
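
/*
 * The _local variant omits the lock prefix, so it is only safe for
 * memory no other CPU can touch concurrently, e.g. per-CPU data
 * accessed with preemption disabled.  It still cannot be torn by an
 * interrupt on the local CPU, since cmpxchg8b is a single
 * instruction.  A minimal sketch ("pcpu_val" is hypothetical):
 *
 *	prev = __cmpxchg64_local(this_cpu_ptr(&pcpu_val), old, new);
 */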

#ifndef CONFIG_X86_CMPXCHG64
/*
 * Building a kernel capable of running on the 80386 and 80486: these
 * CPUs lack cmpxchg8b, so the instruction may have to be emulated.
 */

#define arch_cmpxchg64(ptr, o, n)                               \
({                                                              \
        __typeof__(*(ptr)) __ret;                               \
        __typeof__(*(ptr)) __old = (o);                         \
        __typeof__(*(ptr)) __new = (n);                         \
        alternative_io(LOCK_PREFIX_HERE                         \
                        "call cmpxchg8b_emu",                   \
                        "lock; cmpxchg8b (%%esi)",              \
                       X86_FEATURE_CX8,                         \
                       "=A" (__ret),                            \
                       "S" ((ptr)), "0" (__old),                \
                       "b" ((unsigned int)__new),               \
                       "c" ((unsigned int)(__new >> 32))        \
                       : "memory");                             \
        __ret; })
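
/*
 * The asm constraints match the operands of the patched-in
 * "lock; cmpxchg8b (%%esi)": pointer in %esi, old value in %edx:%eax,
 * new value in %ecx:%ebx.  cmpxchg8b_emu follows the same convention,
 * so the alternative can switch between the call and the real
 * instruction without any register shuffling.
 */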

#define arch_cmpxchg64_local(ptr, o, n)                         \
({                                                              \
        __typeof__(*(ptr)) __ret;                               \
        __typeof__(*(ptr)) __old = (o);                         \
        __typeof__(*(ptr)) __new = (n);                         \
        alternative_io("call cmpxchg8b_emu",                    \
                       "cmpxchg8b (%%esi)",                     \
                       X86_FEATURE_CX8,                         \
                       "=A" (__ret),                            \
                       "S" ((ptr)), "0" (__old),                \
                       "b" ((unsigned int)__new),               \
                       "c" ((unsigned int)(__new >> 32))        \
                       : "memory");                             \
        __ret; })

#endif

#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX8)
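
/*
 * Per the note at the top of this file, code that may run on CX8-less
 * CPUs should test for the feature before relying on cmpxchg8b.  A
 * minimal sketch ("fallback_cmpxchg64" is hypothetical):
 *
 *	if (boot_cpu_has(X86_FEATURE_CX8))
 *		prev = __cmpxchg64(ptr, old, new);
 *	else
 *		prev = fallback_cmpxchg64(ptr, old, new);
 */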

#endif /* _ASM_X86_CMPXCHG_32_H */