linux/arch/x86/include/asm/atomic.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

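/*
 * Illustrative usage sketch (not part of this header): a minimal
 * resource-counting pattern built on the generic atomic_*() wrappers
 * from <linux/atomic.h>, which are backed by the arch_atomic_*()
 * helpers defined below.  The names nr_users, grab_slot(),
 * release_slot() and free_slots() are hypothetical.
 *
 *        static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *        static void grab_slot(void)
 *        {
 *                atomic_inc(&nr_users);
 *        }
 *
 *        static void release_slot(void)
 *        {
 *                if (atomic_dec_and_test(&nr_users))
 *                        free_slots();   // hypothetical cleanup
 *        }
 */
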
/**
 * arch_atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
static __always_inline int arch_atomic_read(const atomic_t *v)
{
        /*
         * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
         * it's a non-inlined function that increases binary size and stack usage.
         */
        return __READ_ONCE((v)->counter);
}

/**
 * arch_atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
        __WRITE_ONCE(v->counter, i);
}

/**
 * arch_atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
        asm volatile(LOCK_PREFIX "addl %1,%0"
                     : "+m" (v->counter)
                     : "ir" (i) : "memory");
}

/**
 * arch_atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
        asm volatile(LOCK_PREFIX "subl %1,%0"
                     : "+m" (v->counter)
                     : "ir" (i) : "memory");
}

/**
 * arch_atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
        return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test

/**
 * arch_atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic_inc(atomic_t *v)
{
        asm volatile(LOCK_PREFIX "incl %0"
                     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_inc arch_atomic_inc

/**
 * arch_atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic_dec(atomic_t *v)
{
        asm volatile(LOCK_PREFIX "decl %0"
                     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_dec arch_atomic_dec

/**
 * arch_atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
        return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test

/**
 * arch_atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
        return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test

/**
 * arch_atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when the
 * result is greater than or equal to zero.
 */
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
        return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
}
#define arch_atomic_add_negative arch_atomic_add_negative

/**
 * arch_atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v.
 */
static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
        return i + xadd(&v->counter, i);
}
#define arch_atomic_add_return arch_atomic_add_return

/**
 * arch_atomic_sub_return - subtract integer and return
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns @v - @i.
 */
static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
{
        return arch_atomic_add_return(-i, v);
}
#define arch_atomic_sub_return arch_atomic_sub_return

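/**
 * arch_atomic_fetch_add - add integer and return original value
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the value of @v
 * before the addition.
 */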
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
        return xadd(&v->counter, i);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add

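/**
 * arch_atomic_fetch_sub - subtract integer and return original value
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns the value of @v
 * before the subtraction.
 */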
static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
        return xadd(&v->counter, -i);
}
#define arch_atomic_fetch_sub arch_atomic_fetch_sub

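/**
 * arch_atomic_cmpxchg - compare and exchange atomic variable
 * @v: pointer of type atomic_t
 * @old: expected value
 * @new: new value
 *
 * Atomically compares @v with @old; if they are equal, @v is set
 * to @new.  Returns the value of @v before the operation.
 */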
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
        return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

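/**
 * arch_atomic_try_cmpxchg - compare and exchange, returning success
 * @v: pointer of type atomic_t
 * @old: pointer to the expected value
 * @new: new value
 *
 * Atomically compares @v with *@old; if they are equal, @v is set to
 * @new and true is returned.  Otherwise *@old is updated to the current
 * value of @v and false is returned.
 */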
static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
        return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg

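/**
 * arch_atomic_xchg - exchange atomic variable
 * @v: pointer of type atomic_t
 * @new: new value
 *
 * Atomically sets @v to @new and returns the previous value of @v.
 */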
static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
{
        return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg

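/**
 * arch_atomic_and - bitwise AND atomic variable
 * @i: integer value to AND with @v
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v & @i.  Does not return a value.
 */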
static __always_inline void arch_atomic_and(int i, atomic_t *v)
{
        asm volatile(LOCK_PREFIX "andl %1,%0"
                        : "+m" (v->counter)
                        : "ir" (i)
                        : "memory");
}

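/**
 * arch_atomic_fetch_and - bitwise AND and return original value
 * @i: integer value to AND with @v
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v & @i via an arch_atomic_try_cmpxchg()
 * loop and returns the value of @v before the operation.
 */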
static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
        int val = arch_atomic_read(v);

        do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

        return val;
}
#define arch_atomic_fetch_and arch_atomic_fetch_and

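/**
 * arch_atomic_or - bitwise OR atomic variable
 * @i: integer value to OR with @v
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v | @i.  Does not return a value.
 */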
static __always_inline void arch_atomic_or(int i, atomic_t *v)
{
        asm volatile(LOCK_PREFIX "orl %1,%0"
                        : "+m" (v->counter)
                        : "ir" (i)
                        : "memory");
}

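/**
 * arch_atomic_fetch_or - bitwise OR and return original value
 * @i: integer value to OR with @v
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v | @i via an arch_atomic_try_cmpxchg()
 * loop and returns the value of @v before the operation.
 */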
static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
        int val = arch_atomic_read(v);

        do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));

        return val;
}
#define arch_atomic_fetch_or arch_atomic_fetch_or

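/**
 * arch_atomic_xor - bitwise XOR atomic variable
 * @i: integer value to XOR with @v
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v ^ @i.  Does not return a value.
 */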
static __always_inline void arch_atomic_xor(int i, atomic_t *v)
{
        asm volatile(LOCK_PREFIX "xorl %1,%0"
                        : "+m" (v->counter)
                        : "ir" (i)
                        : "memory");
}

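/**
 * arch_atomic_fetch_xor - bitwise XOR and return original value
 * @i: integer value to XOR with @v
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v ^ @i via an arch_atomic_try_cmpxchg()
 * loop and returns the value of @v before the operation.
 */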
static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
        int val = arch_atomic_read(v);

        do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));

        return val;
}
#define arch_atomic_fetch_xor arch_atomic_fetch_xor

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#endif /* _ASM_X86_ATOMIC_H */