/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/**
 * arch_atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	/*
	 * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
	 * it's non-inlined function that increases binary size and stack usage.
	 */
	return __READ_ONCE((v)->counter);
}

/**
 * arch_atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	__WRITE_ONCE(v->counter, i);
}

/**
 * arch_atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

/**
 * arch_atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

/**
 * arch_atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test

/**
 * arch_atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_inc arch_atomic_inc

/**
 * arch_atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_dec arch_atomic_dec

/**
 * arch_atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test

/**
 * arch_atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test

/**
 * arch_atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.
 */
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
}
#define arch_atomic_add_negative arch_atomic_add_negative

/**
 * arch_atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);
}
#define arch_atomic_add_return arch_atomic_add_return

/**
 * arch_atomic_sub_return - subtract integer and return
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns @v - @i
 */
static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
{
	return arch_atomic_add_return(-i, v);
}
#define arch_atomic_sub_return arch_atomic_sub_return

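/**
 * arch_atomic_fetch_add - add integer and return original value
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the value @v held beforehand.
 */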
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	return xadd(&v->counter, i);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add

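/**
 * arch_atomic_fetch_sub - subtract integer and return original value
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns the value @v held beforehand.
 */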
static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
	return xadd(&v->counter, -i);
}
#define arch_atomic_fetch_sub arch_atomic_fetch_sub

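/**
 * arch_atomic_cmpxchg - compare and exchange
 * @v: pointer of type atomic_t
 * @old: expected value
 * @new: new value
 *
 * Atomically sets @v to @new if it currently equals @old; returns the
 * value @v held before the operation.
 */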
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

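/**
 * arch_atomic_try_cmpxchg - compare and exchange, with success flag
 * @v: pointer of type atomic_t
 * @old: pointer to the expected value, updated on failure
 * @new: new value
 *
 * Atomically sets @v to @new if it currently equals *@old and returns
 * true; otherwise stores the current value of @v in *@old and returns
 * false.
 */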
static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg

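/**
 * arch_atomic_xchg - exchange value
 * @v: pointer of type atomic_t
 * @new: new value
 *
 * Atomically stores @new in @v and returns the value @v held beforehand.
 */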
static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg

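/**
 * arch_atomic_and - bitwise AND into atomic variable
 * @i: integer mask
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v & @i. No return value.
 */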
static __always_inline void arch_atomic_and(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "andl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}

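/**
 * arch_atomic_fetch_and - bitwise AND and return original value
 * @i: integer mask
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v & @i and returns the value @v held
 * beforehand, using a try_cmpxchg loop: on failure the expected value
 * is refreshed and the update is retried.
 */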
static __always_inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

	return val;
}
#define arch_atomic_fetch_and arch_atomic_fetch_and

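/**
 * arch_atomic_or - bitwise OR into atomic variable
 * @i: integer mask
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v | @i. No return value.
 */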
static __always_inline void arch_atomic_or(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "orl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}

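/**
 * arch_atomic_fetch_or - bitwise OR and return original value
 * @i: integer mask
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v | @i and returns the value @v held
 * beforehand, using the same try_cmpxchg loop as arch_atomic_fetch_and().
 */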
static __always_inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));

	return val;
}
#define arch_atomic_fetch_or arch_atomic_fetch_or

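/**
 * arch_atomic_xor - bitwise XOR into atomic variable
 * @i: integer mask
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v ^ @i. No return value.
 */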
static __always_inline void arch_atomic_xor(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "xorl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}

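/**
 * arch_atomic_fetch_xor - bitwise XOR and return original value
 * @i: integer mask
 * @v: pointer of type atomic_t
 *
 * Atomically updates @v to @v ^ @i and returns the value @v held
 * beforehand, using the same try_cmpxchg loop as arch_atomic_fetch_and().
 */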
static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));

	return val;
}
#define arch_atomic_fetch_xor arch_atomic_fetch_xor

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#endif /* _ASM_X86_ATOMIC_H */