/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

#define ATOMIC_INIT(i) { (i) }
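
/*
 * Usage sketch (illustrative, not part of this header): ATOMIC_INIT() is
 * meant for static initialization, e.g.
 *
 *	static atomic_t refcount = ATOMIC_INIT(1);
 *
 * Dynamically allocated atomic_t's are initialized with atomic_set().
 */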

/**
 * arch_atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	/*
	 * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
	 * it's a non-inlined function that increases binary size and stack usage.
	 */
	return READ_ONCE((v)->counter);
}

/**
 * arch_atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}
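
/*
 * Note: aligned 32-bit loads and stores are already single-copy atomic on
 * x86; READ_ONCE()/WRITE_ONCE() are used above so the compiler cannot tear,
 * fuse or reorder the accesses at the C level.
 */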

/**
 * arch_atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}
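
/*
 * LOCK_PREFIX (from <asm/alternative.h>) emits the x86 "lock" prefix on SMP
 * kernels, making the read-modify-write atomic with respect to other CPUs;
 * on uniprocessor configurations the prefix is omitted or patched out.
 */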

/**
 * arch_atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i) : "memory");
}

/**
 * arch_atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test
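
/*
 * The GEN_{UNARY,BINARY}_RMWcc() helpers (from <asm/rmwcc.h>) emit the
 * locked instruction and return the named CPU condition flag as a bool:
 * "e" tests ZF (result was zero), "s" tests SF (result was negative), so
 * no separate compare is needed after the atomic operation.
 */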

/**
 * arch_atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_inc arch_atomic_inc

/**
 * arch_atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter) :: "memory");
}
#define arch_atomic_dec arch_atomic_dec

/**
 * arch_atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test

/**
 * arch_atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test

/**
 * arch_atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.
 */
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
}
#define arch_atomic_add_negative arch_atomic_add_negative

/**
 * arch_atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v.
 */
static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);
}
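
/*
 * xadd() is "lock xadd": it atomically adds @i and returns the value
 * v->counter held before the addition, so the post-add value is
 * reconstructed by adding @i back in; arch_atomic_fetch_add() below
 * returns the pre-add value directly.
 */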

/**
 * arch_atomic_sub_return - subtract integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to subtract
 *
 * Atomically subtracts @i from @v and returns @v - @i.
 */
static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
{
	return arch_atomic_add_return(-i, v);
}

static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	return xadd(&v->counter, i);
}

static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
	return xadd(&v->counter, -i);
}

static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return arch_cmpxchg(&v->counter, old, new);
}

#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	return try_cmpxchg(&v->counter, old, new);
}
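
/*
 * try_cmpxchg() compiles to a single "lock cmpxchg": it returns true on
 * success and, on failure, writes the value actually found in v->counter
 * back into *old. The fetch_{and,or,xor} loops below rely on this to
 * avoid an extra load on every retry.
 */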

static inline int arch_atomic_xchg(atomic_t *v, int new)
{
	return arch_xchg(&v->counter, new);
}

static inline void arch_atomic_and(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "andl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}

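/*
 * Unlike xadd, x86 has no fetch-form "and"/"or"/"xor" (the locked variants
 * do not return the old value), so the fetch_*() versions are built from a
 * cmpxchg retry loop.
 */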
static inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

	return val;
}

static inline void arch_atomic_or(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "orl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}

static inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));

	return val;
}

static inline void arch_atomic_xor(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "xorl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i)
		     : "memory");
}

static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));

	return val;
}

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif
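
/*
 * 64-bit counterparts: atomic64_64.h uses native 64-bit instructions on
 * x86-64, while atomic64_32.h has to synthesize them on 32-bit x86,
 * largely around cmpxchg8b.
 */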

#include <asm-generic/atomic-instrumented.h>
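
/*
 * The header above wraps each arch_atomic_*() helper into the generic
 * atomic_*() API used by the rest of the kernel, adding sanitizer
 * instrumentation (e.g. KASAN checks) around every access.
 */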

#endif /* _ASM_X86_ATOMIC_H */