#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

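/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */
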
#define ATOMIC_INIT(i)	{ (i) }

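/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */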
static __always_inline int atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

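/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */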
static __always_inline void atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}

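/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */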
static __always_inline void atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}

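/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */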
static __always_inline void atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}

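/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */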
static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
}

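/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */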
static __always_inline void atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter));
}

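/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */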
static __always_inline void atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter));
}

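/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */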
static __always_inline bool atomic_dec_and_test(atomic_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
}

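/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */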
static __always_inline bool atomic_inc_and_test(atomic_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
}

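/**
 * atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.
 */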
static __always_inline bool atomic_add_negative(int i, atomic_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
}

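/**
 * atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v.
 */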
static __always_inline int atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);
}

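/**
 * atomic_sub_return - subtract integer and return
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns @v - @i.
 */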
static __always_inline int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i, v);
}

#define atomic_inc_return(v)  (atomic_add_return(1, v))
#define atomic_dec_return(v)  (atomic_sub_return(1, v))

static __always_inline int atomic_fetch_add(int i, atomic_t *v)
{
	return xadd(&v->counter, i);
}

static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
{
	return xadd(&v->counter, -i);
}

static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return cmpxchg(&v->counter, old, new);
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	return xchg(&v->counter, new);
}

#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	asm volatile(LOCK_PREFIX #op"l %1,%0"				\
			: "+m" (v->counter)				\
			: "ir" (i)					\
			: "memory");					\
}

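/*
 * x86 has no instruction that both performs an and/or/xor and returns
 * the old value, so the atomic_fetch_{and,or,xor}() variants below are
 * built from an atomic_cmpxchg() retry loop.
 */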
#define ATOMIC_FETCH_OP(op, c_op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int old, val = atomic_read(v);					\
	for (;;) {							\
		old = atomic_cmpxchg(v, val, val c_op i);		\
		if (old == val)						\
			break;						\
		val = old;						\
	}								\
	return old;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op)							\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &)
ATOMIC_OPS(or , |)
ATOMIC_OPS(xor, ^)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP

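/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */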
static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

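/**
 * atomic_inc_short - increment of a short integer
 * @v: pointer of type short int
 *
 * Atomically adds 1 to @v.
 * Returns the new value of @v.
 */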
static __always_inline short int atomic_inc_short(short int *v)
{
	asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
	return *v;
}

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#endif /* _ASM_X86_ATOMIC_H */