#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/processor.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>
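/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */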
#define ATOMIC_INIT(i)	{ (i) }
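/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */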
static __always_inline int atomic_read(const atomic_t *v)
{
	return ACCESS_ONCE((v)->counter);
}
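/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */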
static __always_inline void atomic_set(atomic_t *v, int i)
{
	v->counter = i;
}
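/**
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */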
static __always_inline void atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}
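/**
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */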
static __always_inline void atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}
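/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */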
static __always_inline int atomic_sub_and_test(int i, atomic_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
}
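/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */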
static __always_inline void atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter));
}
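/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */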
static __always_inline void atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter));
}
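/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */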
static __always_inline int atomic_dec_and_test(atomic_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
}
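/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */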
static __always_inline int atomic_inc_and_test(atomic_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
}
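/**
 * atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */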
static __always_inline int atomic_add_negative(int i, atomic_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
}
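/**
 * atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v.
 */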
static __always_inline int atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);
}
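/**
 * atomic_sub_return - subtract integer and return
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns @v - @i.
 */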
static __always_inline int atomic_sub_return(int i, atomic_t *v)
{
	return atomic_add_return(-i, v);
}

#define atomic_inc_return(v)  (atomic_add_return(1, v))
#define atomic_dec_return(v)  (atomic_sub_return(1, v))

static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return cmpxchg(&v->counter, old, new);
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	return xchg(&v->counter, new);
}

#define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
	asm volatile(LOCK_PREFIX #op"l %1,%0" \
		     : "+m" (v->counter) \
		     : "ir" (i) \
		     : "memory"); \
}

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_OP
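/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */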
static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
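/**
 * atomic_inc_short - increment of a short integer
 * @v: pointer of type short int
 *
 * Atomically adds 1 to @v.
 * Returns the new value of @v.
 */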
static __always_inline short int atomic_inc_short(short int *v)
{
	asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
	return *v;
}

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#endif /* _ASM_X86_ATOMIC_H */