/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_64_H
#define _ASM_X86_ATOMIC64_64_H

#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>

/* The 64-bit atomic type */

#define ATOMIC64_INIT(i)	{ (i) }
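
/*
 * Illustrative only (the variable name is hypothetical): a 64-bit
 * counter at file scope can be statically initialized with
 * ATOMIC64_INIT:
 *
 *	static atomic64_t event_count = ATOMIC64_INIT(0);
 */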

/**
 * atomic64_read - read atomic64 variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 * Doesn't imply a read memory barrier.
 */
static inline long atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

/**
 * atomic64_set - set atomic64 variable
 * @v: pointer to type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static inline void atomic64_set(atomic64_t *v, long i)
{
	WRITE_ONCE(v->counter, i);
}

/**
 * atomic64_add - add integer to atomic64 variable
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void atomic64_add(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "addq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter));
}

/**
 * atomic64_sub - subtract the atomic64 variable
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static inline void atomic64_sub(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "subq %1,%0"
		     : "=m" (v->counter)
		     : "er" (i), "m" (v->counter));
}

/**
 * atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer to type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
}

/**
 * atomic64_inc - increment atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void atomic64_inc(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "incq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter));
}

/**
 * atomic64_dec - decrement atomic64 variable
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void atomic64_dec(atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "decq %0"
		     : "=m" (v->counter)
		     : "m" (v->counter));
}

/**
 * atomic64_dec_and_test - decrement and test
 * @v: pointer to type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline bool atomic64_dec_and_test(atomic64_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
}

/**
 * atomic64_inc_and_test - increment and test
 * @v: pointer to type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline bool atomic64_inc_and_test(atomic64_t *v)
{
	GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
}

/**
 * atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline bool atomic64_add_negative(long i, atomic64_t *v)
{
	GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
}

/**
 * atomic64_add_return - add and return
 * @i: integer value to add
 * @v: pointer to type atomic64_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __always_inline long atomic64_add_return(long i, atomic64_t *v)
{
	return i + xadd(&v->counter, i);
}

static inline long atomic64_sub_return(long i, atomic64_t *v)
{
	return atomic64_add_return(-i, v);
}

static inline long atomic64_fetch_add(long i, atomic64_t *v)
{
	return xadd(&v->counter, i);
}

static inline long atomic64_fetch_sub(long i, atomic64_t *v)
{
	return xadd(&v->counter, -i);
}
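
/*
 * Note: atomic64_fetch_add()/atomic64_fetch_sub() above return the value
 * the counter held *before* the operation, while atomic64_add_return()
 * and atomic64_sub_return() return the value *after* it.
 */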

#define atomic64_inc_return(v)	(atomic64_add_return(1, (v)))
#define atomic64_dec_return(v)	(atomic64_sub_return(1, (v)))

static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
	return cmpxchg(&v->counter, old, new);
}

#define atomic64_try_cmpxchg atomic64_try_cmpxchg
static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long new)
{
	return try_cmpxchg(&v->counter, old, new);
}
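
/*
 * On failure, try_cmpxchg() above updates *old with the value currently
 * in v->counter. That is what lets the cmpxchg loops further down (e.g.
 * atomic64_fetch_and()) run with an empty do { } while body: the reload
 * of the expected value happens inside the primitive itself.
 */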

static inline long atomic64_xchg(atomic64_t *v, long new)
{
	return xchg(&v->counter, new);
}

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true if the addition was done.
 */
static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
{
	s64 c = atomic64_read(v);
	do {
		if (unlikely(c == u))
			return false;
	} while (!atomic64_try_cmpxchg(v, &c, c + a));
	return true;
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
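
/*
 * Illustrative only (the object and field names are hypothetical): take
 * a reference only while the count has not already dropped to zero:
 *
 *	if (!atomic64_inc_not_zero(&obj->refcnt))
 *		return NULL;
 */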

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	s64 dec, c = atomic64_read(v);
	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
	} while (!atomic64_try_cmpxchg(v, &c, dec));
	return dec;
}
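
/*
 * Illustrative only (the counter name is hypothetical): consume one
 * token only if one is available; a negative return value means nothing
 * was taken:
 *
 *	if (atomic64_dec_if_positive(&tokens) < 0)
 *		return -EAGAIN;
 */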

static inline void atomic64_and(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "andq %1,%0"
		     : "+m" (v->counter)
		     : "er" (i)
		     : "memory");
}

static inline long atomic64_fetch_and(long i, atomic64_t *v)
{
	s64 val = atomic64_read(v);

	do {
	} while (!atomic64_try_cmpxchg(v, &val, val & i));
	return val;
}

static inline void atomic64_or(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "orq %1,%0"
		     : "+m" (v->counter)
		     : "er" (i)
		     : "memory");
}

static inline long atomic64_fetch_or(long i, atomic64_t *v)
{
	s64 val = atomic64_read(v);

	do {
	} while (!atomic64_try_cmpxchg(v, &val, val | i));
	return val;
}

static inline void atomic64_xor(long i, atomic64_t *v)
{
	asm volatile(LOCK_PREFIX "xorq %1,%0"
		     : "+m" (v->counter)
		     : "er" (i)
		     : "memory");
}

static inline long atomic64_fetch_xor(long i, atomic64_t *v)
{
	s64 val = atomic64_read(v);

	do {
	} while (!atomic64_try_cmpxchg(v, &val, val ^ i));
	return val;
}

#endif /* _ASM_X86_ATOMIC64_64_H */