/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
#include <asm/barrier.h>
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
/*
 * Acquire/release accessors for the counter word itself.  An architecture
 * may provide optimized versions; otherwise fall back to the generic
 * load-acquire / store-release barrier primitives.
 */
#ifndef atomic_read_acquire
#define  atomic_read_acquire(v)		smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic_set_release
#define  atomic_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif
32
33
34
35
36
37
38
39
40
/*
 * The idea here is to build acquire/release/fully-ordered variants of an
 * atomic op out of its _relaxed form plus explicit barriers:
 *
 *   _acquire: relaxed op, then a barrier after it;
 *   _release: a barrier before it, then the relaxed op;
 *   fence:    barriers on both sides of the relaxed op.
 *
 * `op' is the name stem (e.g. atomic_add_return); `args...' are forwarded
 * unchanged.  The statement-expression form preserves the op's return value.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();						\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	smp_mb__before_atomic();					\
	op##_relaxed(args);						\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})
62
63
/*
 * An architecture that defines atomic_add_return_relaxed() is expected to
 * also define whichever _acquire/_release/fully-ordered forms it can do
 * natively; any it leaves out are synthesized here from the relaxed op via
 * the __atomic_op_*() wrappers.  If only the fully-ordered
 * atomic_add_return() exists, every ordering variant maps to it.
 */
#ifndef atomic_add_return_relaxed
#define  atomic_add_return_relaxed	atomic_add_return
#define  atomic_add_return_acquire	atomic_add_return
#define  atomic_add_return_release	atomic_add_return

#else /* atomic_add_return_relaxed */

#ifndef atomic_add_return_acquire
#define  atomic_add_return_acquire(...)					\
	__atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return_release
#define  atomic_add_return_release(...)					\
	__atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return
#define  atomic_add_return(...)						\
	__atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
#endif /* atomic_add_return_relaxed */
86
87
/* Same scheme as atomic_add_return: fill in missing ordering variants. */
#ifndef atomic_sub_return_relaxed
#define  atomic_sub_return_relaxed	atomic_sub_return
#define  atomic_sub_return_acquire	atomic_sub_return
#define  atomic_sub_return_release	atomic_sub_return

#else /* atomic_sub_return_relaxed */

#ifndef atomic_sub_return_acquire
#define  atomic_sub_return_acquire(...)					\
	__atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return_release
#define  atomic_sub_return_release(...)					\
	__atomic_op_release(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return
#define  atomic_sub_return(...)						\
	__atomic_op_fence(atomic_sub_return, __VA_ARGS__)
#endif
#endif /* atomic_sub_return_relaxed */
110
111
/* Same scheme as atomic_add_return: fill in missing ordering variants. */
#ifndef atomic_xchg_relaxed
#define  atomic_xchg_relaxed		atomic_xchg
#define  atomic_xchg_acquire		atomic_xchg
#define  atomic_xchg_release		atomic_xchg

#else /* atomic_xchg_relaxed */

#ifndef atomic_xchg_acquire
#define  atomic_xchg_acquire(...)					\
	__atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg_release
#define  atomic_xchg_release(...)					\
	__atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg
#define  atomic_xchg(...)						\
	__atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
#endif /* atomic_xchg_relaxed */
134
135
/* Same scheme as atomic_add_return: fill in missing ordering variants. */
#ifndef atomic_cmpxchg_relaxed
#define  atomic_cmpxchg_relaxed		atomic_cmpxchg
#define  atomic_cmpxchg_acquire		atomic_cmpxchg
#define  atomic_cmpxchg_release		atomic_cmpxchg

#else /* atomic_cmpxchg_relaxed */

#ifndef atomic_cmpxchg_acquire
#define  atomic_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg_release
#define  atomic_cmpxchg_release(...)					\
	__atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg
#define  atomic_cmpxchg(...)						\
	__atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic_cmpxchg_relaxed */
158
/* 64-bit counterparts of atomic_read_acquire()/atomic_set_release(). */
#ifndef atomic64_read_acquire
#define  atomic64_read_acquire(v)	smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic64_set_release
#define  atomic64_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif
166
167
/* Same scheme as atomic_add_return, for the 64-bit op. */
#ifndef atomic64_add_return_relaxed
#define  atomic64_add_return_relaxed	atomic64_add_return
#define  atomic64_add_return_acquire	atomic64_add_return
#define  atomic64_add_return_release	atomic64_add_return

#else /* atomic64_add_return_relaxed */

#ifndef atomic64_add_return_acquire
#define  atomic64_add_return_acquire(...)				\
	__atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return_release
#define  atomic64_add_return_release(...)				\
	__atomic_op_release(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return
#define  atomic64_add_return(...)					\
	__atomic_op_fence(atomic64_add_return, __VA_ARGS__)
#endif
#endif /* atomic64_add_return_relaxed */
190
191
/* Same scheme as atomic_add_return, for the 64-bit op. */
#ifndef atomic64_sub_return_relaxed
#define  atomic64_sub_return_relaxed	atomic64_sub_return
#define  atomic64_sub_return_acquire	atomic64_sub_return
#define  atomic64_sub_return_release	atomic64_sub_return

#else /* atomic64_sub_return_relaxed */

#ifndef atomic64_sub_return_acquire
#define  atomic64_sub_return_acquire(...)				\
	__atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return_release
#define  atomic64_sub_return_release(...)				\
	__atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return
#define  atomic64_sub_return(...)					\
	__atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif /* atomic64_sub_return_relaxed */
214
215
/* Same scheme as atomic_add_return, for the 64-bit op. */
#ifndef atomic64_xchg_relaxed
#define  atomic64_xchg_relaxed		atomic64_xchg
#define  atomic64_xchg_acquire		atomic64_xchg
#define  atomic64_xchg_release		atomic64_xchg

#else /* atomic64_xchg_relaxed */

#ifndef atomic64_xchg_acquire
#define  atomic64_xchg_acquire(...)					\
	__atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg_release
#define  atomic64_xchg_release(...)					\
	__atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg
#define  atomic64_xchg(...)						\
	__atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif /* atomic64_xchg_relaxed */
238
239
/* Same scheme as atomic_add_return, for the 64-bit op. */
#ifndef atomic64_cmpxchg_relaxed
#define  atomic64_cmpxchg_relaxed	atomic64_cmpxchg
#define  atomic64_cmpxchg_acquire	atomic64_cmpxchg
#define  atomic64_cmpxchg_release	atomic64_cmpxchg

#else /* atomic64_cmpxchg_relaxed */

#ifndef atomic64_cmpxchg_acquire
#define  atomic64_cmpxchg_acquire(...)					\
	__atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg_release
#define  atomic64_cmpxchg_release(...)					\
	__atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg
#define  atomic64_cmpxchg(...)						\
	__atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic64_cmpxchg_relaxed */
262
263
/* Same scheme, for the plain (non-atomic_t) cmpxchg() family. */
#ifndef cmpxchg_relaxed
#define  cmpxchg_relaxed		cmpxchg
#define  cmpxchg_acquire		cmpxchg
#define  cmpxchg_release		cmpxchg

#else /* cmpxchg_relaxed */

#ifndef cmpxchg_acquire
#define  cmpxchg_acquire(...)						\
	__atomic_op_acquire(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg_release
#define  cmpxchg_release(...)						\
	__atomic_op_release(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg
#define  cmpxchg(...)							\
	__atomic_op_fence(cmpxchg, __VA_ARGS__)
#endif
#endif /* cmpxchg_relaxed */
286
287
/* Same scheme, for the 64-bit cmpxchg64() family. */
#ifndef cmpxchg64_relaxed
#define  cmpxchg64_relaxed		cmpxchg64
#define  cmpxchg64_acquire		cmpxchg64
#define  cmpxchg64_release		cmpxchg64

#else /* cmpxchg64_relaxed */

#ifndef cmpxchg64_acquire
#define  cmpxchg64_acquire(...)						\
	__atomic_op_acquire(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64_release
#define  cmpxchg64_release(...)						\
	__atomic_op_release(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64
#define  cmpxchg64(...)							\
	__atomic_op_fence(cmpxchg64, __VA_ARGS__)
#endif
#endif /* cmpxchg64_relaxed */
310
311
/* Same scheme, for the plain xchg() family. */
#ifndef xchg_relaxed
#define  xchg_relaxed			xchg
#define  xchg_acquire			xchg
#define  xchg_release			xchg

#else /* xchg_relaxed */

#ifndef xchg_acquire
#define  xchg_acquire(...)		__atomic_op_acquire(xchg, __VA_ARGS__)
#endif

#ifndef xchg_release
#define  xchg_release(...)		__atomic_op_release(xchg, __VA_ARGS__)
#endif

#ifndef xchg
#define  xchg(...)			__atomic_op_fence(xchg, __VA_ARGS__)
#endif
#endif /* xchg_relaxed */
331
332
333
334
335
#ifndef smp_mb__before_atomic_inc
/*
 * Deprecated: use smp_mb__before_atomic() instead.
 * NOTE(review): the barrier is reached via a function-local extern,
 * presumably so these legacy names need no extra header; confirm the
 * __smp_mb__before_atomic symbol is still provided at link time.
 */
static inline void __deprecated smp_mb__before_atomic_inc(void)
{
	extern void __smp_mb__before_atomic(void);
	__smp_mb__before_atomic();
}
#endif
343
#ifndef smp_mb__after_atomic_inc
/* Deprecated: use smp_mb__after_atomic() instead. */
static inline void __deprecated smp_mb__after_atomic_inc(void)
{
	extern void __smp_mb__after_atomic(void);
	__smp_mb__after_atomic();
}
#endif
351
#ifndef smp_mb__before_atomic_dec
/* Deprecated: use smp_mb__before_atomic() instead. */
static inline void __deprecated smp_mb__before_atomic_dec(void)
{
	extern void __smp_mb__before_atomic(void);
	__smp_mb__before_atomic();
}
#endif
359
#ifndef smp_mb__after_atomic_dec
/* Deprecated: use smp_mb__after_atomic() instead. */
static inline void __deprecated smp_mb__after_atomic_dec(void)
{
	extern void __smp_mb__after_atomic(void);
	__smp_mb__after_atomic();
}
#endif
367
368
369
370
371
372
373
374
375
376
377static inline int atomic_add_unless(atomic_t *v, int a, int u)
378{
379 return __atomic_add_unless(v, a, u) != u;
380}
381
382
383
384
385
386
387
388
/*
 * atomic_inc_not_zero - increment @v unless it is zero.
 * Returns non-zero iff the increment was performed.
 */
#ifndef atomic_inc_not_zero
#define  atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
#endif
392
393
394
395
396
397
398
399
400
401
402
403
404
405#ifndef atomic_inc_not_zero_hint
406static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
407{
408 int val, c = hint;
409
410
411 if (!hint)
412 return atomic_inc_not_zero(v);
413
414 do {
415 val = atomic_cmpxchg(v, c, c + 1);
416 if (val == c)
417 return 1;
418 c = val;
419 } while (c);
420
421 return 0;
422}
423#endif
424
425#ifndef atomic_inc_unless_negative
426static inline int atomic_inc_unless_negative(atomic_t *p)
427{
428 int v, v1;
429 for (v = 0; v >= 0; v = v1) {
430 v1 = atomic_cmpxchg(p, v, v + 1);
431 if (likely(v1 == v))
432 return 1;
433 }
434 return 0;
435}
436#endif
437
438#ifndef atomic_dec_unless_positive
439static inline int atomic_dec_unless_positive(atomic_t *p)
440{
441 int v, v1;
442 for (v = 0; v <= 0; v = v1) {
443 v1 = atomic_cmpxchg(p, v, v - 1);
444 if (likely(v1 == v))
445 return 1;
446 }
447 return 0;
448}
449#endif
450
451
452
453
454
455
456
457
458#ifndef atomic_dec_if_positive
459static inline int atomic_dec_if_positive(atomic_t *v)
460{
461 int c, old, dec;
462 c = atomic_read(v);
463 for (;;) {
464 dec = c - 1;
465 if (unlikely(dec < 0))
466 break;
467 old = atomic_cmpxchg((v), c, dec);
468 if (likely(old == c))
469 break;
470 c = old;
471 }
472 return dec;
473}
474#endif
475
476#ifndef CONFIG_ARCH_HAS_ATOMIC_OR
477static inline void atomic_or(int i, atomic_t *v)
478{
479 int old;
480 int new;
481
482 do {
483 old = atomic_read(v);
484 new = old | i;
485 } while (atomic_cmpxchg(v, old, new) != old);
486}
487#endif
488
#include <asm-generic/atomic-long.h>
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
#endif /* _LINUX_ATOMIC_H */
494