#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H
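
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */
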
#include <linux/types.h>

#include <asm/intrinsics.h>

#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

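/*
 * atomic{,64}_read() goes through ACCESS_ONCE() so the compiler emits
 * exactly one load of ->counter and cannot cache or re-read it.
 */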
#define atomic_read(v)		ACCESS_ONCE((v)->counter)
#define atomic64_read(v)	ACCESS_ONCE((v)->counter)

#define atomic_set(v,i)		(((v)->counter) = (i))
#define atomic64_set(v,i)	(((v)->counter) = (i))

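/*
 * Fall-back implementations: retry a cmpxchg.acq loop until the
 * compare-exchange succeeds.  Used when the increment is not one of
 * the immediates the fetchadd instruction accepts.
 */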
static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ long
ia64_atomic64_add (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

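/* The subtraction fall-backs mirror the add loops above. */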
static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ long
ia64_atomic64_sub (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

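/*
 * Exchange/compare-exchange on the counter word; these map straight
 * onto the IA-64 xchg/cmpxchg intrinsics.
 */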
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

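/*
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */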
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

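/*
 * 64-bit variant; note the different return convention: non-zero
 * iff the addition was actually performed.
 */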
static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

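/*
 * IA-64's fetchadd instruction only accepts the immediates -16, -8,
 * -4, -1, 1, 4, 8 and 16.  For compile-time constants in that set a
 * single fetchadd suffices; anything else takes the cmpxchg loop.
 */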
#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

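/* Returns true iff the addition leaves the counter negative. */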
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

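/*
 * The subtraction forms negate the constant so that fetchadd can
 * still be used whenever -i is within its immediate range.
 */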
#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))		\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))		\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

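/*
 * The void-returning operations are implemented on top of the
 * *_return forms; the result is simply discarded.
 */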
#define atomic_add(i,v)			atomic_add_return((i), (v))
#define atomic_sub(i,v)			atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

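/* Atomic operations are already serializing */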
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* _ASM_IA64_ATOMIC_H */