/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 */

/*
 * To ensure dependency ordering is preserved for the _relaxed and
 * _release atomics, an smp_mb() is unconditionally inserted into the
 * _relaxed variants, which are used to build the barriered versions.
 * Avoid redundant back-to-back fences in the _acquire and _fence
 * versions.
 */
#define __atomic_acquire_fence()
#define __atomic_post_full_fence()

#define ATOMIC64_INIT(i) { (i) }

#define arch_atomic_read(v) READ_ONCE((v)->counter)
#define arch_atomic64_read(v) READ_ONCE((v)->counter)

#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#define arch_atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))
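
/*
 * Plain READ_ONCE()/WRITE_ONCE() are enough here: aligned longword and
 * quadword accesses are single instructions on Alpha, so no locked
 * sequence is needed just to read or overwrite the counter.
 */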

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

#define ATOMIC_OP(op, asm_op) \
static __inline__ void arch_atomic_##op(int i, atomic_t * v) \
{ \
	unsigned long temp; \
	__asm__ __volatile__( \
	"1: ldl_l %0,%1\n" \
	" " #asm_op " %0,%2,%0\n" \
	" stl_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter) \
	:"Ir" (i), "m" (v->counter)); \
}
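
/*
 * ldl_l/stl_c is Alpha's load-locked/store-conditional pair: stl_c
 * leaves 0 in its source register when the reservation was lost, so
 * the beq above retries the whole sequence via the out-of-line branch.
 */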

#define ATOMIC_OP_RETURN(op, asm_op) \
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
	long temp, result; \
	__asm__ __volatile__( \
	"1: ldl_l %0,%1\n" \
	" " #asm_op " %0,%3,%2\n" \
	" " #asm_op " %0,%3,%0\n" \
	" stl_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
	:"Ir" (i), "m" (v->counter) : "memory"); \
	smp_mb(); \
	return result; \
}
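
/*
 * Note the unconditional smp_mb() even in the _relaxed variant: as the
 * header comment explains, Alpha needs it so that dependent loads in
 * the caller observe the update in the expected order.
 */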

#define ATOMIC_FETCH_OP(op, asm_op) \
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
	long temp, result; \
	__asm__ __volatile__( \
	"1: ldl_l %2,%1\n" \
	" " #asm_op " %2,%3,%0\n" \
	" stl_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
	:"Ir" (i), "m" (v->counter) : "memory"); \
	smp_mb(); \
	return result; \
}
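
/*
 * The op##_return_relaxed forms hand back the new value (the operation
 * is applied twice above: once into result, once into the store
 * source), while the fetch_##op##_relaxed forms load straight into
 * result and so hand back the value observed before the operation.
 */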

#define ATOMIC64_OP(op, asm_op) \
static __inline__ void arch_atomic64_##op(s64 i, atomic64_t * v) \
{ \
	s64 temp; \
	__asm__ __volatile__( \
	"1: ldq_l %0,%1\n" \
	" " #asm_op " %0,%2,%0\n" \
	" stq_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter) \
	:"Ir" (i), "m" (v->counter)); \
}

#define ATOMIC64_OP_RETURN(op, asm_op) \
static __inline__ s64 \
arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
{ \
	s64 temp, result; \
	__asm__ __volatile__( \
	"1: ldq_l %0,%1\n" \
	" " #asm_op " %0,%3,%2\n" \
	" " #asm_op " %0,%3,%0\n" \
	" stq_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
	:"Ir" (i), "m" (v->counter) : "memory"); \
	smp_mb(); \
	return result; \
}

#define ATOMIC64_FETCH_OP(op, asm_op) \
static __inline__ s64 \
arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
{ \
	s64 temp, result; \
	__asm__ __volatile__( \
	"1: ldq_l %2,%1\n" \
	" " #asm_op " %2,%3,%0\n" \
	" stq_c %0,%1\n" \
	" beq %0,2f\n" \
	".subsection 2\n" \
	"2: br 1b\n" \
	".previous" \
	:"=&r" (temp), "=m" (v->counter), "=&r" (result) \
	:"Ir" (i), "m" (v->counter) : "memory"); \
	smp_mb(); \
	return result; \
}

#define ATOMIC_OPS(op) \
	ATOMIC_OP(op, op##l) \
	ATOMIC_OP_RETURN(op, op##l) \
	ATOMIC_FETCH_OP(op, op##l) \
	ATOMIC64_OP(op, op##q) \
	ATOMIC64_OP_RETURN(op, op##q) \
	ATOMIC64_FETCH_OP(op, op##q)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
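
/*
 * ATOMIC_OPS(add) therefore expands to arch_atomic_add(),
 * arch_atomic_add_return_relaxed() and arch_atomic_fetch_add_relaxed()
 * built on addl, plus the matching arch_atomic64_*() variants built on
 * addq; ATOMIC_OPS(sub) does the same with subl/subq.
 */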

#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed

#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed

#define arch_atomic_andnot arch_atomic_andnot
#define arch_atomic64_andnot arch_atomic64_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm) \
	ATOMIC_OP(op, asm) \
	ATOMIC_FETCH_OP(op, asm) \
	ATOMIC64_OP(op, asm) \
	ATOMIC64_FETCH_OP(op, asm)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)
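
/*
 * bic (and-with-complement) and bis (logical or) are the native Alpha
 * mnemonics, which is why andnot and or map onto them rather than onto
 * same-named instructions. Note the bitwise ops have no _return
 * variants here, only the fetch_* forms.
 */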

#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed

#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define arch_atomic64_cmpxchg(v, old, new) \
	(arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic64_xchg(v, new) \
	(arch_xchg(&((v)->counter), new))

#define arch_atomic_cmpxchg(v, old, new) \
	(arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic_xchg(v, new) \
	(arch_xchg(&((v)->counter), new))
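
/*
 * These simply forward to the arch_cmpxchg()/arch_xchg() helpers from
 * <asm/cmpxchg.h>, which dispatch on the size of the counter (4 or 8
 * bytes here).
 */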

/**
 * arch_atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1: ldl_l %[old],%[mem]\n"
	" cmpeq %[old],%[u],%[c]\n"
	" addl %[old],%[a],%[new]\n"
	" bne %[c],2f\n"
	" stl_c %[new],%[mem]\n"
	" beq %[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3: br 1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
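
/*
 * Usage sketch (hypothetical 'refs' refcount field): take a reference
 * only while the object is still live, treating 0 as "already freed":
 *
 *	if (arch_atomic_fetch_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;
 */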

/**
 * arch_atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1: ldq_l %[old],%[mem]\n"
	" cmpeq %[old],%[u],%[c]\n"
	" addq %[old],%[a],%[new]\n"
	" bne %[c],2f\n"
	" stq_c %[new],%[mem]\n"
	" beq %[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3: br 1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return old;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

/*
 * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1: ldq_l %[old],%[mem]\n"
	" subq %[old],1,%[tmp]\n"
	" ble %[old],2f\n"
	" stq_c %[tmp],%[mem]\n"
	" beq %[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3: br 1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

#endif /* _ALPHA_ATOMIC_H */