#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)	READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

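/*
 * LLOCK/SCOND based atomics: load-locked the counter, apply the operation
 * in a register, then store-conditionally back; the bnz retries the whole
 * sequence if the exclusive reservation was lost to another writer.
 */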
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)	/* early clobber: no input reg reuse */	\
	: [ctr]	"r"	(&v->counter),	/* not "m": llock takes a reg */\
	  [i]	"ir"	(i)						\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}

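/*
 * Note: atomic_fetch_##op() returns the counter value *before* the
 * operation (orig), whereas atomic_##op##_return() returns the new value.
 */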
#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int val, orig;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val),						\
	  [orig] "=&r"	(orig)						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return orig;							\
}

#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence.
	 *
	 * Thus atomic_set(), despite being a single insn (and seemingly
	 * atomic), still requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

#endif

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlock (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long orig;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	orig = v->counter;						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
									\
	return orig;							\
}

#endif	/* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)

#else	/* CONFIG_ARC_PLAT_EZNPS */

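/*
 * EZNPS (NPS-400) platform: the R-M-W atomics below are carried out by
 * dedicated CTOP instructions, emitted via ".word" from the CTOP_INST_*
 * encodings, while plain read/set use .di accesses (assumed here to be the
 * cache-bypassing/direct-to-memory variants of ld/st).
 */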
static inline int atomic_read(const atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	ld.di %0, [%1]"
	: "=r"(temp)
	: "r"(&v->counter)
	: "memory");
	return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__(
	"	st.di %0,[%1]"
	:
	: "r"(i), "r"(&v->counter)
	: "memory");
}

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	:								\
	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
	: "r2", "r3", "memory");					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	temp c_op i;							\
									\
	return temp;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	return temp;							\
}

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
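/* sub is synthesized as an atomic add of the negated operand */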
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif	/* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to @v ...
 * @u: ... unless @v is equal to @u
 *
 * Atomically adds @a to @v, as long as @v was not @u.
 * Returns the old value of @v.
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})

#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)			{ (i) }

#include <asm-generic/atomic64.h>

#endif	/* !__ASSEMBLY__ */

#endif	/* _ASM_ARC_ATOMIC_H */