/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)	READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op, asm_op)	\
static inline void atomic_##op(int i, atomic_t *v)	\
{	\
	unsigned int val;	\
	\
	__asm__ __volatile__(	\
	"1:	llock   %[val], [%[ctr]]		\n"	\
	"	" #asm_op " %[val], %[val], %[i]	\n"	\
	"	scond   %[val], [%[ctr]]		\n"	\
	"	bnz     1b				\n"	\
	: [val] "=&r" (val)	\
	: [ctr] "r" (&v->counter),	\
	  [i] "ir" (i)	\
	: "cc");	\
}	\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)	\
static inline int atomic_##op##_return(int i, atomic_t *v)	\
{	\
	unsigned int val;	\
	\
	/*	\
	 * Explicit full memory barrier needed before/after as	\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */	\
	smp_mb();	\
	\
	__asm__ __volatile__(	\
	"1:	llock   %[val], [%[ctr]]		\n"	\
	"	" #asm_op " %[val], %[val], %[i]	\n"	\
	"	scond   %[val], [%[ctr]]		\n"	\
	"	bnz     1b				\n"	\
	: [val] "=&r" (val)	\
	: [ctr] "r" (&v->counter),	\
	  [i] "ir" (i)	\
	: "cc");	\
	\
	smp_mb();	\
	\
	return val;	\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)	\
static inline int atomic_fetch_##op(int i, atomic_t *v)	\
{	\
	unsigned int val, orig;	\
	\
	/*	\
	 * Explicit full memory barrier needed before/after as	\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */	\
	smp_mb();	\
	\
	__asm__ __volatile__(	\
	"1:	llock   %[orig], [%[ctr]]		\n"	\
	"	" #asm_op " %[val], %[orig], %[i]	\n"	\
	"	scond   %[val], [%[ctr]]		\n"	\
	"	bnz     1b				\n"	\
	: [val] "=&r" (val),	\
	  [orig] "=&r" (orig)	\
	: [ctr] "r" (&v->counter),	\
	  [i] "ir" (i)	\
	: "cc");	\
	\
	smp_mb();	\
	\
	return orig;	\
}

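/*
 * Illustrative summary of the LLSC variants above: for a given op
 * (e.g. ATOMIC_OPS(add, +=, add) further below) they generate
 *
 *	atomic_add(i, v);		- no implied barriers, no result
 *	new = atomic_add_return(i, v);	- fully ordered, returns new value
 *	old = atomic_fetch_add(i, v);	- fully ordered, returns old value
 *
 * Only the _return/fetch_ forms are bracketed by smp_mb(); LLOCK/SCOND
 * by themselves provide no memory-ordering guarantees.
 */
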
#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD+ADD+ST) done under a spinlock, and an
	 * "emulated" atomic insn (e.g. LOCK+LD+ADD+ST+UNLOCK) don't race.
	 *
	 * Thus atomic_set(), despite being a single store (and seemingly
	 * atomic), still requires the lock.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

#define atomic_set_release(v, i)	atomic_set((v), (i))

#endif


/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)	\
static inline void atomic_##op(int i, atomic_t *v)	\
{	\
	unsigned long flags;	\
	\
	atomic_ops_lock(flags);	\
	v->counter c_op i;	\
	atomic_ops_unlock(flags);	\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)	\
static inline int atomic_##op##_return(int i, atomic_t *v)	\
{	\
	unsigned long flags;	\
	unsigned long temp;	\
	\
	/*	\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */	\
	atomic_ops_lock(flags);	\
	temp = v->counter;	\
	temp c_op i;	\
	v->counter = temp;	\
	atomic_ops_unlock(flags);	\
	\
	return temp;	\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)	\
static inline int atomic_fetch_##op(int i, atomic_t *v)	\
{	\
	unsigned long flags;	\
	unsigned long orig;	\
	\
	/*	\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */	\
	atomic_ops_lock(flags);	\
	orig = v->counter;	\
	v->counter c_op i;	\
	atomic_ops_unlock(flags);	\
	\
	return orig;	\
}

#endif	/* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)	\
	ATOMIC_OP(op, c_op, asm_op)	\
	ATOMIC_OP_RETURN(op, c_op, asm_op)	\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)	\
	ATOMIC_OP(op, c_op, asm_op)	\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
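
/*
 * Illustrative summary: the expansions above provide atomic_{add,sub}(),
 * atomic_{add,sub}_return(), atomic_fetch_{add,sub}(), plus
 * atomic_{and,andnot,or,xor}() and their atomic_fetch_*() forms
 * (no _return variants for the bitwise ops).
 */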

#else	/* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	ld.di %0, [%1]"
	: "=r"(temp)
	: "r"(&v->counter)
	: "memory");
	return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__(
	"	st.di %0,[%1]"
	:
	: "r"(i), "r"(&v->counter)
	: "memory");
}

#define ATOMIC_OP(op, c_op, asm_op)	\
static inline void atomic_##op(int i, atomic_t *v)	\
{	\
	__asm__ __volatile__(	\
	"	mov r2, %0\n"	\
	"	mov r3, %1\n"	\
	"	.word %2\n"	\
	:	\
	: "r"(i), "r"(&v->counter), "i"(asm_op)	\
	: "r2", "r3", "memory");	\
}	\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)	\
static inline int atomic_##op##_return(int i, atomic_t *v)	\
{	\
	unsigned int temp = i;	\
	\
	/* Explicit full memory barrier needed before/after */	\
	smp_mb();	\
	\
	__asm__ __volatile__(	\
	"	mov r2, %0\n"	\
	"	mov r3, %1\n"	\
	"	.word %2\n"	\
	"	mov %0, r2"	\
	: "+r"(temp)	\
	: "r"(&v->counter), "i"(asm_op)	\
	: "r2", "r3", "memory");	\
	\
	smp_mb();	\
	\
	temp c_op i;	\
	\
	return temp;	\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)	\
static inline int atomic_fetch_##op(int i, atomic_t *v)	\
{	\
	unsigned int temp = i;	\
	\
	/* Explicit full memory barrier needed before/after */	\
	smp_mb();	\
	\
	__asm__ __volatile__(	\
	"	mov r2, %0\n"	\
	"	mov r3, %1\n"	\
	"	.word %2\n"	\
	"	mov %0, r2"	\
	: "+r"(temp)	\
	: "r"(&v->counter), "i"(asm_op)	\
	: "r2", "r3", "memory");	\
	\
	smp_mb();	\
	\
	return temp;	\
}

#define ATOMIC_OPS(op, c_op, asm_op)	\
	ATOMIC_OP(op, c_op, asm_op)	\
	ATOMIC_OP_RETURN(op, c_op, asm_op)	\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
#define atomic_fetch_sub(i, v) atomic_fetch_add(-(i), (v))

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)	\
	ATOMIC_OP(op, c_op, asm_op)	\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
#define atomic_fetch_andnot(mask, v) atomic_fetch_and(~(mask), (v))
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif	/* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)	\
({	\
	int c, old;	\
	\
	/*	\
	 * Explicit full memory barrier needed before/after as	\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */	\
	smp_mb();	\
	\
	c = atomic_read(v);	\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;	\
	\
	smp_mb();	\
	\
	c;	\
})
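
/*
 * Usage sketch (illustrative only, "refcnt" is a made-up field): the generic
 * atomic_add_unless() wrapper built on __atomic_add_unless() returns whether
 * the add happened, so atomic_inc_not_zero() below gives the usual
 * "take a reference only if the object is still live" idiom:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	// already at zero, object going away
 */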

#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)


#ifdef CONFIG_GENERIC_ATOMIC64

#include <asm-generic/atomic64.h>

#else	/* Kconfig ensures this is only enabled with needed h/w assist */

/*
 * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
 *  - The address HAS to be 64-bit aligned
 *  - There are 2 semantics involved here:
 *    = exclusive implies no interim update between load/store to same addr
 *    = both words are observed/updated together: this is guaranteed even
 *      for regular 64-bit load (LDD) / store (STD). Thus atomic64_set()
 *      is NOT required to use LLOCKD+SCONDD, STD suffices
 */

typedef struct {
	aligned_u64 counter;
} atomic64_t;

#define ATOMIC64_INIT(a) { (a) }

static inline long long atomic64_read(const atomic64_t *v)
{
	unsigned long long val;

	__asm__ __volatile__(
	"	ldd   %0, [%1]	\n"
	: "=r"(val)
	: "r"(&v->counter));

	return val;
}

static inline void atomic64_set(atomic64_t *v, long long a)
{
	/*
	 * This could have been a simple assignment in "C" but would need
	 * explicit volatile. Otherwise gcc optimizers could elide the store,
	 * which borked the atomic64 self-test.
	 * In the inline asm version, the memory clobber is needed for the
	 * exact same reason, to tell gcc about the store.
	 *
	 * This however is not needed for the sibling atomic64_add() etc since
	 * both load and store are explicitly done in inline asm. As long as
	 * the API is used for each access, gcc cannot optimize away any
	 * load/store.
	 */
	__asm__ __volatile__(
	"	std   %0, [%1]	\n"
	:
	: "r"(a), "r"(&v->counter)
	: "memory");
}


#define ATOMIC64_OP(op, op1, op2)	\
static inline void atomic64_##op(long long a, atomic64_t *v)	\
{	\
	unsigned long long val;	\
	\
	__asm__ __volatile__(	\
	"1:				\n"	\
	"	llockd  %0, [%1]	\n"	\
	"	" #op1 " %L0, %L0, %L2	\n"	\
	"	" #op2 " %H0, %H0, %H2	\n"	\
	"	scondd  %0, [%1]	\n"	\
	"	bnz     1b		\n"	\
	: "=&r"(val)	\
	: "r"(&v->counter), "ir"(a)	\
	: "cc");	\
}	\

#define ATOMIC64_OP_RETURN(op, op1, op2)	\
static inline long long atomic64_##op##_return(long long a, atomic64_t *v)	\
{	\
	unsigned long long val;	\
	\
	smp_mb();	\
	\
	__asm__ __volatile__(	\
	"1:				\n"	\
	"	llockd  %0, [%1]	\n"	\
	"	" #op1 " %L0, %L0, %L2	\n"	\
	"	" #op2 " %H0, %H0, %H2	\n"	\
	"	scondd  %0, [%1]	\n"	\
	"	bnz     1b		\n"	\
	: [val] "=&r"(val)	\
	: "r"(&v->counter), "ir"(a)	\
	: "cc");	\
	\
	smp_mb();	\
	\
	return val;	\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)	\
static inline long long atomic64_fetch_##op(long long a, atomic64_t *v)	\
{	\
	unsigned long long val, orig;	\
	\
	smp_mb();	\
	\
	__asm__ __volatile__(	\
	"1:				\n"	\
	"	llockd  %0, [%2]	\n"	\
	"	" #op1 " %L1, %L0, %L3	\n"	\
	"	" #op2 " %H1, %H0, %H3	\n"	\
	"	scondd  %1, [%2]	\n"	\
	"	bnz     1b		\n"	\
	: "=&r"(orig), "=&r"(val)	\
	: "r"(&v->counter), "ir"(a)	\
	: "cc");	\
	\
	smp_mb();	\
	\
	return orig;	\
}

#define ATOMIC64_OPS(op, op1, op2)	\
	ATOMIC64_OP(op, op1, op2)	\
	ATOMIC64_OP_RETURN(op, op1, op2)	\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long long
atomic64_cmpxchg(atomic64_t *ptr, long long expected, long long new)
{
	long long prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	brne    %L0, %L2, 2f	\n"
	"	brne    %H0, %H2, 2f	\n"
	"	scondd  %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "ir"(expected), "r"(new)
	: "cc");

	smp_mb();

	return prev;
}

static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
	long long prev;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	scondd  %2, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "r"(new)
	: "cc");

	smp_mb();

	return prev;
}

/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long val;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	sub.f   %L0, %L0, 1	# w0 - 1, set C on borrow\n"
	"	sub.c   %H0, %H0, 1	# if C set, w1 - 1\n"
	"	brlt    %H0, 0, 2f	\n"
	"	scondd  %0, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
	: "=&r"(val)
	: "r"(&v->counter)
	: "cc");

	smp_mb();

	return val;
}
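
/*
 * Usage sketch (illustrative only, "avail" is a made-up counter): the
 * function returns old - 1 even when the store is skipped, so a negative
 * result means nothing was consumed:
 *
 *	if (atomic64_dec_if_positive(&pool->avail) < 0)
 *		return -EBUSY;	// counter was already <= 0, not decremented
 */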

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, if @v was not @u.
 * Returns 1 if the add happened, 0 otherwise.
 */
static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long val;
	int op_done;

	smp_mb();

	__asm__ __volatile__(
	"1:	llockd  %0, [%2]	\n"
	"	mov	%1, 1		\n"
	"	brne	%L0, %L4, 2f	# continue to add since v != u \n"
	"	breq.d	%H0, %H4, 3f	# return since v == u \n"
	"	mov	%1, 0		\n"
	"2:				\n"
	"	add.f   %L0, %L0, %L3	\n"
	"	adc     %H0, %H0, %H3	\n"
	"	scondd  %0, [%2]	\n"
	"	bnz     1b		\n"
	"3:				\n"
	: "=&r"(val), "=&r" (op_done)
	: "r"(&v->counter), "r"(a), "r"(u)
	: "cc");

	smp_mb();

	return op_done;
}
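
/*
 * Illustrative note: per the contract documented above, the return value is
 * 1 when the add happened (*v was not @u), 0 otherwise.
 * atomic64_inc_not_zero() below is the common consumer, e.g.
 *
 *	if (!atomic64_inc_not_zero(&map->refs))	// "refs" is a made-up field
 *		return -ENOENT;
 */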

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif	/* !CONFIG_GENERIC_ATOMIC64 */

#endif	/* !__ASSEMBLY__ */

#endif