1
2
3
4
5
6
7
8
9
10
11#ifndef _ASM_ATOMIC_H
12#define _ASM_ATOMIC_H
13
14#include <asm/irqflags.h>
15#include <asm/cmpxchg.h>
16#include <asm/barrier.h>
17
18#ifndef CONFIG_SMP
19#include <asm-generic/atomic.h>
20#else
21
22
23
24
25
26
/*
 * ATOMIC_INIT - static initializer for an atomic_t.
 * Usage: atomic_t v = ATOMIC_INIT(0);
 */
#define ATOMIC_INIT(i) { (i) }
28
29#ifdef __KERNEL__
30
31
32
33
34
35
36
/*
 * atomic_read - read the counter value.
 * READ_ONCE() forces a single, non-cached load so the compiler cannot
 * tear, duplicate or hoist the read.
 */
#define atomic_read(v) READ_ONCE((v)->counter)
38
39
40
41
42
43
44
45
/*
 * atomic_set - set the counter to a new value.
 * WRITE_ONCE() forces a single store so the compiler cannot tear or
 * duplicate the write.
 */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
47
/*
 * ATOMIC_OP - generate void atomic_<op>(int i, atomic_t *v) for SMP.
 *
 * The sequence drives the hardware atomic-operations unit through its
 * registers at offsets _AAR (address), _ADR (data) and _ASR (status)
 * from ATOMIC_OPS_BASE_ADDR:
 *   1. store the counter's address into _AAR,
 *   2. load the current value through _ADR,
 *   3. apply <op> with operand i and store the result back via _ADR,
 *   4. read _ADR again (NOTE(review): presumably to flush/complete the
 *      store before sampling status -- confirm against the unit's
 *      documentation), then read _ASR,
 *   5. "or %0,%0" sets the flags from the status value; a non-zero
 *      status branches back to label 1 and retries the whole sequence.
 *
 * The "memory" clobber makes this a full compiler barrier.
 */
#define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
	int retval, status; \
 \
	asm volatile( \
		"1: mov %4,(_AAR,%3) \n" \
		" mov (_ADR,%3),%1 \n" \
		" " #op " %5,%1 \n" \
		" mov %1,(_ADR,%3) \n" \
		" mov (_ADR,%3),%0 \n" \
		" mov (_ASR,%3),%0 \n" \
		" or %0,%0 \n" \
		" bne 1b \n" \
		: "=&r"(status), "=&r"(retval), "=m"(v->counter) \
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i) \
		: "memory", "cc"); \
}
66
/*
 * ATOMIC_OP_RETURN - generate int atomic_<op>_return(int i, atomic_t *v).
 *
 * Identical hardware sequence to ATOMIC_OP (program _AAR with the
 * address, read-modify-write through _ADR, retry from label 1 while
 * _ASR reports a non-zero status), but the updated value computed into
 * %1 (retval) is returned to the caller.
 *
 * The "memory" clobber makes this a full compiler barrier.
 */
#define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
	int retval, status; \
 \
	asm volatile( \
		"1: mov %4,(_AAR,%3) \n" \
		" mov (_ADR,%3),%1 \n" \
		" " #op " %5,%1 \n" \
		" mov %1,(_ADR,%3) \n" \
		" mov (_ADR,%3),%0 \n" \
		" mov (_ASR,%3),%0 \n" \
		" or %0,%0 \n" \
		" bne 1b \n" \
		: "=&r"(status), "=&r"(retval), "=m"(v->counter) \
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i) \
		: "memory", "cc"); \
	return retval; \
}
86
/* Generate both the void and the value-returning variant of an op. */
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)

/* add/sub get both atomic_<op>() and atomic_<op>_return(). */
ATOMIC_OPS(add)
ATOMIC_OPS(sub)

/* The bitwise ops only provide the void variant. */
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

/* The generator macros are private to this header. */
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
99
100static inline int atomic_add_negative(int i, atomic_t *v)
101{
102 return atomic_add_return(i, v) < 0;
103}
104
105static inline void atomic_inc(atomic_t *v)
106{
107 atomic_add_return(1, v);
108}
109
110static inline void atomic_dec(atomic_t *v)
111{
112 atomic_sub_return(1, v);
113}
114
/* Increment/decrement wrappers over the add/sub return primitives. */
#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))

/* Apply the operation, then test whether the new value is zero. */
#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)

/*
 * __atomic_add_unless - add a to v unless v's value equals u.
 * Loops on atomic_cmpxchg() until either the add succeeds or the
 * value u is observed; evaluates to the counter value seen just
 * before the (attempted) addition.
 * NOTE(review): v, a and u may each be evaluated more than once --
 * callers must not pass expressions with side effects.
 */
#define __atomic_add_unless(v, a, u) \
({ \
	int c, old; \
	c = atomic_read(v); \
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old; \
	c; \
})

/* Exchange primitives, forwarded to the generic xchg/cmpxchg helpers. */
#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
133
134#endif
135#endif
136#endif
137