/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are slower than
 * regular operations.
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_AVR32_ATOMIC_H
#define __ASM_AVR32_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))

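/*
 * All of the read-modify-write operations below share one retry loop:
 * "ssrf 5" sets the L (lock) flag in the status register, the counter
 * is loaded and modified, and "stcond" performs the store only if L is
 * still set, recording the outcome in the Z flag.  Returning from an
 * interrupt clears L, so a successful "stcond" means the sequence ran
 * without preemption; on failure "brne 1b" retries from the top.
 */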
#define ATOMIC_OP_RETURN(op, asm_op, asm_con)				\
static inline int __atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int result;							\
									\
	asm volatile(							\
		"/* atomic_" #op "_return */\n"				\
		"1:	ssrf	5\n"					\
		"	ld.w	%0, %2\n"				\
		"	" #asm_op "	%0, %3\n"			\
		"	stcond	%1, %0\n"				\
		"	brne	1b"					\
		: "=&r" (result), "=o" (v->counter)			\
		: "m" (v->counter), #asm_con (i)			\
		: "cc");						\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, asm_op, asm_con)				\
static inline int __atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int result, val;						\
									\
	asm volatile(							\
		"/* atomic_fetch_" #op " */\n"				\
		"1:	ssrf	5\n"					\
		"	ld.w	%0, %3\n"				\
		"	mov	%1, %0\n"				\
		"	" #asm_op "	%1, %4\n"			\
		"	stcond	%2, %1\n"				\
		"	brne	1b"					\
		: "=&r" (result), "=&r" (val), "=o" (v->counter)	\
		: "m" (v->counter), #asm_con (i)			\
		: "cc");						\
									\
	return result;							\
}

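/*
 * AVR32 "sub" can encode a 21-bit signed immediate, hence the "rKs21"
 * constraint on the sub variants; "add" takes only register operands,
 * hence plain "r".  IS_21BIT_CONST below exploits this asymmetry.
 */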
ATOMIC_OP_RETURN(sub, sub, rKs21)
ATOMIC_OP_RETURN(add, add, r)
ATOMIC_FETCH_OP (sub, sub, rKs21)
ATOMIC_FETCH_OP (add, add, r)

#define ATOMIC_OPS(op, asm_op)						\
ATOMIC_OP_RETURN(op, asm_op, r)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	(void)__atomic_##op##_return(i, v);				\
}									\
ATOMIC_FETCH_OP(op, asm_op, r)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	return __atomic_fetch_##op(i, v);				\
}

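/*
 * The bitwise ops are instantiated with the plain "r" constraint for
 * both the op_return and fetch_op variants; note "xor" maps to the
 * AVR32 "eor" mnemonic.
 */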
ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN

/*
 * A constant operand can be folded into the "sub" instruction's 21-bit
 * signed immediate.  Because adds and subs of a constant are issued as
 * a sub of the negated value below, the bounds here are the negation
 * of the Ks21 range -1048576..1048575.
 */
#define IS_21BIT_CONST(i)						\
	(__builtin_constant_p(i) && ((i) >= -1048575) && ((i) <= 1048576))

/*
 * atomic_add_return - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v. Returns the resulting value.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	if (IS_21BIT_CONST(i))
		return __atomic_sub_return(-i, v);

	return __atomic_add_return(i, v);
}
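
/*
 * E.g. atomic_add_return(4, v) becomes __atomic_sub_return(-4, v), so
 * the constant rides in the sub instruction's immediate field instead
 * of occupying a register.
 */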

static inline int atomic_fetch_add(int i, atomic_t *v)
{
	if (IS_21BIT_CONST(i))
		return __atomic_fetch_sub(-i, v);

	return __atomic_fetch_add(i, v);
}

/*
 * atomic_sub_return - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v. Returns the resulting value.
 */
static inline int atomic_sub_return(int i, atomic_t *v)
{
	if (IS_21BIT_CONST(i))
		return __atomic_sub_return(i, v);

	return __atomic_add_return(-i, v);
}

static inline int atomic_fetch_sub(int i, atomic_t *v)
{
	if (IS_21BIT_CONST(i))
		return __atomic_fetch_sub(i, v);

	return __atomic_fetch_add(-i, v);
}

/*
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int tmp, old = atomic_read(v);

	if (IS_21BIT_CONST(a)) {
		asm volatile(
			"/* __atomic_sub_unless */\n"
			"1:	ssrf	5\n"
			"	ld.w	%0, %2\n"
			"	cp.w	%0, %4\n"
			"	breq	1f\n"
			"	sub	%0, %3\n"
			"	stcond	%1, %0\n"
			"	brne	1b\n"
			"1:"
			: "=&r"(tmp), "=o"(v->counter)
			: "m"(v->counter), "rKs21"(-a), "rKs21"(u)
			: "cc", "memory");
	} else {
		asm volatile(
			"/* __atomic_add_unless */\n"
			"1:	ssrf	5\n"
			"	ld.w	%0, %2\n"
			"	cp.w	%0, %4\n"
			"	breq	1f\n"
			"	add	%0, %3\n"
			"	stcond	%1, %0\n"
			"	brne	1b\n"
			"1:"
			: "=&r"(tmp), "=o"(v->counter)
			: "m"(v->counter), "r"(a), "ir"(u)
			: "cc", "memory");
	}

	return old;
}
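
/*
 * The generic atomic_add_unless() in <linux/atomic.h> wraps this
 * helper and reports success as __atomic_add_unless(v, a, u) != u.
 */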

#undef IS_21BIT_CONST

/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v only if the result is not negative.
 * Returns the value of @v minus @i in either case.
 */
static inline int atomic_sub_if_positive(int i, atomic_t *v)
{
	int result;

	asm volatile(
		"/* atomic_sub_if_positive */\n"
		"1:	ssrf	5\n"
		"	ld.w	%0, %2\n"
		"	sub	%0, %3\n"
		"	brlt	1f\n"
		"	stcond	%1, %0\n"
		"	brne	1b\n"
		"1:"
		: "=&r"(result), "=o"(v->counter)
		: "m"(v->counter), "ir"(i)
		: "cc", "memory");

	return result;
}

#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
#define atomic_cmpxchg(v, o, n)	(cmpxchg(&((v)->counter), (o), (n)))

#define atomic_sub(i, v)	(void)atomic_sub_return(i, v)
#define atomic_add(i, v)	(void)atomic_add_return(i, v)
#define atomic_dec(v)		atomic_sub(1, (v))
#define atomic_inc(v)		atomic_add(1, (v))

#define atomic_dec_return(v)	atomic_sub_return(1, v)
#define atomic_inc_return(v)	atomic_add_return(1, v)

#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)

#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
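
/*
 * Typical usage, e.g. for reference counting (release_object() is a
 * hypothetical cleanup hook, not part of this header):
 *
 *	static atomic_t refs = ATOMIC_INIT(1);
 *
 *	atomic_inc(&refs);
 *	...
 *	if (atomic_dec_and_test(&refs))
 *		release_object();
 */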

#endif /* __ASM_AVR32_ATOMIC_H */