/*
 * include/asm-xtensa/atomic.h
 *
 * Atomic operations that C can't guarantee us.  Useful for resource counting.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2008 Tensilica Inc.
 */

#ifndef _XTENSA_ATOMIC_H
#define _XTENSA_ATOMIC_H

#include <linux/stringify.h>
#include <linux/types.h>

#ifdef __KERNEL__
#include <asm/processor.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)	{ (i) }
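
/*
 * Example: a counter that is statically initialized at definition time
 * (the variable name here is purely illustrative):
 *
 *	static atomic_t pending_events = ATOMIC_INIT(0);
 */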

/*
 * This Xtensa implementation assumes that the right mechanism
 * for exclusion is for locking interrupts to level EXCM_LEVEL.
 *
 * Locking interrupts looks like this:
 *
 *    rsil a15, TOPLEVEL
 *    <code>
 *    wsr  a15, PS
 *    rsync
 *
 * Note that a15 is used here because the register allocation
 * is more predictable.  This means that other code, which uses
 * a15, will not work correctly with interrupts disabled.
 */

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		READ_ONCE((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
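
/*
 * Note: plain READ_ONCE()/WRITE_ONCE() suffice here because an aligned
 * 32-bit load or store is a single, naturally atomic instruction; the
 * macros only keep the compiler from tearing or caching the access.
 */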

#if XCHAL_HAVE_S32C1I
#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t * v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32i    %1, %3, 0\n"			\
			"       wsr     %1, scompare1\n"		\
			"       " #op " %0, %1, %2\n"			\
			"       s32c1i  %0, %3, 0\n"			\
			"       bne     %0, %1, 1b\n"			\
			: "=&a" (result), "=&a" (tmp)			\
			: "a" (i), "a" (v)				\
			: "memory"					\
			);						\
}

#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t * v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32i    %1, %3, 0\n"			\
			"       wsr     %1, scompare1\n"		\
			"       " #op " %0, %1, %2\n"			\
			"       s32c1i  %0, %3, 0\n"			\
			"       bne     %0, %1, 1b\n"			\
			"       " #op " %0, %0, %2\n"			\
			: "=&a" (result), "=&a" (tmp)			\
			: "a" (i), "a" (v)				\
			: "memory"					\
			);						\
									\
	return result;							\
}
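
/*
 * The trailing "#op" in ATOMIC_OP_RETURN is not redundant: on success
 * s32c1i leaves the *old* memory word in %0, so the new value has to be
 * recomputed before it can be returned.
 */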

#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t * v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32i    %1, %3, 0\n"			\
			"       wsr     %1, scompare1\n"		\
			"       " #op " %0, %1, %2\n"			\
			"       s32c1i  %0, %3, 0\n"			\
			"       bne     %0, %1, 1b\n"			\
			: "=&a" (result), "=&a" (tmp)			\
			: "a" (i), "a" (v)				\
			: "memory"					\
			);						\
									\
	return result;							\
}
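
/*
 * How the s32c1i loop works: SCOMPARE1 is loaded with the value observed
 * by l32i; s32c1i then stores %0 only if the word in memory still equals
 * SCOMPARE1, and in either case loads the current memory word back into
 * %0.  If another CPU changed the word in between, %0 != %1 and bne
 * retries.  As a consequence atomic_fetch_##op() naturally returns the
 * old value, while atomic_##op##_return() returns the new one.
 */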

#else

#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t * v)			\
{									\
	unsigned int vval;						\
									\
	__asm__ __volatile__(						\
			"       rsil    a15, "__stringify(TOPLEVEL)"\n"	\
			"       l32i    %0, %2, 0\n"			\
			"       " #op " %0, %0, %1\n"			\
			"       s32i    %0, %2, 0\n"			\
			"       wsr     a15, ps\n"			\
			"       rsync\n"				\
			: "=&a" (vval)					\
			: "a" (i), "a" (v)				\
			: "a15", "memory"				\
			);						\
}

#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t * v)		\
{									\
	unsigned int vval;						\
									\
	__asm__ __volatile__(						\
			"       rsil    a15, "__stringify(TOPLEVEL)"\n"	\
			"       l32i    %0, %2, 0\n"			\
			"       " #op " %0, %0, %1\n"			\
			"       s32i    %0, %2, 0\n"			\
			"       wsr     a15, ps\n"			\
			"       rsync\n"				\
			: "=&a" (vval)					\
			: "a" (i), "a" (v)				\
			: "a15", "memory"				\
			);						\
									\
	return vval;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t * v)		\
{									\
	unsigned int tmp, vval;						\
									\
	__asm__ __volatile__(						\
			"       rsil    a15, "__stringify(TOPLEVEL)"\n"	\
			"       l32i    %0, %3, 0\n"			\
			"       " #op " %1, %0, %2\n"			\
			"       s32i    %1, %3, 0\n"			\
			"       wsr     a15, ps\n"			\
			"       rsync\n"				\
			: "=&a" (vval), "=&a" (tmp)			\
			: "a" (i), "a" (v)				\
			: "a15", "memory"				\
			);						\
									\
	return vval;							\
}

#endif /* XCHAL_HAVE_S32C1I */

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) ATOMIC_OP_RETURN(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
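
/*
 * Each ATOMIC_OPS(op) invocation above stamps out a family of functions:
 * ATOMIC_OPS(add), for instance, generates atomic_add(), atomic_fetch_add()
 * and atomic_add_return().  For and/or/xor only the void and fetch variants
 * are generated.
 */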

/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v) atomic_add(1,(v))

/**
 * atomic_inc_return - increment atomic variable and return new value
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns the new value.
 */
#define atomic_inc_return(v) atomic_add_return(1,(v))

/**
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v) atomic_sub(1,(v))

/**
 * atomic_dec_return - decrement atomic variable and return new value
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns the new value.
 */
#define atomic_dec_return(v) atomic_sub_return(1,(v))

/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1,(v)) == 0)

/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_add_return(1,(v)) == 0)

/**
 * atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.
 */
#define atomic_add_negative(i,v) (atomic_add_return((i),(v)) < 0)

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
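
/*
 * Typical atomic_cmpxchg() retry loop (illustrative sketch; "clamp_add"
 * is a made-up helper, not part of this header):
 *
 *	static int clamp_add(atomic_t *v, int a, int max)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = atomic_read(v);
 *			new = min(old + a, max);
 *		} while (atomic_cmpxchg(v, old, new) != old);
 *
 *		return new;
 *	}
 */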

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
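
/*
 * Note: the generic wrappers in <linux/atomic.h> build atomic_add_unless()
 * and atomic_inc_not_zero() on top of this primitive; for example
 * atomic_inc_not_zero(v) succeeds only while *v has not dropped to zero,
 * which is the classic refcount "get" pattern.
 */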

#endif /* __KERNEL__ */

#endif /* _XTENSA_ATOMIC_H */