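/*
 * Atomic operations for s390: 32-bit atomic_t and 64-bit atomic64_t
 * counters, implemented either with the z196 interlocked-access
 * instructions or with compare-and-swap loops on older machines.
 */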
#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#define __ATOMIC_NO_BARRIER	"\n"

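/*
 * Two implementations of the basic update loop: machines built with
 * CONFIG_HAVE_MARCH_Z196_FEATURES use the interlocked-access
 * instructions, which perform the operation and return the old value
 * in a single instruction; older machines fall back to a
 * compare-and-swap retry loop.
 */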
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC_OR	"lao"
#define __ATOMIC_AND	"lan"
#define __ATOMIC_ADD	"laa"
#define __ATOMIC_BARRIER "bcr	14,0\n"

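/*
 * laa/lan/lao atomically add/and/or op_val into *ptr and leave the old
 * value in old_val.  __barrier expands to "bcr 14,0" (a non-branching
 * BCR that serializes, i.e. acts as a full memory barrier) for the
 * value-returning variants and to nothing for the void variants.
 */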
#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
	int old_val;							\
									\
	typecheck(atomic_t *, ptr);					\
	asm volatile(							\
		__barrier						\
		op_string "	%0,%2,%1\n"				\
		__barrier						\
		: "=d" (old_val), "+Q" ((ptr)->counter)		\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#else

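/*
 * Pre-z196: plain register or/and/add inside a cs retry loop.  cs is
 * itself a serializing instruction, so __ATOMIC_BARRIER stays empty
 * here.
 */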
#define __ATOMIC_OR	"or"
#define __ATOMIC_AND	"nr"
#define __ATOMIC_ADD	"ar"
#define __ATOMIC_BARRIER "\n"

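/*
 * Load the current counter, apply op_string to a copy, then try to
 * store it back with compare-and-swap; "jl 0b" retries as long as cs
 * reports that the counter changed underneath us.
 */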
#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
	int old_val, new_val;						\
									\
	typecheck(atomic_t *, ptr);					\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#endif

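/*
 * Aligned 4-byte (resp. 8-byte) loads and stores are atomic on s390,
 * so read and set need nothing more than a single l/st (lg/stg).
 */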
static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

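/*
 * The value-returning form passes __ATOMIC_BARRIER so that it implies
 * a full memory barrier, as the generic atomic API expects.
 */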
static inline int atomic_add_return(int i, atomic_t *v)
{
	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
}

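/*
 * For a compile-time constant that fits the signed 8-bit immediate of
 * "asi" (-128..127), add directly to storage; otherwise go through the
 * generic loop, without barriers since no value is returned.
 */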
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"asi	%0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
		return;
	}
#endif
	__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
}

#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

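/* Bitwise helpers: clear_mask ANDs with ~mask, set_mask ORs with mask. */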
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	__ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

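/*
 * cmpxchg maps directly onto cs: "old" goes in as the expected value
 * and comes back holding whatever the counter contained before the
 * instruction, whether or not the swap succeeded.
 */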
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		"	cs	%0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
}

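/*
 * Add "a" to the counter unless it currently holds "u"; returns the
 * value seen before the add (or "u" if nothing was done).
 */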
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

#undef __ATOMIC_LOOP

#define ATOMIC64_INIT(i)	{ (i) }

#define __ATOMIC64_NO_BARRIER	"\n"

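/*
 * The atomic64_t variants mirror the 32-bit code above, using the
 * 64-bit instruction forms (laag/lang/laog, lg/stg, csg, agsi).
 */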
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC64_OR	"laog"
#define __ATOMIC64_AND	"lang"
#define __ATOMIC64_ADD	"laag"
#define __ATOMIC64_BARRIER "bcr	14,0\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
	long long old_val;						\
									\
	typecheck(atomic64_t *, ptr);					\
	asm volatile(							\
		__barrier						\
		op_string "	%0,%2,%1\n"				\
		__barrier						\
		: "=d" (old_val), "+Q" ((ptr)->counter)		\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#else

#define __ATOMIC64_OR	"ogr"
#define __ATOMIC64_AND	"ngr"
#define __ATOMIC64_ADD	"agr"
#define __ATOMIC64_BARRIER "\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
	long long old_val, new_val;					\
									\
	typecheck(atomic64_t *, ptr);					\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#endif

static inline long long atomic64_read(const atomic64_t *v)
{
	long long c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
}

static inline void atomic64_add(long long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"agsi	%0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
		return;
	}
#endif
	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
}

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					  long long old, long long new)
{
	asm volatile(
		"	csg	%0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
}

#undef __ATOMIC64_LOOP

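/* Returns non-zero if the add was performed, i.e. the counter was not "u". */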
static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + i);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

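/*
 * Decrement only if the result stays non-negative; returns the
 * decremented value, or a negative value if no decrement happened.
 */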
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#endif /* __ARCH_S390_ATOMIC__ */