/*
 * Atomic operations for the 32-bit TILE architecture.
 *
 * Do not include this file directly; use <linux/atomic.h>.
 */
#ifndef _ASM_TILE_ATOMIC_32_H
#define _ASM_TILE_ATOMIC_32_H

#include <asm/barrier.h>
#include <arch/chip.h>

#ifndef __ASSEMBLY__

/* Tile-specific out-of-line routines backing the 32-bit atomics below. */
int _atomic_xchg(int *ptr, int n);
int _atomic_xchg_add(int *v, int i);
int _atomic_xchg_add_unless(int *v, int a, int u);
int _atomic_cmpxchg(int *ptr, int o, int n);

/* Atomically add @i to @v; no return value and no barrier implied. */
static inline void atomic_add(int i, atomic_t *v)
{
	_atomic_xchg_add(&v->counter, i);
}

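/*
 * Generate atomic_{and,or,xor}() and atomic_fetch_{and,or,xor}() in
 * terms of the out-of-line _atomic_fetch_<op>() helpers declared by
 * the macro itself.  The fetch variants issue a full barrier first,
 * as the kernel requires for value-returning atomic operations.
 */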
#define ATOMIC_OPS(op)							\
unsigned long _atomic_fetch_##op(volatile unsigned long *p, unsigned long mask); \
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	_atomic_fetch_##op((unsigned long *)&v->counter, i);		\
}									\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	smp_mb();							\
	return _atomic_fetch_##op((unsigned long *)&v->counter, i);	\
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

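/*
 * atomic_fetch_add() returns the value @v held before the addition;
 * atomic_add_return() below returns the new value (the helper's old
 * value plus @i).
 */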
static inline int atomic_fetch_add(int i, atomic_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add(&v->counter, i);
}

/* Atomically add @i to @v and return the new value (@v + @i). */
static inline int atomic_add_return(int i, atomic_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add(&v->counter, i) + i;
}

/*
 * Atomically add @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic_xchg_add_unless(&v->counter, a, u);
}

/*
 * Atomically set @v to @n.  This cannot be a plain store: since the
 * atomics here are built on locked read-modify-write sequences, a raw
 * store could be lost if it raced with a concurrent atomic update of
 * the same location.
 */
static inline void atomic_set(atomic_t *v, int n)
{
	_atomic_xchg(&v->counter, n);
}

/* A 64-bit atomic type. */

typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(val) { (val) }

/* Tile-specific out-of-line routines backing the 64-bit atomics below. */
long long _atomic64_xchg(long long *v, long long n);
long long _atomic64_xchg_add(long long *v, long long i);
long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);
long long _atomic64_cmpxchg(long long *v, long long o, long long n);

/* Atomically read @v. */
static inline long long atomic64_read(const atomic64_t *v)
{
	/*
	 * A plain 64-bit load is not a single atomic operation on this
	 * 32-bit architecture, so read the value by atomically adding
	 * zero.  Casting away const is safe because adding zero leaves
	 * the stored value unchanged.
	 */
	return _atomic64_xchg_add((long long *)&v->counter, 0);
}

/* Atomically add @i to @v; no return value and no barrier implied. */
static inline void atomic64_add(long long i, atomic64_t *v)
{
	_atomic64_xchg_add(&v->counter, i);
}

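/*
 * As with ATOMIC_OPS above, generate atomic64_{and,or,xor}() and their
 * atomic64_fetch_*() counterparts from out-of-line helpers, with a full
 * barrier ahead of each value-returning variant.
 */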
#define ATOMIC64_OPS(op)					\
long long _atomic64_fetch_##op(long long *v, long long n);	\
static inline void atomic64_##op(long long i, atomic64_t *v)	\
{								\
	_atomic64_fetch_##op(&v->counter, i);			\
}								\
static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
{								\
	smp_mb();						\
	return _atomic64_fetch_##op(&v->counter, i);		\
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

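/*
 * 64-bit counterparts of atomic_fetch_add(), atomic_add_return() and
 * __atomic_add_unless(), built on the _atomic64_* helpers.
 */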
static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add(&v->counter, i);
}

/* Atomically add @i to @v and return the new value (@v + @i). */
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add(&v->counter, i) + i;
}

/*
 * Atomically add @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline long long atomic64_add_unless(atomic64_t *v, long long a,
					    long long u)
{
	smp_mb();  /* barrier for proper semantics */
	return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
}

/*
 * Atomically set @v to @n.  As with atomic_set(), this cannot be a
 * plain store, which could be lost if it raced with a concurrent
 * atomic update of the same location.
 */
static inline void atomic64_set(atomic64_t *v, long long n)
{
	_atomic64_xchg(&v->counter, n);
}

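/*
 * The remaining 64-bit operations are thin wrappers around the
 * primitives defined above.
 */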
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
#define atomic64_fetch_sub(i, v)	atomic64_fetch_add(-(i), (v))
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_sub(i, v)		atomic64_add(-(i), (v))
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* !__ASSEMBLY__ */

/*
 * Everything below this point is internal to the atomic implementation.
 */

/*
 * Size of the hash table of atomic locks: PAGE_SIZE / 8 entries.  Each
 * entry is a 4-byte int, so the whole table fits within a single page.
 */
#define ATOMIC_HASH_SHIFT (PAGE_SHIFT - 3)
#define ATOMIC_HASH_SIZE (1 << ATOMIC_HASH_SHIFT)

#ifndef __ASSEMBLY__
extern int atomic_locks[];
#endif

/*
 * Code that can fault while holding an atomic lock keeps the pointer
 * to the lock in this register, so the fault handler can release and
 * later reacquire the lock.
 */
#define ATOMIC_LOCK_REG 20
#define ATOMIC_LOCK_REG_NAME r20

#ifndef __ASSEMBLY__

/* Called at setup time to point the lock hash table at per-cpu locks. */
void __init_atomic_per_cpu(void);

#ifdef CONFIG_SMP
/* Support releasing an atomic lock from the page-fault path. */
void __atomic_fault_unlock(int *lock_ptr);
#endif

/* Return a pointer to the lock that guards the given address. */
int *__atomic_hashed_lock(volatile void *v);

/*
 * Out-of-line helper primitives.  The 32-bit variants return both the
 * old value and a fault indication via struct __get_user.
 */
struct __get_user {
	unsigned long val;
	int err;
};
extern struct __get_user __atomic32_cmpxchg(volatile int *p,
					    int *lock, int o, int n);
extern struct __get_user __atomic32_xchg(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_xchg_add(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_xchg_add_unless(volatile int *p,
						    int *lock, int o, int n);
extern struct __get_user __atomic32_fetch_or(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_and(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_andn(volatile int *p, int *lock, int n);
extern struct __get_user __atomic32_fetch_xor(volatile int *p, int *lock, int n);
extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
				    long long o, long long n);
extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
				     long long n);
extern long long __atomic64_xchg_add_unless(volatile long long *p,
					    int *lock, long long o, long long n);
extern long long __atomic64_fetch_and(volatile long long *p, int *lock, long long n);
extern long long __atomic64_fetch_or(volatile long long *p, int *lock, long long n);
extern long long __atomic64_fetch_xor(volatile long long *p, int *lock, long long n);

/* Return failure for a bad user-space address. */
struct __get_user __atomic_bad_address(int __user *addr);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_ATOMIC_32_H */