#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
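
/*
 * Atomic operations that C can't guarantee us.  Useful for resource
 * counting etc.  parisc has no atomic read-modify-write instructions
 * suitable for this, so the SMP implementation below falls back to a
 * small hashed array of spinlocks.
 */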
#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */
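
/*
 * Use an array of spinlocks for our atomic_ts.  Hash function to
 * index into a different SPINLOCK.  Since "a" is usually an address,
 * use one spinlock per cacheline.
 */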
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
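
/* Substitute for raw_spin_lock_irqsave()/_irqrestore(): save and
 * disable local interrupts, then take (or release) the hashed lock
 * covering @l.
 */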
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)

#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
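
/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */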
static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return ACCESS_ONCE((v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n)	(cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
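
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to @v...
 * @u: ...unless @v is equal to @u.
 *
 * Atomically adds @a to @v, as long as @v was not @u.
 * Returns the old value of @v.
 */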
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

#define ATOMIC_OP(op, c_op)						\
static __inline__ void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

ATOMIC_OP(and, &=)
ATOMIC_OP(or, |=)
ATOMIC_OP(xor, ^=)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
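
/*
 * ATOMIC_OPS() above generates both atomic_<op>() and
 * atomic_<op>_return(); the bitwise ops (and/or/xor) get only the
 * void atomic_<op>() form.
 */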

#define atomic_inc(v)	(atomic_add(   1,(v)))
#define atomic_dec(v)	(atomic_add(  -1,(v)))

#define atomic_inc_return(v)	(atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
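
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1 and returns true if the result is
 * zero, or false for all other cases.
 */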
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)

#define ATOMIC_INIT(i)	{ (i) }

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op) ATOMIC64_OP(op, c_op) ATOMIC64_OP_RETURN(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
ATOMIC64_OP(and, &=)
ATOMIC64_OP(or, |=)
ATOMIC64_OP(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return ACCESS_ONCE((v)->counter);
}

#define atomic64_inc(v)		(atomic64_add(   1,(v)))
#define atomic64_dec(v)		(atomic64_add(  -1,(v)))

#define atomic64_inc_return(v)		(atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
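
/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to @v...
 * @u: ...unless @v is equal to @u.
 *
 * Atomically adds @a to @v, as long as @v was not @u.
 * Returns non-zero if the add was done, zero otherwise.
 */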
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
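
/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * Returns the old value of *@v minus 1, even if @v was not decremented.
 */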
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

#endif /* CONFIG_64BIT */

#endif /* _ASM_PARISC_ATOMIC_H_ */