#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * PA-RISC has no suitable atomic read-modify-write instructions, so on
 * SMP the implementation below falls back to an array of spinlocks,
 * indexed by a hash of the atomic_t's address.
 */
#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
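/*
 * Illustrative example (not part of the interface): assuming a 64-byte
 * L1_CACHE_BYTES, an atomic_t at address 0x1040 hashes to bucket
 * (0x1040 / 64) & 3 == 0x41 & 3 == 1, so atomics that live in
 * different cachelines tend to be serialized by different locks.
 */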
extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;

/* Can't use the generic spin_lock_irqsave() here because of #include
 * problems, so these open-coded helpers are the substitute. */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)
#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
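/*
 * Sketch of how the helpers above are used (illustrative only; see
 * __atomic_add_return() below for the real thing):
 *
 *	unsigned long flags;
 *	_atomic_spin_lock_irqsave(v, flags);
 *	... plain read-modify-write of v->counter ...
 *	_atomic_spin_unlock_irqrestore(v, flags);
 *
 * On UP kernels this degenerates to local_irq_save()/restore(), which
 * is enough because there is no other CPU to race with.
 */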
/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

/* It's possible to reduce all atomic operations to either
 * __atomic_add_return, atomic_set and atomic_read (the latter
 * is there only for consistency).
 */
static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
	int ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ int atomic_read(const atomic_t *v)
{
	return (*(volatile int *)&(v)->counter);
}
/* Exported cmpxchg/xchg interface; the primitives come from <asm/cmpxchg.h>. */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
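/*
 * Illustrative (hypothetical helper, not part of this header): a
 * lock-free "store the maximum" built on atomic_cmpxchg():
 *
 *	static inline void atomic_max(atomic_t *v, int new)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old < new) {
 *			int seen = atomic_cmpxchg(v, old, new);
 *			if (seen == old)
 *				break;
 *			old = seen;
 *		}
 *	}
 */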
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
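/*
 * Illustrative note: the generic wrappers in <linux/atomic.h> are
 * typically built on the primitive above, roughly as
 *
 *	atomic_add_unless(v, a, u)  ==>  __atomic_add_unless((v), (a), (u)) != (u)
 *	atomic_inc_not_zero(v)      ==>  atomic_add_unless((v), 1, 0)
 *
 * i.e. "take a reference only if the object still has one".
 */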
#define atomic_add(i,v)	((void)(__atomic_add_return( (i),(v))))
#define atomic_sub(i,v)	((void)(__atomic_add_return(-((int) (i)),(v))))
#define atomic_inc(v)	((void)(__atomic_add_return(   1,(v))))
#define atomic_dec(v)	((void)(__atomic_add_return(  -1,(v))))

#define atomic_add_return(i,v)	(__atomic_add_return( (i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-(i),(v)))
#define atomic_inc_return(v)	(__atomic_add_return(   1,(v)))
#define atomic_dec_return(v)	(__atomic_add_return(  -1,(v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v)	(atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v)	(atomic_dec_return(v) == 0)

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i),(v)) == 0)
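/*
 * Illustrative refcounting pattern (hypothetical kref-style object):
 *
 *	static void put_foo(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcnt))
 *			kfree(f);
 *	}
 *
 * atomic_dec_and_test() returns true only for the caller that drops
 * the count to zero, so exactly one caller frees the object.
 */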
#define ATOMIC_INIT(i)	{ (i) }

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
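/*
 * Illustrative ordering example (field names are hypothetical): generic
 * code may not assume that atomic_inc()/atomic_dec() imply a memory
 * barrier, so it brackets the operation explicitly when ordering
 * matters, e.g.
 *
 *	obj->ready = 1;
 *	smp_mb__before_atomic_inc();
 *	atomic_inc(&pending);
 */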
#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	ret = (v->counter += i);

	_atomic_spin_unlock_irqrestore(v, flags);
	return ret;
}
static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return (*(volatile long *)&(v)->counter);
}
#define atomic64_add(i,v)	((void)(__atomic64_add_return( ((s64)(i)),(v))))
#define atomic64_sub(i,v)	((void)(__atomic64_add_return(-((s64)(i)),(v))))
#define atomic64_inc(v)		((void)(__atomic64_add_return(   1,(v))))
#define atomic64_dec(v)		((void)(__atomic64_add_return(  -1,(v))))

#define atomic64_add_return(i,v)	(__atomic64_add_return( ((s64)(i)),(v)))
#define atomic64_sub_return(i,v)	(__atomic64_add_return(-((s64)(i)),(v)))
#define atomic64_inc_return(v)		(__atomic64_add_return(   1,(v)))
#define atomic64_dec_return(v)		(__atomic64_add_return(  -1,(v)))

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)

#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)
/* Exported 64-bit cmpxchg/xchg interface. */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
/*
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add was done, zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
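/*
 * Illustrative use (hypothetical object layout): take a 64-bit
 * reference only while the object is still live.
 *
 *	if (!atomic64_inc_not_zero(&obj->refcnt))
 *		return NULL;	(object is already on its way to being freed)
 */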
#endif /* CONFIG_64BIT */

#endif /* _ASM_PARISC_ATOMIC_H_ */