1
2
3
4
5
6#ifndef _ASM_PARISC_ATOMIC_H_
7#define _ASM_PARISC_ATOMIC_H_
8
9#include <linux/types.h>
10#include <asm/cmpxchg.h>
11#include <asm/barrier.h>
12
13
14
15
16
17
18
19
20
21#ifdef CONFIG_SMP
22#include <asm/spinlock.h>
23#include <asm/cache.h>
24
25
26
27
28
29# define ATOMIC_HASH_SIZE 4
30# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
31
32extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
33
34
35
36#define _atomic_spin_lock_irqsave(l,f) do { \
37 arch_spinlock_t *s = ATOMIC_HASH(l); \
38 local_irq_save(f); \
39 arch_spin_lock(s); \
40} while(0)
41
42#define _atomic_spin_unlock_irqrestore(l,f) do { \
43 arch_spinlock_t *s = ATOMIC_HASH(l); \
44 arch_spin_unlock(s); \
45 local_irq_restore(f); \
46} while(0)
47
48
49#else
50# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
51# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
52#endif
53
54
55
56
57
58
59static __inline__ void arch_atomic_set(atomic_t *v, int i)
60{
61 unsigned long flags;
62 _atomic_spin_lock_irqsave(v, flags);
63
64 v->counter = i;
65
66 _atomic_spin_unlock_irqrestore(v, flags);
67}
68
69#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
70
71static __inline__ int arch_atomic_read(const atomic_t *v)
72{
73 return READ_ONCE((v)->counter);
74}
75
76
77#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
78#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
79
80#define ATOMIC_OP(op, c_op) \
81static __inline__ void arch_atomic_##op(int i, atomic_t *v) \
82{ \
83 unsigned long flags; \
84 \
85 _atomic_spin_lock_irqsave(v, flags); \
86 v->counter c_op i; \
87 _atomic_spin_unlock_irqrestore(v, flags); \
88}
89
90#define ATOMIC_OP_RETURN(op, c_op) \
91static __inline__ int arch_atomic_##op##_return(int i, atomic_t *v) \
92{ \
93 unsigned long flags; \
94 int ret; \
95 \
96 _atomic_spin_lock_irqsave(v, flags); \
97 ret = (v->counter c_op i); \
98 _atomic_spin_unlock_irqrestore(v, flags); \
99 \
100 return ret; \
101}
102
103#define ATOMIC_FETCH_OP(op, c_op) \
104static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v) \
105{ \
106 unsigned long flags; \
107 int ret; \
108 \
109 _atomic_spin_lock_irqsave(v, flags); \
110 ret = v->counter; \
111 v->counter c_op i; \
112 _atomic_spin_unlock_irqrestore(v, flags); \
113 \
114 return ret; \
115}
116
117#define ATOMIC_OPS(op, c_op) \
118 ATOMIC_OP(op, c_op) \
119 ATOMIC_OP_RETURN(op, c_op) \
120 ATOMIC_FETCH_OP(op, c_op)
121
122ATOMIC_OPS(add, +=)
123ATOMIC_OPS(sub, -=)
124
125#undef ATOMIC_OPS
126#define ATOMIC_OPS(op, c_op) \
127 ATOMIC_OP(op, c_op) \
128 ATOMIC_FETCH_OP(op, c_op)
129
130ATOMIC_OPS(and, &=)
131ATOMIC_OPS(or, |=)
132ATOMIC_OPS(xor, ^=)
133
134#undef ATOMIC_OPS
135#undef ATOMIC_FETCH_OP
136#undef ATOMIC_OP_RETURN
137#undef ATOMIC_OP
138
139#ifdef CONFIG_64BIT
140
141#define ATOMIC64_INIT(i) { (i) }
142
143#define ATOMIC64_OP(op, c_op) \
144static __inline__ void arch_atomic64_##op(s64 i, atomic64_t *v) \
145{ \
146 unsigned long flags; \
147 \
148 _atomic_spin_lock_irqsave(v, flags); \
149 v->counter c_op i; \
150 _atomic_spin_unlock_irqrestore(v, flags); \
151}
152
153#define ATOMIC64_OP_RETURN(op, c_op) \
154static __inline__ s64 arch_atomic64_##op##_return(s64 i, atomic64_t *v) \
155{ \
156 unsigned long flags; \
157 s64 ret; \
158 \
159 _atomic_spin_lock_irqsave(v, flags); \
160 ret = (v->counter c_op i); \
161 _atomic_spin_unlock_irqrestore(v, flags); \
162 \
163 return ret; \
164}
165
166#define ATOMIC64_FETCH_OP(op, c_op) \
167static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
168{ \
169 unsigned long flags; \
170 s64 ret; \
171 \
172 _atomic_spin_lock_irqsave(v, flags); \
173 ret = v->counter; \
174 v->counter c_op i; \
175 _atomic_spin_unlock_irqrestore(v, flags); \
176 \
177 return ret; \
178}
179
180#define ATOMIC64_OPS(op, c_op) \
181 ATOMIC64_OP(op, c_op) \
182 ATOMIC64_OP_RETURN(op, c_op) \
183 ATOMIC64_FETCH_OP(op, c_op)
184
185ATOMIC64_OPS(add, +=)
186ATOMIC64_OPS(sub, -=)
187
188#undef ATOMIC64_OPS
189#define ATOMIC64_OPS(op, c_op) \
190 ATOMIC64_OP(op, c_op) \
191 ATOMIC64_FETCH_OP(op, c_op)
192
193ATOMIC64_OPS(and, &=)
194ATOMIC64_OPS(or, |=)
195ATOMIC64_OPS(xor, ^=)
196
197#undef ATOMIC64_OPS
198#undef ATOMIC64_FETCH_OP
199#undef ATOMIC64_OP_RETURN
200#undef ATOMIC64_OP
201
202static __inline__ void
203arch_atomic64_set(atomic64_t *v, s64 i)
204{
205 unsigned long flags;
206 _atomic_spin_lock_irqsave(v, flags);
207
208 v->counter = i;
209
210 _atomic_spin_unlock_irqrestore(v, flags);
211}
212
213#define arch_atomic64_set_release(v, i) arch_atomic64_set((v), (i))
214
215static __inline__ s64
216arch_atomic64_read(const atomic64_t *v)
217{
218 return READ_ONCE((v)->counter);
219}
220
221
222#define arch_atomic64_cmpxchg(v, o, n) \
223 ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
224#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
225
226#endif
227
228
229#endif
230