/* MN10300 atomic counter operations */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <asm/irqflags.h>
#include <asm/cmpxchg.h>

#ifndef CONFIG_SMP
#include <asm-generic/atomic.h>
#else
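
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */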
#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__
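
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */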
#define atomic_read(v)	(ACCESS_ONCE((v)->counter))
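
/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */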
#define atomic_set(v, i) (((v)->counter) = (i))
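
/**
 * atomic_add_return - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the result.
 */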
static inline int atomic_add_return(int i, atomic_t *v)
{
	int retval;
#ifdef CONFIG_SMP
	int status;

	asm volatile(
		"1:	mov	%4,(_AAR,%3)	\n"	/* request atomic access to v->counter */
		"	mov	(_ADR,%3),%1	\n"	/* load the current value */
		"	add	%5,%1		\n"	/* add the increment */
		"	mov	%1,(_ADR,%3)	\n"	/* attempt to store the new value */
		"	mov	(_ADR,%3),%0	\n"	/* flush the store */
		"	mov	(_ASR,%3),%0	\n"	/* read the status register */
		"	or	%0,%0		\n"	/* test for failure */
		"	bne	1b		\n"	/* retry if the op was interfered with */
		: "=&r"(status), "=&r"(retval), "=m"(v->counter)
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
		: "memory", "cc");

#else
	unsigned long flags;

	/* UP: disabling interrupts is sufficient for atomicity */
	flags = arch_local_cli_save();
	retval = v->counter;
	retval += i;
	v->counter = retval;
	arch_local_irq_restore(flags);
#endif
	return retval;
}
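
/**
 * atomic_sub_return - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns the result.
 */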
static inline int atomic_sub_return(int i, atomic_t *v)
{
	int retval;
#ifdef CONFIG_SMP
	int status;

	asm volatile(
		"1:	mov	%4,(_AAR,%3)	\n"	/* request atomic access to v->counter */
		"	mov	(_ADR,%3),%1	\n"	/* load the current value */
		"	sub	%5,%1		\n"	/* subtract the decrement */
		"	mov	%1,(_ADR,%3)	\n"	/* attempt to store the new value */
		"	mov	(_ADR,%3),%0	\n"	/* flush the store */
		"	mov	(_ASR,%3),%0	\n"	/* read the status register */
		"	or	%0,%0		\n"	/* test for failure */
		"	bne	1b		\n"	/* retry if the op was interfered with */
		: "=&r"(status), "=&r"(retval), "=m"(v->counter)
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
		: "memory", "cc");

#else
	unsigned long flags;

	/* UP: disabling interrupts is sufficient for atomicity */
	flags = arch_local_cli_save();
	retval = v->counter;
	retval -= i;
	v->counter = retval;
	arch_local_irq_restore(flags);
#endif
	return retval;
}
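
/**
 * atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true if the result is negative,
 * or false when the result is greater than or equal to zero.
 */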
static inline int atomic_add_negative(int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
	atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
	atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
	atomic_sub_return(1, v);
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
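
/**
 * __atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to @v...
 * @u: ...unless @v is equal to @u
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */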
#define __atomic_add_unless(v, a, u)				\
({								\
	int c, old;						\
	c = atomic_read(v);					\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
		c = old;					\
	c;							\
})

#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
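
/**
 * atomic_clear_mask - atomically clear bits in memory
 * @mask: mask of the bits to be cleared
 * @addr: pointer to the word in memory
 *
 * Atomically clears the bits set in @mask from the memory word at @addr.
 */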
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
#ifdef CONFIG_SMP
	int status;

	asm volatile(
		"1:	mov	%3,(_AAR,%2)	\n"	/* request atomic access to *addr */
		"	mov	(_ADR,%2),%0	\n"	/* load the current value */
		"	and	%4,%0		\n"	/* clear the masked bits */
		"	mov	%0,(_ADR,%2)	\n"	/* attempt to store the new value */
		"	mov	(_ADR,%2),%0	\n"	/* flush the store */
		"	mov	(_ASR,%2),%0	\n"	/* read the status register */
		"	or	%0,%0		\n"	/* test for failure */
		"	bne	1b		\n"	/* retry if the op was interfered with */
		: "=&r"(status), "=m"(*addr)
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
		: "memory", "cc");
#else
	unsigned long flags;

	/* UP: disabling interrupts is sufficient for atomicity */
	mask = ~mask;
	flags = arch_local_cli_save();
	*addr &= mask;
	arch_local_irq_restore(flags);
#endif
}
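
/**
 * atomic_set_mask - atomically set bits in memory
 * @mask: mask of the bits to be set
 * @addr: pointer to the word in memory
 *
 * Atomically sets the bits set in @mask in the memory word at @addr.
 */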
static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
{
#ifdef CONFIG_SMP
	int status;

	asm volatile(
		"1:	mov	%3,(_AAR,%2)	\n"	/* request atomic access to *addr */
		"	mov	(_ADR,%2),%0	\n"	/* load the current value */
		"	or	%4,%0		\n"	/* set the masked bits */
		"	mov	%0,(_ADR,%2)	\n"	/* attempt to store the new value */
		"	mov	(_ADR,%2),%0	\n"	/* flush the store */
		"	mov	(_ASR,%2),%0	\n"	/* read the status register */
		"	or	%0,%0		\n"	/* test for failure */
		"	bne	1b		\n"	/* retry if the op was interfered with */
		: "=&r"(status), "=m"(*addr)
		: "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
		: "memory", "cc");
#else
	unsigned long flags;

	/* UP: disabling interrupts is sufficient for atomicity */
	flags = arch_local_cli_save();
	*addr |= mask;
	arch_local_irq_restore(flags);
#endif
}
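
/*
 * The smp_mb__*_atomic_* barriers reduce to compiler barriers here:
 * the atomic operations above already provide the required ordering.
 */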
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* __KERNEL__ */
#endif /* CONFIG_SMP */
#endif /* _ASM_ATOMIC_H */