/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)	{ (i) }
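/* Typical usage: static atomic_t refcount = ATOMIC_INIT(1); */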

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	((v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

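/*
 * The _return variants must behave as full memory barriers, which the
 * bare ldrex/strex loop does not provide; hence the smp_mb() on entry
 * to and exit from each *_return operation below.
 */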
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

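/*
 * Compare-and-swap: strexeq attempts the store only when the value read
 * matches 'old'.  A non-zero status in 'res' means the exclusive store
 * was beaten by another agent, so the whole compare-and-swap is retried.
 */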
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%2]\n"
		"mov	%0, #0\n"
		"teq	%1, %3\n"
		"strexeq %0, %4, [%2]\n"
		    : "=&r" (res), "=&r" (oldval)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}

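/*
 * atomic_clear_mask() clears the bits set in 'mask' at '*addr'; bic
 * computes the bitwise AND-NOT inside the same exclusive retry loop.
 */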
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%2]\n"
"	bic	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (addr), "Ir" (mask)
	: "cc");
}

#else

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

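/*
 * Pre-ARMv6 CPUs lack ldrex/strex, so atomicity is emulated by disabling
 * interrupts around a plain read-modify-write.  That is only safe on a
 * single CPU, which is why SMP builds are rejected above.
 */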
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ >= 6 */

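/* xchg() is the generic exchange primitive provided by <asm/system.h>. */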
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

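/*
 * atomic_add_unless() adds 'a' to v unless v was 'u'; it returns non-zero
 * if the add was performed.  The cmpxchg loop re-reads the counter each
 * time another CPU modified it between the read and the swap.
 */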
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c != u;
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

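/*
 * atomic_inc()/atomic_dec() imply no ordering of their own, so callers
 * that need ordering pair them with these explicit barriers.
 */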
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#include <asm-generic/atomic-long.h>
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */