/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	ACCESS_ONCE((v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))
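
/*
 * Usage sketch (hypothetical caller, not part of this header): a plain
 * event counter. atomic_read() is just a volatile load and atomic_set()
 * a plain store; neither implies any memory barrier.
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	atomic_inc(&nr_events);
 *	pr_info("events: %d\n", atomic_read(&nr_events));
 */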

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}
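
/*
 * atomic_add() and atomic_sub() do not return a value and so provide no
 * memory ordering guarantees; only the *_return and cmpxchg variants
 * below are bracketed with smp_mb().
 */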

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}
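
/*
 * Usage sketch (hypothetical, for illustration only): the smp_mb() on
 * both sides makes atomic_add_return() fully ordered, so it can hand out
 * ticket-style sequence numbers that other CPUs observe consistently:
 *
 *	static atomic_t next_seq = ATOMIC_INIT(0);
 *
 *	int seq = atomic_add_return(1, &next_seq);
 */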

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}
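
/*
 * Usage sketch (hypothetical caller): the classic lock-free update loop.
 * atomic_cmpxchg() returns the value it found, so the update succeeded
 * exactly when that equals the value the computation was based on:
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(&v);
 *		new = compute_update(old);	(hypothetical helper)
 *	} while (atomic_cmpxchg(&v, old, new) != old);
 */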

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%3]\n"
"	bic	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
	: "r" (addr), "Ir" (mask)
	: "cc");
}
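
/*
 * atomic_clear_mask() atomically clears the bits of *addr selected by
 * mask (the bic instruction computes *addr & ~mask). Like the other
 * non-return operations it implies no memory barrier.
 */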

#else

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

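/*
 * Pre-ARMv6 CPUs lack the ldrex/strex exclusives, so these fallbacks are
 * only safe on uniprocessor systems (hence the #error above): disabling
 * interrupts around the read-modify-write makes it atomic with respect
 * to everything else that can run on the one CPU.
 */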
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
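
/*
 * atomic_xchg() reuses the generic xchg() from <asm/cmpxchg.h> (included
 * above); it unconditionally stores the new value and returns the
 * previous one.
 */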

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}
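
/*
 * __atomic_add_unless() adds a to v unless v == u, and returns the old
 * value either way; the generic <linux/atomic.h> wrappers build
 * atomic_add_unless() and atomic_inc_not_zero() on top of it. Usage
 * sketch (hypothetical refcount that may already be dying):
 *
 *	if (!atomic_inc_not_zero(&obj->refs))
 *		return NULL;	object already on its way out
 */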

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
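
/*
 * Usage sketch (hypothetical object lifetime code): the *_and_test
 * helpers fold the common "did we just hit zero?" check into the fully
 * ordered *_return operations:
 *
 *	if (atomic_dec_and_test(&obj->refs))
 *		free_obj(obj);	(hypothetical destructor)
 */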

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
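
/*
 * These are real smp_mb()s because atomic_inc()/atomic_dec() are
 * unordered on ARM; architectures whose atomics already imply ordering
 * can define them away as no-ops.
 */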

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }
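
/*
 * The __aligned(8) on the counter is required: ldrexd/strexd (and
 * ldrd/strd under LPAE) demand a doubleword-aligned address, and 64-bit
 * single-copy atomicity is only guaranteed at that alignment.
 */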

#ifdef CONFIG_ARM_LPAE
static inline u64 atomic64_read(const atomic64_t *v)
{
	u64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, u64 i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
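
/*
 * With LPAE the architecture guarantees that aligned ldrd/strd are
 * single-copy atomic, so a 64-bit read or set needs no exclusive loop.
 */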
#else
static inline u64 atomic64_read(const atomic64_t *v)
{
	u64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, u64 i)
{
	u64 tmp;

	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif
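
/*
 * Without LPAE a plain strd need not be atomic, so atomic64_set() must
 * go through strexd, and strexd only succeeds after a matching ldrexd;
 * that is why even a plain store needs the exclusive load/store loop.
 * A bare ldrexd, by contrast, is already a single-copy atomic read.
 */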

static inline void atomic64_add(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_add\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
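
/*
 * The adds/adc (and subs/sbc) pairs implement 64-bit arithmetic in two
 * 32-bit halves, with the carry/borrow flag linking them; the %H operand
 * modifier expands to the other register of the 64-bit pair (the one
 * holding the high word on little-endian).
 */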

static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_sub\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_sub_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
{
	u64 oldval;
	unsigned long res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}

static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	smp_mb();

	return result;
}

static inline u64 atomic64_dec_if_positive(atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, #1\n"
"	sbc	%H0, %H0, #0\n"
"	teq	%H0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
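
/*
 * atomic64_dec_if_positive() only stores the decrement when the result
 * is non-negative (the teq/bmi pair tests the sign of the high word and
 * skips the strexd otherwise), but it always returns the decremented
 * value, so callers detect the skipped case by a negative return.
 */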

static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	u64 val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%0, %0, %6\n"
"	adc	%H0, %H0, %H6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}
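
/*
 * atomic64_add_unless() returns 1 and adds a if v was not equal to u,
 * or returns 0 and leaves v alone; the trailing smp_mb() is issued only
 * when the add actually happened, matching the convention that a failed
 * conditional atomic need not act as a barrier.
 */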

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_ATOMIC_H */