/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are slower than
 * regular operations.
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_AVR32_ATOMIC_H
#define __ASM_AVR32_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))
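
/*
 * Example usage (illustrative sketch, not part of the API proper):
 * declaring and accessing a counter with the accessors above.
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 1);
 *	if (atomic_read(&nr_users) > 0)
 *		do_something();
 *
 * do_something() is a hypothetical caller-side helper.
 */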

/*
 * atomic_sub_return - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v. Returns the resulting value.
 */
static inline int atomic_sub_return(int i, atomic_t *v)
{
	int result;

	asm volatile(
		"/* atomic_sub_return */\n"
		"1:	ssrf	5\n"
		"	ld.w	%0, %2\n"
		"	sub	%0, %3\n"
		"	stcond	%1, %0\n"
		"	brne	1b"
		: "=&r"(result), "=o"(v->counter)
		: "m"(v->counter), "rKs21"(i)
		: "cc");

	return result;
}
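
/*
 * Rough C-level sketch of the retry loop above (an assumption about
 * the hardware semantics, for illustration only): "ssrf 5" sets the
 * lock flag that opens an atomic region, and "stcond" stores only if
 * that flag is still set, failing the store if the region was
 * interrupted.
 *
 *	do {
 *		result = v->counter - i;
 *	} while (!store_conditional(&v->counter, result));
 *
 * store_conditional() is a hypothetical stand-in for the stcond
 * instruction.
 */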

/*
 * atomic_add_return - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v. Returns the resulting value.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	int result;

	if (__builtin_constant_p(i) && (i >= -1048575) && (i <= 1048576))
		result = atomic_sub_return(-i, v);
	else
		asm volatile(
			"/* atomic_add_return */\n"
			"1:	ssrf	5\n"
			"	ld.w	%0, %1\n"
			"	add	%0, %3\n"
			"	stcond	%2, %0\n"
			"	brne	1b"
			: "=&r"(result), "=o"(v->counter)
			: "m"(v->counter), "r"(i)
			: "cc", "memory");

	return result;
}
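
/*
 * Why the constant check above: AVR32's "sub" accepts a signed
 * 21-bit immediate (the "rKs21" constraint in atomic_sub_return),
 * while "add" only takes a register operand.  For a compile-time
 * constant in range, adding @i is therefore done by subtracting -@i,
 * e.g.:
 *
 *	atomic_add_return(4, v);	becomes atomic_sub_return(-4, v);
 */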

/*
 * atomic_sub_unless - sub unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to subtract from v...
 * @u: ...unless v is equal to u.
 *
 * Atomically subtracts @a from @v, so long as it was not @u.
 */
static inline void atomic_sub_unless(atomic_t *v, int a, int u)
{
	int tmp;

	asm volatile(
		"/* atomic_sub_unless */\n"
		"1:	ssrf	5\n"
		"	ld.w	%0, %2\n"
		"	cp.w	%0, %4\n"
		"	breq	1f\n"
		"	sub	%0, %3\n"
		"	stcond	%1, %0\n"
		"	brne	1b\n"
		"1:"
		: "=&r"(tmp), "=o"(v->counter)
		: "m"(v->counter), "rKs21"(a), "rKs21"(u)
		: "cc", "memory");
}

/*
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int tmp, old = atomic_read(v);

	if (__builtin_constant_p(a) && (a >= -1048575) && (a <= 1048576))
		atomic_sub_unless(v, -a, u);
	else {
		asm volatile(
			"/* __atomic_add_unless */\n"
			"1:	ssrf	5\n"
			"	ld.w	%0, %2\n"
			"	cp.w	%0, %4\n"
			"	breq	1f\n"
			"	add	%0, %3\n"
			"	stcond	%1, %0\n"
			"	brne	1b\n"
			"1:"
			: "=&r"(tmp), "=o"(v->counter)
			: "m"(v->counter), "r"(a), "ir"(u)
			: "cc", "memory");
	}

	return old;
}
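
/*
 * Typical use (illustrative sketch): take a reference only while the
 * object is still live, i.e. increment the count unless it is zero.
 * The caller compares the returned old value against @u:
 *
 *	if (__atomic_add_unless(&obj->refcount, 1, 0) != 0) {
 *		... got a reference ...
 *	}
 *
 * obj and refcount are hypothetical names used only for illustration.
 */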

/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static inline int atomic_sub_if_positive(int i, atomic_t *v)
{
	int result;

	asm volatile(
		"/* atomic_sub_if_positive */\n"
		"1:	ssrf	5\n"
		"	ld.w	%0, %2\n"
		"	sub	%0, %3\n"
		"	brlt	1f\n"
		"	stcond	%1, %0\n"
		"	brne	1b\n"
		"1:"
		: "=&r"(result), "=o"(v->counter)
		: "m"(v->counter), "ir"(i)
		: "cc", "memory");

	return result;
}
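
/*
 * Example (illustrative sketch): claim one unit of a counted
 * resource only if one is available.  The return value is the would-be
 * new count (@v minus @i); a negative result means nothing was taken:
 *
 *	if (atomic_sub_if_positive(1, &free_slots) >= 0) {
 *		... slot acquired ...
 *	}
 *
 * free_slots is a hypothetical counter used only for illustration.
 */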

#define atomic_xchg(v, new)	(xchg(&((v)->counter), (new)))
#define atomic_cmpxchg(v, o, n)	(cmpxchg(&((v)->counter), (o), (n)))
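
/*
 * Example (illustrative sketch): an open-coded compare-and-swap retry
 * loop built on atomic_cmpxchg(), here doubling the counter:
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(v);
 *		new = old * 2;
 *	} while (atomic_cmpxchg(v, old, new) != old);
 */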

#define atomic_sub(i, v)	(void)atomic_sub_return(i, v)
#define atomic_add(i, v)	(void)atomic_add_return(i, v)
#define atomic_dec(v)		atomic_sub(1, (v))
#define atomic_inc(v)		atomic_add(1, (v))

#define atomic_dec_return(v)	atomic_sub_return(1, v)
#define atomic_inc_return(v)	atomic_add_return(1, v)

#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)

#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)

#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
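
/*
 * Example (illustrative sketch): AVR32 is uniprocessor, so the
 * smp_mb__*() hooks above reduce to compiler barriers.  A typical
 * caller still writes them for portability to SMP architectures:
 *
 *	obj->done = 1;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->pending);
 *
 * obj, done and pending are hypothetical names for illustration.
 */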

#endif /* __ASM_AVR32_ATOMIC_H */