/*
 * Atomic operations for the Hexagon architecture
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }

/*
 * atomic_set - set an atomic variable
 * @v: pointer to atomic value
 * @new: value to store
 *
 * Implemented as a locked load/conditional store loop so the store
 * goes through the same load-locked/store-conditional protocol as
 * every other operation on the word.
 */
static inline void atomic_set(atomic_t *v, int new)
{
	asm volatile(
		"1:	r6 = memw_locked(%0);\n"
		"	memw_locked(%0,p0) = %1;\n"
		"	if (!p0) jump 1b;\n"
		:
		: "r" (&v->counter), "r" (new)
		: "memory", "p0", "r6"
	);
}
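
/*
 * Usage sketch (illustrative only, not part of this header):
 * ATOMIC_INIT() initializes at build time, atomic_set() at run time.
 *
 *	static atomic_t nr_events = ATOMIC_INIT(0);
 *
 *	void reset_events(void)
 *	{
 *		atomic_set(&nr_events, 0);
 *	}
 */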

/**
 * atomic_read - read a word, atomically
 * @v: pointer to atomic value
 *
 * Word-sized, naturally aligned reads are atomic on this architecture;
 * READ_ONCE() only has to keep the compiler from tearing or caching
 * the load.
 */
#define atomic_read(v)		READ_ONCE((v)->counter)

/**
 * atomic_xchg - atomically exchange a new value into *v
 * @v: pointer to value to change
 * @new: value to store
 *
 * Returns the old value.  Defers to xchg() from asm/cmpxchg.h.
 */
#define atomic_xchg(v, new)	(xchg(&((v)->counter), (new)))
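
/*
 * Illustrative only: atomic_xchg() suits one-shot hand-offs, e.g.
 * claiming a flag exactly once across CPUs (hypothetical names):
 *
 *	static atomic_t initialized = ATOMIC_INIT(0);
 *
 *	if (atomic_xchg(&initialized, 1) == 0)
 *		do_one_time_setup();
 */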

/**
 * atomic_cmpxchg - atomic compare-and-exchange
 * @v: pointer to value to change
 * @old: expected old value
 * @new: value to store if *v still holds @old
 *
 * Returns the value observed by the locked load; the caller can tell
 * the exchange happened iff that value equals @old.
 *
 * The locked load (memw_locked) stamps a reservation on the word; the
 * predicated locked store succeeds only if the reservation is still
 * intact, and sets p0 accordingly.  A failed store (another writer got
 * in between) loops back and retries.
 */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int __oldval;

	asm volatile(
		"1:	%0 = memw_locked(%1);\n"
		"	{ p0 = cmp.eq(%0,%2);\n"
		"	  if (!p0.new) jump:nt 2f; }\n"
		"	memw_locked(%1,p0) = %3;\n"
		"	if (!p0) jump 1b;\n"
		"2:\n"
		: "=&r" (__oldval)
		: "r" (&v->counter), "r" (old), "r" (new)
		: "memory", "p0"
	);

	return __oldval;
}
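
/*
 * Illustrative cmpxchg loop (hypothetical helper, not part of this
 * header): building a bounded update on top of atomic_cmpxchg().
 *
 *	static int saturating_inc(atomic_t *v, int max)
 *	{
 *		int old, seen;
 *
 *		do {
 *			old = atomic_read(v);
 *			if (old >= max)
 *				return old;
 *			seen = atomic_cmpxchg(v, old, old + 1);
 *		} while (seen != old);
 *
 *		return old + 1;
 *	}
 */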

#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	int output;							\
									\
	asm volatile(							\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,p3) = %0;\n"			\
		"	if (!p3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
}

#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int output;							\
									\
	asm volatile(							\
		"1:	%0 = memw_locked(%1);\n"			\
		"	%0 = "#op "(%0,%2);\n"				\
		"	memw_locked(%1,p3) = %0;\n"			\
		"	if (!p3) jump 1b;\n"				\
		: "=&r" (output)					\
		: "r" (&v->counter), "r" (i)				\
		: "memory", "p3"					\
	);								\
	return output;							\
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
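
/*
 * The expansions above provide atomic_add(), atomic_sub(),
 * atomic_add_return(), atomic_sub_return(), atomic_and(), atomic_or()
 * and atomic_xor().  Illustrative use (hypothetical names):
 *
 *	atomic_add(nbytes, &total_bytes);
 *	if (atomic_sub_return(nbytes, &in_flight) == 0)
 *		wake_up_waiters();
 */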

/**
 * __atomic_add_unless - add to a value, unless it holds a given value
 * @v: pointer to atomic value
 * @a: amount to add
 * @u: don't add if the value equals this
 *
 * Returns the old value, whether or not the add was performed.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int __oldval;
	register int tmp;

	asm volatile(
		"1:	%0 = memw_locked(%2);"
		"	{"
		"		p3 = cmp.eq(%0, %4);"
		"		if (p3.new) jump:nt 2f;"
		"		%1 = add(%0, %3);"
		"	}"
		"	memw_locked(%2, p3) = %1;"
		"	{"
		"		if (!p3) jump 1b;"
		"	}"
		"2:"
		: "=&r" (__oldval), "=&r" (tmp)
		: "r" (&v->counter), "r" (a), "r" (u)
		: "memory", "p3"
	);
	return __oldval;
}
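
/*
 * Illustrative only: generic code (linux/atomic.h) wraps this as
 * atomic_add_unless(), which returns nonzero iff the add happened:
 *
 *	if (atomic_add_unless(&obj->refs, 1, 0))
 *		... got a reference, obj cannot be freed under us ...
 */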

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

#define atomic_inc(v) atomic_add(1, (v))
#define atomic_dec(v) atomic_sub(1, (v))

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, (v)) == 0)
#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)

#define atomic_inc_return(v) (atomic_add_return(1, (v)))
#define atomic_dec_return(v) (atomic_sub_return(1, (v)))
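
/*
 * Illustrative refcounting pattern (hypothetical names) built from the
 * helpers above:
 *
 *	void obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_test(&o->refcnt))
 *			obj_free(o);
 *	}
 */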

#endif /* _ASM_ATOMIC_H */