/*
 * Atomic operations for the Hexagon architecture
 */

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * A plain store does not clear a pending lock reservation on Hexagon,
 * so arch_atomic_set() uses a locked load/store sequence rather than a
 * normal write.
 */
static inline void arch_atomic_set(atomic_t *v, int new)
{
	asm volatile(
		"1: r6 = memw_locked(%0);\n"
		" memw_locked(%0,p0) = %1;\n"
		" if (!P0) jump 1b;\n"
		:
		: "r" (&v->counter), "r" (new)
		: "memory", "p0", "r6"
	);
}

#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))

/**
 * arch_atomic_read - read the atomic value
 * @v: pointer to the atomic value
 *
 * Word-sized, naturally aligned reads are atomic on this architecture,
 * so a plain READ_ONCE() of the counter is sufficient.
 */
#define arch_atomic_read(v)		READ_ONCE((v)->counter)

/**
 * arch_atomic_xchg - atomically exchange a new value into the counter
 * @v: pointer to the atomic value
 * @new: value to store
 *
 * Returns the previous value of @v; implemented on top of arch_xchg().
 */
#define arch_atomic_xchg(v, new)	(arch_xchg(&((v)->counter), (new)))

/**
 * arch_atomic_cmpxchg - atomic compare-and-exchange
 * @v: pointer to the atomic value
 * @old: expected old value
 * @new: value to store if *@v matches @old
 *
 * Implemented as a memw_locked (load-locked/store-conditional) loop:
 * the word is loaded, compared against @old, and the conditional store
 * of @new is attempted only on a match.  The loop retries if the store
 * fails because the lock reservation was lost in the meantime.
 *
 * Returns the value observed by the locked load (equal to @old on
 * success).
 */
static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int __oldval;

	asm volatile(
		"1: %0 = memw_locked(%1);\n"
		" { P0 = cmp.eq(%0,%2);\n"
		"   if (!P0.new) jump:nt 2f; }\n"
		" memw_locked(%1,P0) = %3;\n"
		" if (!P0) jump 1b;\n"
		"2:\n"
		: "=&r" (__oldval)
		: "r" (&v->counter), "r" (old), "r" (new)
		: "memory", "p0"
	);

	return __oldval;
}

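/*
 * Illustrative sketch only (not part of this header): a typical
 * compare-and-swap retry loop built on arch_atomic_cmpxchg().  The
 * helper name add_if_positive() is hypothetical.
 *
 *	static inline bool add_if_positive(atomic_t *v, int a)
 *	{
 *		int old = arch_atomic_read(v);
 *
 *		while (old > 0) {
 *			int seen = arch_atomic_cmpxchg(v, old, old + a);
 *
 *			if (seen == old)
 *				return true;
 *			old = seen;
 *		}
 *		return false;
 *	}
 */
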
#define ATOMIC_OP(op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
	int output; \
 \
	__asm__ __volatile__ ( \
		"1: %0 = ""memw_locked(%1);\n" \
		" %0 = "#op "(%0,%2);\n" \
		" memw_locked(%1,P3)=%0;\n" \
		" if (!P3) jump 1b;\n" \
		: "=&r" (output) \
		: "r" (&v->counter), "r" (i) \
		: "memory", "p3" \
	); \
}

#define ATOMIC_OP_RETURN(op) \
static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
	int output; \
 \
	__asm__ __volatile__ ( \
		"1: %0 = memw_locked(%1);\n" \
		" %0 = "#op "(%0,%2);\n" \
		" memw_locked(%1,P3)=%0;\n" \
		" if (!P3) jump 1b;\n" \
		: "=&r" (output) \
		: "r" (&v->counter), "r" (i) \
		: "memory", "p3" \
	); \
	return output; \
}

#define ATOMIC_FETCH_OP(op) \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
	int output, val; \
 \
	__asm__ __volatile__ ( \
		"1: %0 = memw_locked(%2);\n" \
		" %1 = "#op "(%0,%3);\n" \
		" memw_locked(%2,P3)=%1;\n" \
		" if (!P3) jump 1b;\n" \
		: "=&r" (output), "=&r" (val) \
		: "r" (&v->counter), "r" (i) \
		: "memory", "p3" \
	); \
	return output; \
}

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
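
/*
 * The macros above expand to the locked-loop implementations of:
 *
 *	arch_atomic_add(),  arch_atomic_add_return(),  arch_atomic_fetch_add()
 *	arch_atomic_sub(),  arch_atomic_sub_return(),  arch_atomic_fetch_sub()
 *	arch_atomic_and(),  arch_atomic_fetch_and()
 *	arch_atomic_or(),   arch_atomic_fetch_or()
 *	arch_atomic_xor(),  arch_atomic_fetch_xor()
 *
 * The _return variants return the new value; the fetch_ variants return
 * the value the counter held before the operation.
 */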

/**
 * arch_atomic_fetch_add_unless - add to the value unless it matches
 * @v: pointer to the atomic value
 * @a: amount to add
 * @u: value that suppresses the addition
 *
 * Atomically adds @a to @v, unless @v was equal to @u at the time of
 * the locked load.
 *
 * Returns the old value of @v.
 */
static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int __oldval;
	register int tmp;

	asm volatile(
		"1: %0 = memw_locked(%2);"
		" {"
		"   p3 = cmp.eq(%0, %4);"
		"   if (p3.new) jump:nt 2f;"
		"   %1 = add(%0, %3);"
		" }"
		" memw_locked(%2, p3) = %1;"
		" {"
		"   if (!p3) jump 1b;"
		" }"
		"2:"
		: "=&r" (__oldval), "=&r" (tmp)
		: "r" (v), "r" (a), "r" (u)
		: "memory", "p3"
	);
	return __oldval;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
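
/*
 * Illustrative sketch only (hypothetical caller, not part of this
 * header): the classic "increment unless it already dropped to zero"
 * pattern built on the primitive above.
 *
 *	static inline bool get_unless_zero(atomic_t *refs)
 *	{
 *		return arch_atomic_fetch_add_unless(refs, 1, 0) != 0;
 *	}
 */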

#endif