/*
 * Generic C implementation of atomic counter operations. Usable on
 * UP systems only. Do not include in machine independent code.
 *
 * Originally implemented for MN10300.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Licence as published by
 * the Free Software Foundation; either version 2 of the Licence, or
 * (at your option) any later version.
 */
#ifndef __ASM_GENERIC_ATOMIC_H
#define __ASM_GENERIC_ATOMIC_H

#include <asm/cmpxchg.h>

#ifdef CONFIG_SMP
/* Force people to define core atomics */
# if !defined(atomic_add_return) || !defined(atomic_sub_return) || \
     !defined(atomic_clear_mask) || !defined(atomic_set_mask)
#  error "SMP requires a little arch-specific magic"
# endif
#endif

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#ifndef atomic_read
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#endif

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i) (((v)->counter) = (i))
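
/*
 * Illustrative sketch, not part of the original header: declaring and
 * initialising a counter with the helpers above. The name "nr_active"
 * is hypothetical.
 *
 *	static atomic_t nr_active = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_active, 5);
 *	pr_info("active = %d\n", atomic_read(&nr_active));
 */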

#include <linux/irqflags.h>

/**
 * atomic_add_return - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns the result.
 */
#ifndef atomic_add_return
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int temp;

	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
	temp = v->counter;
	temp += i;
	v->counter = temp;
	raw_local_irq_restore(flags);

	return temp;
}
#endif
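
/*
 * Editorial note, not part of the original header: on a uniprocessor
 * kernel the only concurrency is from interrupts, so disabling local
 * interrupts around the plain read-modify-write is enough to make it
 * atomic. A hypothetical caller:
 *
 *	atomic_t v = ATOMIC_INIT(10);
 *	int now = atomic_add_return(3, &v);	// now == 13
 */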

/**
 * atomic_sub_return - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns the result.
 */
#ifndef atomic_sub_return
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int temp;

	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
	temp = v->counter;
	temp -= i;
	v->counter = temp;
	raw_local_irq_restore(flags);

	return temp;
}
#endif

/* Atomically add @i to @v and return true if the result is negative */
static inline int atomic_add_negative(int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static inline void atomic_add(int i, atomic_t *v)
{
	atomic_add_return(i, v);
}

static inline void atomic_sub(int i, atomic_t *v)
{
	atomic_sub_return(i, v);
}

static inline void atomic_inc(atomic_t *v)
{
	atomic_add_return(1, v);
}

static inline void atomic_dec(atomic_t *v)
{
	atomic_sub_return(1, v);
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
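
/*
 * Illustrative sketch, not part of the original header: the classic
 * reference-count pattern built from these helpers. "struct obj",
 * "obj_get", "obj_put" and "obj_destroy" are hypothetical.
 *
 *	static void obj_get(struct obj *o)
 *	{
 *		atomic_inc(&o->refs);
 *	}
 *
 *	static void obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_test(&o->refs))
 *			obj_destroy(o);
 *	}
 */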

#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))

#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
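
/*
 * Illustrative sketch, not part of the original header: unlike the
 * atomic_* helpers, cmpxchg_local() acts on a plain scalar and returns
 * the old value. "state" and "do_init" are hypothetical.
 *
 *	static unsigned long state;
 *
 *	if (cmpxchg_local(&state, 0UL, 1UL) == 0UL)
 *		do_init();	// we won the 0 -> 1 transition
 */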

/*
 * Atomically adds @a to @v, so long as @v was not already @u; returns
 * the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;
	return c;
}
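
/*
 * Usage note, not part of the original header: this is normally reached
 * through atomic_add_unless() in <linux/atomic.h>, which reports whether
 * the add actually happened:
 *
 *	took_ref = atomic_add_unless(&v, 1, 0);	// 0 if v was already 0
 */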

/**
 * atomic_clear_mask - Atomically clear bits in atomic variable
 * @mask: Mask of the bits to be cleared
 * @v: pointer of type atomic_t
 *
 * Atomically clears the bits set in @mask from @v.
 */
#ifndef atomic_clear_mask
static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	unsigned long flags;

	mask = ~mask;
	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
	v->counter &= mask;
	raw_local_irq_restore(flags);
}
#endif

/**
 * atomic_set_mask - Atomically set a mask in atomic variable
 * @mask: Mask of the bits to be set
 * @v: pointer of type atomic_t
 *
 * Atomically sets the bits set in @mask in @v.
 */
#ifndef atomic_set_mask
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	unsigned long flags;

	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
	v->counter |= mask;
	raw_local_irq_restore(flags);
}
#endif
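
/*
 * Illustrative sketch, not part of the original header: using one
 * atomic_t as a flag word. The flag names and "obj" are hypothetical.
 * Note that atomic_clear_mask() takes the bits to be cleared, not the
 * bits to keep.
 *
 *	#define OBJ_DIRTY	0x01
 *	#define OBJ_LOCKED	0x02
 *
 *	atomic_set_mask(OBJ_DIRTY, &obj->flags);
 *	atomic_clear_mask(OBJ_LOCKED, &obj->flags);
 */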

/* Assume that atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
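
/*
 * Illustrative sketch, not part of the original header: the canonical
 * use of these barriers is to order ordinary stores around an atomic
 * op that returns no value. "obj" and its members are hypothetical.
 *
 *	obj->state = DONE;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->pending);
 */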

#endif /* __KERNEL__ */
#endif /* __ASM_GENERIC_ATOMIC_H */