/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, NON
 *   INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <arch/chip.h>

/* This page is remapped on startup to be hash-for-home. */
int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;

int *__atomic_hashed_lock(volatile void *v)
{
	/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
	/*
	 * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
	 * Using mm works here because atomic_locks is page aligned.
	 */
	unsigned long ptr = __insn_mm((unsigned long)v >> 1,
				      (unsigned long)atomic_locks,
				      2, (ATOMIC_HASH_SHIFT + 2) - 1);
	return (int *)ptr;
}
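
/*
 * Illustrative sketch (not from the original source): assuming
 * ATOMIC_HASH_SIZE == (1 << ATOMIC_HASH_SHIFT), the "mm" bitfield-merge
 * instruction above should compute the same pointer as this portable C:
 *
 *	unsigned long idx = ((unsigned long)v >> 3) & (ATOMIC_HASH_SIZE - 1);
 *	return &atomic_locks[idx];
 *
 * i.e. the lock is chosen by bits [3, 3 + ATOMIC_HASH_SHIFT) of the
 * atomic value's address, merged into the page-aligned atomic_locks base.
 */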

#ifdef CONFIG_SMP
/* Return whether the passed pointer is a valid "atomic_lock" pointer. */
static int is_atomic_lock(int *p)
{
	return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
}

void __atomic_fault_unlock(int *irqlock_word)
{
	BUG_ON(!is_atomic_lock(irqlock_word));
	BUG_ON(*irqlock_word != 1);
	*irqlock_word = 0;
}

#endif /* CONFIG_SMP */

static inline int *__atomic_setup(volatile void *v)
{
	/* Issue a load to the target to bring it into cache. */
	*(volatile int *)v;
	return __atomic_hashed_lock(v);
}

int _atomic_xchg(int *v, int n)
{
	return __atomic32_xchg(v, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);
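
/*
 * For orientation (a simplified sketch, not the actual implementation):
 * the __atomic32_*() helpers are out-of-line assembly routines that
 * follow a hashed-spinlock pattern roughly equivalent to this pseudo-C,
 * with the real code also dealing with interrupts and page faults:
 *
 *	int *lock = __atomic_hashed_lock(v);
 *	... spin until a test-and-set of *lock observes 0 ...
 *	int old = *v;
 *	*v = n;
 *	*lock = 0;	... release the hashed lock ...
 *	return old;
 */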

int _atomic_xchg_add(int *v, int i)
{
	return __atomic32_xchg_add(v, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);

int _atomic_xchg_add_unless(int *v, int a, int u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic32_xchg_add_unless(v, __atomic_setup(v), u, a).val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);
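
/*
 * Semantics sketch (assumed from the generic atomic_add_unless()
 * contract, ignoring locking): add 'a' to *v unless *v equals 'u',
 * returning the prior value in either case:
 *
 *	int old = *v;
 *	if (old != u)
 *		*v = old + a;
 *	return old;
 */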

int _atomic_cmpxchg(int *v, int o, int n)
{
	return __atomic32_cmpxchg(v, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);

unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask)
{
	return __atomic32_fetch_or((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_or);

unsigned long _atomic_fetch_and(volatile unsigned long *p, unsigned long mask)
{
	return __atomic32_fetch_and((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_and);

unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask)
{
	return __atomic32_fetch_andn((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_andn);

unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask)
{
	return __atomic32_fetch_xor((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_xor);

long long _atomic64_xchg(long long *v, long long n)
{
	return __atomic64_xchg(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xchg);

long long _atomic64_xchg_add(long long *v, long long i)
{
	return __atomic64_xchg_add(v, __atomic_setup(v), i);
}
EXPORT_SYMBOL(_atomic64_xchg_add);

long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic64_xchg_add_unless(v, __atomic_setup(v), u, a);
}
EXPORT_SYMBOL(_atomic64_xchg_add_unless);

long long _atomic64_cmpxchg(long long *v, long long o, long long n)
{
	return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
}
EXPORT_SYMBOL(_atomic64_cmpxchg);

long long _atomic64_fetch_and(long long *v, long long n)
{
	return __atomic64_fetch_and(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_fetch_and);

long long _atomic64_fetch_or(long long *v, long long n)
{
	return __atomic64_fetch_or(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_fetch_or);

long long _atomic64_fetch_xor(long long *v, long long n)
{
	return __atomic64_fetch_xor(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_fetch_xor);

/*
 * If any of the atomic or futex routines hit a bad address (not in
 * the page tables at kernel PL) this routine is called.  The futex
 * routines are never used on kernel space, and the normal atomics and
 * bitops are never used on user space.  So a fault on kernel space
 * must be fatal, but a fault on userspace is a futex fault and we
 * need to return -EFAULT.  Note that the context this routine is
 * invoked in is the context of the "_atomic_xxx()" routines called
 * by the functions in this file.
 */
struct __get_user __atomic_bad_address(int __user *addr)
{
	if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
		panic("Bad address used for kernel atomic op: %p\n", addr);
	return (struct __get_user) { .err = -EFAULT };
}

void __init __init_atomic_per_cpu(void)
{
	/* Validate power-of-two and "bigger than cpus" assumption */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
	BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);

	/*
	 * On TILEPro we prefer to use a single hash-for-home
	 * page, since this means atomic operations are less
	 * likely to encounter a TLB fault and thus should
	 * in general perform faster.  You may wish to disable
	 * this in situations where few hash-for-home tiles
	 * are configured.
	 */
	BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);

	/* The locks must all fit on one page. */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);

	/*
	 * We use the page offset of the atomic value's address as
	 * an index into atomic_locks, excluding the low 3 bits.
	 * That should not produce more indices than ATOMIC_HASH_SIZE.
	 */
	BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
}
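
/*
 * Worked example of the sizing checks above (illustrative numbers,
 * assuming 64 KB pages): PAGE_SIZE >> 3 == 8192 distinct lock indices,
 * so ATOMIC_HASH_SIZE must be at least 8192; and since each lock is a
 * 4-byte int, up to 16384 locks (ATOMIC_HASH_SIZE * sizeof(int) ==
 * PAGE_SIZE) still fit on the single hash-for-home page.
 */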