#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/atomic.h>
#include <asm/futex.h>
#include <arch/chip.h>

/* See <asm/atomic_32.h> */
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()

/*
 * A block of memory containing locks for atomic ops.  Each instance
 * of this struct will be homed on a different CPU.
 */
struct atomic_locks_on_cpu {
	int lock[ATOMIC_HASH_L2_SIZE];
} __attribute__((aligned(ATOMIC_HASH_L2_SIZE * 4)));

static DEFINE_PER_CPU(struct atomic_locks_on_cpu, atomic_lock_pool);

/* The locks we'll use until __init_atomic_per_cpu is called. */
static struct atomic_locks_on_cpu __initdata initial_atomic_locks;

/* Hash into this vector to get a pointer to the lock for a given atomic. */
struct atomic_locks_on_cpu *atomic_lock_ptr[ATOMIC_HASH_L1_SIZE]
	__write_once = {
	[0 ... ATOMIC_HASH_L1_SIZE-1] = &initial_atomic_locks
};

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

/* This page is remapped on startup to be hash-for-home. */
int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

static inline int *__atomic_hashed_lock(volatile void *v)
{
	/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec.S */
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
	unsigned long i =
		(unsigned long) v & ((PAGE_SIZE-1) & -sizeof(long long));
	unsigned long n = __insn_crc32_32(0, i);

	/* Grab high bits for L1 index. */
	unsigned long l1_index = n >> ((sizeof(n) * 8) - ATOMIC_HASH_L1_SHIFT);
	/* Grab low bits for L2 index. */
	unsigned long l2_index = n & (ATOMIC_HASH_L2_SIZE - 1);

	return &atomic_lock_ptr[l1_index]->lock[l2_index];
#else
	/*
	 * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
	 * Using mm works here because atomic_locks is page aligned.
	 */
	unsigned long ptr = __insn_mm((unsigned long)v >> 1,
				      (unsigned long)atomic_locks,
				      2, (ATOMIC_HASH_SHIFT + 2) - 1);
	return (int *)ptr;
#endif
}
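
/*
 * For illustration: the mm-based hash in the #else branch above is
 * equivalent to the following portable C (a sketch, assuming
 * ATOMIC_HASH_SIZE == 1 << ATOMIC_HASH_SHIFT):
 *
 *	unsigned long index =
 *		((unsigned long)v >> 3) & (ATOMIC_HASH_SIZE - 1);
 *	return &atomic_locks[index];
 *
 * i.e. bits [3, 3 + ATOMIC_HASH_SHIFT) of the address pick the lock,
 * and the mm instruction splices them into the page-aligned
 * atomic_locks base in a single operation.
 */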

#ifdef CONFIG_SMP
/* Return whether the passed pointer is a valid atomic lock pointer. */
static int is_atomic_lock(int *p)
{
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
	int i;
	for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) {
		if (p >= &atomic_lock_ptr[i]->lock[0] &&
		    p < &atomic_lock_ptr[i]->lock[ATOMIC_HASH_L2_SIZE]) {
			return 1;
		}
	}
	return 0;
#else
	return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
#endif
}

/*
 * Release a hashed atomic lock on behalf of the fault-recovery path,
 * sanity-checking that the pointer really is a lock word and that
 * the lock is currently held (i.e. set to 1).
 */
void __atomic_fault_unlock(int *irqlock_word)
{
	BUG_ON(!is_atomic_lock(irqlock_word));
	BUG_ON(*irqlock_word != 1);
	*irqlock_word = 0;
}
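
/*
 * A sketch of the caller's side, with hypothetical helper names (the
 * real code lives in the trap/fault-handling paths): if an atomic
 * fast path takes a page fault while holding its hashed lock, the
 * handler must drop the lock before servicing the fault, or another
 * atomic that hashes to the same lock word could deadlock:
 *
 *	if (fault_came_from_atomic_fastpath(regs))
 *		__atomic_fault_unlock(trap_frame_lock_word(regs));
 */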

#endif /* CONFIG_SMP */

static inline int *__atomic_setup(volatile void *v)
{
	/* Issue a load to the target to bring it into cache. */
	*(volatile int *)v;
	return __atomic_hashed_lock(v);
}
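
/*
 * The __atomic_* and __atomic64_* routines called below are the
 * assembly slow paths (see atomic_asm_32.S).  Roughly, each performs
 * its read-modify-write under the per-address hashed lock -- a
 * sketch of the protocol, not the literal assembly:
 *
 *	int *lock = __atomic_setup(v);
 *	spin until a test-and-set of *lock (0 -> 1) succeeds;
 *	old = *p;			// then, e.g. for xchg:
 *	*p = n;
 *	*lock = 0;			// release
 *	return old (and an error flag, packed in struct __get_user);
 */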

int _atomic_xchg(atomic_t *v, int n)
{
	return __atomic_xchg(&v->counter, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);

int _atomic_xchg_add(atomic_t *v, int i)
{
	return __atomic_xchg_add(&v->counter, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);

int _atomic_xchg_add_unless(atomic_t *v, int a, int u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic_xchg_add_unless(&v->counter, __atomic_setup(v), u, a)
		.val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);

int _atomic_cmpxchg(atomic_t *v, int o, int n)
{
	return __atomic_cmpxchg(&v->counter, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);

unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_or((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_or);

unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_andn);

unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_xor((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_xor);

u64 _atomic64_xchg(atomic64_t *v, u64 n)
{
	return __atomic64_xchg(&v->counter, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xchg);

u64 _atomic64_xchg_add(atomic64_t *v, u64 i)
{
	return __atomic64_xchg_add(&v->counter, __atomic_setup(v), i);
}
EXPORT_SYMBOL(_atomic64_xchg_add);

u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic64_xchg_add_unless(&v->counter, __atomic_setup(v),
					  u, a);
}
EXPORT_SYMBOL(_atomic64_xchg_add_unless);

u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
{
	return __atomic64_cmpxchg(&v->counter, __atomic_setup(v), o, n);
}
EXPORT_SYMBOL(_atomic64_cmpxchg);
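
/*
 * Note that the 64-bit routines reuse the 32-bit hashed locks:
 * __atomic_setup() hashes only the address, so an atomic64_t's
 * counter selects a lock exactly as an atomic_t at the same address
 * would.  For example (a sketch):
 *
 *	atomic64_t v64;
 *	_atomic64_xchg(&v64, 42ULL);	// serializes on the same lock
 *					// as a 32-bit atomic there
 */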

static inline int *__futex_setup(int __user *v)
{
	/*
	 * Issue a prefetch to the counter to bring it into cache.
	 * As with __atomic_setup(), we can't simply read the word,
	 * since a user address might fault; prefetching is safe.
	 */
	__insn_prefetch(v);
	return __atomic_hashed_lock((int __force *)v);
}

struct __get_user futex_set(u32 __user *v, int i)
{
	return __atomic_xchg((int __force *)v, __futex_setup(v), i);
}

struct __get_user futex_add(u32 __user *v, int n)
{
	return __atomic_xchg_add((int __force *)v, __futex_setup(v), n);
}

struct __get_user futex_or(u32 __user *v, int n)
{
	return __atomic_or((int __force *)v, __futex_setup(v), n);
}

struct __get_user futex_andn(u32 __user *v, int n)
{
	return __atomic_andn((int __force *)v, __futex_setup(v), n);
}

struct __get_user futex_xor(u32 __user *v, int n)
{
	return __atomic_xor((int __force *)v, __futex_setup(v), n);
}

struct __get_user futex_cmpxchg(u32 __user *v, int o, int n)
{
	return __atomic_cmpxchg((int __force *)v, __futex_setup(v), o, n);
}
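
/*
 * The futex_* routines above are called from this architecture's
 * <asm/futex.h> operation handlers.  A sketch of the caller's side,
 * not a verbatim excerpt -- e.g. for FUTEX_OP_ADD:
 *
 *	struct __get_user g = futex_add(uaddr, oparg);
 *	if (g.err)
 *		return g.err;	// -EFAULT: fault the page in and retry
 *	oldval = g.val;
 */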

/*
 * If any of the atomic or futex routines hit a bad address (not in
 * the page tables at kernel PL) this routine is called.  The futex
 * routines are never used on kernel space, and the normal atomics
 * and bitops are never used on user space.  So a fault on kernel
 * space must be fatal, but a fault on user space is a futex fault:
 * return -EFAULT so the futex code can fault the page in and retry
 * the operation.
 */
struct __get_user __atomic_bad_address(int __user *addr)
{
	if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
		panic("Bad address used for kernel atomic op: %p\n", addr);
	return (struct __get_user) { .err = -EFAULT };
}

#if CHIP_HAS_CBOX_HOME_MAP()
static int __init noatomichash(char *str)
{
	pr_warning("noatomichash is deprecated.\n");
	return 1;
}
__setup("noatomichash", noatomichash);
#endif

void __init __init_atomic_per_cpu(void)
{
#if ATOMIC_LOCKS_FOUND_VIA_TABLE()

	unsigned int i;
	int actual_cpu;

	/*
	 * Before this is called from setup, every entry of
	 * atomic_lock_ptr points at the single boot-time lock block
	 * (initial_atomic_locks).  Here we spread the entries across
	 * the per-cpu atomic_lock_pool instances, so the lock words
	 * (and hence the cache-line traffic for atomic operations)
	 * are distributed over the possible cpus: walk
	 * cpu_possible_mask round-robin, giving the i-th table entry
	 * the pool of the next possible cpu and wrapping back to the
	 * first cpu when we run off the end of the mask.
	 */
	actual_cpu = cpumask_first(cpu_possible_mask);

	for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) {
		/*
		 * Preincrement to slightly bias against using cpu 0,
		 * which has plenty of stuff homed on it already.
		 */
		actual_cpu = cpumask_next(actual_cpu, cpu_possible_mask);
		if (actual_cpu >= nr_cpu_ids)
			actual_cpu = cpumask_first(cpu_possible_mask);

		atomic_lock_ptr[i] = &per_cpu(atomic_lock_pool, actual_cpu);
	}
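
	/*
	 * For example (hypothetical sizes): with cpus 0-3 possible
	 * and ATOMIC_HASH_L1_SIZE == 8, the loop above assigns table
	 * entries to cpus 1, 2, 3, 0, 1, 2, 3, 0 -- the preincrement
	 * skips cpu 0 on the first pass, and the explicit
	 * cpumask_first() fallback provides the wraparound.
	 */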

#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

	/* Validate power-of-two and "bigger than cpus" assumptions. */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
	BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);

	/*
	 * The hash in __atomic_hashed_lock() splices low bits of the
	 * address into the atomic_locks base with the mm instruction,
	 * which only works because the array is page-aligned.
	 */
	BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);

	/* The locks must all fit on one page. */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);

	/*
	 * We use the page offset of the atomic value's address as
	 * an index into atomic_locks, excluding the low 3 bits.
	 * That should not produce more indices than ATOMIC_HASH_SIZE.
	 */
	BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
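
	/*
	 * Worked example (assuming 64 KB pages, PAGE_SHIFT == 16):
	 * PAGE_SIZE >> 3 == 8192, so ATOMIC_HASH_SIZE must be at
	 * least 8192 entries; at sizeof(int) == 4 that is 32 KB of
	 * lock words, comfortably within the single page checked
	 * above.
	 */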

#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */

	/* The futex code makes this assumption, so we validate it here. */
	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
}