#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9
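/*
 * CP15 control register (CR1) bits.
 */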
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/
#define CR_EE	(1 << 25)	/* Exception (big) endian		*/
#define CR_TRE	(1 << 28)	/* TEX remap enable			*/
#define CR_AFE	(1 << 29)	/* Access flag enable			*/
#define CR_TE	(1 << 30)	/* Thumb exception enable		*/
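/*
 * Used to check that the compiler really did allocate the register we
 * asked for in an inline assembly sequence: the string is concatenated
 * with the asm text and causes assembly to stop with an error if the
 * two operand names differ.
 */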
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/irqflags.h>

#define __exception	__attribute__((section(".exception.text")))

struct thread_info;
struct task_struct;

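/* information about the system we're running on */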
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);

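/*
 * xchg() atomically swaps the value at *ptr with x and returns the old
 * value; the real work is done by __xchg() below.
 */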
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode, const char *cmd);
extern void (*arm_pm_restart)(char str, const char *cmd);

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

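/*
 * Barrier instruction selection: ARMv7 has native isb/dsb/dmb
 * instructions; ARMv6 and XScale3 use the equivalent CP15 c7
 * operations; older cores get a CP15 drain-write-buffer where
 * available and otherwise fall back to a compiler barrier.
 */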
#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#elif defined(CONFIG_CPU_FA526)
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif

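/*
 * On UP, a real dmb is only needed when talking to coherent (DMA)
 * masters; plain compiler barriers suffice for the smp_*() variants.
 * On SMP, all of them must be real data memory barriers.
 */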
#ifndef CONFIG_SMP
#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else
#define mb()		dmb()
#define rmb()		dmb()
#define wmb()		dmb()
#define smp_mb()	dmb()
#define smp_rmb()	dmb()
#define smp_wmb()	dmb()
#endif
#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

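/*
 * Cached copies of the CP15 control register value: cr_alignment with
 * the alignment trap (CR_A) bit set, cr_no_alignment with it clear.
 */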
extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
		     : : "r" (val) : "cc");
	isb();
}

#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif

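/*
 * CPACC_* build field values for the coprocessor access control
 * register (CP15 c1, c0, 2): full (user + kernel), kernel (SVC) only,
 * or disabled access for coprocessor n.
 */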
#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))

static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	    : "=r" (val) : : "cc");
	return val;
}

static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
		     : : "r" (val) : "cc");
	isb();
}
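/*
 * Allow the scheduler to perform context switches with interrupts
 * enabled; this keeps interrupt latency down when switch_mm() has
 * expensive work (e.g. cache maintenance on VIVT cores) to do.
 */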
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
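/*
 * switch_to(prev, next, last) switches from task `prev' to `next' and
 * returns the previously-running task in `last'; the actual switch is
 * performed by __switch_to in assembly.  schedule() itself provides the
 * memory barrier that stops GCC caching `current'.
 */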
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
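/*
 * On the StrongARM, "swp" bypasses the cache completely, leaving the
 * cache inconsistent with memory.  Flag it as buggy so that __xchg()
 * falls back to a load/store sequence with interrupts disabled.
 */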
#define swp_is_buggy
#endif

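/*
 * Atomic exchange: ldrex/strex on ARMv6+, an IRQ-disabled load/store
 * sequence where swp is buggy (StrongARM, UP only), and the swp/swpb
 * instructions everywhere else.
 */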
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	smp_mb();

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}
	smp_mb();

	return ret;
}

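/*
 * disable_hlt()/enable_hlt() control whether the idle loop is allowed
 * to put the CPU into its low-power idle (halt) state.
 */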
extern void disable_hlt(void);
extern void enable_hlt(void);

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif
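/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU.  Always
 * make them available.
 */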
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#else	/* __LINUX_ARM_ARCH__ >= 6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

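/*
 * cmpxchg only supports 32-bit operands here: pre-ARMv6K cores lack the
 * byte and halfword exclusive accessors (ldrexb/ldrexh).
 */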
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	switch (size) {
#ifdef CONFIG_CPU_32v6K
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif /* CONFIG_CPU_32v6K */
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq	%0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}
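/*
 * Fully ordered cmpxchg: a memory barrier on each side of the
 * operation.
 */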
static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

#define cmpxchg(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
					  (unsigned long)(o),		\
					  (unsigned long)(n),		\
					  sizeof(*(ptr))))

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifndef CONFIG_CPU_32v6K
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

#define cmpxchg_local(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
					     (unsigned long)(o),	\
					     (unsigned long)(n),	\
					     sizeof(*(ptr))))

#ifdef CONFIG_CPU_32v6K
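/*
 * 64-bit cmpxchg built on ldrexd/strexd (ARMv6K and later).  The
 * operands are pinned to even/odd register pairs (r0/r1, r2/r3, r4/r5)
 * because the doubleword exclusive instructions require them.
 */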
static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	register unsigned long long oldval asm("r0");
	register unsigned long long __old asm("r2") = old;
	register unsigned long long __new asm("r4") = new;
	unsigned long res;

	do {
		asm volatile(
		"	@ __cmpxchg8\n"
		"	ldrexd	%1, %H1, [%2]\n"
		"	mov	%0, #0\n"
		"	teq	%1, %3\n"
		"	teqeq	%H1, %H3\n"
		"	strexdeq %0, %4, %H4, [%2]\n"
			: "=&r" (res), "=&r" (oldval)
			: "r" (ptr), "Ir" (__old), "r" (__new)
			: "memory", "cc");
	} while (res);

	return oldval;
}

static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
						unsigned long long old,
						unsigned long long new)
{
	unsigned long long ret;

	smp_mb();
	ret = __cmpxchg64(ptr, old, new);
	smp_mb();

	return ret;
}

#define cmpxchg64(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
					    (unsigned long long)(o),	\
					    (unsigned long long)(n)))

#define cmpxchg64_local(ptr,o,n)					\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					 (unsigned long long)(o),	\
					 (unsigned long long)(n)))

#else	/* !CONFIG_CPU_32v6K */

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#endif	/* CONFIG_CPU_32v6K */

#endif	/* __LINUX_ARM_ARCH__ < 6 */

#endif	/* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif	/* __KERNEL__ */

#endif	/* __ASM_ARM_SYSTEM_H */