1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_MULTI_IRQ_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
#include <asm/system_info.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>
#include <asm/probes.h>
37
/*
 * Interrupt handling.
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	@ Indirect dispatch: handle_arch_irq holds the function pointer
	@ installed by the platform; call it with regs in r0 and return
	@ to the local label 9997 below.
	ldr	r1, =handle_arch_irq
	mov	r0, sp
	badr	lr, 9997f
	ldr	pc, [r1]
#else
	arch_irq_handler_default
#endif
9997:
	.endm
52
53 .macro pabt_helper
54 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
55#ifdef MULTI_PABORT
56 ldr ip, .LCprocfns
57 mov lr, pc
58 ldr pc, [ip,
59#else
60 bl CPU_PABORT_HANDLER
61#endif
62 .endm
63
64 .macro dabt_helper
65
66 @
67 @ Call the processor-specific abort handler:
68 @
69 @ r2 - pt_regs
70 @ r4 - aborted context pc
71 @ r5 - aborted context psr
72 @
73 @ The abort handler must return the aborted address in r0, and
74 @ the fault status register in r1. r9 must be preserved.
75 @
76#ifdef MULTI_DABORT
77 ldr ip, .LCprocfns
78 mov lr, pc
79 ldr pc, [ip,
80#else
81 bl CPU_DABORT_HANDLER
82#endif
83 .endm
84
#ifdef CONFIG_KPROBES
	.section	.kprobes.text, "ax", %progbits
#else
	.text
#endif
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #PT_REGS_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm
102
__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)
117
__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)
141
142
/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
151
152 .macro svc_entry, stack_hole=0, trace=1, uaccess=1
153 UNWIND(.fnstart )
154 UNWIND(.save {r0 - pc} )
155 sub sp, sp,
156#ifdef CONFIG_THUMB2_KERNEL
157 SPFIX( str r0, [sp] ) @ temporarily saved
158 SPFIX( mov r0, sp )
159 SPFIX( tst r0,
160 SPFIX( ldr r0, [sp] ) @ restored
161#else
162 SPFIX( tst sp,
163#endif
164 SPFIX( subeq sp, sp,
165 stmia sp, {r1 - r12}
166
167 ldmia r0, {r3 - r5}
168 add r7, sp,
169 mov r6,
170 add r2, sp,
171 SPFIX( addeq r2, r2,
172 str r3, [sp,
173 @ from the exception stack
174
175 mov r3, lr
176
177 @
178 @ We are now ready to fill in the remaining blanks on the stack:
179 @
180 @ r2 - sp_svc
181 @ r3 - lr_svc
182 @ r4 - lr_<exception>, already fixed up for correct return/restart
183 @ r5 - spsr_<exception>
184 @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
185 @
186 stmia r7, {r2 - r6}
187
188 get_thread_info tsk
189 ldr r0, [tsk,
190 mov r1,
191 str r1, [tsk,
192 str r0, [sp,
193
194 uaccess_save r0
195 .if \uaccess
196 uaccess_disable r0
197 .endif
198
199 .if \trace
200#ifdef CONFIG_TRACE_IRQFLAGS
201 bl trace_hardirqs_off
202#endif
203 .endif
204 .endm
205
206 .align 5
207__dabt_svc:
208 svc_entry uaccess=0
209 mov r2, sp
210 dabt_helper
211 THUMB( ldr r5, [sp,
212 svc_exit r5 @ return from exception
213 UNWIND(.fnend )
214ENDPROC(__dabt_svc)
215
216 .align 5
217__irq_svc:
218 svc_entry
219 irq_handler
220
221#ifdef CONFIG_PREEMPT
222 ldr r8, [tsk,
223 ldr r0, [tsk,
224 teq r8,
225 movne r0,
226 tst r0,
227 blne svc_preempt
228#endif
229
230 svc_exit r5, irq = 1 @ return from exception
231 UNWIND(.fnend )
232ENDPROC(__irq_svc)
233
234 .ltorg
235
#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	reteq	r8				@ go again
	b	1b
#endif
245
__und_fault:
	@ Correct the PC such that it is pointing at the instruction
	@ which caused the fault.  If the faulting instruction was ARM
	@ the PC will be pointing at the next instruction, and have to
	@ subtract 4.  Otherwise, it is Thumb, and the PC will be
	@ pointing at the second half of the Thumb instruction.  We
	@ have to subtract 2.
	ldr	r2, [r0, #S_PC]			@ r0 = regs, r1 = correction
	sub	r2, r2, r1
	str	r2, [r0, #S_PC]
	b	do_undefinstr
ENDPROC(__und_fault)
258
259 .align 5
260__und_svc:
261#ifdef CONFIG_KPROBES
262 @ If a kprobe is about to simulate a "stmdb sp..." instruction,
263 @ it obviously needs free stack space which then will belong to
264 @ the saved context.
265 svc_entry MAX_STACK_SIZE
266#else
267 svc_entry
268#endif
269 @
270 @ call emulation code, which returns using r9 if it has emulated
271 @ the instruction, or the more conventional lr if we are to treat
272 @ this as a real undefined instruction
273 @
274 @ r0 - instruction
275 @
276#ifndef CONFIG_THUMB2_KERNEL
277 ldr r0, [r4,
278#else
279 mov r1,
280 ldrh r0, [r4,
281 cmp r0,
282 blo __und_svc_fault
283 ldrh r9, [r4] @ bottom 16 bits
284 add r4, r4,
285 str r4, [sp,
286 orr r0, r9, r0, lsl
287#endif
288 badr r9, __und_svc_finish
289 mov r2, r4
290 bl call_fpe
291
292 mov r1,
293__und_svc_fault:
294 mov r0, sp @ struct pt_regs *regs
295 bl __und_fault
296
297__und_svc_finish:
298 get_thread_info tsk
299 ldr r5, [sp,
300 svc_exit r5 @ return from exception
301 UNWIND(.fnend )
302ENDPROC(__und_svc)
303
304 .align 5
305__pabt_svc:
306 svc_entry
307 mov r2, sp @ regs
308 pabt_helper
309 svc_exit r5 @ return from exception
310 UNWIND(.fnend )
311ENDPROC(__pabt_svc)
312
313 .align 5
314__fiq_svc:
315 svc_entry trace=0
316 mov r0, sp @ struct pt_regs *regs
317 bl handle_fiq_as_nmi
318 svc_exit_via_fiq
319 UNWIND(.fnend )
320ENDPROC(__fiq_svc)
321
322 .align 5
323.LCcralign:
324 .word cr_alignment
325#ifdef MULTI_DABORT
326.LCprocfns:
327 .word processor
328#endif
329.LCfp:
330 .word fp_enter
331
/*
 * Abort mode handlers
 */

@
@ Taking a FIQ in abort mode is similar to taking a FIQ in SVC mode
@ and reuses the same macros. However in abort mode we must also
@ save/restore lr_abt and spsr_abt to make nested aborts safe.
@
	.align 5
__fiq_abt:
	svc_entry trace=0

 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	r1, lr		@ Save lr_abt
	mrs	r2, spsr	@ Save spsr_abt, abort is now safe
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	stmfd	sp!, {r1 - r2}

	add	r0, sp, #8			@ struct pt_regs *regs
	bl	handle_fiq_as_nmi

	ldmfd	sp!, {r1 - r2}
 ARM(	msr	cpsr_c, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #ABT_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )
	mov	lr, r1		@ Restore lr_abt, abort is unsafe
	msr	spsr_cxsf, r2	@ Restore spsr_abt
 ARM(	msr	cpsr_c, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( mov	r0, #SVC_MODE | PSR_I_BIT | PSR_F_BIT )
 THUMB( msr	cpsr_c, r0 )

	svc_exit_via_fiq
 UNWIND(.fnend		)
ENDPROC(__fiq_abt)
371
/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so should PT_REGS_SIZE
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (PT_REGS_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
381
382 .macro usr_entry, trace=1, uaccess=1
383 UNWIND(.fnstart )
384 UNWIND(.cantunwind ) @ don't unwind the user space
385 sub sp, sp,
386 ARM( stmib sp, {r1 - r12} )
387 THUMB( stmia sp, {r0 - r12} )
388
389 ATRAP( mrc p15, 0, r7, c1, c0, 0)
390 ATRAP( ldr r8, .LCcralign)
391
392 ldmia r0, {r3 - r5}
393 add r0, sp,
394 mov r6,
395
396 str r3, [sp] @ save the "real" r0 copied
397 @ from the exception stack
398
399 ATRAP( ldr r8, [r8,
400
401 @
402 @ We are now ready to fill in the remaining blanks on the stack:
403 @
404 @ r4 - lr_<exception>, already fixed up for correct return/restart
405 @ r5 - spsr_<exception>
406 @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
407 @
408 @ Also, separately save sp_usr and lr_usr
409 @
410 stmia r0, {r4 - r6}
411 ARM( stmdb r0, {sp, lr}^ )
412 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
413
414 .if \uaccess
415 uaccess_disable ip
416 .endif
417
418 @ Enable the alignment trap while in kernel mode
419 ATRAP( teq r8, r7)
420 ATRAP( mcrne p15, 0, r8, c1, c0, 0)
421
422 @
423 @ Clear FP to mark the first stack frame
424 @
425 zero_fp
426
427 .if \trace
428#ifdef CONFIG_TRACE_IRQFLAGS
429 bl trace_hardirqs_off
430#endif
431 ct_user_exit save = 0
432 .endif
433 .endm
434
435 .macro kuser_cmpxchg_check
436
437#ifndef CONFIG_MMU
438
439#else
440 @ Make sure our user space atomic helper is restarted
441 @ if it was interrupted in a critical region. Here we
442 @ perform a quick test inline since it should be false
443 @ 99.9999% of the time. The rest is done out of line.
444 cmp r4,
445 blhs kuser_cmpxchg64_fixup
446#endif
447#endif
448 .endm
449
450 .align 5
451__dabt_usr:
452 usr_entry uaccess=0
453 kuser_cmpxchg_check
454 mov r2, sp
455 dabt_helper
456 b ret_from_exception
457 UNWIND(.fnend )
458ENDPROC(__dabt_usr)
459
460 .align 5
461__irq_usr:
462 usr_entry
463 kuser_cmpxchg_check
464 irq_handler
465 get_thread_info tsk
466 mov why,
467 b ret_to_user_from_irq
468 UNWIND(.fnend )
469ENDPROC(__irq_usr)
470
471 .ltorg
472
473 .align 5
474__und_usr:
475 usr_entry uaccess=0
476
477 mov r2, r4
478 mov r3, r5
479
480 @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
481 @ faulting instruction depending on Thumb mode.
482 @ r3 = regs->ARM_cpsr
483 @
484 @ The emulation code returns using r9 if it has emulated the
485 @ instruction, or the more conventional lr if we are to treat
486 @ this as a real undefined instruction
487 @
488 badr r9, ret_from_exception
489
490 @ IRQs must be enabled before attempting to read the instruction from
491 @ user space since that could cause a page/translation fault if the
492 @ page table was modified by another CPU.
493 enable_irq
494
495 tst r3,
496 bne __und_usr_thumb
497 sub r4, r2,
4981: ldrt r0, [r4]
499 ARM_BE8(rev r0, r0) @ little endian instruction
500
501 uaccess_disable ip
502
503 @ r0 = 32-bit ARM instruction which caused the exception
504 @ r2 = PC value for the following instruction (:= regs->ARM_pc)
505 @ r4 = PC value for the faulting instruction
506 @ lr = 32-bit undefined instruction function
507 badr lr, __und_usr_fault_32
508 b call_fpe
509
510__und_usr_thumb:
511 @ Thumb instruction
512 sub r4, r2,
513
514
515
516
517
518
519
520
521
522#define NEED_CPU_ARCHITECTURE
523 ldr r5, .LCcpu_architecture
524 ldr r5, [r5]
525 cmp r5,
526 blo __und_usr_fault_16 @ 16bit undefined instruction
527
528
529
530
531
532 .arch armv6t2
533#endif
5342: ldrht r5, [r4]
535ARM_BE8(rev16 r5, r5) @ little endian instruction
536 cmp r5,
537 blo __und_usr_fault_16_pan @ 16bit undefined instruction
5383: ldrht r0, [r2]
539ARM_BE8(rev16 r0, r0) @ little endian instruction
540 uaccess_disable ip
541 add r2, r2,
542 str r2, [sp,
543 orr r0, r0, r5, lsl
544 badr lr, __und_usr_fault_32
545 @ r0 = the two 16-bit Thumb instructions which caused the exception
546 @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
547 @ r4 = PC value for the first 16-bit Thumb instruction
548 @ lr = 32bit undefined instruction function
549
550
551
552#ifdef CONFIG_CPU_32v6K
553 .arch armv6k
554#else
555 .arch armv6
556#endif
557#endif
558#else
559 b __und_usr_fault_16
560#endif
561 UNWIND(.fnend)
562ENDPROC(__und_usr)
563
/*
 * The out of line fixup for the ldrt instructions above.
 */
	.pushsection .text.fixup, "ax"
	.align	2
4:	str	r4, [sp, #S_PC]			@ retry current instruction
	ret	r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection
579
/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here. Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them. Plus, there's
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode (32-bit ARM or two 16-bit Thumb)
 *  r2  = PC value to resume execution after successful emulation
 *  r9  = normal "successful" return address
 *  r10 = this threads thread_info structure
 *  lr  = unrecognised instruction return address
 * IRQs enabled, FIQs enabled.
 */
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	get_thread_info r10			@ get current thread
	adr	r6, .LCneon_thumb_opcodes	@ adr of comp. pattern table
	b	2f
#endif
call_fpe:
	get_thread_info r10			@ get current thread
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes		@ adr of comp. pattern table
2:	ldr	r5, [r6], #4			@ mask value
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r5, #0				@ end mask?
	beq	1f
	and	r8, r0, r5
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	reteq	lr
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	ret.w	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	ret.w	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	ret.w	lr				@ CP#4
	ret.w	lr				@ CP#5
	ret.w	lr				@ CP#6
#endif
	ret.w	lr				@ CP#7
	ret.w	lr				@ CP#8
	ret.w	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	ret.w	lr				@ CP#10 (VFP)
	ret.w	lr				@ CP#11 (VFP)
#endif
	ret.w	lr				@ CP#12
	ret.w	lr				@ CP#13
	ret.w	lr				@ CP#14 (Debug)
	ret.w	lr				@ CP#15 (Control)
678
#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif
684
#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif
708
do_fpe:
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point
713
/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

	.pushsection .data
	.align	2
ENTRY(fp_enter)
	.word	no_fp
	.popsection

ENTRY(no_fp)
	ret	lr
ENDPROC(no_fp)
732
__und_usr_fault_32:
	mov	r1, #4
	b	1f
__und_usr_fault_16_pan:
	uaccess_disable ip
__und_usr_fault_16:
	mov	r1, #2
1:	mov	r0, sp
	badr	lr, ret_from_exception
	b	__und_fault
ENDPROC(__und_usr_fault_32)
ENDPROC(__und_usr_fault_16)
745
746 .align 5
747__pabt_usr:
748 usr_entry
749 mov r2, sp @ regs
750 pabt_helper
751 UNWIND(.fnend )
752
753
754
755
756ENTRY(ret_from_exception)
757 UNWIND(.fnstart )
758 UNWIND(.cantunwind )
759 get_thread_info tsk
760 mov why,
761 b ret_to_user
762 UNWIND(.fnend )
763ENDPROC(__pabt_usr)
764ENDPROC(ret_from_exception)
765
766 .align 5
767__fiq_usr:
768 usr_entry trace=0
769 kuser_cmpxchg_check
770 mov r0, sp @ struct pt_regs *regs
771 bl handle_fiq_as_nmi
772 get_thread_info tsk
773 restore_user_regs fast = 0, offset = 0
774 UNWIND(.fnend )
775ENDPROC(__fiq_usr)
776
/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	add	ip, r1, #TI_CPU_SAVE
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
	ldr	r4, [r2, #TI_TP_VALUE]
	ldr	r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	switch_tls r1, r4, r5, r3, r7
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	.if (TSK_STACK_CANARY > IMM12_MASK)
	add	r7, r7, #TSK_STACK_CANARY & ~IMM12_MASK
	.endif
	ldr	r7, [r7, #TSK_STACK_CANARY & IMM12_MASK]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
 UNWIND(.fnend		)
ENDPROC(__switch_to)
825
826 __INIT
827
828
829
830
831
832
833
834
835
836
837
838 THUMB( .arm )
839
840 .macro usr_ret, reg
841#ifdef CONFIG_ARM_THUMB
842 bx \reg
843#else
844 ret \reg
845#endif
846 .endm
847
848 .macro kuser_pad, sym, size
849 .if (. - \sym) & 3
850 .rept 4 - (. - \sym) & 3
851 .byte 0
852 .endr
853 .endif
854 .rept (\size - (. - \sym)) / 4
855 .word 0xe7fddef1
856 .endr
857 .endm
858
#ifdef CONFIG_KUSER_HELPERS
	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:

/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if no then retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr

#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
2:	stmeqia	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}

	.text
kuser_cmpxchg64_fixup:
	@ Called from kuser_cmpxchg_fixup.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	ret	lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	kuser_pad __kuser_cmpxchg64, 64
938
__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	kuser_pad __kuser_memory_barrier, 32
944
__kuser_cmpxchg:				@ 0xffff0fc0

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr

	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	ret	lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else

	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]
	teqeq	r3, #1
	beq	1b
	rsbs	r0, r3, #0
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif

	kuser_pad __kuser_cmpxchg, 32
1004
__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	kuser_pad __kuser_get_tls, 16
	.rep	3
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version

__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:
1019
1020#endif
1021
1022 THUMB( .thumb )
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037 .macro vector_stub, name, mode, correction=0
1038 .align 5
1039
1040vector_\name:
1041 .if \correction
1042 sub lr, lr,
1043 .endif
1044
1045 @
1046 @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
1047 @ (parent CPSR)
1048 @
1049 stmia sp, {r0, lr} @ save r0, lr
1050 mrs lr, spsr
1051 str lr, [sp,
1052
1053 @
1054 @ Prepare for SVC32 mode. IRQs remain disabled.
1055 @
1056 mrs r0, cpsr
1057 eor r0, r0,
1058 msr spsr_cxsf, r0
1059
1060 @
1061 @ the branch table must immediately follow this code
1062 @
1063 and lr, lr,
1064 THUMB( adr r0, 1f )
1065 THUMB( ldr lr, [r0, lr, lsl
1066 mov r0, sp
1067 ARM( ldr lr, [pc, lr, lsl
1068 movs pc, lr @ branch to handler in SVC mode
1069ENDPROC(vector_\name)
1070
1071 .align 2
1072 @ handler addresses follow this label
10731:
1074 .endm
1075
1076 .section .stubs, "ax", %progbits
1077 @ This must be the first word
1078 .word vector_swi
1079
1080vector_rst:
1081 ARM( swi SYS_ERROR0 )
1082 THUMB( svc
1083 THUMB( nop )
1084 b vector_und
1085
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f
1107
/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f
1130
/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f
1153
/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f
1176
1177 .align 5
1178
1179
1180
1181
1182
1183
1184
1185
1186vector_addrexcptn:
1187 b vector_addrexcptn
1188
1189
1190
1191
1192
1193
1194
1195 vector_stub fiq, FIQ_MODE, 4
1196
1197 .long __fiq_usr @ 0 (USR_26 / USR_32)
1198 .long __fiq_svc @ 1 (FIQ_26 / FIQ_32)
1199 .long __fiq_svc @ 2 (IRQ_26 / IRQ_32)
1200 .long __fiq_svc @ 3 (SVC_26 / SVC_32)
1201 .long __fiq_svc @ 4
1202 .long __fiq_svc @ 5
1203 .long __fiq_svc @ 6
1204 .long __fiq_abt @ 7
1205 .long __fiq_svc @ 8
1206 .long __fiq_svc @ 9
1207 .long __fiq_svc @ a
1208 .long __fiq_svc @ b
1209 .long __fiq_svc @ c
1210 .long __fiq_svc @ d
1211 .long __fiq_svc @ e
1212 .long __fiq_svc @ f
1213
1214 .globl vector_fiq
1215
1216 .section .vectors, "ax", %progbits
1217.L__vectors_start:
1218 W(b) vector_rst
1219 W(b) vector_und
1220 W(ldr) pc, .L__vectors_start + 0x1000
1221 W(b) vector_pabt
1222 W(b) vector_dabt
1223 W(b) vector_addrexcptn
1224 W(b) vector_irq
1225 W(b) vector_fiq
1226
1227 .data
1228 .align 2
1229
1230 .globl cr_alignment
1231cr_alignment:
1232 .space 4
1233
1234#ifdef CONFIG_MULTI_IRQ_HANDLER
1235 .globl handle_arch_irq
1236handle_arch_irq:
1237 .space 4
1238#endif
1239