1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <linux/init.h>
15#include <linux/linkage.h>
16#include <asm/assembler.h>
17#include <asm/errno.h>
18#include <asm/thread_info.h>
19#include <asm/memory.h>
20#include <asm/unistd.h>
21#include <generated/asm-offsets.h>
22#include "debug-macro.S"
23
24@
25@ Most of the stack format comes from struct pt_regs, but with
26@ the addition of 8 bytes for storing syscall args 5 and 6.
27@
28#define S_OFF 8
29
30
31
32
33
34
35
36#endif
37
38 .macro zero_fp
39#ifdef CONFIG_FRAME_POINTER
40 mov fp,
41#endif
42 .endm
43
44 .macro alignment_trap, rtemp
45#ifdef CONFIG_ALIGNMENT_TRAP
46 ldw \rtemp, .LCcralign
47 ldw \rtemp, [\rtemp]
48 movc p0.c1, \rtemp,
49#endif
50 .endm
51
52 .macro load_user_sp_lr, rd, rtemp, offset = 0
53 mov \rtemp, asr
54 xor \rtemp, \rtemp,
55 mov.a asr, \rtemp @ switch to the SUSR mode
56
57 ldw sp, [\rd+],
58 ldw lr, [\rd+],
59
60 xor \rtemp, \rtemp,
61 mov.a asr, \rtemp @ switch back to the PRIV mode
62 .endm
63
	@ priv_exit: return from a PRIV-mode exception.  The saved PSR in
	@ \rpsr is staged into bsr first; the final ldm.b reloads r16-pc
	@ and, as an exception-return form, also moves bsr into asr so the
	@ interrupted context resumes with its original processor state.
	@ Instruction order here is architectural -- do not reorder.
	.macro	priv_exit, rpsr
	mov.a	bsr, \rpsr
	ldm.w	(r0 - r15), [sp]+
	ldm.b	(r16 - pc), [sp]+		@ load r0 - pc, asr
	.endm
69
70 .macro restore_user_regs, fast = 0, offset = 0
71 ldw r1, [sp+],
72 ldw lr, [sp+],
73 mov.a bsr, r1 @ save in bsr_priv
74 .if \fast
75 add sp, sp,
76 ldm.w (r1 - r15), [sp]+ @ get calling r1 - r15
77 ldur (r16 - lr), [sp]+ @ get calling r16 - lr
78 .else
79 ldm.w (r0 - r15), [sp]+ @ get calling r0 - r15
80 ldur (r16 - lr), [sp]+ @ get calling r16 - lr
81 .endif
82 nop
83 add sp, sp,
84 mov.a pc, lr @ return
85 @ and move bsr_priv into asr
86 .endm
87
88 .macro get_thread_info, rd
89 mov \rd, sp >>
90 mov \rd, \rd <<
91 .endm
92
93 .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
94 ldw \base, =(PKUNITY_INTC_BASE)
95 ldw \irqstat, [\base+],
96 ldw \tmp, [\base+],
97 and.a \irqstat, \irqstat, \tmp
98 beq 1001f
99 cntlz \irqnr, \irqstat
100 rsub \irqnr, \irqnr,
1011001:
102 .endm
103
#ifdef CONFIG_DEBUG_LL
	@ printreg: low-level debug helper.  Prints "<reg-hex>:<pc-hex>: epip4d"
	@ via the DEBUG_LL printch/printhex8/printascii routines.  r0-r3 and
	@ lr are spilled to the 5-word scratch area at 901: (r0-r3 at +0..+12,
	@ lr at +0x10) and restored afterwards, so the macro is transparent
	@ to the caller apart from flags.
	.macro	printreg, reg, temp
		adr	\temp, 901f
		stm	(r0-r3), [\temp]+	@ save r0-r3 to scratch
		stw	lr, [\temp+], #0x10	@ save lr (5th word)
		mov	r0, \reg
		b.l	printhex8		@ print the register value
		mov	r0, #':'
		b.l	printch			@ separator
		mov	r0, pc
		b.l	printhex8		@ print (roughly) the call site
		adr	r0, 902f
		b.l	printascii		@ trailing tag string
		adr	\temp, 901f
		ldm	(r0-r3), [\temp]+	@ restore r0-r3
		ldw	lr, [\temp+], #0x10	@ restore lr
		b	903f
901:	.word	0, 0, 0, 0, 0		@ r0-r3, lr scratch save area
902:	.asciz	": epip4d\n"
	.align
903:
	.endm
#endif
127
128
129
130
131
132
133
134
135
@ Register aliases used by the syscall entry/exit paths below.  Note
@ that tbl and why intentionally share r22: tbl is only live between
@ loading sys_call_table and the indirect jump, after which r22 is
@ reused as the "why" flag tested on the return paths.
scno	.req	r21		@ syscall number
tbl	.req	r22		@ syscall table pointer
why	.req	r22		@ Linux syscall (!= 0)
tsk	.req	r23		@ current thread_info
140
141
142
143
	@ intr_handler: drain all pending interrupts.  Each pending irq is
	@ dispatched to asm_do_IRQ with lr pointed back at 1b, so the
	@ handler's return re-polls the controller; execution falls through
	@ to 2: once get_irqnr_and_base reports nothing pending (beq).
	.macro	intr_handler
1:	get_irqnr_and_base r0, r6, r5, lr
	beq	2f			@ no irq pending: done
	mov	r1, sp
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	adr	lr, 1b			@ return into the poll loop
	b	asm_do_IRQ
2:
	.endm
155
156
157
158
	@ priv_entry: build a struct pt_regs frame on the kernel stack for
	@ an exception taken while already in PRIV mode.  On entry r0
	@ points at the small per-vector save area (filled by vector_stub)
	@ holding the original r0, lr_<exception> and bsr_<exception>.
	@ NOTE(review): every line below ending in a bare "," lost its
	@ immediate operand in transcription (pt_regs offsets from
	@ generated asm-offsets, e.g. frame size / S_R15 / S_SP, and #-1
	@ for orig_r0) -- restore them before assembling.
	.macro	priv_entry
	sub	sp, sp,				@ NOTE(review): frame-size imm missing
	stm	(r1 - r15), [sp]+		@ save r1 - r15
	add	r5, sp,				@ NOTE(review): offset imm missing
	stm	(r16 - r28), [r5]+		@ save r16 - r28

	ldm	(r1 - r3), [r0]+		@ r1/r2/r3 = saved r0, lr_exc, bsr_exc
	add	r5, sp,				@ NOTE(review): offset imm missing
	mov	r4,				@ orig_r0; NOTE(review): imm missing (likely #-1)
	add	r0, sp,				@ r0 = original sp_priv; imm missing
	stw.w	r1, [sp+],			@ save the "real" r0 copied
	@ from the exception stack

	mov	r1, lr				@ r1 = lr_priv

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@ r0 - sp_priv
	@ r1 - lr_priv
	@ r2 - lr_<exception>, already fixed up for correct return/restart
	@ r3 - bsr_<exception>
	@ r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stm	(r0 - r4), [r5]+
	.endm
185
186
187
188
189
	@ user_entry: build a struct pt_regs frame on the kernel stack for
	@ an exception taken from user mode.  On entry r0 points at the
	@ per-vector save area (filled by vector_stub) holding the original
	@ r0, lr_<exception> and bsr_<exception>.  Also enables the kernel
	@ alignment trap and clears fp for backtraces.
	@ NOTE(review): lines ending in a bare "," lost their immediate
	@ operands (pt_regs offsets / #-1 for orig_r0) in transcription.
	.macro	user_entry
	sub	sp, sp,				@ NOTE(review): frame-size imm missing
	stm	(r1 - r15), [sp+]		@ save r1 - r15
	add	r4, sp,				@ NOTE(review): offset imm missing
	stm	(r16 - r28), [r4]+		@ save r16 - r28

	ldm	(r1 - r3), [r0]+		@ r1/r2/r3 = saved r0, lr_exc, bsr_exc
	add	r0, sp,				@ NOTE(review): offset imm missing
	mov	r4,				@ orig_r0; NOTE(review): imm missing (likely #-1)

	stw	r1, [sp]			@ save the "real" r0 copied
						@ from the exception stack

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@ r2 - lr_<exception>, already fixed up for correct return/restart
	@ r3 - bsr_<exception>
	@ r4 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_user and lr_user
	@
	stm	(r2 - r4), [r0]+
	stur	(sp, lr), [r0-]			@ banked user sp/lr

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp
	.endm
225
226 .text
227
228@
229@ __invalid - generic code for failed exception
230@ (re-entrant version of handlers)
231@
@ __invalid: catch-all for exceptions taken in a mode we cannot handle.
@ Saves a full register frame, then hands pt_regs (r0) and the current
@ asr (r1) to bad_mode, which does not return.
@ NOTE(review): lines ending in a bare "," lost their immediate
@ operands (frame offsets / #-1) in transcription.
__invalid:
	sub	sp, sp,				@ NOTE(review): frame-size imm missing
	stm	(r1 - r15), [sp+]
	add	r1, sp,				@ NOTE(review): offset imm missing
	stm	(r16 - r28, sp, lr), [r1]+

	zero_fp

	ldm	(r4 - r6), [r0]+		@ saved r0, lr_exc, asr_exc
	add	r0, sp,				@ NOTE(review): offset imm missing
	mov	r7,				@ NOTE(review): imm missing (likely #-1)
	stw	r4, [sp]		@ save preserved r0
	stm	(r5 - r7), [r0]+	@ lr_<exception>,
					@ asr_<exception>, "old_r0"

	mov	r0, sp			@ arg0: struct pt_regs *
	mov	r1, asr			@ arg1: current asr
	b	bad_mode		@ no return
ENDPROC(__invalid)
251
	.align	5
@ __dabt_priv: data abort taken in PRIV mode.  Re-enables interrupts
@ only if they were enabled in the aborted context, reads the fault
@ status/address from coprocessor p0, calls do_DataAbort, then
@ restores the frame and restarts the faulting instruction.
__dabt_priv:
	priv_entry

	@
	@ get ready to re-enable interrupts if appropriate
	@
	mov	r17, asr
	cand.a	r3,			@ NOTE(review): imm missing (likely the INTR-disable PSR bit)
	bne	1f
	andn	r17, r17,		@ NOTE(review): imm missing (clears same bit)
1:

	@
	@ Call the processor-specific abort handler:
	@
	@ r2 - aborted context pc
	@ r3 - aborted context asr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	@
	movc	r1, p0.c3,		@ NOTE(review): opcode imm missing (fault status read)
	movc	r0, p0.c4,		@ NOTE(review): opcode imm missing (fault address read)

	@
	@ set desired INTR state, then call main handler
	@
	mov.a	asr, r17
	mov	r2, sp			@ arg2: struct pt_regs *
	b.l	do_DataAbort

	@
	@ INTRs off again before pulling preserved data off the stack
	@
	disable_irq r0

	@
	@ restore BSR and restart the instruction
	@
	ldw	r2, [sp+],		@ NOTE(review): imm missing (saved-PSR frame offset)
	priv_exit r2			@ return from exception
ENDPROC(__dabt_priv)
295
	.align	5
@ __intr_priv: interrupt taken in PRIV mode.  Drains the INTC via
@ intr_handler, pokes coprocessor p0.c5 (purpose not evident from this
@ file -- NOTE(review): confirm against the PKUnity manual), then
@ restores the frame via priv_exit.
__intr_priv:
	priv_entry

	intr_handler

	mov	r0,			@ NOTE(review): imm missing
	movc	p0.c5, r0,		@ NOTE(review): opcode imm missing
	nop; nop; nop; nop; nop; nop; nop; nop	@ pipeline settle after movc

	ldw	r4, [sp+],		@ NOTE(review): imm missing (saved-PSR offset)

	priv_exit r4			@ return from exception
ENDPROC(__intr_priv)
310
311 .ltorg
312
	.align	5
@ __extn_priv: extended-instruction trap in PRIV mode is not supported
@ in the kernel; report it as a bad mode.
__extn_priv:
	priv_entry

	mov	r0, sp			@ struct pt_regs *regs
	mov	r1, asr
	b	bad_mode		@ not supported
ENDPROC(__extn_priv)
321
	.align	5
@ __pabt_priv: prefetch (instruction) abort taken in PRIV mode.
@ Mirrors __dabt_priv: conditionally re-enable interrupts, call
@ do_PrefetchAbort, then restore and restart the instruction.
__pabt_priv:
	priv_entry

	@
	@ re-enable interrupts if appropriate
	@
	mov	r17, asr
	cand.a	r3,			@ NOTE(review): imm missing (likely the INTR-disable PSR bit)
	bne	1f
	andn	r17, r17,		@ NOTE(review): imm missing (clears same bit)
1:

	@
	@ set args, then call main handler
	@
	@ r0 - address of faulting instruction
	@ r1 - pointer to registers on stack
	@
	mov	r0, r2			@ pass address of aborted instruction
	mov	r1,			@ NOTE(review): imm missing (fault-status code)
	mov.a	asr, r17
	mov	r2, sp			@ regs
	b.l	do_PrefetchAbort	@ call abort handler

	@
	@ INTRs off again before pulling preserved data off the stack
	@
	disable_irq r0

	@
	@ restore BSR and restart the instruction
	@
	ldw	r2, [sp+],		@ NOTE(review): imm missing (saved-PSR frame offset)
	priv_exit r2			@ return from exception
ENDPROC(__pabt_priv)
358
	.align	5
@ Literal holding &cr_alignment, loaded by the alignment_trap macro.
.LCcralign:
	.word	cr_alignment
362
	.align	5
@ __dabt_user: data abort taken from user mode.  If the FPU reports an
@ execution trap (F64 build), the faulting instruction is fetched and
@ bounced to ucf64_exchandler; otherwise (or without FPU support) the
@ fault status/address are read from p0 and passed to do_DataAbort,
@ returning through ret_from_exception.
@ NOTE(review): lines ending in a bare "," lost their immediate
@ operands in transcription.
__dabt_user:
	user_entry

#ifdef CONFIG_UNICORE_FPU_F64
	cff	ip, s31			@ read FPU status (FPSCR)
	cand.a	ip,			@ NOTE(review): imm missing (FPU-trap status bit)
	beq	209f			@ not an FPU trap: normal data abort

	ldw	ip, [sp+],		@ NOTE(review): imm missing (saved-pc offset)
	add	ip, ip,			@ NOTE(review): imm missing (likely #4, skip insn)
	stw	ip, [sp+],		@ NOTE(review): imm missing (saved-pc offset)
	@
	@ fall through to the emulation code, which returns using r19 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real extended instruction
	@
	@ r0 - instruction
	@
1:	ldw.u	r0, [r2]		@ fetch the faulting instruction
	adr	r19, ret_from_exception
	adr	lr, 209f
	@
	@ fallthrough to call do_uc_f64
	@

	get_thread_info r20		@ get current thread
	and	r8, r0,			@ NOTE(review): imm missing
	mov	r7,			@ NOTE(review): imm missing
	stb	r7, [r20+],		@ NOTE(review): thread_info offset missing

	@ F64 hardware support entry point.
	@ r0 = faulted instruction
	@ r19 = return address
	@ r20 = fp_state
	enable_irq r4
	add	r20, r20,		@ NOTE(review): fp_state offset missing
	cff	r1, s31			@ get fpu FPSCR
	andn	r2, r1,			@ NOTE(review): imm missing
	ctf	r2, s31			@ clear 27 bit
	mov	r2, sp			@ nothing stacked - regdump is at TOS
	mov	lr, r19			@ setup for a return to the user code

	@ Now call the C code to package up the bounce to the support code
	@ r0 holds the trigger instruction
	@ r1 holds the FPSCR value
	@ r2 pointer to register dump
	b	ucf64_exchandler
209:
#endif
	@
	@ Call the processor-specific abort handler:
	@
	@ r2 - aborted context pc
	@ r3 - aborted context asr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.
	@
	movc	r1, p0.c3,		@ NOTE(review): opcode imm missing (fault status read)
	movc	r0, p0.c4,		@ NOTE(review): opcode imm missing (fault address read)

	@
	@ INTRs on, then call the main handler
	@
	enable_irq r2
	mov	r2, sp
	adr	lr, ret_from_exception
	b	do_DataAbort
ENDPROC(__dabt_user)
448
	.align	5
@ __intr_user: interrupt taken from user mode.  Drains the INTC, then
@ takes the full user-return path (work checks in ret_to_user).
__intr_user:
	user_entry

	get_thread_info tsk

	intr_handler

	mov	why,			@ NOTE(review): imm missing (likely #0: not a syscall)
	b	ret_to_user
ENDPROC(__intr_user)
460
461 .ltorg
462
	.align	5
@ __extn_user: extended-instruction trap from user mode -- reported as
@ a bad mode (no user-space extended-instruction support here).
__extn_user:
	user_entry

	mov	r0, sp			@ arg0: struct pt_regs *
	mov	r1, asr			@ arg1: current asr
	b	bad_mode
ENDPROC(__extn_user)
471
	.align	5
@ __pabt_user: prefetch abort from user mode.  Calls do_PrefetchAbort
@ with interrupts enabled, then falls through into ret_from_exception,
@ which is also the common return point for other user-mode handlers.
__pabt_user:
	user_entry

	mov	r0, r2			@ pass address of aborted instruction.
	mov	r1,			@ NOTE(review): imm missing (fault-status code)
	enable_irq r1			@ Enable interrupts
	mov	r2, sp			@ regs
	b.l	do_PrefetchAbort	@ call abort handler

@ ret_from_exception: common exit for user-mode exception handlers --
@ reload tsk, mark "not a syscall", and run the ret_to_user work checks.
ENTRY(ret_from_exception)
	get_thread_info tsk
	mov	why,			@ NOTE(review): imm missing (likely #0: not a syscall)
	b	ret_to_user
ENDPROC(__pabt_user)
ENDPROC(ret_from_exception)
491
492
493
494
495
496
@ __switch_to: context switch.  Saves the callee context (r4-r27, sp,
@ lr, and the FPU bank when F64 is configured) into the outgoing
@ task's save area (base r1) and reloads the same set -- ending with
@ pc -- from the incoming task's save area (base r2), which transfers
@ control to the new thread.
@ NOTE(review): the "add ip, rN," lines lost their cpu-context /
@ fp-state offset immediates in transcription.
ENTRY(__switch_to)
	add	ip, r1,			@ NOTE(review): cpu-context offset missing
	stm.w	(r4 - r15), [ip]+
	stm.w	(r16 - r27, sp, lr), [ip]+

#ifdef CONFIG_UNICORE_FPU_F64
	add	ip, r1,			@ NOTE(review): fp-state offset missing
	sfm.w	(f0 - f7 ), [ip]+
	sfm.w	(f8 - f15), [ip]+
	sfm.w	(f16 - f23), [ip]+
	sfm.w	(f24 - f31), [ip]+
	cff	r4, s31			@ save outgoing FPSCR
	stw	r4, [ip]

	add	ip, r2,			@ NOTE(review): fp-state offset missing
	lfm.w	(f0 - f7 ), [ip]+
	lfm.w	(f8 - f15), [ip]+
	lfm.w	(f16 - f23), [ip]+
	lfm.w	(f24 - f31), [ip]+
	ldw	r4, [ip]
	ctf	r4, s31			@ restore incoming FPSCR
#endif
	add	ip, r2,			@ NOTE(review): cpu-context offset missing
	ldm.w	(r4 - r15), [ip]+
	ldm	(r16 - r27, sp, pc), [ip]+	@ Load all regs saved previously
ENDPROC(__switch_to)
523
524 .align 5
525
526
527
528
529
@ ret_fast_syscall: quick syscall exit.  With interrupts disabled,
@ check the thread's work flags; if any are set, fall into the slow
@ work path, otherwise restore user registers skipping r0 (which
@ already holds the syscall return value) and the S_OFF arg area.
ret_fast_syscall:
	disable_irq r1				@ disable interrupts
	ldw	r1, [tsk+],			@ NOTE(review): TI_FLAGS-style offset missing
	cand.a	r1,				@ NOTE(review): work-flags mask missing
	bne	fast_work_pending

	@ fast_restore_user_regs
	restore_user_regs fast = 1, offset = S_OFF
538
539
540
541
@ fast_work_pending: the fast path found work flags set; spill r0
@ (the syscall return value) back into the pt_regs frame first so the
@ slow restore path reloads it correctly, then fall into work_pending.
fast_work_pending:
	stw.w	r0, [sp+],			@ NOTE(review): r0-slot offset missing
@ work_pending: dispatch pending thread work (r1 = work flags):
@ reschedule if needed, otherwise deliver signals/notifications via
@ do_notify_resume, then re-run the exit checks.
work_pending:
	cand.a	r1,				@ NOTE(review): need-resched mask missing
	bne	work_resched
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	cand.a	r1,				@ NOTE(review): signal-work mask missing
	cmovne	why,				@ NOTE(review): imm missing
	b.l	do_notify_resume
	b	ret_slow_syscall		@ Check work again

work_resched:
	b.l	schedule			@ returns here; caller loops via work_pending
557
558
@ ret_to_user / ret_slow_syscall: common "return to user space" path.
@ Disables interrupts, re-checks the thread work flags (branching to
@ work_pending if any are set), then restores the full user register
@ frame.
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irq r1				@ disable interrupts
	get_thread_info tsk			@ epip4d, one path error?!
	ldw	r1, [tsk+],			@ NOTE(review): TI_FLAGS-style offset missing
	cand.a	r1,				@ NOTE(review): work-flags mask missing
	bne	work_pending
no_work_pending:
	@ slow_restore_user_regs
	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user)
570
571
572
573
@ ret_from_fork: first code run by a newly forked user task -- finish
@ the scheduler bookkeeping, then take the normal slow syscall exit.
ENTRY(ret_from_fork)
	b.l	schedule_tail
	b	ret_slow_syscall
ENDPROC(ret_from_fork)
578
@ ret_from_kernel_thread: first code run by a new kernel thread.
@ r4 = thread function, r5 = its argument (set up at thread creation);
@ if the function returns, lr sends it through ret_slow_syscall.
ENTRY(ret_from_kernel_thread)
	b.l	schedule_tail
	mov	r0, r5			@ arg for the thread function
	adr	lr, ret_slow_syscall	@ return path if fn() returns
	mov	pc, r4			@ jump to the thread function
ENDPROC(ret_from_kernel_thread)
585
586
587
588
589
	.align	5
@ vector_swi: SWI (syscall) entry.  Builds a pt_regs frame by hand
@ (stack layout must keep r0 at the bottom -- see restore_user_regs),
@ decodes the syscall number from the trapping SWI instruction,
@ pushes args 5/6, optionally diverts to the ptrace path, then
@ dispatches through sys_call_table with ret_fast_syscall as the
@ return address.  Out-of-range numbers go to sys_ni_syscall.
@ NOTE(review): lines ending in a bare "," lost their immediate
@ operands (frame offsets, SWI opcode masks, NR_syscalls bound,
@ shift amounts) in transcription.
ENTRY(vector_swi)
	sub	sp, sp,				@ NOTE(review): frame-size imm missing
	stm	(r0 - r15), [sp]+		@ Calling r0 - r15
	add	r8, sp,				@ NOTE(review): offset imm missing
	stm	(r16 - r28), [r8]+		@ Calling r16 - r28
	add	r8, sp,				@ NOTE(review): offset imm missing
	stur	(sp, lr), [r8-]			@ Calling sp, lr
	mov	r8, bsr				@ called from non-REAL mode
	stw	lr, [sp+],			@ save calling pc; offset missing
	stw	r8, [sp+],			@ save calling asr; offset missing
	stw	r0, [sp+],			@ save orig_r0; offset missing
	zero_fp

	sub	ip, lr,				@ NOTE(review): imm missing (back up to SWI insn)
	ldw.u	scno, [ip]			@ get SWI instruction

#ifdef CONFIG_ALIGNMENT_TRAP
	ldw	ip, __cr_alignment
	ldw	ip, [ip]
	movc	p0.c1, ip,			@ NOTE(review): opcode imm missing
#endif
	enable_irq ip

	get_thread_info tsk
	ldw	tbl, =sys_call_table		@ load syscall table pointer

	andn	scno, scno,			@ NOTE(review): SWI opcode mask missing
	andn	scno, scno,			@ NOTE(review): SWI opcode mask missing

	stm.w	(r4, r5), [sp-]			@ push fifth and sixth args
	ldw	ip, [tsk+],			@ NOTE(review): TI_FLAGS-style offset missing
	cand.a	ip,				@ NOTE(review): syscall-trace mask missing
	bne	__sys_trace			@ ptraced: take the trace path

	csub.a	scno,				@ NOTE(review): NR_syscalls bound missing
	adr	lr, ret_fast_syscall		@ return address
	bea	1f				@ out of range: not a real syscall
	ldw	pc, [tbl+], scno <<		@ NOTE(review): shift imm missing (word index)
1:
	add	r1, sp,				@ NOTE(review): regs offset missing
2:	mov	why,				@ NOTE(review): imm missing
	b	sys_ni_syscall			@ not private func
636
637
638
639
640
@ __sys_trace: ptrace-audited syscall path.  Notify the tracer before
@ the call (syscall_trace), re-validate the (possibly rewritten)
@ syscall number, reload r0-r3 which the tracer may have modified,
@ and dispatch with __sys_trace_return as the return address.
__sys_trace:
	mov	r2, scno
	add	r1, sp,				@ NOTE(review): regs offset missing
	mov	r0,				@ NOTE(review): imm missing (entry/exit marker)
	b.l	syscall_trace

	adr	lr, __sys_trace_return		@ return address
	mov	scno, r0			@ syscall number (possibly new)
	add	r1, sp,				@ NOTE(review): regs offset missing
	csub.a	scno,				@ NOTE(review): NR_syscalls bound missing
	bea	2b				@ out of range: sys_ni_syscall
	ldm	(r0 - r3), [r1]+		@ have to reload r0 - r3
	ldw	pc, [tbl+], scno <<		@ NOTE(review): shift imm missing

@ __sys_trace_return: store the result into the saved r0 slot, notify
@ the tracer of syscall exit, then take the slow return path.
__sys_trace_return:
	stw.w	r0, [sp+],			@ NOTE(review): r0-slot offset missing
	mov	r2, scno
	mov	r1, sp
	mov	r0,				@ NOTE(review): imm missing (entry/exit marker)
	b.l	syscall_trace
	b	ret_slow_syscall
662
	.align	5
#ifdef CONFIG_ALIGNMENT_TRAP
@ Literal holding &cr_alignment for the syscall entry path (vector_swi
@ reloads the control register from here).
	.type	__cr_alignment,		@ NOTE(review): type operand missing (likely #object)
__cr_alignment:
	.word	cr_alignment
#endif
	.ltorg
670
@ sys_rt_sigreturn: thin wrapper passing the pt_regs pointer to the C
@ implementation; "why" is reset so no syscall-restart handling runs
@ on the way back out.
ENTRY(sys_rt_sigreturn)
	add	r0, sp,				@ NOTE(review): regs offset missing
	mov	why,				@ NOTE(review): imm missing (likely #0, no restart)
	b	__sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn)
676
677 __INIT
678
679
680
681
682
683
684
685
686
687
688
689
690
691
	@ vector_stub: emit the tiny per-exception trampoline that runs in
	@ \mode straight off the vector page.  It saves r0/lr/bsr into the
	@ per-vector scratch area at sp, switches the CPU towards PRIV
	@ mode, then indexes the 4-entry handler table that must
	@ immediately follow the macro expansion (indexed by the mode bits
	@ of the interrupted context: USER/.../PRIV).
	@ NOTE(review): lines ending in a bare "," lost their immediates
	@ (save offsets, mode xor mask, mode-bit mask, shift) in
	@ transcription.
	.macro	vector_stub, name, mode
	.align	5

vector_\name:
	@
	@ Save r0, lr_<exception> (parent PC) and bsr_<exception>
	@ (parent ASR)
	@
	stw	r0, [sp]
	stw	lr, [sp+],		@ NOTE(review): offset imm missing
	mov	lr, bsr
	stw	lr, [sp+],		@ NOTE(review): offset imm missing

	@
	@ Prepare for PRIV mode. INTRs remain disabled.
	@
	mov	r0, asr
	xor	r0, r0,			@ NOTE(review): imm missing (likely #(\mode ^ PRIV_MODE))
	mov.a	bsr, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr,			@ NOTE(review): mode-bit mask missing
	add	lr, lr,			@ NOTE(review): imm missing
	mov	r0, sp
	ldw	lr, [pc+], lr <<	@ NOTE(review): shift imm missing (word index)
	mov.a	pc, lr			@ branch to handler in PRIV mode
ENDPROC(vector_\name)
	.align	2
	@ handler addresses follow this label
	.endm
724
@ The vector stubs and their 4-entry dispatch tables.  Each table is
@ indexed by the mode the exception was taken from (0 = USER,
@ 3 = PRIV); modes with no valid handler route to __invalid.
	.globl	__stubs_start
__stubs_start:
@
@ Interrupt dispatcher
@
	vector_stub	intr, INTR_MODE

	.long	__intr_user			@ 0 (USER)
	.long	__invalid			@ 1
	.long	__invalid			@ 2
	.long	__intr_priv			@ 3 (PRIV)

@
@ Data abort dispatcher
@
	vector_stub	dabt, ABRT_MODE

	.long	__dabt_user			@ 0 (USER)
	.long	__invalid			@ 1
	.long	__invalid			@ 2 (INTR)
	.long	__dabt_priv			@ 3 (PRIV)

@
@ Prefetch abort dispatcher
@
	vector_stub	pabt, ABRT_MODE

	.long	__pabt_user			@ 0 (USER)
	.long	__invalid			@ 1
	.long	__invalid			@ 2 (INTR)
	.long	__pabt_priv			@ 3 (PRIV)

@
@ Extended-instruction trap dispatcher
@
	vector_stub	extn, EXTN_MODE

	.long	__extn_user			@ 0 (USER)
	.long	__invalid			@ 1
	.long	__invalid			@ 2 (INTR)
	.long	__extn_priv			@ 3 (PRIV)

	.align	5

@ Literal holding &vector_swi; loaded pc-relative from the vector page.
.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:
781
@ The hardware vector page.  It is copied to a fixed location at boot
@ while the stubs land 0x200 further on, so every branch written here
@ must be displaced by stubs_offset to still reach its stub after the
@ copy.  Unused vector slots raise SYS_ERROR0 via jepriv.
	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start

	.globl	__vectors_start
__vectors_start:
	jepriv	SYS_ERROR0			@ reserved slot
	b	vector_extn + stubs_offset	@ extended-instruction trap
	ldw	pc, .LCvswi + stubs_offset	@ software interrupt (syscall)
	b	vector_pabt + stubs_offset	@ prefetch abort
	b	vector_dabt + stubs_offset	@ data abort
	jepriv	SYS_ERROR0			@ reserved slot
	b	vector_intr + stubs_offset	@ interrupt
	jepriv	SYS_ERROR0			@ reserved slot

	.globl	__vectors_end
__vectors_end:
797
.data

@ Backing storage for the cached system-control register values used
@ by the alignment_trap macro and vector_swi: the value with the
@ alignment bit set, and the value without it.
	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4
806