/*
 * linux/arch/unicore32/kernel/entry.S
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/unistd.h>
#include <generated/asm-offsets.h>
#include "debug-macro.S"

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@
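@ (vector_swi pushes args 5 and 6 - r4 and r5 - below the pt_regs frame,
@ so pt_regs offsets taken relative to the current sp must be biased by
@ S_OFF on the syscall path.)
@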
#define S_OFF           8

/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif

        .macro  zero_fp
#ifdef CONFIG_FRAME_POINTER
        mov     fp, #0
#endif
        .endm

        .macro  alignment_trap, rtemp
#ifdef CONFIG_ALIGNMENT_TRAP
        ldw     \rtemp, .LCcralign
        ldw     \rtemp, [\rtemp]
        movc    p0.c1, \rtemp, #0
#endif
        .endm

        .macro  load_user_sp_lr, rd, rtemp, offset = 0
        mov     \rtemp, asr
        xor     \rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)
        mov.a   asr, \rtemp                     @ switch to the SUSR mode

        ldw     sp, [\rd+], #\offset            @ load sp_user
        ldw     lr, [\rd+], #\offset + 4        @ load lr_user

        xor     \rtemp, \rtemp, #(PRIV_MODE ^ SUSR_MODE)
        mov.a   asr, \rtemp                     @ switch back to the PRIV mode
        .endm

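@
@ priv_exit: reload everything that priv_entry saved and return from a
@ PRIV-mode exception.  \rpsr is parked in bsr first, and the ldm.b that
@ restores pc moves it back into asr on the way out.
@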
        .macro  priv_exit, rpsr
        mov.a   bsr, \rpsr
        ldm.w   (r0 - r15), [sp]+
        ldm.b   (r16 - pc), [sp]+               @ load r0 - pc, asr
        .endm

        .macro  restore_user_regs, fast = 0, offset = 0
        ldw     r1, [sp+], #\offset + S_PSR     @ get calling asr
        ldw     lr, [sp+], #\offset + S_PC      @ get pc
        mov.a   bsr, r1                         @ save in bsr_priv
        .if     \fast
        add     sp, sp, #\offset + S_R1         @ r0 is syscall return value
        ldm.w   (r1 - r15), [sp]+               @ get calling r1 - r15
        ldur    (r16 - lr), [sp]+               @ get calling r16 - lr
        .else
        ldm.w   (r0 - r15), [sp]+               @ get calling r0 - r15
        ldur    (r16 - lr), [sp]+               @ get calling r16 - lr
        .endif
        nop
        add     sp, sp, #S_FRAME_SIZE - S_R16
        mov.a   pc, lr                          @ return
                                                @ and move bsr_priv into asr
        .endm

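@
@ thread_info sits at the bottom of the 8KB (2^13 byte) kernel stack,
@ so clearing the low 13 bits of sp yields the current thread_info.
@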
        .macro  get_thread_info, rd
        mov     \rd, sp >> #13
        mov     \rd, \rd << #13
        .endm

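@
@ Read the PKUnity interrupt controller: ICIP holds the pending sources
@ and ICMR the mask.  The highest pending unmasked IRQ number is derived
@ with cntlz; the Z flag is left set when nothing is pending.
@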
        .macro  get_irqnr_and_base, irqnr, irqstat, base, tmp
        ldw     \base, =(PKUNITY_INTC_BASE)
        ldw     \irqstat, [\base+], #0xC        @ INTC_ICIP
        ldw     \tmp,     [\base+], #0x4        @ INTC_ICMR
        and.a   \irqstat, \irqstat, \tmp
        beq     1001f
        cntlz   \irqnr, \irqstat
        rsub    \irqnr, \irqnr, #31
1001:   /* EQ will be set if no irqs pending */
        .endm

#ifdef CONFIG_DEBUG_LL
        .macro  printreg, reg, temp
                adr     \temp, 901f
                stm     (r0-r3), [\temp]+
                stw     lr, [\temp+], #0x10
                mov     r0, \reg
                b.l     printhex8
                mov     r0, #':'
                b.l     printch
                mov     r0, pc
                b.l     printhex8
                adr     r0, 902f
                b.l     printascii
                adr     \temp, 901f
                ldm     (r0-r3), [\temp]+
                ldw     lr, [\temp+], #0x10
                b       903f
901:    .word   0, 0, 0, 0, 0   @ r0-r3, lr
902:    .asciz  ": epip4d\n"
        .align
903:
        .endm
#endif

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno    .req    r21             @ syscall number
tbl     .req    r22             @ syscall table pointer
why     .req    r22             @ Linux syscall (!= 0)
tsk     .req    r23             @ current thread_info

/*
 * Interrupt handling.  Preserves r17, r18, r19
 */
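@ asm_do_IRQ returns to 1b (lr is preset to it), so the loop keeps
@ running until get_irqnr_and_base reports no pending sources.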
        .macro  intr_handler
1:      get_irqnr_and_base r0, r6, r5, lr
        beq     2f
        mov     r1, sp
        @
        @ routine called with r0 = irq number, r1 = struct pt_regs *
        @
        adr     lr, 1b
        b       asm_do_IRQ
2:
        .endm

/*
 * PRIV mode handlers
 */
        .macro  priv_entry
        sub     sp, sp, #(S_FRAME_SIZE - 4)
        stm     (r1 - r15), [sp]+
        add     r5, sp, #S_R15
        stm     (r16 - r28), [r5]+

        ldm     (r1 - r3), [r0]+
        add     r5, sp, #S_SP - 4       @ here for interlock avoidance
        mov     r4, #-1                 @  ""  ""      ""       ""
        add     r0, sp, #(S_FRAME_SIZE - 4)
        stw.w   r1, [sp+], #-4          @ save the "real" r0 copied
                                        @ from the exception stack
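                                        @ (the write-back also claims the
                                        @ last 4 bytes of the frame left
                                        @ unallocated above, so the saved
                                        @ r0 lands at S_R0)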

        mov     r1, lr

        @
        @ We are now ready to fill in the remaining blanks on the stack:
        @
        @  r0 - sp_priv
        @  r1 - lr_priv
        @  r2 - lr_<exception>, already fixed up for correct return/restart
        @  r3 - bsr_<exception>
        @  r4 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        stm     (r0 - r4), [r5]+
        .endm

/*
 * User mode handlers
 *
 */
        .macro  user_entry
        sub     sp, sp, #S_FRAME_SIZE
        stm     (r1 - r15), [sp+]
        add     r4, sp, #S_R16
        stm     (r16 - r28), [r4]+

        ldm     (r1 - r3), [r0]+
        add     r0, sp, #S_PC           @ here for interlock avoidance
        mov     r4, #-1                 @  ""  ""     ""        ""

        stw     r1, [sp]                @ save the "real" r0 copied
                                        @ from the exception stack

        @
        @ We are now ready to fill in the remaining blanks on the stack:
        @
        @  r2 - lr_<exception>, already fixed up for correct return/restart
        @  r3 - bsr_<exception>
        @  r4 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        @ Also, separately save sp_user and lr_user
        @
        stm     (r2 - r4), [r0]+
        stur    (sp, lr), [r0-]
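                                        @ (stur stores the user-mode copies
                                        @ of sp and lr even though we are
                                        @ still in PRIV mode)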

        @
        @ Enable the alignment trap while in kernel mode
        @
        alignment_trap r0

        @
        @ Clear FP to mark the first stack frame
        @
        zero_fp
        .endm

        .text

@
@ __invalid - generic code for failed exception
@                       (re-entrant version of handlers)
@
__invalid:
        sub     sp, sp, #S_FRAME_SIZE
        stm     (r1 - r15), [sp+]
        add     r1, sp, #S_R16
        stm     (r16 - r28, sp, lr), [r1]+

        zero_fp

        ldm     (r4 - r6), [r0]+
        add     r0, sp, #S_PC           @ here for interlock avoidance
        mov     r7, #-1                 @  ""   ""    ""        ""
        stw     r4, [sp]                @ save preserved r0
        stm     (r5 - r7), [r0]+        @ lr_<exception>,
                                        @ asr_<exception>, "old_r0"

        mov     r0, sp
        mov     r1, asr
        b       bad_mode
ENDPROC(__invalid)

        .align  5
__dabt_priv:
        priv_entry

        @
        @ get ready to re-enable interrupts if appropriate
        @
        mov     r17, asr
        cand.a  r3, #PSR_I_BIT
        bne     1f
        andn    r17, r17, #PSR_I_BIT
1:

        @
        @ Call the processor-specific abort handler:
        @
        @  r2 - aborted context pc
        @  r3 - aborted context asr
        @
        @ The abort handler must return the aborted address in r0, and
        @ the fault status register in r1.
        @
        movc    r1, p0.c3, #0           @ get FSR
        movc    r0, p0.c4, #0           @ get FAR

        @
        @ set desired INTR state, then call main handler
        @
        mov.a   asr, r17
        mov     r2, sp
        b.l     do_DataAbort

        @
        @ INTRs off again before pulling preserved data off the stack
        @
        disable_irq r0

        @
        @ restore BSR and restart the instruction
        @
        ldw     r2, [sp+], #S_PSR
        priv_exit r2                            @ return from exception
ENDPROC(__dabt_priv)

        .align  5
__intr_priv:
        priv_entry

        intr_handler

        mov     r0, #0                          @ epip4d
        movc    p0.c5, r0, #14
        nop; nop; nop; nop; nop; nop; nop; nop

        ldw     r4, [sp+], #S_PSR               @ irqs are already disabled

        priv_exit r4                            @ return from exception
ENDPROC(__intr_priv)

        .ltorg

        .align  5
__extn_priv:
        priv_entry

        mov     r0, sp                          @ struct pt_regs *regs
        mov     r1, asr
        b       bad_mode                        @ not supported
ENDPROC(__extn_priv)

        .align  5
__pabt_priv:
        priv_entry

        @
        @ re-enable interrupts if appropriate
        @
        mov     r17, asr
        cand.a  r3, #PSR_I_BIT
        bne     1f
        andn    r17, r17, #PSR_I_BIT
1:

        @
        @ set args, then call main handler
        @
        @  r0 - address of faulting instruction
        @  r1 - pointer to registers on stack
        @
        mov     r0, r2                  @ pass address of aborted instruction
        mov     r1, #5
        mov.a   asr, r17
        mov     r2, sp                  @ regs
        b.l     do_PrefetchAbort        @ call abort handler

        @
        @ INTRs off again before pulling preserved data off the stack
        @
        disable_irq r0

        @
        @ restore BSR and restart the instruction
        @
        ldw     r2, [sp+], #S_PSR
        priv_exit r2                    @ return from exception
ENDPROC(__pabt_priv)

        .align  5
.LCcralign:
        .word   cr_alignment

        .align  5
__dabt_user:
        user_entry

#ifdef CONFIG_UNICORE_FPU_F64
        cff     ip, s31
        cand.a  ip, #0x08000000         @ FPU exception traps?
        beq     209f

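        @ An FPU exception trap: step the saved PC past the trapping
        @ instruction before handing it to the f64 support code below.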
        ldw     ip, [sp+], #S_PC
        add     ip, ip, #4
        stw     ip, [sp+], #S_PC
        @
        @ fall through to the emulation code, which returns using r19 if
        @ it has emulated the instruction, or the more conventional lr
        @ if we are to treat this as a real extended instruction
        @
        @  r0 - instruction
        @
1:      ldw.u   r0, [r2]
        adr     r19, ret_from_exception
        adr     lr, 209f
        @
        @ fallthrough to call do_uc_f64
        @
/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC
 *  r19 = normal "successful" return address
 *  r20 = this thread's thread_info structure.
 *  lr  = unrecognised instruction return address
 */
        get_thread_info r20                     @ get current thread
        and     r8, r0, #0x00003c00             @ mask out CP number
        mov     r7, #1
        stb     r7, [r20+], #TI_USED_CP + 2     @ set appropriate used_cp[]

        @ F64 hardware support entry point.
        @  r0  = faulted instruction
        @  r19 = return address
        @  r20 = fp_state
        enable_irq r4
        add     r20, r20, #TI_FPSTATE   @ r20 = workspace
        cff     r1, s31                 @ get fpu FPSCR
        andn    r2, r1, #0x08000000
        ctf     r2, s31                 @ clear bit 27
        mov     r2, sp                  @ nothing stacked - regdump is at TOS
        mov     lr, r19                 @ setup for a return to the user code

        @ Now call the C code to package up the bounce to the support code
        @   r0 holds the trigger instruction
        @   r1 holds the FPSCR value
        @   r2 pointer to register dump
        b       ucf64_exchandler
209:
#endif
        @
        @ Call the processor-specific abort handler:
        @
        @  r2 - aborted context pc
        @  r3 - aborted context asr
        @
        @ The abort handler must return the aborted address in r0, and
        @ the fault status register in r1.
        @
        movc    r1, p0.c3, #0           @ get FSR
        movc    r0, p0.c4, #0           @ get FAR

        @
        @ INTRs on, then call the main handler
        @
        enable_irq r2
        mov     r2, sp
        adr     lr, ret_from_exception
        b       do_DataAbort
ENDPROC(__dabt_user)

        .align  5
__intr_user:
        user_entry

        get_thread_info tsk

        intr_handler

        mov     why, #0
        b       ret_to_user
ENDPROC(__intr_user)

        .ltorg

        .align  5
__extn_user:
        user_entry

        mov     r0, sp
        mov     r1, asr
        b       bad_mode
ENDPROC(__extn_user)

        .align  5
__pabt_user:
        user_entry

        mov     r0, r2                  @ pass address of aborted instruction.
        mov     r1, #5
        enable_irq r1                   @ Enable interrupts
        mov     r2, sp                  @ regs
        b.l     do_PrefetchAbort        @ call abort handler
        /* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
        get_thread_info tsk
        mov     why, #0
        b       ret_to_user
ENDPROC(__pabt_user)
ENDPROC(ret_from_exception)

/*
 * Register switch for UniCore V2 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
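@ r4-r27, sp and lr (plus the FPU state when configured) are saved into
@ the outgoing thread's TI_CPU_SAVE area and reloaded from the incoming
@ one; loading pc last performs the actual switch.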
ENTRY(__switch_to)
        add     ip, r1, #TI_CPU_SAVE
        stm.w   (r4 - r15), [ip]+
        stm.w   (r16 - r27, sp, lr), [ip]+

#ifdef  CONFIG_UNICORE_FPU_F64
        add     ip, r1, #TI_FPSTATE
        sfm.w   (f0  - f7 ), [ip]+
        sfm.w   (f8  - f15), [ip]+
        sfm.w   (f16 - f23), [ip]+
        sfm.w   (f24 - f31), [ip]+
        cff     r4, s31
        stw     r4, [ip]

        add     ip, r2, #TI_FPSTATE
        lfm.w   (f0  - f7 ), [ip]+
        lfm.w   (f8  - f15), [ip]+
        lfm.w   (f16 - f23), [ip]+
        lfm.w   (f24 - f31), [ip]+
        ldw     r4, [ip]
        ctf     r4, s31
#endif
        add     ip, r2, #TI_CPU_SAVE
        ldm.w   (r4 - r15), [ip]+
        ldm     (r16 - r27, sp, pc), [ip]+      @ Load all regs saved previously
ENDPROC(__switch_to)

        .align  5
/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the PRIV
 * stack.
 */
ret_fast_syscall:
        disable_irq r1                          @ disable interrupts
        ldw     r1, [tsk+], #TI_FLAGS
        cand.a  r1, #_TIF_WORK_MASK
        bne     fast_work_pending

        @ fast_restore_user_regs
        restore_user_regs fast = 1, offset = S_OFF

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
        stw.w   r0, [sp+], #S_R0+S_OFF          @ returned r0
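                                                @ (so signal delivery and
                                                @ restart handling see the
                                                @ syscall return value)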
work_pending:
        cand.a  r1, #_TIF_NEED_RESCHED
        bne     work_resched
        mov     r0, sp                          @ 'regs'
        mov     r2, why                         @ 'syscall'
        cand.a  r1, #_TIF_SIGPENDING            @ delivering a signal?
        cmovne  why, #0                         @ prevent further restarts
        b.l     do_notify_resume
        b       ret_slow_syscall                @ Check work again

work_resched:
        b.l     schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
        disable_irq r1                          @ disable interrupts
        get_thread_info tsk                     @ epip4d, one path error?!
        ldw     r1, [tsk+], #TI_FLAGS
        cand.a  r1, #_TIF_WORK_MASK
        bne     work_pending
no_work_pending:
        @ slow_restore_user_regs
        restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
        b.l     schedule_tail
        b       ret_slow_syscall
ENDPROC(ret_from_fork)

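/*
 * Kernel threads land here after their first reschedule: the thread
 * function is expected in r4 and its argument in r5 (r5 becomes r0
 * before jumping to r4); if the function ever returns we drop into
 * ret_slow_syscall.
 */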
ENTRY(ret_from_kernel_thread)
        b.l     schedule_tail
        mov     r0, r5
        adr     lr, ret_slow_syscall
        mov     pc, r4
ENDPROC(ret_from_kernel_thread)

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */
        .align  5
ENTRY(vector_swi)
        sub     sp, sp, #S_FRAME_SIZE
        stm     (r0 - r15), [sp]+               @ Calling r0 - r15
        add     r8, sp, #S_R16
        stm     (r16 - r28), [r8]+              @ Calling r16 - r28
        add     r8, sp, #S_PC
        stur    (sp, lr), [r8-]                 @ Calling sp, lr
        mov     r8, bsr                         @ called from non-REAL mode
        stw     lr, [sp+], #S_PC                @ Save calling PC
        stw     r8, [sp+], #S_PSR               @ Save ASR
        stw     r0, [sp+], #S_OLD_R0            @ Save OLD_R0
        zero_fp

        /*
         * Get the system call number.
         */
        sub     ip, lr, #4
        ldw.u   scno, [ip]                      @ get SWI instruction
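                                                @ (scno still holds the whole
                                                @ instruction word; the opcode
                                                @ bits are masked off below)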

#ifdef CONFIG_ALIGNMENT_TRAP
        ldw     ip, __cr_alignment
        ldw     ip, [ip]
        movc    p0.c1, ip, #0                   @ update control register
#endif
        enable_irq ip

        get_thread_info tsk
        ldw     tbl, =sys_call_table            @ load syscall table pointer

        andn    scno, scno, #0xff000000         @ mask off SWI op-code
        andn    scno, scno, #0x00ff0000         @ mask off SWI op-code

        stm.w   (r4, r5), [sp-]                 @ push fifth and sixth args
        ldw     ip, [tsk+], #TI_FLAGS           @ check for syscall tracing
        cand.a  ip, #_TIF_SYSCALL_TRACE         @ are we tracing syscalls?
        bne     __sys_trace

        csub.a  scno, #__NR_syscalls            @ check upper syscall limit
        adr     lr, ret_fast_syscall            @ return address
        bea     1f
        ldw     pc, [tbl+], scno << #2          @ call sys_* routine
1:
        add     r1, sp, #S_OFF
2:      mov     why, #0                         @ no longer a real syscall
        b       sys_ni_syscall                  @ not private func

        /*
         * This is the really slow path.  We're going to be doing
         * context switches, and waiting for our parent to respond.
         */
__sys_trace:
        mov     r2, scno
        add     r1, sp, #S_OFF
        mov     r0, #0                          @ trace entry [IP = 0]
        b.l     syscall_trace

        adr     lr, __sys_trace_return          @ return address
        mov     scno, r0                        @ syscall number (possibly new)
        add     r1, sp, #S_R0 + S_OFF           @ pointer to regs
        csub.a  scno, #__NR_syscalls            @ check upper syscall limit
        bea     2b
        ldm     (r0 - r3), [r1]+                @ have to reload r0 - r3
        ldw     pc, [tbl+], scno << #2          @ call sys_* routine

__sys_trace_return:
        stw.w   r0, [sp+], #S_R0 + S_OFF        @ save returned r0
        mov     r2, scno
        mov     r1, sp
        mov     r0, #1                          @ trace exit [IP = 1]
        b.l     syscall_trace
        b       ret_slow_syscall

        .align  5
#ifdef CONFIG_ALIGNMENT_TRAP
        .type   __cr_alignment, #object
__cr_alignment:
        .word   cr_alignment
#endif
        .ltorg

ENTRY(sys_rt_sigreturn)
                add     r0, sp, #S_OFF
                mov     why, #0         @ prevent syscall restart handling
                b       __sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn)

        __INIT

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldw's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in INTR mode, bsr = PRIV/USER ASR, lr = PRIV/USER PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
        .macro  vector_stub, name, mode
        .align  5

vector_\name:
        @
        @ Save r0, lr_<exception> (parent PC) and bsr_<exception>
        @ (parent ASR)
        @
        stw     r0, [sp]
        stw     lr, [sp+], #4           @ save r0, lr
        mov     lr, bsr
        stw     lr, [sp+], #8           @ save bsr

        @
        @ Prepare for PRIV mode.  INTRs remain disabled.
        @
        mov     r0, asr
        xor     r0, r0, #(\mode ^ PRIV_MODE)
        mov.a   bsr, r0

        @
        @ the branch table must immediately follow this code
        @
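        @ (lr still holds the parent ASR; its low two mode bits select the
        @ entry in the word table that follows: 0 = USER ... 3 = PRIV)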
        and     lr, lr, #0x03
        add     lr, lr, #1
        mov     r0, sp
        ldw     lr, [pc+], lr << #2
        mov.a   pc, lr                  @ branch to handler in PRIV mode
ENDPROC(vector_\name)
        .align  2
        @ handler addresses follow this label
        .endm

        .globl  __stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
        vector_stub     intr, INTR_MODE

        .long   __intr_user                     @  0  (USER)
        .long   __invalid                       @  1
        .long   __invalid                       @  2
        .long   __intr_priv                     @  3  (PRIV)

/*
 * Data abort dispatcher
 * Enter in ABT mode, bsr = USER ASR, lr = USER PC
 */
        vector_stub     dabt, ABRT_MODE

        .long   __dabt_user                     @  0  (USER)
        .long   __invalid                       @  1
        .long   __invalid                       @  2  (INTR)
        .long   __dabt_priv                     @  3  (PRIV)

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, bsr = USER ASR, lr = USER PC
 */
        vector_stub     pabt, ABRT_MODE

        .long   __pabt_user                     @  0 (USER)
        .long   __invalid                       @  1
        .long   __invalid                       @  2 (INTR)
        .long   __pabt_priv                     @  3 (PRIV)

/*
 * Undef instr entry dispatcher
 * Enter in EXTN mode, bsr = PRIV/USER ASR, lr = PRIV/USER PC
 */
        vector_stub     extn, EXTN_MODE

        .long   __extn_user                     @  0 (USER)
        .long   __invalid                       @  1
        .long   __invalid                       @  2 (INTR)
        .long   __extn_priv                     @  3 (PRIV)

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
        .align  5

.LCvswi:
        .word   vector_swi

        .globl  __stubs_end
__stubs_end:

        .equ    stubs_offset, __vectors_start + 0x200 - __stubs_start
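@ Both blocks are copied onto the vector page, with the stubs 0x200
@ bytes above the vectors, so "vector_xxx + stubs_offset" written in a
@ vector below resolves to the copied stub rather than its link address.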

        .globl  __vectors_start
__vectors_start:
        jepriv  SYS_ERROR0
        b       vector_extn + stubs_offset
        ldw     pc, .LCvswi + stubs_offset
        b       vector_pabt + stubs_offset
        b       vector_dabt + stubs_offset
        jepriv  SYS_ERROR0
        b       vector_intr + stubs_offset
        jepriv  SYS_ERROR0

        .globl  __vectors_end
__vectors_end:

        .data

        .globl  cr_alignment
        .globl  cr_no_alignment
cr_alignment:
        .space  4
cr_no_alignment:
        .space  4