   1/* SPDX-License-Identifier: GPL-2.0
   2 *
   3 * arch/sh/kernel/cpu/sh5/entry.S
   4 *
   5 * Copyright (C) 2000, 2001  Paolo Alberelli
   6 * Copyright (C) 2004 - 2008  Paul Mundt
   7 * Copyright (C) 2003, 2004  Richard Curnow
   8 */
   9#include <linux/errno.h>
  10#include <linux/init.h>
  11#include <linux/sys.h>
  12#include <cpu/registers.h>
  13#include <asm/processor.h>
  14#include <asm/unistd.h>
  15#include <asm/thread_info.h>
  16#include <asm/asm-offsets.h>
  17
  18/*
  19 * SR fields.
  20 */
  21#define SR_ASID_MASK    0x00ff0000
  22#define SR_FD_MASK      0x00008000
  23#define SR_SS           0x08000000
  24#define SR_BL           0x10000000
  25#define SR_MD           0x40000000
  26
  27/*
  28 * Event code.
  29 */
  30#define EVENT_INTERRUPT         0
  31#define EVENT_FAULT_TLB         1
  32#define EVENT_FAULT_NOT_TLB     2
  33#define EVENT_DEBUG             3
  34
  35/* EXPEVT values */
  36#define RESET_CAUSE             0x20
  37#define DEBUGSS_CAUSE           0x980
  38
  39/*
  40 * Frame layout. Quad index.
  41 */
   42#define FRAME_T(x)      (FRAME_TBASE+((x)*8))
   43#define FRAME_R(x)      (FRAME_RBASE+((x)*8))
   44#define FRAME_S(x)      (FRAME_SBASE+((x)*8))
  45#define FSPC            0
  46#define FSSR            1
  47#define FSYSCALL_ID     2
  48
  49/* Arrange the save frame to be a multiple of 32 bytes long */
  50#define FRAME_SBASE     0
  51#define FRAME_RBASE     (FRAME_SBASE+(3*8))     /* SYSCALL_ID - SSR - SPC */
  52#define FRAME_TBASE     (FRAME_RBASE+(63*8))    /* r0 - r62 */
   53#define FRAME_PBASE     (FRAME_TBASE+(8*8))     /* tr0 - tr7 */
  54#define FRAME_SIZE      (FRAME_PBASE+(2*8))     /* pad0-pad1 */
  55
  56#define FP_FRAME_SIZE   FP_FRAME_BASE+(33*8)    /* dr0 - dr31 + fpscr */
  57#define FP_FRAME_BASE   0
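
/*
 * Illustrative sketch only (hypothetical struct, not used by the build):
 * the BASIC save frame defined by the offsets above, written out as C.
 *
 *	struct sh5_frame {			// hypothetical name
 *		u64 spc, ssr, syscall_id;	// FRAME_S(FSPC/FSSR/FSYSCALL_ID)
 *		u64 regs[63];			// FRAME_R(0) .. FRAME_R(62)
 *		u64 tr[8];			// FRAME_T(0) .. FRAME_T(7)
 *		u64 pad[2];			// pads FRAME_SIZE to a 32-byte multiple
 *	};					// sizeof == FRAME_SIZE == 608 bytes
 *
 * (The FP frame, dr0 - dr31 + fpscr, is a separate FP_FRAME_SIZE area.)
 */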
  58
  59#define SAVED_R2        0*8
  60#define SAVED_R3        1*8
  61#define SAVED_R4        2*8
  62#define SAVED_R5        3*8
  63#define SAVED_R18       4*8
  64#define SAVED_R6        5*8
  65#define SAVED_TR0       6*8
  66
  67/* These are the registers saved in the TLB path that aren't saved in the first
  68   level of the normal one. */
  69#define TLB_SAVED_R25   7*8
  70#define TLB_SAVED_TR1   8*8
  71#define TLB_SAVED_TR2   9*8
  72#define TLB_SAVED_TR3   10*8
  73#define TLB_SAVED_TR4   11*8
   74/* Save R0/R1 : the PT-migrating compiler currently dishonours -ffixed-r0 and
   75   -ffixed-r1, causing breakage otherwise. */
  76#define TLB_SAVED_R0    12*8
  77#define TLB_SAVED_R1    13*8
  78
  79#define CLI()                           \
  80        getcon  SR, r6;                 \
  81        ori     r6, 0xf0, r6;           \
  82        putcon  r6, SR;
  83
  84#define STI()                           \
  85        getcon  SR, r6;                 \
  86        andi    r6, ~0xf0, r6;          \
  87        putcon  r6, SR;
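
/*
 * What the two macros above do, as a hedged C sketch (SR.IMASK occupies
 * bits 4-7, hence the 0xf0 masks; r6 is clobbered in both cases):
 *
 *	sr |=  0xf0;	// CLI(): raise IMASK to 15, masking all interrupt levels
 *	sr &= ~0xf0;	// STI(): drop IMASK to 0, accepting all interrupt levels
 */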
  88
  89#ifdef CONFIG_PREEMPT
  90#  define preempt_stop()        CLI()
  91#else
  92#  define preempt_stop()
  93#  define resume_kernel         restore_all
  94#endif
  95
  96        .section        .data, "aw"
  97
  98#define FAST_TLBMISS_STACK_CACHELINES 4
  99#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
 100
 101/* Register back-up area for all exceptions */
 102        .balign 32
  103        /* Allow for 16 quadwords to be pushed by the fast tlbmiss
  104         * handler's register saves etc. */
 105        .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
 106/* This is 32 byte aligned by construction */
 107/* Register back-up area for all exceptions */
 108reg_save_area:
 109        .quad   0
 110        .quad   0
 111        .quad   0
 112        .quad   0
 113
 114        .quad   0
 115        .quad   0
 116        .quad   0
 117        .quad   0
 118
 119        .quad   0
 120        .quad   0
 121        .quad   0
 122        .quad   0
 123
 124        .quad   0
 125        .quad   0
 126
 127/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
 128 * reentrancy. Note this area may be accessed via physical address.
 129 * Align so this fits a whole single cache line, for ease of purging.
 130 */
 131        .balign 32,0,32
 132resvec_save_area:
 133        .quad   0
 134        .quad   0
 135        .quad   0
 136        .quad   0
 137        .quad   0
 138        .balign 32,0,32
 139
 140/* Jump table of 3rd level handlers  */
 141trap_jtable:
 142        .long   do_exception_error              /* 0x000 */
 143        .long   do_exception_error              /* 0x020 */
 144#ifdef CONFIG_MMU
 145        .long   tlb_miss_load                           /* 0x040 */
 146        .long   tlb_miss_store                          /* 0x060 */
 147#else
 148        .long   do_exception_error
 149        .long   do_exception_error
 150#endif
 151        ! ARTIFICIAL pseudo-EXPEVT setting
 152        .long   do_debug_interrupt              /* 0x080 */
 153#ifdef CONFIG_MMU
 154        .long   tlb_miss_load                           /* 0x0A0 */
 155        .long   tlb_miss_store                          /* 0x0C0 */
 156#else
 157        .long   do_exception_error
 158        .long   do_exception_error
 159#endif
 160        .long   do_address_error_load   /* 0x0E0 */
 161        .long   do_address_error_store  /* 0x100 */
 162#ifdef CONFIG_SH_FPU
 163        .long   do_fpu_error            /* 0x120 */
 164#else
 165        .long   do_exception_error              /* 0x120 */
 166#endif
 167        .long   do_exception_error              /* 0x140 */
 168        .long   system_call                             /* 0x160 */
 169        .long   do_reserved_inst                /* 0x180 */
 170        .long   do_illegal_slot_inst    /* 0x1A0 */
 171        .long   do_exception_error              /* 0x1C0 - NMI */
 172        .long   do_exception_error              /* 0x1E0 */
 173        .rept 15
 174                .long do_IRQ            /* 0x200 - 0x3C0 */
 175        .endr
 176        .long   do_exception_error              /* 0x3E0 */
 177        .rept 32
 178                .long do_IRQ            /* 0x400 - 0x7E0 */
 179        .endr
 180        .long   fpu_error_or_IRQA                       /* 0x800 */
 181        .long   fpu_error_or_IRQB                       /* 0x820 */
 182        .long   do_IRQ                  /* 0x840 */
 183        .long   do_IRQ                  /* 0x860 */
 184        .rept 6
 185                .long do_exception_error        /* 0x880 - 0x920 */
 186        .endr
 187        .long   breakpoint_trap_handler /* 0x940 */
 188        .long   do_exception_error              /* 0x960 */
 189        .long   do_single_step          /* 0x980 */
 190
 191        .rept 3
 192                .long do_exception_error        /* 0x9A0 - 0x9E0 */
 193        .endr
 194        .long   do_IRQ                  /* 0xA00 */
 195        .long   do_IRQ                  /* 0xA20 */
 196#ifdef CONFIG_MMU
 197        .long   itlb_miss_or_IRQ                        /* 0xA40 */
 198#else
 199        .long   do_IRQ
 200#endif
 201        .long   do_IRQ                  /* 0xA60 */
 202        .long   do_IRQ                  /* 0xA80 */
 203#ifdef CONFIG_MMU
 204        .long   itlb_miss_or_IRQ                        /* 0xAA0 */
 205#else
 206        .long   do_IRQ
 207#endif
 208        .long   do_exception_error              /* 0xAC0 */
 209        .long   do_address_error_exec   /* 0xAE0 */
 210        .rept 8
 211                .long do_exception_error        /* 0xB00 - 0xBE0 */
 212        .endr
 213        .rept 18
 214                .long do_IRQ            /* 0xC00 - 0xE20 */
 215        .endr
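
/* How this table is indexed (see handle_exception below): EXPEVT/INTEVT
 * codes are multiples of 0x20 and the entries are 32-bit, so the byte
 * offset into the table is (evt >> 3) and the entry number handed to the
 * third-level handler is (evt >> 5). As a C sketch:
 *
 *	handler = trap_jtable[evt >> 5];   // e.g. 0x160 >> 5 == 11 == system_call
 */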
 216
 217        .section        .text64, "ax"
 218
 219/*
 220 * --- Exception/Interrupt/Event Handling Section
 221 */
 222
 223/*
 224 * VBR and RESVEC blocks.
 225 *
 226 * First level handler for VBR-based exceptions.
 227 *
 228 * To avoid waste of space, align to the maximum text block size.
 229 * This is assumed to be at most 128 bytes or 32 instructions.
  230 * DO NOT EXCEED 32 instructions in the first-level handlers!
 231 *
 232 * Also note that RESVEC is contained within the VBR block
 233 * where the room left (1KB - TEXT_SIZE) allows placing
 234 * the RESVEC block (at most 512B + TEXT_SIZE).
 235 *
 236 * So first (and only) level handler for RESVEC-based exceptions.
 237 *
 238 * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
  239 * and interrupt) we are very tight on register space until we
  240 * save onto the stack frame, which is done in handle_exception().
 241 *
 242 */
 243
 244#define TEXT_SIZE       128
 245#define BLOCK_SIZE      1664            /* Dynamic check, 13*128 */
 246
 247        .balign TEXT_SIZE
 248LVBR_block:
 249        .space  256, 0                  /* Power-on class handler, */
 250                                        /* not required here       */
 251not_a_tlb_miss:
 252        synco   /* TAKum03020 (but probably a good idea anyway.) */
 253        /* Save original stack pointer into KCR1 */
 254        putcon  SP, KCR1
 255
 256        /* Save other original registers into reg_save_area */
 257        movi  reg_save_area, SP
 258        st.q    SP, SAVED_R2, r2
 259        st.q    SP, SAVED_R3, r3
 260        st.q    SP, SAVED_R4, r4
 261        st.q    SP, SAVED_R5, r5
 262        st.q    SP, SAVED_R6, r6
 263        st.q    SP, SAVED_R18, r18
 264        gettr   tr0, r3
 265        st.q    SP, SAVED_TR0, r3
 266
 267        /* Set args for Non-debug, Not a TLB miss class handler */
 268        getcon  EXPEVT, r2
 269        movi    ret_from_exception, r3
 270        ori     r3, 1, r3
 271        movi    EVENT_FAULT_NOT_TLB, r4
 272        or      SP, ZERO, r5
 273        getcon  KCR1, SP
 274        pta     handle_exception, tr0
 275        blink   tr0, ZERO
 276
 277        .balign 256
 278        ! VBR+0x200
 279        nop
 280        .balign 256
 281        ! VBR+0x300
 282        nop
 283        .balign 256
 284        /*
 285         * Instead of the natural .balign 1024 place RESVEC here
 286         * respecting the final 1KB alignment.
 287         */
 288        .balign TEXT_SIZE
 289        /*
 290         * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
 291         * block making sure the final alignment is correct.
 292         */
 293#ifdef CONFIG_MMU
 294tlb_miss:
 295        synco   /* TAKum03020 (but probably a good idea anyway.) */
 296        putcon  SP, KCR1
 297        movi    reg_save_area, SP
 298        /* SP is guaranteed 32-byte aligned. */
 299        st.q    SP, TLB_SAVED_R0 , r0
 300        st.q    SP, TLB_SAVED_R1 , r1
 301        st.q    SP, SAVED_R2 , r2
 302        st.q    SP, SAVED_R3 , r3
 303        st.q    SP, SAVED_R4 , r4
 304        st.q    SP, SAVED_R5 , r5
 305        st.q    SP, SAVED_R6 , r6
 306        st.q    SP, SAVED_R18, r18
 307
 308        /* Save R25 for safety; as/ld may want to use it to achieve the call to
 309         * the code in mm/tlbmiss.c */
 310        st.q    SP, TLB_SAVED_R25, r25
 311        gettr   tr0, r2
 312        gettr   tr1, r3
 313        gettr   tr2, r4
 314        gettr   tr3, r5
 315        gettr   tr4, r18
 316        st.q    SP, SAVED_TR0 , r2
 317        st.q    SP, TLB_SAVED_TR1 , r3
 318        st.q    SP, TLB_SAVED_TR2 , r4
 319        st.q    SP, TLB_SAVED_TR3 , r5
 320        st.q    SP, TLB_SAVED_TR4 , r18
 321
 322        pt      do_fast_page_fault, tr0
 323        getcon  SSR, r2
 324        getcon  EXPEVT, r3
 325        getcon  TEA, r4
 326        shlri   r2, 30, r2
 327        andi    r2, 1, r2       /* r2 = SSR.MD */
 328        blink   tr0, LINK
 329
 330        pt      fixup_to_invoke_general_handler, tr1
 331
  332        /* If the fast-path handler fixed the fault, just drop straight through
  333           to the restore code to return to the excepting context.
  334           */
 335        bnei/u  r2, 0, tr1
 336
 337fast_tlb_miss_restore:
 338        ld.q    SP, SAVED_TR0, r2
 339        ld.q    SP, TLB_SAVED_TR1, r3
 340        ld.q    SP, TLB_SAVED_TR2, r4
 341
 342        ld.q    SP, TLB_SAVED_TR3, r5
 343        ld.q    SP, TLB_SAVED_TR4, r18
 344
 345        ptabs   r2, tr0
 346        ptabs   r3, tr1
 347        ptabs   r4, tr2
 348        ptabs   r5, tr3
 349        ptabs   r18, tr4
 350
 351        ld.q    SP, TLB_SAVED_R0, r0
 352        ld.q    SP, TLB_SAVED_R1, r1
 353        ld.q    SP, SAVED_R2, r2
 354        ld.q    SP, SAVED_R3, r3
 355        ld.q    SP, SAVED_R4, r4
 356        ld.q    SP, SAVED_R5, r5
 357        ld.q    SP, SAVED_R6, r6
 358        ld.q    SP, SAVED_R18, r18
 359        ld.q    SP, TLB_SAVED_R25, r25
 360
 361        getcon  KCR1, SP
 362        rte
 363        nop /* for safety, in case the code is run on sh5-101 cut1.x */
 364
 365fixup_to_invoke_general_handler:
 366
 367        /* OK, new method.  Restore stuff that's not expected to get saved into
 368           the 'first-level' reg save area, then just fall through to setting
 369           up the registers and calling the second-level handler. */
 370
 371        /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved.  So we must restore
 372           r25,tr1-4 and save r6 to get into the right state.  */
 373
 374        ld.q    SP, TLB_SAVED_TR1, r3
 375        ld.q    SP, TLB_SAVED_TR2, r4
 376        ld.q    SP, TLB_SAVED_TR3, r5
 377        ld.q    SP, TLB_SAVED_TR4, r18
 378        ld.q    SP, TLB_SAVED_R25, r25
 379
 380        ld.q    SP, TLB_SAVED_R0, r0
 381        ld.q    SP, TLB_SAVED_R1, r1
 382
 383        ptabs/u r3, tr1
 384        ptabs/u r4, tr2
 385        ptabs/u r5, tr3
 386        ptabs/u r18, tr4
 387
 388        /* Set args for Non-debug, TLB miss class handler */
 389        getcon  EXPEVT, r2
 390        movi    ret_from_exception, r3
 391        ori     r3, 1, r3
 392        movi    EVENT_FAULT_TLB, r4
 393        or      SP, ZERO, r5
 394        getcon  KCR1, SP
 395        pta     handle_exception, tr0
 396        blink   tr0, ZERO
 397#else /* CONFIG_MMU */
 398        .balign 256
 399#endif
 400
 401/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
 402   DOES END UP AT VBR+0x600 */
 403        nop
 404        nop
 405        nop
 406        nop
 407        nop
 408        nop
 409
 410        .balign 256
 411        /* VBR + 0x600 */
 412
 413interrupt:
 414        synco   /* TAKum03020 (but probably a good idea anyway.) */
 415        /* Save original stack pointer into KCR1 */
 416        putcon  SP, KCR1
 417
 418        /* Save other original registers into reg_save_area */
 419        movi  reg_save_area, SP
 420        st.q    SP, SAVED_R2, r2
 421        st.q    SP, SAVED_R3, r3
 422        st.q    SP, SAVED_R4, r4
 423        st.q    SP, SAVED_R5, r5
 424        st.q    SP, SAVED_R6, r6
 425        st.q    SP, SAVED_R18, r18
 426        gettr   tr0, r3
 427        st.q    SP, SAVED_TR0, r3
 428
 429        /* Set args for interrupt class handler */
 430        getcon  INTEVT, r2
 431        movi    ret_from_irq, r3
 432        ori     r3, 1, r3
 433        movi    EVENT_INTERRUPT, r4
 434        or      SP, ZERO, r5
 435        getcon  KCR1, SP
 436        pta     handle_exception, tr0
 437        blink   tr0, ZERO
 438        .balign TEXT_SIZE               /* let's waste the bare minimum */
 439
 440LVBR_block_end:                         /* Marker. Used for total checking */
 441
 442        .balign 256
 443LRESVEC_block:
 444        /* Panic handler. Called with MMU off. Possible causes/actions:
 445         * - Reset:             Jump to program start.
 446         * - Single Step:       Turn off Single Step & return.
 447         * - Others:            Call panic handler, passing PC as arg.
 448         *                      (this may need to be extended...)
 449         */
 450reset_or_panic:
 451        synco   /* TAKum03020 (but probably a good idea anyway.) */
 452        putcon  SP, DCR
 453        /* First save r0-1 and tr0, as we need to use these */
 454        movi    resvec_save_area-CONFIG_PAGE_OFFSET, SP
 455        st.q    SP, 0, r0
 456        st.q    SP, 8, r1
 457        gettr   tr0, r0
 458        st.q    SP, 32, r0
 459
 460        /* Check cause */
 461        getcon  EXPEVT, r0
 462        movi    RESET_CAUSE, r1
 463        sub     r1, r0, r1              /* r1=0 if reset */
 464        movi    _stext-CONFIG_PAGE_OFFSET, r0
 465        ori     r0, 1, r0
 466        ptabs   r0, tr0
 467        beqi    r1, 0, tr0              /* Jump to start address if reset */
 468
 469        getcon  EXPEVT, r0
 470        movi    DEBUGSS_CAUSE, r1
 471        sub     r1, r0, r1              /* r1=0 if single step */
 472        pta     single_step_panic, tr0
 473        beqi    r1, 0, tr0              /* jump if single step */
 474
 475        /* Now jump to where we save the registers. */
 476        movi    panic_stash_regs-CONFIG_PAGE_OFFSET, r1
 477        ptabs   r1, tr0
 478        blink   tr0, r63
 479
 480single_step_panic:
 481        /* We are in a handler with Single Step set. We need to resume the
 482         * handler, by turning on MMU & turning off Single Step. */
 483        getcon  SSR, r0
 484        movi    SR_MMU, r1
 485        or      r0, r1, r0
 486        movi    ~SR_SS, r1
 487        and     r0, r1, r0
 488        putcon  r0, SSR
 489        /* Restore EXPEVT, as the rte won't do this */
 490        getcon  PEXPEVT, r0
 491        putcon  r0, EXPEVT
 492        /* Restore regs */
 493        ld.q    SP, 32, r0
 494        ptabs   r0, tr0
 495        ld.q    SP, 0, r0
 496        ld.q    SP, 8, r1
 497        getcon  DCR, SP
 498        synco
 499        rte
 500
 501
 502        .balign 256
 503debug_exception:
 504        synco   /* TAKum03020 (but probably a good idea anyway.) */
 505        /*
 506         * Single step/software_break_point first level handler.
 507         * Called with MMU off, so the first thing we do is enable it
 508         * by doing an rte with appropriate SSR.
 509         */
 510        putcon  SP, DCR
 511        /* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
 512        movi    resvec_save_area-CONFIG_PAGE_OFFSET, SP
 513
 514        /* With the MMU off, we are bypassing the cache, so purge any
 515         * data that will be made stale by the following stores.
 516         */
 517        ocbp    SP, 0
 518        synco
 519
 520        st.q    SP, 0, r0
 521        st.q    SP, 8, r1
 522        getcon  SPC, r0
 523        st.q    SP, 16, r0
 524        getcon  SSR, r0
 525        st.q    SP, 24, r0
 526
 527        /* Enable MMU, block exceptions, set priv mode, disable single step */
 528        movi    SR_MMU | SR_BL | SR_MD, r1
 529        or      r0, r1, r0
 530        movi    ~SR_SS, r1
 531        and     r0, r1, r0
 532        putcon  r0, SSR
 533        /* Force control to debug_exception_2 when rte is executed */
  534        movi    debug_exception_2, r0
 535        ori     r0, 1, r0      /* force SHmedia, just in case */
 536        putcon  r0, SPC
 537        getcon  DCR, SP
 538        synco
 539        rte
  540debug_exception_2:
 541        /* Restore saved regs */
 542        putcon  SP, KCR1
 543        movi    resvec_save_area, SP
 544        ld.q    SP, 24, r0
 545        putcon  r0, SSR
 546        ld.q    SP, 16, r0
 547        putcon  r0, SPC
 548        ld.q    SP, 0, r0
 549        ld.q    SP, 8, r1
 550
 551        /* Save other original registers into reg_save_area */
 552        movi  reg_save_area, SP
 553        st.q    SP, SAVED_R2, r2
 554        st.q    SP, SAVED_R3, r3
 555        st.q    SP, SAVED_R4, r4
 556        st.q    SP, SAVED_R5, r5
 557        st.q    SP, SAVED_R6, r6
 558        st.q    SP, SAVED_R18, r18
 559        gettr   tr0, r3
 560        st.q    SP, SAVED_TR0, r3
 561
 562        /* Set args for debug class handler */
 563        getcon  EXPEVT, r2
 564        movi    ret_from_exception, r3
 565        ori     r3, 1, r3
 566        movi    EVENT_DEBUG, r4
 567        or      SP, ZERO, r5
 568        getcon  KCR1, SP
 569        pta     handle_exception, tr0
 570        blink   tr0, ZERO
 571
 572        .balign 256
 573debug_interrupt:
 574        /* !!! WE COME HERE IN REAL MODE !!! */
 575        /* Hook-up debug interrupt to allow various debugging options to be
 576         * hooked into its handler. */
 577        /* Save original stack pointer into KCR1 */
 578        synco
 579        putcon  SP, KCR1
 580        movi    resvec_save_area-CONFIG_PAGE_OFFSET, SP
 581        ocbp    SP, 0
 582        ocbp    SP, 32
 583        synco
 584
  585        /* Save other original registers into resvec_save_area through real addresses */
 586        st.q    SP, SAVED_R2, r2
 587        st.q    SP, SAVED_R3, r3
 588        st.q    SP, SAVED_R4, r4
 589        st.q    SP, SAVED_R5, r5
 590        st.q    SP, SAVED_R6, r6
 591        st.q    SP, SAVED_R18, r18
 592        gettr   tr0, r3
 593        st.q    SP, SAVED_TR0, r3
 594
 595        /* move (spc,ssr)->(pspc,pssr).  The rte will shift
 596           them back again, so that they look like the originals
 597           as far as the real handler code is concerned. */
 598        getcon  spc, r6
 599        putcon  r6, pspc
 600        getcon  ssr, r6
 601        putcon  r6, pssr
 602
 603        ! construct useful SR for handle_exception
 604        movi    3, r6
 605        shlli   r6, 30, r6
 606        getcon  sr, r18
 607        or      r18, r6, r6
 608        putcon  r6, ssr
 609
 610        ! SSR is now the current SR with the MD and MMU bits set
 611        ! i.e. the rte will switch back to priv mode and put
 612        ! the mmu back on
 613
 614        ! construct spc
 615        movi    handle_exception, r18
 616        ori     r18, 1, r18             ! for safety (do we need this?)
 617        putcon  r18, spc
 618
 619        /* Set args for Non-debug, Not a TLB miss class handler */
 620
 621        ! EXPEVT==0x80 is unused, so 'steal' this value to put the
 622        ! debug interrupt handler in the vectoring table
 623        movi    0x80, r2
 624        movi    ret_from_exception, r3
 625        ori     r3, 1, r3
 626        movi    EVENT_FAULT_NOT_TLB, r4
 627
 628        or      SP, ZERO, r5
 629        movi    CONFIG_PAGE_OFFSET, r6
 630        add     r6, r5, r5
 631        getcon  KCR1, SP
 632
 633        synco   ! for safety
 634        rte     ! -> handle_exception, switch back to priv mode again
 635
 636LRESVEC_block_end:                      /* Marker. Unused. */
 637
 638        .balign TEXT_SIZE
 639
 640/*
 641 * Second level handler for VBR-based exceptions. Pre-handler.
  642 * Common to all stack-frame-sensitive handlers.
 643 *
 644 * Inputs:
 645 * (KCR0) Current [current task union]
 646 * (KCR1) Original SP
 647 * (r2)   INTEVT/EXPEVT
 648 * (r3)   appropriate return address
  649 * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3 = debug)
 650 * (r5)   Pointer to reg_save_area
 651 * (SP)   Original SP
 652 *
 653 * Available registers:
 654 * (r6)
 655 * (r18)
 656 * (tr0)
 657 *
 658 */
 659handle_exception:
 660        /* Common 2nd level handler. */
 661
 662        /* First thing we need an appropriate stack pointer */
 663        getcon  SSR, r6
 664        shlri   r6, 30, r6
 665        andi    r6, 1, r6
 666        pta     stack_ok, tr0
 667        bne     r6, ZERO, tr0           /* Original stack pointer is fine */
 668
 669        /* Set stack pointer for user fault */
 670        getcon  KCR0, SP
 671        movi    THREAD_SIZE, r6         /* Point to the end */
 672        add     SP, r6, SP
 673
 674stack_ok:
 675
 676/* DEBUG : check for underflow/overflow of the kernel stack */
 677        pta     no_underflow, tr0
 678        getcon  KCR0, r6
 679        movi    1024, r18
 680        add     r6, r18, r6
  681        bge     SP, r6, tr0     ! branch if >= 1k above stack bottom, else danger zone
 682
  683/* Deliberately fault (load from address zero) to force a crash. */
 684bad_sp:
 685        ld.b    r63, 0, r6
 686        nop
 687
 688no_underflow:
 689        pta     bad_sp, tr0
 690        getcon  kcr0, r6
 691        movi    THREAD_SIZE, r18
 692        add     r18, r6, r6
 693        bgt     SP, r6, tr0     ! sp above the stack
 694
 695        /* Make some room for the BASIC frame. */
 696        movi    -(FRAME_SIZE), r6
 697        add     SP, r6, SP
 698
 699/* Could do this with no stalling if we had another spare register, but the
 700   code below will be OK. */
 701        ld.q    r5, SAVED_R2, r6
 702        ld.q    r5, SAVED_R3, r18
 703        st.q    SP, FRAME_R(2), r6
 704        ld.q    r5, SAVED_R4, r6
 705        st.q    SP, FRAME_R(3), r18
 706        ld.q    r5, SAVED_R5, r18
 707        st.q    SP, FRAME_R(4), r6
 708        ld.q    r5, SAVED_R6, r6
 709        st.q    SP, FRAME_R(5), r18
 710        ld.q    r5, SAVED_R18, r18
 711        st.q    SP, FRAME_R(6), r6
 712        ld.q    r5, SAVED_TR0, r6
 713        st.q    SP, FRAME_R(18), r18
 714        st.q    SP, FRAME_T(0), r6
 715
 716        /* Keep old SP around */
 717        getcon  KCR1, r6
 718
 719        /* Save the rest of the general purpose registers */
 720        st.q    SP, FRAME_R(0), r0
 721        st.q    SP, FRAME_R(1), r1
 722        st.q    SP, FRAME_R(7), r7
 723        st.q    SP, FRAME_R(8), r8
 724        st.q    SP, FRAME_R(9), r9
 725        st.q    SP, FRAME_R(10), r10
 726        st.q    SP, FRAME_R(11), r11
 727        st.q    SP, FRAME_R(12), r12
 728        st.q    SP, FRAME_R(13), r13
 729        st.q    SP, FRAME_R(14), r14
 730
 731        /* SP is somewhere else */
 732        st.q    SP, FRAME_R(15), r6
 733
 734        st.q    SP, FRAME_R(16), r16
 735        st.q    SP, FRAME_R(17), r17
 736        /* r18 is saved earlier. */
 737        st.q    SP, FRAME_R(19), r19
 738        st.q    SP, FRAME_R(20), r20
 739        st.q    SP, FRAME_R(21), r21
 740        st.q    SP, FRAME_R(22), r22
 741        st.q    SP, FRAME_R(23), r23
 742        st.q    SP, FRAME_R(24), r24
 743        st.q    SP, FRAME_R(25), r25
 744        st.q    SP, FRAME_R(26), r26
 745        st.q    SP, FRAME_R(27), r27
 746        st.q    SP, FRAME_R(28), r28
 747        st.q    SP, FRAME_R(29), r29
 748        st.q    SP, FRAME_R(30), r30
 749        st.q    SP, FRAME_R(31), r31
 750        st.q    SP, FRAME_R(32), r32
 751        st.q    SP, FRAME_R(33), r33
 752        st.q    SP, FRAME_R(34), r34
 753        st.q    SP, FRAME_R(35), r35
 754        st.q    SP, FRAME_R(36), r36
 755        st.q    SP, FRAME_R(37), r37
 756        st.q    SP, FRAME_R(38), r38
 757        st.q    SP, FRAME_R(39), r39
 758        st.q    SP, FRAME_R(40), r40
 759        st.q    SP, FRAME_R(41), r41
 760        st.q    SP, FRAME_R(42), r42
 761        st.q    SP, FRAME_R(43), r43
 762        st.q    SP, FRAME_R(44), r44
 763        st.q    SP, FRAME_R(45), r45
 764        st.q    SP, FRAME_R(46), r46
 765        st.q    SP, FRAME_R(47), r47
 766        st.q    SP, FRAME_R(48), r48
 767        st.q    SP, FRAME_R(49), r49
 768        st.q    SP, FRAME_R(50), r50
 769        st.q    SP, FRAME_R(51), r51
 770        st.q    SP, FRAME_R(52), r52
 771        st.q    SP, FRAME_R(53), r53
 772        st.q    SP, FRAME_R(54), r54
 773        st.q    SP, FRAME_R(55), r55
 774        st.q    SP, FRAME_R(56), r56
 775        st.q    SP, FRAME_R(57), r57
 776        st.q    SP, FRAME_R(58), r58
 777        st.q    SP, FRAME_R(59), r59
 778        st.q    SP, FRAME_R(60), r60
 779        st.q    SP, FRAME_R(61), r61
 780        st.q    SP, FRAME_R(62), r62
 781
 782        /*
 783         * Save the S* registers.
 784         */
 785        getcon  SSR, r61
 786        st.q    SP, FRAME_S(FSSR), r61
 787        getcon  SPC, r62
 788        st.q    SP, FRAME_S(FSPC), r62
 789        movi    -1, r62                 /* Reset syscall_nr */
 790        st.q    SP, FRAME_S(FSYSCALL_ID), r62
 791
 792        /* Save the rest of the target registers */
 793        gettr   tr1, r6
 794        st.q    SP, FRAME_T(1), r6
 795        gettr   tr2, r6
 796        st.q    SP, FRAME_T(2), r6
 797        gettr   tr3, r6
 798        st.q    SP, FRAME_T(3), r6
 799        gettr   tr4, r6
 800        st.q    SP, FRAME_T(4), r6
 801        gettr   tr5, r6
 802        st.q    SP, FRAME_T(5), r6
 803        gettr   tr6, r6
 804        st.q    SP, FRAME_T(6), r6
 805        gettr   tr7, r6
 806        st.q    SP, FRAME_T(7), r6
 807
  808        ! set up FP so that the unwinder can wind back through nested kernel-mode
  809        ! exceptions
 810        add     SP, ZERO, r14
 811
 812        /* For syscall and debug race condition, get TRA now */
 813        getcon  TRA, r5
 814
 815        /* We are in a safe position to turn SR.BL off, but set IMASK=0xf
 816         * Also set FD, to catch FPU usage in the kernel.
 817         *
 818         * benedict.gaster@superh.com 29/07/2002
 819         *
 820         * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
 821         * same time change BL from 1->0, as any pending interrupt of a level
  822 * higher than the previous value of IMASK will leak through and be
 823         * taken unexpectedly.
 824         *
 825         * To avoid this we raise the IMASK and then issue another PUTCON to
 826         * enable interrupts.
 827         */
 828        getcon  SR, r6
 829        movi    SR_IMASK | SR_FD, r7
 830        or      r6, r7, r6
 831        putcon  r6, SR
 832        movi    SR_UNBLOCK_EXC, r7
 833        and     r6, r7, r6
 834        putcon  r6, SR
 835
 836
 837        /* Now call the appropriate 3rd level handler */
 838        or      r3, ZERO, LINK
 839        movi    trap_jtable, r3
 840        shlri   r2, 3, r2
 841        ldx.l   r2, r3, r3
 842        shlri   r2, 2, r2
 843        ptabs   r3, tr0
 844        or      SP, ZERO, r3
 845        blink   tr0, ZERO
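
/*
 * Hedged C sketch of the dispatch just performed (register roles as per
 * the block comment above; the typedef is illustrative, not a real type):
 *
 *	typedef void (*trap_handler_t)(int entry, struct pt_regs *regs,
 *				       int event, unsigned long tra);
 *	trap_handler_t h = trap_jtable[evt >> 5];
 *	h(evt >> 5, regs, event, tra);	// LINK holds the return address (old r3)
 */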
 846
 847/*
 848 * Second level handler for VBR-based exceptions. Post-handlers.
 849 *
 850 * Post-handlers for interrupts (ret_from_irq), exceptions
 851 * (ret_from_exception) and common reentrance doors (restore_all
  852 * to get back to the original context, the ret_from_syscall loop to
  853 * check for pending work before exiting the kernel).
  854 *
  855 * ret_with_reschedule and work_notifysig are inner labels of
  856 * the ret_from_syscall loop.
 857 *
  858 * Common to all stack-frame-sensitive handlers.
 859 *
 860 * Inputs:
 861 * (SP)   struct pt_regs *, original register's frame pointer (basic)
 862 *
 863 */
 864        .global ret_from_irq
 865ret_from_irq:
 866        ld.q    SP, FRAME_S(FSSR), r6
 867        shlri   r6, 30, r6
 868        andi    r6, 1, r6
 869        pta     resume_kernel, tr0
 870        bne     r6, ZERO, tr0           /* no further checks */
 871        STI()
 872        pta     ret_with_reschedule, tr0
 873        blink   tr0, ZERO               /* Do not check softirqs */
 874
 875        .global ret_from_exception
 876ret_from_exception:
 877        preempt_stop()
 878
 879        ld.q    SP, FRAME_S(FSSR), r6
 880        shlri   r6, 30, r6
 881        andi    r6, 1, r6
 882        pta     resume_kernel, tr0
 883        bne     r6, ZERO, tr0           /* no further checks */
 884
 885        /* Check softirqs */
 886
 887#ifdef CONFIG_PREEMPT
 888        pta   ret_from_syscall, tr0
 889        blink   tr0, ZERO
 890
 891resume_kernel:
 892        CLI()
 893
 894        pta     restore_all, tr0
 895
 896        getcon  KCR0, r6
 897        ld.l    r6, TI_PRE_COUNT, r7
 898        beq/u   r7, ZERO, tr0
 899
 900need_resched:
 901        ld.l    r6, TI_FLAGS, r7
 902        movi    (1 << TIF_NEED_RESCHED), r8
 903        and     r8, r7, r8
 904        bne     r8, ZERO, tr0
 905
 906        getcon  SR, r7
 907        andi    r7, 0xf0, r7
 908        bne     r7, ZERO, tr0
 909
 910        movi    preempt_schedule_irq, r7
 911        ori     r7, 1, r7
 912        ptabs   r7, tr1
 913        blink   tr1, LINK
 914
 915        pta     need_resched, tr1
 916        blink   tr1, ZERO
 917#endif
 918
 919        .global ret_from_syscall
 920ret_from_syscall:
 921
 922ret_with_reschedule:
 923        getcon  KCR0, r6                ! r6 contains current_thread_info
 924        ld.l    r6, TI_FLAGS, r7        ! r7 contains current_thread_info->flags
 925
 926        movi    _TIF_NEED_RESCHED, r8
 927        and     r8, r7, r8
 928        pta     work_resched, tr0
 929        bne     r8, ZERO, tr0
 930
 931        pta     restore_all, tr1
 932
 933        movi    (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), r8
 934        and     r8, r7, r8
 935        pta     work_notifysig, tr0
 936        bne     r8, ZERO, tr0
 937
 938        blink   tr1, ZERO
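
/*
 * The decision ladder above, as a hedged C sketch (flag names from
 * asm/thread_info.h):
 *
 *	for (;;) {
 *		flags = current_thread_info()->flags;
 *		if (flags & _TIF_NEED_RESCHED) {	// work_resched
 *			schedule();
 *			continue;
 *		}
 *		if (flags & (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME))
 *			do_notify_resume(regs, flags);	// work_notifysig
 *		break;					// fall into restore_all
 *	}
 */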
 939
 940work_resched:
 941        pta     ret_from_syscall, tr0
 942        gettr   tr0, LINK
 943        movi    schedule, r6
 944        ptabs   r6, tr0
 945        blink   tr0, ZERO               /* Call schedule(), return on top */
 946
 947work_notifysig:
 948        gettr   tr1, LINK
 949
 950        movi    do_notify_resume, r6
 951        ptabs   r6, tr0
 952        or      SP, ZERO, r2
 953        or      r7, ZERO, r3
 954        blink   tr0, LINK           /* Call do_notify_resume(regs, current_thread_info->flags), return here */
 955
 956restore_all:
 957        /* Do prefetches */
 958
 959        ld.q    SP, FRAME_T(0), r6
 960        ld.q    SP, FRAME_T(1), r7
 961        ld.q    SP, FRAME_T(2), r8
 962        ld.q    SP, FRAME_T(3), r9
 963        ptabs   r6, tr0
 964        ptabs   r7, tr1
 965        ptabs   r8, tr2
 966        ptabs   r9, tr3
 967        ld.q    SP, FRAME_T(4), r6
 968        ld.q    SP, FRAME_T(5), r7
 969        ld.q    SP, FRAME_T(6), r8
 970        ld.q    SP, FRAME_T(7), r9
 971        ptabs   r6, tr4
 972        ptabs   r7, tr5
 973        ptabs   r8, tr6
 974        ptabs   r9, tr7
 975
 976        ld.q    SP, FRAME_R(0), r0
 977        ld.q    SP, FRAME_R(1), r1
 978        ld.q    SP, FRAME_R(2), r2
 979        ld.q    SP, FRAME_R(3), r3
 980        ld.q    SP, FRAME_R(4), r4
 981        ld.q    SP, FRAME_R(5), r5
 982        ld.q    SP, FRAME_R(6), r6
 983        ld.q    SP, FRAME_R(7), r7
 984        ld.q    SP, FRAME_R(8), r8
 985        ld.q    SP, FRAME_R(9), r9
 986        ld.q    SP, FRAME_R(10), r10
 987        ld.q    SP, FRAME_R(11), r11
 988        ld.q    SP, FRAME_R(12), r12
 989        ld.q    SP, FRAME_R(13), r13
 990        ld.q    SP, FRAME_R(14), r14
 991
 992        ld.q    SP, FRAME_R(16), r16
 993        ld.q    SP, FRAME_R(17), r17
 994        ld.q    SP, FRAME_R(18), r18
 995        ld.q    SP, FRAME_R(19), r19
 996        ld.q    SP, FRAME_R(20), r20
 997        ld.q    SP, FRAME_R(21), r21
 998        ld.q    SP, FRAME_R(22), r22
 999        ld.q    SP, FRAME_R(23), r23
1000        ld.q    SP, FRAME_R(24), r24
1001        ld.q    SP, FRAME_R(25), r25
1002        ld.q    SP, FRAME_R(26), r26
1003        ld.q    SP, FRAME_R(27), r27
1004        ld.q    SP, FRAME_R(28), r28
1005        ld.q    SP, FRAME_R(29), r29
1006        ld.q    SP, FRAME_R(30), r30
1007        ld.q    SP, FRAME_R(31), r31
1008        ld.q    SP, FRAME_R(32), r32
1009        ld.q    SP, FRAME_R(33), r33
1010        ld.q    SP, FRAME_R(34), r34
1011        ld.q    SP, FRAME_R(35), r35
1012        ld.q    SP, FRAME_R(36), r36
1013        ld.q    SP, FRAME_R(37), r37
1014        ld.q    SP, FRAME_R(38), r38
1015        ld.q    SP, FRAME_R(39), r39
1016        ld.q    SP, FRAME_R(40), r40
1017        ld.q    SP, FRAME_R(41), r41
1018        ld.q    SP, FRAME_R(42), r42
1019        ld.q    SP, FRAME_R(43), r43
1020        ld.q    SP, FRAME_R(44), r44
1021        ld.q    SP, FRAME_R(45), r45
1022        ld.q    SP, FRAME_R(46), r46
1023        ld.q    SP, FRAME_R(47), r47
1024        ld.q    SP, FRAME_R(48), r48
1025        ld.q    SP, FRAME_R(49), r49
1026        ld.q    SP, FRAME_R(50), r50
1027        ld.q    SP, FRAME_R(51), r51
1028        ld.q    SP, FRAME_R(52), r52
1029        ld.q    SP, FRAME_R(53), r53
1030        ld.q    SP, FRAME_R(54), r54
1031        ld.q    SP, FRAME_R(55), r55
1032        ld.q    SP, FRAME_R(56), r56
1033        ld.q    SP, FRAME_R(57), r57
1034        ld.q    SP, FRAME_R(58), r58
1035
1036        getcon  SR, r59
1037        movi    SR_BLOCK_EXC, r60
1038        or      r59, r60, r59
1039        putcon  r59, SR                 /* SR.BL = 1, keep nesting out */
1040        ld.q    SP, FRAME_S(FSSR), r61
1041        ld.q    SP, FRAME_S(FSPC), r62
1042        movi    SR_ASID_MASK, r60
1043        and     r59, r60, r59
1044        andc    r61, r60, r61           /* Clear out older ASID */
1045        or      r59, r61, r61           /* Retain current ASID */
1046        putcon  r61, SSR
1047        putcon  r62, SPC
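
/*
 * The ASID splice just performed, in C terms: keep the live ASID from
 * the current SR and restore everything else from the saved SSR.
 *
 *	ssr = (saved_ssr & ~SR_ASID_MASK) | (sr & SR_ASID_MASK);
 */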
1048
1049        /* Ignore FSYSCALL_ID */
1050
1051        ld.q    SP, FRAME_R(59), r59
1052        ld.q    SP, FRAME_R(60), r60
1053        ld.q    SP, FRAME_R(61), r61
1054        ld.q    SP, FRAME_R(62), r62
1055
1056        /* Last touch */
1057        ld.q    SP, FRAME_R(15), SP
1058        rte
1059        nop
1060
1061/*
1062 * Third level handlers for VBR-based exceptions. Adapting args to
1063 * and/or deflecting to fourth level handlers.
1064 *
1065 * Fourth level handlers interface.
1066 * Most are C-coded handlers directly pointed by the trap_jtable.
1067 * (Third = Fourth level)
1068 * Inputs:
1069 * (r2)   fault/interrupt code, entry number (e.g. NMI = 14,
1070 *        IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
1071 * (r3)   struct pt_regs *, original register's frame pointer
1072 * (r4)   Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
1073 * (r5)   TRA control register (for syscall/debug benefit only)
1074 * (LINK) return address
1075 * (SP)   = r3
1076 *
1077 * Kernel TLB fault handlers will get a slightly different interface.
1078 * (r2)   struct pt_regs *, original register's frame pointer
1079 * (r3)   page fault error code (see asm/thread_info.h)
1080 * (r4)   Effective Address of fault
1081 * (LINK) return address
1082 * (SP)   = r2
1083 *
 1084 * fpu_error_or_IRQA/B are helpers to deflect to the right cause.
1085 *
1086 */
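
/*
 * Hedged sketch of the kernel TLB fault handler interface described
 * above (the actual prototype lives on the C side, e.g. in mm/fault.c):
 *
 *	asmlinkage void do_page_fault(struct pt_regs *regs,
 *				      unsigned long error_code,
 *				      unsigned long address);
 */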
1087#ifdef CONFIG_MMU
1088tlb_miss_load:
1089        or      SP, ZERO, r2
1090        or      ZERO, ZERO, r3          /* Read */
1091        getcon  TEA, r4
1092        pta     call_do_page_fault, tr0
1093        beq     ZERO, ZERO, tr0
1094
1095tlb_miss_store:
1096        or      SP, ZERO, r2
1097        movi    FAULT_CODE_WRITE, r3            /* Write */
1098        getcon  TEA, r4
1099        pta     call_do_page_fault, tr0
1100        beq     ZERO, ZERO, tr0
1101
1102itlb_miss_or_IRQ:
1103        pta     its_IRQ, tr0
1104        beqi/u  r4, EVENT_INTERRUPT, tr0
1105
1106        /* ITLB miss */
1107        or      SP, ZERO, r2
1108        movi    FAULT_CODE_ITLB, r3
1109        getcon  TEA, r4
1110        /* Fall through */
1111
1112call_do_page_fault:
1113        movi    do_page_fault, r6
1114        ptabs   r6, tr0
1115        blink   tr0, ZERO
1116#endif /* CONFIG_MMU */
1117
1118fpu_error_or_IRQA:
1119        pta     its_IRQ, tr0
1120        beqi/l  r4, EVENT_INTERRUPT, tr0
1121#ifdef CONFIG_SH_FPU
1122        movi    fpu_state_restore_trap_handler, r6
1123#else
1124        movi    do_exception_error, r6
1125#endif
1126        ptabs   r6, tr0
1127        blink   tr0, ZERO
1128
1129fpu_error_or_IRQB:
1130        pta     its_IRQ, tr0
1131        beqi/l  r4, EVENT_INTERRUPT, tr0
1132#ifdef CONFIG_SH_FPU
1133        movi    fpu_state_restore_trap_handler, r6
1134#else
1135        movi    do_exception_error, r6
1136#endif
1137        ptabs   r6, tr0
1138        blink   tr0, ZERO
1139
1140its_IRQ:
1141        movi    do_IRQ, r6
1142        ptabs   r6, tr0
1143        blink   tr0, ZERO
1144
1145/*
1146 * system_call/unknown_trap third level handler:
1147 *
1148 * Inputs:
1149 * (r2)   fault/interrupt code, entry number (TRAP = 11)
1150 * (r3)   struct pt_regs *, original register's frame pointer
1151 * (r4)   Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
1152 * (r5)   TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
1153 * (SP)   = r3
1154 * (LINK) return address: ret_from_exception
1155 * (*r3)  Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
1156 *
1157 * Outputs:
1158 * (*r3)  Syscall reply (Saved r2)
 1159 * (LINK) In the syscall case only, it can be scrapped.
 1160 *        The common second-level post-handler will be ret_from_syscall.
 1161 *        The common (non-trace) exit point to that is syscall_ret (saving
 1162 *        the result to r2). The common bad exit point is syscall_bad
 1163 *        (returning -ENOSYS, saved to r2).
1164 *
1165 */
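
/*
 * TRA decoding performed below, as a C sketch (layout 0x00xyzzzz as
 * described above):
 *
 *	if ((tra >> 20) != 1)
 *		goto unknown_trap;		// not a 0x1yzzzz syscall trapa
 *	nr = tra & 0x1ff;			// syscall number
 *	if (nr > NR_syscalls - 1)
 *		return -ENOSYS;			// syscall_bad
 */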
1166
1167unknown_trap:
1168        /* Unknown Trap or User Trace */
1169        movi    do_unknown_trapa, r6
1170        ptabs   r6, tr0
1171        ld.q    r3, FRAME_R(9), r2      /* r2 = #arg << 16 | syscall # */
1172        andi    r2, 0x1ff, r2           /* r2 = syscall # */
1173        blink   tr0, LINK
1174
1175        pta     syscall_ret, tr0
1176        blink   tr0, ZERO
1177
 1178        /* New syscall implementation */
1179system_call:
1180        pta     unknown_trap, tr0
1181        or      r5, ZERO, r4            /* TRA (=r5) -> r4 */
1182        shlri   r4, 20, r4
1183        bnei    r4, 1, tr0              /* unknown_trap if not 0x1yzzzz */
1184
1185        /* It's a system call */
1186        st.q    r3, FRAME_S(FSYSCALL_ID), r5    /* ID (0x1yzzzz) -> stack */
1187        andi    r5, 0x1ff, r5                   /* syscall # -> r5        */
1188
1189        STI()
1190
1191        pta     syscall_allowed, tr0
1192        movi    NR_syscalls - 1, r4     /* Last valid */
1193        bgeu/l  r4, r5, tr0
1194
1195syscall_bad:
1196        /* Return ENOSYS ! */
1197        movi    -(ENOSYS), r2           /* Fall-through */
1198
1199        .global syscall_ret
1200syscall_ret:
1201        st.q    SP, FRAME_R(9), r2      /* Expecting SP back to BASIC frame */
1202        ld.q    SP, FRAME_S(FSPC), r2
1203        addi    r2, 4, r2               /* Move PC, being pre-execution event */
1204        st.q    SP, FRAME_S(FSPC), r2
1205        pta     ret_from_syscall, tr0
1206        blink   tr0, ZERO
1207
1208
1209/*  A different return path for ret_from_fork, because we now need
1210 *  to call schedule_tail with the later kernels. Because prev is
 1211 *  loaded into r2 by switch_to(), we can just call it straight away.
1212 */
1213
1214.global ret_from_fork
1215ret_from_fork:
1216
1217        movi    schedule_tail,r5
1218        ori     r5, 1, r5
1219        ptabs   r5, tr0
1220        blink   tr0, LINK
1221
1222        ld.q    SP, FRAME_S(FSPC), r2
1223        addi    r2, 4, r2               /* Move PC, being pre-execution event */
1224        st.q    SP, FRAME_S(FSPC), r2
1225        pta     ret_from_syscall, tr0
1226        blink   tr0, ZERO
1227
1228.global ret_from_kernel_thread
1229ret_from_kernel_thread:
1230
1231        movi    schedule_tail,r5
1232        ori     r5, 1, r5
1233        ptabs   r5, tr0
1234        blink   tr0, LINK
1235
1236        ld.q    SP, FRAME_R(2), r2
1237        ld.q    SP, FRAME_R(3), r3
1238        ptabs   r3, tr0
1239        blink   tr0, LINK
1240
1241        ld.q    SP, FRAME_S(FSPC), r2
1242        addi    r2, 4, r2               /* Move PC, being pre-execution event */
1243        st.q    SP, FRAME_S(FSPC), r2
1244        pta     ret_from_syscall, tr0
1245        blink   tr0, ZERO
1246
1247syscall_allowed:
1248        /* Use LINK to deflect the exit point, default is syscall_ret */
1249        pta     syscall_ret, tr0
1250        gettr   tr0, LINK
1251        pta     syscall_notrace, tr0
1252
1253        getcon  KCR0, r2
1254        ld.l    r2, TI_FLAGS, r4
1255        movi    _TIF_WORK_SYSCALL_MASK, r6
1256        and     r6, r4, r6
1257        beq/l   r6, ZERO, tr0
1258
1259        /* Trace it by calling syscall_trace before and after */
1260        movi    do_syscall_trace_enter, r4
1261        or      SP, ZERO, r2
1262        ptabs   r4, tr0
1263        blink   tr0, LINK
1264
1265        /* Save the retval */
1266        st.q    SP, FRAME_R(2), r2
1267
1268        /* Reload syscall number as r5 is trashed by do_syscall_trace_enter */
1269        ld.q    SP, FRAME_S(FSYSCALL_ID), r5
1270        andi    r5, 0x1ff, r5
1271
1272        pta     syscall_ret_trace, tr0
1273        gettr   tr0, LINK
1274
1275syscall_notrace:
1276        /* Now point to the appropriate 4th level syscall handler */
1277        movi    sys_call_table, r4
1278        shlli   r5, 2, r5
1279        ldx.l   r4, r5, r5
1280        ptabs   r5, tr0
1281
1282        /* Prepare original args */
1283        ld.q    SP, FRAME_R(2), r2
1284        ld.q    SP, FRAME_R(3), r3
1285        ld.q    SP, FRAME_R(4), r4
1286        ld.q    SP, FRAME_R(5), r5
1287        ld.q    SP, FRAME_R(6), r6
1288        ld.q    SP, FRAME_R(7), r7
1289
1290        /* And now the trick for those syscalls requiring regs * ! */
1291        or      SP, ZERO, r8
1292
1293        /* Call it */
1294        blink   tr0, ZERO       /* LINK is already properly set */
1295
1296syscall_ret_trace:
1297        /* We get back here only if under trace */
1298        st.q    SP, FRAME_R(9), r2      /* Save return value */
1299
1300        movi    do_syscall_trace_leave, LINK
1301        or      SP, ZERO, r2
1302        ptabs   LINK, tr0
1303        blink   tr0, LINK
1304
1305        /* This needs to be done after any syscall tracing */
1306        ld.q    SP, FRAME_S(FSPC), r2
1307        addi    r2, 4, r2       /* Move PC, being pre-execution event */
1308        st.q    SP, FRAME_S(FSPC), r2
1309
1310        pta     ret_from_syscall, tr0
1311        blink   tr0, ZERO               /* Resume normal return sequence */
1312
1313/*
1314 * --- Switch to running under a particular ASID and return the previous ASID value
1315 * --- The caller is assumed to have done a cli before calling this.
1316 *
1317 * Input r2 : new ASID
1318 * Output r2 : old ASID
1319 */
1320
1321        .global switch_and_save_asid
1322switch_and_save_asid:
1323        getcon  sr, r0
1324        movi    255, r4
1325        shlli   r4, 16, r4      /* r4 = mask to select ASID */
1326        and     r0, r4, r3      /* r3 = shifted old ASID */
1327        andi    r2, 255, r2     /* mask down new ASID */
1328        shlli   r2, 16, r2      /* align new ASID against SR.ASID */
1329        andc    r0, r4, r0      /* efface old ASID from SR */
1330        or      r0, r2, r0      /* insert the new ASID */
1331        putcon  r0, ssr
1332        movi    1f, r0
1333        putcon  r0, spc
1334        rte
1335        nop
13361:
1337        ptabs   LINK, tr0
1338        shlri   r3, 16, r2      /* r2 = old ASID */
1339        blink tr0, r63
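
/*
 * C-level contract of the routine above (sketch; SR.ASID is bits 16-23):
 *
 *	unsigned long switch_and_save_asid(unsigned long new_asid)
 *	{
 *		unsigned long old = (sr >> 16) & 255;
 *		sr = (sr & ~(255UL << 16)) | ((new_asid & 255) << 16);
 *		return old;	// applied via rte so the new ASID takes effect
 *	}
 */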
1340
1341        .global route_to_panic_handler
1342route_to_panic_handler:
1343        /* Switch to real mode, goto panic_handler, don't return.  Useful for
1344           last-chance debugging, e.g. if no output wants to go to the console.
1345           */
1346
1347        movi    panic_handler - CONFIG_PAGE_OFFSET, r1
1348        ptabs   r1, tr0
1349        pta     1f, tr1
1350        gettr   tr1, r0
1351        putcon  r0, spc
1352        getcon  sr, r0
1353        movi    1, r1
1354        shlli   r1, 31, r1
1355        andc    r0, r1, r0
1356        putcon  r0, ssr
1357        rte
1358        nop
13591:      /* Now in real mode */
1360        blink tr0, r63
1361        nop
1362
1363        .global peek_real_address_q
1364peek_real_address_q:
 1365        /* One arg, one result:
 1366           r2 : real mode address to peek
 1367           r2 (out) : result quadword
1368
1369           This is provided as a cheapskate way of manipulating device
1370           registers for debugging (to avoid the need to ioremap the debug
1371           module, and to avoid the need to ioremap the watchpoint
1372           controller in a way that identity maps sufficient bits to avoid the
1373           SH5-101 cut2 silicon defect).
1374
1375           This code is not performance critical
1376        */
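
        /* Hedged usage sketch (the argument must be a physical address;
         * the device register address below is purely hypothetical):
         *
         *	u64 val = peek_real_address_q(0xffffffff80000000UL);
         */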
1377
1378        add.l   r2, r63, r2     /* sign extend address */
1379        getcon  sr, r0          /* r0 = saved original SR */
1380        movi    1, r1
1381        shlli   r1, 28, r1
 1382        or      r0, r1, r1      /* r1 = r0 with block bit set */
1383        putcon  r1, sr          /* now in critical section */
1384        movi    1, r36
1385        shlli   r36, 31, r36
1386        andc    r1, r36, r1     /* turn sr.mmu off in real mode section */
1387
1388        putcon  r1, ssr
1389        movi    .peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
1390        movi    1f, r37         /* virtual mode return addr */
1391        putcon  r36, spc
1392
1393        synco
1394        rte
1395        nop
1396
1397.peek0: /* come here in real mode, don't touch caches!!
1398           still in critical section (sr.bl==1) */
1399        putcon  r0, ssr
1400        putcon  r37, spc
1401        /* Here's the actual peek.  If the address is bad, all bets are now off
1402         * what will happen (handlers invoked in real-mode = bad news) */
1403        ld.q    r2, 0, r2
1404        synco
1405        rte     /* Back to virtual mode */
1406        nop
1407
14081:
1409        ptabs   LINK, tr0
1410        blink   tr0, r63
1411
1412        .global poke_real_address_q
1413poke_real_address_q:
1414        /* Two args:
1415           r2 : real mode address to poke
1416           r3 : quadword value to write.
1417
1418           This is provided as a cheapskate way of manipulating device
1419           registers for debugging (to avoid the need to ioremap the debug
1420           module, and to avoid the need to ioremap the watchpoint
1421           controller in a way that identity maps sufficient bits to avoid the
1422           SH5-101 cut2 silicon defect).
1423
1424           This code is not performance critical
1425        */
1426
1427        add.l   r2, r63, r2     /* sign extend address */
1428        getcon  sr, r0          /* r0 = saved original SR */
1429        movi    1, r1
1430        shlli   r1, 28, r1
 1431        or      r0, r1, r1      /* r1 = r0 with block bit set */
1432        putcon  r1, sr          /* now in critical section */
1433        movi    1, r36
1434        shlli   r36, 31, r36
1435        andc    r1, r36, r1     /* turn sr.mmu off in real mode section */
1436
1437        putcon  r1, ssr
1438        movi    .poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
1439        movi    1f, r37         /* virtual mode return addr */
1440        putcon  r36, spc
1441
1442        synco
1443        rte
1444        nop
1445
1446.poke0: /* come here in real mode, don't touch caches!!
1447           still in critical section (sr.bl==1) */
1448        putcon  r0, ssr
1449        putcon  r37, spc
1450        /* Here's the actual poke.  If the address is bad, all bets are now off
1451         * what will happen (handlers invoked in real-mode = bad news) */
1452        st.q    r2, 0, r3
1453        synco
1454        rte     /* Back to virtual mode */
1455        nop
1456
14571:
1458        ptabs   LINK, tr0
1459        blink   tr0, r63
1460
1461#ifdef CONFIG_MMU
1462/*
1463 * --- User Access Handling Section
1464 */
1465
1466/*
 1467 * User access support. It has all moved to non-inlined assembler
 1468 * functions in here.
1469 *
1470 * __kernel_size_t __copy_user(void *__to, const void *__from,
1471 *                             __kernel_size_t __n)
1472 *
1473 * Inputs:
1474 * (r2)  target address
1475 * (r3)  source address
1476 * (r4)  size in bytes
1477 *
 1478 * Outputs:
1479 * (*r2) target data
1480 * (r2)  non-copied bytes
1481 *
1482 * If a fault occurs on the user pointer, bail out early and return the
1483 * number of bytes not copied in r2.
1484 * Strategy : for large blocks, call a real memcpy function which can
1485 * move >1 byte at a time using unaligned ld/st instructions, and can
1486 * manipulate the cache using prefetch + alloco to improve the speed
1487 * further.  If a fault occurs in that function, just revert to the
1488 * byte-by-byte approach used for small blocks; this is rare so the
1489 * performance hit for that case does not matter.
1490 *
1491 * For small blocks it's not worth the overhead of setting up and calling
1492 * the memcpy routine; do the copy a byte at a time.
1493 *
1494 */
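
/*
 * The strategy above, as a hedged pseudo-C sketch (the fault edges, which
 * really go through __copy_user_fixup and the fixup tables, are noted in
 * comments since they cannot be written in plain C):
 *
 *	if (n >= 16) {
 *		if (copy_user_memcpy(to, from, n) did not fault)
 *			return 0;	// fast path done
 *		// fault -> __copy_user_fixup: restore args, fall through
 *	}
 *	while (n) {			// byte-by-byte path
 *		*to++ = *from++;	// either access may fault -> early exit
 *		n--;
 *	}
 *	return n;			// bytes NOT copied (0 on success)
 */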
1495        .global __copy_user
1496__copy_user:
1497        pta     __copy_user_byte_by_byte, tr1
1498        movi    16, r0 ! this value is a best guess, should tune it by benchmarking
1499        bge/u   r0, r4, tr1
1500        pta copy_user_memcpy, tr0
1501        addi    SP, -32, SP
1502        /* Save arguments in case we have to fix-up unhandled page fault */
1503        st.q    SP, 0, r2
1504        st.q    SP, 8, r3
1505        st.q    SP, 16, r4
1506        st.q    SP, 24, r35 ! r35 is callee-save
1507        /* Save LINK in a register to reduce RTS time later (otherwise
1508           ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
1509        ori     LINK, 0, r35
1510        blink   tr0, LINK
1511
1512        /* Copy completed normally if we get back here */
1513        ptabs   r35, tr0
1514        ld.q    SP, 24, r35
1515        /* don't restore r2-r4, pointless */
1516        /* set result=r2 to zero as the copy must have succeeded. */
1517        or      r63, r63, r2
1518        addi    SP, 32, SP
1519        blink   tr0, r63 ! RTS
1520
1521        .global __copy_user_fixup
1522__copy_user_fixup:
1523        /* Restore stack frame */
1524        ori     r35, 0, LINK
1525        ld.q    SP, 24, r35
1526        ld.q    SP, 16, r4
1527        ld.q    SP,  8, r3
1528        ld.q    SP,  0, r2
1529        addi    SP, 32, SP
1530        /* Fall through to original code, in the 'same' state we entered with */
1531
1532/* The slow byte-by-byte method is used if the fast copy traps due to a bad
1533   user address.  In that rare case, the speed drop can be tolerated. */
1534__copy_user_byte_by_byte:
1535        pta     ___copy_user_exit, tr1
1536        pta     ___copy_user1, tr0
1537        beq/u   r4, r63, tr1    /* early exit for zero length copy */
1538        sub     r2, r3, r0
1539        addi    r0, -1, r0
1540
1541___copy_user1:
1542        ld.b    r3, 0, r5               /* Fault address 1 */
1543
1544        /* Could rewrite this to use just 1 add, but the second comes 'free'
1545           due to load latency */
1546        addi    r3, 1, r3
1547        addi    r4, -1, r4              /* No real fixup required */
1548___copy_user2:
1549        stx.b   r3, r0, r5              /* Fault address 2 */
1550        bne     r4, ZERO, tr0
1551
1552___copy_user_exit:
1553        or      r4, ZERO, r2
1554        ptabs   LINK, tr0
1555        blink   tr0, ZERO
1556
1557/*
1558 * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
1559 *
1560 * Inputs:
1561 * (r2)  target address
1562 * (r3)  size in bytes
1563 *
 1564 * Outputs:
1565 * (*r2) zero-ed target data
1566 * (r2)  non-zero-ed bytes
1567 */
1568        .global __clear_user
1569__clear_user:
1570        pta     ___clear_user_exit, tr1
1571        pta     ___clear_user1, tr0
1572        beq/u   r3, r63, tr1
1573
1574___clear_user1:
1575        st.b    r2, 0, ZERO             /* Fault address */
1576        addi    r2, 1, r2
1577        addi    r3, -1, r3              /* No real fixup required */
1578        bne     r3, ZERO, tr0
1579
1580___clear_user_exit:
1581        or      r3, ZERO, r2
1582        ptabs   LINK, tr0
1583        blink   tr0, ZERO
1584
1585#endif /* CONFIG_MMU */
1586
1587/*
1588 * extern long __get_user_asm_?(void *val, long addr)
1589 *
1590 * Inputs:
1591 * (r2)  dest address
1592 * (r3)  source address (in User Space)
1593 *
 1594 * Outputs:
1595 * (r2)  -EFAULT (faulting)
1596 *       0       (not faulting)
1597 */
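
/*
 * Hedged usage sketch: a faulting load at one of the ___get_user_asm_*1
 * labels below is redirected by fixup code elsewhere to the matching
 * *_exit label, leaving the preloaded -EFAULT in r2 as the return value.
 *
 *	long err = __get_user_asm_b(&val, user_ptr);	// 0 or -EFAULT
 */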
1598        .global __get_user_asm_b
1599__get_user_asm_b:
1600        or      r2, ZERO, r4
1601        movi    -(EFAULT), r2           /* r2 = reply, no real fixup */
1602
1603___get_user_asm_b1:
1604        ld.b    r3, 0, r5               /* r5 = data */
1605        st.b    r4, 0, r5
1606        or      ZERO, ZERO, r2
1607
1608___get_user_asm_b_exit:
1609        ptabs   LINK, tr0
1610        blink   tr0, ZERO
1611
1612
1613        .global __get_user_asm_w
1614__get_user_asm_w:
1615        or      r2, ZERO, r4
1616        movi    -(EFAULT), r2           /* r2 = reply, no real fixup */
1617
1618___get_user_asm_w1:
1619        ld.w    r3, 0, r5               /* r5 = data */
1620        st.w    r4, 0, r5
1621        or      ZERO, ZERO, r2
1622
1623___get_user_asm_w_exit:
1624        ptabs   LINK, tr0
1625        blink   tr0, ZERO
1626
1627
1628        .global __get_user_asm_l
1629__get_user_asm_l:
1630        or      r2, ZERO, r4
1631        movi    -(EFAULT), r2           /* r2 = reply, no real fixup */
1632
1633___get_user_asm_l1:
1634        ld.l    r3, 0, r5               /* r5 = data */
1635        st.l    r4, 0, r5
1636        or      ZERO, ZERO, r2
1637
1638___get_user_asm_l_exit:
1639        ptabs   LINK, tr0
1640        blink   tr0, ZERO
1641
1642
1643        .global __get_user_asm_q
1644__get_user_asm_q:
1645        or      r2, ZERO, r4
1646        movi    -(EFAULT), r2           /* r2 = reply, no real fixup */
1647
1648___get_user_asm_q1:
1649        ld.q    r3, 0, r5               /* r5 = data */
1650        st.q    r4, 0, r5
1651        or      ZERO, ZERO, r2
1652
1653___get_user_asm_q_exit:
1654        ptabs   LINK, tr0
1655        blink   tr0, ZERO
1656
1657/*
1658 * extern long __put_user_asm_?(void *pval, long addr)
1659 *
1660 * Inputs:
1661 * (r2)  kernel pointer to value
1662 * (r3)  dest address (in User Space)
1663 *
1664 * Outputs:
1665 * (r2)  -EFAULT (faulting)
1666 *       0       (not faulting)
1667 */
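/*
 * All eight helpers share one fixup scheme: r2 is preloaded with -EFAULT,
 * the single user-space access is the only instruction that can fault, and
 * its __ex_table entry points at the matching ..._exit label, so a fault
 * returns the preloaded -EFAULT unchanged. A hedged caller sketch
 * (write_user_int is hypothetical):
 *
 *	extern long __put_user_asm_l(void *pval, long addr);
 *
 *	int write_user_int(int __user *uptr, int val)
 *	{
 *		return __put_user_asm_l(&val, (long)uptr);	// 0 or -EFAULT
 *	}
 */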
1668        .global __put_user_asm_b
1669__put_user_asm_b:
1670        ld.b    r2, 0, r4               /* r4 = data */
1671        movi    -(EFAULT), r2           /* r2 = reply, no real fixup */
1672
1673___put_user_asm_b1:
1674        st.b    r3, 0, r4
1675        or      ZERO, ZERO, r2
1676
1677___put_user_asm_b_exit:
1678        ptabs   LINK, tr0
1679        blink   tr0, ZERO
1680
1681
1682        .global __put_user_asm_w
1683__put_user_asm_w:
1684        ld.w    r2, 0, r4               /* r4 = data */
1685        movi    -(EFAULT), r2           /* r2 = reply, no real fixup */
1686
1687___put_user_asm_w1:
1688        st.w    r3, 0, r4
1689        or      ZERO, ZERO, r2
1690
1691___put_user_asm_w_exit:
1692        ptabs   LINK, tr0
1693        blink   tr0, ZERO
1694
1695
1696        .global __put_user_asm_l
1697__put_user_asm_l:
1698        ld.l    r2, 0, r4               /* r4 = data */
1699        movi    -(EFAULT), r2           /* r2 = reply, no real fixup */
1700
1701___put_user_asm_l1:
1702        st.l    r3, 0, r4
1703        or      ZERO, ZERO, r2
1704
1705___put_user_asm_l_exit:
1706        ptabs   LINK, tr0
1707        blink   tr0, ZERO
1708
1709
1710        .global __put_user_asm_q
1711__put_user_asm_q:
1712        ld.q    r2, 0, r4               /* r4 = data */
1713        movi    -(EFAULT), r2           /* r2 = reply, no real fixup */
1714
1715___put_user_asm_q1:
1716        st.q    r3, 0, r4
1717        or      ZERO, ZERO, r2
1718
1719___put_user_asm_q_exit:
1720        ptabs   LINK, tr0
1721        blink   tr0, ZERO
1722
1723panic_stash_regs:
1724        /* The idea is: when we get an unhandled panic, we dump the registers
1725           to a known memory location, then just sit in a tight loop.
1726           This allows the human to look at the memory region through the GDB
1727           session (assuming the debug module's SHwy initiator isn't locked up
1728           or anything), to hopefully analyze the cause of the panic. */
1729
1730        /* On entry, former r15 (SP) is in DCR
1731           former r0  is at resvec_saved_area + 0
1732           former r1  is at resvec_saved_area + 8
1733           former tr0 is at resvec_saved_area + 32
1734           DCR is the only register whose value is lost altogether.
1735        */
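/*
 * With a debugger attached through the debug module, one plausible way to
 * read the dump back (a hedged example; 86 quadwords cover r0..r63,
 * tr0..tr7 and the 14 control registers stored below):
 *
 *	(gdb) x/86gx 0xffffffff80000000
 */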
1736
1737        movi    0xffffffff80000000, r0 ! physical address of dump area
1738        ld.q    SP, 0x000, r1   ! former r0
1739        st.q    r0,  0x000, r1
1740        ld.q    SP, 0x008, r1   ! former r1
1741        st.q    r0,  0x008, r1
1742        st.q    r0,  0x010, r2
1743        st.q    r0,  0x018, r3
1744        st.q    r0,  0x020, r4
1745        st.q    r0,  0x028, r5
1746        st.q    r0,  0x030, r6
1747        st.q    r0,  0x038, r7
1748        st.q    r0,  0x040, r8
1749        st.q    r0,  0x048, r9
1750        st.q    r0,  0x050, r10
1751        st.q    r0,  0x058, r11
1752        st.q    r0,  0x060, r12
1753        st.q    r0,  0x068, r13
1754        st.q    r0,  0x070, r14
1755        getcon  dcr, r14        ! former SP (r15) was stashed in DCR
1756        st.q    r0,  0x078, r14
1757        st.q    r0,  0x080, r16
1758        st.q    r0,  0x088, r17
1759        st.q    r0,  0x090, r18
1760        st.q    r0,  0x098, r19
1761        st.q    r0,  0x0a0, r20
1762        st.q    r0,  0x0a8, r21
1763        st.q    r0,  0x0b0, r22
1764        st.q    r0,  0x0b8, r23
1765        st.q    r0,  0x0c0, r24
1766        st.q    r0,  0x0c8, r25
1767        st.q    r0,  0x0d0, r26
1768        st.q    r0,  0x0d8, r27
1769        st.q    r0,  0x0e0, r28
1770        st.q    r0,  0x0e8, r29
1771        st.q    r0,  0x0f0, r30
1772        st.q    r0,  0x0f8, r31
1773        st.q    r0,  0x100, r32
1774        st.q    r0,  0x108, r33
1775        st.q    r0,  0x110, r34
1776        st.q    r0,  0x118, r35
1777        st.q    r0,  0x120, r36
1778        st.q    r0,  0x128, r37
1779        st.q    r0,  0x130, r38
1780        st.q    r0,  0x138, r39
1781        st.q    r0,  0x140, r40
1782        st.q    r0,  0x148, r41
1783        st.q    r0,  0x150, r42
1784        st.q    r0,  0x158, r43
1785        st.q    r0,  0x160, r44
1786        st.q    r0,  0x168, r45
1787        st.q    r0,  0x170, r46
1788        st.q    r0,  0x178, r47
1789        st.q    r0,  0x180, r48
1790        st.q    r0,  0x188, r49
1791        st.q    r0,  0x190, r50
1792        st.q    r0,  0x198, r51
1793        st.q    r0,  0x1a0, r52
1794        st.q    r0,  0x1a8, r53
1795        st.q    r0,  0x1b0, r54
1796        st.q    r0,  0x1b8, r55
1797        st.q    r0,  0x1c0, r56
1798        st.q    r0,  0x1c8, r57
1799        st.q    r0,  0x1d0, r58
1800        st.q    r0,  0x1d8, r59
1801        st.q    r0,  0x1e0, r60
1802        st.q    r0,  0x1e8, r61
1803        st.q    r0,  0x1f0, r62
1804        st.q    r0,  0x1f8, r63 ! bogus, but for consistency's sake...
1805
1806        ld.q    SP, 0x020, r1  ! former tr0
1807        st.q    r0,  0x200, r1
1808        gettr   tr1, r1
1809        st.q    r0,  0x208, r1
1810        gettr   tr2, r1
1811        st.q    r0,  0x210, r1
1812        gettr   tr3, r1
1813        st.q    r0,  0x218, r1
1814        gettr   tr4, r1
1815        st.q    r0,  0x220, r1
1816        gettr   tr5, r1
1817        st.q    r0,  0x228, r1
1818        gettr   tr6, r1
1819        st.q    r0,  0x230, r1
1820        gettr   tr7, r1
1821        st.q    r0,  0x238, r1
1822
1823        getcon  sr,  r1
1824        getcon  ssr,  r2
1825        getcon  pssr,  r3
1826        getcon  spc,  r4
1827        getcon  pspc,  r5
1828        getcon  intevt,  r6
1829        getcon  expevt,  r7
1830        getcon  pexpevt,  r8
1831        getcon  tra,  r9
1832        getcon  tea,  r10
1833        getcon  kcr0, r11
1834        getcon  kcr1, r12
1835        getcon  vbr,  r13
1836        getcon  resvec,  r14
1837
1838        st.q    r0,  0x240, r1
1839        st.q    r0,  0x248, r2
1840        st.q    r0,  0x250, r3
1841        st.q    r0,  0x258, r4
1842        st.q    r0,  0x260, r5
1843        st.q    r0,  0x268, r6
1844        st.q    r0,  0x270, r7
1845        st.q    r0,  0x278, r8
1846        st.q    r0,  0x280, r9
1847        st.q    r0,  0x288, r10
1848        st.q    r0,  0x290, r11
1849        st.q    r0,  0x298, r12
1850        st.q    r0,  0x2a0, r13
1851        st.q    r0,  0x2a8, r14
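/*
 * The resulting layout, as a hypothetical C overlay for decoding the dump
 * (offsets match the stores above; u64 per <linux/types.h>):
 *
 *	struct panic_reg_dump {
 *		u64 gpr[64];	// 0x000: r0..r63; slot 15 = former SP (from
 *				//        the DCR stash), slot 63 bogus
 *		u64 tr[8];	// 0x200: tr0..tr7
 *		u64 sr, ssr, pssr, spc, pspc;		// 0x240..0x260
 *		u64 intevt, expevt, pexpevt, tra, tea;	// 0x268..0x288
 *		u64 kcr0, kcr1, vbr, resvec;		// 0x290..0x2a8
 *	};
 */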
1852
1853        getcon  SPC,r2
1854        getcon  SSR,r3
1855        getcon  EXPEVT,r4
1856        /* Prepare to jump to C - physical address */
1857        movi    panic_handler-CONFIG_PAGE_OFFSET, r1
1858        ori     r1, 1, r1               ! bit 0 set: branch target is SHmedia
1859        ptabs   r1, tr0
1860        getcon  DCR, SP
1861        blink   tr0, ZERO
1862        nop
1863        nop
1864        nop
1865        nop
1866
1867
1868
1869
1870/*
1871 * --- Signal Handling Section
1872 */
1873
1874/*
1875 * extern long long _sa_default_rt_restorer
1876 * extern long long _sa_default_restorer
1877 *
1878 *               or, better,
1879 *
1880 * extern void _sa_default_rt_restorer(void)
1881 * extern void _sa_default_restorer(void)
1882 *
1883 * Code prototypes to do a sys_rt_sigreturn() or sys_sigreturn()
1884 * from user space. Copied into user space by signal management.
1885 * Both must be quadword aligned and 2 quadwords long (4 instructions).
1886 *
1887 */
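/*
 * In user-space terms each stub amounts to the following (a sketch; note
 * how movi/shori build r9 = (0x10 << 16) | syscall number for trapa):
 *
 *	void sa_default_rt_restorer(void)
 *	{
 *		syscall(__NR_rt_sigreturn);
 *	}
 */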
1888        .balign 8
1889        .global sa_default_rt_restorer
1890sa_default_rt_restorer:
1891        movi    0x10, r9
1892        shori   __NR_rt_sigreturn, r9
1893        trapa   r9
1894        nop
1895
1896        .balign 8
1897        .global sa_default_restorer
1898sa_default_restorer:
1899        movi    0x10, r9
1900        shori   __NR_sigreturn, r9
1901        trapa   r9
1902        nop
1903
1904/*
1905 * --- __ex_table Section
1906 */
1907
1908/*
1909 * User Access Exception Table.
1910 */
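/*
 * Each entry below pairs a possibly-faulting instruction address with the
 * label to resume at, conceptually the generic kernel format:
 *
 *	struct exception_table_entry {
 *		unsigned long insn;	// address that may fault
 *		unsigned long fixup;	// address to branch to instead
 *	};
 *
 * On a user-access fault the handler searches this table (see
 * search_exception_tables()) and, on a match, resumes at the fixup.
 */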
1911        .section        __ex_table,  "a"
1912
1913        .global asm_uaccess_start       /* Just a marker */
1914asm_uaccess_start:
1915
1916#ifdef CONFIG_MMU
1917        .long   ___copy_user1, ___copy_user_exit
1918        .long   ___copy_user2, ___copy_user_exit
1919        .long   ___clear_user1, ___clear_user_exit
1920#endif
1921        .long   ___get_user_asm_b1, ___get_user_asm_b_exit
1922        .long   ___get_user_asm_w1, ___get_user_asm_w_exit
1923        .long   ___get_user_asm_l1, ___get_user_asm_l_exit
1924        .long   ___get_user_asm_q1, ___get_user_asm_q_exit
1925        .long   ___put_user_asm_b1, ___put_user_asm_b_exit
1926        .long   ___put_user_asm_w1, ___put_user_asm_w_exit
1927        .long   ___put_user_asm_l1, ___put_user_asm_l_exit
1928        .long   ___put_user_asm_q1, ___put_user_asm_q_exit
1929
1930        .global asm_uaccess_end         /* Just a marker */
1931asm_uaccess_end:
1932
1933
1934
1935
1936/*
1937 * --- .init.text Section
1938 */
1939
1940        __INIT
1941
1942/*
1943 * void trap_init (void)
1944 *
1945 */
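/*
 * Restated as hedged C-like pseudocode (names as used below):
 *
 *	VBR    = LVBR_block & ~3;				// vectors, MMU on
 *	RESVEC = (LRESVEC_block - CONFIG_PAGE_OFFSET) | 1;	// physical, MMUOFF
 *	if (LVBR_block_end - LVBR_block != BLOCK_SIZE)
 *		for (;;) ;	// handler block wrong size: spin forever
 *	SR &= SR_UNBLOCK_EXC;	// re-enable exceptions (clear SR.BL)
 */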
1946        .global trap_init
1947trap_init:
1948        addi    SP, -24, SP                     /* Room to save r28/r29/r30 */
1949        st.q    SP, 0, r28
1950        st.q    SP, 8, r29
1951        st.q    SP, 16, r30
1952
1953        /* Set VBR and RESVEC */
1954        movi    LVBR_block, r19
1955        andi    r19, -4, r19                    /* reset MMUOFF + reserved */
1956        /* For RESVEC exceptions we force the MMU off, which means we need the
1957           physical address. */
1958        movi    LRESVEC_block-CONFIG_PAGE_OFFSET, r20
1959        andi    r20, -4, r20                    /* reset reserved */
1960        ori     r20, 1, r20                     /* set MMUOFF */
1961        putcon  r19, VBR
1962        putcon  r20, RESVEC
1963
1964        /* Sanity check */
1965        movi    LVBR_block_end, r21
1966        andi    r21, -4, r21
1967        movi    BLOCK_SIZE, r29                 /* r29 = expected size */
1968        or      r19, ZERO, r30
1969        add     r19, r29, r19
1970
1971        /*
1972         * Ugly, but better to loop forever now than to crash afterwards.
1973         * We should print a message, but if the LVBR or LRESVEC blocks
1974         * have been touched we should not be surprised if we get stuck
1975         * in trap_init().
1976         */
1977        pta     trap_init_loop, tr1
1978        gettr   tr1, r28                        /* r28 = trap_init_loop */
1979        sub     r21, r30, r30                   /* r30 = actual size */
1980
1981        /*
1982         * The VBR/RESVEC handler block is bigger than allowed, so the
1983         * handlers overlap. Very bad. Just loop forever.
1984         * (r28) panic/loop address
1985         * (r29) expected size
1986         * (r30) actual size
1987         */
1988trap_init_loop:
1989        bne     r19, r21, tr1
1990
1991        /* Now that the exception vectors are set up, reset SR.BL */
1992        getcon  SR, r22
1993        movi    SR_UNBLOCK_EXC, r23
1994        and     r22, r23, r22
1995        putcon  r22, SR
1996
1997        addi    SP, 24, SP
1998        ptabs   LINK, tr0
1999        blink   tr0, ZERO
2000
2001