linux/arch/powerpc/kernel/exceptions-64e.S
   1/*
   2 *  Boot code and exception vectors for Book3E processors
   3 *
   4 *  Copyright (C) 2007 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
   5 *
   6 *  This program is free software; you can redistribute it and/or
   7 *  modify it under the terms of the GNU General Public License
   8 *  as published by the Free Software Foundation; either version
   9 *  2 of the License, or (at your option) any later version.
  10 */
  11
  12#include <linux/threads.h>
  13#include <asm/reg.h>
  14#include <asm/page.h>
  15#include <asm/ppc_asm.h>
  16#include <asm/asm-offsets.h>
  17#include <asm/cputable.h>
  18#include <asm/setup.h>
  19#include <asm/thread_info.h>
  20#include <asm/exception-64e.h>
  21#include <asm/bug.h>
  22#include <asm/irqflags.h>
  23#include <asm/ptrace.h>
  24#include <asm/ppc-opcode.h>
  25#include <asm/mmu.h>
  26
  27/* XXX This will ultimately add space for a special exception save
  28 *     structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
  29 *     when taking special interrupts. For now we don't support that;
  30 *     special interrupts taken from within a non-standard level will
  31 *     probably blow you up
  32 */
  33#define SPECIAL_EXC_FRAME_SIZE  INT_FRAME_SIZE
  34
  35/* Exception prolog code for all exceptions */
  36#define EXCEPTION_PROLOG(n, type, addition)                                 \
  37        mtspr   SPRN_SPRG_##type##_SCRATCH,r13; /* get spare registers */   \
  38        mfspr   r13,SPRN_SPRG_PACA;     /* get PACA */                      \
  39        std     r10,PACA_EX##type+EX_R10(r13);                              \
  40        std     r11,PACA_EX##type+EX_R11(r13);                              \
  41        mfcr    r10;                    /* save CR */                       \
  42        addition;                       /* additional code for that exc. */ \
  43        std     r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */  \
  44        stw     r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \
  45        mfspr   r11,SPRN_##type##_SRR1;/* what are we coming from */        \
  46        type##_SET_KSTACK;              /* get special stack if necessary */\
  47        andi.   r10,r11,MSR_PR;         /* check if coming from user */     \
  48        beq     1f;                     /* branch around if supervisor */   \
  49        ld      r1,PACAKSAVE(r13);      /* get kernel stack coming from usr */\
  501:      cmpdi   cr1,r1,0;               /* check if SP makes sense */       \
  51        bge-    cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \
  52        mfspr   r10,SPRN_##type##_SRR0; /* read SRR0 before touching stack */
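/*
 * Roughly, in C-like terms (an illustrative sketch, not part of the original
 * code; field names are approximate):
 *
 *	stash r10, r11, old r1 and CR in the per-type PACA exception save area;
 *	if (srr1 & MSR_PR)
 *		r1 = paca->kstack;	// came from user: use the kernel stack
 *	if ((long)r1 >= 0)		// kernel SPs have the top bit set
 *		goto exc_n_bad_stack;	// trampolines below record the trap
 */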
  53
  54/* Exception type-specific macros */
  55#define GEN_SET_KSTACK                                                      \
  56        subi    r1,r1,INT_FRAME_SIZE;   /* alloc frame on kernel stack */
  57#define SPRN_GEN_SRR0   SPRN_SRR0
  58#define SPRN_GEN_SRR1   SPRN_SRR1
  59
  60#define CRIT_SET_KSTACK                                                     \
  61        ld      r1,PACA_CRIT_STACK(r13);                                    \
  62        subi    r1,r1,SPECIAL_EXC_FRAME_SIZE;
  63#define SPRN_CRIT_SRR0  SPRN_CSRR0
  64#define SPRN_CRIT_SRR1  SPRN_CSRR1
  65
  66#define DBG_SET_KSTACK                                                      \
  67        ld      r1,PACA_DBG_STACK(r13);                                     \
  68        subi    r1,r1,SPECIAL_EXC_FRAME_SIZE;
  69#define SPRN_DBG_SRR0   SPRN_DSRR0
  70#define SPRN_DBG_SRR1   SPRN_DSRR1
  71
  72#define MC_SET_KSTACK                                                       \
  73        ld      r1,PACA_MC_STACK(r13);                                      \
  74        subi    r1,r1,SPECIAL_EXC_FRAME_SIZE;
  75#define SPRN_MC_SRR0    SPRN_MCSRR0
  76#define SPRN_MC_SRR1    SPRN_MCSRR1
  77
  78#define NORMAL_EXCEPTION_PROLOG(n, addition)                                \
  79        EXCEPTION_PROLOG(n, GEN, addition##_GEN)
  80
  81#define CRIT_EXCEPTION_PROLOG(n, addition)                                  \
  82        EXCEPTION_PROLOG(n, CRIT, addition##_CRIT)
  83
  84#define DBG_EXCEPTION_PROLOG(n, addition)                                   \
  85        EXCEPTION_PROLOG(n, DBG, addition##_DBG)
  86
  87#define MC_EXCEPTION_PROLOG(n, addition)                                    \
  88        EXCEPTION_PROLOG(n, MC, addition##_MC)
  89
  90
  91/* Variants of the "addition" argument for the prolog
  92 */
  93#define PROLOG_ADDITION_NONE_GEN
  94#define PROLOG_ADDITION_NONE_CRIT
  95#define PROLOG_ADDITION_NONE_DBG
  96#define PROLOG_ADDITION_NONE_MC
  97
  98#define PROLOG_ADDITION_MASKABLE_GEN                                        \
  99        lbz     r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */      \
 100        cmpwi   cr0,r11,0;              /* yes -> go out of line */         \
 101        beq     masked_interrupt_book3e;
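/*
 * Roughly, in C (illustrative only): maskable interrupts peek at the
 * lazy-disable flag before building a full frame:
 *
 *	if (!paca->soft_enabled)
 *		goto masked_interrupt_book3e;	// see the masked path below
 */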
 102
 103#define PROLOG_ADDITION_2REGS_GEN                                           \
 104        std     r14,PACA_EXGEN+EX_R14(r13);                                 \
 105        std     r15,PACA_EXGEN+EX_R15(r13)
 106
 107#define PROLOG_ADDITION_1REG_GEN                                            \
 108        std     r14,PACA_EXGEN+EX_R14(r13);
 109
 110#define PROLOG_ADDITION_2REGS_CRIT                                          \
 111        std     r14,PACA_EXCRIT+EX_R14(r13);                                \
 112        std     r15,PACA_EXCRIT+EX_R15(r13)
 113
 114#define PROLOG_ADDITION_2REGS_DBG                                           \
 115        std     r14,PACA_EXDBG+EX_R14(r13);                                 \
 116        std     r15,PACA_EXDBG+EX_R15(r13)
 117
 118#define PROLOG_ADDITION_2REGS_MC                                            \
 119        std     r14,PACA_EXMC+EX_R14(r13);                                  \
 120        std     r15,PACA_EXMC+EX_R15(r13)
 121
 122/* Core exception code for all exceptions except TLB misses.
 123 * XXX: Needs to make SPRN_SPRG_GEN depend on exception type
 124 */
 125#define EXCEPTION_COMMON(n, excf, ints)                                     \
 126        std     r0,GPR0(r1);            /* save r0 in stackframe */         \
 127        std     r2,GPR2(r1);            /* save r2 in stackframe */         \
 128        SAVE_4GPRS(3, r1);              /* save r3 - r6 in stackframe */    \
 129        SAVE_2GPRS(7, r1);              /* save r7, r8 in stackframe */     \
 130        std     r9,GPR9(r1);            /* save r9 in stackframe */         \
 131        std     r10,_NIP(r1);           /* save SRR0 to stackframe */       \
 132        std     r11,_MSR(r1);           /* save SRR1 to stackframe */       \
 133        ACCOUNT_CPU_USER_ENTRY(r10,r11);/* accounting (uses cr0+eq) */      \
 134        ld      r3,excf+EX_R10(r13);    /* get back r10 */                  \
 135        ld      r4,excf+EX_R11(r13);    /* get back r11 */                  \
 136        mfspr   r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 */                 \
 137        std     r12,GPR12(r1);          /* save r12 in stackframe */        \
 138        ld      r2,PACATOC(r13);        /* get kernel TOC into r2 */        \
 139        mflr    r6;                     /* save LR in stackframe */         \
 140        mfctr   r7;                     /* save CTR in stackframe */        \
 141        mfspr   r8,SPRN_XER;            /* save XER in stackframe */        \
 142        ld      r9,excf+EX_R1(r13);     /* load orig r1 back from PACA */   \
 143        lwz     r10,excf+EX_CR(r13);    /* load orig CR back from PACA  */  \
 144        lbz     r11,PACASOFTIRQEN(r13); /* get current IRQ softe */         \
 145        ld      r12,exception_marker@toc(r2);                               \
 146        li      r0,0;                                                       \
 147        std     r3,GPR10(r1);           /* save r10 to stackframe */        \
 148        std     r4,GPR11(r1);           /* save r11 to stackframe */        \
 149        std     r5,GPR13(r1);           /* save it to stackframe */         \
 150        std     r6,_LINK(r1);                                               \
 151        std     r7,_CTR(r1);                                                \
 152        std     r8,_XER(r1);                                                \
 153        li      r3,(n)+1;               /* indicate partial regs in trap */ \
 154        std     r9,0(r1);               /* store stack frame back link */   \
 155        std     r10,_CCR(r1);           /* store orig CR in stackframe */   \
 156        std     r9,GPR1(r1);            /* store saved r1 in pt_regs */     \
 157        std     r11,SOFTE(r1);          /* and save it to stackframe */     \
 158        std     r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */       \
 159        std     r3,_TRAP(r1);           /* set trap number              */  \
 160        std     r0,RESULT(r1);          /* clear regs->result */            \
 161        ints;
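/*
 * What the macro above leaves in the pt_regs frame, as a rough C sketch
 * (illustrative only, not part of the original code):
 *
 *	regs->nip = srr0;   regs->msr = srr1;
 *	regs->link = lr;    regs->ctr = ctr;    regs->xer = xer;
 *	regs->ccr = cr_saved_in_paca;
 *	regs->softe = paca->soft_enabled;
 *	regs->trap = n + 1;	// low bit set: r14-r31 not saved yet
 *	regs->result = 0;
 *
 * Handlers that need the non-volatile GPRs call save_nvgprs, which stores
 * r14-r31 and clears that low bit.
 */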
 162
 163/* Variants for the "ints" argument */
 164#define INTS_KEEP
 165#define INTS_DISABLE_SOFT                                                   \
 166        stb     r0,PACASOFTIRQEN(r13);  /* mark interrupts soft-disabled */ \
 167        TRACE_DISABLE_INTS;
 168#define INTS_DISABLE_HARD                                                   \
 169        stb     r0,PACAHARDIRQEN(r13); /* and hard disabled */
 170#define INTS_DISABLE_ALL                                                    \
 171        INTS_DISABLE_SOFT                                                   \
 172        INTS_DISABLE_HARD
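/*
 * The lazy interrupt-disable state lives in two bytes of the PACA; a rough
 * C model of the two macros above (illustrative only):
 *
 *	paca->soft_enabled = 0;		// INTS_DISABLE_SOFT
 *	paca->hard_enabled = 0;		// INTS_DISABLE_HARD
 *
 * r0 is known to be zero at this point because EXCEPTION_COMMON zeroes it
 * just before storing regs->result.
 */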
 173
 174/* This is called by exceptions that used INTS_KEEP (that is, did not clear
 175 * either the soft or hard IRQ indicators in the PACA). This will restore
 176 * MSR:EE to its previous value
 177 *
 178 * XXX In the long run, we may want to open-code it in order to separate the
 179 *     load from the wrtee, thus limiting the latency caused by the dependency,
 180 *     but at this point, I'll favor code clarity until we have a near-final
 181 *     implementation
 182 */
 183#define INTS_RESTORE_HARD                                                   \
 184        ld      r11,_MSR(r1);                                               \
 185        wrtee   r11;
 186
 187/* XXX FIXME: Restore r14/r15 when necessary */
 188#define BAD_STACK_TRAMPOLINE(n)                                             \
 189exc_##n##_bad_stack:                                                        \
 190        li      r1,(n);                 /* get exception number */          \
 191        sth     r1,PACA_TRAP_SAVE(r13); /* store trap */                    \
 192        b       bad_stack_book3e;       /* bad stack error */
 193
 194/* WARNING: If you change the layout of this stub, make sure you check
 195        *   the debug exception handler which handles single stepping
 196        *   into exceptions from userspace, and the MM code in
 197        *   arch/powerpc/mm/tlb_nohash.c which patches the branch here
 198        *   and would need to be updated if that branch is moved
 199        */
 200#define EXCEPTION_STUB(loc, label)                                      \
 201        . = interrupt_base_book3e + loc;                                \
 202        nop;    /* To make debug interrupts happy */                    \
 203        b       exc_##label##_book3e;
 204
 205#define ACK_NONE(r)
 206#define ACK_DEC(r)                                                      \
 207        lis     r,TSR_DIS@h;                                            \
 208        mtspr   SPRN_TSR,r
 209#define ACK_FIT(r)                                                      \
 210        lis     r,TSR_FIS@h;                                            \
 211        mtspr   SPRN_TSR,r
 212
 213/* Used by asynchronous interrupts that may happen in the idle loop.
 214 *
 215 * This checks if the thread was in the idle loop, and if yes, returns
 216 * to the caller rather than to the interrupted PC. This is to avoid a race if
 217 * interrupts happen before the wait instruction.
 218 */
 219#define CHECK_NAPPING()                                                 \
 220        clrrdi  r11,r1,THREAD_SHIFT;                                    \
 221        ld      r10,TI_LOCAL_FLAGS(r11);                                \
 222        andi.   r9,r10,_TLF_NAPPING;                                    \
 223        beq+    1f;                                                     \
 224        ld      r8,_LINK(r1);                                           \
 225        rlwinm  r7,r10,0,~_TLF_NAPPING;                                 \
 226        std     r8,_NIP(r1);                                            \
 227        std     r7,TI_LOCAL_FLAGS(r11);                                 \
 2281:
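/*
 * Rough C equivalent of CHECK_NAPPING (illustrative only):
 *
 *	struct thread_info *ti = current_thread_info();
 *	if (ti->local_flags & _TLF_NAPPING) {
 *		ti->local_flags &= ~_TLF_NAPPING;
 *		regs->nip = regs->link;	// resume at the idle loop's caller
 *	}
 */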
 229
 230
 231#define MASKABLE_EXCEPTION(trapnum, label, hdlr, ack)                   \
 232        START_EXCEPTION(label);                                         \
 233        NORMAL_EXCEPTION_PROLOG(trapnum, PROLOG_ADDITION_MASKABLE)      \
 234        EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE_ALL)         \
 235        ack(r8);                                                        \
 236        CHECK_NAPPING();                                                \
 237        addi    r3,r1,STACK_FRAME_OVERHEAD;                             \
 238        bl      hdlr;                                                   \
 239        b       .ret_from_except_lite;
 240
 241/* This value is used to mark exception frames on the stack. */
 242        .section        ".toc","aw"
 243exception_marker:
 244        .tc     ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
 245
 246
 247/*
 248 * And here we have the exception vectors!
 249 */
 250
 251        .text
 252        .balign 0x1000
 253        .globl interrupt_base_book3e
 254interrupt_base_book3e:                                  /* fake trap */
 255        /* Note: If real debug exceptions are supported by the HW, the vector
 256         * below will have to be patched up to point to an appropriate handler
 257         */
 258        EXCEPTION_STUB(0x000, machine_check)            /* 0x0200 */
 259        EXCEPTION_STUB(0x020, critical_input)           /* 0x0580 */
 260        EXCEPTION_STUB(0x040, debug_crit)               /* 0x0d00 */
 261        EXCEPTION_STUB(0x060, data_storage)             /* 0x0300 */
 262        EXCEPTION_STUB(0x080, instruction_storage)      /* 0x0400 */
 263        EXCEPTION_STUB(0x0a0, external_input)           /* 0x0500 */
 264        EXCEPTION_STUB(0x0c0, alignment)                /* 0x0600 */
 265        EXCEPTION_STUB(0x0e0, program)                  /* 0x0700 */
 266        EXCEPTION_STUB(0x100, fp_unavailable)           /* 0x0800 */
 267        EXCEPTION_STUB(0x120, system_call)              /* 0x0c00 */
 268        EXCEPTION_STUB(0x140, ap_unavailable)           /* 0x0f20 */
 269        EXCEPTION_STUB(0x160, decrementer)              /* 0x0900 */
 270        EXCEPTION_STUB(0x180, fixed_interval)           /* 0x0980 */
 271        EXCEPTION_STUB(0x1a0, watchdog)                 /* 0x09f0 */
 272        EXCEPTION_STUB(0x1c0, data_tlb_miss)
 273        EXCEPTION_STUB(0x1e0, instruction_tlb_miss)
 274        EXCEPTION_STUB(0x280, doorbell)
 275        EXCEPTION_STUB(0x2a0, doorbell_crit)
 276
 277        .globl interrupt_end_book3e
 278interrupt_end_book3e:
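/*
 * Each stub above occupies a fixed 0x20-byte slot (a nop plus a branch)
 * relative to interrupt_base_book3e, and __setup_base_ivors at the bottom
 * of this file programs the matching offsets into the IVORs.  As a rough C
 * illustration (not part of the original code):
 *
 *	// e.g. the decrementer stub, IVOR10 = 0x160
 *	void *stub = (char *)interrupt_base_book3e + 0x160;
 */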
 279
 280/* Critical Input Interrupt */
 281        START_EXCEPTION(critical_input);
 282        CRIT_EXCEPTION_PROLOG(0x100, PROLOG_ADDITION_NONE)
 283//      EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE_ALL)
 284//      bl      special_reg_save_crit
 285//      CHECK_NAPPING();
 286//      addi    r3,r1,STACK_FRAME_OVERHEAD
 287//      bl      .critical_exception
 288//      b       ret_from_crit_except
 289        b       .
 290
 291/* Machine Check Interrupt */
 292        START_EXCEPTION(machine_check);
 293        CRIT_EXCEPTION_PROLOG(0x200, PROLOG_ADDITION_NONE)
 294//      EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE_ALL)
 295//      bl      special_reg_save_mc
 296//      addi    r3,r1,STACK_FRAME_OVERHEAD
 297//      CHECK_NAPPING();
 298//      bl      .machine_check_exception
 299//      b       ret_from_mc_except
 300        b       .
 301
 302/* Data Storage Interrupt */
 303        START_EXCEPTION(data_storage)
 304        NORMAL_EXCEPTION_PROLOG(0x300, PROLOG_ADDITION_2REGS)
 305        mfspr   r14,SPRN_DEAR
 306        mfspr   r15,SPRN_ESR
 307        EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_KEEP)
 308        b       storage_fault_common
 309
 310/* Instruction Storage Interrupt */
 311        START_EXCEPTION(instruction_storage);
 312        NORMAL_EXCEPTION_PROLOG(0x400, PROLOG_ADDITION_2REGS)
 313        li      r15,0
 314        mr      r14,r10
 315        EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_KEEP)
 316        b       storage_fault_common
 317
 318/* External Input Interrupt */
 319        MASKABLE_EXCEPTION(0x500, external_input, .do_IRQ, ACK_NONE)
 320
 321/* Alignment */
 322        START_EXCEPTION(alignment);
 323        NORMAL_EXCEPTION_PROLOG(0x600, PROLOG_ADDITION_2REGS)
 324        mfspr   r14,SPRN_DEAR
 325        mfspr   r15,SPRN_ESR
 326        EXCEPTION_COMMON(0x600, PACA_EXGEN, INTS_KEEP)
 327        b       alignment_more  /* no room, go out of line */
 328
 329/* Program Interrupt */
 330        START_EXCEPTION(program);
 331        NORMAL_EXCEPTION_PROLOG(0x700, PROLOG_ADDITION_1REG)
 332        mfspr   r14,SPRN_ESR
 333        EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE_SOFT)
 334        std     r14,_DSISR(r1)
 335        addi    r3,r1,STACK_FRAME_OVERHEAD
 336        ld      r14,PACA_EXGEN+EX_R14(r13)
 337        bl      .save_nvgprs
 338        INTS_RESTORE_HARD
 339        bl      .program_check_exception
 340        b       .ret_from_except
 341
 342/* Floating Point Unavailable Interrupt */
 343        START_EXCEPTION(fp_unavailable);
 344        NORMAL_EXCEPTION_PROLOG(0x800, PROLOG_ADDITION_NONE)
 345        /* we can probably do a shorter exception entry for that one... */
 346        EXCEPTION_COMMON(0x800, PACA_EXGEN, INTS_KEEP)
 347        bne     1f                      /* if from user, just load it up */
 348        bl      .save_nvgprs
 349        addi    r3,r1,STACK_FRAME_OVERHEAD
 350        INTS_RESTORE_HARD
 351        bl      .kernel_fp_unavailable_exception
 352        BUG_OPCODE
 3531:      ld      r12,_MSR(r1)
 354        bl      .load_up_fpu
 355        b       fast_exception_return
 356
 357/* Decrementer Interrupt */
 358        MASKABLE_EXCEPTION(0x900, decrementer, .timer_interrupt, ACK_DEC)
 359
 360/* Fixed Interval Timer Interrupt */
 361        MASKABLE_EXCEPTION(0x980, fixed_interval, .unknown_exception, ACK_FIT)
 362
 363/* Watchdog Timer Interrupt */
 364        START_EXCEPTION(watchdog);
 365        CRIT_EXCEPTION_PROLOG(0x9f0, PROLOG_ADDITION_NONE)
 366//      EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE_ALL)
 367//      bl      special_reg_save_crit
 368//      CHECK_NAPPING();
 369//      addi    r3,r1,STACK_FRAME_OVERHEAD
 370//      bl      .unknown_exception
 371//      b       ret_from_crit_except
 372        b       .
 373
 374/* System Call Interrupt */
 375        START_EXCEPTION(system_call)
 376        mr      r9,r13                  /* keep a copy of userland r13 */
 377        mfspr   r11,SPRN_SRR0           /* get return address */
 378        mfspr   r12,SPRN_SRR1           /* get previous MSR */
 379        mfspr   r13,SPRN_SPRG_PACA      /* get our PACA */
 380        b       system_call_common
 381
 382/* Auxiliary Processor Unavailable Interrupt */
 383        START_EXCEPTION(ap_unavailable);
 384        NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE)
 385        EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_KEEP)
 386        addi    r3,r1,STACK_FRAME_OVERHEAD
 387        bl      .save_nvgprs
 388        INTS_RESTORE_HARD
 389        bl      .unknown_exception
 390        b       .ret_from_except
 391
 392/* Debug exception as a critical interrupt */
 393        START_EXCEPTION(debug_crit);
 394        CRIT_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS)
 395
 396        /*
 397         * If there is a single step or branch-taken exception in an
 398         * exception entry sequence, it was probably meant to apply to
 399         * the code where the exception occurred (since exception entry
 400         * doesn't turn off DE automatically).  We simulate the effect
 401         * of turning off DE on entry to an exception handler by turning
 402         * off DE in the CSRR1 value and clearing the debug status.
 403         */
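        /*
         * Rough C model of the check below (illustrative only):
         *
         *	if ((dbsr & DBSR_IC) &&
         *	    nip >= interrupt_base_book3e && nip < interrupt_end_book3e) {
         *		mtspr(SPRN_DBSR, DBSR_IC);	// DBSR is write-1-to-clear
         *		csrr1 &= ~MSR_DE;		// return with DE off
         *		// restore the saved registers and rfci
         *	}
         */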
 404
 405        mfspr   r14,SPRN_DBSR           /* check single-step/branch taken */
 406        andis.  r15,r14,DBSR_IC@h
 407        beq+    1f
 408
 409        LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
 410        LOAD_REG_IMMEDIATE(r15,interrupt_end_book3e)
 411        cmpld   cr0,r10,r14
 412        cmpld   cr1,r10,r15
 413        blt+    cr0,1f
 414        bge+    cr1,1f
 415
 416        /* here it looks like we got an inappropriate debug exception. */
 417        lis     r14,DBSR_IC@h           /* clear the IC event */
 418        rlwinm  r11,r11,0,~MSR_DE       /* clear DE in the CSRR1 value */
 419        mtspr   SPRN_DBSR,r14
 420        mtspr   SPRN_CSRR1,r11
 421        lwz     r10,PACA_EXCRIT+EX_CR(r13)      /* restore registers */
 422        ld      r1,PACA_EXCRIT+EX_R1(r13)
 423        ld      r14,PACA_EXCRIT+EX_R14(r13)
 424        ld      r15,PACA_EXCRIT+EX_R15(r13)
 425        mtcr    r10
 426        ld      r10,PACA_EXCRIT+EX_R10(r13)     /* restore registers */
 427        ld      r11,PACA_EXCRIT+EX_R11(r13)
 428        mfspr   r13,SPRN_SPRG_CRIT_SCRATCH
 429        rfci
 430
 431        /* Normal debug exception */
 432        /* XXX We only handle coming from userspace for now since we can't
 433         *     quite properly save an interrupted kernel state yet
 434         */
 4351:      andi.   r14,r11,MSR_PR;         /* check for userspace again */
 436        beq     kernel_dbg_exc;         /* if from kernel mode */
 437
 438        /* Now we mash things up to make it look like we are coming in on a
 439         * normal exception
 440         */
 441        mfspr   r15,SPRN_SPRG_CRIT_SCRATCH
 442        mtspr   SPRN_SPRG_GEN_SCRATCH,r15
 443        mfspr   r14,SPRN_DBSR
 444        EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE_ALL)
 445        std     r14,_DSISR(r1)
 446        addi    r3,r1,STACK_FRAME_OVERHEAD
 447        mr      r4,r14
 448        ld      r14,PACA_EXCRIT+EX_R14(r13)
 449        ld      r15,PACA_EXCRIT+EX_R15(r13)
 450        bl      .save_nvgprs
 451        bl      .DebugException
 452        b       .ret_from_except
 453
 454kernel_dbg_exc:
 455        b       .       /* NYI */
 456
 457/* Doorbell interrupt */
 458        MASKABLE_EXCEPTION(0x2070, doorbell, .doorbell_exception, ACK_NONE)
 459
 460/* Doorbell critical Interrupt */
 461        START_EXCEPTION(doorbell_crit);
 462        CRIT_EXCEPTION_PROLOG(0x2080, PROLOG_ADDITION_NONE)
 463//      EXCEPTION_COMMON(0x2080, PACA_EXCRIT, INTS_DISABLE_ALL)
 464//      bl      special_reg_save_crit
 465//      CHECK_NAPPING();
 466//      addi    r3,r1,STACK_FRAME_OVERHEAD
 467//      bl      .doorbell_critical_exception
 468//      b       ret_from_crit_except
 469        b       .
 470
 471
 472/*
 473 * An interrupt came in while soft-disabled; clear EE in SRR1,
 474 * clear paca->hard_enabled and return.
 475 */
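/*
 * Roughly, in C (illustrative only):
 *
 *	paca->hard_enabled = 0;	// remember that we are leaving EE off
 *	srr1 &= ~MSR_EE;	// so the rfi below returns with EE clear
 *	// then restore r10/r11/r13 from the PACA and rfi, without ever
 *	// building a stack frame
 */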
 476masked_interrupt_book3e:
 477        mtcr    r10
 478        stb     r11,PACAHARDIRQEN(r13)
 479        mfspr   r10,SPRN_SRR1
 480        rldicl  r11,r10,48,1            /* clear MSR_EE */
 481        rotldi  r10,r11,16
 482        mtspr   SPRN_SRR1,r10
 483        ld      r10,PACA_EXGEN+EX_R10(r13);     /* restore registers */
 484        ld      r11,PACA_EXGEN+EX_R11(r13);
 485        mfspr   r13,SPRN_SPRG_GEN_SCRATCH;
 486        rfi
 487        b       .
 488
 489/*
 490 * This is called from 0x300 and 0x400 handlers after the prologs with
 491 * r14 and r15 containing the fault address and error code, with the
 492 * original values stashed away in the PACA
 493 */
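/*
 * Rough C view of the dispatch below (illustrative only):
 *
 *	err = do_page_fault(regs, dar, dsisr);
 *	if (err == 0)
 *		return;				// handled, lightweight exit
 *	save_nvgprs(regs);
 *	bad_page_fault(regs, dar, err);		// err doubles as the signal
 */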
 494storage_fault_common:
 495        std     r14,_DAR(r1)
 496        std     r15,_DSISR(r1)
 497        addi    r3,r1,STACK_FRAME_OVERHEAD
 498        mr      r4,r14
 499        mr      r5,r15
 500        ld      r14,PACA_EXGEN+EX_R14(r13)
 501        ld      r15,PACA_EXGEN+EX_R15(r13)
 502        INTS_RESTORE_HARD
 503        bl      .do_page_fault
 504        cmpdi   r3,0
 505        bne-    1f
 506        b       .ret_from_except_lite
 5071:      bl      .save_nvgprs
 508        mr      r5,r3
 509        addi    r3,r1,STACK_FRAME_OVERHEAD
 510        ld      r4,_DAR(r1)
 511        bl      .bad_page_fault
 512        b       .ret_from_except
 513
 514/*
 515 * Alignment exception doesn't fit entirely in the 0x100 bytes so it
 516 * continues here.
 517 */
 518alignment_more:
 519        std     r14,_DAR(r1)
 520        std     r15,_DSISR(r1)
 521        addi    r3,r1,STACK_FRAME_OVERHEAD
 522        ld      r14,PACA_EXGEN+EX_R14(r13)
 523        ld      r15,PACA_EXGEN+EX_R15(r13)
 524        bl      .save_nvgprs
 525        INTS_RESTORE_HARD
 526        bl      .alignment_exception
 527        b       .ret_from_except
 528
 529/*
 530 * We branch here from entry_64.S for the last stage of the exception
 531 * return code path. MSR:EE is expected to be off at that point
 532 */
 533_GLOBAL(exception_return_book3e)
 534        b       1f
 535
 536/* This is the return from the load_up_fpu fast path which could in fact do
 537 * with fewer GPR restores, but for now we have a single return path
 538 */
 539        .globl fast_exception_return
 540fast_exception_return:
 541        wrteei  0
 5421:      mr      r0,r13
 543        ld      r10,_MSR(r1)
 544        REST_4GPRS(2, r1)
 545        andi.   r6,r10,MSR_PR
 546        REST_2GPRS(6, r1)
 547        beq     1f
 548        ACCOUNT_CPU_USER_EXIT(r10, r11)
 549        ld      r0,GPR13(r1)
 550
 5511:      stdcx.  r0,0,r1         /* to clear the reservation */
 552
 553        ld      r8,_CCR(r1)
 554        ld      r9,_LINK(r1)
 555        ld      r10,_CTR(r1)
 556        ld      r11,_XER(r1)
 557        mtcr    r8
 558        mtlr    r9
 559        mtctr   r10
 560        mtxer   r11
 561        REST_2GPRS(8, r1)
 562        ld      r10,GPR10(r1)
 563        ld      r11,GPR11(r1)
 564        ld      r12,GPR12(r1)
 565        mtspr   SPRN_SPRG_GEN_SCRATCH,r0
 566
 567        std     r10,PACA_EXGEN+EX_R10(r13);
 568        std     r11,PACA_EXGEN+EX_R11(r13);
 569        ld      r10,_NIP(r1)
 570        ld      r11,_MSR(r1)
 571        ld      r0,GPR0(r1)
 572        ld      r1,GPR1(r1)
 573        mtspr   SPRN_SRR0,r10
 574        mtspr   SPRN_SRR1,r11
 575        ld      r10,PACA_EXGEN+EX_R10(r13)
 576        ld      r11,PACA_EXGEN+EX_R11(r13)
 577        mfspr   r13,SPRN_SPRG_GEN_SCRATCH
 578        rfi
 579
 580/*
 581 * Trampolines used when spotting a bad kernel stack pointer in
 582 * the exception entry code.
 583 *
 584 * TODO: move some bits like SRR0 read to trampoline, pass PACA
 585 * index around, etc... to handle crit & mcheck
 586 */
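/*
 * Each trampoline below is tiny; roughly, in C (illustrative only):
 *
 *	paca->trap_save = trapnum;	// e.g. 0x300 for a data storage fault
 *	goto bad_stack_book3e;		// switches to PACAEMERGSP, never returns
 */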
 587BAD_STACK_TRAMPOLINE(0x000)
 588BAD_STACK_TRAMPOLINE(0x100)
 589BAD_STACK_TRAMPOLINE(0x200)
 590BAD_STACK_TRAMPOLINE(0x300)
 591BAD_STACK_TRAMPOLINE(0x400)
 592BAD_STACK_TRAMPOLINE(0x500)
 593BAD_STACK_TRAMPOLINE(0x600)
 594BAD_STACK_TRAMPOLINE(0x700)
 595BAD_STACK_TRAMPOLINE(0x800)
 596BAD_STACK_TRAMPOLINE(0x900)
 597BAD_STACK_TRAMPOLINE(0x980)
 598BAD_STACK_TRAMPOLINE(0x9f0)
 599BAD_STACK_TRAMPOLINE(0xa00)
 600BAD_STACK_TRAMPOLINE(0xb00)
 601BAD_STACK_TRAMPOLINE(0xc00)
 602BAD_STACK_TRAMPOLINE(0xd00)
 603BAD_STACK_TRAMPOLINE(0xe00)
 604BAD_STACK_TRAMPOLINE(0xf00)
 605BAD_STACK_TRAMPOLINE(0xf20)
 606BAD_STACK_TRAMPOLINE(0x2070)
 607BAD_STACK_TRAMPOLINE(0x2080)
 608
 609        .globl  bad_stack_book3e
 610bad_stack_book3e:
 611        /* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */
 612        mfspr   r10,SPRN_SRR0;            /* read SRR0 before touching stack */
 613        ld      r1,PACAEMERGSP(r13)
 614        subi    r1,r1,64+INT_FRAME_SIZE
 615        std     r10,_NIP(r1)
 616        std     r11,_MSR(r1)
 617        ld      r10,PACA_EXGEN+EX_R1(r13) /* FIXME for crit & mcheck */
 618        lwz     r11,PACA_EXGEN+EX_CR(r13) /* FIXME for crit & mcheck */
 619        std     r10,GPR1(r1)
 620        std     r11,_CCR(r1)
 621        mfspr   r10,SPRN_DEAR
 622        mfspr   r11,SPRN_ESR
 623        std     r10,_DAR(r1)
 624        std     r11,_DSISR(r1)
 625        std     r0,GPR0(r1);            /* save r0 in stackframe */         \
 626        std     r2,GPR2(r1);            /* save r2 in stackframe */         \
 627        SAVE_4GPRS(3, r1);              /* save r3 - r6 in stackframe */    \
 628        SAVE_2GPRS(7, r1);              /* save r7, r8 in stackframe */     \
 629        std     r9,GPR9(r1);            /* save r9 in stackframe */         \
 630        ld      r3,PACA_EXGEN+EX_R10(r13);/* get back r10 */                \
 631        ld      r4,PACA_EXGEN+EX_R11(r13);/* get back r11 */                \
 632        mfspr   r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 XXX can be wrong */ \
 633        std     r3,GPR10(r1);           /* save r10 to stackframe */        \
 634        std     r4,GPR11(r1);           /* save r11 to stackframe */        \
 635        std     r12,GPR12(r1);          /* save r12 in stackframe */        \
 636        std     r5,GPR13(r1);           /* save it to stackframe */         \
 637        mflr    r10
 638        mfctr   r11
 639        mfxer   r12
 640        std     r10,_LINK(r1)
 641        std     r11,_CTR(r1)
 642        std     r12,_XER(r1)
 643        SAVE_10GPRS(14,r1)
 644        SAVE_8GPRS(24,r1)
 645        lhz     r12,PACA_TRAP_SAVE(r13)
 646        std     r12,_TRAP(r1)
 647        addi    r11,r1,INT_FRAME_SIZE
 648        std     r11,0(r1)
 649        li      r12,0
 650        std     r12,0(r11)
 651        ld      r2,PACATOC(r13)
 6521:      addi    r3,r1,STACK_FRAME_OVERHEAD
 653        bl      .kernel_bad_stack
 654        b       1b
 655
 656/*
 657 * Set up the initial TLB for a core. The current implementation
 658 * assumes that whatever we are running from will not conflict with
 659 * the new mapping at PAGE_OFFSET.
 660 */
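/*
 * For TLB arrays without hardware entry select (the have_hes path below
 * covers the other case), the code follows the numbered steps 1-8 in the
 * comments: find and IPROT-protect the entry we are executing from,
 * invalidate everything else, switch to a temporary mapping in the other
 * address space, then install the final 1GB mapping at PAGE_OFFSET and
 * drop the temporary one.
 *
 * The MAS0 ESEL field is packed the way the rlwimi instructions below do
 * it; as a rough C illustration (not part of the original code):
 *
 *	mas0 = MAS0_TLBSEL(1) | ((esel << 16) & 0x0fff0000);
 */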
 661_GLOBAL(initial_tlb_book3e)
 662
 663        /* Look for the first TLB with IPROT set */
 664        mfspr   r4,SPRN_TLB0CFG
 665        andi.   r3,r4,TLBnCFG_IPROT
 666        lis     r3,MAS0_TLBSEL(0)@h
 667        bne     found_iprot
 668
 669        mfspr   r4,SPRN_TLB1CFG
 670        andi.   r3,r4,TLBnCFG_IPROT
 671        lis     r3,MAS0_TLBSEL(1)@h
 672        bne     found_iprot
 673
 674        mfspr   r4,SPRN_TLB2CFG
 675        andi.   r3,r4,TLBnCFG_IPROT
 676        lis     r3,MAS0_TLBSEL(2)@h
 677        bne     found_iprot
 678
 679        lis     r3,MAS0_TLBSEL(3)@h
 680        mfspr   r4,SPRN_TLB3CFG
 681        /* fall through */
 682
 683found_iprot:
 684        andi.   r5,r4,TLBnCFG_HES
 685        bne     have_hes
 686
 687        mflr    r8                              /* save LR */
 688/* 1. Find the index of the entry we're executing in
 689 *
 690 * r3 = MAS0_TLBSEL (for the iprot array)
 691 * r4 = SPRN_TLBnCFG
 692 */
 693        bl      invstr                          /* Find our address */
 694invstr: mflr    r6                              /* Make it accessible */
 695        mfmsr   r7
 696        rlwinm  r5,r7,27,31,31                  /* extract MSR[IS] */
 697        mfspr   r7,SPRN_PID
 698        slwi    r7,r7,16
 699        or      r7,r7,r5
 700        mtspr   SPRN_MAS6,r7
 701        tlbsx   0,r6                            /* search MSR[IS], SPID=PID */
 702
 703        mfspr   r3,SPRN_MAS0
 704        rlwinm  r5,r3,16,20,31                  /* Extract MAS0(Entry) */
 705
 706        mfspr   r7,SPRN_MAS1                    /* Ensure IPROT is set */
 707        oris    r7,r7,MAS1_IPROT@h
 708        mtspr   SPRN_MAS1,r7
 709        tlbwe
 710
 711/* 2. Invalidate all entries except the entry we're executing in
 712 *
 713 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 714 * r4 = SPRN_TLBnCFG
 715 * r5 = ESEL of entry we are running in
 716 */
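/*
 * Rough C equivalent of the loop below (illustrative only; the read/write
 * helpers are stand-ins for the tlbre/tlbwe sequences):
 *
 *	for (e = 0; e < n_entries; e++) {
 *		u32 mas1 = read_mas1(tlbsel, e);
 *		if (e != esel)			// keep the entry we run from
 *			write_mas1(tlbsel, e,
 *				   mas1 & ~(MAS1_VALID | MAS1_IPROT));
 *	}
 */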
 717        andi.   r4,r4,TLBnCFG_N_ENTRY           /* Extract # entries */
 718        li      r6,0                            /* Set Entry counter to 0 */
 7191:      mr      r7,r3                           /* Set MAS0(TLBSEL) */
 720        rlwimi  r7,r6,16,4,15                   /* Setup MAS0 = TLBSEL | ESEL(r6) */
 721        mtspr   SPRN_MAS0,r7
 722        tlbre
 723        mfspr   r7,SPRN_MAS1
 724        rlwinm  r7,r7,0,2,31                    /* Clear MAS1 Valid and IPROT */
 725        cmpw    r5,r6
 726        beq     skpinv                          /* Don't update the current execution TLB */
 727        mtspr   SPRN_MAS1,r7
 728        tlbwe
 729        isync
 730skpinv: addi    r6,r6,1                         /* Increment */
 731        cmpw    r6,r4                           /* Are we done? */
 732        bne     1b                              /* If not, repeat */
 733
 734        /* Invalidate all TLBs */
 735        PPC_TLBILX_ALL(0,0)
 736        sync
 737        isync
 738
 739/* 3. Setup a temp mapping and jump to it
 740 *
 741 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 742 * r5 = ESEL of entry we are running in
 743 */
 744        andi.   r7,r5,0x1       /* Find an unused, non-zero entry */
 745        addi    r7,r7,0x1
 746        mr      r4,r3           /* Set MAS0(TLBSEL) = 1 */
 747        mtspr   SPRN_MAS0,r4
 748        tlbre
 749
 750        rlwimi  r4,r7,16,4,15   /* Setup MAS0 = TLBSEL | ESEL(r7) */
 751        mtspr   SPRN_MAS0,r4
 752
 753        mfspr   r7,SPRN_MAS1
 754        xori    r6,r7,MAS1_TS           /* Setup TMP mapping in the other Address space */
 755        mtspr   SPRN_MAS1,r6
 756
 757        tlbwe
 758
 759        mfmsr   r6
 760        xori    r6,r6,MSR_IS
 761        mtspr   SPRN_SRR1,r6
 762        bl      1f              /* Find our address */
 7631:      mflr    r6
 764        addi    r6,r6,(2f - 1b)
 765        mtspr   SPRN_SRR0,r6
 766        rfi
 7672:
 768
 769/* 4. Clear out PIDs & Search info
 770 *
 771 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
 772 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 773 * r5 = MAS3
 774 */
 775        li      r6,0
 776        mtspr   SPRN_MAS6,r6
 777        mtspr   SPRN_PID,r6
 778
 779/* 5. Invalidate mapping we started in
 780 *
 781 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
 782 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 783 * r5 = MAS3
 784 */
 785        mtspr   SPRN_MAS0,r3
 786        tlbre
 787        mfspr   r6,SPRN_MAS1
 788        rlwinm  r6,r6,0,2,0     /* clear IPROT */
 789        mtspr   SPRN_MAS1,r6
 790        tlbwe
 791
 792        /* Invalidate TLB1 */
 793        PPC_TLBILX_ALL(0,0)
 794        sync
 795        isync
 796
 797/* The mapping only needs to be cache-coherent on SMP */
 798#ifdef CONFIG_SMP
 799#define M_IF_SMP        MAS2_M
 800#else
 801#define M_IF_SMP        0
 802#endif
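/*
 * MAS2 combines the effective page number with the WIMGE attribute bits;
 * roughly what step 6 programs for the linear mapping (illustrative only):
 *
 *	u64 mas2 = PAGE_OFFSET | M_IF_SMP;	// 'M' (coherent) only on SMP
 */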
 803
 804/* 6. Setup KERNELBASE mapping in TLB[0]
 805 *
 806 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
 807 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 808 * r5 = MAS3
 809 */
 810        rlwinm  r3,r3,0,16,3    /* clear ESEL */
 811        mtspr   SPRN_MAS0,r3
 812        lis     r6,(MAS1_VALID|MAS1_IPROT)@h
 813        ori     r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
 814        mtspr   SPRN_MAS1,r6
 815
 816        LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | M_IF_SMP)
 817        mtspr   SPRN_MAS2,r6
 818
 819        rlwinm  r5,r5,0,0,25
 820        ori     r5,r5,MAS3_SR | MAS3_SW | MAS3_SX
 821        mtspr   SPRN_MAS3,r5
 822        li      r5,-1
 823        rlwinm  r5,r5,0,0,25
 824
 825        tlbwe
 826
 827/* 7. Jump to KERNELBASE mapping
 828 *
 829 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
 830 */
 831        /* Now we branch to the new virtual address mapped by this entry */
 832        LOAD_REG_IMMEDIATE(r6,2f)
 833        lis     r7,MSR_KERNEL@h
 834        ori     r7,r7,MSR_KERNEL@l
 835        mtspr   SPRN_SRR0,r6
 836        mtspr   SPRN_SRR1,r7
 837        rfi                             /* start execution out of TLB1[0] entry */
 8382:
 839
 840/* 8. Clear out the temp mapping
 841 *
 842 * r4 = MAS0 w/TLBSEL & ESEL for the entry we are running in
 843 */
 844        mtspr   SPRN_MAS0,r4
 845        tlbre
 846        mfspr   r5,SPRN_MAS1
 847        rlwinm  r5,r5,0,2,0     /* clear IPROT */
 848        mtspr   SPRN_MAS1,r5
 849        tlbwe
 850
 851        /* Invalidate TLB1 */
 852        PPC_TLBILX_ALL(0,0)
 853        sync
 854        isync
 855
 856        /* We translate LR and return */
 857        tovirt(r8,r8)
 858        mtlr    r8
 859        blr
 860
 861have_hes:
 862        /* Set up MAS 0,1,2,3 and 7 for a tlbwe of a 1G entry that maps the
 863         * kernel linear mapping. We also set MAS8 once and for all here, though
 864         * that will have to be made dependent on whether we are running under
 865         * a hypervisor I suppose.
 866         */
 867        ori     r3,r3,MAS0_HES | MAS0_WQ_ALLWAYS
 868        mtspr   SPRN_MAS0,r3
 869        lis     r3,(MAS1_VALID | MAS1_IPROT)@h
 870        ori     r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT
 871        mtspr   SPRN_MAS1,r3
 872        LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET | MAS2_M)
 873        mtspr   SPRN_MAS2,r3
 874        li      r3,MAS3_SR | MAS3_SW | MAS3_SX
 875        mtspr   SPRN_MAS7_MAS3,r3
 876        li      r3,0
 877        mtspr   SPRN_MAS8,r3
 878
 879        /* Write the TLB entry */
 880        tlbwe
 881
 882        /* Now we branch to the new virtual address mapped by this entry */
 883        LOAD_REG_IMMEDIATE(r3,1f)
 884        mtctr   r3
 885        bctr
 886
 8871:      /* We are now running at PAGE_OFFSET, clean the TLB of everything
 888         * else (XXX we should scan for bolted crap from the firmware too)
 889         */
 890        PPC_TLBILX(0,0,0)
 891        sync
 892        isync
 893
 894        /* We translate LR and return */
 895        mflr    r3
 896        tovirt(r3,r3)
 897        mtlr    r3
 898        blr
 899
 900/*
 901 * Main entry (boot CPU, thread 0)
 902 *
 903 * We enter here from head_64.S, possibly after the prom_init trampoline,
 904 * with r3 and r4 already saved to r31 and r30 respectively, and in 64-bit
 905 * mode. Anything else is as it was left by the bootloader
 906 *
 907 * Initial requirements of this port:
 908 *
 909 * - Kernel loaded at 0 physical
 910 * - A good lump of memory mapped 0:0 by UTLB entry 0
 911 * - MSR:IS & MSR:DS set to 0
 912 *
 913 * Note that some of the above requirements will be relaxed in the future
 914 * as the kernel becomes smarter at dealing with different initial conditions
 915 * but for now you have to be careful
 916 */
 917_GLOBAL(start_initialization_book3e)
 918        mflr    r28
 919
 920        /* First, we need to set up some initial TLBs to map the kernel
 921         * text, data and bss at PAGE_OFFSET. We don't have a real mode
 922         * and always use AS 0, so we just set it up to match our link
 923         * address and never use 0 based addresses.
 924         */
 925        bl      .initial_tlb_book3e
 926
 927        /* Init global core bits */
 928        bl      .init_core_book3e
 929
 930        /* Init per-thread bits */
 931        bl      .init_thread_book3e
 932
 933        /* Return to common init code */
 934        tovirt(r28,r28)
 935        mtlr    r28
 936        blr
 937
 938
 939/*
 940 * Secondary core/processor entry
 941 *
 942 * This is entered for thread 0 of a secondary core; all other threads
 943 * are expected to be stopped. It's similar to start_initialization_book3e
 944 * except that it's generally entered from the holding loop in head_64.S
 945 * after CPUs have been gathered by Open Firmware.
 946 *
 947 * We assume we are in 32-bit mode, running with whatever TLB entry was
 948 * set for us by the firmware or POR engine.
 949 */
 950_GLOBAL(book3e_secondary_core_init_tlb_set)
 951        li      r4,1
 952        b       .generic_secondary_smp_init
 953
 954_GLOBAL(book3e_secondary_core_init)
 955        mflr    r28
 956
 957        /* Do we need to setup initial TLB entry ? */
 958        cmplwi  r4,0
 959        bne     2f
 960
 961        /* Setup TLB for this core */
 962        bl      .initial_tlb_book3e
 963
 964        /* We can return from the above running at a different
 965         * address, so recalculate r2 (TOC)
 966         */
 967        bl      .relative_toc
 968
 969        /* Init global core bits */
 9702:      bl      .init_core_book3e
 971
 972        /* Init per-thread bits */
 9733:      bl      .init_thread_book3e
 974
 975        /* Return to common init code at proper virtual address.
 976         *
 977         * Due to various previous assumptions, we know we entered this
 978         * function at either the final PAGE_OFFSET mapping or using a
 979         * 1:1 mapping at 0, so we don't bother doing a complicated check
 980         * here, we just ensure the return address has the right top bits.
 981         * here; we just ensure the return address has the right top bits.
 982         * Note that if we ever want to be smarter about where we can be
 983         * started from, we have to be careful that by the time we reach
 984         * the code below we may already be running at a different location
 985         * than the one we were called from since initial_tlb_book3e can
 986         * have moved us already.
 987         */
 988        cmpdi   cr0,r28,0
 989        blt     1f
 990        lis     r3,PAGE_OFFSET@highest
 991        sldi    r3,r3,32
 992        or      r28,r28,r3
 9931:      mtlr    r28
 994        blr
 995
 996_GLOBAL(book3e_secondary_thread_init)
 997        mflr    r28
 998        b       3b
 999
1000_STATIC(init_core_book3e)
1001        /* Establish the interrupt vector base */
1002        LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e)
1003        mtspr   SPRN_IVPR,r3
1004        sync
1005        blr
1006
1007_STATIC(init_thread_book3e)
1008        lis     r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h
1009        mtspr   SPRN_EPCR,r3
1010
1011        /* Make sure interrupts are off */
1012        wrteei  0
1013
1014        /* disable all timers and clear out status */
1015        li      r3,0
1016        mtspr   SPRN_TCR,r3
1017        mfspr   r3,SPRN_TSR
1018        mtspr   SPRN_TSR,r3
1019
1020        blr
1021
1022_GLOBAL(__setup_base_ivors)
1023        SET_IVOR(0, 0x020) /* Critical Input */
1024        SET_IVOR(1, 0x000) /* Machine Check */
1025        SET_IVOR(2, 0x060) /* Data Storage */ 
1026        SET_IVOR(3, 0x080) /* Instruction Storage */
1027        SET_IVOR(4, 0x0a0) /* External Input */ 
1028        SET_IVOR(5, 0x0c0) /* Alignment */ 
1029        SET_IVOR(6, 0x0e0) /* Program */ 
1030        SET_IVOR(7, 0x100) /* FP Unavailable */ 
1031        SET_IVOR(8, 0x120) /* System Call */ 
1032        SET_IVOR(9, 0x140) /* Auxiliary Processor Unavailable */ 
1033        SET_IVOR(10, 0x160) /* Decrementer */ 
1034        SET_IVOR(11, 0x180) /* Fixed Interval Timer */ 
1035        SET_IVOR(12, 0x1a0) /* Watchdog Timer */ 
1036        SET_IVOR(13, 0x1c0) /* Data TLB Error */ 
1037        SET_IVOR(14, 0x1e0) /* Instruction TLB Error */
1038        SET_IVOR(15, 0x040) /* Debug */
1039
1040        sync
1041
1042        blr
1043