linux/arch/powerpc/kernel/head_book3s_32.S
<<
>>
Prefs
   1/* SPDX-License-Identifier: GPL-2.0-or-later */
   2/*
   3 *  PowerPC version
   4 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
   5 *
   6 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
   7 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
   8 *  Adapted for Power Macintosh by Paul Mackerras.
   9 *  Low-level exception handlers and MMU support
  10 *  rewritten by Paul Mackerras.
  11 *    Copyright (C) 1996 Paul Mackerras.
  12 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
  13 *
  14 *  This file contains the low-level support and setup for the
  15 *  PowerPC platform, including trap and interrupt dispatch.
  16 *  (The PPC 8xx embedded CPUs use head_8xx.S instead.)
  17 */
  18
  19#include <linux/init.h>
  20#include <linux/pgtable.h>
  21#include <asm/reg.h>
  22#include <asm/page.h>
  23#include <asm/mmu.h>
  24#include <asm/cputable.h>
  25#include <asm/cache.h>
  26#include <asm/thread_info.h>
  27#include <asm/ppc_asm.h>
  28#include <asm/asm-offsets.h>
  29#include <asm/ptrace.h>
  30#include <asm/bug.h>
  31#include <asm/kvm_book3s_asm.h>
  32#include <asm/export.h>
  33#include <asm/feature-fixups.h>
  34#include <asm/interrupt.h>
  35
  36#include "head_32.h"
  37
/*
 * LOAD_BAT(n, reg, RA, RB): program IBATn and DBATn from a 16-byte
 * table entry at n*16(reg), laid out as IBATxU, IBATxL, DBATxU, DBATxL.
 * The upper (U) registers are first written with 0 so the BAT pair is
 * never observed half-updated while being reloaded (see clear_bats()).
 * RA and RB are caller-supplied scratch registers.
 */
   38#define LOAD_BAT(n, reg, RA, RB)        \
   39        /* see the comment for clear_bats() -- Cort */ \
   40        li      RA,0;                   \
   41        mtspr   SPRN_IBAT##n##U,RA;     \
   42        mtspr   SPRN_DBAT##n##U,RA;     \
   43        lwz     RA,(n*16)+0(reg);       \
   44        lwz     RB,(n*16)+4(reg);       \
   45        mtspr   SPRN_IBAT##n##U,RA;     \
   46        mtspr   SPRN_IBAT##n##L,RB;     \
   47        lwz     RA,(n*16)+8(reg);       \
   48        lwz     RB,(n*16)+12(reg);      \
   49        mtspr   SPRN_DBAT##n##U,RA;     \
   50        mtspr   SPRN_DBAT##n##L,RB
  51
/*
 * Image entry point.  _stext/_start sit at the very start of the
 * kernel image; the three nops below preserve the historical
 * XCOFF/COFF-style entry layout expected by old firmware (see the
 * comment inside _start).  Execution continues at __start below.
 */
   52        __HEAD
   53        .stabs  "arch/powerpc/kernel/",N_SO,0,0,0f
   54        .stabs  "head_book3s_32.S",N_SO,0,0,0f
   550:
   56_ENTRY(_stext);
   57
   58/*
   59 * _start is defined this way because the XCOFF loader in the OpenFirmware
   60 * on the powermac expects the entry point to be a procedure descriptor.
   61 */
   62_ENTRY(_start);
   63        /*
   64         * These are here for legacy reasons, the kernel used to
   65         * need to look like a coff function entry for the pmac
   66         * but we're always started by some kind of bootloader now.
   67         *  -- Cort
   68         */
   69        nop     /* used by __secondary_hold on prep (mtx) and chrp smp */
   70        nop     /* used by __secondary_hold on prep (mtx) and chrp smp */
   71        nop
  72
  73/* PMAC
  74 * Enter here with the kernel text, data and bss loaded starting at
  75 * 0, running with virtual == physical mapping.
  76 * r5 points to the prom entry point (the client interface handler
  77 * address).  Address translation is turned on, with the prom
  78 * managing the hash table.  Interrupts are disabled.  The stack
  79 * pointer (r1) points to just below the end of the half-meg region
  80 * from 0x380000 - 0x400000, which is mapped in already.
  81 *
  82 * If we are booted from MacOS via BootX, we enter with the kernel
  83 * image loaded somewhere, and the following values in registers:
  84 *  r3: 'BooX' (0x426f6f58)
  85 *  r4: virtual address of boot_infos_t
  86 *  r5: 0
  87 *
  88 * PREP
  89 * This is jumped to on prep systems right after the kernel is relocated
  90 * to its proper place in memory by the boot loader.  The expected layout
  91 * of the regs is:
  92 *   r3: ptr to residual data
  93 *   r4: initrd_start or if no initrd then 0
  94 *   r5: initrd_end - unused if r4 is 0
  95 *   r6: Start of command line string
  96 *   r7: End of command line string
  97 *
  98 * This just gets a minimal mmu environment setup so we can call
  99 * start_here() to do the real work.
 100 * -- Cort
 101 */
 102
  103        .globl  __start
  104__start:
  105/*
  106 * We have to do any OF calls before we map ourselves to KERNELBASE,
  107 * because OF may have I/O devices mapped into that area
  108 * (particularly on CHRP).
  109 */
        /* r5 != 0 => started by Open Firmware (r5 = prom entry point,
           see the boot-protocol comment above) */
  110        cmpwi   0,r5,0
  111        beq     1f
  112
  113#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
  114        /* find out where we are now */
        /* bcl 20,31,$+4 is the "branch always and link to next insn"
           idiom: it loads the current PC into LR without predicting
           a real call, so mflr gives us our runtime address */
  115        bcl     20,31,$+4
  1160:      mflr    r8                      /* r8 = runtime addr here */
  117        addis   r8,r8,(_stext - 0b)@ha
  118        addi    r8,r8,(_stext - 0b)@l   /* current runtime base addr */
  119        bl      prom_init
  120#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */
  121
  122        /* We never return. We also hit that trap if trying to boot
  123         * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
  124        trap
  125
  126/*
  127 * Check for BootX signature when supporting PowerMac and branch to
  128 * appropriate trampoline if it's present
  129 */
  130#ifdef CONFIG_PPC_PMAC
        /* 0x426f6f58 = 'BooX' magic in r3 (see boot-protocol comment) */
  1311:      lis     r31,0x426f
  132        ori     r31,r31,0x6f58
  133        cmpw    0,r3,r31
  134        bne     1f
  135        bl      bootx_init
  136        trap
  137#endif /* CONFIG_PPC_PMAC */
  138
  1391:      mr      r31,r3                  /* save device tree ptr */
  140        li      r24,0                   /* cpu # */
 141
  142/*
  143 * early_init() does the early machine identification and does
  144 * the necessary low-level setup and clears the BSS
  145 *  -- Cort <cort@fsmlabs.com>
  146 */
  147        bl      early_init
  148
  149/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
  150 * the physical address we are running at, returned by early_init()
  151 */
  152        bl      mmu_off
  153__after_mmu_off:
        /* With translation off: reset all BATs, flush stale TLB entries,
           then set up the initial BAT mappings, segment registers and the
           early hash table before anything turns the MMU back on. */
  154        bl      clear_bats
  155        bl      flush_tlbs
  156
  157        bl      initial_bats
  158        bl      load_segment_registers
  159        bl      reloc_offset
  160        bl      early_hash_table
  161#if defined(CONFIG_BOOTX_TEXT)
  162        bl      setup_disp_bat
  163#endif
  164#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
  165        bl      setup_cpm_bat
  166#endif
  167#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
  168        bl      setup_usbgecko_bat
  169#endif
  170
  171/*
  172 * Call setup_cpu for CPU 0 and initialize 6xx Idle
  173 */
  174        bl      reloc_offset
  175        li      r24,0                   /* cpu# */
  176        bl      call_setup_cpu          /* Call setup_cpu for this CPU */
  177        bl      reloc_offset
  178        bl      init_idle_6xx
 179
 180
  181/*
  182 * We need to run with _start at physical address 0.
  183 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
  184 * the exception vectors at 0 (and therefore this copy
  185 * overwrites OF's exception vectors with our own).
  186 * The MMU is off at this point.
  187 */
  188        bl      reloc_offset
  189        mr      r26,r3
  190        addis   r4,r3,KERNELBASE@h      /* current address of _start */
  191        lis     r5,PHYSICAL_START@h
  192        cmplw   0,r4,r5                 /* already running at PHYSICAL_START? */
  193        bne     relocate_kernel
  194/*
  195 * we now have the 1st 16M of ram mapped with the bats.
  196 * prep needs the mmu to be turned on here, but pmac already has it on.
  197 * this shouldn't bother the pmac since it just gets turned on again
  198 * as we jump to our code at KERNELBASE. -- Cort
  199 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
  200 * off, and in other cases, we now turn it off before changing BATs above.
  201 */
  202turn_on_mmu:
        /* Enable translation and jump in one step: load SRR1 with
           MSR | IR | DR | RI and SRR0 with start_here, then rfi, which
           installs SRR1 into the MSR and resumes at SRR0. */
  203        mfmsr   r0
  204        ori     r0,r0,MSR_DR|MSR_IR|MSR_RI
  205        mtspr   SPRN_SRR1,r0
  206        lis     r0,start_here@h
  207        ori     r0,r0,start_here@l
  208        mtspr   SPRN_SRR0,r0
  209        rfi                             /* enables MMU */
 210
  211/*
  212 * We need __secondary_hold as a place to hold the other cpus on
  213 * an SMP machine, even when we are running a UP kernel.
  214 */
/*
 * Parked secondaries announce themselves by storing r3 into
 * __secondary_hold_acknowledge, then spin on the word at physical
 * address 0 until it matches their r3 value, at which point r3 is
 * their cpu number and they branch to __secondary_start.
 */
  215        . = 0xc0                        /* for prep bootloader */
  216        li      r3,1                    /* MTX only has 1 cpu */
  217        .globl  __secondary_hold
  218__secondary_hold:
  219        /* tell the master we're here */
  220        stw     r3,__secondary_hold_acknowledge@l(0)
  221#ifdef CONFIG_SMP
  222100:    lwz     r4,0(0)
  223        /* wait until we're told to start */
  224        cmpw    0,r4,r3
  225        bne     100b
  226        /* our cpu # was at addr 0 - go */
  227        mr      r24,r3                  /* cpu # */
  228        b       __secondary_start
  229#else
  230        b       .
  231#endif /* CONFIG_SMP */
  232
  233        .globl __secondary_hold_spinloop
  234__secondary_hold_spinloop:
  235        .long   0
  236        .globl  __secondary_hold_acknowledge
  237__secondary_hold_acknowledge:
  238        .long   -1                      /* becomes the cpu # once a secondary checks in */
 239
  240/* System reset */
  241/* core99 pmac starts the secondary here by changing the vector, and
  242   putting it back to what it was (unknown_async_exception) when done.  */
  243        EXCEPTION(INTERRUPT_SYSTEM_RESET, Reset, unknown_async_exception)
 244
  245/* Machine check */
  246/*
  247 * On CHRP, this is complicated by the fact that we could get a
  248 * machine check inside RTAS, and we have no guarantee that certain
  249 * critical registers will have the values we expect.  The set of
  250 * registers that might have bad values includes all the GPRs
  251 * and all the BATs.  We indicate that we are in RTAS by putting
  252 * a non-zero value, the address of the exception frame to use,
  253 * in thread.rtas_sp.  The machine check handler checks thread.rtas_sp
  254 * and uses its value if it is non-zero.
  255 * (Other exception handlers assume that r1 is a valid kernel stack
  256 * pointer when we take an exception from supervisor mode.)
  257 *      -- paulus.
  258 */
  259        START_EXCEPTION(INTERRUPT_MACHINE_CHECK, MachineCheck)
  260        EXCEPTION_PROLOG_0
  261#ifdef CONFIG_PPC_CHRP
        /* If thread.rtas_sp is non-zero we were inside RTAS: use it as the
           exception frame (r1) and remember the fact in cr1 for below. */
  262        mtspr   SPRN_SPRG_SCRATCH2,r1
  263        mfspr   r1, SPRN_SPRG_THREAD
  264        lwz     r1, RTAS_SP(r1)
  265        cmpwi   cr1, r1, 0
  266        bne     cr1, 7f
  267        mfspr   r1, SPRN_SPRG_SCRATCH2
  268#endif /* CONFIG_PPC_CHRP */
  269        EXCEPTION_PROLOG_1
  2707:      EXCEPTION_PROLOG_2 0x200 MachineCheck
  271#ifdef CONFIG_PPC_CHRP
        /* Machine check while inside RTAS (cr1 != eq): not recoverable,
           trap unconditionally. */
  272        beq     cr1, 1f
  273        twi     31, 0, 0
  274#endif
  2751:      prepare_transfer_to_handler
  276        bl      machine_check_exception
  277        b       interrupt_return
 278
  279/* Data access exception. */
/*
 * On 604-class CPUs with a hash MMU (MMU_FTR_HPTE_TABLE), plain
 * translation misses (no DSISR_BAD_FAULT_32S / DSISR_DABRMATCH bits)
 * take the fast hash_page_dsi path first; everything else goes through
 * the common prolog to do_page_fault(), or do_break() on a DABR match.
 */
  280        START_EXCEPTION(INTERRUPT_DATA_STORAGE, DataAccess)
  281#ifdef CONFIG_PPC_BOOK3S_604
  282BEGIN_MMU_FTR_SECTION
  283        mtspr   SPRN_SPRG_SCRATCH2,r10
  284        mfspr   r10, SPRN_SPRG_THREAD
  285        stw     r11, THR11(r10)
  286        mfspr   r10, SPRN_DSISR
  287        mfcr    r11
  288        andis.  r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
  289        mfspr   r10, SPRN_SPRG_THREAD
  290        beq     hash_page_dsi
.Lhash_page_dsi_cont is where hash_page_dsi branches back on failure:
  291.Lhash_page_dsi_cont:
  292        mtcr    r11
  293        lwz     r11, THR11(r10)
  294        mfspr   r10, SPRN_SPRG_SCRATCH2
  295MMU_FTR_SECTION_ELSE
  296        b       1f
  297ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
  298#endif
  2991:      EXCEPTION_PROLOG_0 handle_dar_dsisr=1
  300        EXCEPTION_PROLOG_1
  301        EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
  302        prepare_transfer_to_handler
  303        lwz     r5, _DSISR(r1)
  304        andis.  r0, r5, DSISR_DABRMATCH@h
  305        bne-    1f
  306        bl      do_page_fault
  307        b       interrupt_return
  3081:      bl      do_break
  309        REST_NVGPRS(r1)
  310        b       interrupt_return
 311
 312
  313/* Instruction access exception. */
/*
 * Saves SRR0/SRR1 into the thread struct first; on 604-class hash-MMU
 * CPUs a "no PTE found" fault (SRR1_ISI_NOPT) is retried via
 * hash_page_isi before falling back to do_page_fault().
 */
  314        START_EXCEPTION(INTERRUPT_INST_STORAGE, InstructionAccess)
  315        mtspr   SPRN_SPRG_SCRATCH0,r10
  316        mtspr   SPRN_SPRG_SCRATCH1,r11
  317        mfspr   r10, SPRN_SPRG_THREAD
  318        mfspr   r11, SPRN_SRR0
  319        stw     r11, SRR0(r10)
  320        mfspr   r11, SPRN_SRR1          /* check whether user or kernel */
  321        stw     r11, SRR1(r10)
  322        mfcr    r10
  323#ifdef CONFIG_PPC_BOOK3S_604
  324BEGIN_MMU_FTR_SECTION
  325        andis.  r11, r11, SRR1_ISI_NOPT@h       /* no pte found? */
  326        bne     hash_page_isi
  327.Lhash_page_isi_cont:
  328        mfspr   r11, SPRN_SRR1          /* check whether user or kernel */
  329END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
  330#endif
  331        andi.   r11, r11, MSR_PR
  332
  333        EXCEPTION_PROLOG_1
  334        EXCEPTION_PROLOG_2 INTERRUPT_INST_STORAGE InstructionAccess
        /* For an ISI, the fault "DSISR" comes from SRR1 and the fault
           address is the faulting PC (r12) rather than DAR. */
  335        andis.  r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
  336        stw     r5, _DSISR(r11)
  337        stw     r12, _DAR(r11)
  338        prepare_transfer_to_handler
  339        bl      do_page_fault
  340        b       interrupt_return
 341
  342/* External interrupt */
  343        EXCEPTION(INTERRUPT_EXTERNAL, HardwareInterrupt, do_IRQ)
  344
  345/* Alignment exception */
  346        START_EXCEPTION(INTERRUPT_ALIGNMENT, Alignment)
  347        EXCEPTION_PROLOG INTERRUPT_ALIGNMENT Alignment handle_dar_dsisr=1
  348        prepare_transfer_to_handler
  349        bl      alignment_exception
        /* NOTE(review): REST_NVGPRS reloads the non-volatile GPRs from
           the frame — presumably because the handler may have modified
           the saved copies (e.g. instruction emulation); confirm against
           the C handler. */
  350        REST_NVGPRS(r1)
  351        b       interrupt_return
  352
  353/* Program check exception */
  354        START_EXCEPTION(INTERRUPT_PROGRAM, ProgramCheck)
  355        EXCEPTION_PROLOG INTERRUPT_PROGRAM ProgramCheck
  356        prepare_transfer_to_handler
  357        bl      program_check_exception
  358        REST_NVGPRS(r1)
  359        b       interrupt_return
 360
  361/* Floating-point unavailable */
  362        START_EXCEPTION(0x800, FPUnavailable)
  363#ifdef CONFIG_PPC_FPU
  364BEGIN_FTR_SECTION
  365/*
  366 * Certain Freescale cores don't have a FPU and treat fp instructions
  367 * as a FP Unavailable exception.  Redirect to illegal/emulation handling.
  368 */
  369        b       ProgramCheck
  370END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
  371        EXCEPTION_PROLOG INTERRUPT_FP_UNAVAIL FPUnavailable
        /* beq => came from kernel (condition presumably set by
           EXCEPTION_PROLOG from MSR_PR — see head_32.h to confirm) */
  372        beq     1f
  373        bl      load_up_fpu             /* if from user, just load it up */
  374        b       fast_exception_return
  3751:      prepare_transfer_to_handler
  376        bl      kernel_fp_unavailable_exception
  377        b       interrupt_return
  378#else
  379        b       ProgramCheck
  380#endif
 381
  382/* Decrementer */
  383        EXCEPTION(INTERRUPT_DECREMENTER, Decrementer, timer_interrupt)
  384
/* Reserved vectors 0xa00/0xb00: no defined use, report as unknown. */
  385        EXCEPTION(0xa00, Trap_0a, unknown_exception)
  386        EXCEPTION(0xb00, Trap_0b, unknown_exception)
  387
  388/* System call */
  389        START_EXCEPTION(INTERRUPT_SYSCALL, SystemCall)
  390        SYSCALL_ENTRY   INTERRUPT_SYSCALL
  391
  392        EXCEPTION(INTERRUPT_TRACE, SingleStep, single_step_exception)
  393        EXCEPTION(0xe00, Trap_0e, unknown_exception)
 394
  395/*
  396 * The Altivec unavailable trap is at 0x0f20.  Foo.
  397 * We effectively remap it to 0x3000.
  398 * We include an altivec unavailable exception vector even if
  399 * not configured for Altivec, so that you can't panic a
  400 * non-altivec kernel running on a machine with altivec just
  401 * by executing an altivec instruction.
  402 */
/* These two vectors are trampolines only: the real handlers
 * (PerformanceMonitor / AltiVecUnavailable) live past 0x3000 below. */
  403        START_EXCEPTION(INTERRUPT_PERFMON, PerformanceMonitorTrap)
  404        b       PerformanceMonitor
  405
  406        START_EXCEPTION(INTERRUPT_ALTIVEC_UNAVAIL, AltiVecUnavailableTrap)
  407        b       AltiVecUnavailable
 408
  409        __HEAD
  410/*
  411 * Handle TLB miss for instruction on 603/603e.
  412 * Note: we get an alternate set of r0 - r3 to use automatically.
  413 */
/*
 * Software ITLB reload: walk the two-level Linux page table for the
 * address in IMISS, check _PAGE_PRESENT/ACCESSED/EXEC (plus _PAGE_USER
 * for user addresses), convert the Linux PTE to the hardware RPA format
 * and tlbli it.  On any failure, fall through to
 * InstructionAddressInvalid, which fakes up an ISI.
 */
  414        . = INTERRUPT_INST_TLB_MISS_603
  415InstructionTLBMiss:
  416/*
  417 * r0:  scratch
  418 * r1:  linux style pte ( later becomes ppc hardware pte )
  419 * r2:  ptr to linux-style pte
  420 * r3:  scratch
  421 */
  422        /* Get PTE (linux-style) and check access */
  423        mfspr   r3,SPRN_IMISS
  424#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
  425        lis     r1, TASK_SIZE@h         /* check if kernel address */
  426        cmplw   0,r1,r3
  427#endif
  428        mfspr   r2, SPRN_SDR1
  429        li      r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
  430        rlwinm  r2, r2, 28, 0xfffff000
  431#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
  432        bgt-    112f
  433        lis     r2, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
  434        li      r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
  435        addi    r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l        /* kernel page table */
  436#endif
  437112:    rlwimi  r2,r3,12,20,29          /* insert top 10 bits of address */
  438        lwz     r2,0(r2)                /* get pmd entry */
  439        rlwinm. r2,r2,0,0,19            /* extract address of pte page */
  440        beq-    InstructionAddressInvalid       /* return if no mapping */
  441        rlwimi  r2,r3,22,20,29          /* insert next 10 bits of address */
  442        lwz     r0,0(r2)                /* get linux-style pte */
  443        andc.   r1,r1,r0                /* check access & ~permission */
  444        bne-    InstructionAddressInvalid /* return if access not permitted */
  445        /* Convert linux-style PTE to low word of PPC-style PTE */
  446        rlwimi  r0,r0,32-2,31,31        /* _PAGE_USER -> PP lsb */
  447        ori     r1, r1, 0xe06           /* clear out reserved bits */
  448        andc    r1, r0, r1              /* PP = user? 1 : 0 */
  449BEGIN_FTR_SECTION
  450        rlwinm  r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
  451END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
  452        mtspr   SPRN_RPA,r1
  453        tlbli   r3
        /* On a 603 TLB miss, SRR1 carries the saved CR0 bits */
  454        mfspr   r3,SPRN_SRR1            /* Need to restore CR0 */
  455        mtcrf   0x80,r3
  456        rfi
/*
 * No (valid) translation: synthesize an ISI.  Build the DSISR/SRR1
 * fault bits, put the failing address in DAR, clear MSR_TGPR to switch
 * back to the normal GPR set, and branch to the InstructionAccess
 * handler.
 */
  457InstructionAddressInvalid:
  458        mfspr   r3,SPRN_SRR1
  459        rlwinm  r1,r3,9,6,6     /* Get load/store bit */
  460
  461        addis   r1,r1,0x2000
  462        mtspr   SPRN_DSISR,r1   /* (shouldn't be needed) */
  463        andi.   r2,r3,0xFFFF    /* Clear upper bits of SRR1 */
  464        or      r2,r2,r1
  465        mtspr   SPRN_SRR1,r2
  466        mfspr   r1,SPRN_IMISS   /* Get failing address */
  467        rlwinm. r2,r2,0,31,31   /* Check for little endian access */
  468        rlwimi  r2,r2,1,30,30   /* change 1 -> 3 */
  469        xor     r1,r1,r2
  470        mtspr   SPRN_DAR,r1     /* Set fault address */
  471        mfmsr   r0              /* Restore "normal" registers */
  472        xoris   r0,r0,MSR_TGPR>>16
  473        mtcrf   0x80,r3         /* Restore CR0 */
  474        mtmsr   r0
  475        b       InstructionAccess
 476
  477/*
  478 * Handle TLB miss for DATA Load operation on 603/603e
  479 */
/*
 * Software DTLB reload (load side): same page-table walk as the ITLB
 * case but checks _PAGE_PRESENT/ACCESSED (plus _PAGE_USER for user
 * addresses).  CPUs with MMU_FTR_NEED_DTLB_SW_LRU additionally keep a
 * software round-robin hint in SPRG_603_LRU, folded into SRR1 before
 * tlbld.  Failures go to DataAddressInvalid, which fakes up a DSI.
 */
  480        . = INTERRUPT_DATA_LOAD_TLB_MISS_603
  481DataLoadTLBMiss:
  482/*
  483 * r0:  scratch
  484 * r1:  linux style pte ( later becomes ppc hardware pte )
  485 * r2:  ptr to linux-style pte
  486 * r3:  scratch
  487 */
  488        /* Get PTE (linux-style) and check access */
  489        mfspr   r3,SPRN_DMISS
  490        lis     r1, TASK_SIZE@h         /* check if kernel address */
  491        cmplw   0,r1,r3
  492        mfspr   r2, SPRN_SDR1
  493        li      r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
  494        rlwinm  r2, r2, 28, 0xfffff000
  495        bgt-    112f
  496        lis     r2, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
  497        li      r1, _PAGE_PRESENT | _PAGE_ACCESSED
  498        addi    r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l        /* kernel page table */
  499112:    rlwimi  r2,r3,12,20,29          /* insert top 10 bits of address */
  500        lwz     r2,0(r2)                /* get pmd entry */
  501        rlwinm. r2,r2,0,0,19            /* extract address of pte page */
  502        beq-    DataAddressInvalid      /* return if no mapping */
  503        rlwimi  r2,r3,22,20,29          /* insert next 10 bits of address */
  504        lwz     r0,0(r2)                /* get linux-style pte */
  505        andc.   r1,r1,r0                /* check access & ~permission */
  506        bne-    DataAddressInvalid      /* return if access not permitted */
  507        /*
  508         * NOTE! We are assuming this is not an SMP system, otherwise
  509         * we would need to update the pte atomically with lwarx/stwcx.
  510         */
  511        /* Convert linux-style PTE to low word of PPC-style PTE */
  512        rlwinm  r1,r0,32-9,30,30        /* _PAGE_RW -> PP msb */
  513        rlwimi  r0,r0,32-1,30,30        /* _PAGE_USER -> PP msb */
  514        rlwimi  r0,r0,32-1,31,31        /* _PAGE_USER -> PP lsb */
  515        ori     r1,r1,0xe04             /* clear out reserved bits */
  516        andc    r1,r0,r1                /* PP = user? rw? 1: 3: 0 */
  517BEGIN_FTR_SECTION
  518        rlwinm  r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
  519END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
  520        mtspr   SPRN_RPA,r1
  521BEGIN_MMU_FTR_SECTION
        /* Software LRU: toggle this set's bit in SPRG_603_LRU and mirror
           it into the SRR1 way-select bit before the tlbld */
  522        li      r0,1
  523        mfspr   r1,SPRN_SPRG_603_LRU
  524        rlwinm  r2,r3,20,27,31          /* Get Address bits 15:19 */
  525        slw     r0,r0,r2
  526        xor     r1,r0,r1
  527        srw     r0,r1,r2
  528        mtspr   SPRN_SPRG_603_LRU,r1
  529        mfspr   r2,SPRN_SRR1
  530        rlwimi  r2,r0,31-14,14,14
  531        mtspr   SPRN_SRR1,r2
  532        mtcrf   0x80,r2
  533        tlbld   r3
  534        rfi
  535MMU_FTR_SECTION_ELSE
  536        mfspr   r2,SPRN_SRR1            /* Need to restore CR0 */
  537        mtcrf   0x80,r2
  538        tlbld   r3
  539        rfi
  540ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
/*
 * No (valid) translation: synthesize a DSI.  Shared by DataLoadTLBMiss
 * and DataStoreTLBMiss.  Sets DSISR/SRR1/DAR, leaves transparent-GPR
 * mode via MSR_TGPR, and branches to the DataAccess handler.
 */
  541DataAddressInvalid:
  542        mfspr   r3,SPRN_SRR1
  543        rlwinm  r1,r3,9,6,6     /* Get load/store bit */
  544        addis   r1,r1,0x2000
  545        mtspr   SPRN_DSISR,r1
  546        andi.   r2,r3,0xFFFF    /* Clear upper bits of SRR1 */
  547        mtspr   SPRN_SRR1,r2
  548        mfspr   r1,SPRN_DMISS   /* Get failing address */
  549        rlwinm. r2,r2,0,31,31   /* Check for little endian access */
  550        beq     20f             /* Jump if big endian */
  551        xori    r1,r1,3
  55220:     mtspr   SPRN_DAR,r1     /* Set fault address */
  553        mfmsr   r0              /* Restore "normal" registers */
  554        xoris   r0,r0,MSR_TGPR>>16
  555        mtcrf   0x80,r3         /* Restore CR0 */
  556        mtmsr   r0
  557        b       DataAccess
 558
  559/*
  560 * Handle TLB miss for DATA Store on 603/603e
  561 */
/*
 * Software DTLB reload (store side): like DataLoadTLBMiss but the
 * permission mask also requires _PAGE_RW and _PAGE_DIRTY, so a store
 * to a clean or read-only page faults through DataAddressInvalid and
 * lets the DSI path do the dirty/COW handling.
 */
  562        . = INTERRUPT_DATA_STORE_TLB_MISS_603
  563DataStoreTLBMiss:
  564/*
  565 * r0:  scratch
  566 * r1:  linux style pte ( later becomes ppc hardware pte )
  567 * r2:  ptr to linux-style pte
  568 * r3:  scratch
  569 */
  570        /* Get PTE (linux-style) and check access */
  571        mfspr   r3,SPRN_DMISS
  572        lis     r1, TASK_SIZE@h         /* check if kernel address */
  573        cmplw   0,r1,r3
  574        mfspr   r2, SPRN_SDR1
  575        li      r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER
  576        rlwinm  r2, r2, 28, 0xfffff000
  577        bgt-    112f
  578        lis     r2, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
  579        li      r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
  580        addi    r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l        /* kernel page table */
  581112:    rlwimi  r2,r3,12,20,29          /* insert top 10 bits of address */
  582        lwz     r2,0(r2)                /* get pmd entry */
  583        rlwinm. r2,r2,0,0,19            /* extract address of pte page */
  584        beq-    DataAddressInvalid      /* return if no mapping */
  585        rlwimi  r2,r3,22,20,29          /* insert next 10 bits of address */
  586        lwz     r0,0(r2)                /* get linux-style pte */
  587        andc.   r1,r1,r0                /* check access & ~permission */
  588        bne-    DataAddressInvalid      /* return if access not permitted */
  589        /*
  590         * NOTE! We are assuming this is not an SMP system, otherwise
  591         * we would need to update the pte atomically with lwarx/stwcx.
  592         */
  593        /* Convert linux-style PTE to low word of PPC-style PTE */
  594        rlwimi  r0,r0,32-2,31,31        /* _PAGE_USER -> PP lsb */
  595        li      r1,0xe06                /* clear out reserved bits & PP msb */
  596        andc    r1,r0,r1                /* PP = user? 1: 0 */
  597BEGIN_FTR_SECTION
  598        rlwinm  r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
  599END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
  600        mtspr   SPRN_RPA,r1
  601        mfspr   r2,SPRN_SRR1            /* Need to restore CR0 */
  602        mtcrf   0x80,r2
  603BEGIN_MMU_FTR_SECTION
        /* Software LRU: toggle this set's bit in SPRG_603_LRU and mirror
           it into the SRR1 way-select bit before the tlbld */
  604        li      r0,1
  605        mfspr   r1,SPRN_SPRG_603_LRU
  606        rlwinm  r2,r3,20,27,31          /* Get Address bits 15:19 */
  607        slw     r0,r0,r2
  608        xor     r1,r0,r1
  609        srw     r0,r1,r2
  610        mtspr   SPRN_SPRG_603_LRU,r1
  611        mfspr   r2,SPRN_SRR1
  612        rlwimi  r2,r0,31-14,14,14
  613        mtspr   SPRN_SRR1,r2
  614        mtcrf   0x80,r2
  615        tlbld   r3
  616        rfi
  617MMU_FTR_SECTION_ELSE
  618        mfspr   r2,SPRN_SRR1            /* Need to restore CR0 */
  619        mtcrf   0x80,r2
  620        tlbld   r3
  621        rfi
  622ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
 623
/* When AltiVec / TAU support is not configured, route their vectors
 * to the generic unknown-exception handlers. */
  624#ifndef CONFIG_ALTIVEC
  625#define altivec_assist_exception        unknown_exception
  626#endif
  627
  628#ifndef CONFIG_TAU_INT
  629#define TAUException    unknown_async_exception
  630#endif
  631
/* Remaining fixed vectors 0x1300-0x2f00; most have no defined use on
 * these CPUs and simply report an unknown exception. */
  632        EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception)
  633        EXCEPTION(0x1400, SMI, SMIException)
  634        EXCEPTION(0x1500, Trap_15, unknown_exception)
  635        EXCEPTION(0x1600, Trap_16, altivec_assist_exception)
  636        EXCEPTION(0x1700, Trap_17, TAUException)
  637        EXCEPTION(0x1800, Trap_18, unknown_exception)
  638        EXCEPTION(0x1900, Trap_19, unknown_exception)
  639        EXCEPTION(0x1a00, Trap_1a, unknown_exception)
  640        EXCEPTION(0x1b00, Trap_1b, unknown_exception)
  641        EXCEPTION(0x1c00, Trap_1c, unknown_exception)
  642        EXCEPTION(0x1d00, Trap_1d, unknown_exception)
  643        EXCEPTION(0x1e00, Trap_1e, unknown_exception)
  644        EXCEPTION(0x1f00, Trap_1f, unknown_exception)
  645        EXCEPTION(0x2000, RunMode, RunModeException)
  646        EXCEPTION(0x2100, Trap_21, unknown_exception)
  647        EXCEPTION(0x2200, Trap_22, unknown_exception)
  648        EXCEPTION(0x2300, Trap_23, unknown_exception)
  649        EXCEPTION(0x2400, Trap_24, unknown_exception)
  650        EXCEPTION(0x2500, Trap_25, unknown_exception)
  651        EXCEPTION(0x2600, Trap_26, unknown_exception)
  652        EXCEPTION(0x2700, Trap_27, unknown_exception)
  653        EXCEPTION(0x2800, Trap_28, unknown_exception)
  654        EXCEPTION(0x2900, Trap_29, unknown_exception)
  655        EXCEPTION(0x2a00, Trap_2a, unknown_exception)
  656        EXCEPTION(0x2b00, Trap_2b, unknown_exception)
  657        EXCEPTION(0x2c00, Trap_2c, unknown_exception)
  658        EXCEPTION(0x2d00, Trap_2d, unknown_exception)
  659        EXCEPTION(0x2e00, Trap_2e, unknown_exception)
  660        EXCEPTION(0x2f00, Trap_2f, unknown_exception)
 661
  662        __HEAD
/* End of the fixed vector area; code from here on lives above 0x3000. */
  663        . = 0x3000
  664
  665#ifdef CONFIG_PPC_BOOK3S_604
/* Spill the volatile GPRs r0, r3-r6, r8, r9 plus LR and CTR into the
 * thread struct (THR*/THLR/THCTR slots) so hash_page can be called
 * from the fast DSI/ISI paths without a stack frame. */
.macro save_regs_thread         thread
  667        stw     r0, THR0(\thread)
  668        stw     r3, THR3(\thread)
  669        stw     r4, THR4(\thread)
  670        stw     r5, THR5(\thread)
  671        stw     r6, THR6(\thread)
  672        stw     r8, THR8(\thread)
  673        stw     r9, THR9(\thread)
  674        mflr    r0
  675        stw     r0, THLR(\thread)
  676        mfctr   r0
  677        stw     r0, THCTR(\thread)
  678.endm
  679
/* Inverse of save_regs_thread: reload LR, CTR and the saved GPRs. */
.macro restore_regs_thread      thread
  681        lwz     r0, THLR(\thread)
  682        mtlr    r0
  683        lwz     r0, THCTR(\thread)
  684        mtctr   r0
  685        lwz     r0, THR0(\thread)
  686        lwz     r3, THR3(\thread)
  687        lwz     r4, THR4(\thread)
  688        lwz     r5, THR5(\thread)
  689        lwz     r6, THR6(\thread)
  690        lwz     r8, THR8(\thread)
  691        lwz     r9, THR9(\thread)
.endm
 693
/*
 * Fast DSI path: called from DataAccess with r10 = thread struct.
 * Gathers DSISR/DAR/SRR0/SRR1, translates DSISR_STORE into _PAGE_RW
 * for hash_page(), then restores the saved regs and rejoins the
 * DataAccess handler at .Lhash_page_dsi_cont.
 */
  694hash_page_dsi:
  695        save_regs_thread        r10
  696        mfdsisr r3
  697        mfdar   r4
  698        mfsrr0  r5
  699        mfsrr1  r9
  700        rlwinm  r3, r3, 32 - 15, _PAGE_RW       /* DSISR_STORE -> _PAGE_RW */
  701        bl      hash_page
  702        mfspr   r10, SPRN_SPRG_THREAD
  703        restore_regs_thread r10
  704        b       .Lhash_page_dsi_cont
  705
/*
 * Fast ISI path: SRR0/SRR1 were already stashed in the thread struct
 * by InstructionAccess; access flags (r3) are 0 for an execute fault.
 * Rejoins InstructionAccess at .Lhash_page_isi_cont.
 */
  706hash_page_isi:
  707        mr      r11, r10
  708        mfspr   r10, SPRN_SPRG_THREAD
  709        save_regs_thread        r10
  710        li      r3, 0
  711        lwz     r4, SRR0(r10)
  712        lwz     r9, SRR1(r10)
  713        bl      hash_page
  714        mfspr   r10, SPRN_SPRG_THREAD
  715        restore_regs_thread r10
  716        mr      r10, r11
  717        b       .Lhash_page_isi_cont
  718
/*
 * Return path used when hash_page succeeded: undo the fast-path
 * register stashing (DSI and ISI saved scratch in different SPRGs)
 * and rfi straight back to the faulting instruction.
 */
  719        .globl fast_hash_page_return
  720fast_hash_page_return:
  721        andis.  r10, r9, SRR1_ISI_NOPT@h        /* Set on ISI, cleared on DSI */
  722        mfspr   r10, SPRN_SPRG_THREAD
  723        restore_regs_thread r10
  724        bne     1f
  725
  726        /* DSI */
  727        mtcr    r11
  728        lwz     r11, THR11(r10)
  729        mfspr   r10, SPRN_SPRG_SCRATCH2
  730        rfi
  731
  7321:      /* ISI */
  733        mtcr    r11
  734        mfspr   r11, SPRN_SPRG_SCRATCH1
  735        mfspr   r10, SPRN_SPRG_SCRATCH0
  736        rfi
  737#endif /* CONFIG_PPC_BOOK3S_604 */
 738
  739#ifdef CONFIG_VMAP_STACK
  740        vmap_stack_overflow_exception
  741#endif
  742
/*
 * Real handler for the 0xf20 AltiVec-unavailable trampoline above.
 * From user: just load the AltiVec state and resume; from kernel (or
 * with CONFIG_ALTIVEC off): report via altivec_unavailable_exception.
 */
  743        __HEAD
  744AltiVecUnavailable:
  745        EXCEPTION_PROLOG 0xf20 AltiVecUnavailable
  746#ifdef CONFIG_ALTIVEC
  747        beq     1f
  748        bl      load_up_altivec         /* if from user, just load it up */
  749        b       fast_exception_return
  750#endif /* CONFIG_ALTIVEC */
  7511:      prepare_transfer_to_handler
  752        bl      altivec_unavailable_exception
  753        b       interrupt_return
 754
/* Real handler for the 0xf00 performance-monitor trampoline above. */
  755        __HEAD
  756PerformanceMonitor:
  757        EXCEPTION_PROLOG 0xf00 PerformanceMonitor
  758        prepare_transfer_to_handler
  759        bl      performance_monitor_exception
  760        b       interrupt_return
 761
 762
  763        __HEAD
  764/*
  765 * This code is jumped to from the startup code to copy
  766 * the kernel image to physical address PHYSICAL_START.
  767 */
/* On entry r4 = current (runtime) address of _start, set up by the
 * caller before the bne relocate_kernel; copy_and_flush reads the
 * source from r4.  The first 0x4000 bytes (which include this code)
 * are copied first, then we jump into the copy to move the rest. */
  768relocate_kernel:
  769        lis     r3,PHYSICAL_START@h     /* Destination base address */
  770        li      r6,0                    /* Destination offset */
  771        li      r5,0x4000               /* # bytes of memory to copy */
  772        bl      copy_and_flush          /* copy the first 0x4000 bytes */
  773        addi    r0,r3,4f@l              /* jump to the address of 4f */
  774        mtctr   r0                      /* in copy and do the rest. */
  775        bctr                            /* jump to the copy */
  7764:      lis     r5,_end-KERNELBASE@h
  777        ori     r5,r5,_end-KERNELBASE@l
  778        bl      copy_and_flush          /* copy the rest */
  779        b       turn_on_mmu
 780
  781/*
  782 * Copy routine used to copy the kernel to start at physical address 0
  783 * and flush and invalidate the caches as needed.
  784 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
  785 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
  786 */
/* Copies one L1 cache line (L1_CACHE_BYTES) per outer iteration, then
 * dcbst + icbi on the destination line so the copied code is coherent
 * between the data and instruction caches before it is executed. */
  787_ENTRY(copy_and_flush)
  788        addi    r5,r5,-4
  789        addi    r6,r6,-4
  7904:      li      r0,L1_CACHE_BYTES/4     /* words per cache line */
  791        mtctr   r0
  7923:      addi    r6,r6,4                 /* copy a cache line */
  793        lwzx    r0,r6,r4
  794        stwx    r0,r6,r3
  795        bdnz    3b
  796        dcbst   r6,r3                   /* write it to memory */
  797        sync
  798        icbi    r6,r3                   /* flush the icache line */
  799        cmplw   0,r6,r5
  800        blt     4b
  801        sync                            /* additional sync needed on g4 */
  802        isync
  803        addi    r5,r5,4
  804        addi    r6,r6,4
  805        blr
 806
#ifdef CONFIG_SMP
/*
 * Secondary entry for mpc86xx boards: store our PIR in the
 * hold-acknowledge word so the boot CPU can see we are alive, keep
 * the cpu number in r24 and join the common secondary path.
 */
	.globl __secondary_start_mpc86xx
__secondary_start_mpc86xx:
	mfspr	r3, SPRN_PIR
	stw	r3, __secondary_hold_acknowledge@l(0)
	mr	r24, r3			/* cpu # */
	b	__secondary_start

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:
	/* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
	   set to map the 0xf0000000 - 0xffffffff region */
	mfmsr	r0
	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
	mtmsr	r0
	isync

/*
 * Common secondary-CPU startup.  r24 = logical cpu number.
 * Runs with translation off (addresses via tophys) until the
 * final rfi switches the MMU on and enters start_secondary.
 */
	.globl	__secondary_start
__secondary_start:
	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_setup

	lis	r3,-KERNELBASE@h	/* phys-virt relocation offset */
	mr	r4,r24
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
	lis	r3,-KERNELBASE@h
	bl	init_idle_6xx

	/* get current's stack and current */
	lis	r2,secondary_current@ha
	tophys(r2,r2)
	lwz	r2,secondary_current@l(r2)
	tophys(r1,r2)
	lwz	r1,TASK_STACK(r1)

	/* stack: top of task stack, back-chain word zeroed */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	tophys(r3,r1)
	stw	r0,0(r3)

	/* load up the MMU */
	bl	load_segment_registers
	bl	load_up_mmu

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* phys address of our thread_struct */
	mtspr	SPRN_SPRG_THREAD,r4
BEGIN_MMU_FTR_SECTION
	/* No hash table: SDR1 holds the physical swapper_pg_dir, rotated
	 * by 4 -- presumably the format the software TLB-miss handlers
	 * expect; confirm against the miss handlers if touching this. */
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	rlwinm	r4, r4, 4, 0xffff01ff
	mtspr	SPRN_SDR1, r4
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)

	/* enable MMU and jump to start_secondary */
	li	r4,MSR_KERNEL
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi				/* MSR <- r4: IR/DR come on here */
#endif /* CONFIG_SMP */
 880
 881#ifdef CONFIG_KVM_BOOK3S_HANDLER
 882#include "../kvm/book3s_rmhandlers.S"
 883#endif
 884
/*
 * Load stuff into the MMU.  Intended to be called with
 * IR=0 and DR=0.
 */
/*
 * early_hash_table: flush the TLB and point SDR1 at the early hash
 * table so hash faults can be taken before MMU_init has run.
 * Clobbers r6.
 */
early_hash_table:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6, early_hash - PAGE_OFFSET@h
	ori	r6, r6, 3	/* 256kB table */
	mtspr	SPRN_SDR1, r6
	blr
 900
/*
 * load_up_mmu: flush the TLB, then program SDR1 (hash-table CPUs
 * only) and all BAT registers from the values MMU_init stored in
 * _SDR1 and BATS.  Intended to be called with IR=0 and DR=0.
 * Clobbers r3-r6.
 */
load_up_mmu:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
BEGIN_MMU_FTR_SECTION
	/* Load the SDR1 register (hash table base & size) */
	lis	r6,_SDR1@ha
	tophys(r6,r6)		/* running untranslated: use phys address */
	lwz	r6,_SDR1@l(r6)
	mtspr	SPRN_SDR1,r6
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)

/* Load the BAT registers with the values set up by MMU_init. */
	lis	r3,BATS@ha
	addi	r3,r3,BATS@l
	tophys(r3,r3)
	LOAD_BAT(0,r3,r4,r5)
	LOAD_BAT(1,r3,r4,r5)
	LOAD_BAT(2,r3,r4,r5)
	LOAD_BAT(3,r3,r4,r5)
BEGIN_MMU_FTR_SECTION
	/* CPUs with the high-BAT feature have four extra BAT pairs */
	LOAD_BAT(4,r3,r4,r5)
	LOAD_BAT(5,r3,r4,r5)
	LOAD_BAT(6,r3,r4,r5)
	LOAD_BAT(7,r3,r4,r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr
 930
/*
 * load_segment_registers: initialise all 16 segment registers for
 * context 0.  The user segments get Kp=0/Ks=0 (plus No-execute when
 * CONFIG_PPC_KUEP is enabled); the remaining kernel segments get
 * Kp=1, Ks=0, Nx=0.  VSIDs step by 0x111 per segment.
 * Clobbers r0, r3, r4 and CTR.
 */
_GLOBAL(load_segment_registers)
	li	r0, NUM_USER_SEGMENTS /* load up user segment register values */
	mtctr	r0		/* for context 0 */
#ifdef CONFIG_PPC_KUEP
	lis	r3, SR_NX@h	/* Kp = 0, Ks = 0, VSID = 0 */
#else
	li	r3, 0		/* Kp = 0, Ks = 0, VSID = 0 */
#endif
	li	r4, 0
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	li	r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
	mtctr	r0			/* for context 0 */
	rlwinm	r3, r3, 0, ~SR_NX	/* Nx = 0 */
	rlwinm	r3, r3, 0, ~SR_KS	/* Ks = 0 */
	oris	r3, r3, SR_KP@h		/* Kp = 1 */
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	blr
 954
/*
 * This is where the main kernel code starts.
 * Reached with translation handled by the initial BATs; r31 (set by
 * earlier boot code) is passed through as machine_init's second
 * argument.  Ends by rfi-ing into start_kernel with the MMU on.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l
	/* Set up for using our exception vectors */
	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4
BEGIN_MMU_FTR_SECTION
	/* No hash table: SDR1 holds the physical swapper_pg_dir, rotated
	 * by 4 -- presumably the format the software TLB-miss handlers
	 * expect; confirm against the miss handlers if touching this. */
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	rlwinm	r4, r4, 4, 0xffff01ff
	mtspr	SPRN_SDR1, r4
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)

	/* stack: top of init_thread_union, back-chain word zeroed */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
/*
 * Do early platform-specific initialization,
 * and set up the MMU.
 */
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
	li	r3,0
	mr	r4,r31
	bl	machine_init
	bl	__save_cpu_setup
	bl	MMU_init
	bl	MMU_init_hw_patch

/*
 * Go back to running unmapped so we can load up new values
 * for SDR1 (hash table pointer) and the segment registers
 * and change to using our exception vectors.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)		/* resume point must be a physical address */
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)

	.align	4		/* keep the mtspr/rfi sequence together */
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* continue at 2: with IR=DR=0 */
/* Load up the kernel context */
2:	bl	load_up_mmu

#ifdef CONFIG_BDI_SWITCH
	/* Add helper information for the Abatron bdiGDB debugger.
	 * We do this here because we know the mmu is disabled, and
	 * will be enabled for real in just a few instructions.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(0)	/* This must match your Abatron config */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r5, r5)
	stw	r6, 0(r5)
#endif /* CONFIG_BDI_SWITCH */

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi			/* translation on, enter start_kernel */
1031
/*
 * An undocumented "feature" of 604e requires that the v bit
 * be cleared before changing BAT values.
 *
 * Also, newer IBM firmware does not clear bat3 and 4 so
 * this makes sure it's done.
 *  -- Cort
 */
/* clear_bats: zero every IBAT/DBAT pair.  Clobbers r10. */
clear_bats:
	li	r10,0

	mtspr	SPRN_DBAT0U,r10
	mtspr	SPRN_DBAT0L,r10
	mtspr	SPRN_DBAT1U,r10
	mtspr	SPRN_DBAT1L,r10
	mtspr	SPRN_DBAT2U,r10
	mtspr	SPRN_DBAT2L,r10
	mtspr	SPRN_DBAT3U,r10
	mtspr	SPRN_DBAT3L,r10
	mtspr	SPRN_IBAT0U,r10
	mtspr	SPRN_IBAT0L,r10
	mtspr	SPRN_IBAT1U,r10
	mtspr	SPRN_IBAT1L,r10
	mtspr	SPRN_IBAT2U,r10
	mtspr	SPRN_IBAT2L,r10
	mtspr	SPRN_IBAT3U,r10
	mtspr	SPRN_IBAT3L,r10
BEGIN_MMU_FTR_SECTION
	/* Here's a tweak: at this point, CPU setup have
	 * not been called yet, so HIGH_BAT_EN may not be
	 * set in HID0 for the 745x processors. However, it
	 * seems that doesn't affect our ability to actually
	 * write to these SPRs.
	 */
	mtspr	SPRN_DBAT4U,r10
	mtspr	SPRN_DBAT4L,r10
	mtspr	SPRN_DBAT5U,r10
	mtspr	SPRN_DBAT5L,r10
	mtspr	SPRN_DBAT6U,r10
	mtspr	SPRN_DBAT6L,r10
	mtspr	SPRN_DBAT7U,r10
	mtspr	SPRN_DBAT7L,r10
	mtspr	SPRN_IBAT4U,r10
	mtspr	SPRN_IBAT4L,r10
	mtspr	SPRN_IBAT5U,r10
	mtspr	SPRN_IBAT5L,r10
	mtspr	SPRN_IBAT6U,r10
	mtspr	SPRN_IBAT6L,r10
	mtspr	SPRN_IBAT7U,r10
	mtspr	SPRN_IBAT7L,r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr
1084
/*
 * update_bats: reprogram every BAT pair from the BATS array.
 * The reload runs at its physical address with IR/DR off and RI/EE
 * cleared, so the BATs can be changed under our feet safely; the
 * original MSR (r6) and LR (r7) are restored through a final rfi.
 * Clobbers r0, r3-r7.
 */
_ENTRY(update_bats)
	lis	r4, 1f@h
	ori	r4, r4, 1f@l
	tophys(r4, r4)		/* physical resume point for the rfi */
	mfmsr	r6		/* save caller's MSR ... */
	mflr	r7		/* ... and return address */
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
	rlwinm	r0, r6, 0, ~MSR_RI	/* non-recoverable while BATs change */
	rlwinm	r0, r0, 0, ~MSR_EE	/* and no external interrupts */
	mtmsr	r0

	.align	4		/* keep the mtspr/rfi sequence together */
	mtspr	SPRN_SRR0, r4
	mtspr	SPRN_SRR1, r3
	rfi
1:	bl	clear_bats
	lis	r3, BATS@ha
	addi	r3, r3, BATS@l
	tophys(r3, r3)
	LOAD_BAT(0, r3, r4, r5)
	LOAD_BAT(1, r3, r4, r5)
	LOAD_BAT(2, r3, r4, r5)
	LOAD_BAT(3, r3, r4, r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4, r3, r4, r5)
	LOAD_BAT(5, r3, r4, r5)
	LOAD_BAT(6, r3, r4, r5)
	LOAD_BAT(7, r3, r4, r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
	mtmsr	r3		/* stay non-recoverable until the rfi */
	mtspr	SPRN_SRR0, r7	/* return to caller ... */
	mtspr	SPRN_SRR1, r6	/* ... with its original MSR */
	rfi
1119
/*
 * flush_tlbs: tlbie each 4kB page in the first 4MB of effective
 * addresses (0x400000 down to 0).  NOTE(review): presumably 4MB is
 * enough to hit every TLB congruence class on supported CPUs --
 * confirm against the targeted cores before relying on it elsewhere.
 * Clobbers r10.
 */
flush_tlbs:
	lis	r10, 0x40	/* r10 = 0x400000 (4MB) */
1:	addic.	r10, r10, -0x1000	/* step down one 4kB page */
	tlbie	r10
	bgt	1b		/* loop until r10 reaches 0 */
	sync
	blr
1127
/*
 * mmu_off: turn off address translation and continue (via rfi) at
 * __after_mmu_off, computed from r3 = physical address of _start.
 * If the MMU is already off, returns immediately with blr.
 * Clobbers r0, r3, r4, SRR0/SRR1.
 */
mmu_off:
	addi	r4, r3, __after_mmu_off - _start
	mfmsr	r3
	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
	beqlr					/* no: nothing to do */
	andc	r3,r3,r0			/* clear IR and DR */

	.align	4		/* keep the mtspr/rfi sequence together */
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	rfi					/* resume at __after_mmu_off */
1140
/* We use one BAT to map up to 256M of RAM at _PAGE_OFFSET */
/*
 * initial_bats: map the first 256MB of RAM at PAGE_OFFSET with
 * IBAT0/DBAT0 so the kernel can run translated before MMU_init.
 * Clobbers r8, r11.
 */
initial_bats:
	lis	r11,PAGE_OFFSET@h
	tophys(r8,r11)			/* lower BAT half: physical base */
#ifdef CONFIG_SMP
	ori	r8,r8,0x12		/* R/W access, M=1 (coherent) */
#else
	ori	r8,r8,2			/* R/W access */
#endif /* CONFIG_SMP */
	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */

	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx have valid */
	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
	mtspr	SPRN_IBAT0L,r8
	mtspr	SPRN_IBAT0U,r11
	isync
	blr
1158
#ifdef CONFIG_BOOTX_TEXT
/*
 * setup_disp_bat: program DBAT3 from the two-word disp_BAT array
 * (word 0 -> DBAT3U, word 1 -> DBAT3L) prepared in prom.c, so the
 * BootX text display stays mapped.  Clobbers r3, r8, r11.
 */
setup_disp_bat:
	/*
	 * setup the display bat prepared for us in prom.c
	 */
	mflr	r8		/* preserve LR around reloc_offset */
	bl	reloc_offset
	mtlr	r8
	addis	r8,r3,disp_BAT@ha
	addi	r8,r8,disp_BAT@l
	cmpwi	cr0,r8,0	/* NOTE(review): this tests the computed
				   address, not the BAT contents -- confirm
				   whether the contents were intended */
	beqlr
	lwz	r11,0(r8)	/* upper BAT word */
	lwz	r8,4(r8)	/* lower BAT word */
	mtspr	SPRN_DBAT3L,r8
	mtspr	SPRN_DBAT3U,r11
	blr
#endif /* CONFIG_BOOTX_TEXT */
1177
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
/*
 * setup_cpm_bat: map 1MB at 0xf0000000 (cache-inhibited, guarded,
 * R/W -- same 0x2a attribute bits used for the USB Gecko BAT below)
 * through DBAT1 for early CPM debug output.  Clobbers r8, r11.
 */
setup_cpm_bat:
	lis	r8, 0xf000		/* physical base 0xf0000000 */
	ori	r8, r8, 0x002a		/* uncached, guarded, rw */
	mtspr	SPRN_DBAT1L, r8

	lis	r11, 0xf000		/* effective base 0xf0000000 */
	ori	r11, r11, (BL_1M << 2) | 2	/* 1MB, Vs=1 */
	mtspr	SPRN_DBAT1U, r11

	blr
#endif
1190
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
/*
 * setup_usbgecko_bat: map the platform's EXI/USB Gecko I/O region
 * (128K, uncached+guarded) at the top of the address space through
 * DBAT1 for early debug output.  Clobbers r8, r11.
 */
setup_usbgecko_bat:
	/* prepare a BAT for early io */
#if defined(CONFIG_GAMECUBE)
	lis	r8, 0x0c00	/* GameCube I/O physical base */
#elif defined(CONFIG_WII)
	lis	r8, 0x0d00	/* Wii I/O physical base */
#else
#error Invalid platform for USB Gecko based early debugging.
#endif
	/*
	 * The virtual address used must match the virtual address
	 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
	 */
	lis	r11, 0xfffe	/* top 128K */
	ori	r8, r8, 0x002a	/* uncached, guarded ,rw */
	ori	r11, r11, 0x2	/* 128K, Vs=1, Vp=0 */
	mtspr	SPRN_DBAT1L, r8
	mtspr	SPRN_DBAT1U, r11
	blr
#endif
1212
1213        .data
1214