/* arch/powerpc/kernel/head_32.S */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC platform, including trap and interrupt dispatch.
 *  (The PPC 8xx embedded CPUs use head_8xx.S instead.)
 */

#include <linux/init.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/bug.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

#include "head_32.h"
/*
 * Load BAT pair n (IBATnU/L and, unless on 601, DBATnU/L) from the
 * in-memory table at (reg), using RA and RB as scratch registers.
 * The upper halves are zeroed first so a half-loaded BAT is never valid.
 * 601 only have IBAT; cr0.eq is set on 601 when using this macro, in
 * which case the DBAT load below is skipped by the beq.
 */
#define LOAD_BAT(n, reg, RA, RB)        \
        /* see the comment for clear_bats() -- Cort */ \
        li      RA,0;                   \
        mtspr   SPRN_IBAT##n##U,RA;     \
        mtspr   SPRN_DBAT##n##U,RA;     \
        lwz     RA,(n*16)+0(reg);       \
        lwz     RB,(n*16)+4(reg);       \
        mtspr   SPRN_IBAT##n##U,RA;     \
        mtspr   SPRN_IBAT##n##L,RB;     \
        beq     1f;                     \
        lwz     RA,(n*16)+8(reg);       \
        lwz     RB,(n*16)+12(reg);      \
        mtspr   SPRN_DBAT##n##U,RA;     \
        mtspr   SPRN_DBAT##n##L,RB;     \
1:
  53
        __HEAD
        .stabs  "arch/powerpc/kernel/",N_SO,0,0,0f
        .stabs  "head_32.S",N_SO,0,0,0f
0:
_ENTRY(_stext);

/*
 * _start is defined this way because the XCOFF loader in the OpenFirmware
 * on the powermac expects the entry point to be a procedure descriptor.
 */
_ENTRY(_start);
        /*
         * These are here for legacy reasons, the kernel used to
         * need to look like a coff function entry for the pmac
         * but we're always started by some kind of bootloader now.
         *  -- Cort
         */
        nop     /* used by __secondary_hold on prep (mtx) and chrp smp */
        nop     /* used by __secondary_hold on prep (mtx) and chrp smp */
        nop
  74
/* PMAC
 * Enter here with the kernel text, data and bss loaded starting at
 * 0, running with virtual == physical mapping.
 * r5 points to the prom entry point (the client interface handler
 * address).  Address translation is turned on, with the prom
 * managing the hash table.  Interrupts are disabled.  The stack
 * pointer (r1) points to just below the end of the half-meg region
 * from 0x380000 - 0x400000, which is mapped in already.
 *
 * If we are booted from MacOS via BootX, we enter with the kernel
 * image loaded somewhere, and the following values in registers:
 *  r3: 'BooX' (0x426f6f58)
 *  r4: virtual address of boot_infos_t
 *  r5: 0
 *
 * PREP
 * This is jumped to on prep systems right after the kernel is relocated
 * to its proper place in memory by the boot loader.  The expected layout
 * of the regs is:
 *   r3: ptr to residual data
 *   r4: initrd_start or if no initrd then 0
 *   r5: initrd_end - unused if r4 is 0
 *   r6: Start of command line string
 *   r7: End of command line string
 *
 * This just gets a minimal mmu environment setup so we can call
 * start_here() to do the real work.
 * -- Cort
 */

        .globl  __start
__start:
/*
 * We have to do any OF calls before we map ourselves to KERNELBASE,
 * because OF may have I/O devices mapped into that area
 * (particularly on CHRP).
 */
        cmpwi   0,r5,0                  /* r5 != 0 means OF client entry */
        beq     1f

#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
        /* find out where we are now */
        bcl     20,31,$+4               /* "branch always" to set LR = here */
0:      mflr    r8                      /* r8 = runtime addr here */
        addis   r8,r8,(_stext - 0b)@ha
        addi    r8,r8,(_stext - 0b)@l   /* current runtime base addr */
        bl      prom_init
#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */

        /* We never return. We also hit that trap if trying to boot
         * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
        trap
 127
/*
 * Check for BootX signature when supporting PowerMac and branch to
 * appropriate trampoline if it's present
 */
#ifdef CONFIG_PPC_PMAC
1:      lis     r31,0x426f              /* build 'BooX' (0x426f6f58) in r31 */
        ori     r31,r31,0x6f58
        cmpw    0,r3,r31                /* r3 holds the BootX magic if so booted */
        bne     1f
        bl      bootx_init
        trap                            /* bootx_init does not return */
#endif /* CONFIG_PPC_PMAC */

1:      mr      r31,r3                  /* save device tree ptr */
        li      r24,0                   /* cpu # */
 143
/*
 * early_init() does the early machine identification and does
 * the necessary low-level setup and clears the BSS
 *  -- Cort <cort@fsmlabs.com>
 */
        bl      early_init

/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
 * the physical address we are running at, returned by early_init()
 */
        bl      mmu_off
__after_mmu_off:
        bl      clear_bats
        bl      flush_tlbs

        bl      initial_bats            /* set up BATs for the initial mapping */
        bl      load_segment_registers
#ifdef CONFIG_KASAN
        bl      early_hash_table        /* KASAN needs a hash table up early */
#endif
#if defined(CONFIG_BOOTX_TEXT)
        bl      setup_disp_bat          /* map the BootX display for early text */
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
        bl      setup_cpm_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
        bl      setup_usbgecko_bat
#endif

/*
 * Call setup_cpu for CPU 0 and initialize 6xx Idle
 */
        bl      reloc_offset            /* r3 = phys - virt offset */
        li      r24,0                   /* cpu# */
        bl      call_setup_cpu          /* Call setup_cpu for this CPU */
#ifdef CONFIG_PPC_BOOK3S_32
        bl      reloc_offset
        bl      init_idle_6xx
#endif /* CONFIG_PPC_BOOK3S_32 */
 184
 185
/*
 * We need to run with _start at physical address 0.
 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
 * the exception vectors at 0 (and therefore this copy
 * overwrites OF's exception vectors with our own).
 * The MMU is off at this point.
 */
        bl      reloc_offset
        mr      r26,r3                  /* r26 = reloc offset, used by relocate_kernel */
        addis   r4,r3,KERNELBASE@h      /* current address of _start */
        lis     r5,PHYSICAL_START@h
        cmplw   0,r4,r5                 /* already running at PHYSICAL_START? */
        bne     relocate_kernel         /* if not, copy ourselves down first */
/*
 * we now have the 1st 16M of ram mapped with the bats.
 * prep needs the mmu to be turned on here, but pmac already has it on.
 * this shouldn't bother the pmac since it just gets turned on again
 * as we jump to our code at KERNELBASE. -- Cort
 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
 * off, and in other cases, we now turn it off before changing BATs above.
 */
turn_on_mmu:
        mfmsr   r0
        ori     r0,r0,MSR_DR|MSR_IR|MSR_RI      /* enable data/insn translation */
        mtspr   SPRN_SRR1,r0            /* MSR to take effect at rfi */
        lis     r0,start_here@h
        ori     r0,r0,start_here@l
        mtspr   SPRN_SRR0,r0            /* continue at start_here */
        SYNC
        RFI                             /* enables MMU */
 216
/*
 * We need __secondary_hold as a place to hold the other cpus on
 * an SMP machine, even when we are running a UP kernel.
 * Entered with r3 = this cpu's number; spins on the word at address 0
 * until the master writes our cpu number there.
 */
        . = 0xc0                        /* for prep bootloader */
        li      r3,1                    /* MTX only has 1 cpu */
        .globl  __secondary_hold
__secondary_hold:
        /* tell the master we're here */
        stw     r3,__secondary_hold_acknowledge@l(0)
#ifdef CONFIG_SMP
100:    lwz     r4,0(0)
        /* wait until we're told to start */
        cmpw    0,r4,r3
        bne     100b
        /* our cpu # was at addr 0 - go */
        mr      r24,r3                  /* cpu # */
        b       __secondary_start
#else
        b       .                       /* UP kernel: park this cpu forever */
#endif /* CONFIG_SMP */

        .globl  __secondary_hold_spinloop
__secondary_hold_spinloop:
        .long   0
        .globl  __secondary_hold_acknowledge
__secondary_hold_acknowledge:
        .long   -1
 245
/* System reset */
/* core99 pmac starts the secondary here by changing the vector, and
   putting it back to what it was (unknown_exception) when done.  */
        EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)

/* Machine check */
/*
 * On CHRP, this is complicated by the fact that we could get a
 * machine check inside RTAS, and we have no guarantee that certain
 * critical registers will have the values we expect.  The set of
 * registers that might have bad values includes all the GPRs
 * and all the BATs.  We indicate that we are in RTAS by putting
 * a non-zero value, the address of the exception frame to use,
 * in thread.rtas_sp.  The machine check handler checks thread.rtas_sp
 * and uses its value if it is non-zero.
 * (Other exception handlers assume that r1 is a valid kernel stack
 * pointer when we take an exception from supervisor mode.)
 *      -- paulus.
 */
        . = 0x200
        DO_KVM  0x200
        mtspr   SPRN_SPRG_SCRATCH0,r10
        mtspr   SPRN_SPRG_SCRATCH1,r11
        mfcr    r10
#ifdef CONFIG_PPC_CHRP
        mfspr   r11, SPRN_SPRG_THREAD
        lwz     r11, RTAS_SP(r11)       /* non-zero => inside RTAS */
        cmpwi   cr1, r11, 0
        bne     cr1, 7f                 /* in RTAS: skip EXCEPTION_PROLOG_1 */
#endif /* CONFIG_PPC_CHRP */
        EXCEPTION_PROLOG_1
7:      EXCEPTION_PROLOG_2
        addi    r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
        bne     cr1,1f                  /* cr1 still set from RTAS check above */
#endif
        EXC_XFER_STD(0x200, machine_check_exception)
#ifdef CONFIG_PPC_CHRP
1:      b       machine_check_in_rtas
#endif
 286
/* Data access exception. */
        . = 0x300
        DO_KVM  0x300
DataAccess:
        EXCEPTION_PROLOG
        mfspr   r10,SPRN_DSISR
        stw     r10,_DSISR(r11)
#ifdef CONFIG_PPC_KUAP
        andis.  r0,r10,(DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
#else
        andis.  r0,r10,(DSISR_BAD_FAULT_32S|DSISR_DABRMATCH)@h
#endif
        bne     1f                      /* if not, try to put a PTE */
        mfspr   r4,SPRN_DAR             /* into the hash table */
        rlwinm  r3,r10,32-15,21,21      /* DSISR_STORE -> _PAGE_RW */
BEGIN_MMU_FTR_SECTION
        bl      hash_page               /* only when a hash table MMU is in use */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
1:      lwz     r5,_DSISR(r11)          /* get DSISR value */
        mfspr   r4,SPRN_DAR
        EXC_XFER_LITE(0x300, handle_page_fault)
 308
 309
/* Instruction access exception. */
        . = 0x400
        DO_KVM  0x400
InstructionAccess:
        EXCEPTION_PROLOG
        andis.  r0,r9,SRR1_ISI_NOPT@h   /* no pte found? */
        beq     1f                      /* if so, try to put a PTE */
        li      r3,0                    /* into the hash table */
        mr      r4,r12                  /* SRR0 is fault address */
BEGIN_MMU_FTR_SECTION
        bl      hash_page               /* only when a hash table MMU is in use */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
1:      mr      r4,r12
        andis.  r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
        EXC_XFER_LITE(0x400, handle_page_fault)
 325
/* External interrupt */
        EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception: save DAR/DSISR so the handler can emulate the access */
        . = 0x600
        DO_KVM  0x600
Alignment:
        EXCEPTION_PROLOG
        mfspr   r4,SPRN_DAR
        stw     r4,_DAR(r11)
        mfspr   r5,SPRN_DSISR
        stw     r5,_DSISR(r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        EXC_XFER_STD(0x600, alignment_exception)

/* Program check exception */
        EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
 343
/* Floating-point unavailable */
        . = 0x800
        DO_KVM  0x800
FPUnavailable:
BEGIN_FTR_SECTION
/*
 * Certain Freescale cores don't have a FPU and treat fp instructions
 * as a FP Unavailable exception.  Redirect to illegal/emulation handling.
 */
        b       ProgramCheck
END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
        EXCEPTION_PROLOG
        beq     1f                      /* taken from kernel mode? */
        bl      load_up_fpu             /* if from user, just load it up */
        b       fast_exception_return
1:      addi    r3,r1,STACK_FRAME_OVERHEAD
        EXC_XFER_LITE(0x800, kernel_fp_unavailable_exception)
 361
/* Decrementer */
        EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

        EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)

/* System call */
        . = 0xc00
        DO_KVM  0xc00
SystemCall:
        SYSCALL_ENTRY   0xc00

/* Single step - not used on 601 */
        EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
        EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)

/*
 * The Altivec unavailable trap is at 0x0f20.  Foo.
 * We effectively remap it to 0x3000.
 * We include an altivec unavailable exception vector even if
 * not configured for Altivec, so that you can't panic a
 * non-altivec kernel running on a machine with altivec just
 * by executing an altivec instruction.
 */
        . = 0xf00
        DO_KVM  0xf00
        b       PerformanceMonitor

        . = 0xf20
        DO_KVM  0xf20
        b       AltiVecUnavailable
 393
/*
 * Handle TLB miss for instruction on 603/603e.
 * Note: we get an alternate set of r0 - r3 to use automatically.
 * Walks the two-level linux page table for the faulting address
 * (SPRN_IMISS) and, if a valid executable PTE is found, loads it
 * into the ITLB via SPRN_RPA/tlbli; otherwise falls through to
 * InstructionAddressInvalid to fake up a normal ISI.
 */
        . = 0x1000
InstructionTLBMiss:
/*
 * r0:  scratch
 * r1:  linux style pte ( later becomes ppc hardware pte )
 * r2:  ptr to linux-style pte
 * r3:  scratch
 */
        /* Get PTE (linux-style) and check access */
        mfspr   r3,SPRN_IMISS
#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
        lis     r1,PAGE_OFFSET@h                /* check if kernel address */
        cmplw   0,r1,r3
#endif
        mfspr   r2, SPRN_SPRG_PGDIR
#ifdef CONFIG_SWAP
        li      r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
#else
        li      r1,_PAGE_PRESENT | _PAGE_EXEC
#endif
#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
        bge-    112f
        lis     r2, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
        addi    r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l        /* kernel page table */
#endif
112:    rlwimi  r2,r3,12,20,29          /* insert top 10 bits of address */
        lwz     r2,0(r2)                /* get pmd entry */
        rlwinm. r2,r2,0,0,19            /* extract address of pte page */
        beq-    InstructionAddressInvalid       /* return if no mapping */
        rlwimi  r2,r3,22,20,29          /* insert next 10 bits of address */
        lwz     r0,0(r2)                /* get linux-style pte */
        andc.   r1,r1,r0                /* check access & ~permission */
        bne-    InstructionAddressInvalid /* return if access not permitted */
        /* Convert linux-style PTE to low word of PPC-style PTE */
        rlwimi  r0,r0,32-2,31,31        /* _PAGE_USER -> PP lsb */
        ori     r1, r1, 0xe06           /* clear out reserved bits */
        andc    r1, r0, r1              /* PP = user? 1 : 0 */
BEGIN_FTR_SECTION
        rlwinm  r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
        mtspr   SPRN_RPA,r1
        tlbli   r3                      /* load the ITLB entry for r3 */
        mfspr   r3,SPRN_SRR1            /* Need to restore CR0 */
        mtcrf   0x80,r3
        rfi
InstructionAddressInvalid:
        mfspr   r3,SPRN_SRR1
        rlwinm  r1,r3,9,6,6     /* Get load/store bit */

        addis   r1,r1,0x2000
        mtspr   SPRN_DSISR,r1   /* (shouldn't be needed) */
        andi.   r2,r3,0xFFFF    /* Clear upper bits of SRR1 */
        or      r2,r2,r1
        mtspr   SPRN_SRR1,r2
        mfspr   r1,SPRN_IMISS   /* Get failing address */
        rlwinm. r2,r2,0,31,31   /* Check for little endian access */
        rlwimi  r2,r2,1,30,30   /* change 1 -> 3 */
        xor     r1,r1,r2
        mtspr   SPRN_DAR,r1     /* Set fault address */
        mfmsr   r0              /* Restore "normal" registers */
        xoris   r0,r0,MSR_TGPR>>16      /* switch back from the TLB-miss GPR set */
        mtcrf   0x80,r3         /* Restore CR0 */
        mtmsr   r0
        b       InstructionAccess
 462
/*
 * Handle TLB miss for DATA Load operation on 603/603e.
 * Same page-table walk as InstructionTLBMiss, but for SPRN_DMISS,
 * checking read permission, and loading the DTLB via tlbld.  On
 * parts that need it (MMU_FTR_NEED_DTLB_SW_LRU), a software LRU
 * bit per TLB set is maintained in SPRN_SPRG_603_LRU.
 */
        . = 0x1100
DataLoadTLBMiss:
/*
 * r0:  scratch
 * r1:  linux style pte ( later becomes ppc hardware pte )
 * r2:  ptr to linux-style pte
 * r3:  scratch
 */
        /* Get PTE (linux-style) and check access */
        mfspr   r3,SPRN_DMISS
        lis     r1,PAGE_OFFSET@h                /* check if kernel address */
        cmplw   0,r1,r3
        mfspr   r2, SPRN_SPRG_PGDIR
#ifdef CONFIG_SWAP
        li      r1, _PAGE_PRESENT | _PAGE_ACCESSED
#else
        li      r1, _PAGE_PRESENT
#endif
        bge-    112f
        lis     r2, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
        addi    r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l        /* kernel page table */
112:    rlwimi  r2,r3,12,20,29          /* insert top 10 bits of address */
        lwz     r2,0(r2)                /* get pmd entry */
        rlwinm. r2,r2,0,0,19            /* extract address of pte page */
        beq-    DataAddressInvalid      /* return if no mapping */
        rlwimi  r2,r3,22,20,29          /* insert next 10 bits of address */
        lwz     r0,0(r2)                /* get linux-style pte */
        andc.   r1,r1,r0                /* check access & ~permission */
        bne-    DataAddressInvalid      /* return if access not permitted */
        /*
         * NOTE! We are assuming this is not an SMP system, otherwise
         * we would need to update the pte atomically with lwarx/stwcx.
         */
        /* Convert linux-style PTE to low word of PPC-style PTE */
        rlwinm  r1,r0,32-9,30,30        /* _PAGE_RW -> PP msb */
        rlwimi  r0,r0,32-1,30,30        /* _PAGE_USER -> PP msb */
        rlwimi  r0,r0,32-1,31,31        /* _PAGE_USER -> PP lsb */
        ori     r1,r1,0xe04             /* clear out reserved bits */
        andc    r1,r0,r1                /* PP = user? rw? 1: 3: 0 */
BEGIN_FTR_SECTION
        rlwinm  r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
        mtspr   SPRN_RPA,r1
        mfspr   r2,SPRN_SRR1            /* Need to restore CR0 */
        mtcrf   0x80,r2
BEGIN_MMU_FTR_SECTION
        /* Software-managed DTLB LRU: toggle this set's bit and mirror it
         * into SRR1 so the hardware replaces the right TLB way. */
        li      r0,1
        mfspr   r1,SPRN_SPRG_603_LRU
        rlwinm  r2,r3,20,27,31          /* Get Address bits 15:19 */
        slw     r0,r0,r2
        xor     r1,r0,r1
        srw     r0,r1,r2
        mtspr   SPRN_SPRG_603_LRU,r1
        mfspr   r2,SPRN_SRR1
        rlwimi  r2,r0,31-14,14,14
        mtspr   SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
        tlbld   r3                      /* load the DTLB entry for r3 */
        rfi
DataAddressInvalid:
        mfspr   r3,SPRN_SRR1
        rlwinm  r1,r3,9,6,6     /* Get load/store bit */
        addis   r1,r1,0x2000
        mtspr   SPRN_DSISR,r1
        andi.   r2,r3,0xFFFF    /* Clear upper bits of SRR1 */
        mtspr   SPRN_SRR1,r2
        mfspr   r1,SPRN_DMISS   /* Get failing address */
        rlwinm. r2,r2,0,31,31   /* Check for little endian access */
        beq     20f             /* Jump if big endian */
        xori    r1,r1,3
20:     mtspr   SPRN_DAR,r1     /* Set fault address */
        mfmsr   r0              /* Restore "normal" registers */
        xoris   r0,r0,MSR_TGPR>>16      /* switch back from the TLB-miss GPR set */
        mtcrf   0x80,r3         /* Restore CR0 */
        mtmsr   r0
        b       DataAccess
 542
/*
 * Handle TLB miss for DATA Store on 603/603e.
 * Like DataLoadTLBMiss, but requires _PAGE_RW in the PTE; an invalid
 * or read-only mapping is routed to DataAddressInvalid (shared with
 * the load-miss path above).
 */
        . = 0x1200
DataStoreTLBMiss:
/*
 * r0:  scratch
 * r1:  linux style pte ( later becomes ppc hardware pte )
 * r2:  ptr to linux-style pte
 * r3:  scratch
 */
        /* Get PTE (linux-style) and check access */
        mfspr   r3,SPRN_DMISS
        lis     r1,PAGE_OFFSET@h                /* check if kernel address */
        cmplw   0,r1,r3
        mfspr   r2, SPRN_SPRG_PGDIR
#ifdef CONFIG_SWAP
        li      r1, _PAGE_RW | _PAGE_PRESENT | _PAGE_ACCESSED
#else
        li      r1, _PAGE_RW | _PAGE_PRESENT
#endif
        bge-    112f
        lis     r2, (swapper_pg_dir - PAGE_OFFSET)@ha   /* if kernel address, use */
        addi    r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l        /* kernel page table */
112:    rlwimi  r2,r3,12,20,29          /* insert top 10 bits of address */
        lwz     r2,0(r2)                /* get pmd entry */
        rlwinm. r2,r2,0,0,19            /* extract address of pte page */
        beq-    DataAddressInvalid      /* return if no mapping */
        rlwimi  r2,r3,22,20,29          /* insert next 10 bits of address */
        lwz     r0,0(r2)                /* get linux-style pte */
        andc.   r1,r1,r0                /* check access & ~permission */
        bne-    DataAddressInvalid      /* return if access not permitted */
        /*
         * NOTE! We are assuming this is not an SMP system, otherwise
         * we would need to update the pte atomically with lwarx/stwcx.
         */
        /* Convert linux-style PTE to low word of PPC-style PTE */
        rlwimi  r0,r0,32-2,31,31        /* _PAGE_USER -> PP lsb */
        li      r1,0xe06                /* clear out reserved bits & PP msb */
        andc    r1,r0,r1                /* PP = user? 1: 0 */
BEGIN_FTR_SECTION
        rlwinm  r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
        mtspr   SPRN_RPA,r1
        mfspr   r2,SPRN_SRR1            /* Need to restore CR0 */
        mtcrf   0x80,r2
BEGIN_MMU_FTR_SECTION
        /* Software-managed DTLB LRU, same scheme as DataLoadTLBMiss */
        li      r0,1
        mfspr   r1,SPRN_SPRG_603_LRU
        rlwinm  r2,r3,20,27,31          /* Get Address bits 15:19 */
        slw     r0,r0,r2
        xor     r1,r0,r1
        srw     r0,r1,r2
        mtspr   SPRN_SPRG_603_LRU,r1
        mfspr   r2,SPRN_SRR1
        rlwimi  r2,r0,31-14,14,14
        mtspr   SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
        tlbld   r3                      /* load the DTLB entry for r3 */
        rfi
 603
/* Without Altivec, route assist traps to the generic unknown handler */
#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception        unknown_exception
#endif

        EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD)
        EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_STD)
        EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_STD)
        EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
        EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_STD)
        EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_STD)
        EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_STD)

        . = 0x3000
 639
/* Relocated targets for the 0xf20 / 0xf00 vector branches above */
AltiVecUnavailable:
        EXCEPTION_PROLOG
#ifdef CONFIG_ALTIVEC
        beq     1f                      /* taken from kernel mode? */
        bl      load_up_altivec         /* if from user, just load it up */
        b       fast_exception_return
#endif /* CONFIG_ALTIVEC */
1:      addi    r3,r1,STACK_FRAME_OVERHEAD
        EXC_XFER_LITE(0xf20, altivec_unavailable_exception)

PerformanceMonitor:
        EXCEPTION_PROLOG
        addi    r3,r1,STACK_FRAME_OVERHEAD
        EXC_XFER_STD(0xf00, performance_monitor_exception)
 654
 655
/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address PHYSICAL_START.
 * On entry r4 = current (source) address of _start, r26 = reloc offset.
 */
relocate_kernel:
        addis   r9,r26,klimit@ha        /* fetch klimit */
        lwz     r25,klimit@l(r9)
        addis   r25,r25,-KERNELBASE@h   /* r25 = image size (phys) */
        lis     r3,PHYSICAL_START@h     /* Destination base address */
        li      r6,0                    /* Destination offset */
        li      r5,0x4000               /* # bytes of memory to copy */
        bl      copy_and_flush          /* copy the first 0x4000 bytes */
        addi    r0,r3,4f@l              /* jump to the address of 4f */
        mtctr   r0                      /* in copy and do the rest. */
        bctr                            /* jump to the copy */
4:      mr      r5,r25                  /* now running in the copy: finish it */
        bl      copy_and_flush          /* copy the rest */
        b       turn_on_mmu
 674
/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 * Copies one cache line per outer iteration, then dcbst+icbi so the
 * copied code is coherent in the instruction cache.
 */
_ENTRY(copy_and_flush)
        addi    r5,r5,-4                /* pre-bias for the word-indexed loop */
        addi    r6,r6,-4
4:      li      r0,L1_CACHE_BYTES/4     /* words per cache line */
        mtctr   r0
3:      addi    r6,r6,4                 /* copy a cache line */
        lwzx    r0,r6,r4
        stwx    r0,r6,r3
        bdnz    3b
        dcbst   r6,r3                   /* write it to memory */
        sync
        icbi    r6,r3                   /* flush the icache line */
        cmplw   0,r6,r5
        blt     4b
        sync                            /* additional sync needed on g4 */
        isync
        addi    r5,r5,4                 /* undo the pre-bias */
        addi    r6,r6,4
        blr
 700
#ifdef CONFIG_SMP
/* mpc86xx secondary entry: cpu # comes from SPRN_PIR */
        .globl __secondary_start_mpc86xx
__secondary_start_mpc86xx:
        mfspr   r3, SPRN_PIR
        stw     r3, __secondary_hold_acknowledge@l(0)
        mr      r24, r3                 /* cpu # */
        b       __secondary_start

        .globl  __secondary_start_pmac_0
__secondary_start_pmac_0:
        /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
        li      r24,0
        b       1f
        li      r24,1
        b       1f
        li      r24,2
        b       1f
        li      r24,3
1:
        /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
           set to map the 0xf0000000 - 0xffffffff region */
        mfmsr   r0
        rlwinm  r0,r0,0,28,26           /* clear DR (0x10) */
        SYNC
        mtmsr   r0
        isync

        .globl  __secondary_start
__secondary_start:
        /* Copy some CPU settings from CPU 0 */
        bl      __restore_cpu_setup

        lis     r3,-KERNELBASE@h        /* r3 = phys-virt offset for these calls */
        mr      r4,r24
        bl      call_setup_cpu          /* Call setup_cpu for this CPU */
#ifdef CONFIG_PPC_BOOK3S_32
        lis     r3,-KERNELBASE@h
        bl      init_idle_6xx
#endif /* CONFIG_PPC_BOOK3S_32 */

        /* get current's stack and current */
        lis     r2,secondary_current@ha
        tophys(r2,r2)
        lwz     r2,secondary_current@l(r2)
        tophys(r1,r2)
        lwz     r1,TASK_STACK(r1)

        /* stack: point just below the top of the thread stack, with a
         * zeroed back-chain word so stack walks terminate */
        addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
        li      r0,0
        tophys(r3,r1)
        stw     r0,0(r3)

        /* load up the MMU */
        bl      load_segment_registers
        bl      load_up_mmu

        /* ptr to phys current thread */
        tophys(r4,r2)
        addi    r4,r4,THREAD    /* phys address of our thread_struct */
        mtspr   SPRN_SPRG_THREAD,r4
        lis     r4, (swapper_pg_dir - PAGE_OFFSET)@h
        ori     r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
        mtspr   SPRN_SPRG_PGDIR, r4

        /* enable MMU and jump to start_secondary */
        li      r4,MSR_KERNEL
        lis     r3,start_secondary@h
        ori     r3,r3,start_secondary@l
        mtspr   SPRN_SRR0,r3
        mtspr   SPRN_SRR1,r4
        SYNC
        RFI
#endif /* CONFIG_SMP */
 775
#ifdef CONFIG_KVM_BOOK3S_HANDLER
#include "../kvm/book3s_rmhandlers.S"
#endif

/*
 * Those generic dummy functions are kept for CPUs not
 * included in CONFIG_PPC_BOOK3S_32
 */
#if !defined(CONFIG_PPC_BOOK3S_32)
_ENTRY(__save_cpu_setup)
        blr                             /* no-op on non-Book3S-32 CPUs */
_ENTRY(__restore_cpu_setup)
        blr                             /* no-op on non-Book3S-32 CPUs */
#endif /* !defined(CONFIG_PPC_BOOK3S_32) */
 790
/*
 * Load stuff into the MMU.  Intended to be called with
 * IR=0 and DR=0.
 */
#ifdef CONFIG_KASAN
/* Point SDR1 at a minimal early hash table so KASAN can run before
 * the real MMU setup. */
early_hash_table:
        sync                    /* Force all PTE updates to finish */
        isync
        tlbia                   /* Clear all TLB entries */
        sync                    /* wait for tlbia/tlbie to finish */
        TLBSYNC                 /* ... on all CPUs */
        /* Load the SDR1 register (hash table base & size) */
        /* NOTE(review): @h binds only to PAGE_OFFSET here, not to the
         * whole difference — confirm this expression yields the intended
         * physical high half of early_hash. */
        lis     r6, early_hash - PAGE_OFFSET@h
        ori     r6, r6, 3       /* 256kB table */
        mtspr   SPRN_SDR1, r6
        blr
#endif
 808
/*
 * Load SDR1 (hash table base/size saved in _SDR1 by MMU_init) and the
 * BAT registers (from the BATS array).  Must be called in real mode
 * (IR=0/DR=0).  Clobbers r3-r6 and cr0.
 */
load_up_mmu:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6,_SDR1@ha
	tophys(r6,r6)
	lwz	r6,_SDR1@l(r6)
	mtspr	SPRN_SDR1,r6

/* Load the BAT registers with the values set up by MMU_init.
   MMU_init takes care of whether we're on a 601 or not. */
	mfpvr	r3
	srwi	r3,r3,16	/* r3 = PVR version field (1 on 601) */
	cmpwi	r3,1		/* cr0.eq (601) is consumed by LOAD_BAT below */
	lis	r3,BATS@ha
	addi	r3,r3,BATS@l
	tophys(r3,r3)		/* physical address of the BATS array */
	LOAD_BAT(0,r3,r4,r5)
	LOAD_BAT(1,r3,r4,r5)
	LOAD_BAT(2,r3,r4,r5)
	LOAD_BAT(3,r3,r4,r5)
BEGIN_MMU_FTR_SECTION
	/* 745x-style CPUs have four extra BAT pairs */
	LOAD_BAT(4,r3,r4,r5)
	LOAD_BAT(5,r3,r4,r5)
	LOAD_BAT(6,r3,r4,r5)
	LOAD_BAT(7,r3,r4,r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr
 840
/*
 * Initialise all 16 segment registers for kernel context 0.
 * User segments (0..NUM_USER_SEGMENTS-1) get Kp=0/Ks=0, plus Nx/Ks
 * when KUEP/KUAP are configured; the remaining kernel segments get
 * Kp=1 with Nx/Ks cleared.  VSIDs step by 0x111 per segment.
 * Clobbers r0, r3, r4 and ctr.
 */
load_segment_registers:
	li	r0, NUM_USER_SEGMENTS /* load up user segment register values */
	mtctr	r0		/* for context 0 */
	li	r3, 0		/* Kp = 0, Ks = 0, VSID = 0 */
#ifdef CONFIG_PPC_KUEP
	oris	r3, r3, SR_NX@h /* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
	oris	r3, r3, SR_KS@h /* Set Ks */
#endif
	li	r4, 0
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	li	r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
	mtctr	r0			/* for context 0 */
	rlwinm	r3, r3, 0, ~SR_NX	/* Nx = 0 */
	rlwinm	r3, r3, 0, ~SR_KS	/* Ks = 0 */
	oris	r3, r3, SR_KP@h		/* Kp = 1 */
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	blr
 866
/*
 * This is where the main kernel code starts.
 * Runs with relocation off after early boot; sets up current/stack,
 * calls the C early-init code, loads the MMU and then rfi's into
 * start_kernel() with translation enabled.  Does not return.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l
	/* Set up for using our exception vectors */
	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	mtspr	SPRN_SPRG_PGDIR, r4	/* physical address of the kernel PGD */

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)	/* zero back-chain */
/*
 * Do early platform-specific initialization,
 * and set up the MMU.
 */
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
	li	r3,0
	mr	r4,r31		/* r31 set by earlier boot code — NOTE(review):
				 * presumably the device-tree pointer; confirm */
	bl	machine_init
	bl	__save_cpu_setup
	bl	MMU_init
BEGIN_MMU_FTR_SECTION
	bl	MMU_init_hw_patch
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)

/*
 * Go back to running unmapped so we can load up new values
 * for SDR1 (hash table pointer) and the segment registers
 * and change to using our exception vectors.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)		/* physical address of label 2 below */
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	SYNC
	RFI			/* "returns" to 2: with the MMU off */
/* Load up the kernel context */
2:	bl	load_up_mmu

#ifdef CONFIG_BDI_SWITCH
	/* Add helper information for the Abatron bdiGDB debugger.
	 * We do this here because we know the mmu is disabled, and
	 * will be enabled for real in just a few instructions.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(r0)	/* This must match your Abatron config */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r5, r5)
	stw	r6, 0(r5)
#endif /* CONFIG_BDI_SWITCH */

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	SYNC
	RFI			/* enable translation, jump to start_kernel() */
 942
/*
 * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
 *
 * Set up the segment registers for a new context.
 * Derives the base VSID from next->context.id, installs next's PGD
 * physical address in SPRN_SPRG_PGDIR, then rewrites the user segment
 * registers.  A negative context id is a bug and traps.
 * Clobbers r0, r3-r5, ctr, cr0.
 */
_ENTRY(switch_mmu_context)
	lwz	r3,MMCONTEXTID(r4)	/* context id of the incoming mm */
	cmpwi	cr0,r3,0
	blt-	4f			/* negative id: should never happen */
	mulli	r3,r3,897	/* multiply context by skew factor */
	rlwinm	r3,r3,4,8,27	/* VSID = (context & 0xfffff) << 4 */
#ifdef CONFIG_PPC_KUEP
	oris	r3, r3, SR_NX@h /* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
	oris	r3, r3, SR_KS@h /* Set Ks */
#endif
	li	r0,NUM_USER_SEGMENTS
	mtctr	r0

	lwz	r4, MM_PGD(r4)
#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	lis	r5, abatron_pteptrs@ha
	stw	r4, abatron_pteptrs@l + 0x4(r5)
#endif
	tophys(r4, r4)			/* physical address of next's PGD */
	mtspr	SPRN_SPRG_PGDIR, r4
	li	r4,0			/* effective address of segment 0 */
	isync
3:
	mtsrin	r3,r4
	addi	r3,r3,0x111	/* next VSID */
	rlwinm	r3,r3,0,8,3	/* clear out any overflow from VSID field */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
	sync
	isync
	blr
4:	trap				/* bad context id — report a BUG */
	EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
	blr
EXPORT_SYMBOL(switch_mmu_context)
 988
/*
 * An undocumented "feature" of 604e requires that the v bit
 * be cleared before changing BAT values.
 *
 * Also, newer IBM firmware does not clear bat3 and 4 so
 * this makes sure it's done.
 *  -- Cort
 *
 * Clobbers r9, r10 and cr0.  On return cr0.eq is set iff this is a
 * 601 — update_bats relies on that for the LOAD_BAT macro's beq.
 */
clear_bats:
	li	r10,0
	mfspr	r9,SPRN_PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	r9, 1
	beq	1f			/* 601 has no DBATs to clear */

	mtspr	SPRN_DBAT0U,r10
	mtspr	SPRN_DBAT0L,r10
	mtspr	SPRN_DBAT1U,r10
	mtspr	SPRN_DBAT1L,r10
	mtspr	SPRN_DBAT2U,r10
	mtspr	SPRN_DBAT2L,r10
	mtspr	SPRN_DBAT3U,r10
	mtspr	SPRN_DBAT3L,r10
1:
	mtspr	SPRN_IBAT0U,r10
	mtspr	SPRN_IBAT0L,r10
	mtspr	SPRN_IBAT1U,r10
	mtspr	SPRN_IBAT1L,r10
	mtspr	SPRN_IBAT2U,r10
	mtspr	SPRN_IBAT2L,r10
	mtspr	SPRN_IBAT3U,r10
	mtspr	SPRN_IBAT3L,r10
BEGIN_MMU_FTR_SECTION
	/* Here's a tweak: at this point, CPU setup has
	 * not been called yet, so HIGH_BAT_EN may not be
	 * set in HID0 for the 745x processors. However, it
	 * seems that doesn't affect our ability to actually
	 * write to these SPRs.
	 */
	mtspr	SPRN_DBAT4U,r10
	mtspr	SPRN_DBAT4L,r10
	mtspr	SPRN_DBAT5U,r10
	mtspr	SPRN_DBAT5L,r10
	mtspr	SPRN_DBAT6U,r10
	mtspr	SPRN_DBAT6L,r10
	mtspr	SPRN_DBAT7U,r10
	mtspr	SPRN_DBAT7L,r10
	mtspr	SPRN_IBAT4U,r10
	mtspr	SPRN_IBAT4L,r10
	mtspr	SPRN_IBAT5U,r10
	mtspr	SPRN_IBAT5L,r10
	mtspr	SPRN_IBAT6U,r10
	mtspr	SPRN_IBAT6L,r10
	mtspr	SPRN_IBAT7U,r10
	mtspr	SPRN_IBAT7L,r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr
1046
/*
 * Reload all BAT registers from the BATS array.  Drops to real mode
 * (IR/DR off) with MSR_RI and MSR_EE cleared so a BAT can be rewritten
 * safely, clears the BATs (see clear_bats), reloads them, and rfi's
 * back to the caller with the original MSR.  Clobbers r0, r3-r7, r9,
 * r10 and cr0.
 */
_ENTRY(update_bats)
	lis	r4, 1f@h
	ori	r4, r4, 1f@l
	tophys(r4, r4)			/* physical address of 1: below */
	mfmsr	r6			/* r6 = caller's MSR, restored at the end */
	mflr	r7			/* r7 = return address */
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
	rlwinm	r0, r6, 0, ~MSR_RI	/* not recoverable while BATs change */
	rlwinm	r0, r0, 0, ~MSR_EE	/* no interrupts either */
	mtmsr	r0
	mtspr	SPRN_SRR0, r4
	mtspr	SPRN_SRR1, r3
	SYNC
	RFI				/* continue at 1: in real mode */
1:	bl	clear_bats		/* also leaves cr0.eq = "is 601" for LOAD_BAT */
	lis	r3, BATS@ha
	addi	r3, r3, BATS@l
	tophys(r3, r3)
	LOAD_BAT(0, r3, r4, r5)
	LOAD_BAT(1, r3, r4, r5)
	LOAD_BAT(2, r3, r4, r5)
	LOAD_BAT(3, r3, r4, r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4, r3, r4, r5)
	LOAD_BAT(5, r3, r4, r5)
	LOAD_BAT(6, r3, r4, r5)
	LOAD_BAT(7, r3, r4, r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
	mtmsr	r3
	mtspr	SPRN_SRR0, r7		/* return to caller ... */
	mtspr	SPRN_SRR1, r6		/* ... with its original MSR */
	SYNC
	RFI
1081
/*
 * Invalidate TLB entries by issuing tlbie for every 4kB page in the
 * first 4MB of effective address space (4MB-4k down to 0).  4MB of EAs
 * is presumably enough to index every TLB set on these CPUs — NOTE
 * (review): confirm against the target cores' TLB geometry.
 * Clobbers r10, cr0 and XER[CA].
 */
flush_tlbs:
	lis	r10, 0x40
1:	addic.	r10, r10, -0x1000
	tlbie	r10
	bgt	1b
	sync
	blr
1089
/*
 * Turn address translation off and continue at the physical alias of
 * __after_mmu_off.  On entry r3 is used as the run-time (physical)
 * address of _start — set by the caller before this chunk; confirm.
 * If the MMU is already off, returns immediately.  Clobbers r0, r3,
 * r4 and cr0.
 */
mmu_off:
 	addi	r4, r3, __after_mmu_off - _start
	mfmsr	r3
	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
	beqlr					/* no: nothing to do */
	andc	r3,r3,r0			/* clear IR and DR */
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	RFI					/* resume at __after_mmu_off, unmapped */
1100
/*
 * On 601, we use 3 BATs to map up to 24M of RAM at PAGE_OFFSET
 * (we keep one for debugging) and on others, we use one 256M BAT.
 * Clobbers r8, r9, r11 and cr0.
 */
initial_bats:
	lis	r11,PAGE_OFFSET@h
	mfspr	r9,SPRN_PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	0,r9,1
	bne	4f			/* not a 601: single 256M BAT below */
	ori	r11,r11,4		/* set up BAT registers for 601 */
	li	r8,0x7f			/* valid, block length = 8MB */
	mtspr	SPRN_IBAT0U,r11		/* N.B. 601 has valid bit in */
	mtspr	SPRN_IBAT0L,r8		/* lower BAT register */
	addis	r11,r11,0x800000@h	/* next 8MB window: advance EA ... */
	addis	r8,r8,0x800000@h	/* ... and physical base */
	mtspr	SPRN_IBAT1U,r11
	mtspr	SPRN_IBAT1L,r8
	addis	r11,r11,0x800000@h
	addis	r8,r8,0x800000@h
	mtspr	SPRN_IBAT2U,r11
	mtspr	SPRN_IBAT2L,r8
	isync
	blr

4:	tophys(r8,r11)			/* physical base of the kernel mapping */
#ifdef CONFIG_SMP
	ori	r8,r8,0x12		/* R/W access, M=1 */
#else
	ori	r8,r8,2			/* R/W access */
#endif /* CONFIG_SMP */
	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */

	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx (not 601) have valid */
	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
	mtspr	SPRN_IBAT0L,r8
	mtspr	SPRN_IBAT0U,r11
	isync
	blr
1140
1141
#ifdef CONFIG_BOOTX_TEXT
setup_disp_bat:
	/*
	 * setup the display bat prepared for us in prom.c
	 * Uses BAT3 (IBAT3 on 601, which keeps the valid bit in the
	 * lower register; DBAT3 otherwise).  Clobbers r3, r8, r9, r11
	 * and cr0.
	 */
	mflr	r8
	bl	reloc_offset		/* relocation offset returned in r3 */
	mtlr	r8
	addis	r8,r3,disp_BAT@ha
	addi	r8,r8,disp_BAT@l
	cmpwi	cr0,r8,0		/* NOTE(review): tests the *address* of
					 * disp_BAT, not its contents — confirm
					 * this guard is what was intended */
	beqlr
	lwz	r11,0(r8)		/* upper BAT word prepared by prom.c */
	lwz	r8,4(r8)		/* lower BAT word */
	mfspr	r9,SPRN_PVR
	rlwinm	r9,r9,16,16,31		/* r9 = 1 for 601, 4 for 604 */
	cmpwi	0,r9,1
	beq	1f
	mtspr	SPRN_DBAT3L,r8
	mtspr	SPRN_DBAT3U,r11
	blr
1:	mtspr	SPRN_IBAT3L,r8		/* 601: display BAT lives in IBAT3 */
	mtspr	SPRN_IBAT3U,r11
	blr
#endif /* CONFIG_BOOTX_TEXT */
1167
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
/*
 * Map 1MB at 0xf0000000 (1:1) through DBAT1 for early CPM debug
 * console access.  Clobbers r8 and r11.
 */
setup_cpm_bat:
	lis	r8, 0xf000
	ori	r8, r8, 0x002a		/* uncached, guarded, rw */
	mtspr	SPRN_DBAT1L, r8

	lis	r11, 0xf000
	ori	r11, r11, (BL_1M << 2) | 2	/* 1M, Vs=1, Vp=0 */
	mtspr	SPRN_DBAT1U, r11

	blr
#endif
1180
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
setup_usbgecko_bat:
	/* prepare a BAT for early io — maps the platform's EXI/flipper
	 * register area at the top 128K of the address space via DBAT1.
	 * Clobbers r8 and r11. */
#if defined(CONFIG_GAMECUBE)
	lis	r8, 0x0c00
#elif defined(CONFIG_WII)
	lis	r8, 0x0d00
#else
#error Invalid platform for USB Gecko based early debugging.
#endif
	/*
	 * The virtual address used must match the virtual address
	 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
	 */
	lis	r11, 0xfffe	/* top 128K */
	ori	r8, r8, 0x002a	/* uncached, guarded, rw */
	ori	r11, r11, 0x2	/* 128K, Vs=1, Vp=0 */
	mtspr	SPRN_DBAT1L, r8
	mtspr	SPRN_DBAT1U, r11
	blr
#endif
1202
#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
 * We first disable the MMU, and then jump to the ROM reset address.
 *
 * r3 is the board info structure, r4 is the location for starting.
 * I use this for building a small kernel that can load other kernels,
 * rather than trying to write or rely on a rom monitor that can tftp load.
 */
       .globl	m8260_gorom
m8260_gorom:
	mfmsr	r0
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
	sync
	mtmsr	r0
	sync
	mfspr	r11, SPRN_HID0
	lis	r10, 0
	ori	r10,r10,HID0_ICE|HID0_DCE
	andc	r11, r11, r10	/* disable I- and D-cache */
	mtspr	SPRN_HID0, r11
	isync
	li	r5, MSR_ME|MSR_RI	/* target MSR: translation off */
	lis	r6,2f@h
	addis	r6,r6,-KERNELBASE@h	/* physical address of 2: below */
	ori	r6,r6,2f@l
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r5
	isync
	sync
	rfi			/* continue at 2: unmapped */
2:
	mtlr	r4
	blr			/* "return" into the ROM code at r4 */
#endif
1237
1238
/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	/* a page of zeroes, exported for generic zero-page users */
	.globl	empty_zero_page
empty_zero_page:
	.space	4096
EXPORT_SYMBOL(empty_zero_page)

	/* the kernel's top-level page table (loaded into SPRG_PGDIR above) */
	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/* Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8
1261