/*
 *  linux/arch/arm/kernel/head.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (c) 2003 ARM Limited
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Kernel startup code for all 32-bit CPUs
 */
#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/cp15.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/pgtable.h>

#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
#endif

/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 16K below KERNEL_RAM_VADDR.  Therefore, we must
 * make sure that KERNEL_RAM_VADDR is correctly set.  Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
 */
#define KERNEL_RAM_VADDR        (PAGE_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif

#ifdef CONFIG_ARM_LPAE
        /* LPAE requires an additional page for the PGD */
#define PG_DIR_SIZE     0x5000
#define PMD_ORDER       3
#else
#define PG_DIR_SIZE     0x4000
#define PMD_ORDER       2
#endif

        .globl  swapper_pg_dir
        .equ    swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
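
/*
 * Worked example (values assumed, the common 3G/1G split without LPAE):
 * with PAGE_OFFSET = 0xC0000000 and the default TEXT_OFFSET of 0x8000,
 * KERNEL_RAM_VADDR = 0xC0008000 and swapper_pg_dir ends up at
 * 0xC0008000 - 0x4000 = 0xC0004000.
 */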

        .macro  pgtbl, rd, phys
        add     \rd, \phys, #TEXT_OFFSET
        sub     \rd, \rd, #PG_DIR_SIZE
        .endm
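
/*
 * E.g. (assumed RAM base): for phys = 0x60000000 the macro above yields
 * rd = 0x60000000 + TEXT_OFFSET - PG_DIR_SIZE, i.e. 0x60004000 in the
 * default non-LPAE case - the physical address of swapper_pg_dir
 * before the MMU is enabled.
 */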

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = don't care, r0 = 0,
 * r1 = machine nr, r2 = atags or dtb pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or in extreme, well justified
 * circumstances, zImage) is for.
 */
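
/*
 * For example (illustrative values only): with the kernel linked at
 * 0xc0008000 and RAM at physical 0x60000000, the decompressor jumps to
 * physical 0x60008000 with r0 = 0, r1 = machine number and r2 = the
 * physical address of the DTB or ATAGs.
 */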
        .arm

        __HEAD
ENTRY(stext)
 ARM_BE8(setend be )                    @ ensure we are in BE8 mode

 THUMB( badr    r9, 1f          )       @ Kernel is always entered in ARM.
 THUMB( bx      r9              )       @ If this is a Thumb-2 kernel,
 THUMB( .thumb                  )       @ switch to Thumb now.
 THUMB(1:                       )

#ifdef CONFIG_ARM_VIRT_EXT
        bl      __hyp_stub_install
#endif
        @ ensure svc mode and all interrupts masked
        safe_svcmode_maskall r9

        mrc     p15, 0, r9, c0, c0              @ get processor id
        bl      __lookup_processor_type         @ r5=procinfo r9=cpuid
        movs    r10, r5                         @ invalid processor (r5=0)?
 THUMB( it      eq )            @ force fixup-able long branch encoding
        beq     __error_p                       @ yes, error 'p'

#ifdef CONFIG_ARM_LPAE
        mrc     p15, 0, r3, c0, c1, 4           @ read ID_MMFR0
        and     r3, r3, #0xf                    @ extract VMSA support
        cmp     r3, #5                          @ long-descriptor translation table format?
 THUMB( it      lo )                            @ force fixup-able long branch encoding
        blo     __error_lpae                    @ only classic page table format
#endif

#ifndef CONFIG_XIP_KERNEL
        adr     r3, 2f
        ldmia   r3, {r4, r8}
        sub     r4, r3, r4                      @ (PHYS_OFFSET - PAGE_OFFSET)
        add     r8, r8, r4                      @ PHYS_OFFSET
#else
        ldr     r8, =PLAT_PHYS_OFFSET           @ always constant in this case
#endif
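
        /*
         * Worked example of the 2f trick above (addresses assumed):
         * the literal pair at 2: (defined after stext) stores its own
         * link-time address and PAGE_OFFSET.  If the kernel is linked
         * at PAGE_OFFSET 0xC0000000 but runs from RAM at 0x60000000,
         * "adr r3, 2f" gives the physical address of 2f while the
         * first literal loaded into r4 is its virtual address, so
         * r4 := r3 - r4 = 0xA0000000 (mod 2^32) and
         * r8 := 0xC0000000 + 0xA0000000 = 0x60000000, the runtime
         * PHYS_OFFSET.
         */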

        /*
         * r1 = machine no, r2 = atags or dtb,
         * r8 = phys_offset, r9 = cpuid, r10 = procinfo
         */
        bl      __vet_atags
#ifdef CONFIG_SMP_ON_UP
        bl      __fixup_smp
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
        bl      __fixup_pv_table
#endif
        bl      __create_page_tables

        /*
         * The following calls CPU specific code in a position independent
         * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
         * xxx_proc_info structure selected by __lookup_processor_type
         * above.
         *
         * The processor init function will be called with:
         *  r1 - machine type
         *  r2 - boot data (atags/dt) pointer
         *  r4 - translation table base (low word)
         *  r5 - translation table base (high word, if LPAE)
         *  r8 - translation table base 1 (pfn if LPAE)
         *  r9 - cpuid
         *  r13 - virtual address for __enable_mmu -> __turn_mmu_on
         *
         * On return, the CPU will be ready for the MMU to be turned on,
         * r0 will hold the CPU control register value, r1, r2, r4, and
         * r9 will be preserved.  r5 will also be preserved if LPAE.
         */
        ldr     r13, =__mmap_switched           @ address to jump to after
                                                @ mmu has been enabled
        badr    lr, 1f                          @ return (PIC) address
#ifdef CONFIG_ARM_LPAE
        mov     r5, #0                          @ high TTBR0
        mov     r8, r4, lsr #12                 @ TTBR1 is swapper_pg_dir pfn
#else
        mov     r8, r4                          @ set TTBR1 to swapper_pg_dir
#endif
        ldr     r12, [r10, #PROCINFO_INITFUNC]
        add     r12, r12, r10
        ret     r12
1:      b       __enable_mmu
ENDPROC(stext)
        .ltorg
#ifndef CONFIG_XIP_KERNEL
2:      .long   .
        .long   PAGE_OFFSET
#endif

/*
 * Set up the initial page tables.  We set up only the bare minimum
 * required to get the kernel running, which generally means mapping
 * in the kernel code.
 *
 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
 *
 * Returns:
 *  r0, r3, r5-r7 corrupted
 *  r4 = physical page table address
 */
__create_page_tables:
        pgtbl   r4, r8                          @ page table address

        /*
         * Clear the swapper page table
         */
        mov     r0, r4
        mov     r3, #0
        add     r6, r0, #PG_DIR_SIZE
1:      str     r3, [r0], #4
        str     r3, [r0], #4
        str     r3, [r0], #4
        str     r3, [r0], #4
        teq     r0, r6
        bne     1b

#ifdef CONFIG_ARM_LPAE
        /*
         * Build the PGD table (first level) to point to the PMD table. A PGD
         * entry is 64-bit wide.
         */
        mov     r0, r4
        add     r3, r4, #0x1000                 @ first PMD table address
        orr     r3, r3, #3                      @ PGD block type
        mov     r6, #4                          @ PTRS_PER_PGD
        mov     r7, #1 << (55 - 32)             @ L_PGD_SWAPPER
1:
#ifdef CONFIG_CPU_ENDIAN_BE8
        str     r7, [r0], #4                    @ set top PGD entry bits
        str     r3, [r0], #4                    @ set bottom PGD entry bits
#else
        str     r3, [r0], #4                    @ set bottom PGD entry bits
        str     r7, [r0], #4                    @ set top PGD entry bits
#endif
        add     r3, r3, #0x1000                 @ next PMD table
        subs    r6, r6, #1
        bne     1b

        add     r4, r4, #0x1000                 @ point to the PMD tables
#ifdef CONFIG_CPU_ENDIAN_BE8
        add     r4, r4, #4                      @ we only write the bottom word
#endif
#endif
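
        /*
         * Sketch of the LPAE layout built above (assuming the usual
         * 3G/1G split): four 64-bit PGD entries, each covering 1GiB,
         * point at the four PMD tables at r4+0x1000..r4+0x4000, so a
         * kernel address such as 0xC0000000 resolves via PGD entry 3
         * to the PMD table at r4+0x4000; bit 55 of the top word is
         * L_PGD_SWAPPER.
         */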

        ldr     r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

        /*
         * Create identity mapping to cater for __enable_mmu.
         * This identity mapping will be removed by paging_init().
         */
        adr     r0, __turn_mmu_on_loc
        ldmia   r0, {r3, r5, r6}
        sub     r0, r0, r3                      @ virt->phys offset
        add     r5, r5, r0                      @ phys __turn_mmu_on
        add     r6, r6, r0                      @ phys __turn_mmu_on_end
        mov     r5, r5, lsr #SECTION_SHIFT
        mov     r6, r6, lsr #SECTION_SHIFT

1:      orr     r3, r7, r5, lsl #SECTION_SHIFT  @ flags + kernel base
        str     r3, [r4, r5, lsl #PMD_ORDER]    @ identity mapping
        cmp     r5, r6
        addlo   r5, r5, #1                      @ next section
        blo     1b
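
        /*
         * Worked example (physical address assumed): if __turn_mmu_on
         * sits at physical 0x60100000, then with 1MiB sections
         * r5 = 0x601, the entry written above lives at
         * r4 + (0x601 << PMD_ORDER) and maps virtual 0x60100000 to
         * physical 0x60100000, so instruction fetch still works at
         * the instant the MMU comes on.
         */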

        /*
         * Map our RAM from the start to the end of the kernel .bss section.
         */
        add     r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
        ldr     r6, =(_end - 1)
        orr     r3, r8, r7
        add     r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:      str     r3, [r0], #1 << PMD_ORDER
        add     r3, r3, #1 << SECTION_SHIFT
        cmp     r0, r6
        bls     1b
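
        /*
         * Index arithmetic, e.g. for classic page tables
         * (SECTION_SHIFT = 20, PMD_ORDER = 2) with PAGE_OFFSET =
         * 0xC0000000: the first kernel entry is written at
         * r4 + (0xC0000000 >> 18) = r4 + 0x3000, one 4-byte entry per
         * 1MiB section, up to the section covering _end - 1.
         */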

#ifdef CONFIG_XIP_KERNEL
        /*
         * Map the kernel image separately as it is not located in RAM.
         */
#define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
        mov     r3, pc
        mov     r3, r3, lsr #SECTION_SHIFT
        orr     r3, r7, r3, lsl #SECTION_SHIFT
        add     r0, r4,  #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
        str     r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
        ldr     r6, =(_edata_loc - 1)
        add     r0, r0, #1 << PMD_ORDER
        add     r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:      cmp     r0, r6
        add     r3, r3, #1 << SECTION_SHIFT
        strls   r3, [r0], #1 << PMD_ORDER
        bls     1b
#endif

        /*
         * Then map boot params address in r2 if specified.
         * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
         */
        mov     r0, r2, lsr #SECTION_SHIFT
        movs    r0, r0, lsl #SECTION_SHIFT
        subne   r3, r0, r8
        addne   r3, r3, #PAGE_OFFSET
        addne   r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
        orrne   r6, r7, r0
        strne   r6, [r3], #1 << PMD_ORDER
        addne   r6, r6, #1 << SECTION_SHIFT
        strne   r6, [r3]
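
        /*
         * E.g. (addresses assumed): a DTB at physical 0x6FE00000 with
         * PHYS_OFFSET = 0x60000000 gets mapped at virtual 0xCFE00000;
         * r3 indexes that virtual section's entry, and the second
         * store covers the next 1MiB in case the ATAGs/DTB straddle a
         * section boundary.
         */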

#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
        sub     r4, r4, #4                      @ Fixup page table pointer
                                                @ for 64-bit descriptors
#endif

#ifdef CONFIG_DEBUG_LL
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
        /*
         * Map in IO space for serial debugging.
         * This allows debug messages to be output
         * via a serial console before paging_init.
         */
        addruart r7, r3, r0

        mov     r3, r3, lsr #SECTION_SHIFT
        mov     r3, r3, lsl #PMD_ORDER

        add     r0, r4, r3
        mov     r3, r7, lsr #SECTION_SHIFT
        ldr     r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
        orr     r3, r7, r3, lsl #SECTION_SHIFT
#ifdef CONFIG_ARM_LPAE
        mov     r7, #1 << (54 - 32)             @ XN
#ifdef CONFIG_CPU_ENDIAN_BE8
        str     r7, [r0], #4
        str     r3, [r0], #4
#else
        str     r3, [r0], #4
        str     r7, [r0], #4
#endif
#else
        orr     r3, r3, #PMD_SECT_XN
        str     r3, [r0], #4
#endif

#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
        /* we don't need any serial debugging mappings */
        ldr     r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
#endif

#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
        /*
         * If we're using the NetWinder or CATS, we also need to map
         * in the 16550-type serial port for the debug messages
         */
        add     r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER)
        orr     r3, r7, #0x7c000000
        str     r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
        /*
         * Map in screen at 0x02000000 & SCREEN2_BASE
         * Similar reasons here - for debug.  This is
         * only for Acorn RiscPC architectures.
         */
        add     r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER)
        orr     r3, r7, #0x02000000
        str     r3, [r0]
        add     r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
        str     r3, [r0]
#endif
#endif
#ifdef CONFIG_ARM_LPAE
        sub     r4, r4, #0x1000         @ point to the PGD table
#endif
        ret     lr
ENDPROC(__create_page_tables)
        .ltorg
        .align
__turn_mmu_on_loc:
        .long   .
        .long   __turn_mmu_on
        .long   __turn_mmu_on_end

#if defined(CONFIG_SMP)
        .text
        .arm
ENTRY(secondary_startup_arm)
 THUMB( badr    r9, 1f          )       @ Kernel is entered in ARM.
 THUMB( bx      r9              )       @ If this is a Thumb-2 kernel,
 THUMB( .thumb                  )       @ switch to Thumb now.
 THUMB(1:                       )
ENTRY(secondary_startup)
        /*
         * Common entry point for secondary CPUs.
         *
         * Ensure that we're in SVC mode, and IRQs are disabled.  Look up
         * the processor type - there is no need to check the machine type
         * as it has already been validated by the primary processor.
         */

 ARM_BE8(setend be)                             @ ensure we are in BE8 mode

#ifdef CONFIG_ARM_VIRT_EXT
        bl      __hyp_stub_install_secondary
#endif
        safe_svcmode_maskall r9

        mrc     p15, 0, r9, c0, c0              @ get processor id
        bl      __lookup_processor_type
        movs    r10, r5                         @ invalid processor?
        moveq   r0, #'p'                        @ yes, error 'p'
 THUMB( it      eq )            @ force fixup-able long branch encoding
        beq     __error_p

        /*
         * Use the page tables supplied from __cpu_up.
         */
        adr     r4, __secondary_data
        ldmia   r4, {r5, r7, r12}               @ address to jump to after
        sub     lr, r4, r5                      @ mmu has been enabled
        add     r3, r7, lr
        ldrd    r4, [r3, #0]                    @ get secondary_data.pgdir
ARM_BE8(eor     r4, r4, r5)                     @ Swap r5 and r4 in BE:
ARM_BE8(eor     r5, r4, r5)                     @ it can be done in 3 steps
ARM_BE8(eor     r4, r4, r5)                     @ without using a temp reg.
        ldr     r8, [r3, #8]                    @ get secondary_data.swapper_pg_dir
        badr    lr, __enable_mmu                @ return address
        mov     r13, r12                        @ __secondary_switched address
        ldr     r12, [r10, #PROCINFO_INITFUNC]
        add     r12, r12, r10                   @ initialise processor
                                                @ (return control reg)
        ret     r12
ENDPROC(secondary_startup)
ENDPROC(secondary_startup_arm)

        /*
         * r7 = &secondary_data
         */
ENTRY(__secondary_switched)
        ldr     sp, [r7, #12]                   @ get secondary_data.stack
        mov     fp, #0
        b       secondary_start_kernel
ENDPROC(__secondary_switched)

        .align

        .type   __secondary_data, %object
__secondary_data:
        .long   .
        .long   secondary_data
        .long   __secondary_switched
#endif /* defined(CONFIG_SMP) */

/*
 * Set up common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
 * registers.  All these registers need to be preserved by the
 * processor setup function (or set in the case of r0).
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r4  = TTBR pointer (low word)
 *  r5  = TTBR pointer (high word if LPAE)
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 */
__enable_mmu:
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
        orr     r0, r0, #CR_A
#else
        bic     r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
        bic     r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
        bic     r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
        bic     r0, r0, #CR_I
#endif
#ifdef CONFIG_ARM_LPAE
        mcrr    p15, 0, r4, r5, c2              @ load TTBR0
#else
        mov     r5, #DACR_INIT
        mcr     p15, 0, r5, c3, c0, 0           @ load domain access register
        mcr     p15, 0, r4, c2, c0, 0           @ load page table pointer
#endif
        b       __turn_mmu_on
ENDPROC(__enable_mmu)

/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
        .align  5
        .pushsection    .idmap.text, "ax"
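/*
 * Note: this stub runs from the identity mapping created in
 * __create_page_tables, so instruction fetches stay valid across the
 * moment the MMU is switched on.  The "mov r0, r0" and "mov r3, r3"
 * below are deliberate nops: together with the read-back of the ID
 * register and the instr_sync barriers they keep the pipeline
 * well-defined around the write to the control register.
 */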
ENTRY(__turn_mmu_on)
        mov     r0, r0
        instr_sync
        mcr     p15, 0, r0, c1, c0, 0           @ write control reg
        mrc     p15, 0, r3, c0, c0, 0           @ read id reg
        instr_sync
        mov     r3, r3
        mov     r3, r13
        ret     r3
__turn_mmu_on_end:
ENDPROC(__turn_mmu_on)
        .popsection

#ifdef CONFIG_SMP_ON_UP
        __HEAD
__fixup_smp:
        and     r3, r9, #0x000f0000     @ architecture version
        teq     r3, #0x000f0000         @ CPU ID supported?
        bne     __fixup_smp_on_up       @ no, assume UP

        bic     r3, r9, #0x00ff0000
        bic     r3, r3, #0x0000000f     @ mask 0xff00fff0
        mov     r4, #0x41000000
        orr     r4, r4, #0x0000b000
        orr     r4, r4, #0x00000020     @ val 0x4100b020
        teq     r3, r4                  @ ARM 11MPCore?
        reteq   lr                      @ yes, assume SMP
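
        @ Worked example (an 11MPCore MIDR of 0x410fb022 assumed):
        @ clearing the variant (bits 23-20), architecture (19-16) and
        @ revision (3-0) fields leaves 0x4100b020, which matches the
        @ value built in r4 above, so such a core is treated as SMP.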

        mrc     p15, 0, r0, c0, c0, 5   @ read MPIDR
        and     r0, r0, #0xc0000000     @ multiprocessing extensions and
        teq     r0, #0x80000000         @ not part of a uniprocessor system?
        bne     __fixup_smp_on_up       @ no, assume UP

        @ Core indicates it is SMP. Check for Aegis SoC where a single
        @ Cortex-A9 CPU is present but SMP operations fault.
        mov     r4, #0x41000000
        orr     r4, r4, #0x0000c000
        orr     r4, r4, #0x00000090
        teq     r3, r4                  @ Check for ARM Cortex-A9
        retne   lr                      @ Not ARM Cortex-A9,

        @ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
        @ below address check will need to be #ifdef'd or equivalent
        @ for the Aegis platform.
        mrc     p15, 4, r0, c15, c0     @ get SCU base address
        teq     r0, #0x0                @ '0' on actual UP A9 hardware
        beq     __fixup_smp_on_up       @ so it's an A9 UP
        ldr     r0, [r0, #4]            @ read SCU Config
ARM_BE8(rev     r0, r0)                 @ byteswap if big endian
        and     r0, r0, #0x3            @ number of CPUs
        teq     r0, #0x0                @ is 1?
        retne   lr

__fixup_smp_on_up:
        adr     r0, 1f
        ldmia   r0, {r3 - r5}
        sub     r3, r0, r3
        add     r4, r4, r3
        add     r5, r5, r3
        b       __do_fixup_smp_on_up
ENDPROC(__fixup_smp)

        .align
1:      .word   .
        .word   __smpalt_begin
        .word   __smpalt_end

        .pushsection .data
        .globl  smp_on_up
smp_on_up:
        ALT_SMP(.long   1)
        ALT_UP(.long    0)
        .popsection
#endif

        .text
__do_fixup_smp_on_up:
        cmp     r4, r5
        reths   lr
        ldmia   r4!, {r0, r6}
 ARM(   str     r6, [r0, r3]    )
 THUMB( add     r0, r0, r3      )
#ifdef __ARMEB__
 THUMB( mov     r6, r6, ror #16 )       @ Convert word order for big-endian.
#endif
 THUMB( strh    r6, [r0], #2    )       @ For Thumb-2, store as two halfwords
 THUMB( mov     r6, r6, lsr #16 )       @ to be robust against misaligned r3.
 THUMB( strh    r6, [r0]        )
        b       __do_fixup_smp_on_up
ENDPROC(__do_fixup_smp_on_up)
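
/*
 * Illustrative note: each entry between __smpalt_begin and
 * __smpalt_end is an {address, instruction} pair emitted by the
 * ALT_SMP()/ALT_UP() macro pairs (e.g. ALT_SMP(.long 1) with
 * ALT_UP(.long 0) above), and the loop overwrites each SMP word in
 * place with its UP replacement.
 */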

ENTRY(fixup_smp)
        stmfd   sp!, {r4 - r6, lr}
        mov     r4, r0
        add     r5, r0, r1
        mov     r3, #0
        bl      __do_fixup_smp_on_up
        ldmfd   sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)

#ifdef __ARMEB__
#define LOW_OFFSET      0x4
#define HIGH_OFFSET     0x0
#else
#define LOW_OFFSET      0x0
#define HIGH_OFFSET     0x4
#endif

#ifdef CONFIG_ARM_PATCH_PHYS_VIRT

/* __fixup_pv_table - patch the stub instructions with the delta between
 * PHYS_OFFSET and PAGE_OFFSET, which is assumed to be 16MiB aligned and
 * can be expressed by an immediate shifter operand. The stub instruction
 * has a form of '(add|sub) rd, rn, #imm'.
 */
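
/*
 * Worked example (offsets assumed): with PHYS_OFFSET = 0x10000000 and
 * PAGE_OFFSET = 0xC0000000, the delta is 0x50000000 (mod 2^32), which
 * is 16MiB aligned and therefore encodable as the 8-bit immediate 0x50
 * rotated into bits 31-24; the fixup writes 0x50 into the immediate
 * field of each stub add/sub.
 */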
        __HEAD
__fixup_pv_table:
        adr     r0, 1f
        ldmia   r0, {r3-r7}
        mvn     ip, #0
        subs    r3, r0, r3      @ PHYS_OFFSET - PAGE_OFFSET
        add     r4, r4, r3      @ adjust table start address
        add     r5, r5, r3      @ adjust table end address
        add     r6, r6, r3      @ adjust __pv_phys_pfn_offset address
        add     r7, r7, r3      @ adjust __pv_offset address
        mov     r0, r8, lsr #PAGE_SHIFT @ convert to PFN
        str     r0, [r6]        @ save computed PHYS_OFFSET to __pv_phys_pfn_offset
        strcc   ip, [r7, #HIGH_OFFSET]  @ save to __pv_offset high bits
        mov     r6, r3, lsr #24 @ constant for add/sub instructions
        teq     r3, r6, lsl #24 @ must be 16MiB aligned
THUMB(  it      ne              @ cross section branch )
        bne     __error
        str     r3, [r7, #LOW_OFFSET]   @ save to __pv_offset low bits
        b       __fixup_a_pv_table
ENDPROC(__fixup_pv_table)

        .align
1:      .long   .
        .long   __pv_table_begin
        .long   __pv_table_end
2:      .long   __pv_phys_pfn_offset
        .long   __pv_offset

        .text
__fixup_a_pv_table:
        adr     r0, 3f
        ldr     r6, [r0]
        add     r6, r6, r3
        ldr     r0, [r6, #HIGH_OFFSET]  @ pv_offset high word
        ldr     r6, [r6, #LOW_OFFSET]   @ pv_offset low word
        mov     r6, r6, lsr #24
        cmn     r0, #1
#ifdef CONFIG_THUMB2_KERNEL
        moveq   r0, #0x200000   @ set bit 21, mov to mvn instruction
        lsls    r6, #24
        beq     2f
        clz     r7, r6
        lsr     r6, #24
        lsl     r6, r7
        bic     r6, #0x0080
        lsrs    r7, #1
        orrcs   r6, #0x0080
        orr     r6, r6, r7, lsl #12
        orr     r6, #0x4000
        b       2f
1:      add     r7, r3
        ldrh    ip, [r7, #2]
ARM_BE8(rev16   ip, ip)
        tst     ip, #0x4000
        and     ip, #0x8f00
        orrne   ip, r6  @ mask in offset bits 31-24
        orreq   ip, r0  @ mask in offset bits 7-0
ARM_BE8(rev16   ip, ip)
        strh    ip, [r7, #2]
        bne     2f
        ldrh    ip, [r7]
ARM_BE8(rev16   ip, ip)
        bic     ip, #0x20
        orr     ip, ip, r0, lsr #16
ARM_BE8(rev16   ip, ip)
        strh    ip, [r7]
2:      cmp     r4, r5
        ldrcc   r7, [r4], #4    @ use branch for delay slot
        bcc     1b
        bx      lr
#else
#ifdef CONFIG_CPU_ENDIAN_BE8
        moveq   r0, #0x00004000 @ set bit 22, mov to mvn instruction
#else
        moveq   r0, #0x400000   @ set bit 22, mov to mvn instruction
#endif
        b       2f
1:      ldr     ip, [r7, r3]
#ifdef CONFIG_CPU_ENDIAN_BE8
        @ in BE8, we load data in BE, but instructions still in LE
        bic     ip, ip, #0xff000000
        tst     ip, #0x000f0000 @ check the rotation field
        orrne   ip, ip, r6, lsl #24 @ mask in offset bits 31-24
        biceq   ip, ip, #0x00004000 @ clear bit 22
        orreq   ip, ip, r0      @ mask in offset bits 7-0
#else
        bic     ip, ip, #0x000000ff
        tst     ip, #0xf00      @ check the rotation field
        orrne   ip, ip, r6      @ mask in offset bits 31-24
        biceq   ip, ip, #0x400000       @ clear bit 22
        orreq   ip, ip, r0      @ mask in offset bits 7-0
#endif
        str     ip, [r7, r3]
2:      cmp     r4, r5
        ldrcc   r7, [r4], #4    @ use branch for delay slot
        bcc     1b
        ret     lr
#endif
ENDPROC(__fixup_a_pv_table)

        .align
3:      .long __pv_offset

ENTRY(fixup_pv_table)
        stmfd   sp!, {r4 - r7, lr}
        mov     r3, #0                  @ no offset
        mov     r4, r0                  @ r0 = table start
        add     r5, r0, r1              @ r1 = table size
        bl      __fixup_a_pv_table
        ldmfd   sp!, {r4 - r7, pc}
ENDPROC(fixup_pv_table)

        .data
        .globl  __pv_phys_pfn_offset
        .type   __pv_phys_pfn_offset, %object
__pv_phys_pfn_offset:
        .word   0
        .size   __pv_phys_pfn_offset, . -__pv_phys_pfn_offset

        .globl  __pv_offset
        .type   __pv_offset, %object
__pv_offset:
        .quad   0
        .size   __pv_offset, . -__pv_offset
#endif

#include "head-common.S"