/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/head.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (c) 2003 ARM Limited
 *  All Rights Reserved
 *
 *  Kernel startup code for all 32-bit CPUs
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

#include <asm/assembler.h>
#include <asm/cp15.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>

#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
#endif

/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 16K below KERNEL_RAM_VADDR.  Therefore, we must
 * make sure that KERNEL_RAM_VADDR is correctly set.  Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
 */
#define KERNEL_RAM_VADDR        (PAGE_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif

#ifdef CONFIG_ARM_LPAE
        /* LPAE requires an additional page for the PGD */
#define PG_DIR_SIZE     0x5000
#define PMD_ORDER       3
#else
#define PG_DIR_SIZE     0x4000
#define PMD_ORDER       2
#endif
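
/*
 * Classic 2-level tables thus occupy four 4KiB pages of 4-byte section
 * entries (PMD_ORDER = log2(entry size) = 2); LPAE prepends one 4KiB PGD
 * page to four PMD pages of 8-byte entries, hence 0x5000 and PMD_ORDER = 3.
 */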

        .globl  swapper_pg_dir
        .equ    swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE

        .macro  pgtbl, rd, phys
        add     \rd, \phys, #TEXT_OFFSET
        sub     \rd, \rd, #PG_DIR_SIZE
        .endm
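
/*
 * pgtbl yields the physical address of the initial page tables:
 * PG_DIR_SIZE bytes below the kernel image, i.e. \phys + TEXT_OFFSET -
 * PG_DIR_SIZE.  For example, with RAM at 0x80000000 and the common
 * TEXT_OFFSET of 0x8000, the classic tables start at 0x80004000.
 */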

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = don't care, r0 = 0,
 * r1 = machine nr, r2 = atags or dtb pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine-specific
 * crap here - that's what the boot loader (or in extreme, well-justified
 * circumstances, zImage) is for.
 */
        .arm

        __HEAD
ENTRY(stext)
 ARM_BE8(setend be )                    @ ensure we are in BE8 mode

 THUMB( badr    r9, 1f          )       @ Kernel is always entered in ARM.
 THUMB( bx      r9              )       @ If this is a Thumb-2 kernel,
 THUMB( .thumb                  )       @ switch to Thumb now.
 THUMB(1:                       )

#ifdef CONFIG_ARM_VIRT_EXT
        bl      __hyp_stub_install
#endif
        @ ensure svc mode and all interrupts masked
        safe_svcmode_maskall r9

        mrc     p15, 0, r9, c0, c0              @ get processor id
        bl      __lookup_processor_type         @ r5=procinfo r9=cpuid
        movs    r10, r5                         @ invalid processor (r5=0)?
 THUMB( it      eq )            @ force fixup-able long branch encoding
        beq     __error_p                       @ yes, error 'p'

#ifdef CONFIG_ARM_LPAE
        mrc     p15, 0, r3, c0, c1, 4           @ read ID_MMFR0
        and     r3, r3, #0xf                    @ extract VMSA support
        cmp     r3, #5                          @ long-descriptor translation table format?
 THUMB( it      lo )                            @ force fixup-able long branch encoding
        blo     __error_lpae                    @ only classic page table format
#endif

#ifndef CONFIG_XIP_KERNEL
        adr     r3, 2f
        ldmia   r3, {r4, r8}
        sub     r4, r3, r4                      @ (PHYS_OFFSET - PAGE_OFFSET)
        add     r8, r8, r4                      @ PHYS_OFFSET
#else
        ldr     r8, =PLAT_PHYS_OFFSET           @ always constant in this case
#endif
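
/*
 * In the non-XIP case PHYS_OFFSET is computed at run time: "2:" below
 * holds its own link-time address and PAGE_OFFSET, while adr yields the
 * label's current physical address, so r4 = phys - virt and
 * r8 = PAGE_OFFSET + r4 = PHYS_OFFSET.
 */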

        /*
         * r1 = machine no, r2 = atags or dtb,
         * r8 = phys_offset, r9 = cpuid, r10 = procinfo
         */
        bl      __vet_atags
#ifdef CONFIG_SMP_ON_UP
        bl      __fixup_smp
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
        bl      __fixup_pv_table
#endif
        bl      __create_page_tables

        /*
         * The following calls CPU-specific code in a position-independent
         * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
         * xxx_proc_info structure selected by __lookup_processor_type
         * above.
         *
         * The processor init function will be called with:
         *  r1 - machine type
         *  r2 - boot data (atags/dt) pointer
         *  r4 - translation table base (low word)
         *  r5 - translation table base (high word, if LPAE)
         *  r8 - translation table base 1 (pfn if LPAE)
         *  r9 - cpuid
         *  r13 - virtual address for __enable_mmu -> __turn_mmu_on
         *
         * On return, the CPU will be ready for the MMU to be turned on,
         * r0 will hold the CPU control register value, r1, r2, r4, and
         * r9 will be preserved.  r5 will also be preserved if LPAE.
         */
        ldr     r13, =__mmap_switched           @ address to jump to after
                                                @ mmu has been enabled
        badr    lr, 1f                          @ return (PIC) address
#ifdef CONFIG_ARM_LPAE
        mov     r5, #0                          @ high TTBR0
        mov     r8, r4, lsr #12                 @ TTBR1 is swapper_pg_dir pfn
#else
        mov     r8, r4                          @ set TTBR1 to swapper_pg_dir
#endif
        ldr     r12, [r10, #PROCINFO_INITFUNC]
        add     r12, r12, r10
        ret     r12
1:      b       __enable_mmu
ENDPROC(stext)
        .ltorg
#ifndef CONFIG_XIP_KERNEL
2:      .long   .
        .long   PAGE_OFFSET
#endif

/*
 * Set up the initial page tables.  We only set up the bare minimum
 * required to get the kernel running, which generally means mapping
 * in the kernel code.
 *
 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
 *
 * Returns:
 *  r0, r3, r5-r7 corrupted
 *  r4 = physical page table address
 */
__create_page_tables:
        pgtbl   r4, r8                          @ page table address

        /*
         * Clear the swapper page table
         */
        mov     r0, r4
        mov     r3, #0
        add     r6, r0, #PG_DIR_SIZE
1:      str     r3, [r0], #4
        str     r3, [r0], #4
        str     r3, [r0], #4
        str     r3, [r0], #4
        teq     r0, r6
        bne     1b
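        @ four words are zeroed per iteration; PG_DIR_SIZE is a multiple
        @ of 16 bytes, so the teq termination test above is exact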

#ifdef CONFIG_ARM_LPAE
        /*
         * Build the PGD table (first level) to point to the PMD table. A PGD
         * entry is 64-bit wide.
         */
        mov     r0, r4
        add     r3, r4, #0x1000                 @ first PMD table address
        orr     r3, r3, #3                      @ PGD block type
        mov     r6, #4                          @ PTRS_PER_PGD
        mov     r7, #1 << (55 - 32)             @ L_PGD_SWAPPER
1:
#ifdef CONFIG_CPU_ENDIAN_BE8
        str     r7, [r0], #4                    @ set top PGD entry bits
        str     r3, [r0], #4                    @ set bottom PGD entry bits
#else
        str     r3, [r0], #4                    @ set bottom PGD entry bits
        str     r7, [r0], #4                    @ set top PGD entry bits
#endif
        add     r3, r3, #0x1000                 @ next PMD table
        subs    r6, r6, #1
        bne     1b

        add     r4, r4, #0x1000                 @ point to the PMD tables
#ifdef CONFIG_CPU_ENDIAN_BE8
        add     r4, r4, #4                      @ we only write the bottom word
#endif
#endif

        ldr     r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

        /*
         * Create identity mapping to cater for __enable_mmu.
         * This identity mapping will be removed by paging_init().
         */
        adr     r0, __turn_mmu_on_loc
        ldmia   r0, {r3, r5, r6}
        sub     r0, r0, r3                      @ virt->phys offset
        add     r5, r5, r0                      @ phys __turn_mmu_on
        add     r6, r6, r0                      @ phys __turn_mmu_on_end
        mov     r5, r5, lsr #SECTION_SHIFT
        mov     r6, r6, lsr #SECTION_SHIFT

1:      orr     r3, r7, r5, lsl #SECTION_SHIFT  @ flags + kernel base
        str     r3, [r4, r5, lsl #PMD_ORDER]    @ identity mapping
        cmp     r5, r6
        addlo   r5, r5, #1                      @ next section
        blo     1b
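        @ each pass writes the entry at r4 + (r5 << PMD_ORDER) with the
        @ section base (r5 << SECTION_SHIFT) plus mm_mmuflags, so this
        @ range is mapped with virtual == physical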

        /*
         * Map our RAM from the start to the end of the kernel .bss section.
         */
        add     r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
        ldr     r6, =(_end - 1)
        orr     r3, r8, r7
        add     r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:      str     r3, [r0], #1 << PMD_ORDER
        add     r3, r3, #1 << SECTION_SHIFT
        cmp     r0, r6
        bls     1b
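        @ r0/r6 above are the table slots for the first and last sections
        @ of the image, e.g. r4 + (0xc00 << PMD_ORDER) for the common
        @ PAGE_OFFSET of 0xc0000000 on classic tables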

#ifdef CONFIG_XIP_KERNEL
        /*
         * Map the kernel image separately as it is not located in RAM.
         */
#define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
        mov     r3, pc
        mov     r3, r3, lsr #SECTION_SHIFT
        orr     r3, r7, r3, lsl #SECTION_SHIFT
        add     r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
        str     r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
        ldr     r6, =(_edata_loc - 1)
        add     r0, r0, #1 << PMD_ORDER
        add     r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:      cmp     r0, r6
        add     r3, r3, #1 << SECTION_SHIFT
        strls   r3, [r0], #1 << PMD_ORDER
        bls     1b
#endif

        /*
         * Then map the boot params address in r2 if specified.
         * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
         */
        mov     r0, r2, lsr #SECTION_SHIFT
        movs    r0, r0, lsl #SECTION_SHIFT
        subne   r3, r0, r8
        addne   r3, r3, #PAGE_OFFSET
        addne   r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
        orrne   r6, r7, r0
        strne   r6, [r3], #1 << PMD_ORDER
        addne   r6, r6, #1 << SECTION_SHIFT
        strne   r6, [r3]
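        @ the slot is found by translating the rounded-down physical
        @ address back to virtual (r0 - PHYS_OFFSET + PAGE_OFFSET)
        @ before indexing the table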

#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
        sub     r4, r4, #4                      @ Fixup page table pointer
                                                @ for 64-bit descriptors
#endif

#ifdef CONFIG_DEBUG_LL
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
        /*
         * Map in IO space for serial debugging.
         * This allows debug messages to be output
         * via a serial console before paging_init.
         */
        addruart r7, r3, r0
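        @ addruart (from the DEBUG_LL include) returns the UART's
        @ physical address in r7 and its virtual address in r3,
        @ using r0 as scratch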

        mov     r3, r3, lsr #SECTION_SHIFT
        mov     r3, r3, lsl #PMD_ORDER

        add     r0, r4, r3
        mov     r3, r7, lsr #SECTION_SHIFT
        ldr     r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
        orr     r3, r7, r3, lsl #SECTION_SHIFT
#ifdef CONFIG_ARM_LPAE
        mov     r7, #1 << (54 - 32)             @ XN
#ifdef CONFIG_CPU_ENDIAN_BE8
        str     r7, [r0], #4
        str     r3, [r0], #4
#else
        str     r3, [r0], #4
        str     r7, [r0], #4
#endif
#else
        orr     r3, r3, #PMD_SECT_XN
        str     r3, [r0], #4
#endif

#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
        /* we don't need any serial debugging mappings */
        ldr     r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
#endif

#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
        /*
         * If we're using the NetWinder or CATS, we also need to map
         * in the 16550-type serial port for the debug messages
         */
        add     r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER)
        orr     r3, r7, #0x7c000000
        str     r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
        /*
         * Map in screen at 0x02000000 & SCREEN2_BASE
         * Similar reasons here - for debug.  This is
         * only for Acorn RiscPC architectures.
         */
        add     r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER)
        orr     r3, r7, #0x02000000
        str     r3, [r0]
        add     r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
        str     r3, [r0]
#endif
#endif
#ifdef CONFIG_ARM_LPAE
        sub     r4, r4, #0x1000         @ point to the PGD table
#endif
        ret     lr
ENDPROC(__create_page_tables)
        .ltorg
        .align
__turn_mmu_on_loc:
        .long   .
        .long   __turn_mmu_on
        .long   __turn_mmu_on_end

#if defined(CONFIG_SMP)
        .text
        .arm
ENTRY(secondary_startup_arm)
 THUMB( badr    r9, 1f          )       @ Kernel is entered in ARM.
 THUMB( bx      r9              )       @ If this is a Thumb-2 kernel,
 THUMB( .thumb                  )       @ switch to Thumb now.
 THUMB(1:                       )
ENTRY(secondary_startup)
        /*
         * Common entry point for secondary CPUs.
         *
         * Ensure that we're in SVC mode, and IRQs are disabled.  Look up
         * the processor type - there is no need to check the machine type
         * as it has already been validated by the primary processor.
         */

 ARM_BE8(setend be)                             @ ensure we are in BE8 mode

#ifdef CONFIG_ARM_VIRT_EXT
        bl      __hyp_stub_install_secondary
#endif
        safe_svcmode_maskall r9

        mrc     p15, 0, r9, c0, c0              @ get processor id
        bl      __lookup_processor_type
        movs    r10, r5                         @ invalid processor?
        moveq   r0, #'p'                        @ yes, error 'p'
 THUMB( it      eq )            @ force fixup-able long branch encoding
        beq     __error_p

        /*
         * Use the page tables supplied from __cpu_up.
         */
        adr     r4, __secondary_data
        ldmia   r4, {r5, r7, r12}               @ address to jump to after
        sub     lr, r4, r5                      @ mmu has been enabled
        add     r3, r7, lr
        ldrd    r4, r5, [r3, #0]                @ get secondary_data.pgdir
ARM_BE8(eor     r4, r4, r5)                     @ Swap r5 and r4 in BE:
ARM_BE8(eor     r5, r4, r5)                     @ it can be done in 3 steps
ARM_BE8(eor     r4, r4, r5)                     @ without using a temp reg.
        ldr     r8, [r3, #8]                    @ get secondary_data.swapper_pg_dir
        badr    lr, __enable_mmu                @ return address
        mov     r13, r12                        @ __secondary_switched address
        ldr     r12, [r10, #PROCINFO_INITFUNC]
        add     r12, r12, r10                   @ initialise processor
                                                @ (return control reg)
        ret     r12
ENDPROC(secondary_startup)
ENDPROC(secondary_startup_arm)

        /*
         * r7 = &secondary_data
         */
ENTRY(__secondary_switched)
        ldr     sp, [r7, #12]                   @ get secondary_data.stack
        mov     fp, #0
        b       secondary_start_kernel
ENDPROC(__secondary_switched)

        .align

        .type   __secondary_data, %object
__secondary_data:
        .long   .
        .long   secondary_data
        .long   __secondary_switched
#endif /* defined(CONFIG_SMP) */


/*
 * Set up common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
 * registers.  All these registers need to be preserved by the
 * processor setup function (or set in the case of r0).
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r4  = TTBR pointer (low word)
 *  r5  = TTBR pointer (high word if LPAE)
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 */
__enable_mmu:
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
        orr     r0, r0, #CR_A
#else
        bic     r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
        bic     r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
        bic     r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
        bic     r0, r0, #CR_I
#endif
#ifdef CONFIG_ARM_LPAE
        mcrr    p15, 0, r4, r5, c2              @ load TTBR0
#else
        mov     r5, #DACR_INIT
        mcr     p15, 0, r5, c3, c0, 0           @ load domain access register
        mcr     p15, 0, r4, c2, c0, 0           @ load page table pointer
#endif
        b       __turn_mmu_on
ENDPROC(__enable_mmu)

/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
        .align  5
        .pushsection    .idmap.text, "ax"
ENTRY(__turn_mmu_on)
        mov     r0, r0                          @ nop
        instr_sync
        mcr     p15, 0, r0, c1, c0, 0           @ write control reg
        mrc     p15, 0, r3, c0, c0, 0           @ read id reg
        instr_sync
        mov     r3, r3                          @ nop
        mov     r3, r13                         @ jump to the *virtual*
        ret     r3                              @ address held in r13
__turn_mmu_on_end:
ENDPROC(__turn_mmu_on)
        .popsection


#ifdef CONFIG_SMP_ON_UP
        __HEAD
__fixup_smp:
        and     r3, r9, #0x000f0000     @ architecture version
        teq     r3, #0x000f0000         @ CPU ID supported?
        bne     __fixup_smp_on_up       @ no, assume UP

        bic     r3, r9, #0x00ff0000
        bic     r3, r3, #0x0000000f     @ mask 0xff00fff0
        mov     r4, #0x41000000
        orr     r4, r4, #0x0000b000
        orr     r4, r4, #0x00000020     @ val 0x4100b020
        teq     r3, r4                  @ ARM 11MPCore?
        reteq   lr                      @ yes, assume SMP

        mrc     p15, 0, r0, c0, c0, 5   @ read MPIDR
        and     r0, r0, #0xc0000000     @ multiprocessing extensions and
        teq     r0, #0x80000000         @ not part of a uniprocessor system?
        bne     __fixup_smp_on_up       @ no, assume UP

        @ Core indicates it is SMP. Check for Aegis SoC where a single
        @ Cortex-A9 CPU is present but SMP operations fault.
        mov     r4, #0x41000000
        orr     r4, r4, #0x0000c000
        orr     r4, r4, #0x00000090
        teq     r3, r4                  @ Check for ARM Cortex-A9
        retne   lr                      @ Not ARM Cortex-A9,

        @ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
        @ below address check will need to be #ifdef'd or equivalent
        @ for the Aegis platform.
        mrc     p15, 4, r0, c15, c0     @ get SCU base address
        teq     r0, #0x0                @ '0' on actual UP A9 hardware
        beq     __fixup_smp_on_up       @ so it's an A9 UP
        ldr     r0, [r0, #4]            @ read SCU Config
ARM_BE8(rev     r0, r0)                 @ byteswap if big endian
        and     r0, r0, #0x3            @ number of CPUs
        teq     r0, #0x0                @ just one CPU?
        retne   lr

__fixup_smp_on_up:
        adr     r0, 1f
        ldmia   r0, {r3 - r5}
        sub     r3, r0, r3
        add     r4, r4, r3
        add     r5, r5, r3
        b       __do_fixup_smp_on_up
ENDPROC(__fixup_smp)

        .align
1:      .word   .
        .word   __smpalt_begin
        .word   __smpalt_end

        .pushsection .data
        .align  2
        .globl  smp_on_up
smp_on_up:
        ALT_SMP(.long   1)
        ALT_UP(.long    0)
        .popsection
#endif

        .text
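
/*
 * r4/r5 delimit a table of {instruction address, UP replacement} pairs
 * emitted by the ALT_SMP/ALT_UP macros; each SMP instruction at
 * address + r3 (the phys-virt offset, zero when called via fixup_smp)
 * is overwritten with its UP replacement.
 */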
__do_fixup_smp_on_up:
        cmp     r4, r5
        reths   lr
        ldmia   r4!, {r0, r6}
 ARM(   str     r6, [r0, r3]    )
 THUMB( add     r0, r0, r3      )
#ifdef __ARMEB__
 THUMB( mov     r6, r6, ror #16 )       @ Convert word order for big-endian.
#endif
 THUMB( strh    r6, [r0], #2    )       @ For Thumb-2, store as two halfwords
 THUMB( mov     r6, r6, lsr #16 )       @ to be robust against misaligned r3.
 THUMB( strh    r6, [r0]        )
        b       __do_fixup_smp_on_up
ENDPROC(__do_fixup_smp_on_up)

ENTRY(fixup_smp)
        stmfd   sp!, {r4 - r6, lr}
        mov     r4, r0
        add     r5, r0, r1
        mov     r3, #0
        bl      __do_fixup_smp_on_up
        ldmfd   sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)

#ifdef __ARMEB__
#define LOW_OFFSET      0x4
#define HIGH_OFFSET     0x0
#else
#define LOW_OFFSET      0x0
#define HIGH_OFFSET     0x4
#endif

#ifdef CONFIG_ARM_PATCH_PHYS_VIRT

/*
 * __fixup_pv_table - patch the stub instructions with the delta between
 * PHYS_OFFSET and PAGE_OFFSET, which is assumed to be 16MiB aligned and
 * can be expressed by an immediate shifter operand.  The stub instructions
 * have the form '(add|sub) rd, rn, #imm'.
 */
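
/*
 * e.g. with PHYS_OFFSET - PAGE_OFFSET = 0x10000000 the patched 8-bit
 * immediate is 0x10, which the stub's rotation field places at bits
 * 31..24 of the final operand.
 */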
        __HEAD
__fixup_pv_table:
        adr     r0, 1f
        ldmia   r0, {r3-r7}
        mvn     ip, #0
        subs    r3, r0, r3      @ PHYS_OFFSET - PAGE_OFFSET
        add     r4, r4, r3      @ adjust table start address
        add     r5, r5, r3      @ adjust table end address
        add     r6, r6, r3      @ adjust __pv_phys_pfn_offset address
        add     r7, r7, r3      @ adjust __pv_offset address
        mov     r0, r8, lsr #PAGE_SHIFT @ convert to PFN
        str     r0, [r6]        @ save computed PHYS_OFFSET to __pv_phys_pfn_offset
        strcc   ip, [r7, #HIGH_OFFSET]  @ save to __pv_offset high bits
        mov     r6, r3, lsr #24 @ constant for add/sub instructions
        teq     r3, r6, lsl #24 @ must be 16MiB aligned
THUMB(  it      ne              @ cross section branch )
        bne     __error
        str     r3, [r7, #LOW_OFFSET]   @ save to __pv_offset low bits
        b       __fixup_a_pv_table
ENDPROC(__fixup_pv_table)

        .align
1:      .long   .
        .long   __pv_table_begin
        .long   __pv_table_end
2:      .long   __pv_phys_pfn_offset
        .long   __pv_offset

        .text
__fixup_a_pv_table:
        adr     r0, 3f
        ldr     r6, [r0]
        add     r6, r6, r3
        ldr     r0, [r6, #HIGH_OFFSET]  @ pv_offset high word
        ldr     r6, [r6, #LOW_OFFSET]   @ pv_offset low word
        mov     r6, r6, lsr #24
        cmn     r0, #1
#ifdef CONFIG_THUMB2_KERNEL
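        @ Thumb-2 scatters the modified immediate (i:imm3:imm8) across
        @ both halfwords of the 32-bit instruction, so the top byte of
        @ the offset in r6 is first reshaped into that form before the
        @ patch loop ORs it into each stub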
        moveq   r0, #0x200000   @ set bit 21, mov to mvn instruction
        lsls    r6, #24
        beq     2f
        clz     r7, r6
        lsr     r6, #24
        lsl     r6, r7
        bic     r6, #0x0080
        lsrs    r7, #1
        orrcs   r6, #0x0080
        orr     r6, r6, r7, lsl #12
        orr     r6, #0x4000
        b       2f
1:      add     r7, r3
        ldrh    ip, [r7, #2]
ARM_BE8(rev16   ip, ip)
        tst     ip, #0x4000
        and     ip, #0x8f00
        orrne   ip, r6  @ mask in offset bits 31-24
        orreq   ip, r0  @ mask in offset bits 7-0
ARM_BE8(rev16   ip, ip)
        strh    ip, [r7, #2]
        bne     2f
        ldrh    ip, [r7]
ARM_BE8(rev16   ip, ip)
        bic     ip, #0x20
        orr     ip, ip, r0, lsr #16
ARM_BE8(rev16   ip, ip)
        strh    ip, [r7]
2:      cmp     r4, r5
        ldrcc   r7, [r4], #4    @ use branch for delay slot
        bcc     1b
        bx      lr
#else
#ifdef CONFIG_CPU_ENDIAN_BE8
        moveq   r0, #0x00004000 @ set bit 22, mov to mvn instruction
#else
        moveq   r0, #0x400000   @ set bit 22, mov to mvn instruction
#endif
        b       2f
1:      ldr     ip, [r7, r3]
#ifdef CONFIG_CPU_ENDIAN_BE8
        @ in BE8, we load data in BE, but instructions still in LE
        bic     ip, ip, #0xff000000
        tst     ip, #0x000f0000 @ check the rotation field
        orrne   ip, ip, r6, lsl #24 @ mask in offset bits 31-24
        biceq   ip, ip, #0x00004000 @ clear bit 22
        orreq   ip, ip, r0      @ mask in offset bits 7-0
#else
        bic     ip, ip, #0x000000ff
        tst     ip, #0xf00      @ check the rotation field
        orrne   ip, ip, r6      @ mask in offset bits 31-24
        biceq   ip, ip, #0x400000       @ clear bit 22
        orreq   ip, ip, r0      @ mask in offset bits 7-0
#endif
        str     ip, [r7, r3]
2:      cmp     r4, r5
        ldrcc   r7, [r4], #4    @ use branch for delay slot
        bcc     1b
        ret     lr
#endif
ENDPROC(__fixup_a_pv_table)

        .align
3:      .long __pv_offset

ENTRY(fixup_pv_table)
        stmfd   sp!, {r4 - r7, lr}
        mov     r3, #0                  @ no offset
        mov     r4, r0                  @ r0 = table start
        add     r5, r0, r1              @ r1 = table size
        bl      __fixup_a_pv_table
        ldmfd   sp!, {r4 - r7, pc}
ENDPROC(fixup_pv_table)

        .data
        .align  2
        .globl  __pv_phys_pfn_offset
        .type   __pv_phys_pfn_offset, %object
__pv_phys_pfn_offset:
        .word   0
        .size   __pv_phys_pfn_offset, . - __pv_phys_pfn_offset

        .globl  __pv_offset
        .type   __pv_offset, %object
__pv_offset:
        .quad   0
        .size   __pv_offset, . - __pv_offset
#endif

#include "head-common.S"