linux/arch/arm/kernel/head.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/head.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (c) 2003 ARM Limited
 *  All Rights Reserved
 *
 *  Kernel startup code for all 32-bit CPUs
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

#include <asm/assembler.h>
#include <asm/cp15.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>

#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING)
#include CONFIG_DEBUG_LL_INCLUDE
#endif
/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables PG_DIR_SIZE bytes (16K, or 20K for LPAE)
 * below KERNEL_RAM_VADDR.  Therefore, we must make sure that
 * KERNEL_RAM_VADDR is correctly set.  Currently, we expect the least
 * significant 16 bits to be 0x8000, but we could probably relax this
 * restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
 */
#define KERNEL_RAM_VADDR        (KERNEL_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif

#ifdef CONFIG_ARM_LPAE
        /* LPAE requires an additional page for the PGD */
#define PG_DIR_SIZE     0x5000
#define PMD_ORDER       3
#else
#define PG_DIR_SIZE     0x4000
#define PMD_ORDER       2
#endif

        .globl  swapper_pg_dir
        .equ    swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
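
        /*
         * A worked example, assuming the traditional layout (an
         * assumption, not something this file mandates): with
         * KERNEL_OFFSET = 0xC0000000 and TEXT_OFFSET = 0x8000,
         * KERNEL_RAM_VADDR is 0xC0008000 and swapper_pg_dir lands at
         * 0xC0004000 (0xC0003000 for LPAE, which needs the extra
         * PGD page).
         */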

        /*
         * This needs to be assigned at runtime when the linker symbols are
         * resolved. These are really unsigned 64-bit quantities, but in this
         * assembly code we store each as a pair of 32-bit words.
         */
        .pushsection .data
        .align  2
        .globl  kernel_sec_start
        .globl  kernel_sec_end
kernel_sec_start:
        .long   0
        .long   0
kernel_sec_end:
        .long   0
        .long   0
        .popsection

        .macro  pgtbl, rd, phys
        add     \rd, \phys, #TEXT_OFFSET
        sub     \rd, \rd, #PG_DIR_SIZE
        .endm
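
        /*
         * i.e. \rd = \phys + TEXT_OFFSET - PG_DIR_SIZE: the physical
         * address of swapper_pg_dir, which sits immediately below the
         * kernel image in RAM.
         */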

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = don't care, r0 = 0,
 * r1 = machine nr, r2 = atags or dtb pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine-specific
 * crap here - that's what the boot loader (or in extreme, well-justified
 * circumstances, zImage) is for.
 */
        .arm

        __HEAD
ENTRY(stext)
 ARM_BE8(setend be )                    @ ensure we are in BE8 mode

 THUMB( badr    r9, 1f          )       @ Kernel is always entered in ARM.
 THUMB( bx      r9              )       @ If this is a Thumb-2 kernel,
 THUMB( .thumb                  )       @ switch to Thumb now.
 THUMB(1:                       )

#ifdef CONFIG_ARM_VIRT_EXT
        bl      __hyp_stub_install
#endif
        @ ensure svc mode and all interrupts masked
        safe_svcmode_maskall r9

        mrc     p15, 0, r9, c0, c0              @ get processor id
        bl      __lookup_processor_type         @ r5=procinfo r9=cpuid
        movs    r10, r5                         @ invalid processor (r5=0)?
 THUMB( it      eq )            @ force fixup-able long branch encoding
        beq     __error_p                       @ yes, error 'p'

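        /*
         * LPAE requires the long-descriptor translation table format:
         * the VMSA support field, ID_MMFR0[3:0], must read 5 or more.
         */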
#ifdef CONFIG_ARM_LPAE
        mrc     p15, 0, r3, c0, c1, 4           @ read ID_MMFR0
        and     r3, r3, #0xf                    @ extract VMSA support
        cmp     r3, #5                          @ long-descriptor translation table format?
 THUMB( it      lo )                            @ force fixup-able long branch encoding
        blo     __error_lpae                    @ only classic page table format
#endif

#ifndef CONFIG_XIP_KERNEL
        adr_l   r8, _text                       @ __pa(_text)
        sub     r8, r8, #TEXT_OFFSET            @ PHYS_OFFSET
#else
        ldr     r8, =PLAT_PHYS_OFFSET           @ always constant in this case
#endif
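
        /*
         * Either way r8 now holds PHYS_OFFSET: _text is loaded at
         * PHYS_OFFSET + TEXT_OFFSET, so stripping TEXT_OFFSET from
         * __pa(_text) recovers the base of RAM.
         */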

        /*
         * r1 = machine no, r2 = atags or dtb,
         * r8 = phys_offset, r9 = cpuid, r10 = procinfo
         */
        bl      __vet_atags
#ifdef CONFIG_SMP_ON_UP
        bl      __fixup_smp
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
        bl      __fixup_pv_table
#endif
        bl      __create_page_tables

        /*
         * The following calls CPU-specific code in a position-independent
         * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
         * xxx_proc_info structure selected by __lookup_processor_type
         * above.
         *
         * The processor init function will be called with:
         *  r1 - machine type
         *  r2 - boot data (atags/dt) pointer
         *  r4 - translation table base (low word)
         *  r5 - translation table base (high word, if LPAE)
         *  r8 - translation table base 1 (pfn if LPAE)
         *  r9 - cpuid
         *  r13 - virtual address for __enable_mmu -> __turn_mmu_on
         *
         * On return, the CPU will be ready for the MMU to be turned on,
         * r0 will hold the CPU control register value, r1, r2, r4, and
         * r9 will be preserved.  r5 will also be preserved if LPAE.
         */
        ldr     r13, =__mmap_switched           @ address to jump to after
                                                @ mmu has been enabled
        badr    lr, 1f                          @ return (PIC) address
#ifdef CONFIG_ARM_LPAE
        mov     r5, #0                          @ high TTBR0
        mov     r8, r4, lsr #12                 @ TTBR1 is swapper_pg_dir pfn
#else
        mov     r8, r4                          @ set TTBR1 to swapper_pg_dir
#endif
        ldr     r12, [r10, #PROCINFO_INITFUNC]
        add     r12, r12, r10
        ret     r12
1:      b       __enable_mmu
ENDPROC(stext)
        .ltorg

/*
 * Set up the initial page tables.  We only set up the bare minimum
 * required to get the kernel running, which generally means mapping
 * in the kernel code.
 *
 * r8 = phys_offset, r9 = cpuid, r10 = procinfo
 *
 * Returns:
 *  r0, r3, r5-r7 corrupted
 *  r4 = physical page table address
 */
__create_page_tables:
        pgtbl   r4, r8                          @ page table address

        /*
         * Clear the swapper page table
         */
        mov     r0, r4
        mov     r3, #0
        add     r6, r0, #PG_DIR_SIZE
1:      str     r3, [r0], #4
        str     r3, [r0], #4
        str     r3, [r0], #4
        str     r3, [r0], #4
        teq     r0, r6
        bne     1b
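
        /*
         * (The loop above clears four words per iteration; the exact
         * teq termination is safe because PG_DIR_SIZE is a multiple
         * of 16 bytes.)
         */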

#ifdef CONFIG_ARM_LPAE
        /*
         * Build the PGD table (first level) to point to the PMD table. A PGD
         * entry is 64-bit wide.
         */
        mov     r0, r4
        add     r3, r4, #0x1000                 @ first PMD table address
        orr     r3, r3, #3                      @ PGD table descriptor type
        mov     r6, #4                          @ PTRS_PER_PGD
        mov     r7, #1 << (55 - 32)             @ L_PGD_SWAPPER
1:
#ifdef CONFIG_CPU_ENDIAN_BE8
        str     r7, [r0], #4                    @ set top PGD entry bits
        str     r3, [r0], #4                    @ set bottom PGD entry bits
#else
        str     r3, [r0], #4                    @ set bottom PGD entry bits
        str     r7, [r0], #4                    @ set top PGD entry bits
#endif
        add     r3, r3, #0x1000                 @ next PMD table
        subs    r6, r6, #1
        bne     1b

        add     r4, r4, #0x1000                 @ point to the PMD tables
#ifdef CONFIG_CPU_ENDIAN_BE8
        add     r4, r4, #4                      @ we only write the bottom word
#endif
#endif

        ldr     r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

        /*
         * Create identity mapping to cater for __enable_mmu.
         * This identity mapping will be removed by paging_init().
         */
        adr_l   r5, __turn_mmu_on               @ _pa(__turn_mmu_on)
        adr_l   r6, __turn_mmu_on_end           @ _pa(__turn_mmu_on_end)
        mov     r5, r5, lsr #SECTION_SHIFT
        mov     r6, r6, lsr #SECTION_SHIFT

1:      orr     r3, r7, r5, lsl #SECTION_SHIFT  @ flags + kernel base
        str     r3, [r4, r5, lsl #PMD_ORDER]    @ identity mapping
        cmp     r5, r6
        addlo   r5, r5, #1                      @ next section
        blo     1b

        /*
         * The main matter: map in the kernel using section mappings, and
         * set two variables to indicate the physical start and end of the
         * kernel.
         */
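        /*
         * For the classic (non-LPAE) case, SECTION_SHIFT = 20 and
         * PMD_ORDER = 2: sections are 1 MiB with 4-byte entries, so
         * the expression below reduces to r4 + (KERNEL_OFFSET >> 18),
         * the address of the entry covering the kernel's first
         * virtual section.
         */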
        add     r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ORDER)
        ldr     r6, =(_end - 1)
        adr_l   r5, kernel_sec_start            @ _pa(kernel_sec_start)
#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
        str     r8, [r5, #4]                    @ Save physical start of kernel (BE)
#else
        str     r8, [r5]                        @ Save physical start of kernel (LE)
#endif
        orr     r3, r8, r7                      @ Add the MMU flags
        add     r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:      str     r3, [r0], #1 << PMD_ORDER
        add     r3, r3, #1 << SECTION_SHIFT
        cmp     r0, r6
        bls     1b
        eor     r3, r3, r7                      @ Remove the MMU flags
        adr_l   r5, kernel_sec_end              @ _pa(kernel_sec_end)
#if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
        str     r3, [r5, #4]                    @ Save physical end of kernel (BE)
#else
        str     r3, [r5]                        @ Save physical end of kernel (LE)
#endif

#ifdef CONFIG_XIP_KERNEL
        /*
         * Map the kernel image separately as it is not located in RAM.
         */
#define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
        mov     r3, pc
        mov     r3, r3, lsr #SECTION_SHIFT
        orr     r3, r7, r3, lsl #SECTION_SHIFT
        add     r0, r4,  #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER)
        str     r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]!
        ldr     r6, =(_edata_loc - 1)
        add     r0, r0, #1 << PMD_ORDER
        add     r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER)
1:      cmp     r0, r6
        add     r3, r3, #1 << SECTION_SHIFT
        strls   r3, [r0], #1 << PMD_ORDER
        bls     1b
#endif

        /*
         * Then map boot params address in r2 if specified.
         * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
         */
        mov     r0, r2, lsr #SECTION_SHIFT
        cmp     r2, #0
        ldrne   r3, =FDT_FIXED_BASE >> (SECTION_SHIFT - PMD_ORDER)
        addne   r3, r3, r4
        orrne   r6, r7, r0, lsl #SECTION_SHIFT
        strne   r6, [r3], #1 << PMD_ORDER
        addne   r6, r6, #1 << SECTION_SHIFT
        strne   r6, [r3]

#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
        sub     r4, r4, #4                      @ Fixup page table pointer
                                                @ for 64-bit descriptors
#endif

#ifdef CONFIG_DEBUG_LL
#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
        /*
         * Map in IO space for serial debugging.
         * This allows debug messages to be output
         * via a serial console before paging_init.
         */
        addruart r7, r3, r0

        mov     r3, r3, lsr #SECTION_SHIFT
        mov     r3, r3, lsl #PMD_ORDER

        add     r0, r4, r3
        mov     r3, r7, lsr #SECTION_SHIFT
        ldr     r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
        orr     r3, r7, r3, lsl #SECTION_SHIFT
#ifdef CONFIG_ARM_LPAE
        mov     r7, #1 << (54 - 32)             @ XN
#ifdef CONFIG_CPU_ENDIAN_BE8
        str     r7, [r0], #4
        str     r3, [r0], #4
#else
        str     r3, [r0], #4
        str     r7, [r0], #4
#endif
#else
        orr     r3, r3, #PMD_SECT_XN
        str     r3, [r0], #4
#endif

#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
        /* we don't need any serial debugging mappings */
        ldr     r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
#endif

#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
        /*
         * If we're using the NetWinder or CATS, we also need to map
         * in the 16550-type serial port for the debug messages
         */
        add     r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER)
        orr     r3, r7, #0x7c000000
        str     r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
        /*
         * Map in the screen at 0x02000000 and SCREEN2_BASE, for
         * similar debugging reasons.  This applies only to the
         * Acorn RiscPC.
         */
        add     r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER)
        orr     r3, r7, #0x02000000
        str     r3, [r0]
        add     r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
        str     r3, [r0]
#endif
#endif
#ifdef CONFIG_ARM_LPAE
        sub     r4, r4, #0x1000         @ point to the PGD table
#endif
        ret     lr
ENDPROC(__create_page_tables)
        .ltorg

#if defined(CONFIG_SMP)
        .text
        .arm
ENTRY(secondary_startup_arm)
 THUMB( badr    r9, 1f          )       @ Kernel is entered in ARM.
 THUMB( bx      r9              )       @ If this is a Thumb-2 kernel,
 THUMB( .thumb                  )       @ switch to Thumb now.
 THUMB(1:                       )
ENTRY(secondary_startup)
        /*
         * Common entry point for secondary CPUs.
         *
         * Ensure that we're in SVC mode, and IRQs are disabled.  Look up
         * the processor type - there is no need to check the machine type
         * as it has already been validated by the primary processor.
         */

 ARM_BE8(setend be)                             @ ensure we are in BE8 mode

#ifdef CONFIG_ARM_VIRT_EXT
        bl      __hyp_stub_install_secondary
#endif
        safe_svcmode_maskall r9

        mrc     p15, 0, r9, c0, c0              @ get processor id
        bl      __lookup_processor_type
        movs    r10, r5                         @ invalid processor?
        moveq   r0, #'p'                        @ yes, error 'p'
 THUMB( it      eq )            @ force fixup-able long branch encoding
        beq     __error_p

        /*
         * Use the page tables supplied by __cpu_up (see
         * struct secondary_data in asm/smp.h).
         */
        adr_l   r3, secondary_data
        mov_l   r12, __secondary_switched
        ldrd    r4, r5, [r3, #0]                @ get secondary_data.pgdir
ARM_BE8(eor     r4, r4, r5)                     @ Swap r5 and r4 in BE:
ARM_BE8(eor     r5, r4, r5)                     @ it can be done in 3 steps
ARM_BE8(eor     r4, r4, r5)                     @ without using a temp reg.
        ldr     r8, [r3, #8]                    @ get secondary_data.swapper_pg_dir
        badr    lr, __enable_mmu                @ return address
        mov     r13, r12                        @ __secondary_switched address
        ldr     r12, [r10, #PROCINFO_INITFUNC]
        add     r12, r12, r10                   @ initialise processor
                                                @ (return control reg)
        ret     r12
ENDPROC(secondary_startup)
ENDPROC(secondary_startup_arm)

ENTRY(__secondary_switched)
#if defined(CONFIG_VMAP_STACK) && !defined(CONFIG_ARM_LPAE)
        @ Before using the vmap'ed stack, we have to switch to swapper_pg_dir
        @ as the ID map does not cover the vmalloc region.
        mrc     p15, 0, ip, c2, c0, 1   @ read TTBR1
        mcr     p15, 0, ip, c2, c0, 0   @ set TTBR0
        instr_sync
#endif
        adr_l   r7, secondary_data + 12         @ get secondary_data.stack
        ldr     sp, [r7]
        ldr     r0, [r7, #4]                    @ get secondary_data.task
        mov     fp, #0
        b       secondary_start_kernel
ENDPROC(__secondary_switched)

#endif /* defined(CONFIG_SMP) */

/*
 * Set up common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
 * registers.  All these registers need to be preserved by the
 * processor setup function (or set in the case of r0).
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r4  = TTBR pointer (low word)
 *  r5  = TTBR pointer (high word if LPAE)
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 */
__enable_mmu:
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
        orr     r0, r0, #CR_A
#else
        bic     r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
        bic     r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
        bic     r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
        bic     r0, r0, #CR_I
#endif
#ifdef CONFIG_ARM_LPAE
        mcrr    p15, 0, r4, r5, c2              @ load TTBR0
#else
        mov     r5, #DACR_INIT
        mcr     p15, 0, r5, c3, c0, 0           @ load domain access register
        mcr     p15, 0, r4, c2, c0, 0           @ load page table pointer
#endif
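        /*
         * (LPAE has no memory domains, so there is no DACR to program
         * on that path; the 64-bit TTBR0 is loaded in one mcrr.)
         */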
        b       __turn_mmu_on
ENDPROC(__enable_mmu)

/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags or dtb pointer
 *  r9  = processor ID
 *  r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
        .align  5
        .pushsection    .idmap.text, "ax"
ENTRY(__turn_mmu_on)
        mov     r0, r0                          @ nop
        instr_sync
        mcr     p15, 0, r0, c1, c0, 0           @ write control reg
        mrc     p15, 0, r3, c0, c0, 0           @ read id reg
        instr_sync
        mov     r3, r3                          @ nop
        mov     r3, r13                         @ fetch the virtual jump address
        ret     r3                              @ jump into the virtual mapping
__turn_mmu_on_end:
ENDPROC(__turn_mmu_on)
        .popsection
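
/*
 * __turn_mmu_on lives in .idmap.text because it runs at the instant
 * the MMU switches on: __create_page_tables identity-maps the
 * __turn_mmu_on .. __turn_mmu_on_end range for exactly this reason,
 * so instruction fetches work on both sides of the switch.
 */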


#ifdef CONFIG_SMP_ON_UP
        __HEAD
__fixup_smp:
        and     r3, r9, #0x000f0000     @ architecture version
        teq     r3, #0x000f0000         @ CPU ID supported?
        bne     __fixup_smp_on_up       @ no, assume UP

        bic     r3, r9, #0x00ff0000
        bic     r3, r3, #0x0000000f     @ mask 0xff00fff0
        mov     r4, #0x41000000
        orr     r4, r4, #0x0000b000
        orr     r4, r4, #0x00000020     @ val 0x4100b020
        teq     r3, r4                  @ ARM 11MPCore?
        reteq   lr                      @ yes, assume SMP

        mrc     p15, 0, r0, c0, c0, 5   @ read MPIDR
        and     r0, r0, #0xc0000000     @ multiprocessing extensions and
        teq     r0, #0x80000000         @ not part of a uniprocessor system?
        bne     __fixup_smp_on_up       @ no, assume UP

        @ Core indicates it is SMP. Check for Aegis SoC where a single
        @ Cortex-A9 CPU is present but SMP operations fault.
        mov     r4, #0x41000000
        orr     r4, r4, #0x0000c000
        orr     r4, r4, #0x00000090
        teq     r3, r4                  @ Check for ARM Cortex-A9
        retne   lr                      @ not an ARM Cortex-A9

        @ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
        @ below address check will need to be #ifdef'd or equivalent
        @ for the Aegis platform.
        mrc     p15, 4, r0, c15, c0     @ get SCU base address
        teq     r0, #0x0                @ '0' on actual UP A9 hardware
        beq     __fixup_smp_on_up       @ so it's an A9 UP
        ldr     r0, [r0, #4]            @ read SCU Config
ARM_BE8(rev     r0, r0)                 @ byteswap if big endian
        and     r0, r0, #0x3            @ number of CPUs
        teq     r0, #0x0                @ is 1?
        retne   lr

__fixup_smp_on_up:
        adr_l   r4, __smpalt_begin
        adr_l   r5, __smpalt_end
        b       __do_fixup_smp_on_up
ENDPROC(__fixup_smp)

        .pushsection .data
        .align  2
        .globl  smp_on_up
smp_on_up:
        ALT_SMP(.long   1)
        ALT_UP(.long    0)
        .popsection
#endif

        .text
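/*
 * Rewrite each SMP-alternative site with its UP replacement.  On
 * entry r4 points at the first fixup record and r5 at the end of the
 * table.  Each record is two words emitted by ALT_SMP()/ALT_UP()
 * (see asm/assembler.h): the fixup site's offset relative to the
 * record itself, and the UP instruction to store there.
 */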
__do_fixup_smp_on_up:
        cmp     r4, r5
        reths   lr
        ldmia   r4, {r0, r6}
 ARM(   str     r6, [r0, r4]    )
 THUMB( add     r0, r0, r4      )
        add     r4, r4, #8
#ifdef __ARMEB__
 THUMB( mov     r6, r6, ror #16 )       @ Convert word order for big-endian.
#endif
 THUMB( strh    r6, [r0], #2    )       @ For Thumb-2, store as two halfwords
 THUMB( mov     r6, r6, lsr #16 )       @ to be robust against misaligned r0.
 THUMB( strh    r6, [r0]        )
        b       __do_fixup_smp_on_up
ENDPROC(__do_fixup_smp_on_up)

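/*
 * C-callable entry: fixup_smp(start, size) applies the same UP
 * rewriting to the fixup records in [start, start + size), e.g. for
 * the .alt.smp.init section of a module loaded on a UP system.
 */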
ENTRY(fixup_smp)
        stmfd   sp!, {r4 - r6, lr}
        mov     r4, r0
        add     r5, r0, r1
        bl      __do_fixup_smp_on_up
        ldmfd   sp!, {r4 - r6, pc}
ENDPROC(fixup_smp)

#include "head-common.S"