/* linux/arch/riscv/kernel/head.S */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */
   6#include <asm/thread_info.h>
   7#include <asm/asm-offsets.h>
   8#include <asm/asm.h>
   9#include <linux/init.h>
  10#include <linux/linkage.h>
  11#include <asm/thread_info.h>
  12#include <asm/page.h>
  13#include <asm/csr.h>
  14#include <asm/hwcap.h>
  15#include <asm/image.h>
  16
__HEAD
ENTRY(_start)
        /*
         * Image header expected by Linux boot-loaders. The image header data
         * structure is described in asm/image.h.
         * Do not modify it without modifying the structure and all bootloaders
         * that expect this header format!!
         *
         * The first field is itself the jump to the real entry point, so
         * the image is directly executable at its load address.
         */
        /* jump to start kernel */
        j _start_kernel
        /* reserved */
        .word 0
        .balign 8
#if __riscv_xlen == 64
        /* Image load offset(2MB) from start of RAM */
        .dword 0x200000
#else
        /* Image load offset(4MB) from start of RAM */
        .dword 0x400000
#endif
        /* Effective size of kernel image */
        .dword _end - _start
        /* Kernel flags (see __HEAD_FLAGS in asm/image.h) */
        .dword __HEAD_FLAGS
        /* Header version */
        .word RISCV_HEADER_VERSION
        .word 0                         /* reserved */
        .dword 0                        /* reserved */
        /* Magic values identifying a RISC-V kernel image */
        .ascii RISCV_IMAGE_MAGIC
        .balign 4
        .ascii RISCV_IMAGE_MAGIC2
        .word 0                         /* reserved */
  47
.align 2
#ifdef CONFIG_MMU
/*
 * relocate: turn on paging and continue at the kernel's virtual address.
 *
 * In:  a0 = physical address of the page directory to load into satp
 *      ra = physical return address (rewritten to its virtual alias)
 * Out: returns with translation enabled via the page tables from a0
 * Clobbers: a0, a1, a2
 */
relocate:
        /* Relocate return address */
        li a1, PAGE_OFFSET
        la a2, _start
        sub a1, a1, a2                  /* a1 = virtual - physical offset */
        add ra, ra, a1

        /* Point stvec to virtual address of instruction after satp write */
        la a2, 1f
        add a2, a2, a1
        csrw CSR_TVEC, a2

        /* Compute satp for kernel page tables, but don't load it yet */
        srl a2, a0, PAGE_SHIFT
        li a1, SATP_MODE
        or a2, a2, a1

        /*
         * Load trampoline page directory, which will cause us to trap to
         * stvec if VA != PA, or simply fall through if VA == PA.  We need a
         * full fence here because setup_vm() just wrote these PTEs and we need
         * to ensure the new translations are in use.
         */
        la a0, trampoline_pg_dir
        srl a0, a0, PAGE_SHIFT
        or a0, a0, a1
        sfence.vma
        csrw CSR_SATP, a0
.align 2                /* trap vector target must be 4-byte aligned */
1:
        /* Set trap vector to spin forever to help debug */
        la a0, .Lsecondary_park
        csrw CSR_TVEC, a0

        /* Reload the global pointer */
.option push
.option norelax
        la gp, __global_pointer$
.option pop

        /*
         * Switch to kernel page tables.  A full fence is necessary in order to
         * avoid using the trampoline translations, which are only correct for
         * the first superpage.  Fetching the fence is guaranteed to work
         * because that first superpage is translated the same way.
         */
        csrw CSR_SATP, a2
        sfence.vma

        ret
#endif /* CONFIG_MMU */
#ifdef CONFIG_SMP
        .global secondary_start_sbi
/*
 * secondary_start_sbi: entry point for secondary harts started via SBI.
 *
 * In:  a0 = hartid, used to index the per-hart
 *           __cpu_up_stack_pointer/__cpu_up_task_pointer arrays
 */
secondary_start_sbi:
        /* Mask all interrupts */
        csrw CSR_IE, zero
        csrw CSR_IP, zero

        /* Load the global pointer */
        .option push
        .option norelax
                la gp, __global_pointer$
        .option pop

        /*
         * Disable FPU to detect illegal usage of
         * floating point in kernel space
         */
        li t0, SR_FS
        csrc CSR_STATUS, t0

        /* Set trap vector to spin forever to help debug */
        la a3, .Lsecondary_park
        csrw CSR_TVEC, a3

        /* Pick up this hart's stack and task pointers (a3 = byte offset) */
        slli a3, a0, LGREG
        la a4, __cpu_up_stack_pointer
        la a5, __cpu_up_task_pointer
        add a4, a3, a4
        add a5, a3, a5
        REG_L sp, (a4)
        REG_L tp, (a5)

        .global secondary_start_common
/*
 * secondary_start_common: joined both from above and by lottery-losing
 * harts in _start_kernel; sp and tp must already be set up.
 */
secondary_start_common:

#ifdef CONFIG_MMU
        /* Enable virtual memory and relocate to virtual address */
        la a0, swapper_pg_dir
        call relocate
#endif
        call setup_trap_vector
        tail smp_callin
#endif /* CONFIG_SMP */
 144
.align 2
/*
 * setup_trap_vector: install the final kernel exception handler.
 * Points stvec at handle_exception and zeroes the scratch CSR.
 * Clobbers: a0
 */
setup_trap_vector:
        /* Set trap vector to exception handler */
        la a0, handle_exception
        csrw CSR_TVEC, a0

        /*
         * Set sup0 scratch register to 0, indicating to exception vector that
         * we are presently executing in kernel.
         */
        csrw CSR_SCRATCH, zero
        ret
 157
/*
 * Parking loop for harts that cannot be brought up, also used as the
 * catch-all trap vector during early boot.
 */
.Lsecondary_park:
        /* We lack SMP support or have too many harts, so park this hart */
        wfi
        j .Lsecondary_park

END(_start)
 164
        __INIT
/*
 * _start_kernel: real boot entry, reached via the "j _start_kernel" in
 * the image header.
 *
 * In:  a0 = hartid (read from CSR_MHARTID ourselves when running in
 *           M-mode with no firmware)
 *      a1 = physical address of the device tree blob
 */
ENTRY(_start_kernel)
        /* Mask all interrupts */
        csrw CSR_IE, zero
        csrw CSR_IP, zero

#ifdef CONFIG_RISCV_M_MODE
        /* flush the instruction cache */
        fence.i

        /* Reset all registers except ra, a0, a1 */
        call reset_regs

        /*
         * Setup a PMP to permit access to all of memory.  Some machines may
         * not implement PMPs, so we set up a quick trap handler to just skip
         * touching the PMPs on any trap.
         */
        la a0, pmp_done
        csrw CSR_TVEC, a0

        li a0, -1
        csrw CSR_PMPADDR0, a0
        li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
        csrw CSR_PMPCFG0, a0
.align 2                /* trap vector target must be 4-byte aligned */
pmp_done:

        /*
         * The hartid in a0 is expected later on, and we have no firmware
         * to hand it to us.
         */
        csrr a0, CSR_MHARTID
#endif /* CONFIG_RISCV_M_MODE */

        /* Load the global pointer */
.option push
.option norelax
        la gp, __global_pointer$
.option pop

        /*
         * Disable FPU to detect illegal usage of
         * floating point in kernel space
         */
        li t0, SR_FS
        csrc CSR_STATUS, t0

#ifdef CONFIG_SMP
        /* Park harts whose ID is beyond what this kernel was built for */
        li t0, CONFIG_NR_CPUS
        blt a0, t0, .Lgood_cores
        tail .Lsecondary_park
.Lgood_cores:
#endif

        /*
         * Pick one hart to run the main boot sequence.  amoadd.w returns
         * the previous counter value, so only the first hart to arrive
         * reads zero and falls through as the boot hart.
         */
        la a3, hart_lottery
        li a2, 1
        amoadd.w a3, a2, (a3)
        bnez a3, .Lsecondary_start

        /* Clear BSS for flat non-ELF images */
        la a3, __bss_start
        la a4, __bss_stop
        ble a4, a3, clear_bss_done
clear_bss:
        REG_S zero, (a3)
        add a3, a3, RISCV_SZPTR
        blt a3, a4, clear_bss
clear_bss_done:

        /* Save hart ID and DTB physical address */
        mv s0, a0
        mv s1, a1
        la a2, boot_cpu_hartid
        REG_S a0, (a2)

        /* Initialize page tables and relocate to virtual addresses */
        la sp, init_thread_union + THREAD_SIZE
        mv a0, s1                       /* setup_vm(dtb_pa) */
        call setup_vm
#ifdef CONFIG_MMU
        la a0, early_pg_dir
        call relocate
#endif /* CONFIG_MMU */

        call setup_trap_vector
        /* Restore C environment */
        la tp, init_task
        sw zero, TASK_TI_CPU(tp)
        la sp, init_thread_union + THREAD_SIZE

#ifdef CONFIG_KASAN
        call kasan_early_init
#endif
        /* Start the kernel */
        call soc_early_init
        call parse_dtb
        tail start_kernel

.Lsecondary_start:
#ifdef CONFIG_SMP
        /* Set trap vector to spin forever to help debug */
        la a3, .Lsecondary_park
        csrw CSR_TVEC, a3

        /* a3 = hartid scaled to a byte offset into the per-hart arrays */
        slli a3, a0, LGREG
        la a1, __cpu_up_stack_pointer
        la a2, __cpu_up_task_pointer
        add a1, a3, a1
        add a2, a3, a2

        /*
         * This hart didn't win the lottery, so we wait for the winning hart to
         * get far enough along the boot process that it should continue.
         */
.Lwait_for_cpu_up:
        /* FIXME: We should WFI to save some energy here. */
        REG_L sp, (a1)
        REG_L tp, (a2)
        beqz sp, .Lwait_for_cpu_up
        beqz tp, .Lwait_for_cpu_up
        /* Order the sp/tp loads above before any subsequent accesses */
        fence

        tail secondary_start_common
#endif

END(_start_kernel)
 293
#ifdef CONFIG_RISCV_M_MODE
/*
 * reset_regs: bring the integer register file (and the FP registers,
 * when misa reports an FPU) to a known zero state.
 *
 * ra, a0 and a1 are deliberately preserved -- the caller still needs
 * its return address, the hartid and the DTB pointer.
 *
 * Note: the FPU path leaves SR_FS set; the caller must clear it (see
 * the comment at the end of the FPU section).
 */
ENTRY(reset_regs)
        li      sp, 0
        li      gp, 0
        li      tp, 0
        li      t0, 0
        li      t1, 0
        li      t2, 0
        li      s0, 0
        li      s1, 0
        li      a2, 0
        li      a3, 0
        li      a4, 0
        li      a5, 0
        li      a6, 0
        li      a7, 0
        li      s2, 0
        li      s3, 0
        li      s4, 0
        li      s5, 0
        li      s6, 0
        li      s7, 0
        li      s8, 0
        li      s9, 0
        li      s10, 0
        li      s11, 0
        li      t3, 0
        li      t4, 0
        li      t5, 0
        li      t6, 0
        csrw    CSR_SCRATCH, 0

#ifdef CONFIG_FPU
        /* Only touch FP state if the ISA actually implements F or D */
        csrr    t0, CSR_MISA
        andi    t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
        beqz    t0, .Lreset_regs_done

        li      t1, SR_FS
        csrs    CSR_STATUS, t1          /* enable FP unit so fmv works */
        fmv.s.x f0, zero
        fmv.s.x f1, zero
        fmv.s.x f2, zero
        fmv.s.x f3, zero
        fmv.s.x f4, zero
        fmv.s.x f5, zero
        fmv.s.x f6, zero
        fmv.s.x f7, zero
        fmv.s.x f8, zero
        fmv.s.x f9, zero
        fmv.s.x f10, zero
        fmv.s.x f11, zero
        fmv.s.x f12, zero
        fmv.s.x f13, zero
        fmv.s.x f14, zero
        fmv.s.x f15, zero
        fmv.s.x f16, zero
        fmv.s.x f17, zero
        fmv.s.x f18, zero
        fmv.s.x f19, zero
        fmv.s.x f20, zero
        fmv.s.x f21, zero
        fmv.s.x f22, zero
        fmv.s.x f23, zero
        fmv.s.x f24, zero
        fmv.s.x f25, zero
        fmv.s.x f26, zero
        fmv.s.x f27, zero
        fmv.s.x f28, zero
        fmv.s.x f29, zero
        fmv.s.x f30, zero
        fmv.s.x f31, zero
        csrw    fcsr, 0
        /* note that the caller must clear SR_FS */
#endif /* CONFIG_FPU */
.Lreset_regs_done:
        ret
END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */
 372
__PAGE_ALIGNED_BSS
        /* Empty zero page */
        .balign PAGE_SIZE
        /*
         * NOTE(review): no symbol is visible after the alignment in this
         * view; the zero-page label presumably follows — confirm against
         * the full file.
         */