/* linux/arch/microblaze/kernel/head.S */
/*
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * MMU code derived from arch/ppc/kernel/head_4xx.S:
 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *      Initial PowerPC version.
 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *      Rewritten for PReP
 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *      Low-level exception handlers, MMU support, and rewrite.
 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *      PowerPC 8xx modifications.
 *    Copyright (c) 1998-1999 TiVo, Inc.
 *      PowerPC 403GCX modifications.
 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *      PowerPC 403GCX/405GP modifications.
 *    Copyright 2000 MontaVista Software Inc.
 *      PPC405 modifications
 *      PowerPC 403GCX/405GP modifications.
 *      Author: MontaVista Software, Inc.
 *              frank_rowand@mvista.com or source@mvista.com
 *              debbie_chu@mvista.com
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
  30
  31#include <linux/init.h>
  32#include <linux/linkage.h>
  33#include <asm/entry.h>
  34#include <asm/thread_info.h>
  35#include <asm/page.h>
  36#include <linux/of_fdt.h>               /* for OF_DT_HEADER */
  37
  38#ifdef CONFIG_MMU
  39#include <asm/setup.h> /* COMMAND_LINE_SIZE */
  40#include <asm/mmu.h>
  41#include <asm/processor.h>
  42#include <asm/asm-offsets.h>
  43
  44.section .data
  45.global empty_zero_page
  46.align 12
  47empty_zero_page:
  48        .space  PAGE_SIZE
  49.global swapper_pg_dir
  50swapper_pg_dir:
  51        .space  PAGE_SIZE
  52#ifdef CONFIG_SMP
  53temp_boot_stack:
  54        .space  1024
  55#define CURRENT_SAVE    CURRENT_SAVE_ADDR
  56#endif /* CONFIG_SMP */
  57#endif /* CONFIG_MMU */
  58
  59.section .rodata
  60.align 4
  61endian_check:
  62        .word   1
  63
  64        __HEAD
  65ENTRY(_start)
  66#if CONFIG_KERNEL_BASE_ADDR == 0
  67        brai    TOPHYS(real_start)
  68        .org    0x100
  69real_start:
  70#endif
  71
  72        mts     rmsr, r0
  73/* Disable stack protection from bootloader */
  74        mts     rslr, r0
  75        addi    r8, r0, 0xFFFFFFFF
  76        mts     rshr, r8
  77/*
  78 * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc'
  79 * if the msrclr instruction is not enabled. We use this to detect
  80 * if the opcode is available, by issuing msrclr and then testing the result.
  81 * r8 == 0 - msr instructions are implemented
  82 * r8 != 0 - msr instructions are not implemented
  83 */
  84        mfs     r1, rmsr
  85        msrclr  r8, 0 /* clear nothing - just read msr for test */
  86        cmpu    r8, r8, r1 /* r1 must contain msr reg content */
  87
  88#ifdef CONFIG_SMP
  89        /* skip FDT copy if secondary */
  90        mfs     r11, rpvr0
  91        andi    r11, r11, 0xFF
  92        bnei    r11, _setup_initial_mmu
  93#endif /* CONFIG_SMP */
  94
  95/* r7 may point to an FDT, or there may be one linked in.
  96   if it's in r7, we've got to save it away ASAP.
  97   We ensure r7 points to a valid FDT, just in case the bootloader
  98   is broken or non-existent */
  99        beqi    r7, no_fdt_arg                  /* NULL pointer?  don't copy */
 100/* Does r7 point to a valid FDT? Load HEADER magic number */
 101        /* Run time Big/Little endian platform */
 102        /* Save 1 as word and load byte - 0 - BIG, 1 - LITTLE */
 103        lbui    r11, r0, TOPHYS(endian_check)
 104        beqid   r11, big_endian /* DO NOT break delay stop dependency */
 105        lw      r11, r0, r7 /* Big endian load in delay slot */
 106        lwr     r11, r0, r7 /* Little endian load */
 107big_endian:
 108        rsubi   r11, r11, OF_DT_HEADER  /* Check FDT header */
 109        beqi    r11, _prepare_copy_fdt
 110        or      r7, r0, r0              /* clear R7 when not valid DTB */
 111        bnei    r11, no_fdt_arg                 /* No - get out of here */
 112_prepare_copy_fdt:
 113        or      r11, r0, r0 /* incremment */
 114        ori     r4, r0, TOPHYS(_fdt_start)
 115        ori     r3, r0, (0x10000 - 4)
 116_copy_fdt:
 117        lw      r12, r7, r11 /* r12 = r7 + r11 */
 118        sw      r12, r4, r11 /* addr[r4 + r11] = r12 */
 119        addik   r11, r11, 4 /* increment counting */
 120        bgtid   r3, _copy_fdt /* loop for all entries */
 121        addik   r3, r3, -4 /* descrement loop */
 122no_fdt_arg:
 123
 124#ifdef CONFIG_MMU
 125
 126#ifndef CONFIG_CMDLINE_BOOL
 127/*
 128 * handling command line
 129 * copy command line directly to cmd_line placed in data section.
 130 */
 131        beqid   r5, skip        /* Skip if NULL pointer */
 132        or      r11, r0, r0             /* incremment */
 133        ori     r4, r0, cmd_line        /* load address of command line */
 134        tophys(r4,r4)                   /* convert to phys address */
 135        ori     r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
 136_copy_command_line:
 137        /* r2=r5+r11 - r5 contain pointer to command line */
 138        lbu     r2, r5, r11
 139        beqid   r2, skip                /* Skip if no data */
 140        sb      r2, r4, r11             /* addr[r4+r11]= r2 */
 141        addik   r11, r11, 1             /* increment counting */
 142        bgtid   r3, _copy_command_line  /* loop for all entries       */
 143        addik   r3, r3, -1              /* decrement loop */
 144        addik   r5, r4, 0               /* add new space for command line */
 145        tovirt(r5,r5)
 146skip:
 147#endif /* CONFIG_CMDLINE_BOOL */
 148
 149#ifdef NOT_COMPILE
 150/* save bram context */
 151        or      r11, r0, r0                             /* incremment */
 152        ori     r4, r0, TOPHYS(_bram_load_start)        /* save bram context */
 153        ori     r3, r0, (LMB_SIZE - 4)
 154_copy_bram:
 155        lw      r7, r0, r11             /* r7 = r0 + r11 */
 156        sw      r7, r4, r11             /* addr[r4 + r11] = r7 */
 157        addik   r11, r11, 4             /* increment counting */
 158        bgtid   r3, _copy_bram          /* loop for all entries */
 159        addik   r3, r3, -4              /* descrement loop */
 160#endif
 161        /* We have to turn on the MMU right away. */
 162
 163_setup_initial_mmu:
 164        /*
 165         * Set up the initial MMU state so we can do the first level of
 166         * kernel initialization.  This maps the first 16 MBytes of memory 1:1
 167         * virtual to physical.
 168         */
 169        nop
 170        addik   r3, r0, MICROBLAZE_TLB_SIZE -1  /* Invalidate all TLB entries */
 171_invalidate:
 172        mts     rtlbx, r3
 173        mts     rtlbhi, r0                      /* flush: ensure V is clear   */
 174        mts     rtlblo, r0
 175        bgtid   r3, _invalidate         /* loop for all entries       */
 176        addik   r3, r3, -1
 177        /* sync */
 178
 179        /* Setup the kernel PID */
 180        mts     rpid,r0                 /* Load the kernel PID */
 181        nop
 182        bri     4
 183
 184        /*
 185         * We should still be executing code at physical address area
 186         * RAM_BASEADDR at this point. However, kernel code is at
 187         * a virtual address. So, set up a TLB mapping to cover this once
 188         * translation is enabled.
 189         */
 190
 191        addik   r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */
 192        tophys(r4,r3)                   /* Load the kernel physical address */
 193
 194        /* start to do TLB calculation */
 195        addik   r12, r0, _end_tlb_mapping
 196        rsub    r12, r3, r12
 197
 198        or r9, r0, r0 /* TLB0 = 0 */
 199        or r10, r0, r0 /* TLB1 = 0 */
 200
 201        /*
 202         * Linux is 4MB aligned that's why we can just check certain sizes.
 203         * Add 12MB, 16MB and 8MB on the top of list because that's normal
 204         * sizes which are often used.
 205         */
 206        addik   r11, r12, -0xc00000 /* 12 MB */
 207        beqi    r11, GT12
 208        addik   r11, r12, -0x1000000 /* 16 MB */
 209        beqi    r11, GT16
 210        addik   r11, r12, -0x800000 /* 8 MB */
 211        beqi    r11, GT8
 212        addik   r11, r12, -0x2000000 /* 32 MB */
 213        beqi    r11, GT32
 214        addik   r11, r12, -0x1800000 /* 24 MB */
 215        beqi    r11, GT24
 216        addik   r11, r12, -0x1400000 /* 20 MB */
 217        beqi    r11, GT20
 218        addik   r11, r12, -0x400000 /* 4 MB */
 219        beqi    r11, GT4
 220        /* if this page doesn't detect it use 32MB mapping */
 221GT32:
 222        addik   r9, r0, 0x1000000 /* means TLB0 is 16MB */
 223GT16:
 224        addik   r10, r0, 0x1000000 /* means TLB1 is 16MB */
 225        bri     tlb_end
 226GT24:
 227        addik   r9, r0, 0x1000000 /* means TLB0 is 16MB */
 228GT8:
 229        addik   r10, r0, 0x800000 /* means TLB1 is 8MB */
 230        bri     tlb_end
 231GT20:
 232        addik   r9, r0, 0x1000000 /* means TLB0 is 16MB */
 233GT4:
 234        addik   r10, r0, 0x400000 /* means TLB1 is 4MB */
 235        bri     tlb_end
 236GT12:
 237        addik   r9, r0, 0x800000 /* means TLB0 is 8MB */
 238        addik   r10, r0, 0x400000 /* means TLB1 is 4MB */
 239        /* NOTE: No need to just to tlb_end here */
 240tlb_end:
 241
 242        /*
 243         * Configure and load two entries into TLB slots 0 and 1.
 244         * In case we are pinning TLBs, these are reserved in by the
 245         * other TLB functions.  If not reserving, then it doesn't
 246         * matter where they are loaded.
 247         */
 248        andi    r4,r4,0xfffffc00        /* Mask off the real page number */
 249        ori     r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */
 250
 251        /*
 252         * TLB0 is always used - check if is not zero (r9 stores TLB0 value)
 253         * if is use TLB1 value and clear it (r10 stores TLB1 value)
 254         */
 255        bnei    r9, tlb0_not_zero
 256        add     r9, r10, r0
 257        add     r10, r0, r0
 258tlb0_not_zero:
 259
 260        /* look at the code below */
 261        ori     r30, r0, 0x200
 262        andi    r29, r9, 0x100000
 263        bneid   r29, 1f
 264        addik   r30, r30, 0x80
 265        andi    r29, r9, 0x400000
 266        bneid   r29, 1f
 267        addik   r30, r30, 0x80
 268        andi    r29, r9, 0x1000000
 269        bneid   r29, 1f
 270        addik   r30, r30, 0x80
 2711:
 272        andi    r3,r3,0xfffffc00        /* Mask off the effective page number */
 273        ori     r3,r3,(TLB_VALID)
 274        or      r3, r3, r30
 275
 276        /* Load tlb_skip size value which is index to first unused TLB entry */
 277        lwi     r11, r0, TOPHYS(tlb_skip)
 278        mts     rtlbx,r11               /* TLB slow 0 */
 279
 280        mts     rtlblo,r4               /* Load the data portion of the entry */
 281        mts     rtlbhi,r3               /* Load the tag portion of the entry */
 282
 283        /* Increase tlb_skip size */
 284        addik   r11, r11, 1
 285        swi     r11, r0, TOPHYS(tlb_skip)
 286
 287        /* TLB1 can be zeroes that's why we not setup it */
 288        beqi    r10, jump_over2
 289
 290        /* look at the code below */
 291        ori     r30, r0, 0x200
 292        andi    r29, r10, 0x100000
 293        bneid   r29, 1f
 294        addik   r30, r30, 0x80
 295        andi    r29, r10, 0x400000
 296        bneid   r29, 1f
 297        addik   r30, r30, 0x80
 298        andi    r29, r10, 0x1000000
 299        bneid   r29, 1f
 300        addik   r30, r30, 0x80
 3011:
 302        addk    r4, r4, r9      /* previous addr + TLB0 size */
 303        addk    r3, r3, r9
 304
 305        andi    r3,r3,0xfffffc00        /* Mask off the effective page number */
 306        ori     r3,r3,(TLB_VALID)
 307        or      r3, r3, r30
 308
 309        lwi     r11, r0, TOPHYS(tlb_skip)
 310        mts     rtlbx, r11              /* r11 is used from TLB0 */
 311
 312        mts     rtlblo,r4               /* Load the data portion of the entry */
 313        mts     rtlbhi,r3               /* Load the tag portion of the entry */
 314
 315        /* Increase tlb_skip size */
 316        addik   r11, r11, 1
 317        swi     r11, r0, TOPHYS(tlb_skip)
 318
 319jump_over2:
 320        /*
 321         * Load a TLB entry for LMB, since we need access to
 322         * the exception vectors, using a 4k real==virtual mapping.
 323         */
 324        /* Use temporary TLB_ID for LMB - clear this temporary mapping later */
 325        ori     r11, r0, MICROBLAZE_LMB_TLB_ID
 326        mts     rtlbx,r11
 327
 328        ori     r4,r0,(TLB_WR | TLB_EX)
 329        ori     r3,r0,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
 330
 331        mts     rtlblo,r4               /* Load the data portion of the entry */
 332        mts     rtlbhi,r3               /* Load the tag portion of the entry */
 333
 334        /*
 335         * We now have the lower 16 Meg of RAM mapped into TLB entries, and the
 336         * caches ready to work.
 337         */
 338turn_on_mmu:
 339        ori     r15,r0,start_here
 340#ifdef CONFIG_SMP
 341        /*
 342         * Read PVR and mask off all but CPU id bits to use to select
 343         * boot sequence
 344         */
 345        mfs     r4, rpvr0
 346        andi    r4, r4, 0xFF
 347
 348        beqi    r4, finish
 349        ori     r15, r0, start_secondary_cpu
 350finish:
 351#endif /* CONFIG_SMP */
 352        ori     r4,r0,MSR_KERNEL_VMS
 353        mts     rmsr,r4
 354        nop
 355        rted    r15,0                   /* enables MMU */
 356        nop
 357
 358start_here:
 359#endif /* CONFIG_MMU */
 360
 361        /* Initialize small data anchors */
 362        addik   r13, r0, _KERNEL_SDA_BASE_
 363        addik   r2, r0, _KERNEL_SDA2_BASE_
 364
 365        /* Initialize stack pointer */
 366        addik   r1, r0, init_thread_union + THREAD_SIZE - 4
 367
 368        /* Initialize r31 with current task address */
 369        addik   r31, r0, init_task
 370#ifdef CONFIG_MMU
 371        /* save current for CPU 0 */
 372        swi     CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
 373#endif
 374
 375        addik   r11, r0, machine_early_init
 376        brald   r15, r11
 377        nop
 378
 379#ifndef CONFIG_MMU
 380        addik   r15, r0, machine_halt
 381        braid   start_kernel
 382        nop
 383#else
 384        /*
 385         * Initialize the MMU.
 386         */
 387        bralid  r15, mmu_init
 388        nop
 389
 390        /* Go back to running unmapped so we can load up new values
 391         * and change to using our exception vectors.
 392         * On the MicroBlaze, all we invalidate the used TLB entries to clear
 393         * the old 16M byte TLB mappings.
 394         */
 395        ori     r15,r0,TOPHYS(kernel_load_context)
 396        ori     r4,r0,MSR_KERNEL
 397        mts     rmsr,r4
 398        nop
 399        bri     4
 400        rted    r15,0
 401        nop
 402
 403        /* Load up the kernel context */
 404kernel_load_context:
 405        ori     r5, r0, MICROBLAZE_LMB_TLB_ID
 406        mts     rtlbx,r5
 407        nop
 408        mts     rtlbhi,r0
 409        nop
 410        addi    r15, r0, machine_halt
 411        ori     r17, r0, start_kernel
 412        ori     r4, r0, MSR_KERNEL_VMS
 413        mts     rmsr, r4
 414        nop
 415        rted    r17, 0          /* enable MMU and jump to start_kernel */
 416        nop
 417#endif /* CONFIG_MMU */
 418
 419#ifdef CONFIG_SMP
 420/* Entry point for secondary processors */
 421start_secondary_cpu:
 422
 423        /* Initialize small data anchors */
 424        addik   r13, r0, _KERNEL_SDA_BASE_
 425        addik   r2, r0, _KERNEL_SDA2_BASE_
 426
 427        /* Initialize stack pointer */
 428        addik   r1, r0, temp_boot_stack + 1024 - 4
 429
 430        /*
 431         * Initialize the exception table.
 432         */
 433        addik   r11, r0, secondary_machine_init
 434        brald   r15, r11
 435        nop
 436
 437        lwi     r1, r0, secondary_ti
 438
 439        /* Initialize r31 with current task address */
 440        lwi     CURRENT_TASK, r1, TI_TASK
 441        /* save current for secondary CPU */
 442        swi     CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
 443
 444        /* Initialize stack pointer */
 445        addi    r1, r1, THREAD_SIZE - 4
 446        swi     r0, r1, 0
 447
 448        /* Initialize MMU */
 449        ori     r11, r0, 0x10000000
 450        mts     rzpr, r11
 451
 452        ori     r15, r0, TOPHYS(kernel_load_context_secondary)
 453        ori     r4, r0, MSR_KERNEL
 454        mts     rmsr, r4
 455        nop
 456        bri     4
 457        rted    r15, 0
 458        nop
 459
 460        /* Load up the kernel context */
 461kernel_load_context_secondary:
 462        # Keep entry 0 and 1 valid. Entry 3 mapped to LMB can go away.
 463        ori     r5, r0, MICROBLAZE_LMB_TLB_ID
 464        mts     rtlbx, r5
 465        nop
 466        mts     rtlbhi, r0
 467        nop
 468        addi    r15, r0, machine_halt
 469        ori     r17, r0, start_secondary
 470        ori     r4, r0, MSR_KERNEL_VMS
 471        mts     rmsr, r4
 472        nop
 473        rted    r17, 0          /* enable MMU and jump to start_kernel */
 474        nop
 475#endif /* CONFIG_SMP */
 476