linux/arch/sh/kernel/head_32.S
/* $Id: head.S,v 1.7 2003/09/01 17:58:19 lethal Exp $
 *
 *  arch/sh/kernel/head.S
 *
 *  Copyright (C) 1999, 2000  Niibe Yutaka & Kaz Kojima
 *  Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Head.S contains the SH exception handlers and startup code.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/mmu.h>
#include <cpu/mmu_context.h>

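/*
 * SH-4A only: "synco" waits for all preceding instructions and memory
 * accesses to complete, and "prefi" prefetches the line at @reg into
 * the instruction cache. Both macros expand to nothing elsewhere.
 */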
#ifdef CONFIG_CPU_SH4A
#define SYNCO()         synco

#define PREFI(label, reg)       \
        mov.l   label, reg;     \
        prefi   @reg
#else
#define SYNCO()
#define PREFI(label, reg)
#endif

        .section        .empty_zero_page, "aw"
ENTRY(empty_zero_page)
        .long   1               /* MOUNT_ROOT_RDONLY */
        .long   0               /* RAMDISK_FLAGS */
        .long   0x0200          /* ORIG_ROOT_DEV */
        .long   1               /* LOADER_TYPE */
        .long   0x00000000      /* INITRD_START */
        .long   0x00000000      /* INITRD_SIZE */
#ifdef CONFIG_32BIT
        .long   0x53453f00 + 32 /* "SE?" = 32 bit */
#else
        .long   0x53453f00 + 29 /* "SE?" = 29 bit */
#endif
1:
        .skip   PAGE_SIZE - empty_zero_page - 1b
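
/*
 * The longwords above are default boot parameters: the boot loader may
 * patch them in place, and they are read back at fixed offsets from
 * empty_zero_page later in the boot (a sketch of the consumer side;
 * the accessors live in arch/sh/kernel/setup.c):
 *
 *      #define PARAM                   ((unsigned char *)empty_zero_page)
 *      #define MOUNT_ROOT_RDONLY       (*(unsigned long *)(PARAM + 0x000))
 *      #define RAMDISK_FLAGS           (*(unsigned long *)(PARAM + 0x004))
 *      #define ORIG_ROOT_DEV           (*(unsigned long *)(PARAM + 0x008))
 *      #define LOADER_TYPE             (*(unsigned long *)(PARAM + 0x00c))
 *      #define INITRD_START            (*(unsigned long *)(PARAM + 0x010))
 *      #define INITRD_SIZE             (*(unsigned long *)(PARAM + 0x014))
 */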

        __HEAD

/*
 * Condition at the entry of _stext:
 *
 *   BSC has already been initialized.
 *   INTC may or may not be initialized.
 *   VBR may or may not be initialized.
 *   MMU may or may not be initialized.
 *   Cache may or may not be initialized.
 *   Hardware (including on-chip modules) may or may not be initialized.
 */
ENTRY(_stext)
        !                       Initialize Status Register
        mov.l   1f, r0          ! MD=1, RB=0, BL=1, FD=1, IMASK=0xF (see 1f)
        ldc     r0, sr
        !                       Initialize global interrupt mask
#ifdef CONFIG_CPU_HAS_SR_RB
        mov     #0, r0
        ldc     r0, r6_bank
#endif

#ifdef CONFIG_OF_FLATTREE
        mov     r4, r12         ! Store device tree blob pointer in r12
#endif

        /*
         * Prefetch if possible to reduce cache miss penalty.
         *
         * We do this early on for SH-4A as a micro-optimization,
         * as later on we will have speculative execution enabled
         * and this will become less of an issue.
         */
        PREFI(5f, r0)
        PREFI(6f, r0)

        !
        mov.l   2f, r0
        mov     r0, r15         ! Set initial r15 (stack pointer)
#ifdef CONFIG_CPU_HAS_SR_RB
        mov.l   7f, r0
        ldc     r0, r7_bank     ! ... and initial thread_info
#endif
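
        /*
         * On CONFIG_CPU_HAS_SR_RB parts, r7_bank caches the current
         * thread_info pointer; current_thread_info() simply reads it
         * back with "stc r7_bank" (see arch/sh/include/asm/thread_info.h).
         */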

#ifdef CONFIG_PMB
/*
 * Reconfigure the initial PMB mappings set up by the hardware.
 *
 * When we boot in 32-bit MMU mode there are 2 PMB entries already
 * set up for us.
 *
 * Entry       VPN         PPN      V   SZ      C       UB      WT
 * ---------------------------------------------------------------
 *   0      0x80000000 0x00000000   1  512MB    1       0       1
 *   1      0xA0000000 0x00000000   1  512MB    0       0       0
 *
 * But we reprogram them here because we want complete control over
 * our address space and the initial mappings may not map PAGE_OFFSET
 * to __MEMORY_START (or even map all of our RAM).
 *
 * Once we've set up the cached and uncached mappings we clear the rest
 * of the PMB entries. This clearing also deals with the fact that PMB
 * entries can persist across reboots. The PMB could have been left in
 * any state when the reboot occurred, so to be safe we clear all
 * entries and start with a clean slate.
 *
 * The uncached mapping is constructed using the smallest possible
 * mapping with a single unbufferable page. Only the kernel text needs to
 * be covered via the uncached mapping so that certain functions can be
 * run uncached.
 *
 * Drivers and the like that have previously abused the 1:1 identity
 * mapping are unsupported in 32-bit mode and must specify their caching
 * preference when page tables are constructed.
 *
 * This frees up the P2 space for more nefarious purposes.
 *
 * Register utilization is as follows:
 *
 *      r0 = PMB_DATA data field
 *      r1 = PMB_DATA address field
 *      r2 = PMB_ADDR data field
 *      r3 = PMB_ADDR address field
 *      r4 = PMB_E_SHIFT
 *      r5 = remaining amount of RAM to map
 *      r6 = PMB mapping size we're trying to use
 *      r7 = cached_to_uncached
 *      r8 = scratch register
 *      r9 = scratch register
 *      r10 = number of PMB entries we've set up
 *      r11 = scratch register
 */
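
/*
 * Each PMB slot is programmed through a pair of memory-mapped registers
 * selected by the entry number (a sketch, assuming the definitions in
 * <asm/mmu.h>):
 *
 *      PMB_ADDR | (entry << PMB_E_SHIFT)  <-  VPN | PMB_V
 *      PMB_DATA | (entry << PMB_E_SHIFT)  <-  PPN | PMB_C/PMB_UB/PMB_SZ_* | PMB_V
 *
 * which is why the loops below step the r1/r3 pointers by
 * r4 = 1 << PMB_E_SHIFT per entry.
 */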

        mov.l   .LMMUCR, r1     /* Flush the TLB */
        mov.l   @r1, r0
        or      #MMUCR_TI, r0
        mov.l   r0, @r1

        mov.l   .LMEMORY_SIZE, r5

        mov     #PMB_E_SHIFT, r0
        mov     #0x1, r4
        shld    r0, r4
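        /* r4 = 1 << PMB_E_SHIFT, the stride between consecutive
           PMB_DATA/PMB_ADDR entry registers */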

        mov.l   .LFIRST_DATA_ENTRY, r0
        mov.l   .LPMB_DATA, r1
        mov.l   .LFIRST_ADDR_ENTRY, r2
        mov.l   .LPMB_ADDR, r3

        /*
         * First we need to walk the PMB and figure out if there are any
         * existing mappings that match the initial mappings' VPN/PPN.
         * If these have already been established by the bootloader, we
         * don't bother setting up new entries here, and let the late PMB
         * initialization take care of things instead.
         *
         * Note that we may need to coalesce and merge entries in order
         * to reclaim more available PMB slots, which is much more than
         * we want to do at this early stage.
         */
        mov     #0, r10
        mov     #NR_PMB_ENTRIES, r9

        mov     r1, r7          /* temporary PMB_DATA iter */

.Lvalidate_existing_mappings:

        mov.l   .LPMB_DATA_MASK, r11
        mov.l   @r7, r8
        and     r11, r8
        cmp/eq  r0, r8          /* Check for valid __MEMORY_START mappings */
        bt      .Lpmb_done

        add     #1, r10         /* Increment the loop counter */
        cmp/eq  r9, r10
        bf/s    .Lvalidate_existing_mappings
         add    r4, r7          /* Increment to the next PMB_DATA entry */

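        /*
         * Roughly, in C (a sketch of the walk above):
         *
         *      for (i = 0; i < NR_PMB_ENTRIES; i++)
         *              if ((__raw_readl(PMB_DATA + (i << PMB_E_SHIFT)) &
         *                   (PMB_PFN_MASK | PMB_V)) == (__MEMORY_START | PMB_V))
         *                      goto pmb_done;
         */
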
        /*
         * If we've fallen through, continue with setting up the initial
         * mappings.
         */

        mov     r5, r7          /* cached_to_uncached */
        mov     #0, r10

#ifdef CONFIG_UNCACHED_MAPPING
        /*
         * Uncached mapping
         */
        mov     #(PMB_SZ_16M >> 2), r9
        shll2   r9

        mov     #(PMB_UB >> 8), r8
        shll8   r8

        or      r0, r8
        or      r9, r8
        mov.l   r8, @r1
        mov     r2, r8
        add     r7, r8
        mov.l   r8, @r3

        add     r4, r1
        add     r4, r3
        add     #1, r10
#endif
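
        /*
         * The entry built above maps __MEMORY_START (r0) with a single
         * unbuffered (PMB_UB) 16MB page at PAGE_OFFSET + __MEMORY_SIZE
         * (r2 + r7), i.e. immediately above the cached mapping of RAM.
         */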

/*
 * Iterate over all of the available sizes from largest to
 * smallest for constructing the cached mapping.
 */
#define __PMB_ITER_BY_SIZE(size)                        \
.L##size:                                               \
        mov     #(size >> 4), r6;                       \
        shll16  r6;                                     \
        shll8   r6;                                     \
                                                        \
        cmp/hi  r5, r6;                                 \
        bt      9999f;                                  \
                                                        \
        mov     #(PMB_SZ_##size##M >> 2), r9;           \
        shll2   r9;                                     \
                                                        \
        /*                                              \
         * Cached mapping                               \
         */                                             \
        mov     #PMB_C, r8;                             \
        or      r0, r8;                                 \
        or      r9, r8;                                 \
        mov.l   r8, @r1;                                \
        mov.l   r2, @r3;                                \
                                                        \
        /* Increment to the next PMB_DATA entry */      \
        add     r4, r1;                                 \
        /* Increment to the next PMB_ADDR entry */      \
        add     r4, r3;                                 \
        /* Increment number of PMB entries */           \
        add     #1, r10;                                \
                                                        \
        sub     r6, r5;                                 \
        add     r6, r0;                                 \
        add     r6, r2;                                 \
                                                        \
        bra     .L##size;                               \
         nop;   /* keep the bra delay slot harmless */  \
9999:

        __PMB_ITER_BY_SIZE(512)
        __PMB_ITER_BY_SIZE(128)
        __PMB_ITER_BY_SIZE(64)
        __PMB_ITER_BY_SIZE(16)

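        /*
         * In rough C terms, the expansions above implement (a sketch;
         * "data"/"addr" are the running r0/r2 entry values and
         * "remaining" is r5):
         *
         *      for each size in { 512M, 128M, 64M, 16M }
         *              while (remaining >= size) {
         *                      write data | PMB_C | PMB_SZ_size to the PMB_DATA slot;
         *                      write addr to the PMB_ADDR slot;
         *                      advance both slots by 1 << PMB_E_SHIFT;
         *                      data += size; addr += size;
         *                      remaining -= size;
         *              }
         */
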
#ifdef CONFIG_UNCACHED_MAPPING
        /*
         * Now that we can access it, update cached_to_uncached and
         * uncached_size (the latter is fixed at 1 << 24 = 16MB, matching
         * the single PMB_SZ_16M uncached entry set up above).
         */
        mov.l   .Lcached_to_uncached, r0
        mov.l   r7, @r0

        mov.l   .Luncached_size, r0
        mov     #1, r7
        shll16  r7
        shll8   r7
        mov.l   r7, @r0
#endif

        /*
         * Clear the remaining PMB entries.
         *
         * r3 = entry to begin clearing from
         * r10 = number of entries we've set up so far
         */
        mov     #0, r1
        mov     #NR_PMB_ENTRIES, r0

.Lagain:
        mov.l   r1, @r3         /* Clear PMB_ADDR entry */
        add     #1, r10         /* Increment the loop counter */
        cmp/eq  r0, r10
        bf/s    .Lagain
         add    r4, r3          /* Increment to the next PMB_ADDR entry */

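        /*
         * The icbi below invalidates the icache block holding the first
         * instructions of cpu_init (6f), which we jump through shortly,
         * so those fetches go through the freshly programmed PMB rather
         * than any stale cache contents.
         */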
        mov.l   6f, r0
        icbi    @r0

.Lpmb_done:
#endif /* CONFIG_PMB */

#ifndef CONFIG_SH_NO_BSS_INIT
        /*
         * Don't clear BSS if running on slow platforms such as an RTL
         * simulation, remote memory via SHdebug link, etc.  For these
         * the memory can be guaranteed to be all zero on boot anyway.
         */
                                ! Clear BSS area
#ifdef CONFIG_SMP
        mov.l   3f, r0
        cmp/eq  #0, r0          ! skip clear if set to zero
        bt      10f
#endif

        mov.l   3f, r1
        add     #4, r1
        mov.l   4f, r2
        mov     #0, r0
9:      cmp/hs  r2, r1
        bf/s    9b              ! while (r1 < r2)
         mov.l  r0,@-r2

10:
#endif

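        /*
         * In C terms the clear loop above is roughly (a sketch; note it
         * walks downward from _end, and the r1 bias of 4 makes the final
         * pre-decremented store land at __bss_start + 4):
         *
         *      unsigned long *p = (unsigned long *)_end;
         *      while ((unsigned long)p > (unsigned long)__bss_start + 4)
         *              *--p = 0;
         */
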
#ifdef CONFIG_OF_FLATTREE
        mov.l   8f, r0          ! Make flat device tree available early.
        jsr     @r0
         mov    r12, r4
#endif
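
        /*
         * The jsr above calls sh_fdt_init(dt_blob): r4 carries the first
         * C argument in the SH calling convention, so the DTB pointer
         * stashed in r12 at entry is handed over to C code here.
         */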

        !                       Additional CPU initialization
        mov.l   6f, r0
        jsr     @r0
         nop

        SYNCO()                 ! Wait for pending instructions.

        !                       Start kernel
        mov.l   5f, r0
        jmp     @r0
         nop

        .balign 4
#if defined(CONFIG_CPU_SH2)
1:      .long   0x000000F0              ! IMASK=0xF
#else
1:      .long   0x500080F0              ! MD=1, RB=0, BL=1, FD=1, IMASK=0xF
#endif
ENTRY(stack_start)
2:      .long   init_thread_union+THREAD_SIZE
3:      .long   __bss_start
4:      .long   _end
5:      .long   start_kernel
6:      .long   cpu_init
7:      .long   init_thread_union
#if defined(CONFIG_OF_FLATTREE)
8:      .long   sh_fdt_init
#endif

#ifdef CONFIG_PMB
.LPMB_ADDR:             .long   PMB_ADDR
.LPMB_DATA:             .long   PMB_DATA
.LPMB_DATA_MASK:        .long   PMB_PFN_MASK | PMB_V
.LFIRST_ADDR_ENTRY:     .long   PAGE_OFFSET | PMB_V
.LFIRST_DATA_ENTRY:     .long   __MEMORY_START | PMB_V
.LMMUCR:                .long   MMUCR
.LMEMORY_SIZE:          .long   __MEMORY_SIZE
#ifdef CONFIG_UNCACHED_MAPPING
.Lcached_to_uncached:   .long   cached_to_uncached
.Luncached_size:        .long   uncached_size
#endif
#endif