/* linux/arch/powerpc/mm/44x_mmu.c */
   1/*
   2 * Modifications by Matt Porter (mporter@mvista.com) to support
   3 * PPC44x Book E processors.
   4 *
   5 * This file contains the routines for initializing the MMU
   6 * on the 4xx series of chips.
   7 *  -- paulus
   8 *
   9 *  Derived from arch/ppc/mm/init.c:
  10 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  11 *
  12 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  13 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
  14 *    Copyright (C) 1996 Paul Mackerras
  15 *
  16 *  Derived from "arch/i386/mm/init.c"
  17 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  18 *
  19 *  This program is free software; you can redistribute it and/or
  20 *  modify it under the terms of the GNU General Public License
  21 *  as published by the Free Software Foundation; either version
  22 *  2 of the License, or (at your option) any later version.
  23 *
  24 */
  25
  26#include <linux/init.h>
  27#include <linux/memblock.h>
  28
  29#include <asm/mmu.h>
  30#include <asm/page.h>
  31#include <asm/cacheflush.h>
  32
  33#include "mmu_decl.h"
  34
/* Used by the 44x TLB replacement exception handler.
 * Just needed it declared someplace.
 */
unsigned int tlb_44x_index; /* = 0 */
/* Watermark: highest TLB slot available to the SW replacement code.
 * Entries above it are pinned.  The value is patched directly into the
 * TLB miss handlers by ppc44x_update_tlb_hwater() below. */
unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;
/* NOTE(review): flag consulted to flush the icache; nothing in this
 * file sets it — presumably written by the TLB miss assembly, confirm
 * against head_44x.S */
int icache_44x_need_flush;

/* Bitmap of bolted 47x TLB entry indices (8-bit index => 256 bits
 * needed; the array is comfortably oversized). Filled in by
 * ppc47x_update_boltmap(). */
unsigned long tlb_47x_boltmap[1024/8];
  43
/*
 * Propagate tlb_44x_hwater into the 44x TLB miss handlers.
 *
 * Each handler embeds the watermark as the 16-bit immediate of a cmpli
 * instruction instead of loading the global, so changing the watermark
 * means rewriting the low halfword of that instruction in both the
 * D-side and I-side handlers and flushing the icache over the patched
 * word so instruction fetch sees the new value.
 */
static void ppc44x_update_tlb_hwater(void)
{
	/* Patch sites exported by the assembly TLB miss handlers */
	extern unsigned int tlb_44x_patch_hwater_D[];
	extern unsigned int tlb_44x_patch_hwater_I[];

	/* The TLB miss handlers hard codes the watermark in a cmpli
	 * instruction to improve performances rather than loading it
	 * from the global variable. Thus, we patch the instructions
	 * in the 2 TLB miss handlers when updating the value
	 */
	tlb_44x_patch_hwater_D[0] = (tlb_44x_patch_hwater_D[0] & 0xffff0000) |
		tlb_44x_hwater;
	flush_icache_range((unsigned long)&tlb_44x_patch_hwater_D[0],
			   (unsigned long)&tlb_44x_patch_hwater_D[1]);
	tlb_44x_patch_hwater_I[0] = (tlb_44x_patch_hwater_I[0] & 0xffff0000) |
		tlb_44x_hwater;
	flush_icache_range((unsigned long)&tlb_44x_patch_hwater_I[0],
			   (unsigned long)&tlb_44x_patch_hwater_I[1]);
}
  63
/*
 * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 44x type MMU
 *
 * The entry is allocated by lowering the watermark (tlb_44x_hwater--),
 * which places it above the range the SW replacement handler will ever
 * touch, then re-patching the handlers via ppc44x_update_tlb_hwater().
 * The three TLB words are written directly with tlbwe:
 *   PAGEID: virt | valid | 256M page size
 *   XLAT:   phys
 *   ATTRIB: supervisor R/W/X, guarded
 * NOTE(review): MMUCR is cleared first, presumably so the write is
 * against translation ID 0 — confirm against the 440 core manual.
 */
static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
{
	/* Take the slot just above the (new) watermark */
	unsigned int entry = tlb_44x_hwater--;

	ppc44x_update_tlb_hwater();

	mtspr(SPRN_MMUCR, 0);

	__asm__ __volatile__(
		"tlbwe	%2,%3,%4\n"	/* word 0: PAGEID */
		"tlbwe	%1,%3,%5\n"	/* word 1: XLAT   */
		"tlbwe	%0,%3,%6\n"	/* word 2: ATTRIB */
	:
	: "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
	  "r" (phys),
	  "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M),
	  "r" (entry),
	  "i" (PPC44x_TLB_PAGEID),
	  "i" (PPC44x_TLB_XLAT),
	  "i" (PPC44x_TLB_ATTRIB));
}
  88
  89static int __init ppc47x_find_free_bolted(void)
  90{
  91        unsigned int mmube0 = mfspr(SPRN_MMUBE0);
  92        unsigned int mmube1 = mfspr(SPRN_MMUBE1);
  93
  94        if (!(mmube0 & MMUBE0_VBE0))
  95                return 0;
  96        if (!(mmube0 & MMUBE0_VBE1))
  97                return 1;
  98        if (!(mmube0 & MMUBE0_VBE2))
  99                return 2;
 100        if (!(mmube1 & MMUBE1_VBE3))
 101                return 3;
 102        if (!(mmube1 & MMUBE1_VBE4))
 103                return 4;
 104        if (!(mmube1 & MMUBE1_VBE5))
 105                return 5;
 106        return -1;
 107}
 108
 109static void __init ppc47x_update_boltmap(void)
 110{
 111        unsigned int mmube0 = mfspr(SPRN_MMUBE0);
 112        unsigned int mmube1 = mfspr(SPRN_MMUBE1);
 113
 114        if (mmube0 & MMUBE0_VBE0)
 115                __set_bit((mmube0 >> MMUBE0_IBE0_SHIFT) & 0xff,
 116                          tlb_47x_boltmap);
 117        if (mmube0 & MMUBE0_VBE1)
 118                __set_bit((mmube0 >> MMUBE0_IBE1_SHIFT) & 0xff,
 119                          tlb_47x_boltmap);
 120        if (mmube0 & MMUBE0_VBE2)
 121                __set_bit((mmube0 >> MMUBE0_IBE2_SHIFT) & 0xff,
 122                          tlb_47x_boltmap);
 123        if (mmube1 & MMUBE1_VBE3)
 124                __set_bit((mmube1 >> MMUBE1_IBE3_SHIFT) & 0xff,
 125                          tlb_47x_boltmap);
 126        if (mmube1 & MMUBE1_VBE4)
 127                __set_bit((mmube1 >> MMUBE1_IBE4_SHIFT) & 0xff,
 128                          tlb_47x_boltmap);
 129        if (mmube1 & MMUBE1_VBE5)
 130                __set_bit((mmube1 >> MMUBE1_IBE5_SHIFT) & 0xff,
 131                          tlb_47x_boltmap);
 132}
 133
/*
 * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU
 *
 * Unlike 44x there is no watermark; the 47x has dedicated bolted
 * slots.  We grab a free one (BUG if all six are taken) and issue the
 * tlbwe with the bolted bit set in rA so the entry cannot be evicted.
 */
static void ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
{
	unsigned int rA;
	int bolted;

	/* Base rA is HW way select, way 0, bolted bit set */
	rA = 0x88000000;

	/* Look for a bolted entry slot */
	bolted = ppc47x_find_free_bolted();
	BUG_ON(bolted < 0);

	/* Insert bolted slot number */
	rA |= bolted << 24;

	pr_debug("256M TLB entry for 0x%08x->0x%08x in bolt slot %d\n",
		 virt, phys, bolted);

	mtspr(SPRN_MMUCR, 0);

	/* Write the three TLB words: %2 = word 0 (EPN/valid/256M),
	 * %1 = word 1 (RPN), %0 = word 2 (supervisor RWX; plus M on
	 * SMP so the mapping is coherent). */
	__asm__ __volatile__(
		"tlbwe	%2,%3,0\n"
		"tlbwe	%1,%3,1\n"
		"tlbwe	%0,%3,2\n"
		:
		: "r" (PPC47x_TLB2_SW | PPC47x_TLB2_SR |
		       PPC47x_TLB2_SX
#ifdef CONFIG_SMP
		       | PPC47x_TLB2_M
#endif
		       ),
		  "r" (phys),
		  "r" (virt | PPC47x_TLB0_VALID | PPC47x_TLB0_256M),
		  "r" (rA));
}
 172
/*
 * Early MMU hardware init for 44x/47x: sync the instruction-patched
 * watermark with tlb_44x_hwater, then flush the whole icache so the
 * patched miss handlers are visible to instruction fetch.
 */
void __init MMU_init_hw(void)
{
	/* This is not useful on 47x but won't hurt either */
	ppc44x_update_tlb_hwater();

	flush_instruction_cache();
}
 180
 181unsigned long __init mmu_mapin_ram(unsigned long top)
 182{
 183        unsigned long addr;
 184        unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);
 185
 186        /* Pin in enough TLBs to cover any lowmem not covered by the
 187         * initial 256M mapping established in head_44x.S */
 188        for (addr = memstart + PPC_PIN_SIZE; addr < lowmem_end_addr;
 189             addr += PPC_PIN_SIZE) {
 190                if (mmu_has_feature(MMU_FTR_TYPE_47x))
 191                        ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
 192                else
 193                        ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
 194        }
 195        if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
 196                ppc47x_update_boltmap();
 197
 198#ifdef DEBUG
 199                {
 200                        int i;
 201
 202                        printk(KERN_DEBUG "bolted entries: ");
 203                        for (i = 0; i < 255; i++) {
 204                                if (test_bit(i, tlb_47x_boltmap))
 205                                        printk("%d ", i);
 206                        }
 207                        printk("\n");
 208                }
 209#endif /* DEBUG */
 210        }
 211        return total_lowmem;
 212}
 213
 214void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 215                                phys_addr_t first_memblock_size)
 216{
 217        u64 size;
 218
 219#ifndef CONFIG_NONSTATIC_KERNEL
 220        /* We don't currently support the first MEMBLOCK not mapping 0
 221         * physical on those processors
 222         */
 223        BUG_ON(first_memblock_base != 0);
 224#endif
 225
 226        /* 44x has a 256M TLB entry pinned at boot */
 227        size = (min_t(u64, first_memblock_size, PPC_PIN_SIZE));
 228        memblock_set_current_limit(first_memblock_base + size);
 229}
 230
 231#ifdef CONFIG_SMP
/*
 * Pin the lowmem TLB entries on a secondary CPU during SMP bring-up.
 * Mirrors the pinning loop in mmu_mapin_ram(), but for this CPU's TLB.
 *
 * @cpu: logical CPU number (currently unused)
 */
void mmu_init_secondary(int cpu)
{
	unsigned long addr;
	unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);

	/* Pin in enough TLBs to cover any lowmem not covered by the
	 * initial 256M mapping established in head_44x.S
	 *
	 * WARNING: This is called with only the first 256M of the
	 * linear mapping in the TLB and we can't take faults yet
	 * so beware of what this code uses. It runs off a temporary
	 * stack. current (r2) isn't initialized, smp_processor_id()
	 * will not work, current thread info isn't accessible, ...
	 */
	for (addr = memstart + PPC_PIN_SIZE; addr < lowmem_end_addr;
	     addr += PPC_PIN_SIZE) {
		if (mmu_has_feature(MMU_FTR_TYPE_47x))
			ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
		else
			ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
	}
}
 254#endif /* CONFIG_SMP */
 255