linux/arch/powerpc/mm/slb.c
/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <linux/compiler.h>
#include <asm/udbg.h>

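/*
 * The actual SLB miss handlers live in assembler (slb_low.S); these are
 * the hooks the C code uses to create a new SLB entry for a given
 * effective address.
 */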
extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
        /* Currently, we do real mode for all SLBs including user, but
         * that will change if we bring back dynamic VSIDs
         */
        slb_allocate_realmode(ea);
}

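/*
 * An SLB entry is written with the slbmte instruction, which takes two
 * operands: an ESID word holding the effective segment ID, the valid
 * bit and the entry index, and a VSID word holding the virtual segment
 * ID, the segment size and the protection/page-size flags.  The
 * helpers below build those two words.
 */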
#define slb_esid_mask(ssize)    \
        (((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)

static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
                                         unsigned long slot)
{
        return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
}

#define slb_vsid_shift(ssize)   \
        (((ssize) == MMU_SEGSIZE_256M) ? SLB_VSID_SHIFT : SLB_VSID_SHIFT_1T)

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
                                         unsigned long flags)
{
        return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
                ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}

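/*
 * The SLB shadow buffer mirrors the bolted SLB entries.  On pSeries
 * LPARs it is registered with the hypervisor, which uses it to
 * re-establish the bolted entries when the partition is dispatched
 * again, so it must be kept consistent with what we put in the SLB.
 */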
static inline void slb_shadow_update(unsigned long ea, int ssize,
                                     unsigned long flags,
                                     unsigned long entry)
{
        /*
         * Clear the ESID first so the entry is not valid while we are
         * updating it.  No write barriers are needed here, provided
         * we only update the current CPU's SLB shadow buffer.
         */
        get_slb_shadow()->save_area[entry].esid = 0;
        get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags);
        get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry);
}

static inline void slb_shadow_clear(unsigned long entry)
{
        get_slb_shadow()->save_area[entry].esid = 0;
}

static inline void create_shadowed_slbe(unsigned long ea, int ssize,
                                        unsigned long flags,
                                        unsigned long entry)
{
        /*
         * Updating the shadow buffer before writing the SLB ensures
         * we don't get a stale entry here if we get preempted by PHYP
         * between these two statements.
         */
        slb_shadow_update(ea, ssize, flags, entry);

        asm volatile("slbmte  %0,%1" :
                     : "r" (mk_vsid_data(ea, ssize, flags)),
                       "r" (mk_esid_data(ea, ssize, entry))
                     : "memory" );
}

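/*
 * Flush the SLB and recreate the bolted kernel entries.  slbia leaves
 * SLB entry 0 (the kernel linear-mapping segment bolted in
 * slb_initialize()) intact, so only the VMALLOC entry (slot 1) and the
 * kernel stack entry (slot 2) need to be rewritten here.
 */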
static void __slb_flush_and_rebolt(void)
{
        /* If you change this, make sure you change SLB_NUM_BOLTED
         * appropriately too. */
        unsigned long linear_llp, vmalloc_llp, lflags, vflags;
        unsigned long ksp_esid_data, ksp_vsid_data;

        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
        if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
                ksp_esid_data &= ~SLB_ESID_V;
                ksp_vsid_data = 0;
                slb_shadow_clear(2);
        } else {
                /* Update stack entry; others don't change */
                slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
                ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
        }

        /* We need to do this all in asm, so we're sure we don't touch
         * the stack between the slbia and rebolting it. */
        asm volatile("isync\n"
                     "slbia\n"
                     /* Slot 1 - first VMALLOC segment */
                     "slbmte    %0,%1\n"
                     /* Slot 2 - kernel stack */
                     "slbmte    %2,%3\n"
                     "isync"
                     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
                        "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
                        "r"(ksp_vsid_data),
                        "r"(ksp_esid_data)
                     : "memory");
}

void slb_flush_and_rebolt(void)
{
        WARN_ON(!irqs_disabled());

        /*
         * We must not take a PMU exception in the following code, so
         * hard-disable interrupts.
         */
        hard_irq_disable();

        __slb_flush_and_rebolt();
        get_paca()->slb_cache_ptr = 0;
}

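/*
 * Called when the page-size/protection encoding used for the vmalloc
 * region changes (e.g. when the vmalloc page size is demoted), so that
 * the bolted VMALLOC entry in slot 1 is rewritten with the new flags.
 */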
void slb_vmalloc_update(void)
{
        unsigned long vflags;

        vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
        slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
        slb_flush_and_rebolt();
}

/* Helper function to compare esids.  There are four cases to handle.
 * 1. The system is not 1T segment size capable.  Use the GET_ESID compare.
 * 2. The system is 1T capable, both addresses are < 1T, use the GET_ESID compare.
 * 3. The system is 1T capable, only one of the two addresses is >= 1T.  This is not a match.
 * 4. The system is 1T capable, both addresses are >= 1T, use the GET_ESID_1T macro to compare.
 */
static inline int esids_match(unsigned long addr1, unsigned long addr2)
{
        int esid_1t_count;

        /* System is not 1T segment size capable. */
        if (!cpu_has_feature(CPU_FTR_1T_SEGMENT))
                return (GET_ESID(addr1) == GET_ESID(addr2));

        esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
                                ((addr2 >> SID_SHIFT_1T) != 0));

        /* Both addresses are < 1T. */
        if (esid_1t_count == 0)
                return (GET_ESID(addr1) == GET_ESID(addr2));

        /* One address < 1T, the other >= 1T.  Not a match. */
        if (esid_1t_count == 1)
                return 0;

        /* Both addresses are >= 1T. */
        return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
}

/*
 * Flush all user entries from the SLB of the current processor and
 * preload the new task's most likely segments.
 */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
        unsigned long offset;
        unsigned long slbie_data = 0;
        unsigned long pc = KSTK_EIP(tsk);
        unsigned long stack = KSTK_ESP(tsk);
        unsigned long exec_base;

        /*
         * We need interrupts hard-disabled here, not just soft-disabled,
         * so that a PMU interrupt can't occur, which might try to access
         * user memory (to get a stack trace) and possibly cause an SLB miss
         * which would update the slb_cache/slb_cache_ptr fields in the PACA.
         */
        hard_irq_disable();
        offset = get_paca()->slb_cache_ptr;
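        /*
         * paca->slb_cache[] records the user ESIDs entered by the SLB
         * miss handler since the last flush.  If it hasn't overflowed
         * (slb_cache_ptr <= SLB_CACHE_ENTRIES) we can slbie just those
         * segments; otherwise we no longer know what is in the SLB and
         * have to flush everything and rebolt the kernel entries.
         */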
        if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
            offset <= SLB_CACHE_ENTRIES) {
                int i;
                asm volatile("isync" : : : "memory");
                for (i = 0; i < offset; i++) {
                        slbie_data = (unsigned long)get_paca()->slb_cache[i]
                                << SID_SHIFT; /* EA */
                        slbie_data |= user_segment_size(slbie_data)
                                << SLBIE_SSIZE_SHIFT;
                        slbie_data |= SLBIE_C; /* C set for user addresses */
                        asm volatile("slbie %0" : : "r" (slbie_data));
                }
                asm volatile("isync" : : : "memory");
        } else {
                __slb_flush_and_rebolt();
        }

        /* Workaround POWER5 < DD2.1 issue */
        if (offset == 1 || offset > SLB_CACHE_ENTRIES)
                asm volatile("slbie %0" : : "r" (slbie_data));

        get_paca()->slb_cache_ptr = 0;
        get_paca()->context = mm->context;

        /*
         * Preload some userspace segments into the SLB.
         * Almost all 32-bit and 64-bit PowerPC executables are linked at
         * 0x10000000, so it makes sense to preload this segment.
         */
        exec_base = 0x10000000;

        if (is_kernel_addr(pc) || is_kernel_addr(stack) ||
            is_kernel_addr(exec_base))
                return;

        slb_allocate(pc);

        if (!esids_match(pc, stack))
                slb_allocate(stack);

        if (!esids_match(pc, exec_base) &&
            !esids_match(stack, exec_base))
                slb_allocate(exec_base);
}

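/*
 * The assembler SLB miss handler loads some of its constants (the
 * kernel segment flags and the SLB size) as 16-bit immediates.  The
 * instructions to patch are exported as the slb_miss_kernel_load_* and
 * slb_compare_rr_to_size labels; patch_slb_encoding() rewrites the
 * immediate field of such an instruction and flushes the icache so the
 * new value takes effect.
 */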
static inline void patch_slb_encoding(unsigned int *insn_addr,
                                      unsigned int immed)
{
        *insn_addr = (*insn_addr & 0xffff0000) | immed;
        flush_icache_range((unsigned long)insn_addr,
                           (unsigned long)insn_addr + 4);
}

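/*
 * Adjust the number of SLB entries we use, typically when firmware
 * reports a size different from the default.  The round-robin victim
 * selection in the SLB miss handler compares against this value, so
 * the patched instruction has to be updated as well.
 */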
void slb_set_size(u16 size)
{
        extern unsigned int *slb_compare_rr_to_size;

        if (mmu_slb_size == size)
                return;

        mmu_slb_size = size;
        patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size);
}

void slb_initialize(void)
{
        unsigned long linear_llp, vmalloc_llp, io_llp;
        unsigned long lflags, vflags;
        static int slb_encoding_inited;
        extern unsigned int *slb_miss_kernel_load_linear;
        extern unsigned int *slb_miss_kernel_load_io;
        extern unsigned int *slb_compare_rr_to_size;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        extern unsigned int *slb_miss_kernel_load_vmemmap;
        unsigned long vmemmap_llp;
#endif

        /* Prepare our SLB miss handler based on our page size */
        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        io_llp = mmu_psize_defs[mmu_io_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
        if (!slb_encoding_inited) {
                slb_encoding_inited = 1;
                patch_slb_encoding(slb_miss_kernel_load_linear,
                                   SLB_VSID_KERNEL | linear_llp);
                patch_slb_encoding(slb_miss_kernel_load_io,
                                   SLB_VSID_KERNEL | io_llp);
                patch_slb_encoding(slb_compare_rr_to_size,
                                   mmu_slb_size);

                pr_devel("SLB: linear  LLP = %04lx\n", linear_llp);
                pr_devel("SLB: io      LLP = %04lx\n", io_llp);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
                patch_slb_encoding(slb_miss_kernel_load_vmemmap,
                                   SLB_VSID_KERNEL | vmemmap_llp);
                pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
        }

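        /*
         * Start the round-robin victim pointer just past the bolted
         * entries so the SLB miss handler never casts them out.
         */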
        get_paca()->stab_rr = SLB_NUM_BOLTED;

        /* On iSeries the bolted entries have already been set up by
         * the hypervisor from the lparMap data in head.S */
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                return;

        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        /* Invalidate the entire SLB (even slot 0) & all the ERATS */
        asm volatile("isync":::"memory");
        asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
        asm volatile("isync; slbia; isync":::"memory");
        create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);

        create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);

        /* For the boot cpu, we're running on the stack in init_thread_union,
         * which is in the first segment of the linear mapping, and also
         * get_paca()->kstack hasn't been initialized yet.
         * For secondary cpus, we need to bolt the kernel stack entry now.
         */
        slb_shadow_clear(2);
        if (raw_smp_processor_id() != boot_cpuid &&
            (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
                create_shadowed_slbe(get_paca()->kstack,
                                     mmu_kernel_ssize, lflags, 2);

        asm volatile("isync":::"memory");
}