linux/arch/powerpc/mm/slb.c
/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <linux/compiler.h>
#include <asm/udbg.h>
#include <asm/code-patching.h>

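/* Low-level SLB miss handlers, implemented in assembler (slb_low.S). */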
extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
        /* Currently, we do real mode for all SLBs including user, but
         * that will change if we bring back dynamic VSIDs
         */
        slb_allocate_realmode(ea);
}

#define slb_esid_mask(ssize)    \
        (((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)

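/* Build the ESID (high) word of an SLB entry: effective segment, valid bit
 * and bolted slot index. */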
static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
                                         unsigned long slot)
{
        return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
}

#define slb_vsid_shift(ssize)   \
        ((ssize) == MMU_SEGSIZE_256M? SLB_VSID_SHIFT: SLB_VSID_SHIFT_1T)

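/* Build the VSID (low) word of an SLB entry: the kernel VSID shifted into
 * place for the segment size, plus flags and the segment size encoding. */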
static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
                                         unsigned long flags)
{
        return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
                ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}

static inline void slb_shadow_update(unsigned long ea, int ssize,
                                     unsigned long flags,
                                     unsigned long entry)
{
        /*
         * Clear the ESID first so the entry is not valid while we are
         * updating it.  No write barriers are needed here, provided
         * we only update the current CPU's SLB shadow buffer.
         */
        get_slb_shadow()->save_area[entry].esid = 0;
        get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags);
        get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry);
}

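/* Invalidate a shadow buffer entry by clearing its ESID (and with it
 * SLB_ESID_V). */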
static inline void slb_shadow_clear(unsigned long entry)
{
        get_slb_shadow()->save_area[entry].esid = 0;
}

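/* Install a bolted SLB entry, keeping the shadow buffer in sync. */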
static inline void create_shadowed_slbe(unsigned long ea, int ssize,
                                        unsigned long flags,
                                        unsigned long entry)
{
        /*
         * Updating the shadow buffer before writing the SLB ensures
         * we don't get a stale entry here if we get preempted by PHYP
         * between these two statements.
         */
        slb_shadow_update(ea, ssize, flags, entry);

        asm volatile("slbmte  %0,%1" :
                     : "r" (mk_vsid_data(ea, ssize, flags)),
                       "r" (mk_esid_data(ea, ssize, entry))
                     : "memory" );
}

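/* Flush the SLB and re-bolt the vmalloc (slot 1) and kernel stack (slot 2)
 * entries. */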
static void __slb_flush_and_rebolt(void)
{
        /* If you change this, make sure you change SLB_NUM_BOLTED
         * appropriately too. */
        unsigned long linear_llp, vmalloc_llp, lflags, vflags;
        unsigned long ksp_esid_data, ksp_vsid_data;

        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, 2);
        if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
                ksp_esid_data &= ~SLB_ESID_V;
                ksp_vsid_data = 0;
                slb_shadow_clear(2);
        } else {
                /* Update stack entry; others don't change */
                slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
                ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
        }

        /* We need to do this all in asm, so we're sure we don't touch
         * the stack between the slbia and rebolting it. */
        asm volatile("isync\n"
                     "slbia\n"
                     /* Slot 1 - first VMALLOC segment */
                     "slbmte    %0,%1\n"
                     /* Slot 2 - kernel stack */
                     "slbmte    %2,%3\n"
                     "isync"
                     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
                        "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, 1)),
                        "r"(ksp_vsid_data),
                        "r"(ksp_esid_data)
                     : "memory");
}

void slb_flush_and_rebolt(void)
{
        WARN_ON(!irqs_disabled());

        /*
         * We can't take a PMU exception in the following code, so hard
         * disable interrupts.
         */
        hard_irq_disable();

        __slb_flush_and_rebolt();
        get_paca()->slb_cache_ptr = 0;
}

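/* Refresh the bolted vmalloc entry (slot 1) with the current vmalloc segment
 * flags, then flush and re-bolt the SLB. */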
void slb_vmalloc_update(void)
{
        unsigned long vflags;

        vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
        slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
        slb_flush_and_rebolt();
}

/* Helper function to compare esids.  There are four cases to handle.
 * 1. The system is not 1T segment size capable.  Use the GET_ESID compare.
 * 2. The system is 1T capable, both addresses are < 1T.  Use the GET_ESID
 *    compare.
 * 3. The system is 1T capable, only one of the two addresses is >= 1T.
 *    This is not a match.
 * 4. The system is 1T capable, both addresses are >= 1T.  Use the
 *    GET_ESID_1T macro to compare.
 */
static inline int esids_match(unsigned long addr1, unsigned long addr2)
{
        int esid_1t_count;

        /* System is not 1T segment size capable. */
        if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
                return (GET_ESID(addr1) == GET_ESID(addr2));

        esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
                                ((addr2 >> SID_SHIFT_1T) != 0));

        /* Both addresses are < 1T */
        if (esid_1t_count == 0)
                return (GET_ESID(addr1) == GET_ESID(addr2));

        /* One address < 1T, the other >= 1T.  Not a match */
        if (esid_1t_count == 1)
                return 0;

        /* Both addresses are >= 1T. */
        return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
}

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
        unsigned long offset;
        unsigned long slbie_data = 0;
        unsigned long pc = KSTK_EIP(tsk);
        unsigned long stack = KSTK_ESP(tsk);
        unsigned long exec_base;

        /*
         * We need interrupts hard-disabled here, not just soft-disabled,
         * so that a PMU interrupt can't occur, which might try to access
         * user memory (to get a stack trace) and possibly cause an SLB miss
         * which would update the slb_cache/slb_cache_ptr fields in the PACA.
         */
        hard_irq_disable();
        offset = get_paca()->slb_cache_ptr;
        if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
            offset <= SLB_CACHE_ENTRIES) {
                int i;
                asm volatile("isync" : : : "memory");
                for (i = 0; i < offset; i++) {
                        slbie_data = (unsigned long)get_paca()->slb_cache[i]
                                << SID_SHIFT; /* EA */
                        slbie_data |= user_segment_size(slbie_data)
                                << SLBIE_SSIZE_SHIFT;
                        slbie_data |= SLBIE_C; /* C set for user addresses */
                        asm volatile("slbie %0" : : "r" (slbie_data));
                }
                asm volatile("isync" : : : "memory");
        } else {
                __slb_flush_and_rebolt();
        }

        /* Workaround POWER5 < DD2.1 issue */
        if (offset == 1 || offset > SLB_CACHE_ENTRIES)
                asm volatile("slbie %0" : : "r" (slbie_data));

        get_paca()->slb_cache_ptr = 0;
        get_paca()->context = mm->context;

        /*
         * Preload some userspace segments into the SLB.
         * Almost all 32-bit and 64-bit PowerPC executables are linked at
         * 0x10000000, so it makes sense to preload this segment.
         */
        exec_base = 0x10000000;

        if (is_kernel_addr(pc) || is_kernel_addr(stack) ||
            is_kernel_addr(exec_base))
                return;

        slb_allocate(pc);

        if (!esids_match(pc, stack))
                slb_allocate(stack);

        if (!esids_match(pc, exec_base) &&
            !esids_match(stack, exec_base))
                slb_allocate(exec_base);
}

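/* Rewrite the low 16-bit immediate field of the instruction at insn_addr. */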
static inline void patch_slb_encoding(unsigned int *insn_addr,
                                      unsigned int immed)
{
        int insn = (*insn_addr & 0xffff0000) | immed;
        patch_instruction(insn_addr, insn);
}

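/* Record the new SLB size and patch it into the SLB miss handler's
 * round-robin comparison. */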
void slb_set_size(u16 size)
{
        extern unsigned int *slb_compare_rr_to_size;

        if (mmu_slb_size == size)
                return;

        mmu_slb_size = size;
        patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size);
}

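/* Set up the SLB miss handler encodings for the kernel mappings and create
 * the initial bolted kernel SLB entries (unless the hypervisor has already
 * done so, as on iSeries). */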
void slb_initialize(void)
{
        unsigned long linear_llp, vmalloc_llp, io_llp;
        unsigned long lflags, vflags;
        static int slb_encoding_inited;
        extern unsigned int *slb_miss_kernel_load_linear;
        extern unsigned int *slb_miss_kernel_load_io;
        extern unsigned int *slb_compare_rr_to_size;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        extern unsigned int *slb_miss_kernel_load_vmemmap;
        unsigned long vmemmap_llp;
#endif

        /* Prepare our SLB miss handler based on our page size */
        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        io_llp = mmu_psize_defs[mmu_io_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
        vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
        if (!slb_encoding_inited) {
                slb_encoding_inited = 1;
                patch_slb_encoding(slb_miss_kernel_load_linear,
                                   SLB_VSID_KERNEL | linear_llp);
                patch_slb_encoding(slb_miss_kernel_load_io,
                                   SLB_VSID_KERNEL | io_llp);
                patch_slb_encoding(slb_compare_rr_to_size,
                                   mmu_slb_size);

                pr_devel("SLB: linear  LLP = %04lx\n", linear_llp);
                pr_devel("SLB: io      LLP = %04lx\n", io_llp);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
                patch_slb_encoding(slb_miss_kernel_load_vmemmap,
                                   SLB_VSID_KERNEL | vmemmap_llp);
                pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
        }

        get_paca()->stab_rr = SLB_NUM_BOLTED;

        /* On iSeries the bolted entries have already been set up by
         * the hypervisor from the lparMap data in head.S */
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                return;

        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        /* Invalidate the entire SLB (even slot 0) & all the ERATS */
        asm volatile("isync":::"memory");
        asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
        asm volatile("isync; slbia; isync":::"memory");
        create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, 0);

        create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);

        /* For the boot cpu, we're running on the stack in init_thread_union,
         * which is in the first segment of the linear mapping, and also
         * get_paca()->kstack hasn't been initialized yet.
         * For secondary cpus, we need to bolt the kernel stack entry now.
         */
        slb_shadow_clear(2);
        if (raw_smp_processor_id() != boot_cpuid &&
            (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
                create_shadowed_slbe(get_paca()->kstack,
                                     mmu_kernel_ssize, lflags, 2);

        asm volatile("isync":::"memory");
}