linux/arch/mips/include/asm/mmu_context.h
/*
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/hazards.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#include <asm/smtc.h>
#endif /* CONFIG_MIPS_MT_SMTC */
#include <asm-generic/mm_hooks.h>

#ifdef CONFIG_MIPS_PGD_C0_CONTEXT

#define TLBMISS_HANDLER_SETUP_PGD(pgd)                                  \
        tlbmiss_handler_setup_pgd((unsigned long)(pgd))

extern void tlbmiss_handler_setup_pgd(unsigned long pgd);

#define TLBMISS_HANDLER_SETUP()                                         \
        do {                                                            \
                TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);              \
                write_c0_xcontext((unsigned long) smp_processor_id() << 51); \
        } while (0)

static inline unsigned long get_current_pgd(void)
{
        return PHYS_TO_XKSEG_CACHED((read_c0_context() >> 11) & ~0xfffUL);
}
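
/*
 * Illustrative sketch (not part of this header's API): in this
 * configuration the current PGD's physical address is kept in the upper
 * bits of the CP0 Context register, shifted up by 11, which is exactly
 * what get_current_pgd() above undoes.  Assuming a 4 KiB-aligned pgd:
 *
 *      unsigned long phys = pgd_phys;          // low 12 bits are zero
 *      write_c0_context(phys << 11);           // what the setup stores
 *      // recovery: (read_c0_context() >> 11) & ~0xfffUL == phys
 *
 * The masking is needed because the hardware writes BadVPN2 into the
 * lower bits of Context on a TLB exception.
 */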

#else /* CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current */

/*
 * For the fast TLB miss handlers, we keep a per-CPU array of pointers
 * to the current pgd for each processor.  Also, the processor id is
 * stuffed into the context register.
 */
extern unsigned long pgd_current[];

#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
        pgd_current[smp_processor_id()] = (unsigned long)(pgd)

#ifdef CONFIG_32BIT
#define TLBMISS_HANDLER_SETUP()                                         \
        write_c0_context((unsigned long) smp_processor_id() << 25);     \
        back_to_back_c0_hazard();                                       \
        TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
#endif
#ifdef CONFIG_64BIT
#define TLBMISS_HANDLER_SETUP()                                         \
        write_c0_context((unsigned long) smp_processor_id() << 26);     \
        back_to_back_c0_hazard();                                       \
        TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
#endif
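
/*
 * Hedged note on the differing shift counts (based on how the generated
 * refill handlers in arch/mips/mm/tlbex.c appear to consume the value):
 * the handler reads the CPU id back with a right shift by 23, so a value
 * stored at bit 25 (32-bit) yields cpu * 4 and one stored at bit 26
 * (64-bit) yields cpu * 8, i.e. a ready-made byte offset into
 * pgd_current[] for 4- or 8-byte pointers:
 *
 *      offset = read_c0_context() >> 23;  // cpu * sizeof(unsigned long)
 *      pgd = *(unsigned long *)((char *)pgd_current + offset);
 */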
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

#define ASID_INC        0x40
#define ASID_MASK       0xfc0

#elif defined(CONFIG_CPU_R8000)

#define ASID_INC        0x10
#define ASID_MASK       0xff0

#elif defined(CONFIG_CPU_RM9000)

#define ASID_INC        0x1
#define ASID_MASK       0xfff

/* SMTC/34K debug hack - but maybe we'll keep it */
#elif defined(CONFIG_MIPS_MT_SMTC)

#define ASID_INC        0x1
extern unsigned long smtc_asid_mask;
#define ASID_MASK       (smtc_asid_mask)
#define HW_ASID_MASK    0xff
/* End SMTC/34K debug hack */
#else /* FIXME: not correct for R6000 */

#define ASID_INC        0x1
#define ASID_MASK       0xff

#endif

#define cpu_context(cpu, mm)    ((mm)->context.asid[cpu])
#define cpu_asid(cpu, mm)       (cpu_context((cpu), (mm)) & ASID_MASK)
#define asid_cache(cpu)         (cpu_data[cpu].asid_cache)
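
/*
 * Example with illustrative values (assuming ASID_MASK == 0xff): if
 * cpu_context(cpu, mm) holds 0x304, the software version is 0x300 and
 * cpu_asid(cpu, mm) yields the hardware ASID 0x04, the value actually
 * programmed into EntryHi on a context switch.
 */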

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * All upper bits not used by the hardware ASID are treated as a
 * software ASID extension (a "version" number), so that stale ASIDs
 * can be detected cheaply.
 */
#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
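
/*
 * Worked example (for ASID_MASK == 0xff): ASID_MASK | (ASID_MASK - 1)
 * is 0xff, so ASID_VERSION_MASK is ~0xffUL and ASID_FIRST_VERSION is
 * 0xff + 1 == 0x100.  For the R3000's ASID_MASK of 0xfc0 the OR also
 * covers the unused low bits, giving 0xfff, so the version field starts
 * at bit 12 and ASID_FIRST_VERSION is 0x1000.
 */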

#ifndef CONFIG_MIPS_MT_SMTC
/* Normal, classic MIPS get_new_mmu_context */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
        unsigned long asid = asid_cache(cpu);

        if (!((asid += ASID_INC) & ASID_MASK)) {
                if (cpu_has_vtag_icache)
                        flush_icache_all();
                local_flush_tlb_all();  /* start new asid cycle */
                if (!asid)              /* fix version if needed */
                        asid = ASID_FIRST_VERSION;
        }
        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
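
/*
 * Walk-through of the rollover above (assuming ASID_INC == 1 and
 * ASID_MASK == 0xff): incrementing 0x2ff gives 0x300, whose ASID bits
 * are zero, so the whole TLB is flushed and 0x300 becomes the new
 * version with hardware ASID 0.  Only when the counter wraps all the
 * way around to 0 is it bumped to ASID_FIRST_VERSION, so an mm with
 * cpu_context() == 0 always mismatches the version check in
 * switch_mm() below.
 */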

#else /* CONFIG_MIPS_MT_SMTC */

#define get_new_mmu_context(mm, cpu) smtc_get_new_mmu_context((mm), (cpu))

#endif /* CONFIG_MIPS_MT_SMTC */

/*
 * Initialize the context-related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        int i;

        for_each_online_cpu(i)
                cpu_context(i, mm) = 0;

        return 0;
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned int cpu = smp_processor_id();
        unsigned long flags;
#ifdef CONFIG_MIPS_MT_SMTC
        unsigned long oldasid;
        unsigned long mtflags;
        int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
        local_irq_save(flags);
        mtflags = dvpe();
#else /* Not SMTC */
        local_irq_save(flags);
#endif /* CONFIG_MIPS_MT_SMTC */

        /* Check if our ASID is of an older version and thus invalid */
        if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
                get_new_mmu_context(next, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * If the EntryHi ASID being replaced happens to be
         * the value flagged at ASID recycling time as having
         * an extended life, clear the bit showing it being
         * in use by this "CPU", and if that's the last bit,
         * free up the ASID value for use and flush any old
         * instances of it from the TLB.
         */
        oldasid = read_c0_entryhi() & ASID_MASK;
        if (smtc_live_asid[mytlb][oldasid]) {
                smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
                if (smtc_live_asid[mytlb][oldasid] == 0)
                        smtc_flush_tlb_asid(oldasid);
        }
        /*
         * Tread softly on EntryHi, and so long as we support
         * having ASID_MASK smaller than the hardware maximum,
         * make sure no "soft" bits become "hard"...
         */
        write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
                         cpu_asid(cpu, next));
        ehb(); /* Make sure it propagates to TCStatus */
        evpe(mtflags);
#else
        write_c0_entryhi(cpu_asid(cpu, next));
#endif /* CONFIG_MIPS_MT_SMTC */
        TLBMISS_HANDLER_SETUP_PGD(next->pgd);

        /*
         * Mark current->active_mm as not "active" anymore.
         * We don't want to mislead possible IPI tlb flush routines.
         */
        cpumask_clear_cpu(cpu, mm_cpumask(prev));
        cpumask_set_cpu(cpu, mm_cpumask(next));

        local_irq_restore(flags);
}
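
/*
 * Example of the version check above (illustrative numbers, assuming
 * ASID_MASK == 0xff): with asid_cache(cpu) == 0x304, an mm whose
 * cpu_context() is 0x2f1 differs in the version bits (0x200 vs 0x300),
 * so it is handed a fresh ASID; an mm holding 0x302 matches and its
 * hardware ASID 0x02 is written to EntryHi as-is.
 */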

/*
 * Destroy context-related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
}

#define deactivate_mm(tsk, mm)  do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
        unsigned long flags;
        unsigned int cpu = smp_processor_id();

#ifdef CONFIG_MIPS_MT_SMTC
        unsigned long oldasid;
        unsigned long mtflags;
        int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
#endif /* CONFIG_MIPS_MT_SMTC */

        local_irq_save(flags);

        /* Unconditionally get a new ASID.  */
        get_new_mmu_context(next, cpu);

#ifdef CONFIG_MIPS_MT_SMTC
        /* See comments for similar code above */
        mtflags = dvpe();
        oldasid = read_c0_entryhi() & ASID_MASK;
        if (smtc_live_asid[mytlb][oldasid]) {
                smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
                if (smtc_live_asid[mytlb][oldasid] == 0)
                        smtc_flush_tlb_asid(oldasid);
        }
        /* See comments for similar code above */
        write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
                         cpu_asid(cpu, next));
        ehb(); /* Make sure it propagates to TCStatus */
        evpe(mtflags);
#else
        write_c0_entryhi(cpu_asid(cpu, next));
#endif /* CONFIG_MIPS_MT_SMTC */
        TLBMISS_HANDLER_SETUP_PGD(next->pgd);

        /* mark mmu ownership change */
        cpumask_clear_cpu(cpu, mm_cpumask(prev));
        cpumask_set_cpu(cpu, mm_cpumask(next));

        local_irq_restore(flags);
}

/*
 * If mm is currently active_mm, we can't really drop it.  Instead,
 * we will get a new one for it.
 */
static inline void
drop_mmu_context(struct mm_struct *mm, unsigned cpu)
{
        unsigned long flags;
#ifdef CONFIG_MIPS_MT_SMTC
        unsigned long oldasid;
        /* Can't use spinlock because called from TLB flush within DVPE */
        unsigned int prevvpe;
        int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
#endif /* CONFIG_MIPS_MT_SMTC */

        local_irq_save(flags);

        if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
                get_new_mmu_context(mm, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
                /* See comments for similar code above */
                prevvpe = dvpe();
                oldasid = read_c0_entryhi() & ASID_MASK;
                if (smtc_live_asid[mytlb][oldasid]) {
                        smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
                        if (smtc_live_asid[mytlb][oldasid] == 0)
                                smtc_flush_tlb_asid(oldasid);
                }
                /* See comments for similar code above */
                write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
                                 cpu_asid(cpu, mm));
                ehb(); /* Make sure it propagates to TCStatus */
                evpe(prevvpe);
#else /* not CONFIG_MIPS_MT_SMTC */
                write_c0_entryhi(cpu_asid(cpu, mm));
#endif /* CONFIG_MIPS_MT_SMTC */
        } else {
                /* will get a new context next time */
#ifndef CONFIG_MIPS_MT_SMTC
                cpu_context(cpu, mm) = 0;
#else /* CONFIG_MIPS_MT_SMTC */
                int i;

                /* SMTC shares the TLB (and ASIDs) across VPEs */
                for_each_online_cpu(i) {
                        if ((smtc_status & SMTC_TLB_SHARED) ||
                            (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
                                cpu_context(i, mm) = 0;
                }
#endif /* CONFIG_MIPS_MT_SMTC */
        }
        local_irq_restore(flags);
}
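
/*
 * Usage note (hedged; this reflects the MIPS TLB flush code rather than
 * anything defined in this header): callers such as local_flush_tlb_mm()
 * in arch/mips/mm/tlb-r4k.c invalidate an mm's mappings by retiring its
 * ASID rather than probing the TLB entry by entry:
 *
 *      if (cpu_context(cpu, mm) != 0)
 *              drop_mmu_context(mm, cpu);
 */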

#endif /* _ASM_MMU_CONTEXT_H */