linux/arch/sh/include/asm/mmu_context.h
/*
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 - 2007 Paul Mundt
 *
 * ASID handling idea taken from the MIPS implementation.
 */
#ifndef __ASM_SH_MMU_CONTEXT_H
#define __ASM_SH_MMU_CONTEXT_H

#ifdef __KERNEL__
#include <cpu/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm-generic/mm_hooks.h>

/*
 * The MMU "context" consists of two things:
 *    (a) the TLB cache version (also called a round or cycle)
 *    (b) the ASID (Address Space IDentifier)
 */
#ifdef CONFIG_CPU_HAS_PTEAEX
#define MMU_CONTEXT_ASID_MASK           0x0000ffff
#else
#define MMU_CONTEXT_ASID_MASK           0x000000ff
#endif

#define MMU_CONTEXT_VERSION_MASK        (~0UL & ~MMU_CONTEXT_ASID_MASK)
#define MMU_CONTEXT_FIRST_VERSION       (MMU_CONTEXT_ASID_MASK + 1)
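
/*
 * Illustrative decode (editorial addition, not in the original
 * header): the ASID occupies the low bits and the version the
 * remaining high bits of a single unsigned long. With the 8-bit
 * ASID field, a context value of 0x00000305 splits as
 *
 *   version = 0x00000305 & MMU_CONTEXT_VERSION_MASK  -> 0x00000300
 *   asid    = 0x00000305 & MMU_CONTEXT_ASID_MASK     -> 0x05
 */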

/* Impossible ASID value, to differentiate from NO_CONTEXT. */
#define MMU_NO_ASID                     MMU_CONTEXT_FIRST_VERSION
#define NO_CONTEXT                      0UL

#define asid_cache(cpu)         (cpu_data[cpu].asid_cache)

#ifdef CONFIG_MMU
#define cpu_context(cpu, mm)    ((mm)->context.id[cpu])

#define cpu_asid(cpu, mm)       \
        (cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)

/*
 * Virtual Page Number mask (the low 12 bits are the in-page
 * offset with 4 KiB pages)
 */
#define MMU_VPN_MASK    0xfffff000

#if defined(CONFIG_SUPERH32)
#include "mmu_context_32.h"
#else
#include "mmu_context_64.h"
#endif
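
/*
 * Editorial note: the cpu-specific header included above is assumed
 * to provide the low-level set_asid()/get_asid() and
 * set_TTB()/get_TTB() primitives used by the helpers below.
 */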

/*
 * Get MMU context if needed.
 */
static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
        unsigned long asid = asid_cache(cpu);

        /* Check if we have an old version of the context. */
        if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
                /* It's up to date, do nothing */
                return;

        /* It's old, we need to get a new context with a new version. */
        if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
                /*
                 * We have exhausted the ASIDs of this version.
                 * Flush the entire TLB and start a new cycle.
                 */
                local_flush_tlb_all();

#ifdef CONFIG_SUPERH64
                /*
                 * The SH-5 cache uses the ASIDs, requiring both the I and D
                 * caches to be flushed when the ASIDs are exhausted. Weak.
                 */
                flush_cache_all();
#endif

                /*
                 * Fix the version; note that we avoid version #0
                 * to distinguish it from NO_CONTEXT.
                 */
                if (!asid)
                        asid = MMU_CONTEXT_FIRST_VERSION;
        }

        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
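
/*
 * Illustrative walk-through (editorial, 8-bit ASID case): suppose
 * asid_cache(cpu) holds 0x2ff. The increment above yields 0x300,
 * whose ASID bits are all zero, so the whole TLB is flushed and the
 * cycle restarts with version 0x300, ASID 0x00. Any mm whose
 * cpu_context() still carries version 0x200 now fails the version
 * check on its next switch and is handed a fresh ASID.
 */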

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        int i;

        for (i = 0; i < num_online_cpus(); i++)
                cpu_context(i, mm) = NO_CONTEXT;

        return 0;
}
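
/*
 * Editorial note: starting from NO_CONTEXT guarantees that the
 * version check in get_mmu_context() fails on the first switch to
 * this mm, so a real ASID is only allocated lazily, per CPU.
 */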

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
{
        get_mmu_context(mm, cpu);
        set_asid(cpu_asid(cpu, mm));
}

static inline void switch_mm(struct mm_struct *prev,
                             struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned int cpu = smp_processor_id();

        if (likely(prev != next)) {
                cpumask_set_cpu(cpu, mm_cpumask(next));
                set_TTB(next->pgd);
                activate_context(next, cpu);
        } else
                if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)))
                        activate_context(next, cpu);
}
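
/*
 * Editorial note: the prev != next path installs both the new page
 * table base and the ASID. The else path covers prev == next with
 * this CPU not yet in the mm's cpumask (e.g. the first time the mm
 * runs here), where only the ASID still needs to be programmed.
 */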

#define activate_mm(prev, next)         switch_mm((prev),(next),NULL)
#define deactivate_mm(tsk,mm)           do { } while (0)
#define enter_lazy_tlb(mm,tsk)          do { } while (0)

#else

#define set_asid(asid)                  do { } while (0)
#define get_asid()                      (0)
#define cpu_asid(cpu, mm)               ({ (void)cpu; NO_CONTEXT; })
#define switch_and_save_asid(asid)      (0)
#define set_TTB(pgd)                    do { } while (0)
#define get_TTB()                       (0)

#include <asm-generic/mmu_context.h>

#endif /* CONFIG_MMU */
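
/*
 * Editorial note: on nommu configurations the stubs above let the
 * shared code compile unchanged, with asm-generic/mmu_context.h
 * assumed to supply no-op versions of the mm switching hooks.
 */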

#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
/*
 * If this processor has an MMU, we need methods to turn it on/off.
 * paging_init() will also have to be updated for the processor in
 * question.
 */
static inline void enable_mmu(void)
{
        unsigned int cpu = smp_processor_id();

        /* Enable MMU */
        ctrl_outl(MMU_CONTROL_INIT, MMUCR);
        ctrl_barrier();

        if (asid_cache(cpu) == NO_CONTEXT)
                asid_cache(cpu) = MMU_CONTEXT_FIRST_VERSION;

        set_asid(asid_cache(cpu) & MMU_CONTEXT_ASID_MASK);
}

static inline void disable_mmu(void)
{
        unsigned long cr;

        /*
         * Clear the MMUCR bits set at enable time, turning address
         * translation back off.
         */
        cr = ctrl_inl(MMUCR);
        cr &= ~MMU_CONTROL_INIT;
        ctrl_outl(cr, MMUCR);

        ctrl_barrier();
}
#else
/*
 * MMU control handlers for processors lacking memory
 * management hardware.
 */
#define enable_mmu()    do { } while (0)
#define disable_mmu()   do { } while (0)
#endif

#endif /* __KERNEL__ */
#endif /* __ASM_SH_MMU_CONTEXT_H */