linux/arch/xtensa/include/asm/mmu_context.h
/*
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2013 Tensilica Inc.
 */

#ifndef _XTENSA_MMU_CONTEXT_H
#define _XTENSA_MMU_CONTEXT_H

#ifndef CONFIG_MMU
#include <asm/nommu_context.h>
#else

#include <linux/stringify.h>
#include <linux/sched.h>

#include <asm/vectors.h>

#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
#include <asm-generic/percpu.h>

#if (XCHAL_HAVE_TLBS != 1)
# error "Linux must have an MMU!"
#endif

DECLARE_PER_CPU(unsigned long, asid_cache);
#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)

/*
 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
 * any user or kernel context.  We use the reserved values in the
 * ASID_INSERT macro below.
 *
 * 0 invalid
 * 1 kernel
 * 2 reserved
 * 3 reserved
 * 4...255 available
 */

#define NO_CONTEXT      0
#define ASID_USER_FIRST 4
#define ASID_MASK       ((1 << XCHAL_MMU_ASID_BITS) - 1)
#define ASID_INSERT(x)  (0x03020001 | (((x) & ASID_MASK) << 8))
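
/*
 * Illustration (a sketch, assuming the common XCHAL_MMU_ASID_BITS == 8
 * configuration): the RASID register holds one ASID byte per ring, and
 * ASID_INSERT() fills it as follows:
 *
 *   ASID_INSERT(0x14) == 0x03021401
 *     bits  7..0  = 0x01  ring 0 (kernel)
 *     bits 15..8  = 0x14  ring 1 (the user ASID, masked by ASID_MASK)
 *     bits 23..16 = 0x02  ring 2 (reserved)
 *     bits 31..24 = 0x03  ring 3 (reserved)
 */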

void init_mmu(void);

static inline void set_rasid_register(unsigned long val)
{
        __asm__ __volatile__ (" wsr %0, rasid\n\t"
                              " isync\n" : : "a" (val));
}

static inline unsigned long get_rasid_register(void)
{
        unsigned long tmp;
        __asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp));
        return tmp;
}

static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
        unsigned long asid = cpu_asid_cache(cpu);

        if ((++asid & ASID_MASK) == 0) {
                /*
                 * Start a new ASID cycle: flush the TLB and continue
                 * counting with the next generation bits, skipping over
                 * the reserved values 0, 1, 2 and 3.
                 */
                local_flush_tlb_all();
                asid += ASID_USER_FIRST;
        }
        cpu_asid_cache(cpu) = asid;
        mm->context.asid[cpu] = asid;
        mm->context.cpu = cpu;
}
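
/*
 * Worked example (assuming an 8-bit ASID field): the per-CPU cache
 * counts monotonically, so 0x1ff ("generation 1, ASID 255") increments
 * to 0x200, whose low byte is 0.  The TLB is flushed and the value is
 * bumped to 0x204: generation 2, first available user ASID.
 */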

static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
        /*
         * Check if our ASID is of an older version and thus invalid.
         */

        if (mm) {
                unsigned long asid = mm->context.asid[cpu];

                if (asid == NO_CONTEXT ||
                                ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
                        get_new_mmu_context(mm, cpu);
        }
}
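
/*
 * Note: the XOR in get_mmu_context() compares only the generation bits
 * (~ASID_MASK), so an mm whose ASID was handed out before the last
 * per-CPU TLB flush gets a fresh one on its next activation.
 */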

static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
{
        get_mmu_context(mm, cpu);
        set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
        invalidate_page_directory();
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.  Valid cpu values are 0..(NR_CPUS-1), so initializing
 * to -1 says the process has never run on any core.
 */

static inline int init_new_context(struct task_struct *tsk,
                struct mm_struct *mm)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                mm->context.asid[cpu] = NO_CONTEXT;
        }
        mm->context.cpu = -1;
        return 0;
}
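
/*
 * For reference (a sketch, not a definition from this file): the
 * mm_context_t these helpers manipulate lives in <asm/mmu.h> and
 * carries a per-CPU ASID array plus the CPU the mm last ran on,
 * roughly:
 *
 *      typedef struct {
 *              unsigned long asid[NR_CPUS];
 *              unsigned int cpu;
 *      } mm_context_t;
 */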

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned int cpu = smp_processor_id();
        int migrated = next->context.cpu != cpu;

        /* Flush the icache if we migrated to a new core. */
        if (migrated) {
                __invalidate_icache_all();
                next->context.cpu = cpu;
        }
        if (migrated || prev != next)
                activate_context(next, cpu);
}
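
/*
 * Rationale sketch: instruction caches are not kept coherent between
 * cores here, so a task that last ran elsewhere may see stale code;
 * invalidating the icache on migration (above) avoids that before the
 * new context is activated.
 */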

#define activate_mm(prev, next) switch_mm((prev), (next), NULL)
#define deactivate_mm(tsk, mm)  do { } while (0)

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
        invalidate_page_directory();
}

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
        /* Nothing to do. */
}

#endif /* CONFIG_MMU */
#endif /* _XTENSA_MMU_CONTEXT_H */