linux/arch/xtensa/include/asm/mmu_context.h
/*
 * arch/xtensa/include/asm/mmu_context.h
 *
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_MMU_CONTEXT_H
#define _XTENSA_MMU_CONTEXT_H

#ifndef CONFIG_MMU
#include <asm/nommu_context.h>
#else

#include <linux/stringify.h>
#include <linux/sched.h>

#include <variant/core.h>

#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>

#if (XCHAL_HAVE_TLBS != 1)
# error "Linux must have an MMU!"
#endif

extern unsigned long asid_cache;

/*
 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
 * any user or kernel context.
 *
 * 0 invalid
 * 1 kernel
 * 2 reserved
 * 3 reserved
 * 4...255 available
 */

#define NO_CONTEXT      0
#define ASID_USER_FIRST 4
#define ASID_MASK       ((1 << XCHAL_MMU_ASID_BITS) - 1)
#define ASID_INSERT(x)  (0x03020001 | (((x) & ASID_MASK) << 8))
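
/*
 * Worked example (editorial note, assuming XCHAL_MMU_ASID_BITS == 8, so
 * ASID_MASK == 0xff): the RASID special register holds one ASID per
 * protection ring, one byte each, and ASID_INSERT() only ever varies the
 * ring-1 (user) byte:
 *
 *      ASID_INSERT(0x20) == 0x03020001 | (0x20 << 8) == 0x03022001
 *
 *      byte 0 (ring 0): 0x01  kernel
 *      byte 1 (ring 1): 0x20  current user ASID
 *      byte 2 (ring 2): 0x02  reserved
 *      byte 3 (ring 3): 0x03  reserved
 */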

static inline void set_rasid_register (unsigned long val)
{
        __asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
                              " isync\n" : : "a" (val));
}

static inline unsigned long get_rasid_register (void)
{
        unsigned long tmp;
        __asm__ __volatile__ (" rsr %0,"__stringify(RASID)"\n\t" : "=a" (tmp));
        return tmp;
}
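
/*
 * Hypothetical sketch (not part of the original header): given the RASID
 * layout produced by ASID_INSERT() above, a debug helper could read back
 * the current user-ring ASID with:
 *
 *      unsigned long user_asid = (get_rasid_register() >> 8) & ASID_MASK;
 */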

static inline void
__get_new_mmu_context(struct mm_struct *mm)
{
        extern void flush_tlb_all(void);
        if (! (++asid_cache & ASID_MASK) ) {
                flush_tlb_all(); /* start new asid cycle */
                asid_cache += ASID_USER_FIRST;
        }
        mm->context = asid_cache;
}
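
/*
 * How the ASID cycle works (derived from the code above): asid_cache is a
 * single counter whose low XCHAL_MMU_ASID_BITS bits are the ASID handed to
 * an mm, and whose upper bits act as a generation number.  When the low
 * bits wrap to zero, every previously handed-out ASID may be reused, so the
 * whole TLB is flushed and the counter is advanced by ASID_USER_FIRST to
 * skip the reserved values 0..3.  Example with 8 ASID bits:
 *
 *      asid_cache == 0x1ff:  ++asid_cache == 0x200, low byte wraps to 0,
 *      flush_tlb_all(), asid_cache += 4 -> 0x204,
 *      mm->context = 0x204   (generation 2, ASID 4)
 */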

static inline void
__load_mmu_context(struct mm_struct *mm)
{
        set_rasid_register(ASID_INSERT(mm->context));
        invalidate_page_directory();
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        mm->context = NO_CONTEXT;
        return 0;
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
        /* Unconditionally get a new ASID.  */

        __get_new_mmu_context(next);
        __load_mmu_context(next);
}
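
/*
 * Editorial note: unlike switch_mm() below, no generation check is needed
 * here; activate_mm() is used when a freshly created mm replaces the old
 * one (e.g. at exec time), so unconditionally allocating a new ASID is
 * always correct.
 */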


static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned long asid = asid_cache;

        /* Check if our ASID is of an older version and thus invalid */

        if (next->context == NO_CONTEXT || ((next->context^asid) & ~ASID_MASK))
                __get_new_mmu_context(next);

        __load_mmu_context(next);
}
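
/*
 * The check above compares only the upper, non-ASID bits:
 * (next->context ^ asid_cache) & ~ASID_MASK is non-zero exactly when
 * next->context was allocated in an earlier generation, i.e. before the
 * last flush_tlb_all() in __get_new_mmu_context(), so its ASID may since
 * have been handed to another mm and must not be reused.
 */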

#define deactivate_mm(tsk, mm)  do { } while(0)

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
        invalidate_page_directory();
}


static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
        /* Nothing to do. */

}

#endif /* CONFIG_MMU */
#endif /* _XTENSA_MMU_CONTEXT_H */