linux/arch/metag/include/asm/mmu_context.h
#ifndef __METAG_MMU_CONTEXT_H
#define __METAG_MMU_CONTEXT_H

#include <asm-generic/mm_hooks.h>

#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include <linux/io.h>

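/*
 * Called when the kernel switches to a kernel thread, which keeps using
 * the previous task's address space: there is no lazy-TLB bookkeeping to
 * do on Meta, so this is a no-op.
 */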
static inline void enter_lazy_tlb(struct mm_struct *mm,
                                  struct task_struct *tsk)
{
}

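/*
 * Initialise the MMU context of a freshly created address space
 * (fork/exec).
 */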
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
#ifndef CONFIG_METAG_META21_MMU
        /* We use context to store a pointer to the page holding the
         * pgd of a process while it is running. While a process is not
         * running the pgd and context fields should be equal.
         */
        mm->context.pgd_base = (unsigned long) mm->pgd;
#endif
#ifdef CONFIG_METAG_USER_TCM
        INIT_LIST_HEAD(&mm->context.tcm);
#endif
        return 0;
}

#ifdef CONFIG_METAG_USER_TCM

#include <linux/slab.h>
#include <asm/tcm.h>

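/*
 * Tear down the MMU context: free any TCM (tightly coupled memory)
 * allocations still owned by this address space.
 */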
static inline void destroy_context(struct mm_struct *mm)
{
        struct tcm_allocation *pos, *n;

        list_for_each_entry_safe(pos, n, &mm->context.tcm, list) {
                tcm_free(pos->tag, pos->addr, pos->size);
                list_del(&pos->list);
                kfree(pos);
        }
}
#else
#define destroy_context(mm)             do { } while (0)
#endif

#ifdef CONFIG_METAG_META21_MMU
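/*
 * Point a hardware thread's MMU at a new page directory by programming
 * the thread's MMU table PHYS0/PHYS1 registers (located through
 * mmu_phys0_addr()/mmu_phys1_addr()): PHYS0 holds the permission bits
 * for the table window, PHYS1 the physical base address of the pgd.
 */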
static inline void load_pgd(pgd_t *pgd, int thread)
{
        unsigned long phys0 = mmu_phys0_addr(thread);
        unsigned long phys1 = mmu_phys1_addr(thread);

        /*
         *  0x900 2Gb address space
         *  The permission bits apply to the MMU table region, which gives
         *  a 2MB window into physical memory. We especially don't want
         *  userland to be able to access this.
         */
        metag_out32(0x900 | _PAGE_CACHEABLE | _PAGE_PRIV | _PAGE_WRITE |
                    _PAGE_PRESENT, phys0);
        /* Set new MMU base address */
        metag_out32(__pa(pgd) & MMCU_TBLPHYS1_ADDR_BITS, phys1);
}
#endif

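/*
 * Switch the MMU over to next's page tables. With the Meta 2.1 MMU
 * (CONFIG_METAG_META21_MMU) every hardware thread has its own table
 * registers, so repointing them at next->pgd is enough. The older MMU
 * has a single live table at mmu_get_base() instead: the outgoing user
 * entries are copied back into prev's shadow pgd (prev->context.pgd_base)
 * and next's entries are copied into the live table, after which the
 * caches and TLB must be flushed.
 */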
static inline void switch_mmu(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_METAG_META21_MMU
        load_pgd(next->pgd, hard_processor_id());
#else
        unsigned int i;

        /* prev->context.pgd_base == prev->pgd in the case where we are
         * initially switching from the init task to the first process.
         */
        if (prev->context.pgd_base != (unsigned long) prev->pgd) {
                for (i = FIRST_USER_PGD_NR; i < USER_PTRS_PER_PGD; i++)
                        ((pgd_t *) prev->context.pgd_base)[i] = prev->pgd[i];
        } else {
                prev->pgd = (pgd_t *)mmu_get_base();
        }

        next->pgd = prev->pgd;
        prev->pgd = (pgd_t *) prev->context.pgd_base;

        for (i = FIRST_USER_PGD_NR; i < USER_PTRS_PER_PGD; i++)
                next->pgd[i] = ((pgd_t *) next->context.pgd_base)[i];

        flush_cache_all();
#endif
        flush_tlb_all();
}

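/*
 * Called by the scheduler on a context switch; the MMU only needs to be
 * reloaded when the two tasks use different address spaces.
 */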
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        if (prev != next)
                switch_mmu(prev, next);
}

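/*
 * Activate a new address space unconditionally, e.g. when exec installs
 * a fresh mm for the current task.
 */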
static inline void activate_mm(struct mm_struct *prev_mm,
                               struct mm_struct *next_mm)
{
        switch_mmu(prev_mm, next_mm);
}

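/* Nothing to do when a task drops an address space. */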
#define deactivate_mm(tsk, mm)   do { } while (0)

#endif /* __METAG_MMU_CONTEXT_H */