/*
 * linux/arch/unicore32/include/asm/mmu_context.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 GUAN Xue-tao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __UNICORE_MMU_CONTEXT_H__
#define __UNICORE_MMU_CONTEXT_H__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/cpu-single.h>

#define init_new_context(tsk, mm)	0

#define destroy_context(mm)		do { } while (0)
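
/*
 * Note: this port keeps no per-mm hardware context (no ASID-style
 * address space tags), so the two hooks above are trivial:
 * init_new_context() simply reports success and destroy_context()
 * has nothing to tear down.
 */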

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
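
/*
 * Illustration only (not part of the original header): how the generic
 * scheduler ends up here.  A minimal sketch of the pattern used by
 * context_switch() in kernel/sched/core.c -- when the incoming task is
 * a kernel thread with no mm of its own, it borrows the previous mm
 * and enters lazy TLB mode.  Names and error handling are abridged, so
 * treat this as documentation only.
 */
#if 0	/* documentation sketch, never compiled */
static void context_switch_sketch(struct task_struct *prev,
				  struct task_struct *next)
{
	struct mm_struct *oldmm = prev->active_mm;

	if (!next->mm) {
		/* kernel thread: borrow the previous mm ... */
		next->active_mm = oldmm;
		atomic_inc(&oldmm->mm_count);	/* keep it alive */
		/* ... and enter lazy TLB mode (a no-op on UniCore) */
		enter_lazy_tlb(oldmm, next);
	} else {
		switch_mm(oldmm, next->mm, next);
	}
}
#endif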

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();

	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
		cpu_switch_mm(next->pgd, next);
}
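
/*
 * Note on the test above: cpu_switch_mm() must run either when this
 * CPU is not yet in next's cpumask (next has never been active here)
 * or when the mm really changed.  A redundant switch to the same mm
 * on the same CPU therefore costs nothing beyond the cpumask test.
 */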

#define deactivate_mm(tsk, mm)	do { } while (0)
#define activate_mm(prev, next)	switch_mm(prev, next, NULL)
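
/*
 * activate_mm() is called when a new user mm takes over, e.g. at exec
 * time.  switch_mm() above never dereferences tsk, so passing NULL
 * here is safe.
 */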

/*
 * We are inserting a "fake" vma for the user-accessible vector page so
 * gdb and friends can get to it through ptrace and /proc/<pid>/mem.
 * But we also want to remove it before the generic code gets to see it
 * during process exit, since unmapping it would cause total havoc.
 * (a macro is used here because remove_vma() is static to mm/mmap.c)
 */
#define arch_exit_mmap(mm) \
do { \
	struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
	if (high_vma) { \
		BUG_ON(high_vma->vm_next);  /* it should be last */ \
		if (high_vma->vm_prev) \
			high_vma->vm_prev->vm_next = NULL; \
		else \
			mm->mmap = NULL; \
		rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
		vmacache_invalidate(mm); \
		mm->map_count--; \
		remove_vma(high_vma); \
	} \
} while (0)
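
/*
 * Counterpart sketch (illustration only, not code from this port):
 * the vma torn down above covers the user-visible vector page at
 * 0xffff0000.  Below is a minimal, hypothetical example of installing
 * such a mapping with the generic install_special_mapping() helper;
 * the real UniCore setup code lives elsewhere in the arch tree and
 * may differ in flags and bookkeeping.
 */
#if 0	/* documentation sketch, never compiled */
static int map_vectors_page_sketch(struct mm_struct *mm,
				   struct page *vectors_page)
{
	return install_special_mapping(mm, 0xffff0000, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &vectors_page);
}
#endif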

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
}

static inline void arch_unmap(struct mm_struct *mm,
			struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
}

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}

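/*
 * On architectures with hardware protection keys (e.g. x86 pkeys) the
 * two hooks below enforce key-based access rights.  UniCore has no
 * such mechanism, so both unconditionally permit access.
 */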
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool foreign)
{
	/* by default, allow everything */
	return true;
}

static inline bool arch_pte_access_permitted(pte_t pte, bool write)
{
	/* by default, allow everything */
	return true;
}
#endif	/* __UNICORE_MMU_CONTEXT_H__ */