1
2
3
4
5
6#ifndef __UM_MMU_CONTEXT_H
7#define __UM_MMU_CONTEXT_H
8
9#include <linux/sched.h>
10#include <asm/mmu.h>
11
12extern void uml_setup_stubs(struct mm_struct *mm);
13
14
15
/*
 * Duplicate the arch-specific part of an address space (fork path):
 * the only per-mm state UML keeps is the stub pages, so (re)install
 * them in the child mm.  The parent mm contributes nothing here.
 */
static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	(void) oldmm;		/* intentionally unused */

	uml_setup_stubs(mm);
}
20extern void arch_exit_mmap(struct mm_struct *mm);
/*
 * Arch hook called when [start, end) is unmapped from @mm.
 * UML has no per-range arch state to tear down, so this is a no-op.
 */
static inline void arch_unmap(struct mm_struct *mm,
			      struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/* nothing to do */
}
/*
 * Arch hook run while exec sets up the new mm for a binary.
 * No arch-specific initialization is required on UML.
 */
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
	/* nothing to do */
}
30
/*
 * Per-VMA access check hook (used e.g. for protection-key style
 * restrictions on other arches).  UML imposes no such restrictions,
 * so every access — read, write, execute, foreign — is permitted.
 */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
					     bool write, bool execute,
					     bool foreign)
{
	return true;
}
37
38static inline bool arch_pte_access_permitted(pte_t pte, bool write)
39{
40
41 return true;
42}
43
44
45
46
47
/* No arch-specific work is needed when a task's mm is deactivated. */
#define deactivate_mm(tsk,mm) do { } while (0)
49
50extern void force_flush_all(void);
51
/*
 * Make @new the active address space for the current task (exec-style
 * path where @new replaces @old as the task's mm).
 *
 * Order matters: the host address space is switched to @new first, and
 * only then are the stub pages (re)installed — uml_setup_stubs() changes
 * @new's mappings, so it is done under @new's mmap_sem held for write.
 */
static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
{
	__switch_mm(&new->context.id);
	down_write(&new->mmap_sem);
	uml_setup_stubs(new);
	up_write(&new->mmap_sem);
}
63
64static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
65 struct task_struct *tsk)
66{
67 unsigned cpu = smp_processor_id();
68
69 if(prev != next){
70 cpumask_clear_cpu(cpu, mm_cpumask(prev));
71 cpumask_set_cpu(cpu, mm_cpumask(next));
72 if(next != &init_mm)
73 __switch_mm(&next->context.id);
74 }
75}
76
/*
 * Lazy-TLB entry hook (kernel thread borrowing @mm).  UML needs no
 * special handling for lazy TLB mode, so this is a no-op.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* nothing to do */
}
81
82extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
83
84extern void destroy_context(struct mm_struct *mm);
85
86#endif
87