1
2
3
4
5
6#ifndef __UM_MMU_CONTEXT_H
7#define __UM_MMU_CONTEXT_H
8
9#include <linux/sched.h>
10#include <linux/mm_types.h>
11
12#include <asm/mmu.h>
13
14extern void uml_setup_stubs(struct mm_struct *mm);
15
16
17
/*
 * Called when an address space is duplicated (fork): map the UML
 * userspace stub pages into the child mm so it can trap back into
 * the UML kernel.  The parent mm (oldmm) needs no work here.
 */
static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	uml_setup_stubs(mm);
}
22extern void arch_exit_mmap(struct mm_struct *mm);
/*
 * Arch hook invoked when [start, end) is unmapped from mm.
 * UML has no per-arch unmap bookkeeping, so this is a no-op.
 */
static inline void arch_unmap(struct mm_struct *mm,
			struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
}
/*
 * Arch hook run while exec() initializes the new mm (before the stack
 * VMA is finalized).  Nothing arch-specific is required on UML.
 */
static inline void arch_bprm_mm_init(struct mm_struct *mm,
		struct vm_area_struct *vma)
{
}
32
/*
 * Protection-key style access filtering is not implemented on UML, so
 * every VMA access is allowed regardless of write/execute/foreign.
 */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* No arch-level access restrictions to enforce. */
	return true;
}
39
40
41
42
43
44#define deactivate_mm(tsk,mm) do { } while (0)
45
46extern void force_flush_all(void);
47
/*
 * Make "new" the current address space (e.g. at exec time).  Switches
 * the backing host context first, then maps the UML stub pages into it.
 */
static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
{
	/*
	 * Switch to the new mm's host context before touching it.
	 * NOTE(review): presumably __switch_mm() must precede the stub
	 * setup so the mappings land in the right host process — confirm
	 * against __switch_mm()'s definition.
	 */
	__switch_mm(&new->context.id);

	/* uml_setup_stubs() modifies the VMA layout, so take mmap_sem. */
	down_write(&new->mmap_sem);
	uml_setup_stubs(new);
	up_write(&new->mmap_sem);
}
59
60static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
61 struct task_struct *tsk)
62{
63 unsigned cpu = smp_processor_id();
64
65 if(prev != next){
66 cpumask_clear_cpu(cpu, mm_cpumask(prev));
67 cpumask_set_cpu(cpu, mm_cpumask(next));
68 if(next != &init_mm)
69 __switch_mm(&next->context.id);
70 }
71}
72
/*
 * Called when "tsk" enters lazy TLB mode (kernel thread borrowing mm).
 * UML requires no action for lazy TLB transitions.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm,
		struct task_struct *tsk)
{
}
77
78extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
79
80extern void destroy_context(struct mm_struct *mm);
81
82#endif
83