1
2
3
4
5
6#ifndef __UM_MMU_CONTEXT_H
7#define __UM_MMU_CONTEXT_H
8
9#include <linux/sched.h>
10#include <asm/mmu.h>
11
12extern void uml_setup_stubs(struct mm_struct *mm);
13
14
15
/*
 * arch_dup_mmap - arch hook run when @mm is duplicated from @oldmm (fork).
 *
 * On UML the child's new address space needs its stub mappings set up via
 * uml_setup_stubs(); @oldmm itself is not touched here.
 */
static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	uml_setup_stubs(mm);
}
20extern void arch_exit_mmap(struct mm_struct *mm);
/*
 * arch_unmap - arch hook run when the range [@start, @end) of @vma is
 * being unmapped from @mm.
 *
 * No-op on UML: no per-arch state is associated with unmapped ranges.
 */
static inline void arch_unmap(struct mm_struct *mm,
			struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
}
/*
 * arch_bprm_mm_init - arch hook run while exec sets up the new @mm
 * (before it becomes current's address space).
 *
 * No-op on UML: stub setup happens later, in activate_mm().
 */
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}
30
31
32
33
34#define deactivate_mm(tsk,mm) do { } while (0)
35
36extern void force_flush_all(void);
37
/*
 * activate_mm - make @new the current address space, replacing @old.
 *
 * NOTE(review): presumably reached via exec/unshare when the new mm is
 * used for the first time — confirm against callers.
 *
 * The host-side address space is switched first, then the UML stub
 * mappings are installed.  uml_setup_stubs() is called under the
 * write side of mmap_sem since it modifies the mm's mappings; the
 * statement order here is deliberate — do not reorder.
 */
static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
{
	__switch_mm(&new->context.id);
	down_write(&new->mmap_sem);
	uml_setup_stubs(new);
	up_write(&new->mmap_sem);
}
49
50static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
51 struct task_struct *tsk)
52{
53 unsigned cpu = smp_processor_id();
54
55 if(prev != next){
56 cpumask_clear_cpu(cpu, mm_cpumask(prev));
57 cpumask_set_cpu(cpu, mm_cpumask(next));
58 if(next != &init_mm)
59 __switch_mm(&next->context.id);
60 }
61}
62
/*
 * enter_lazy_tlb - arch hook run when a kernel thread starts borrowing
 * @mm as its active_mm (lazy TLB mode).
 *
 * No-op on UML: no TLB state needs adjusting here.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}
67
68extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
69
70extern void destroy_context(struct mm_struct *mm);
71
72#endif
73