/*
 * linux/arch/unicore32/include/asm/mmu_context.h
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Copyright (C) 2001-2010 Guan Xuetao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12#ifndef __UNICORE_MMU_CONTEXT_H__
13#define __UNICORE_MMU_CONTEXT_H__
14
15#include <linux/compiler.h>
16#include <linux/sched.h>
17#include <linux/mm.h>
18#include <linux/vmacache.h>
19#include <linux/io.h>
20
21#include <asm/cacheflush.h>
22#include <asm/cpu-single.h>
23
/*
 * UniCore keeps no per-mm MMU state: creating a context always succeeds
 * (returns 0) and tearing one down is a no-op.
 */
#define init_new_context(tsk, mm) 0

#define destroy_context(mm) do { } while (0)
27
/*
 * enter_lazy_tlb() - Called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 * cpu: cpu number which is entering lazy tlb
 */
/* No lazy-TLB bookkeeping is tracked on this architecture: empty stub. */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
41
/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU-specific function when the mm hasn't
 * actually changed.
 */
48static inline void
49switch_mm(struct mm_struct *prev, struct mm_struct *next,
50 struct task_struct *tsk)
51{
52 unsigned int cpu = smp_processor_id();
53
54 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
55 cpu_switch_mm(next->pgd, next);
56}
57
/* Nothing to undo on deactivation; activation is a plain mm switch. */
#define deactivate_mm(tsk, mm) do { } while (0)
#define activate_mm(prev, next) switch_mm(prev, next, NULL)
60
/*
 * We are inserting a "fake" vma for the user-accessible vector page so
 * gdb and friends can get to it through ptrace and /proc/<pid>/mem.
 * But we also want to remove it before the generic code gets to see it
 * during process exit, so that it doesn't show up in the exit path.
 */
/*
 * arch_exit_mmap - unlink the kernel-installed VMA covering 0xffff0000
 * (presumably the UniCore vector/kuser page — confirm) before the
 * generic exit path walks mm->mmap.
 *
 * The VMA is assumed to be the last entry in the sorted list
 * (BUG_ON(vm_next) enforces this), so unlinking only requires clearing
 * the forward link of its predecessor — or mm->mmap itself when it is
 * the sole VMA — before erasing it from the rbtree, invalidating the
 * per-task vma cache, and freeing it via remove_vma().
 */
#define arch_exit_mmap(mm) \
do { \
	struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
	if (high_vma) { \
		BUG_ON(high_vma->vm_next); \
		if (high_vma->vm_prev) \
			high_vma->vm_prev->vm_next = NULL; \
		else \
			mm->mmap = NULL; \
		rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
		vmacache_invalidate(mm); \
		mm->map_count--; \
		remove_vma(high_vma); \
	} \
} while (0)
83
/* No arch-private state to copy when an mm is duplicated (fork): stub. */
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
}
88
/* No arch-specific work on munmap of [start, end): stub. */
static inline void arch_unmap(struct mm_struct *mm,
			struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
}
94
/* No arch-specific setup for a new exec()'d mm: stub. */
static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
}
99
/*
 * This architecture implements no per-VMA access gating (no pkey-style
 * checks), so any access that reaches this hook is unconditionally
 * permitted regardless of write/execute/foreign mode.
 */
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	return true;
}
106#endif