/*
 *  arch/arm/include/asm/mmu_context.h
 *
 *  Copyright (C) 1996 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Changelog:
 *   27-06-1996  RMK  Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/preempt.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm-generic/mm_hooks.h>

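/*
 * __check_vmalloc_seq() (arch/arm/mm/ioremap.c) catches an mm up with
 * init_mm after new vmalloc/ioremap mappings have been created; in
 * outline it does:
 *
 *      do {
 *              seq = init_mm.context.vmalloc_seq;
 *              copy the vmalloc-range pgd entries from init_mm into mm;
 *              mm->context.vmalloc_seq = seq;
 *      } while (seq != init_mm.context.vmalloc_seq);
 */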
void __check_vmalloc_seq(struct mm_struct *mm);

#ifdef CONFIG_CPU_HAS_ASID

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
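
/*
 * A fresh mm starts with a context id of 0, which can never match a
 * live ASID generation (generations start at ASID_FIRST_VERSION), so
 * check_and_switch_context() is forced to allocate a real ASID the
 * first time the mm is switched in (see arch/arm/mm/context.c).
 */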
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        atomic64_set(&mm->context.id, 0);
        return 0;
}

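/*
 * Cortex-A15 erratum 798181: TLB maintenance broadcasts may not reach
 * other cores, so the workaround falls back to IPIs; this helper picks
 * out the CPUs that may still hold stale TLB entries for "mm" (see
 * arch/arm/kernel/smp_tlb.c and the CONFIG_ARM_ERRATA_798181 help text).
 */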
#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
                             cpumask_t *mask);
#else  /* !CONFIG_ARM_ERRATA_798181 */
static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
                                           cpumask_t *mask)
{
}
#endif /* CONFIG_ARM_ERRATA_798181 */

#else  /* !CONFIG_CPU_HAS_ASID */

#ifdef CONFIG_MMU

static inline void check_and_switch_context(struct mm_struct *mm,
                                            struct task_struct *tsk)
{
        if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
                __check_vmalloc_seq(mm);

        if (irqs_disabled())
                /*
                 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
                 * high interrupt latencies, defer the call and continue
                 * running with the old mm. Since we only support UP systems
                 * on non-ASID CPUs, the old mm will remain valid until the
                 * finish_arch_post_lock_switch() call.
                 */
                mm->context.switch_pending = 1;
        else
                cpu_switch_mm(mm->pgd, mm);
}
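
/*
 * A switch deferred above is completed by finish_arch_post_lock_switch()
 * below, which the scheduler calls once the runqueue lock has been
 * dropped and interrupts are enabled again.
 */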

#ifndef MODULE
#define finish_arch_post_lock_switch \
        finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
        struct mm_struct *mm = current->mm;

        if (mm && mm->context.switch_pending) {
                /*
                 * Preemption must be disabled during cpu_switch_mm() as we
                 * have some stateful cache flush implementations. Check
                 * switch_pending again in case we were preempted and the
                 * switch to this mm was already done.
                 */
                preempt_disable();
                if (mm->context.switch_pending) {
                        mm->context.switch_pending = 0;
                        cpu_switch_mm(mm->pgd, mm);
                }
                preempt_enable_no_resched();
        }
}
#endif /* !MODULE */

#endif /* CONFIG_MMU */

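/*
 * Without hardware ASIDs there is no per-mm context state to set up,
 * so init_new_context() reduces to a no-op.
 */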
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        return 0;
}

#endif /* CONFIG_CPU_HAS_ASID */

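/*
 * Neither tearing down nor first activating an mm needs extra work on
 * ARM: destroy_context() is a no-op and activate_mm() (e.g. at exec
 * time) is just a switch_mm() with no incoming task.
 */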
#define destroy_context(mm)             do { } while(0)
#define activate_mm(prev,next)          switch_mm(prev, next, NULL)

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb; tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * This is the actual mm switch as far as the scheduler is concerned: no
 * registers are touched.  We avoid calling the CPU specific function
 * when the mm hasn't actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
          struct task_struct *tsk)
{
#ifdef CONFIG_MMU
        unsigned int cpu = smp_processor_id();

        /*
         * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
         * so check for possible thread migration and invalidate the
         * I-cache if we're new to this CPU.
         */
        if (cache_ops_need_broadcast() &&
            !cpumask_empty(mm_cpumask(next)) &&
            !cpumask_test_cpu(cpu, mm_cpumask(next)))
                __flush_icache_all();

        if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
                check_and_switch_context(next, tsk);
                if (cache_is_vivt())
                        /*
                         * cpu_switch_mm() flushes the whole VIVT cache on a
                         * switch, so this CPU holds no stale state for
                         * "prev" and can drop out of its cpumask.
                         */
                        cpumask_clear_cpu(cpu, mm_cpumask(prev));
        }
#endif /* CONFIG_MMU */
}

#define deactivate_mm(tsk,mm)           do { } while (0)

#endif /* __ASM_ARM_MMU_CONTEXT_H */