/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/mmu_context.h
 *
 *  ARM mm context handling: switch_mm()/ASID hooks shared between the
 *  ASID (CONFIG_CPU_HAS_ASID) and non-ASID implementations.
 *
 *  Copyright (C) 1996 Russell King.
 */
10#ifndef __ASM_ARM_MMU_CONTEXT_H
11#define __ASM_ARM_MMU_CONTEXT_H
12
13#include <linux/compiler.h>
14#include <linux/sched.h>
15#include <linux/mm_types.h>
16#include <linux/preempt.h>
17
18#include <asm/cacheflush.h>
19#include <asm/cachetype.h>
20#include <asm/proc-fns.h>
21#include <asm/smp_plat.h>
22#include <asm-generic/mm_hooks.h>
23
24void __check_vmalloc_seq(struct mm_struct *mm);
25
26#ifdef CONFIG_CPU_HAS_ASID
27
28void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
29
/*
 * init_new_context() - initialise MMU context state for a new mm.
 *
 * Sets the context id to 0, i.e. no ASID assigned yet; a real ASID is
 * presumably handed out later by check_and_switch_context() — the
 * allocator lives out of line.  The #define overrides the asm-generic
 * default.  Always succeeds.
 */
#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	return 0;
}
37
38#ifdef CONFIG_ARM_ERRATA_798181
39void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
40 cpumask_t *mask);
41#else
/*
 * No-op stub when the Cortex-A15 erratum 798181 workaround is not
 * configured.  NOTE(review): *mask is left untouched here — callers are
 * presumably expected to have initialised it; verify at call sites.
 */
static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
					   cpumask_t *mask)
{
}
46#endif
47
48#else
49
50#ifdef CONFIG_MMU
51
/*
 * check_and_switch_context() - non-ASID variant: catch up with any new
 * kernel vmalloc mappings, then switch the page tables — either
 * immediately, or deferred if interrupts are currently off.
 */
static inline void check_and_switch_context(struct mm_struct *mm,
		struct task_struct *tsk)
{
	/* Resync this mm's kernel mappings if vmalloc space changed. */
	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	if (irqs_disabled())
		/*
		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
		 * high interrupt latencies, defer the call and continue
		 * running with the old mm. Since we only support UP systems
		 * on non-ASID CPUs, the old mm will remain valid until the
		 * finish_arch_post_lock_switch() call.
		 */
		mm->context.switch_pending = 1;
	else
		cpu_switch_mm(mm->pgd, mm);
}
70
#ifndef MODULE
/*
 * finish_arch_post_lock_switch() - complete a switch_mm() that
 * check_and_switch_context() deferred (switch_pending) because
 * interrupts were disabled at the time.  Defining the macro makes the
 * generic scheduler code invoke this hook after the context switch.
 */
#define finish_arch_post_lock_switch \
	finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct mm_struct *mm = current->mm;

	if (mm && mm->context.switch_pending) {
		/*
		 * Preemption must be disabled during cpu_switch_mm() as we
		 * have some stateful cache flush implementations. Check
		 * switch_pending again in case we were preempted and the
		 * switch to this mm was already done.
		 */
		preempt_disable();
		if (mm->context.switch_pending) {
			mm->context.switch_pending = 0;
			cpu_switch_mm(mm->pgd, mm);
		}
		preempt_enable_no_resched();
	}
}
#endif	/* !MODULE */
94
95#endif
96
97#endif
98
/* activate_mm() is just a full switch_mm() with no associated task. */
#define activate_mm(prev,next)	switch_mm(prev, next, NULL)

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
#ifdef CONFIG_MMU
	unsigned int cpu = smp_processor_id();

	/*
	 * I-cache invalidation is not broadcast on these cores, so if the
	 * task may have migrated here while other CPUs were running this
	 * mm, invalidate the whole I-cache before switching to it.
	 */
	if (cache_ops_need_broadcast() &&
	    !cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();

	/* Switch only when we are new to this mm or the mm changed. */
	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
		check_and_switch_context(next, tsk);
		/* VIVT caches carry no stale entries for prev after the
		 * switch, so this CPU no longer needs prev's broadcasts. */
		if (cache_is_vivt())
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
	}
#endif
}
131
132#include <asm-generic/mmu_context.h>
133
134#endif
135