/*
 * Switch an MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2013 Tensilica Inc.
 */

11#ifndef _XTENSA_MMU_CONTEXT_H
12#define _XTENSA_MMU_CONTEXT_H
13
14#ifndef CONFIG_MMU
15#include <asm/nommu_context.h>
16#else
17
18#include <linux/stringify.h>
19#include <linux/sched.h>
20
21#include <asm/vectors.h>
22
23#include <asm/pgtable.h>
24#include <asm/cacheflush.h>
25#include <asm/tlbflush.h>
26#include <asm-generic/mm_hooks.h>
27#include <asm-generic/percpu.h>
28
#if (XCHAL_HAVE_TLBS != 1)
# error "Linux must have an MMU!"
#endif

/*
 * Per-CPU ASID allocator state.  The low ASID_MASK bits hold the most
 * recently assigned ASID on that CPU; the upper bits act as a generation
 * counter, bumped each time the ASID space wraps (see get_new_mmu_context).
 */
DECLARE_PER_CPU(unsigned long, asid_cache);
#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
35
/*
 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
 * any user or kernel context.  ASID_INSERT packs four ring->ASID
 * mappings into one RASID register value, using the reserved values
 * for the kernel rings:
 *
 *   0 invalid
 *   1 kernel
 *   2 reserved
 *   3 reserved
 *   4...  user contexts (ASID_USER_FIRST)
 */
#define NO_CONTEXT 0
#define ASID_USER_FIRST 4
#define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1)
#define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8))

/* One-time MMU setup at boot; defined in arch code. */
void init_mmu(void);
54
/*
 * Write @val into the RASID special register (the packed ring->ASID
 * mappings built by ASID_INSERT).  The isync makes the new mappings
 * take effect before any subsequent memory access.
 */
static inline void set_rasid_register (unsigned long val)
{
	__asm__ __volatile__ (" wsr %0, rasid\n\t"
			      " isync\n" : : "a" (val));
}
60
/* Read back the current RASID special-register value. */
static inline unsigned long get_rasid_register (void)
{
	unsigned long tmp;
	__asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp));
	return tmp;
}
67
/*
 * Hand @mm a fresh ASID on @cpu by advancing the per-CPU counter.
 * NOTE(review): callers appear to rely on preemption being disabled so
 * that @cpu stays current across the update -- confirm at call sites.
 */
static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long asid = cpu_asid_cache(cpu);
	if ((++asid & ASID_MASK) == 0) {
		/*
		 * Start new asid cycle; continue counting with next
		 * generation.  Old ASIDs may still be live in the TLB,
		 * so flush it, and skip the reserved values 0..3 so user
		 * contexts always get ASID_USER_FIRST or above.
		 */
		local_flush_tlb_all();
		asid += ASID_USER_FIRST;
	}
	cpu_asid_cache(cpu) = asid;
	mm->context.asid[cpu] = asid;
	mm->context.cpu = cpu;
}
83
/*
 * Ensure @mm has a valid ASID for @cpu, allocating a new one if it has
 * never had one (NO_CONTEXT) or if its ASID belongs to an older
 * generation (upper bits differ from the per-CPU cache) and is thus
 * stale.
 */
static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
	if (mm) {
		unsigned long asid = mm->context.asid[cpu];

		/* Generation mismatch in the bits above ASID_MASK means
		   the ASID predates the last wrap/flush. */
		if (asid == NO_CONTEXT ||
		    ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
			get_new_mmu_context(mm, cpu);
	}
}
98
/*
 * Make @mm the active context on @cpu: refresh its ASID if needed,
 * program the RASID register with it, and drop any cached page-directory
 * translation so the new address space takes effect.
 */
static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
{
	get_mmu_context(mm, cpu);
	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
	invalidate_page_directory();
}
105
106
107
108
109
110
111
112static inline int init_new_context(struct task_struct *tsk,
113 struct mm_struct *mm)
114{
115 int cpu;
116 for_each_possible_cpu(cpu) {
117 mm->context.asid[cpu] = NO_CONTEXT;
118 }
119 mm->context.cpu = -1;
120 return 0;
121}
122
123static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
124 struct task_struct *tsk)
125{
126 unsigned int cpu = smp_processor_id();
127 int migrated = next->context.cpu != cpu;
128
129 if (migrated) {
130 __invalidate_icache_all();
131 next->context.cpu = cpu;
132 }
133 if (migrated || prev != next)
134 activate_context(next, cpu);
135}
136
/* Activating an mm is just a switch with no previous task context. */
#define activate_mm(prev, next) switch_mm((prev), (next), NULL)
/* Nothing to do when a task stops using an mm. */
#define deactivate_mm(tsk, mm) do { } while (0)

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	invalidate_page_directory();
}
148
149
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do. */
}
155
156#endif
157#endif
158