1
2
3
4
5
6
7
8
9
10
11#ifndef _XTENSA_MMU_CONTEXT_H
12#define _XTENSA_MMU_CONTEXT_H
13
14#ifndef CONFIG_MMU
15#include <asm/nommu_context.h>
16#else
17
18#include <linux/stringify.h>
19#include <linux/sched.h>
20#include <linux/mm_types.h>
21#include <linux/pgtable.h>
22
23#include <asm/vectors.h>
24
25#include <asm/cacheflush.h>
26#include <asm/tlbflush.h>
27#include <asm-generic/mm_hooks.h>
28#include <asm-generic/percpu.h>
29
30#if (XCHAL_HAVE_TLBS != 1)
31# error "Linux must have an MMU!"
32#endif
33
34DECLARE_PER_CPU(unsigned long, asid_cache);
35#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
36
37
38
39
40
41
42
43
44
45
46
47
48
49#define NO_CONTEXT 0
50#define ASID_USER_FIRST 4
51#define ASID_MASK ((1 << XCHAL_MMU_ASID_BITS) - 1)
52#define ASID_INSERT(x) (0x03020001 | (((x) & ASID_MASK) << 8))
53
54void init_mmu(void);
55void init_kio(void);
56
/*
 * Write @val into the Xtensa RASID special register, which holds the
 * ASIDs for all four protection rings (packed as by ASID_INSERT()).
 * The isync discards any instructions already fetched so that every
 * subsequent access is translated with the new ASID set.
 */
static inline void set_rasid_register (unsigned long val)
{
	__asm__ __volatile__ (" wsr %0, rasid\n\t"
			      " isync\n" : : "a" (val));
}
62
/*
 * Read back the current packed ring->ASID assignments from the RASID
 * special register.
 */
static inline unsigned long get_rasid_register (void)
{
	unsigned long tmp;
	__asm__ __volatile__ (" rsr %0, rasid\n\t" : "=a" (tmp));
	return tmp;
}
69
/*
 * Allocate a fresh ASID for @mm on @cpu from that CPU's asid_cache
 * counter.  The counter's low XCHAL_MMU_ASID_BITS are the hardware
 * ASID; the upper bits form a "generation" number used by
 * get_mmu_context() to detect stale ASIDs.
 */
static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long asid = cpu_asid_cache(cpu);
	if ((++asid & ASID_MASK) == 0) {
		/*
		 * The hardware ASID space wrapped: all previously handed-out
		 * ASIDs of the old generation may now be reused, so the TLB
		 * must be flushed before any of them is assigned again.
		 * Skip ahead past the reserved ASIDs (0..ASID_USER_FIRST-1);
		 * NO_CONTEXT (0) must never be a valid user ASID.
		 */
		local_flush_tlb_all();
		asid += ASID_USER_FIRST;
	}
	cpu_asid_cache(cpu) = asid;
	mm->context.asid[cpu] = asid;
	mm->context.cpu = cpu;
}
85
86static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
87{
88
89
90
91
92 if (mm) {
93 unsigned long asid = mm->context.asid[cpu];
94
95 if (asid == NO_CONTEXT ||
96 ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
97 get_new_mmu_context(mm, cpu);
98 }
99}
100
/*
 * Make @mm's address space current on @cpu: refresh its ASID if needed,
 * program the hardware RASID register with it, and invalidate the
 * cached page-directory entry so translations use @mm's page tables.
 */
static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
{
	get_mmu_context(mm, cpu);
	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
	invalidate_page_directory();
}
107
108
109
110
111
112
113
114#define init_new_context init_new_context
115static inline int init_new_context(struct task_struct *tsk,
116 struct mm_struct *mm)
117{
118 int cpu;
119 for_each_possible_cpu(cpu) {
120 mm->context.asid[cpu] = NO_CONTEXT;
121 }
122 mm->context.cpu = -1;
123 return 0;
124}
125
126static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
127 struct task_struct *tsk)
128{
129 unsigned int cpu = smp_processor_id();
130 int migrated = next->context.cpu != cpu;
131
132 if (migrated) {
133 __invalidate_icache_all();
134 next->context.cpu = cpu;
135 }
136 if (migrated || prev != next)
137 activate_context(next, cpu);
138}
139
140
141
142
143
/*
 * Tear down @mm's MMU context.  Only the cached page-directory entry is
 * invalidated here; no per-mm state needs freeing since ASIDs are
 * recycled lazily by the generation scheme in get_new_mmu_context().
 * NOTE(review): @mm itself is not touched — presumably invalidating the
 * page directory on the current CPU is sufficient here; confirm against
 * the generic destroy_context() contract.
 */
#define destroy_context destroy_context
static inline void destroy_context(struct mm_struct *mm)
{
	invalidate_page_directory();
}
149
150
151#include <asm-generic/mmu_context.h>
152
153#endif
154#endif
155