#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H

/*
 * Routines to manage the allocation of task context numbers.  Task context
 * numbers are used to reduce or eliminate the need to perform TLB flushes
 * due to context switches.  Context numbers are implemented using ia-64
 * region ids.  Since the IA-64 TLB does not consider the region number when
 * performing a TLB lookup, we need to assign a unique region id to each
 * region in a process.  We use the least significant three bits in an
 * address for a region number.  The bits are then shifted into position to
 * form the region id.
 */
#define IA64_REGION_ID_KERNEL	0	/* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)	(((ctx) << 3) | ((addr) >> 61))
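
/*
 * Illustrative example (hypothetical values, not a comment from the
 * original source): a user address in region 2 has (addr >> 61) == 2,
 * so with ctx == 10 the macro yields
 * ia64_rid(10, addr) == (10 << 3) | 2 == 0x52.
 */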

# include <asm/page.h>
# ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
#include <asm-generic/mm_hooks.h>
struct ia64_ctx {
	spinlock_t lock;
	unsigned int next;	/* next context number to use */
	unsigned int limit;	/* available free range */
	unsigned int max_ctx;	/* max. context value supported by all CPUs */
				/* call wrap_mmu_context when next >= max */
	unsigned long *bitmap;	/* bitmap size is max_ctx+1 */
	unsigned long *flushmap;/* pending rid to be flushed */
};

extern struct ia64_ctx ia64_ctx;
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);

extern void mmu_context_init (void);
extern void wrap_mmu_context (struct mm_struct *mm);

static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * When the context counter wraps around all TLBs need to be flushed because
 * an old context number might have been reused.  This is signalled by the
 * ia64_need_tlb_flush per-cpu variable, which is checked in the routine
 * below.  Called on the activate_mm() path.
 */
static inline void
delayed_tlb_flush (void)
{
	extern void local_flush_tlb_all (void);
	unsigned long flags;

	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
		if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
			local_flush_tlb_all();
			__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
}

static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	unsigned long flags;
	nv_mm_context_t context = mm->context;

	if (likely(context))
		goto out;

	spin_lock_irqsave(&ia64_ctx.lock, flags);
	/* re-check, now that we've got the lock: */
	context = mm->context;
	if (context == 0) {
		cpumask_clear(mm_cpumask(mm));
		if (ia64_ctx.next >= ia64_ctx.limit) {
			/* empty range of context numbers: reserve a new one: */
			ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			if (ia64_ctx.next >= ia64_ctx.max_ctx)
				wrap_mmu_context(mm);
		}
		mm->context = context = ia64_ctx.next++;
		__set_bit(context, ia64_ctx.bitmap);
	}
	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
out:
	/*
	 * Ensure we're not starting to use "context" before any old
	 * uses of it are gone from our TLB.
	 */
	delayed_tlb_flush();

	return context;
}

/*
 * Initialize context number to some sane value.  MM is guaranteed to be a
 * brand-new address space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}

static inline void
destroy_context (struct mm_struct *mm)
{
	/* Nothing to do.  */
}

static inline void
reload_context (nv_mm_context_t context)
{
	unsigned long rid;
	unsigned long rid_incr = 0;
	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;

	old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
	rid = context << 3;	/* make space for encoding the region number */
	rid_incr = 1 << 8;

	/* encode the region id, preferred page size, and VHPT enable bit: */
	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
	rr1 = rr0 + 1*rid_incr;
	rr2 = rr0 + 2*rid_incr;
	rr3 = rr0 + 3*rid_incr;
	rr4 = rr0 + 4*rid_incr;
#ifdef CONFIG_HUGETLB_PAGE
	/* preserve the huge-page region's preferred page-size bits: */
	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);

#  if RGN_HPAGE != 4
#    error "reload_context assumes RGN_HPAGE is 4"
#  endif
#endif

	ia64_set_rr0_to_rr4(rr0, rr1, rr2, rr3, rr4);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
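
/*
 * Illustrative note (an assumption based on the architected IA-64 region
 * register format, not a comment from the original source): bit 0 of a
 * region register is the VHPT enable bit, bits 7:2 hold the preferred
 * page size, and bits 31:8 hold the region id.  For example, with
 * PAGE_SHIFT == 14 (16KB pages) and context == 1, the code above computes
 * rr0 == ((1 << 3) << 8) | (14 << 2) | 1 == 0x839.
 */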

/*
 * Must be called with preemption off
 */
static inline void
activate_context (struct mm_struct *mm)
{
	nv_mm_context_t context;

	do {
		context = get_mmu_context(mm);
		if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
			cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		reload_context(context);
		/*
		 * in the unlikely event of a TLB-flush by another thread,
		 * forcing the context reload is sufficient to undo the
		 * damage.
		 */
	} while (unlikely(context != mm->context));
}

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * We may get interrupts here, but that's OK because interrupt
	 * handlers cannot touch user-space.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
	activate_context(next);
}

#define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */