#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H

/*
 * Routines to manage the allocation of task context numbers.  Context
 * numbers are used to reduce or eliminate the need to perform TLB flushes
 * due to context switches.  They are implemented using ia-64 region ids:
 * since the IA-64 TLB does not consider the region number when performing
 * a TLB lookup, each of a process's eight regions gets its own region id,
 * formed from the context number and the 3-bit region number (see
 * ia64_rid() below).
 */
#define IA64_REGION_ID_KERNEL	0	/* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)	(((ctx) << 3) | (addr >> 61))
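
/*
 * Worked example of the macro above: bits 63..61 of a virtual address
 * select one of the eight regions, so for context number 5 and an address
 * in region 3, ia64_rid() yields (5 << 3) | 3 == 43.
 */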

# include <asm/page.h>
# ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
#include <asm-generic/mm_hooks.h>
struct ia64_ctx {
	spinlock_t lock;
	unsigned int next;	/* next context number to use */
	unsigned int limit;	/* available free range */
	unsigned int max_ctx;	/* max. context value supported by all CPUs */
				/* call wrap_mmu_context() when next >= max_ctx */
	unsigned long *bitmap;	/* bitmap of in-use context numbers */
	unsigned long *flushmap;/* pending rids to be flushed */
};

extern struct ia64_ctx ia64_ctx;
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);

extern void mmu_context_init (void);
extern void wrap_mmu_context (struct mm_struct *mm);

/*
 * When the context counter wraps around, all TLBs need to be flushed
 * because an old context number might have been reused.  This is signalled
 * by the ia64_need_tlb_flush per-cpu variable, which is checked in the
 * routine below.  Called on the activate_mm() path via get_mmu_context().
 */
static inline void
delayed_tlb_flush (void)
{
	extern void local_flush_tlb_all (void);
	unsigned long flags;

	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
		spin_lock_irqsave(&ia64_ctx.lock, flags);
		if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
			local_flush_tlb_all();
			__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
		}
		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	}
}

static inline nv_mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	unsigned long flags;
	nv_mm_context_t context = mm->context;

	if (likely(context))
		goto out;

	spin_lock_irqsave(&ia64_ctx.lock, flags);
	/* re-check, now that we've got the lock: */
	context = mm->context;
	if (context == 0) {
		cpumask_clear(mm_cpumask(mm));
		if (ia64_ctx.next >= ia64_ctx.limit) {
			ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
					ia64_ctx.max_ctx, ia64_ctx.next);
			if (ia64_ctx.next >= ia64_ctx.max_ctx)
				wrap_mmu_context(mm);
		}
		mm->context = context = ia64_ctx.next++;
		__set_bit(context, ia64_ctx.bitmap);
	}
	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
out:
	/*
	 * Ensure we're not starting to use a context before making sure
	 * we have a valid TLB.
	 */
	delayed_tlb_flush();

	return context;
}
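
/*
 * A note on the allocator above (a summary of the code, not additional
 * semantics): [ia64_ctx.next, ia64_ctx.limit) is a known-free range of
 * context numbers.  Once it is exhausted, find_next_zero_bit() and
 * find_next_bit() locate the next free range in ia64_ctx.bitmap, and
 * wrap_mmu_context() recycles old numbers (scheduling the delayed TLB
 * flushes) when the whole space has been used up.
 */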

/*
 * Initialize the context number to some sane value.  MM is guaranteed to
 * be a brand-new address space, so no TLB flushing is needed, ever.
 */
#define init_new_context init_new_context
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}

static inline void
reload_context (nv_mm_context_t context)
{
	unsigned long rid;
	unsigned long rid_incr = 0;
	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;

	old_rr4 = ia64_get_rr(RGN_BASE(RGN_HPAGE));
	rid = context << 3;	/* make space for encoding the region number */
	rid_incr = 1 << 8;

	/* encode the region id, preferred page size, and VHPT enable bit: */
	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
	rr1 = rr0 + 1*rid_incr;
	rr2 = rr0 + 2*rid_incr;
	rr3 = rr0 + 3*rid_incr;
	rr4 = rr0 + 4*rid_incr;
#ifdef CONFIG_HUGETLB_PAGE
	/* preserve the page-size bits of rr4, which select the huge page size: */
	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);

# if RGN_HPAGE != 4
#  error "reload_context assumes RGN_HPAGE is 4"
# endif
#endif

	ia64_set_rr0_to_rr4(rr0, rr1, rr2, rr3, rr4);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
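
/*
 * Layout of the region-register values assembled above (derived from the
 * rr0 expression; see the Itanium architecture manuals for the full
 * format):
 *
 *	bit  0    : ve  -- VHPT walker enable (the "| 1")
 *	bits 7:2  : ps  -- preferred page size (PAGE_SHIFT << 2)
 *	bits 31:8 : rid -- region id ((context << 3) + region#, shifted by 8)
 */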

/*
 * Must be called with preemption disabled (smp_processor_id() below).
 */
static inline void
activate_context (struct mm_struct *mm)
{
	nv_mm_context_t context;

	do {
		context = get_mmu_context(mm);
		if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
			cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
		reload_context(context);
		/*
		 * In the unlikely event of a TLB flush by another thread,
		 * redo the load.
		 */
	} while (unlikely(context != mm->context));
}
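
/*
 * The retry loop above closes a race: a TLB flush by another thread
 * (e.g. flush_tlb_mm(), which resets mm->context) can land between
 * get_mmu_context() and reload_context(); the while-condition detects
 * this and forces a fresh context number to be allocated and reloaded.
 */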

/*
 * Switch from address space PREV to address space NEXT.
 */
#define activate_mm activate_mm
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	/*
	 * We may get interrupts here, but that's OK because interrupt
	 * handlers cannot touch user space.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
	activate_context(next);
}

#define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)
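
/*
 * Note that switch_mm() ignores next_task and simply expands to
 * activate_mm(): reloading the page-table base register and the region
 * registers is all that switching address spaces requires here.
 */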

#include <asm-generic/mmu_context.h>

# endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */