/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU is not using the hash
 * table, such as 8xx, 4xx, Book-E, etc...
 *
 * A small pool of hardware context IDs (PIDs) is shared by all
 * address spaces: contexts are handed out on demand, recycled
 * ("stolen") when the pool runs dry, and lazily flushed on CPUs
 * where their TLB entries may have gone stale.
 */

//#define DEBUG_MAP_CONSISTENCY
//#define DEBUG_CLAMP_LAST_CONTEXT	31
//#define DEBUG_HARDER

/* We don't use DEBUG because it tends to be compiled in always nowadays
 * and this would generate way too much output
 */
#ifdef DEBUG_HARDER
#define pr_hard(args...)	printk(KERN_DEBUG args)
#define pr_hardcont(args...)	printk(KERN_CONT args)
#else
#define pr_hard(args...)	do { } while(0)
#define pr_hardcont(args...)	do { } while(0)
#endif

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

/* Range of usable hardware context IDs, set at boot in mmu_context_init() */
static unsigned int first_context, last_context;
/* Next ID to try handing out, and how many IDs are still unallocated */
static unsigned int next_context, nr_free_contexts;
/* Bitmap of in-use context IDs */
static unsigned long *context_map;
/* Per-CPU bitmaps of context IDs whose TLB entries may be stale there */
static unsigned long *stale_map[NR_CPUS];
/* Reverse map from context ID to the mm currently owning it */
static struct mm_struct **context_mm;
static DEFINE_SPINLOCK(context_lock);

#define CTX_MAP_SIZE	\
	(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
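
/*
 * Sizing example (added commentary, not from the original source): with
 * last_context = 255 and BITS_PER_LONG = 32, CTX_MAP_SIZE works out to
 * sizeof(unsigned long) * (255 / 32 + 1) = 4 * 8 = 32 bytes, i.e. one
 * bit per context ID in 0..255, rounded up to whole longs.
 */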

/* Steal a context from a task that has one at the moment.
 *
 * This is used when we are running out of available PID numbers
 * on the processors.
 *
 * This isn't an LRU system, it just frees up each context in
 * turn in a semi-random fashion, though at least we try to
 * skip contexts that are actively used on other processors.
 *
 * On SMP we cannot steal a context that is currently active on
 * some other CPU, so we only consider inactive mms; the stolen
 * ID is marked stale on every CPU that may still hold TLB entries
 * tagged with it, so that it gets flushed before reuse.
 */
#ifdef CONFIG_SMP
static unsigned int steal_context_smp(unsigned int id)
{
	struct mm_struct *mm;
	unsigned int cpu, max, i;

	max = last_context - first_context;

	/* Attempt to free next_context first and then loop until we manage */
	while (max--) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/* We have a candidate victim, check if it's active: on SMP
		 * we cannot steal active contexts
		 */
		if (mm->context.active) {
			id++;
			if (id > last_context)
				id = first_context;
			continue;
		}
		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;

		/* Mark the context stale on all CPUs that used this mm.
		 * For threaded implementations, we set it on all threads of
		 * each core represented in the mask.
		 */
		for_each_cpu(cpu, mm_cpumask(mm)) {
			for (i = cpu_first_thread_in_core(cpu);
			     i <= cpu_last_thread_in_core(cpu); i++)
				__set_bit(id, stale_map[i]);
			cpu = i - 1;
		}
		return id;
	}

	/* This will happen if you have more CPUs than available contexts,
	 * all we can do here is wait a bit and try again
	 */
	spin_unlock(&context_lock);
	cpu_relax();
	spin_lock(&context_lock);

	/* This will cause the caller to try again */
	return MMU_NO_CONTEXT;
}
#endif /* CONFIG_SMP */

/* Note that this will also be called on SMP if all other CPUs are
 * offlined, which means that it may be called for cpu != 0. For
 * this to work, we somewhat assume that CPUs that are onlined
 * come up with a fully clean TLB (or are cleaned when offlined)
 */
static unsigned int steal_context_up(unsigned int id)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();

	/* Pick up the victim mm */
	mm = context_mm[id];

	pr_hardcont(" | steal %d from 0x%p", id, mm);

	/* Flush the TLB for that context: with a single CPU no one
	 * else can be using the context, so even an active one is
	 * safe to steal once its translations are gone locally
	 */
	local_flush_tlb_mm(mm);

	/* Mark this mm as having no context anymore */
	mm->context.id = MMU_NO_CONTEXT;

	/* XXX This clear should ultimately be part of local_flush_tlb_mm */
	__clear_bit(id, stale_map[cpu]);

	return id;
}

#ifdef DEBUG_MAP_CONSISTENCY
static void context_check_map(void)
{
	unsigned int id, nrf, nact;

	nrf = nact = 0;
	for (id = first_context; id <= last_context; id++) {
		int used = test_bit(id, context_map);
		if (!used)
			nrf++;
		if (used != (context_mm[id] != NULL))
			pr_err("MMU: Context %d is %s and MM is %p !\n",
			       id, used ? "used" : "free", context_mm[id]);
		if (context_mm[id] != NULL)
			nact += context_mm[id]->context.active;
	}
	if (nrf != nr_free_contexts) {
		pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
		       nr_free_contexts, nrf);
		nr_free_contexts = nrf;
	}
	if (nact > num_online_cpus())
		pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
		       nact, num_online_cpus());
	if (first_context > 0 && !test_bit(0, context_map))
		pr_err("MMU: Context 0 has been freed !!!\n");
}
#else
static void context_check_map(void) { }
#endif
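
/*
 * Summary of the allocation path below (added commentary, not from the
 * original source): the common case is that 'next' already owns a valid
 * context ID and we only need to check the per-CPU stale map. Otherwise
 * we pick the next free ID from context_map, or steal one when the pool
 * is exhausted; on SMP the steal can fail transiently, hence the retry
 * via the 'again' label.
 */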
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned int i, id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
		if (context_mm[id] != next)
			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
			       next, id, id, context_mm[id]);
#endif
		goto ctxt_ok;
	}

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context + 1, id);
		if (id > last_context)
			id = first_context;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_hardcont(" | stale flush %d [%d..%d]",
			    id, cpu_first_thread_in_core(cpu),
			    cpu_last_thread_in_core(cpu));

		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		for (i = cpu_first_thread_in_core(cpu);
		     i <= cpu_last_thread_in_core(cpu); i++) {
			__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	pr_hardcont(" -> %d\n", id);
	set_context(id, next->pgd);
	spin_unlock(&context_lock);
}
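
/*
 * Illustrative call path (added commentary, assuming the usual powerpc
 * arrangement): the scheduler's context switch ends up in switch_mm(),
 * which calls switch_mmu_context(prev, next). Everything above runs
 * under context_lock, so allocation, stealing and the stale-map flush
 * are atomic with respect to the other CPUs.
 */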

/*
 * Set up the context for a new address space.
 */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	pr_hard("initing context for mm @%p\n", mm);

	mm->context.id = MMU_NO_CONTEXT;
	mm->context.active = 0;

	return 0;
}

/*
 * We're finished using the context for an address space.
 */
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int id;

	if (mm->context.id == MMU_NO_CONTEXT)
		return;

	WARN_ON(mm->context.active != 0);

	spin_lock_irqsave(&context_lock, flags);
	id = mm->context.id;
	if (id != MMU_NO_CONTEXT) {
		__clear_bit(id, context_map);
		mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
		mm->context.active = 0;
#endif
		context_mm[id] = NULL;
		nr_free_contexts++;
	}
	spin_unlock_irqrestore(&context_lock, flags);
}
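
/*
 * Context lifecycle at a glance (added commentary): init_new_context()
 * starts every mm with MMU_NO_CONTEXT, the first switch_mmu_context()
 * to that mm allocates or steals a real ID, and destroy_context()
 * returns the ID to the pool when the address space is torn down.
 */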

#ifdef CONFIG_SMP

static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
#ifdef CONFIG_HOTPLUG_CPU
	struct task_struct *p;
#endif

	/* We don't touch CPU 0's stale map: it's bootmem-allocated at
	 * init time and must never be freed
	 */
	if (cpu == 0)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
		stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
		kfree(stale_map[cpu]);
		stale_map[cpu] = NULL;

		/* We also clear the cpu_vm_mask bits of CPUs going away */
		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->mm)
				cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
		}
		read_unlock(&tasklist_lock);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
	.notifier_call	= mmu_context_cpu_notify,
};

#endif /* CONFIG_SMP */

/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/* Mark init_mm as being active on all possible CPUs since
	 * we'll get called with prev == init_mm the first time
	 * we schedule on a given CPU
	 */
	init_mm.context.active = NR_CPUS;

	/*
	 * The MPC8xx has only 16 contexts. We rotate through them on each
	 * task switch. A better way would be to keep track of tasks that
	 * own contexts, and implement an LRU usage. That way very active
	 * tasks don't always have to pay the TLB reload overhead. The
	 * kernel pages are mapped shared, so the kernel can run on behalf
	 * of any task that makes a kernel entry. Shared does not mean they
	 * are not protected, just that the ASID comparison is not performed.
	 *
	 * The IBM4xx has 256 contexts, so we can just rotate through these
	 * as a way of "switching" contexts. If the TID of the TLB is zero,
	 * the PID/TID comparison is disabled, so we can use a TID of zero
	 * to represent all kernel pages as shared among all contexts.
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
		first_context = 0;
		last_context = 15;
	} else {
		first_context = 1;
		last_context = 255;
	}

#ifdef DEBUG_CLAMP_LAST_CONTEXT
	last_context = DEBUG_CLAMP_LAST_CONTEXT;
#endif

	/*
	 * Allocate the maps used by context management
	 */
	context_map = alloc_bootmem(CTX_MAP_SIZE);
	context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1));
	stale_map[0] = alloc_bootmem(CTX_MAP_SIZE);

#ifdef CONFIG_SMP
	register_cpu_notifier(&mmu_context_cpu_nb);
#endif

	printk(KERN_INFO
	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
	       2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
	       last_context - first_context + 1);

	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes first_context < 32.
	 */
	context_map[0] = (1 << first_context) - 1;
	next_context = first_context;
	nr_free_contexts = last_context - first_context + 1;
}
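
/*
 * Worked example of the initial state (added commentary): on a 4xx-class
 * core, first_context = 1 and last_context = 255, so context_map[0] is
 * set to (1 << 1) - 1 = 0x1, permanently reserving ID 0 for the kernel,
 * and nr_free_contexts starts at 255. On an 8xx, first_context = 0, the
 * expression yields 0 (nothing reserved) and all 16 IDs are usable.
 */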