/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU is not using the hash
 * table, such as 8xx, 4xx, BookE's etc...
 */

/* Debug knobs, normally left commented out. DEBUG_MAP_CONSISTENCY and
 * DEBUG_CLAMP_LAST_CONTEXT are tested further down in this file.
 */
/*#define DEBUG_MAP_CONSISTENCY*/
/*#define DEBUG_CLAMP_LAST_CONTEXT 31*/
/*#define DEBUG_HARDER*/

#ifdef DEBUG_HARDER
#define pr_hard(args...)        printk(KERN_DEBUG args)
#define pr_hardcont(args...)    printk(KERN_CONT args)
#else
#define pr_hard(args...)        do { } while(0)
#define pr_hardcont(args...)    do { } while(0)
#endif

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

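/* context_map tracks which context IDs are in use, context_mm maps a
 * context ID back to the mm_struct that owns it, and stale_map has one
 * bitmap per CPU recording contexts whose TLB entries on that CPU are
 * stale and must be flushed before reuse. All of it is protected by
 * context_lock.
 */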
static unsigned int first_context, last_context;
static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock);

#define CTX_MAP_SIZE    \
        (sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))

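/* Steal a context from a task that has one at the moment.
 *
 * This is used when we are running out of available PID numbers
 * on the processors.
 *
 * This isn't an LRU system, it just frees up each context in
 * turn; an LRU scheme could be layered on top if anyone cared to.
 *
 * SMP and UP take different paths: on SMP we cannot steal a context
 * that is active on another CPU, so we mark the stolen ID stale in
 * stale_map for every CPU that may hold TLB entries for it, while on
 * UP we can simply flush the local TLB right away.
 */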
#ifdef CONFIG_SMP
static unsigned int steal_context_smp(unsigned int id)
{
        struct mm_struct *mm;
        unsigned int cpu, max, i;

        max = last_context - first_context;

        /* Attempt to free next_context first and then loop until we manage */
        while (max--) {
                /* Pick up the victim mm */
                mm = context_mm[id];

                /* We have a candidate victim, check if it's active: on SMP
                 * we cannot steal a context that is active on another CPU.
                 */
                if (mm->context.active) {
                        id++;
                        if (id > last_context)
                                id = first_context;
                        continue;
                }
                pr_hardcont(" | steal %d from 0x%p", id, mm);

                /* Mark this mm as having no context anymore */
                mm->context.id = MMU_NO_CONTEXT;

                /* Mark the context stale on all CPUs that used this mm.
                 * For threaded implementations, set it on all threads of
                 * each core represented in the mask.
                 */
                for_each_cpu(cpu, mm_cpumask(mm)) {
                        for (i = cpu_first_thread_sibling(cpu);
                             i <= cpu_last_thread_sibling(cpu); i++)
                                __set_bit(id, stale_map[i]);
                        cpu = i - 1;
                }
                return id;
        }

        /* This will happen if you have more CPUs than available contexts,
         * all we can do here is wait a bit and try again
         */
        raw_spin_unlock(&context_lock);
        cpu_relax();
        raw_spin_lock(&context_lock);

        /* This will cause the caller to try again */
        return MMU_NO_CONTEXT;
}
#endif /* CONFIG_SMP */
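
/* Note that this is also reached on SMP when all other CPUs are
 * offline, in which case it may run for cpu != 0. The victim context
 * is flushed from the local TLB, which is what makes UP stealing safe
 * without the stale map.
 */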
static unsigned int steal_context_up(unsigned int id)
{
        struct mm_struct *mm;
        int cpu = smp_processor_id();

        /* Pick up the victim mm */
        mm = context_mm[id];

        pr_hardcont(" | steal %d from 0x%p", id, mm);

        /* Flush the TLB for that context */
        local_flush_tlb_mm(mm);

        /* Mark this mm as having no context anymore */
        mm->context.id = MMU_NO_CONTEXT;

        /* XXX This clear should ultimately be implemented elsewhere */
        __clear_bit(id, stale_map[cpu]);

        return id;
}
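/* Debug-only consistency checker for the context maps, compiled in
 * when DEBUG_MAP_CONSISTENCY is defined.
 */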
#ifdef DEBUG_MAP_CONSISTENCY
static void context_check_map(void)
{
        unsigned int id, nrf, nact;

        nrf = nact = 0;
        for (id = first_context; id <= last_context; id++) {
                int used = test_bit(id, context_map);
                if (!used)
                        nrf++;
                if (used != (context_mm[id] != NULL))
                        pr_err("MMU: Context %d is %s and MM is %p !\n",
                               id, used ? "used" : "free", context_mm[id]);
                if (context_mm[id] != NULL)
                        nact += context_mm[id]->context.active;
        }
        if (nrf != nr_free_contexts) {
                pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
                       nr_free_contexts, nrf);
                nr_free_contexts = nrf;
        }
        if (nact > num_online_cpus())
                pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
                       nact, num_online_cpus());
        if (first_context > 0 && !test_bit(0, context_map))
                pr_err("MMU: Context 0 has been freed !!!\n");
}
#else
static void context_check_map(void) { }
#endif
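/* Switch the MMU context for the given mm on this CPU: pick up the
 * existing context ID, or allocate (possibly stealing) a new one,
 * flush the local TLB if the ID is stale here, then program the MMU.
 */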
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
        unsigned int i, id, cpu = smp_processor_id();
        unsigned long *map;

        /* No lockless fast path .. yet */
        raw_spin_lock(&context_lock);

        pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
                cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
        /* Mark us active and the previous one not anymore */
        next->context.active++;
        if (prev) {
                pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
                WARN_ON(prev->context.active < 1);
                prev->context.active--;
        }

 again:
#endif /* CONFIG_SMP */

        /* If we already have a valid context, nothing else to do here */
        id = next->context.id;
        if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
                if (context_mm[id] != next)
                        pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
                               next, id, id, context_mm[id]);
#endif
                goto ctxt_ok;
        }

        /* We really don't have a context, let's try to acquire one */
        id = next_context;
        if (id > last_context)
                id = first_context;
        map = context_map;

        /* No more free contexts, let's try to steal one */
        if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
                if (num_online_cpus() > 1) {
                        id = steal_context_smp(id);
                        if (id == MMU_NO_CONTEXT)
                                goto again;
                        goto stolen;
                }
#endif /* CONFIG_SMP */
                id = steal_context_up(id);
                goto stolen;
        }
        nr_free_contexts--;

        /* We know there's at least one free context, try to find it */
        while (__test_and_set_bit(id, map)) {
                id = find_next_zero_bit(map, last_context+1, id);
                if (id > last_context)
                        id = first_context;
        }
 stolen:
        next_context = id + 1;
        context_mm[id] = next;
        next->context.id = id;
        pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

        context_check_map();
 ctxt_ok:

        /* If that context got marked stale on this CPU, then flush the
         * local TLB for it and unmark it before we use it
         */
        if (test_bit(id, stale_map[cpu])) {
                pr_hardcont(" | stale flush %d [%d..%d]",
                            id, cpu_first_thread_sibling(cpu),
                            cpu_last_thread_sibling(cpu));

                local_flush_tlb_mm(next);

                /* XXX This clear should ultimately be implemented elsewhere */
                for (i = cpu_first_thread_sibling(cpu);
                     i <= cpu_last_thread_sibling(cpu); i++) {
                        __clear_bit(id, stale_map[i]);
                }
        }

        /* Flick the MMU and release lock */
        pr_hardcont(" -> %d\n", id);
        set_context(id, next->pgd);
        raw_spin_unlock(&context_lock);
}
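
/*
 * Set up the context for a new address space.
 */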
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
        pr_hard("initing context for mm @%p\n", mm);

        mm->context.id = MMU_NO_CONTEXT;
        mm->context.active = 0;

        return 0;
}
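
/*
 * We're finished using the context for an address space.
 */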
void destroy_context(struct mm_struct *mm)
{
        unsigned long flags;
        unsigned int id;

        if (mm->context.id == MMU_NO_CONTEXT)
                return;

        WARN_ON(mm->context.active != 0);

        raw_spin_lock_irqsave(&context_lock, flags);
        id = mm->context.id;
        if (id != MMU_NO_CONTEXT) {
                __clear_bit(id, context_map);
                mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
                mm->context.active = 0;
#endif
                context_mm[id] = NULL;
                nr_free_contexts++;
        }
        raw_spin_unlock_irqrestore(&context_lock, flags);
}

#ifdef CONFIG_SMP

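/* CPU hotplug notifier: allocate a stale-context map when a secondary
 * CPU comes online and free it (also clearing the CPU from every mm's
 * cpumask) when the CPU goes away.
 */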
static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
                                            unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned int)(long)hcpu;
#ifdef CONFIG_HOTPLUG_CPU
        struct task_struct *p;
#endif

        /* We don't touch the boot CPU map, it's allocated at boot time */
        if (cpu == boot_cpuid)
                return NOTIFY_OK;

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
                stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
                kfree(stale_map[cpu]);
                stale_map[cpu] = NULL;

                /* We also clear the cpu_vm_mask bits of CPUs going away */
                read_lock(&tasklist_lock);
                for_each_process(p) {
                        if (p->mm)
                                cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
                }
                read_unlock(&tasklist_lock);
                break;
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
        .notifier_call = mmu_context_cpu_notify,
};

#endif /* CONFIG_SMP */
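
/*
 * Initialize the context management stuff.
 */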
void __init mmu_context_init(void)
{
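        /* Mark init_mm as being active on all possible CPUs since
         * we'll get called with prev == init_mm the first time
         * on each CPU.
         */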
        init_mm.context.active = NR_CPUS;
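
        /* The number of contexts depends on the MMU family:
         *
         * The MPC8xx has only 16 contexts, which are rotated through on
         * task switch; kernel pages are mapped shared among contexts.
         *
         * The IBM 4xx family has 256 contexts; a TID of zero disables
         * the PID/TID comparison, so kernel pages can be shared among
         * all contexts.
         *
         * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts,
         * so stealing should essentially never be needed there.
         */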
        if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
                first_context = 0;
                last_context = 15;
        } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
                first_context = 1;
                last_context = 65535;
        } else {
                first_context = 1;
                last_context = 255;
        }

#ifdef DEBUG_CLAMP_LAST_CONTEXT
        last_context = DEBUG_CLAMP_LAST_CONTEXT;
#endif
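
        /*
         * Allocate the maps used by context management
         */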
        context_map = alloc_bootmem(CTX_MAP_SIZE);
        context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1));
#ifndef CONFIG_SMP
        stale_map[0] = alloc_bootmem(CTX_MAP_SIZE);
#else
        stale_map[boot_cpuid] = alloc_bootmem(CTX_MAP_SIZE);

        register_cpu_notifier(&mmu_context_cpu_nb);
#endif

        printk(KERN_INFO
               "MMU: Allocated %zu bytes of context maps for %d contexts\n",
               2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
               last_context - first_context + 1);
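
        /*
         * Some processors have too few contexts to reserve one for
         * init_mm, and require using context 0 for a normal task.
         * Other processors reserve the use of context zero for the kernel.
         * This code assumes first_context < 32.
         */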
        context_map[0] = (1 << first_context) - 1;
        next_context = first_context;
        nr_free_contexts = last_context - first_context + 1;
}