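/*
 * Handling of MMU contexts (PIDs) on PowerPC processors that do not
 * use the hash-table MMU, such as the 8xx, 4xx/47x and Book3E
 * families. Context IDs are handed out from a bitmap; when the ID
 * space is exhausted, a context is stolen from another mm.
 */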
#ifdef DEBUG_HARDER
#define pr_hard(args...)	printk(KERN_DEBUG args)
#define pr_hardcont(args...)	printk(KERN_CONT args)
#else
#define pr_hard(args...)	do { } while (0)
#define pr_hardcont(args...)	do { } while (0)
#endif

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
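
/*
 * Context allocator state. The ID range is chosen at boot; everything
 * else is protected by context_lock. stale_map[cpu] tracks context IDs
 * that may still have stale entries in that CPU's TLB and so must be
 * flushed before the ID is used there again.
 */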
static unsigned int first_context, last_context;
static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock);

#define CTX_MAP_SIZE	\
	(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
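
/* Steal a context from a task that has one at the moment.
 *
 * This is used when we are running out of available PID numbers
 * on the processors.
 *
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :)
 *
 * This returns a new context id for the taken context, or
 * MMU_NO_CONTEXT if no context was available, in which case the
 * caller should retry. The whole mechanism runs under context_lock,
 * though the lock is dropped briefly while we wait for a candidate.
 */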
#ifdef CONFIG_SMP
static unsigned int steal_context_smp(unsigned int id)
{
	struct mm_struct *mm;
	unsigned int cpu, max, i;

	max = last_context - first_context;

	/* Attempt to free next_context first and then loop until we manage */
	while (max--) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/* We have a candidate victim, check if it's active: on SMP
		 * we cannot steal active contexts
		 */
		if (mm->context.active) {
			id++;
			if (id > last_context)
				id = first_context;
			continue;
		}
		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;

		/* Mark the context ID stale on all CPUs that used this mm.
		 * For threaded implementations, set it on all threads of
		 * each core represented in the mask.
		 */
		for_each_cpu(cpu, mm_cpumask(mm)) {
			for (i = cpu_first_thread_sibling(cpu);
			     i <= cpu_last_thread_sibling(cpu); i++) {
				if (stale_map[i])
					__set_bit(id, stale_map[i]);
			}
			/* Skip ahead to the last thread of this core */
			cpu = i - 1;
		}
		return id;
	}

	/* This will happen if you have more CPUs than available contexts,
	 * all we can do here is wait a bit and try again
	 */
	raw_spin_unlock(&context_lock);
	cpu_relax();
	raw_spin_lock(&context_lock);

	/* This will cause the caller to try again */
	return MMU_NO_CONTEXT;
}
#endif	/* CONFIG_SMP */
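
/*
 * Steal a context when only one CPU is online. Unlike the SMP
 * version we can steal any context, even an active one, since we
 * can flush the victim's TLB entries locally.
 */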
static unsigned int steal_context_up(unsigned int id)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();

	/* Pick up the victim mm */
	mm = context_mm[id];

	pr_hardcont(" | steal %d from 0x%p", id, mm);

	/* Flush the TLB for that context */
	local_flush_tlb_mm(mm);

	/* Mark this mm as having no context anymore */
	mm->context.id = MMU_NO_CONTEXT;

	/* XXX This clear should ultimately be part of local_flush_tlb_mm */
	__clear_bit(id, stale_map[cpu]);

	return id;
}
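
/* Consistency checking of the context map and counters, compiled in
 * only when DEBUG_MAP_CONSISTENCY is defined.
 */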
#ifdef DEBUG_MAP_CONSISTENCY
static void context_check_map(void)
{
	unsigned int id, nrf, nact;

	nrf = nact = 0;
	for (id = first_context; id <= last_context; id++) {
		int used = test_bit(id, context_map);
		if (!used)
			nrf++;
		if (used != (context_mm[id] != NULL))
			pr_err("MMU: Context %d is %s and MM is %p !\n",
			       id, used ? "used" : "free", context_mm[id]);
		if (context_mm[id] != NULL)
			nact += context_mm[id]->context.active;
	}
	if (nrf != nr_free_contexts) {
		pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
		       nr_free_contexts, nrf);
		nr_free_contexts = nrf;
	}
	if (nact > num_online_cpus())
		pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
		       nact, num_online_cpus());
	if (first_context > 0 && !test_bit(0, context_map))
		pr_err("MMU: Context 0 has been freed !!!\n");
}
#else
static void context_check_map(void) { }
#endif
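
/*
 * Switch the MMU over to the "next" mm: allocate (or steal) a context
 * ID if the mm doesn't have one yet, flush any stale TLB entries for
 * that ID on this CPU, then program the MMU with the new context.
 */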
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned int i, id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path ... yet */
	raw_spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif	/* CONFIG_SMP */

	/* If we already have a valid context, skip all of the allocation
	 * and jump straight to the tail of the function
	 */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
		if (context_mm[id] != next)
			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
			       next, id, id, context_mm[id]);
#endif
		goto ctxt_ok;
	}

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif	/* CONFIG_SMP */
		id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context+1, id);
		if (id > last_context)
			id = first_context;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_hardcont(" | stale flush %d [%d..%d]",
			    id, cpu_first_thread_sibling(cpu),
			    cpu_last_thread_sibling(cpu));

		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of
		 * local_flush_tlb_mm
		 */
		for (i = cpu_first_thread_sibling(cpu);
		     i <= cpu_last_thread_sibling(cpu); i++) {
			if (stale_map[i])
				__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	pr_hardcont(" -> %d\n", id);
	set_context(id, next->pgd);
	raw_spin_unlock(&context_lock);
}
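
/*
 * Set up the context for a new address space.
 */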
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	pr_hard("initing context for mm @%p\n", mm);

	mm->context.id = MMU_NO_CONTEXT;
	mm->context.active = 0;

#ifdef CONFIG_PPC_MM_SLICES
	/* Fresh contexts start out with the default user page size */
	if (slice_mm_new_context(mm))
		slice_set_user_psize(mm, mmu_virtual_psize);
#endif

	return 0;
}
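
/*
 * We're finished using the context for an address space.
 */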
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int id;

	if (mm->context.id == MMU_NO_CONTEXT)
		return;

	WARN_ON(mm->context.active != 0);

	raw_spin_lock_irqsave(&context_lock, flags);
	id = mm->context.id;
	if (id != MMU_NO_CONTEXT) {
		__clear_bit(id, context_map);
		mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
		mm->context.active = 0;
#endif
		context_mm[id] = NULL;
		nr_free_contexts++;
	}
	raw_spin_unlock_irqrestore(&context_lock, flags);
}

#ifdef CONFIG_SMP
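
/* CPU hotplug: allocate a CPU's stale map when it comes up and free
 * it again when the CPU goes away.
 */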
static int mmu_context_cpu_notify(struct notifier_block *self,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;

	/* We don't touch the boot CPU's map, it's allocated at boot
	 * time and kept around forever
	 */
	if (cpu == boot_cpuid)
		return NOTIFY_OK;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
		stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
		kfree(stale_map[cpu]);
		stale_map[cpu] = NULL;

		/* We also clear the cpu_vm_mask bits of CPUs going away */
		clear_tasks_mm_cpumask(cpu);
		break;
#endif	/* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block mmu_context_cpu_nb = {
	.notifier_call	= mmu_context_cpu_notify,
};

#endif	/* CONFIG_SMP */
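
/*
 * Initialize the context management stuff.
 */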
void __init mmu_context_init(void)
{
	/* Mark init_mm as being active on all possible CPUs since
	 * we'll get called with prev == init_mm the first time
	 * we schedule on a given CPU
	 */
	init_mm.context.active = NR_CPUS;
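
	/*
	 * Pick a range of context IDs matching what the hardware
	 * supports:
	 *
	 *  - The MPC8xx has only 16 contexts; we simply rotate through
	 *    them on each task switch.
	 *  - The IBM 47x core supports 16-bit PIDs, thus 65535 contexts.
	 *  - Book3E parts advertise their PID width in MMUCFG.
	 *  - Everything else (4xx, classic BookE) gets 256 contexts.
	 *
	 * Except on the 8xx, context 0 is kept for the kernel: a TID of
	 * zero disables the PID/TID comparison, so kernel pages can be
	 * shared among all contexts.
	 */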
	if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
		first_context = 0;
		last_context = 15;
	} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
		first_context = 1;
		last_context = 65535;
	} else
#ifdef CONFIG_PPC_BOOK3E_MMU
	if (mmu_has_feature(MMU_FTR_TYPE_3E)) {
		u32 mmucfg = mfspr(SPRN_MMUCFG);
		u32 pid_bits = (mmucfg & MMUCFG_PIDSIZE_MASK)
				>> MMUCFG_PIDSIZE_SHIFT;
		first_context = 1;
		last_context = (1UL << (pid_bits + 1)) - 1;
	} else
#endif
	{
		first_context = 1;
		last_context = 255;
	}

#ifdef DEBUG_CLAMP_LAST_CONTEXT
	last_context = DEBUG_CLAMP_LAST_CONTEXT;
#endif
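
	/*
	 * Allocate the maps used by context management
	 */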
	context_map = alloc_bootmem(CTX_MAP_SIZE);
	context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1));
#ifndef CONFIG_SMP
	stale_map[0] = alloc_bootmem(CTX_MAP_SIZE);
#else
	stale_map[boot_cpuid] = alloc_bootmem(CTX_MAP_SIZE);

	register_cpu_notifier(&mmu_context_cpu_nb);
#endif

	printk(KERN_INFO
	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
	       2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
	       last_context - first_context + 1);
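
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for kernel use.
	 */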
	context_map[0] = (1 << first_context) - 1;
	next_context = first_context;
	nr_free_contexts = last_context - first_context + 1;
}