/*
 * MMU context (PID) management for PowerPC processors that do not use
 * the hash-table MMU (8xx, 47x and other nohash cores).
 *
 * Context IDs are handed out from a global bitmap protected by
 * context_lock.  When the map is exhausted, an ID is "stolen" from
 * another mm: on UP the victim is flushed from the local TLB right
 * away, on SMP the ID is marked stale on every CPU that was using it
 * and flushed lazily the next time such a CPU switches to that ID.
 */
#ifdef DEBUG_HARDER
#define pr_hard(args...)	printk(KERN_DEBUG args)
#define pr_hardcont(args...)	printk(KERN_CONT args)
#else
#define pr_hard(args...)	do { } while(0)
#define pr_hardcont(args...)	do { } while(0)
#endif

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include <mm/mmu_decl.h>

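/*
 * Range of hardware context (PID) numbers handed out to address spaces.
 * The top of the range depends on the core: 16 on the 8xx, 65535 on the
 * 47x and 255 on the other nohash cores covered here.  It can also be
 * clamped artificially with DEBUG_CLAMP_LAST_CONTEXT for debugging.
 * ID 0 is never handed out (FIRST_CONTEXT is 1).
 */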
#define FIRST_CONTEXT 1
#ifdef DEBUG_CLAMP_LAST_CONTEXT
#define LAST_CONTEXT DEBUG_CLAMP_LAST_CONTEXT
#elif defined(CONFIG_PPC_8xx)
#define LAST_CONTEXT 16
#elif defined(CONFIG_PPC_47x)
#define LAST_CONTEXT 65535
#else
#define LAST_CONTEXT 255
#endif

static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
#ifdef CONFIG_SMP
static unsigned long *stale_map[NR_CPUS];
#endif
static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock);

#define CTX_MAP_SIZE	\
	(sizeof(unsigned long) * (LAST_CONTEXT / BITS_PER_LONG + 1))

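/*
 * Steal a context from some other mm when the map is full (SMP path).
 *
 * Starting from @id, look for a context whose owner is not currently
 * active on any CPU.  The victim mm loses its ID, and the ID is marked
 * stale on every hardware thread of every CPU in the victim's cpumask
 * so those CPUs flush their TLB before reusing it.  If a full pass
 * finds nothing, drop context_lock briefly to let other CPUs make
 * progress and return MMU_NO_CONTEXT so the caller retries.
 *
 * Called with context_lock held; may temporarily release it.
 */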
#ifdef CONFIG_SMP
static unsigned int steal_context_smp(unsigned int id)
{
	struct mm_struct *mm;
	unsigned int cpu, max, i;

	max = LAST_CONTEXT - FIRST_CONTEXT;

	/* Try the requested id first, then walk the whole range once */
	while (max--) {
		/* Pick up the candidate victim mm */
		mm = context_mm[id];

		/* On SMP we cannot steal a context that is currently
		 * active on some CPU, so skip those.
		 */
		if (mm->context.active) {
			id++;
			if (id > LAST_CONTEXT)
				id = FIRST_CONTEXT;
			continue;
		}
		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* The victim mm no longer has a context */
		mm->context.id = MMU_NO_CONTEXT;

		/* Mark the id stale on every hardware thread of every
		 * CPU that ran the victim mm, so they flush their TLB
		 * before reusing it.
		 */
		for_each_cpu(cpu, mm_cpumask(mm)) {
			for (i = cpu_first_thread_sibling(cpu);
			     i <= cpu_last_thread_sibling(cpu); i++) {
				if (stale_map[i])
					__set_bit(id, stale_map[i]);
			}
			cpu = i - 1;
		}
		return id;
	}

	/* Every context is active (more CPUs than contexts): let the
	 * other CPUs make progress, then have the caller try again.
	 */
	raw_spin_unlock(&context_lock);
	cpu_relax();
	raw_spin_lock(&context_lock);

	/* The caller retries on MMU_NO_CONTEXT */
	return MMU_NO_CONTEXT;
}
#endif  /* CONFIG_SMP */

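/*
 * Steal all contexts at once (only used on the 8xx, which has very few
 * of them): every mm loses its ID, all IDs but FIRST_CONTEXT go back to
 * the free map, the whole TLB is flushed, and FIRST_CONTEXT is returned
 * for the caller to hand to the incoming mm.
 */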
static unsigned int steal_all_contexts(void)
{
	struct mm_struct *mm;
#ifdef CONFIG_SMP
	int cpu = smp_processor_id();
#endif
	unsigned int id;

	for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* The victim mm no longer has a context; free every id
		 * except FIRST_CONTEXT, which the caller will take.
		 */
		mm->context.id = MMU_NO_CONTEXT;
		if (id != FIRST_CONTEXT) {
			context_mm[id] = NULL;
			__clear_bit(id, context_map);
#ifdef DEBUG_MAP_CONSISTENCY
			mm->context.active = 0;
#endif
		}
#ifdef CONFIG_SMP
		__clear_bit(id, stale_map[cpu]);
#endif
	}

	/* Flush the whole TLB: every context was just invalidated */
	_tlbil_all();

	nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT;

	return FIRST_CONTEXT;
}

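/*
 * Steal a context when only one CPU is running (UP, or SMP with a
 * single CPU online): flush the victim mm from the local TLB and reuse
 * its ID immediately, no stale-map round trip needed.
 */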
static unsigned int steal_context_up(unsigned int id)
{
	struct mm_struct *mm;
#ifdef CONFIG_SMP
	int cpu = smp_processor_id();
#endif

	/* Pick up the victim mm */
	mm = context_mm[id];

	pr_hardcont(" | steal %d from 0x%p", id, mm);

	/* Flush the victim's translations from the local TLB */
	local_flush_tlb_mm(mm);

	/* The victim mm no longer has a context */
	mm->context.id = MMU_NO_CONTEXT;

	/* The id was just flushed, so it is not stale on this CPU */
#ifdef CONFIG_SMP
	__clear_bit(id, stale_map[cpu]);
#endif

	return id;
}

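/*
 * Debug-only sanity check (DEBUG_MAP_CONSISTENCY): verify that
 * context_map, context_mm[] and the free/active counters agree.
 */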
#ifdef DEBUG_MAP_CONSISTENCY
static void context_check_map(void)
{
	unsigned int id, nrf, nact;

	nrf = nact = 0;
	for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
		int used = test_bit(id, context_map);
		if (!used)
			nrf++;
		if (used != (context_mm[id] != NULL))
			pr_err("MMU: Context %d is %s and MM is %p !\n",
			       id, used ? "used" : "free", context_mm[id]);
		if (context_mm[id] != NULL)
			nact += context_mm[id]->context.active;
	}
	if (nrf != nr_free_contexts) {
		pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
		       nr_free_contexts, nrf);
		nr_free_contexts = nrf;
	}
	if (nact > num_online_cpus())
		pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
		       nact, num_online_cpus());
	if (FIRST_CONTEXT > 0 && !test_bit(0, context_map))
		pr_err("MMU: Context 0 has been freed !!!\n");
}
#else
static void context_check_map(void) { }
#endif

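/*
 * Activate a context for @next on this CPU, allocating or stealing an
 * ID if it does not have one yet.  If the ID is marked stale on this
 * CPU (it was stolen from another mm while this CPU may still have
 * held TLB entries tagged with it), flush the local TLB before use.
 * Finally program the new ID into the MMU with set_context().
 */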
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned int id;
#ifdef CONFIG_SMP
	unsigned int i, cpu = smp_processor_id();
#endif
	unsigned long *map;

	/* No lockless fast path yet, everything is serialized */
	raw_spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
	/* Mark the incoming mm active here and drop the outgoing one */
	next->context.active++;
	if (prev) {
		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif

	/* If the mm already has a valid context, just use it */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
		if (context_mm[id] != next)
			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
			       next, id, id, context_mm[id]);
#endif
		goto ctxt_ok;
	}

	/* No context yet: start the search at next_context */
	id = next_context;
	if (id > LAST_CONTEXT)
		id = FIRST_CONTEXT;
	map = context_map;

	/* No free context left, steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif
		if (IS_ENABLED(CONFIG_PPC_8xx))
			id = steal_all_contexts();
		else
			id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* At least one context is free, find it in the map */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, LAST_CONTEXT+1, id);
		if (id > LAST_CONTEXT)
			id = FIRST_CONTEXT;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

	context_check_map();
 ctxt_ok:

	/* If the id was marked stale on this CPU, flush the local TLB
	 * and clear the stale bits on all our hardware threads before
	 * using it.
	 */
#ifdef CONFIG_SMP
	if (test_bit(id, stale_map[cpu])) {
		pr_hardcont(" | stale flush %d [%d..%d]",
			    id, cpu_first_thread_sibling(cpu),
			    cpu_last_thread_sibling(cpu));

		local_flush_tlb_mm(next);

		/* Clear the stale bit for this id on all sibling threads */
		for (i = cpu_first_thread_sibling(cpu);
		     i <= cpu_last_thread_sibling(cpu); i++) {
			if (stale_map[i])
				__clear_bit(id, stale_map[i]);
		}
	}
#endif

	/* Program the new context into the MMU and release the lock */
	pr_hardcont(" -> %d\n", id);
	set_context(id, next->pgd);
	raw_spin_unlock(&context_lock);
}

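/*
 * Set up the context for a new address space: no hardware ID yet, one
 * is assigned lazily on the first switch_mmu_context().
 */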
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	pr_hard("initing context for mm @%p\n", mm);

	/*
	 * Only initialize the slice state for a brand-new mm
	 * (context.id == 0); an mm that inherited its context, e.g.
	 * across fork(), already carries valid slice information.
	 * Either way the mm starts out with no hardware context.
	 */
	if (mm->context.id == 0)
		slice_init_new_context_exec(mm);
	mm->context.id = MMU_NO_CONTEXT;
	mm->context.active = 0;
	pte_frag_set(&mm->context, NULL);
	return 0;
}

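/*
 * Tear down the context when an address space goes away: return its ID
 * to the free map.  The mm must no longer be active on any CPU.
 */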
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int id;

	if (mm->context.id == MMU_NO_CONTEXT)
		return;

	WARN_ON(mm->context.active != 0);

	raw_spin_lock_irqsave(&context_lock, flags);
	id = mm->context.id;
	if (id != MMU_NO_CONTEXT) {
		__clear_bit(id, context_map);
		mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
		mm->context.active = 0;
#endif
		context_mm[id] = NULL;
		nr_free_contexts++;
	}
	raw_spin_unlock_irqrestore(&context_lock, flags);
}

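/*
 * CPU hotplug callbacks: each secondary CPU needs its own stale-context
 * bitmap, allocated when the CPU is prepared and freed when it dies.
 * The boot CPU's map is allocated once in mmu_context_init() and kept.
 */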
#ifdef CONFIG_SMP
static int mmu_ctx_cpu_prepare(unsigned int cpu)
{
	/* The boot CPU's map is allocated at init time and never freed */
	if (cpu == boot_cpuid)
		return 0;

	pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
	stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
	return 0;
}

static int mmu_ctx_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (cpu == boot_cpuid)
		return 0;

	pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
	kfree(stale_map[cpu]);
	stale_map[cpu] = NULL;

	/* Drop the dying CPU from the cpumask of every task's mm */
	clear_tasks_mm_cpumask(cpu);
#endif
	return 0;
}

#endif /* CONFIG_SMP */

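/*
 * Boot-time initialization of the context management state: allocate
 * the context bitmap, the id -> mm table and the boot CPU's stale map,
 * register the hotplug callbacks and reserve the low IDs.
 */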
void __init mmu_context_init(void)
{
	/*
	 * init_mm is permanently active on every CPU, so the SMP stealing
	 * path never picks it as a victim.
	 */
	init_mm.context.active = NR_CPUS;

	/*
	 * Allocate the maps used by context management
	 */
	context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
	if (!context_map)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      CTX_MAP_SIZE);
	context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1),
				    SMP_CACHE_BYTES);
	if (!context_mm)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(void *) * (LAST_CONTEXT + 1));
#ifdef CONFIG_SMP
	stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
	if (!stale_map[boot_cpuid])
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      CTX_MAP_SIZE);

	cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
				  "powerpc/mmu/ctx:prepare",
				  mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
#endif

	printk(KERN_INFO
	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
	       2 * CTX_MAP_SIZE + (sizeof(void *) * (LAST_CONTEXT + 1)),
	       LAST_CONTEXT - FIRST_CONTEXT + 1);

	/*
	 * Reserve the context IDs below FIRST_CONTEXT (here only ID 0)
	 * so they are never handed to a user mm.  This expression assumes
	 * FIRST_CONTEXT is smaller than the width of an int.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_context = FIRST_CONTEXT;
	nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT + 1;
}