/*
 * TLB support routines.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/delay.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pal.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/tlb.h>

static struct {
	u64 mask;		/* mask of supported purge page-sizes */
	unsigned long max_bits;	/* log2 of largest supported purge page-size */
} purge;

struct ia64_ctx ia64_ctx = {
	.lock = __SPIN_LOCK_UNLOCKED(ia64_ctx.lock),
	.next = 1,
	.max_ctx = ~0U
};

DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
DEFINE_PER_CPU(u8, ia64_tr_num);  /* number of TR slots on this CPU */
DEFINE_PER_CPU(u8, ia64_tr_used); /* max slot number used by the kernel */

struct ia64_tr_entry *ia64_idtrs[NR_CPUS];

/*
 * Initializes the ia64_ctx.bitmap and ia64_ctx.flushmap arrays based
 * on max_ctx+1.  Called after cpu_init() has set ia64_ctx.max_ctx from
 * the maximum region ID (RID) supported by the boot CPU.
 */
void __init
mmu_context_init (void)
{
	ia64_ctx.bitmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
					 SMP_CACHE_BYTES);
	ia64_ctx.flushmap = memblock_alloc((ia64_ctx.max_ctx + 1) >> 3,
					   SMP_CACHE_BYTES);
}
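
/*
 * Illustrative sizing (hypothetical value): if cpu_init() capped
 * ia64_ctx.max_ctx at (1 << 18) - 1, each of the two bitmaps above
 * would take (max_ctx + 1) >> 3 = 32 KiB of boot memory.
 */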

/*
 * Acquire ia64_ctx.lock before calling this function!
 */
void
wrap_mmu_context (struct mm_struct *mm)
{
	int i, cpu;
	unsigned long flush_bit;

	for (i=0; i <= ia64_ctx.max_ctx / BITS_PER_LONG; i++) {
		flush_bit = xchg(&ia64_ctx.flushmap[i], 0);
		ia64_ctx.bitmap[i] ^= flush_bit;
	}

	/* use offset at 300 to skip daemons */
	ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, 300);
	ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
				ia64_ctx.max_ctx, ia64_ctx.next);

	/*
	 * Can't flush the remote TLBs from here; instead, flag every
	 * other online CPU so it flushes its own TLB the next time it
	 * switches context (see ia64_need_tlb_flush).
	 */
	cpu = get_cpu(); /* prevent preemption/migration */
	for_each_online_cpu(i)
		if (i != cpu)
			per_cpu(ia64_need_tlb_flush, i) = 1;
	put_cpu();
	local_flush_tlb_all();
}

/*
 * Implement "spinaphores" ... like counting semaphores, but they
 * spin instead of sleeping.  Each caller takes a ticket; at most
 * 'val' (see spinaphore_init()) ticket holders proceed at once, and
 * the rest spin until a token is released by up_spin().
 */
struct spinaphore {
	unsigned long ticket;
	unsigned long serve;
};

static inline void spinaphore_init(struct spinaphore *ss, int val)
{
	ss->ticket = 0;
	ss->serve = val;
}

static inline void down_spin(struct spinaphore *ss)
{
	unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve;

	/* fast path: our ticket is already within the serve window */
	if (time_before(t, ss->serve))
		return;

	/* invalidate the ALAT so the first ld8.c.nc below really reloads */
	ia64_invala();

	for (;;) {
		asm volatile ("ld8.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
		if (time_before(t, serve))
			return;
		cpu_relax();
	}
}

static inline void up_spin(struct spinaphore *ss)
{
	ia64_fetchadd(1, &ss->serve, rel);
}
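
/*
 * Usage sketch (mirrors ia64_global_tlb_purge() below): at most nptcg
 * CPUs may issue ptc.g concurrently.
 *
 *	spinaphore_init(&ptcg_sem, nptcg);
 *	...
 *	down_spin(&ptcg_sem);
 *	... issue ptc.ga / srlz.i ...
 *	up_spin(&ptcg_sem);
 */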

static struct spinaphore ptcg_sem;
static u16 nptcg = 1;
static int need_ptcg_sem = 1;
static int toolatetochangeptcgsem = 0;

/*
 * Kernel parameter "nptcg=" overrides the maximum number of
 * simultaneous ptc.g purges reported by PAL or by the PALO table;
 * setup_ptcg_sem() gives it highest precedence.
 */
static int __init
set_nptcg(char *str)
{
	int value = 0;

	get_option(&str, &value);
	setup_ptcg_sem(value, NPTCG_FROM_KERNEL_PARAMETER);

	return 1;
}

__setup("nptcg=", set_nptcg);
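
/*
 * Example (hypothetical boot line): booting with "nptcg=2" limits the
 * kernel to two concurrent global TLB purges, regardless of what PAL
 * or the PALO table advertise.
 */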

/*
 * The maximum number of simultaneous ptc.g purges supported by the
 * platform can come from three places, in increasing order of
 * precedence:
 *
 *  - PAL_VM_SUMMARY (we keep the smallest value reported by any cpu),
 *  - the PALO table (overrides PAL_VM_SUMMARY),
 *  - the "nptcg=" kernel parameter (overrides both).
 *
 * PAL_MAX_PURGES / PALO_MAX_TLB_PURGES are sentinels meaning "no
 * platform limit", in which case the spinaphore is bypassed entirely
 * (need_ptcg_sem = 0).  The semaphore is also unnecessary when there
 * are at least as many purge tokens as possible cpus.
 */
void
setup_ptcg_sem(int max_purges, int nptcg_from)
{
	static int kp_override;
	static int palo_override;
	static int firstcpu = 1;

	if (toolatetochangeptcgsem) {
		if (nptcg_from == NPTCG_FROM_PAL && max_purges == 0)
			BUG_ON(1 < nptcg);
		else
			BUG_ON(max_purges < nptcg);
		return;
	}

	if (nptcg_from == NPTCG_FROM_KERNEL_PARAMETER) {
		kp_override = 1;
		nptcg = max_purges;
		goto resetsema;
	}
	if (kp_override) {
		need_ptcg_sem = num_possible_cpus() > nptcg;
		return;
	}

	if (nptcg_from == NPTCG_FROM_PALO) {
		palo_override = 1;

		/* a PALO entry of zero means the platform cannot do ptc.g at all */
		if (max_purges == 0)
			panic("Whoa! Platform does not support global TLB purges.\n");
		nptcg = max_purges;
		if (nptcg == PALO_MAX_TLB_PURGES) {
			need_ptcg_sem = 0;
			return;
		}
		goto resetsema;
	}
	if (palo_override) {
		if (nptcg != PALO_MAX_TLB_PURGES)
			need_ptcg_sem = (num_possible_cpus() > nptcg);
		return;
	}

	/* when PAL_VM_SUMMARY reports 0, treat it as one purge at a time */
	if (max_purges == 0)
		max_purges = 1;

	if (firstcpu) {
		nptcg = max_purges;
		firstcpu = 0;
	}
	if (max_purges < nptcg)
		nptcg = max_purges;
	if (nptcg == PAL_MAX_PURGES) {
		need_ptcg_sem = 0;
		return;
	} else
		need_ptcg_sem = (num_possible_cpus() > nptcg);

resetsema:
	spinaphore_init(&ptcg_sem, max_purges);
}
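
/*
 * Example walk-through (hypothetical 8-cpu box): if PAL_VM_SUMMARY
 * reports max_purges = 2 on every cpu, nptcg settles at 2 and, since
 * num_possible_cpus() = 8 > 2, need_ptcg_sem stays set and the
 * spinaphore is armed with two tokens.  Had the PALO table reported
 * PALO_MAX_TLB_PURGES instead, the semaphore would be bypassed
 * entirely.
 */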

void
ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
		       unsigned long end, unsigned long nbits)
{
	struct mm_struct *active_mm = current->active_mm;

	toolatetochangeptcgsem = 1;

	if (mm != active_mm) {
		/* Restore region IDs for mm */
		if (mm && active_mm) {
			activate_context(mm);
		} else {
			flush_tlb_all();
			return;
		}
	}

	if (need_ptcg_sem)
		down_spin(&ptcg_sem);

	do {
		/*
		 * Flush ALAT entries also.
		 */
		ia64_ptcga(start, (nbits << 2));
		ia64_srlz_i();
		start += (1UL << nbits);
	} while (start < end);

	if (need_ptcg_sem)
		up_spin(&ptcg_sem);

	if (mm != active_mm) {
		activate_context(active_mm);
	}
}

void
local_flush_tlb_all (void)
{
	unsigned long i, j, flags, count0, count1, stride0, stride1, addr;

	addr    = local_cpu_data->ptce_base;
	count0  = local_cpu_data->ptce_count[0];
	count1  = local_cpu_data->ptce_count[1];
	stride0 = local_cpu_data->ptce_stride[0];
	stride1 = local_cpu_data->ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			ia64_ptce(addr);
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
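
/*
 * Illustrative geometry (hypothetical PAL_PTCE_INFO values): with
 * count = {4, 8} and stride = {0x100000, 0x2000}, the nested loops
 * above issue 4 * 8 = 32 ptc.e instructions; the inner loop advances
 * the address by 0x2000 eight times, then the outer loop adds another
 * 0x100000 before the next block.
 */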

void
flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
		 unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size = end - start;
	unsigned long nbits;

#ifndef CONFIG_SMP
	if (mm != current->active_mm) {
		mm->context = 0;
		return;
	}
#endif

	/* round up to the smallest supported purge page-size */
	nbits = ia64_fls(size + 0xfff);
	while (unlikely (((1UL << nbits) & purge.mask) == 0) &&
			(nbits < purge.max_bits))
		++nbits;
	if (nbits > purge.max_bits)
		nbits = purge.max_bits;
	start &= ~((1UL << nbits) - 1);

	preempt_disable();
#ifdef CONFIG_SMP
	if (mm != current->active_mm || cpumask_weight(mm_cpumask(mm)) != 1) {
		platform_global_tlb_purge(mm, start, end, nbits);
		preempt_enable();
		return;
	}
#endif
	do {
		ia64_ptcl(start, (nbits<<2));
		start += (1UL << nbits);
	} while (start < end);
	preempt_enable();
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
EXPORT_SYMBOL(flush_tlb_range);
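
/*
 * Worked example (using the architected fallback purge.mask of
 * 0x115557000 from ia64_tlb_init() below): flushing a 16 KB range
 * yields nbits = ia64_fls(0x4000 + 0xfff) = 14; bit 14 is set in the
 * mask, so one 16 KB purge covers it.  A 32 KB range yields nbits = 15,
 * which the mask lacks, so nbits is bumped to 16 and a single 64 KB
 * purge is issued over the aligned-down start address.
 */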

void ia64_tlb_init(void)
{
	ia64_ptce_info_t uninitialized_var(ptce_info); /* filled in by ia64_get_ptce() */
	u64 tr_pgbits;
	long status;
	pal_vm_info_1_u_t vm_info_1;
	pal_vm_info_2_u_t vm_info_2;
	int cpu = smp_processor_id();

	if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
		printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
		       "defaulting to architected purge page-sizes.\n", status);
		purge.mask = 0x115557000UL;
	}
	purge.max_bits = ia64_fls(purge.mask);

	ia64_get_ptce(&ptce_info);
	local_cpu_data->ptce_base = ptce_info.base;
	local_cpu_data->ptce_count[0] = ptce_info.count[0];
	local_cpu_data->ptce_count[1] = ptce_info.count[1];
	local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
	local_cpu_data->ptce_stride[1] = ptce_info.stride[1];

	local_flush_tlb_all();	/* nuke left overs from bootstrapping... */
	status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2);

	if (status) {
		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
		per_cpu(ia64_tr_num, cpu) = 8;
		return;
	}
	per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
	if (per_cpu(ia64_tr_num, cpu) >
				(vm_info_1.pal_vm_info_1_s.max_dtr_entry+1))
		per_cpu(ia64_tr_num, cpu) =
				vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;
	if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {
		static int justonce = 1;
		per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;
		if (justonce) {
			justonce = 0;
			printk(KERN_DEBUG "TR register number exceeds "
			       "IA64_TR_ALLOC_MAX!\n");
		}
	}
}

/*
 * is_tr_overlap
 *
 * Check overlap with inserted TRs.
 */
static int is_tr_overlap(struct ia64_tr_entry *p, u64 va, u64 log_size)
{
	u64 tr_log_size;
	u64 tr_end;
	u64 va_rr = ia64_get_rr(va);
	u64 va_rid = RR_TO_RID(va_rr);
	u64 va_end = va + (1UL << log_size) - 1;

	if (va_rid != RR_TO_RID(p->rr))
		return 0;
	tr_log_size = (p->itir & 0xff) >> 2;
	tr_end = p->ifa + (1UL << tr_log_size) - 1;

	if (va > tr_end || p->ifa > va_end)
		return 0;
	return 1;
}
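
/*
 * Example (illustrative addresses): for a tracked TR with
 * p->ifa = 0xa000000000000000 and a 16 MB mapping (tr_log_size = 24),
 * tr_end = p->ifa + 0xffffff.  Inserting va = p->ifa + 0x800000 with
 * log_size = 24 in the same region ID overlaps and is rejected;
 * va = p->ifa + 0x1000000 starts past tr_end and does not.
 */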

/*
 * ia64_itr_entry: allocate a TR slot and insert a translation register
 * (virtual mode).
 *
 * target_mask : 0x1 : itr, 0x2 : dtr, 0x3 : idtr
 * va          : virtual address to be mapped
 * pte         : pte entry to be inserted
 * log_size    : log2 of the size of the range the TR covers
 *
 * Return value: < 0  : error number
 *               >= 0 : slot number allocated for the TR
 *
 * Must be called with preemption disabled.
 */
int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
{
	int i, r;
	unsigned long psr;
	struct ia64_tr_entry *p;
	int cpu = smp_processor_id();

	if (!ia64_idtrs[cpu]) {
		ia64_idtrs[cpu] = kmalloc_array(2 * IA64_TR_ALLOC_MAX,
						sizeof(struct ia64_tr_entry),
						GFP_KERNEL);
		if (!ia64_idtrs[cpu])
			return -ENOMEM;
	}
	r = -EINVAL;
	/* check for overlap with existing itr/dtr mappings */
	if (target_mask & 0x1) {
		p = ia64_idtrs[cpu];
		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
		     i++, p++) {
			if (p->pte & 0x1)
				if (is_tr_overlap(p, va, log_size)) {
					printk(KERN_DEBUG "Overlapped Entry "
					       "Inserted for TR Register!!\n");
					goto out;
				}
		}
	}
	if (target_mask & 0x2) {
		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
		     i++, p++) {
			if (p->pte & 0x1)
				if (is_tr_overlap(p, va, log_size)) {
					printk(KERN_DEBUG "Overlapped Entry "
					       "Inserted for TR Register!!\n");
					goto out;
				}
		}
	}

	/* find a slot that is free in every requested TR file */
	for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
		switch (target_mask & 0x3) {
		case 1:
			if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
				goto found;
			continue;
		case 2:
			if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
				goto found;
			continue;
		case 3:
			if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
			    !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
				goto found;
			continue;
		default:
			r = -EINVAL;
			goto out;
		}
	}
found:
	if (i >= per_cpu(ia64_tr_num, cpu))
		return -EBUSY;

	/* record TR info for use by the MCA handler */
	if (i > per_cpu(ia64_tr_used, cpu))
		per_cpu(ia64_tr_used, cpu) = i;

	psr = ia64_clear_ic();
	if (target_mask & 0x1) {
		ia64_itr(0x1, i, va, pte, log_size);
		ia64_srlz_i();
		p = ia64_idtrs[cpu] + i;
		p->ifa = va;
		p->pte = pte;
		p->itir = log_size << 2;
		p->rr = ia64_get_rr(va);
	}
	if (target_mask & 0x2) {
		ia64_itr(0x2, i, va, pte, log_size);
		ia64_srlz_i();
		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
		p->ifa = va;
		p->pte = pte;
		p->itir = log_size << 2;
		p->rr = ia64_get_rr(va);
	}
	ia64_set_psr(psr);
	r = i;
out:
	return r;
}
EXPORT_SYMBOL_GPL(ia64_itr_entry);
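
/*
 * Usage sketch (hypothetical caller, preemption already disabled):
 * pin a 16 MB mapping into both the instruction and data TRs, then
 * release it later via the slot number that was returned.
 *
 *	int slot = ia64_itr_entry(0x3, va, pte, 24);  /- 0x3 = itr | dtr -/
 *	if (slot < 0)
 *		return slot;
 *	...
 *	ia64_ptr_entry(0x3, slot);
 */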

/*
 * ia64_ptr_entry: purge a translation register and free its slot.
 *
 * target_mask : 0x1 : purge itr, 0x2 : purge dtr, 0x3 : purge idtr
 * slot        : slot number to be freed
 *
 * Must be called with preemption disabled.
 */
void ia64_ptr_entry(u64 target_mask, int slot)
{
	int cpu = smp_processor_id();
	int i;
	struct ia64_tr_entry *p;

	if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu))
		return;

	if (target_mask & 0x1) {
		p = ia64_idtrs[cpu] + slot;
		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir >> 2)) {
			p->pte = 0;
			ia64_ptr(0x1, p->ifa, p->itir >> 2);
			ia64_srlz_i();
		}
	}

	if (target_mask & 0x2) {
		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir >> 2)) {
			p->pte = 0;
			ia64_ptr(0x2, p->ifa, p->itir >> 2);
			ia64_srlz_i();
		}
	}

	for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
		if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
		    ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
			break;
	}
	per_cpu(ia64_tr_used, cpu) = i;
}
EXPORT_SYMBOL_GPL(ia64_ptr_entry);