/*
 * PA-RISC cache and TLB management.
 *
 * Flushing routines for the data and instruction caches and the TLB,
 * driven by the cache geometry reported by PDC firmware.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);

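/*
 * Serialize TLB purges.  On some machines (e.g. ones with a Merced bus)
 * only a single TLB purge broadcast may be outstanding at a time, and it
 * is up to software to guarantee that; every TLB flush in this file
 * therefore takes pa_tlb_lock via purge_tlb_start()/purge_tlb_end().
 */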
DEFINE_SPINLOCK(pa_tlb_lock);

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
        on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
        on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
        flush_instruction_cache_local(NULL);
        flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

#define pfn_va(pfn) __va(PFN_PHYS(pfn))

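/*
 * Called when a PTE is installed or updated.  flush_dcache_page() defers
 * the flush for pages that currently have no user mapping by setting
 * PG_dcache_dirty; once a translation exists, the kernel mapping is
 * flushed here so the new user mapping sees up-to-date data.
 */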
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
        unsigned long pfn = pte_pfn(*ptep);
        struct page *page;

        /* Nothing to do for addresses without a backing struct page
           (e.g. I/O space mappings). */
        if (!pfn_valid(pfn))
                return;

        page = pfn_to_page(pfn);
        if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
                flush_kernel_dcache_page_addr(pfn_va(pfn));
                clear_bit(PG_dcache_dirty, &page->flags);
        } else if (parisc_requires_coherency())
                flush_kernel_dcache_page_addr(pfn_va(pfn));
}

void
show_cache_info(struct seq_file *m)
{
        char buf[32];

        seq_printf(m, "I-cache\t\t: %ld KB\n",
                cache_info.ic_size/1024 );
        if (cache_info.dc_loop != 1)
                snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
        seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
                cache_info.dc_size/1024,
                (cache_info.dc_conf.cc_wt ? "WT":"WB"),
                (cache_info.dc_conf.cc_sh ? ", shared I/D":""),
                ((cache_info.dc_loop == 1) ? "direct mapped" : buf));
        seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
                cache_info.it_size,
                cache_info.dt_size,
                cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
        );

#ifndef CONFIG_PA20
        /* Block TLB (BTLB) information is only available on PA1.x. */
        if (btlb_info.max_size==0) {
                seq_printf(m, "BTLB\t\t: not supported\n" );
        } else {
                seq_printf(m,
                "BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
                "BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
                "BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
                btlb_info.max_size, (int)4096,
                btlb_info.max_size>>8,
                btlb_info.fixed_range_info.num_i,
                btlb_info.fixed_range_info.num_d,
                btlb_info.fixed_range_info.num_comb,
                btlb_info.variable_range_info.num_i,
                btlb_info.variable_range_info.num_d,
                btlb_info.variable_range_info.num_comb
                );
        }
#endif
}

void __init
parisc_cache_init(void)
{
        if (pdc_cache_info(&cache_info) < 0)
                panic("parisc_cache_init: pdc_cache_info failed");

#if 0
        printk("ic_size %lx dc_size %lx it_size %lx\n",
                cache_info.ic_size,
                cache_info.dc_size,
                cache_info.it_size);

        printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.dc_base,
                cache_info.dc_stride,
                cache_info.dc_count,
                cache_info.dc_loop);

        printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.dc_conf),
                cache_info.dc_conf.cc_alias,
                cache_info.dc_conf.cc_block,
                cache_info.dc_conf.cc_line,
                cache_info.dc_conf.cc_shift);
        printk(" wt %d sh %d cst %d hv %d\n",
                cache_info.dc_conf.cc_wt,
                cache_info.dc_conf.cc_sh,
                cache_info.dc_conf.cc_cst,
                cache_info.dc_conf.cc_hv);

        printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.ic_base,
                cache_info.ic_stride,
                cache_info.ic_count,
                cache_info.ic_loop);

        printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.ic_conf),
                cache_info.ic_conf.cc_alias,
                cache_info.ic_conf.cc_block,
                cache_info.ic_conf.cc_line,
                cache_info.ic_conf.cc_shift);
        printk(" wt %d sh %d cst %d hv %d\n",
                cache_info.ic_conf.cc_wt,
                cache_info.ic_conf.cc_sh,
                cache_info.ic_conf.cc_cst,
                cache_info.ic_conf.cc_hv);

        printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
                cache_info.dt_conf.tc_sh,
                cache_info.dt_conf.tc_page,
                cache_info.dt_conf.tc_cst,
                cache_info.dt_conf.tc_aid,
                cache_info.dt_conf.tc_pad1);

        printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
                cache_info.it_conf.tc_sh,
                cache_info.it_conf.tc_page,
                cache_info.it_conf.tc_cst,
                cache_info.it_conf.tc_aid,
                cache_info.it_conf.tc_pad1);
#endif

        split_tlb = 0;
        if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
                if (cache_info.dt_conf.tc_sh == 2)
                        printk(KERN_WARNING "Unexpected TLB configuration. "
                                "Will flush I/D separately (could be optimized).\n");

                split_tlb = 1;
        }

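/*
 * Derive the byte stride used by the cache flush loops from the PDC cache
 * configuration word: cc_line scaled by 2^(3 + cc_block + cc_shift).
 * As a purely illustrative example, cc_line = 2, cc_block = 1 and
 * cc_shift = 2 would give a stride of 2 << 6 = 128 bytes.
 */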
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
        dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
        icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

#ifndef CONFIG_PA20
        if (pdc_btlb_info(&btlb_info) < 0) {
                memset(&btlb_info, 0, sizeof btlb_info);
        }
#endif

        if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
                                                PDC_MODEL_NVA_UNSUPPORTED) {
                printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
                panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
        }
}

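/*
 * Turn off space-ID (SR) hashing for this CPU type via the assembly
 * helper, then ask PDC how many space-ID bits are still in use and
 * panic if hashing somehow remains enabled.
 */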
void disable_sr_hashing(void)
{
        int srhash_type, retval;
        unsigned long space_bits;

        switch (boot_cpu_data.cpu_type) {
        case pcx:       /* should never reach here for this CPU type */
                BUG();
                return;

        case pcxs:
        case pcxt:
        case pcxt_:
                srhash_type = SRHASH_PCXST;
                break;

        case pcxl:
                srhash_type = SRHASH_PCXL;
                break;

        case pcxl2:     /* nothing to disable */
                return;

        default:
                srhash_type = SRHASH_PA20;
                break;
        }

        disable_sr_hashing_asm(srhash_type);

        retval = pdc_spaceid_bits(&space_bits);
        /* pdc_spaceid_bits() may legitimately be unimplemented
           (PDC_BAD_OPTION); only panic on real failures. */
        if (retval < 0 && retval != PDC_BAD_OPTION)
                panic("pdc_spaceid_bits call failed.\n");
        if (space_bits != 0)
                panic("SpaceID hashing is still on!\n");
}

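/*
 * Flush a single user page: the D-cache always, the I-cache only for
 * executable mappings.  Preemption is disabled around the assembly
 * helpers so the flush cannot be migrated to another CPU half-way through.
 */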
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
                   unsigned long physaddr)
{
        preempt_disable();
        flush_dcache_page_asm(physaddr, vmaddr);
        if (vma->vm_flags & VM_EXEC)
                flush_icache_page_asm(physaddr, vmaddr);
        preempt_enable();
}

void flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        struct vm_area_struct *mpnt;
        unsigned long offset;
        unsigned long addr, old_addr = 0;
        pgoff_t pgoff;

        if (mapping && !mapping_mapped(mapping)) {
                set_bit(PG_dcache_dirty, &page->flags);
                return;
        }

        flush_kernel_dcache_page(page);

        if (!mapping)
                return;

        pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

        /*
         * Walk every user mapping of this page.  Mappings of the same
         * file are set up congruently (see the SHMLBA handling in
         * arch_get_unmapped_area()), so flushing one address per
         * congruence class is normally enough for all of them to become
         * coherent.
         */
        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                addr = mpnt->vm_start + offset;

                /*
                 * Kill the user TLB entry first: the CPU may speculatively
                 * fetch any page it still has a translation for, so the
                 * page is only guaranteed to stay out of the caches once
                 * its mapping is gone and the flush below has completed.
                 */
                flush_tlb_page(mpnt, addr);
                if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
                        __flush_cache_page(mpnt, addr, page_to_phys(page));
                        if (old_addr)
                                printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n",
                                        old_addr, addr,
                                        mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
                        old_addr = addr;
                }
        }
        flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5 MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

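/*
 * Calibrate parisc_cache_flush_threshold at boot: time one whole-cache
 * flush against a range flush over the kernel image, then scale so that
 * ranges below the threshold are flushed line by line while anything
 * larger falls back to flushing the entire cache.  The result is rounded
 * up to a cache line and clamped to the D-cache size.
 */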
void __init parisc_setup_cache_timing(void)
{
        unsigned long rangetime, alltime;
        unsigned long size;

        alltime = mfctl(16);
        flush_data_cache();
        alltime = mfctl(16) - alltime;

        size = (unsigned long)(_end - _text);
        rangetime = mfctl(16);
        flush_kernel_dcache_range((unsigned long)_text, size);
        rangetime = mfctl(16) - rangetime;

        printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
                alltime, size, rangetime);

        /* Estimate the range size whose flush costs about as much as a
           whole-cache flush; larger ranges are cheaper to handle by
           flushing everything. */
        parisc_cache_flush_threshold = size * alltime / rangetime;

        parisc_cache_flush_threshold = (parisc_cache_flush_threshold +
                L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
        if (!parisc_cache_flush_threshold)
                parisc_cache_flush_threshold = FLUSH_THRESHOLD;

        if (parisc_cache_flush_threshold > cache_info.dc_size)
                parisc_cache_flush_threshold = cache_info.dc_size;

        printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n",
                parisc_cache_flush_threshold, num_online_cpus());
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

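/*
 * Flush the kernel mapping of one page and then purge its kernel TLB
 * entry, so the page cannot be pulled back into the cache through the
 * kernel translation behind our back.
 */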
void flush_kernel_dcache_page_addr(void *addr)
{
        unsigned long flags;

        flush_kernel_dcache_page_asm(addr);
        purge_tlb_start(flags);
        pdtlb_kernel(addr);
        purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
{
        clear_page_asm(vto);
        if (!parisc_requires_coherency())
                flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
        struct page *pg)
{
        /* Flush the source page's user alias first so that the copy made
           through the kernel mapping below picks up the latest data. */
        preempt_disable();
        flush_dcache_page_asm(__pa(vfrom), vaddr);
        preempt_enable();
        copy_page_asm(vto, vfrom);
        if (!parisc_requires_coherency())
                flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(copy_user_page);

#ifdef CONFIG_PA8X00

void kunmap_parisc(void *addr)
{
        if (parisc_requires_coherency())
                flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_parisc);
#endif

void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
        unsigned long flags;

        /* Drop both the data and instruction TLB entries for this address
           in @mm's space; purge_tlb_start() serializes us against other
           TLB purges. */
        purge_tlb_start(flags);
        mtsp(mm->context, 1);
        pdtlb(addr);
        pitlb(addr);
        purge_tlb_end(flags);
}
EXPORT_SYMBOL(purge_tlb_entries);

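/*
 * Flush the TLB for a user address range in space @sid.  Small ranges are
 * purged page by page (hitting both the D- and I-TLB when they are split);
 * ranges of 512 pages or more simply flush the whole TLB, which is cheaper
 * than a long purge loop.
 */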
void __flush_tlb_range(unsigned long sid, unsigned long start,
                       unsigned long end)
{
        unsigned long npages;

        npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        if (npages >= 512)      /* 2 MB of address space: an arbitrary cutoff */
                flush_tlb_all();
        else {
                unsigned long flags;

                purge_tlb_start(flags);
                mtsp(sid, 1);
                if (split_tlb) {
                        while (npages--) {
                                pdtlb(start);
                                pitlb(start);
                                start += PAGE_SIZE;
                        }
                } else {
                        while (npages--) {
                                pdtlb(start);
                                start += PAGE_SIZE;
                        }
                }
                purge_tlb_end(flags);
        }
}

static void cacheflush_h_tmp_function(void *dummy)
{
        flush_cache_all_local();
}

void flush_cache_all(void)
{
        on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        unsigned long usize = 0;

        for (vma = mm->mmap; vma; vma = vma->vm_next)
                usize += vma->vm_end - vma->vm_start;
        return usize;
}

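/*
 * Walk the page tables (pgd -> pud -> pmd -> pte) for @addr and return a
 * pointer to the PTE, or NULL if any intermediate level is empty.
 */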
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
        pte_t *ptep = NULL;

        if (!pgd_none(*pgd)) {
                pud_t *pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd_t *pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd))
                                ptep = pte_offset_map(pmd, addr);
                }
        }
        return ptep;
}

void flush_cache_mm(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        pgd_t *pgd;

        /* Flushing the whole cache on every CPU is expensive, so only do
           it when the mm is large enough to make it worthwhile. */
        if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
                flush_cache_all();
                return;
        }

        if (mm->context == mfsp(3)) {
                /* This mm is the currently loaded user address space, so
                   its user addresses can be flushed directly. */
                for (vma = mm->mmap; vma; vma = vma->vm_next) {
                        flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
                        if ((vma->vm_flags & VM_EXEC) == 0)
                                continue;
                        flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
                }
                return;
        }

        /* Otherwise walk the page tables and flush page by page through
           the physical address. */
        pgd = mm->pgd;
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                unsigned long addr;

                for (addr = vma->vm_start; addr < vma->vm_end;
                     addr += PAGE_SIZE) {
                        unsigned long pfn;
                        pte_t *ptep = get_ptep(pgd, addr);
                        if (!ptep)
                                continue;
                        pfn = pte_pfn(*ptep);
                        if (!pfn_valid(pfn))
                                continue;
                        __flush_cache_page(vma, addr, PFN_PHYS(pfn));
                }
        }
}

void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
        if ((end - start) < parisc_cache_flush_threshold)
                flush_user_dcache_range_asm(start, end);
        else
                flush_data_cache();
}

void
flush_user_icache_range(unsigned long start, unsigned long end)
{
        if ((end - start) < parisc_cache_flush_threshold)
                flush_user_icache_range_asm(start, end);
        else
                flush_instruction_cache();
}

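/*
 * Flush a user address range.  Like flush_cache_mm(): large ranges flush
 * the whole cache, the currently loaded address space is flushed through
 * its user addresses, and any other address space is walked page by page
 * via its page tables.
 */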
void flush_cache_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
{
        unsigned long addr;
        pgd_t *pgd;

        BUG_ON(!vma->vm_mm->context);

        if ((end - start) >= parisc_cache_flush_threshold) {
                flush_cache_all();
                return;
        }

        if (vma->vm_mm->context == mfsp(3)) {
                flush_user_dcache_range_asm(start, end);
                if (vma->vm_flags & VM_EXEC)
                        flush_user_icache_range_asm(start, end);
                return;
        }

        pgd = vma->vm_mm->pgd;
        for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
                unsigned long pfn;
                pte_t *ptep = get_ptep(pgd, addr);
                if (!ptep)
                        continue;
                pfn = pte_pfn(*ptep);
                if (pfn_valid(pfn))
                        __flush_cache_page(vma, addr, PFN_PHYS(pfn));
        }
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
        BUG_ON(!vma->vm_mm->context);

        if (pfn_valid(pfn)) {
                flush_tlb_page(vma, vmaddr);
                __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
        }
}

#ifdef CONFIG_PARISC_TMPALIAS

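/*
 * With CONFIG_PARISC_TMPALIAS, user highpages are cleared and copied
 * through kernel aliases obtained from kmap_atomic().  Before the page is
 * written, the kernel alias's D-cache lines are purged and its kernel TLB
 * entry dropped, so no non-equivalent translation of the page stays live
 * while the user-congruent access is in progress.
 */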
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *vto;
        unsigned long flags;

        /* Clear through an alias congruent to the user address.  The page
           does not need to be flushed first, but the kernel mapping must
           be purged from the cache and TLB. */
        vto = kmap_atomic(page);

        purge_kernel_dcache_page_asm((unsigned long)vto);
        purge_tlb_start(flags);
        pdtlb_kernel(vto);
        purge_tlb_end(flags);
        preempt_disable();
        clear_user_page_asm(vto, vaddr);
        preempt_enable();

        pagefault_enable();             /* kunmap_atomic() equivalent */
}

void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;
        unsigned long flags;

        /* Copy through kernel aliases of both pages.  The advantage is
           that the source page does not need to be flushed first; the
           destination is flushed after the copy since it may end up
           mapped executable. */
        vfrom = kmap_atomic(from);
        vto = kmap_atomic(to);

        purge_kernel_dcache_page_asm((unsigned long)vto);
        purge_tlb_start(flags);
        pdtlb_kernel(vto);
        pdtlb_kernel(vfrom);
        purge_tlb_end(flags);
        preempt_disable();
        copy_user_page_asm(vto, vfrom, vaddr);
        flush_dcache_page_asm(__pa(vto), vaddr);
        preempt_enable();

        pagefault_enable();             /* kunmap_atomic() equivalent for 'vto' */
        pagefault_enable();             /* kunmap_atomic() equivalent for 'vfrom' */
}

#endif