/*
 * TLB Management (flush/create/diagnostics) for ARC MMUv3 and MMUv4
 */
#include <linux/module.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#include <asm/arcregs.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>

/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;

static int __read_mostly pae_exists;

/*
 * Utility routine to erase a J-TLB entry.
 * Caller needs to set up the Index reg (manually or via TLBGetIndex).
 */
static inline void __tlb_entry_erase(void)
{
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

/* Invalidate the micro TLBs (uITLB/uDTLB), the hardware caches of the J-TLB */
static void utlb_invalidate(void)
{
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
}

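/*
 * MMUv3 only has probe-based commands (TLBProbe/TLBWrite), so entry
 * erase/insert are built on an explicit Index reg lookup below.
 * MMUv4 (the #else branch) has dedicated TLBDeleteEntry/TLBInsertEntry
 * commands which do that lookup in hardware.
 */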
#ifdef CONFIG_ARC_MMU_V3

static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
	unsigned int idx;

	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	return idx;
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	unsigned int idx;

	/* Locate the TLB entry for this vaddr + ASID */
	idx = tlb_entry_lkup(vaddr_n_asid);

	/* No error means entry found, zero it out */
	if (likely(!(idx & TLB_LKUP_ERR))) {
		__tlb_entry_erase();
	} else {
		/* Duplicate entry warning */
		WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
			vaddr_n_asid);
	}
}

static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
{
	unsigned int idx;

	/*
	 * First verify if an entry for this vaddr+ASID already exists.
	 * The probe also sets up PD0 (vaddr, ASID..) for the final commit.
	 */
	idx = tlb_entry_lkup(pd0);

	/*
	 * If not already present, get a free slot from the MMU.
	 * Otherwise, the probe has located the entry and loaded the Index
	 * reg with the existing location, so the Write cmd below simply
	 * over-writes that entry with the new PD0 and PD1.
	 */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/* setup the other half of TLB entry (pfn, rwx..) */
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	/*
	 * Commit the entry to the MMU.
	 * The TLBWriteNI cmd (which doesn't flush the uTLBs) is deliberately
	 * not used here: better safe than sorry.
	 */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

#else	/* MMUv4 */

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
}

static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
{
	write_aux_reg(ARC_REG_TLBPD0, pd0);

	if (!is_pae40_enabled()) {
		write_aux_reg(ARC_REG_TLBPD1, pd1);
	} else {
		/* PAE40: paddr bits 39:32 go in the separate PD1HI reg */
		write_aux_reg(ARC_REG_TLBPD1, pd1 & 0xFFFFFFFF);
		write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);
	}

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
}

#endif
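/*
 * A TLB entry is programmed as two halves:
 *  PD0: vaddr (page aligned) | ASID | valid/global/size flags
 *  PD1: paddr (pfn) | Kr/Kw/Kx/Ur/Uw/Ux permission bits | cacheability
 * See create_tlb() below for how these are built from a Linux PTE.
 */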

/*
 * Un-conditionally (without lookup) erase the entire MMU contents
 */
noinline void local_flush_tlb_all(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned long flags;
	unsigned int entry;
	int num_tlb = mmu->sets * mmu->ways;

	local_irq_save(flags);

	/* Load PD0 and PD1 with template for a Blank Entry */
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);

	for (entry = 0; entry < num_tlb; entry++) {
		/* write this entry to the TLB */
		write_aux_reg(ARC_REG_TLBINDEX, entry);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
		const int stlb_idx = 0x800;	/* super page entries start here */

		/* Blank sTLB entry */
		write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);

		for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
			write_aux_reg(ARC_REG_TLBINDEX, entry);
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
		}
	}

	utlb_invalidate();

	local_irq_restore(flags);
}
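/*
 * The local_flush_tlb_* routines here operate only on the TLB of the
 * calling CPU; under SMP the flush_tlb_* wrappers further down IPI the
 * other cores so each runs the local routine against its own TLB.
 */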

/*
 * Flush the entire MM for userland. The fastest way is to move to Next ASID
 */
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Small optimisation courtesy IA64:
	 * flush_mm is called during fork, exit, munmap etc, multiple times
	 * as well. Only for fork() do we need to move the parent to a new
	 * MMU ctxt; all other cases are NOPs, hence this check.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	/*
	 * - Move to a new ASID, but only if the mm is still wired in
	 *   (Android Binder ended up calling this for vma->mm != tsk->mm,
	 *   causing h/w - s/w ASID to get out of sync)
	 * - Also get_new_mmu_context() only allocates a new ASID if one is
	 *   not allocated already, so unallocate first
	 */
	destroy_context(mm);
	if (current->mm == mm)
		get_new_mmu_context(mm);
}

/*
 * Flush a Range of TLB entries for userland.
 * @start is inclusive, while @end is exclusive.
 * Difference between this and the Kernel Range Flush is that here the
 * fastest way (if the range is too large) is to move to the next ASID
 * without doing any explicit shootdown; for a kernel flush, each entry
 * has to be shot down explicitly.
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/*
	 * If the range @start to @end is more than 32 TLB entries deep,
	 * it is better to move to a new ASID rather than searching for
	 * individual entries and shooting them down.
	 *
	 * The calc is rough and doesn't account for unaligned parts,
	 * since this is heuristics based anyway.
	 */
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_mm(vma->vm_mm);
		return;
	}

	/*
	 * @start moved to page start: this alone suffices for the loop end
	 * condition below, w/o need for aligning @end up to a page end;
	 * e.g. 2000 to 4001 will anyhow loop twice.
	 */
	start &= PAGE_MASK;

	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		while (start < end) {
			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
			start += PAGE_SIZE;
		}
	}

	local_irq_restore(flags);
}

/*
 * Flush kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 *  @start, @end interpreted as kvaddr
 * Interestingly, shared TLB entries can also be flushed using just
 * @start,@end alone (interpreted as user vaddr), although technically SASID
 * is also needed. However our smart TLBProbe lookup takes care of that.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* exactly same as above, except the TLB entry doesn't take an ASID */

	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_all();
		return;
	}

	start &= PAGE_MASK;

	local_irq_save(flags);
	while (start < end) {
		tlb_entry_erase(start);
		start += PAGE_SIZE;
	}

	local_irq_restore(flags);
}

/*
 * Delete the TLB entry in the MMU for a given user page.
 * NOTE: one TLB entry contains the translation for a single PAGE.
 */
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/*
	 * It is critical that interrupts are DISABLED between checking
	 * the ASID and using it to flush the TLB entry.
	 */
	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
	}

	local_irq_restore(flags);
}
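/*
 * On SMP, flushing a mapping that may be live on other CPUs is done by
 * IPI-ing those CPUs: the tlb_args struct marshals the vma/range across
 * the smp call into the corresponding local_* handler above.
 */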

#ifdef CONFIG_SMP

struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ipi_flush_pmd_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}
#endif

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
	on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
			 mm, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = uaddr
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
}
#endif

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta = {
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}
#endif
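/*
 * A page fault handled by the generic VM ends up here (via
 * update_mmu_cache() below) to pre-install the translation into the MMU,
 * so the faulting access does not immediately take a TLB-miss exception
 * for the same address on return.
 */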

/*
 * Routine to create a TLB entry
 */
void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
{
	unsigned long flags;
	unsigned int asid_or_sasid, rwx;
	unsigned long pd0;
	phys_addr_t pd1;

	/*
	 * create_tlb() assumes that current->mm == vma->vm_mm, since
	 * - the ASID for the TLB entry is fetched from the MMU ASID reg
	 *   (only valid for the current task)
	 * - it completes the lazy write to the SASID reg (again only valid
	 *   for the current task)
	 *
	 * Removing the assumption involves
	 * - using vma->vm_mm->context{ASID,SASID} as opposed to the MMU reg
	 * - more importantly it makes this handler inconsistent with the
	 *   fast-path TLB Refill handler which always deals with "current"
	 *
	 * The cases where current->mm != vma->vm_mm and we land here:
	 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
	 *     Here the VM wants to pre-install a TLB entry for the user
	 *     stack while current->mm still points to the pre-execve mm
	 *     (hence the condition). However the stack vaddr is soon
	 *     relocated (randomization) and move_page_tables() undoes that
	 *     TLB entry, so not creating an entry here is no worse.
	 *  2. ptrace(POKETEXT) causes a CoW - debugger (current) inserting
	 *     a breakpoint in the debugged task. Not creating a TLB entry
	 *     now is not performance critical.
	 *
	 * Neither case is worth the code churn to handle.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	vaddr &= PAGE_MASK;

	/* update this PTE credentials */
	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

	/* Create HW TLB(PD0,PD1) from PTE */

	/* ASID for this task */
	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

	pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);

	/*
	 * ARC MMU provides fully orthogonal access bits for K/U mode,
	 * however Linux only saves 1 set to save PTE real-estate.
	 * Here we convert 3 PTE bits into 6 MMU bits:
	 * - Kernel only entries have Kr Kw Kx 0 0 0
	 * - User entries have mirrored K and U bits
	 */
	rwx = pte_val(*ptep) & PTE_BITS_RWX;

	if (pte_val(*ptep) & _PAGE_GLOBAL)
		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
	else
		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */

	pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);

	tlb_entry_insert(pd0, pd1);

	local_irq_restore(flags);
}
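/*
 * Worked example of the rwx expansion above: a non-global user page with
 * PTE perms r+w (no x) yields PD1 protection bits Kr Kw 0 Ur Uw 0 (kernel
 * and user views mirrored), while a global kernel-only r+w+x page keeps
 * only the kernel half: Kr Kw Kx 0 0 0.
 */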

/*
 * Called at the end of pagefault, for a userspace mapped page
 *  - pre-install the corresponding TLB entry into the MMU
 *  - finalize the delayed D-cache flush of the kernel mapping of the page
 *    due to flush_dcache_page(), copy_user_page()
 *
 * Note that the flush (when done) involves both WBACK - so the physical
 * page is in sync - as well as INV - so no non-congruent aliases remain.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
		      pte_t *ptep)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	create_tlb(vma, vaddr, ptep);

	if (page == ZERO_PAGE(0)) {
		return;
	}

	/*
	 * Exec page : Independent of aliasing/page-color considerations,
	 *	       since icache doesn't snoop dcache on ARC, any dirty
	 *	       K-mapping of a code page needs to be wback+inv so that
	 *	       icache fetch by userspace sees code correctly.
	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
	 *	       so userspace sees the right data.
	 *  (Avoids the flush for the non-exec + congruent mapping case)
	 */
	if ((vma->vm_flags & VM_EXEC) ||
	     addr_not_cache_congruent(paddr, vaddr)) {

		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
		if (dirty) {
			/* wback + inv dcache lines (K-mapping) */
			__flush_dcache_page(paddr, paddr);

			/* invalidate any existing icache lines (U-mapping) */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_page(paddr, vaddr);
		}
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * MMUv4 in HS38x cores supports Super Pages, which are the basis for
 * Linux Transparent Huge Page (THP) support.
 *
 * Normal and Super pages can co-exist (of course not overlap) in the TLB,
 * with a "SZ" bit in the TLB page descriptor distinguishing them.
 * Super Page size is configurable in hardware (4K to 16M) but fixed once
 * the RTL builds.
 *
 * The exact THP size a given Linux configuration supports is a function of:
 *  - MMU page size (typically 8K, RTL fixed)
 *  - the software page walker address split between PGD:PTE:PFN
 *    (typically 11:8:13)
 * So for the defaults above, the THP size supported is 8K * (2^8) = 2M.
 *
 * The default page walker is 2 levels (PGD:PTE), which in the THP regime
 * reduces to 1 level (the PTE is folded into the PGD, canonically the PMD).
 *
 * Thus the THP PMD accessors are implemented in terms of PTE (just like
 * sparc).
 */

void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	pte_t pte = __pte(pmd_val(*pmd));
	update_mmu_cache(vma, addr, &pte);
}

void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			       unsigned long end)
{
	unsigned int cpu;
	unsigned long flags;

	local_irq_save(flags);

	cpu = smp_processor_id();

	if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
		unsigned int asid = hw_pid(vma->vm_mm, cpu);

		/* No need to loop here: this will always be for 1 Huge Page */
		tlb_entry_erase(start | _PAGE_HW_SZ | asid);
	}

	local_irq_restore(flags);
}

#endif

/*
 * Decode the MMU Build Configuration Register into the per-cpu mmu info
 */
void read_decode_mmu_bcr(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned int tmp;
	struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
	unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
		     u_itlb:4, u_dtlb:4;
#else
	unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
		     ways:4, ver:8;
#endif
	} *mmu3;

	struct bcr_mmu_4 {
#ifdef CONFIG_CPU_BIG_ENDIAN
	unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
		     n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
#else
	/*	  DTLB      ITLB      JES        JE         JA	    */
	unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
		     pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
#endif
	} *mmu4;

	tmp = read_aux_reg(ARC_REG_MMU_BCR);
	mmu->ver = (tmp >> 24);

	if (is_isa_arcompact() && mmu->ver == 3) {
		mmu3 = (struct bcr_mmu_3 *)&tmp;
		mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
		mmu->sets = 1 << mmu3->sets;
		mmu->ways = 1 << mmu3->ways;
		mmu->u_dtlb = mmu3->u_dtlb;
		mmu->u_itlb = mmu3->u_itlb;
		mmu->sasid = mmu3->sasid;
	} else {
		mmu4 = (struct bcr_mmu_4 *)&tmp;
		mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
		mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
		mmu->sets = 64 << mmu4->n_entry;
		mmu->ways = mmu4->n_ways * 2;
		mmu->u_dtlb = mmu4->u_dtlb * 4;
		mmu->u_itlb = mmu4->u_itlb * 4;
		mmu->sasid = mmu4->sasid;
		pae_exists = mmu->pae = mmu4->pae;
	}
}
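/*
 * Illustrative decode (field values made up, not a specific silicon build):
 * an MMUv4 BCR with sz0=4, sz1=12, n_entry=2, n_ways=2 yields
 * pg_sz_k = 1 << 3 = 8K pages, s_pg_sz_m = 1 << 1 = 2M super pages,
 * sets = 64 << 2 = 256 and ways = 4, i.e. a 1024-entry J-TLB.
 */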

char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
	char super_pg[64] = "";

	if (p_mmu->s_pg_sz_m)
		scnprintf(super_pg, 64, "%dM Super Page %s",
			  p_mmu->s_pg_sz_m,
			  IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));

	n += scnprintf(buf + n, len - n,
		       "MMU [v%x]\t: %dk PAGE, %s, swalk %d lvl, JTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
		       p_mmu->ver, p_mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
		       p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
		       p_mmu->u_dtlb, p_mmu->u_itlb,
		       IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));

	return buf;
}

int pae40_exist_but_not_enab(void)
{
	return pae_exists && !is_pae40_enabled();
}

void arc_mmu_init(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	char str[256];
	int compat = 0;

	pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));

	/*
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));

	/*
	 * stack top size sanity check,
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));

	/*
	 * Ensure that MMU features assumed by the kernel exist in hardware:
	 *  - For older ARC700 cpus, only v3 is supported
	 *  - For HS cpus, v4 was the baseline and v5 is backwards compatible
	 */
	if (is_isa_arcompact() && mmu->ver == 3)
		compat = 1;
	else if (is_isa_arcv2() && mmu->ver >= 4)
		compat = 1;

	if (!compat)
		panic("MMU ver %d doesn't match kernel built for\n", mmu->ver);

	if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
		panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
		      (unsigned long)TO_MB(HPAGE_PMD_SIZE));

	if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
		panic("Hardware doesn't support PAE40\n");

	/* Enable the MMU with ASID 0 */
	mmu_setup_asid(NULL, 0);

	/* cache the pgd pointer in MMU SCRATCH reg (ARCv2 only) */
	mmu_setup_pgd(NULL, swapper_pg_dir);

	if (pae40_exist_but_not_enab())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);
}
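/*
 * The TLB Programmer's Model uses linear indexes 0 to (sets*ways - 1),
 * walking the ways of a set before moving to the next set, e.g. for
 * 128 sets x 4 ways:
 *
 *		|way0|way1|way2|way3|
 * [set0]	|  0 |  1 |  2 |  3 |
 * [set1]	|  4 |  5 |  6 |  7 |
 * ~		~
 * [set127]	| 508| 509| 510| 511|
 *
 * For normal operations we need not care, since the MMU getIndex cmd
 * abstracts this out; but for walking the WAYS of a SET, as the overlap
 * handler below does, the layout matters.
 */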
#define SET_WAY_TO_IDX(mmu, set, way)	((set) * mmu->ways + (way))
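/*
 * Handling of Duplicate PD (TLB entry) in the MMU:
 * - could be due to buggy customer tapeouts or obscure kernel bugs
 * - the MMU raises a Machine Check if 2 TLB entries match the same PD0,
 *   and duplicate J-TLB entries can crash the system
 * - do_tlb_overlap_fault() below is the handler: it walks every set,
 *   reads out all the ways, and erases any entries that alias.
 */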
volatile int dup_pd_silent;	/* be silent about it or complain (default) */

void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
			  struct pt_regs *regs)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned long flags;
	int set, n_ways = mmu->ways;

	n_ways = min(n_ways, 4);
	BUG_ON(mmu->ways > 4);

	local_irq_save(flags);

	/* loop thru all sets of TLB */
	for (set = 0; set < mmu->sets; set++) {

		int is_valid, way;
		unsigned int pd0[4];

		/* read out all the ways of the current set */
		for (way = 0, is_valid = 0; way < n_ways; way++) {
			write_aux_reg(ARC_REG_TLBINDEX,
				      SET_WAY_TO_IDX(mmu, set, way));
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
			is_valid |= pd0[way] & _PAGE_PRESENT;
			pd0[way] &= PAGE_MASK;
		}

		/* If all the WAYS in the SET are empty, skip to the next SET */
		if (!is_valid)
			continue;

		/* Scan the set for duplicate ways: needs O(n^2) such tests */
		for (way = 0; way < n_ways - 1; way++) {

			int n;

			if (!pd0[way])
				continue;

			for (n = way + 1; n < n_ways; n++) {
				if (pd0[way] != pd0[n])
					continue;

				if (!dup_pd_silent)
					pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
						pd0[way], set, way, n);

				/*
				 * clear entry @way and not @n.
				 * This is critical to our optimised loop
				 */
				pd0[way] = 0;
				write_aux_reg(ARC_REG_TLBINDEX,
					      SET_WAY_TO_IDX(mmu, set, way));
				__tlb_entry_erase();
			}
		}
	}

	local_irq_restore(flags);
}