/*
 * TLB Management (flush/create/diagnostics) for ARC700
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#include <asm/arcregs.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>

/*
 * Brief notes on the MMU generations handled below:
 *
 * MMU v1 (ARC700): Joint TLB for Code and Data, 2-way set associative.
 * Recently used J-TLB entries are cached in tiny fully-associative uTLBs
 * (u-I-TLB for instruction fetch, u-D-TLB for data), but any J-TLB write
 * also flushed all uTLBs, hurting workloads such as memcpy which cycle
 * through several mappings per iteration.
 *
 * MMU v2: added the IVUTLB command to explicitly invalidate the uTLBs,
 * decoupling them from every J-TLB write.
 *
 * MMU v3: configurable page size and Shared Address Spaces (SASID).
 *
 * MMU v4 (HS cores): Super Pages (the basis of Linux THP support),
 * optional PAE40, and TLBInsertEntry/TLBDeleteEntry commands which do the
 * lookup in hardware, removing the explicit Probe + GetIndex sequences
 * needed on older MMUs.
 */

/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;

static int __read_mostly pae_exists;

/*
 * Utility Routine to erase a J-TLB entry
 * Caller needs to setup Index Reg (manually or via getIndex)
 */
static inline void __tlb_entry_erase(void)
{
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

#if (CONFIG_ARC_MMU_VER < 4)

static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
	unsigned int idx;

	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	return idx;
}
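
/*
 * Usage sketch (illustrative, mirroring the callers below): the Probe cmd
 * sets TLB_LKUP_ERR in TLBINDEX when no entry matches, so callers do:
 *
 *	idx = tlb_entry_lkup(vaddr | asid);
 *	if (!(idx & TLB_LKUP_ERR))
 *		... INDEX reg already selects the matching entry ...
 */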

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	unsigned int idx;

	/* Locate the TLB entry for this vaddr + ASID */
	idx = tlb_entry_lkup(vaddr_n_asid);

	/* No error means entry found, zero it out */
	if (likely(!(idx & TLB_LKUP_ERR))) {
		__tlb_entry_erase();
	} else {
		/* Duplicate entry error */
		WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
					 vaddr_n_asid);
	}
}

/****************************************************************************
 * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs)
 *
 * New IVUTLB cmd in MMU v2 explicitly invalidates the uTLB
 *
 * utlb_invalidate ( )
 *  -For v2 MMU calls Flush uTLB Cmd
 *  -For v1 MMU does nothing (except for Metal Fix v1 MMU)
 *      This is because in v1 TLBWrite itself invalidates the uTLBs
 ***************************************************************************/

static void utlb_invalidate(void)
{
#if (CONFIG_ARC_MMU_VER >= 2)

#if (CONFIG_ARC_MMU_VER == 2)
	/* MMU v2 introduced the uTLB Flush command.
	 * There was however an obscure hardware bug, where uTLB flush would
	 * fail when a prior probe for J-TLB (both totally unrelated) would
	 * return lkup err - because the entry didn't exist in MMU.
	 * The workaround was to set Index reg with some valid value, prior to
	 * flush. This was fixed in MMU v3, hence not needed there.
	 */
	unsigned int idx;

	/* make sure INDEX Reg is valid */
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	/* If not write some dummy val */
	if (unlikely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBINDEX, 0xa);
#endif

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
#endif

}

static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
{
	unsigned int idx;

	/*
	 * First verify if entry for this vaddr+ASID already exists
	 * The plain Write cmd WON'T tell if the entry exists, so we
	 * "Probe" via lkup
	 */
	idx = tlb_entry_lkup(pd0);

	/*
	 * If Not already present get a free slot from MMU.
	 * Otherwise, Probe would have located the entry and set INDEX Reg
	 * with existing location. This will cause Write CMD to over-write
	 * existing entry with new PD0 and PD1
	 */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/* setup the other half of TLB entry (pfn, rwx..) */
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	/*
	 * Commit the Entry to MMU
	 * It doesn't sound safe to use the TLBWriteNI cmd here
	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
	 */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

#else	/* CONFIG_ARC_MMU_VER >= 4 */

static void utlb_invalidate(void)
{
	/* No need since uTLB is always in sync with JTLB */
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
}

static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
{
	write_aux_reg(ARC_REG_TLBPD0, pd0);
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
}

#endif
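
/*
 * Note the contrast with the pre-v4 paths above: TLBInsertEntry and
 * TLBDeleteEntry perform the Probe/GetIndex/Write sequence in hardware,
 * so software merely loads PD0/PD1 (and PD1HI for PAE40) and issues a
 * single command; the uTLBs are kept coherent automatically as well.
 */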

/*
 * Un-conditionally (without lookup) erase the entire MMU contents
 */
noinline void local_flush_tlb_all(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned long flags;
	unsigned int entry;
	int num_tlb = mmu->sets * mmu->ways;

	local_irq_save(flags);

	/* Load PD0 and PD1 with template for a Blank Entry */
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);

	for (entry = 0; entry < num_tlb; entry++) {
		/* write this entry to the TLB */
		write_aux_reg(ARC_REG_TLBINDEX, entry);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
		const int stlb_idx = 0x800;

		/* Blank sTLB entry */
		write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);

		for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
			write_aux_reg(ARC_REG_TLBINDEX, entry);
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
		}
	}

	utlb_invalidate();

	local_irq_restore(flags);
}
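
/*
 * Note: there is no monolithic "flush all" MMU command; the routine above
 * walks every {set,way} slot by index (plus the 16 sTLB slots at 0x800 for
 * THP builds) and overwrites each with the blank entry staged in PD0/PD1.
 */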

/*
 * Flush the entire MM for userland. The fastest way is to move to Next ASID
 */
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Small optimisation courtesy IA64
	 * flush_mm called during fork,exit,munmap etc, multiple times as well.
	 * Only for fork( ) do we need to move parent to a new MMU ctxt,
	 * all other cases are NOPs, hence this check.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	/*
	 * - Move to a new ASID, but only if the mm is still wired in
	 *   (Android Binder ended up calling this for vma->vm_mm != tsk->mm,
	 *    causing h/w - s/w ASID to get out of sync)
	 * - Also get_new_mmu_context() only allocates a new ASID if one is
	 *   not allocated already, so unallocating the mm's context first
	 *   doesn't hurt
	 */
	destroy_context(mm);
	if (current->mm == mm)
		get_new_mmu_context(mm);
}

/*
 * Flush a Range of TLB entries for userland.
 * @start is inclusive, while @end is exclusive
 * Difference between this and Kernel Range Flush is
 *  -Here the fastest way (if range is too large) is to move to next ASID
 *   without doing any explicit Shootdown
 *  -In case of kernel Flush, entry has to be shot down explicitly
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* If range @start to @end is more than 32 TLB entries deep,
	 * it's better to move to a new ASID rather than searching for
	 * individual entries and then shooting them down
	 *
	 * The calc above is rough, doesn't account for unaligned parts,
	 * since this is heuristics based anyways
	 */
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_mm(vma->vm_mm);
		return;
	}

	/*
	 * @start moved to page start: this alone suffices for checking
	 * loop end condition below, w/o need for aligning @end to page end
	 * e.g. 2000 to 4001 will anyhow loop twice
	 */
	start &= PAGE_MASK;

	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		while (start < end) {
			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
			start += PAGE_SIZE;
		}
	}

	utlb_invalidate();

	local_irq_restore(flags);
}
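
/*
 * Sizing the 32-entry threshold above (illustrative, assuming the typical
 * 8K pages): 32 * 8K = 256K, so e.g. unmapping a 1M region would need 128
 * probe+erase round-trips, whereas retiring the ASID invalidates every
 * entry of the mm in O(1), at the cost of TLB refills later.
 */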

/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 *  @start, @end interpreted as kvaddr
 * Interestingly, shared TLB entries can also be flushed using just
 * @start,@end alone (interpreted as user vaddr), although technically SASID
 * is also needed. However our smart TLBProbe lookup takes care of that.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* exactly same as above, except for TLB entry not taking ASID */

	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_all();
		return;
	}

	start &= PAGE_MASK;

	local_irq_save(flags);
	while (start < end) {
		tlb_entry_erase(start);
		start += PAGE_SIZE;
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Delete TLB entry in MMU for a given page (user vaddr)
 * NOTE One TLB entry contains translation for a single PAGE
 */
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* Note that it is critical that interrupts are DISABLED between
	 * checking the ASID and using it to flush the TLB entry
	 */
	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
		utlb_invalidate();
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_SMP

struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ipi_flush_pmd_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}
#endif

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
	on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
			 mm, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = uaddr
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
}
#endif

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta = {
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}
#endif
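
/*
 * The cross-CPU wrappers above follow the usual Linux arch pattern: pack
 * the arguments into a tlb_args struct and IPI only the CPUs which have
 * run this mm (mm_cpumask) for the per-mm flushes, or every CPU for the
 * global/kernel ones; each target then runs its local_* counterpart.
 */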

/*
 * Routine to create a TLB entry
 */
void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
{
	unsigned long flags;
	unsigned int asid_or_sasid, rwx;
	unsigned long pd0;
	pte_t pd1;

	/*
	 * create_tlb() assumes that current->mm == vma->vm_mm, since
	 * -the ASID for the TLB entry is fetched from the MMU PID reg,
	 *  which is only valid for the current task
	 *
	 * Removing that assumption would require
	 * -Using vma->vm_mm->context.asid instead of the MMU reg
	 * -Fixing the TLB paranoid debug code to not trigger false negatives
	 * -More importantly, it would make this handler inconsistent with
	 *  the fast-path TLB refill handler, which always deals with "current"
	 *
	 * The cases where current->mm != vma->vm_mm and we land here:
	 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
	 *     Here the VM wants to pre-install a TLB entry for the user stack
	 *     while current->mm still points to the pre-execve mm (hence the
	 *     check below). The stack vaddr is soon relocated (randomized)
	 *     anyway, so skipping the pre-install is harmless.
	 *  2. ptrace(POKEDATA) causes a CoW - debugger(current) inserting a
	 *     breakpoint in a debugged task. Not creating a TLB entry now is
	 *     not performance critical.
	 * Neither case justifies the code churn of removing the assumption.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), vaddr);

	vaddr &= PAGE_MASK;

	/* update this PTE credentials */
	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

	/* Create HW TLB(PD0,PD1) from PTE */

	/* ASID for this task */
	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

	pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);

	/*
	 * ARC MMU provides fully orthogonal access bits for K/U mode,
	 * however Linux only saves 1 set to save PTE real-estate
	 * Here we convert 3 PTE bits into 6 MMU bits:
	 * -Kernel only entries have Kr Kw Kx 0 0 0
	 * -User entries have mirrored K and U bits
	 */
	rwx = pte_val(*ptep) & PTE_BITS_RWX;

	if (pte_val(*ptep) & _PAGE_GLOBAL)
		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
	else
		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */

	pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);

	tlb_entry_insert(pd0, pd1);

	local_irq_restore(flags);
}
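
/*
 * Worked example of the rwx expansion above, for a PTE with r-x (binary
 * 101) permissions:
 *   global/kernel page:  101 << 3        -> Kr Kw Kx : Ur Uw Ux = 101 : 000
 *   user page:           101 | (101<<3)  -> Kr Kw Kx : Ur Uw Ux = 101 : 101
 * i.e. user mappings mirror the rights into both privilege modes, while
 * kernel mappings grant nothing to user mode.
 */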

/*
 * Called at the end of pagefault, for a userspace mapped page
 *  -pre-install the corresponding TLB entry into MMU
 *  -Finalize the delayed D-cache flush of kernel mapping of page due to
 *	flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync, as well as INV - so any non-congruent aliases don't remain
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
		      pte_t *ptep)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	create_tlb(vma, vaddr, ptep);

	if (page == ZERO_PAGE(0))
		return;

	/*
	 * Exec page : Independent of aliasing/page-colour considerations,
	 *	       since icache doesn't snoop dcache on ARC, any dirty
	 *	       K-mapping of a code page needs to be wback+inv so that
	 *	       icache fetch by userspace sees code correctly.
	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
	 *	       so userspace sees the right data.
	 *  (Avoids the flush for Non-exec + congruent mapping case)
	 */
	if ((vma->vm_flags & VM_EXEC) ||
	     addr_not_cache_congruent(paddr, vaddr)) {

		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
		if (dirty) {
			/* wback + inv dcache lines (K-mapping) */
			__flush_dcache_page(paddr, paddr);

			/* invalidate any existing icache lines (U-mapping) */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_page(paddr, vaddr);
		}
	}
}
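
/*
 * The PG_dc_clean bit gives a flush-once protocol: flush_dcache_page()
 * marks a page dirty simply by clearing the bit, and the first
 * update_mmu_cache() to win the test_and_set_bit() above does the actual
 * wback+inv; later faults on the now-clean page skip cache maintenance.
 */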

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * MMUv4 in HS38x cores supports Super Pages which are the basis for Linux
 * THP support.
 *
 * Normal and Super pages can co-exist (of course not at the same vaddr) in
 * the TLB, with a new bit "SZ" in the TLB page descriptor to distinguish
 * between them. Super Page size is configurable in hardware (4K to 16M),
 * but fixed once the RTL builds.
 *
 * The exact THP size a Linux configuration will support is a function of:
 *  - MMU page size (typical 8K, RTL fixed)
 *  - software page walker address split between PGD:PTE:PFN (typical
 *    11:8:13, but can be changed with 1 line)
 * So for the above defaults, the THP size supported is 8K * (2^8) = 2M
 *
 * Default Page Walker is 2 levels, PGD:PTE:PFN, which in the THP regime
 * reduces to 1 level (as the PTE is folded into the PGD and canonically
 * referred to as PMD).
 *
 * The PMD entry pointing to a hugepage is flagged with _PAGE_HW_SZ, which
 * the TLB miss handlers and the flush routines below use to tell a huge
 * page mapping apart from a pointer to a next-level page table.
 */

void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	pte_t pte = __pte(pmd_val(*pmd));
	update_mmu_cache(vma, addr, &pte);
}

void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}

	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
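
/*
 * Mechanics of the stash above, e.g. depositing page tables A then B:
 * A becomes the list head, B is chained onto it and pmd_huge_pte points
 * at B; a subsequent withdraw returns B, re-points pmd_huge_pte at A, and
 * zeroes the two PTE slots that were reused as the embedded list_head.
 * This keeps preallocated page tables at hand so a later THP split can
 * repopulate the PMD without a potentially failing allocation.
 */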

void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			       unsigned long end)
{
	unsigned int cpu;
	unsigned long flags;

	local_irq_save(flags);

	cpu = smp_processor_id();

	if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
		unsigned int asid = hw_pid(vma->vm_mm, cpu);

		/* No need to loop here: this will always be for 1 Huge Page */
		tlb_entry_erase(start | _PAGE_HW_SZ | asid);
	}

	local_irq_restore(flags);
}

#endif

/* Read the MMU Build Configuration Register, decode it and save into
 * the cpuinfo structure for later use.
 * No validation is done here, simply read/convert the BCR
 */
void read_decode_mmu_bcr(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned int tmp;
	struct bcr_mmu_1_2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
#else
		unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
#endif
	} *mmu2;

	struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
			     u_itlb:4, u_dtlb:4;
#else
		unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
			     ways:4, ver:8;
#endif
	} *mmu3;

	struct bcr_mmu_4 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
			     n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
#else
		/*           DTLB      ITLB      JES        JE         JA      */
		unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
			     pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
#endif
	} *mmu4;

	tmp = read_aux_reg(ARC_REG_MMU_BCR);
	mmu->ver = (tmp >> 24);

	if (is_isa_arcompact()) {
		if (mmu->ver <= 2) {
			mmu2 = (struct bcr_mmu_1_2 *)&tmp;
			mmu->pg_sz_k = TO_KB(0x2000);
			mmu->sets = 1 << mmu2->sets;
			mmu->ways = 1 << mmu2->ways;
			mmu->u_dtlb = mmu2->u_dtlb;
			mmu->u_itlb = mmu2->u_itlb;
		} else {
			mmu3 = (struct bcr_mmu_3 *)&tmp;
			mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
			mmu->sets = 1 << mmu3->sets;
			mmu->ways = 1 << mmu3->ways;
			mmu->u_dtlb = mmu3->u_dtlb;
			mmu->u_itlb = mmu3->u_itlb;
			mmu->sasid = mmu3->sasid;
		}
	} else {
		mmu4 = (struct bcr_mmu_4 *)&tmp;
		mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
		mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
		mmu->sets = 64 << mmu4->n_entry;
		mmu->ways = mmu4->n_ways * 2;
		mmu->u_dtlb = mmu4->u_dtlb * 4;
		mmu->u_itlb = mmu4->u_itlb * 4;
		mmu->sasid = mmu4->sasid;
		pae_exists = mmu->pae = mmu4->pae;
	}
}
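
/*
 * Illustrative decode using the v4 math above: sz0 = 4 yields normal pages
 * of 1 << (4 - 1) = 8K; sz1 = 12 yields super pages of 1 << (12 - 11) = 2M;
 * n_entry = 1 and n_ways = 2 yield a 128-set, 4-way (512 entry) JTLB.
 * (Made-up BCR field values, chosen only to exercise the formulas.)
 */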

char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
	char super_pg[64] = "";

	if (p_mmu->s_pg_sz_m)
		scnprintf(super_pg, 64, "%dM Super Page %s",
			  p_mmu->s_pg_sz_m,
			  IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));

	n += scnprintf(buf + n, len - n,
		       "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d%s%s\n",
		       p_mmu->ver, p_mmu->pg_sz_k, super_pg,
		       p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
		       p_mmu->u_dtlb, p_mmu->u_itlb,
		       IS_AVAIL2(p_mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));

	return buf;
}

int pae40_exist_but_not_enab(void)
{
	return pae_exists && !is_pae40_enabled();
}
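
/*
 * Example of the resulting boot print (illustrative values, format per the
 * scnprintf above):
 *   MMU [v4]	: 8k PAGE, 2M Super Page JTLB 512 (128x4), uDTLB 8, uITLB 4, PAE40
 */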

void arc_mmu_init(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	char str[256];
	int compat = 0;

	pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));

	/*
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));

	/*
	 * stack top size sanity check,
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));

	/*
	 * Ensure that MMU features assumed by kernel exist in hardware.
	 * For older ARC700 cpus, it has to be an exact match, since the MMU
	 * revisions were not backwards compatible (MMUv3 TLB layout changed,
	 * so even a kernel not using any new v3 commands would not run on
	 * a v2 MMU).
	 * For HS cpus, MMUv4 was the baseline and subsequent revisions are
	 * backwards compatible (will run older software).
	 */
	if (is_isa_arcompact() && mmu->ver == CONFIG_ARC_MMU_VER)
		compat = 1;
	else if (is_isa_arcv2() && mmu->ver >= CONFIG_ARC_MMU_VER)
		compat = 1;

	if (!compat) {
		panic("MMU ver %d doesn't match kernel built for %d...\n",
		      mmu->ver, CONFIG_ARC_MMU_VER);
	}

	if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
		panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
		      (unsigned long)TO_MB(HPAGE_PMD_SIZE));

	if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
		panic("Hardware doesn't support PAE40\n");

	/* Enable the MMU */
	write_aux_reg(ARC_REG_PID, MMU_ENABLE);

	/* In smp we use this reg for interrupt 1 scratch */
#ifndef CONFIG_SMP
	/* swapper_pg_dir is the pgd for the kernel, used by vmalloc/modules */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
#endif

	if (pae40_exist_but_not_enab())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);
}

/*
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * All the ways of a set are contiguous in the index space (see macro below);
 * e.g. for a 4-way MMU, set0 holds indexes 0-3, set1 holds 4-7 and so on.
 * For normal operations we don't (must not) care how the above works, since
 * the MMU cmd getIndex(vaddr) abstracts that out.
 * However for walking the WAYS of a SET, we need to know this.
 */
#define SET_WAY_TO_IDX(mmu, set, way)	((set) * mmu->ways + (way))

/* Handling of Duplicate PD (TLB entry) in MMU.
 * -Could be due to buggy customer tapeouts or obscure kernel bugs
 * -MMU complains not at the time of duplicate PD installation, but at the
 *      time of a lookup matching multiple ways.
 * -Ideally these should never happen - but if they do - workaround by
 *      deleting the duplicate one.
 * -Knob to be verbose about it.
 *      (TODO: hook it up to debugfs)
 */
volatile int dup_pd_silent; /* Be silent about it or complain (default) */

void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
			  struct pt_regs *regs)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned int pd0[mmu->ways];
	unsigned long flags;
	int set;

	local_irq_save(flags);

	/* loop thru all sets of TLB */
	for (set = 0; set < mmu->sets; set++) {

		int is_valid, way;

		/* read out all the ways of current set */
		for (way = 0, is_valid = 0; way < mmu->ways; way++) {
			write_aux_reg(ARC_REG_TLBINDEX,
					  SET_WAY_TO_IDX(mmu, set, way));
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
			is_valid |= pd0[way] & _PAGE_PRESENT;
			pd0[way] &= PAGE_MASK;
		}

		/* If all the WAYS in SET are empty, skip to next SET */
		if (!is_valid)
			continue;

		/* Scan the set for duplicate ways: needs a nested loop */
		for (way = 0; way < mmu->ways - 1; way++) {

			int n;

			if (!pd0[way])
				continue;

			for (n = way + 1; n < mmu->ways; n++) {
				if (pd0[way] != pd0[n])
					continue;

				if (!dup_pd_silent)
					pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
						pd0[way], set, way, n);

				/*
				 * clear entry @way and not @n.
				 * This is critical to our optimised loop
				 */
				pd0[way] = 0;
				write_aux_reg(ARC_REG_TLBINDEX,
						SET_WAY_TO_IDX(mmu, set, way));
				__tlb_entry_erase();
			}
		}
	}

	local_irq_restore(flags);
}
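
/*
 * Example: with a 4-way MMU, if ways 1 and 3 of a set both hold PD0
 * 0x5f8e2000, the scan prints "Dup TLB PD0 5f8e2000 @ set N ways 1,3",
 * erases way 1 (the lower index) and leaves way 3 as the sole copy.
 */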

/***********************************************************************
 * Diagnostic Routines
 *  -Called from Low Level TLB Handlers if things don't look good
 **********************************************************************/

#ifdef CONFIG_ARC_DBG_TLB_PARANOIA

/*
 * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDs
 * don't match
 */
void print_asid_mismatch(int mm_asid, int mmu_asid, int is_fast_path)
{
	pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
		 is_fast_path ? "Fast" : "Slow", mm_asid, mmu_asid);

	__asm__ __volatile__("flag 1");	/* halt the processor */
}

void tlb_paranoid_check(unsigned int mm_asid, unsigned long addr)
{
	unsigned int mmu_asid;

	mmu_asid = read_aux_reg(ARC_REG_PID) & 0xff;

	/*
	 * At the time of a TLB miss/installation
	 *   - HW version needs to match SW version
	 *   - SW needs to have a valid ASID
	 * (only user addresses are checked: kernel entries have the global
	 * bit set, so the ASID is irrelevant for vaddr >= 0x70000000)
	 */
	if (addr < 0x70000000 &&
	    ((mm_asid == MM_CTXT_NO_ASID) ||
	     (mmu_asid != (mm_asid & MM_CTXT_ASID_MASK))))
		print_asid_mismatch(mm_asid, mmu_asid, 0);
}
#endif