1
2
3
4
5
6
7
8
9
10
11#include <linux/kernel.h>
12#include <linux/mm.h>
13#include <linux/io.h>
14#include <asm/mmu_context.h>
15#include <asm/cacheflush.h>
16
/*
 * Load a single PTE into the TLB for @address, used from the fault path.
 * Writes the virtual page/ASID to PTEH and the hardware PTE bits to
 * PTEL (and PTEA where applicable), then issues "ldtlb" to load the
 * entry. Runs with local interrupts disabled around the register writes,
 * since PTEH/PTEL/PTEA form a single non-atomic hardware transaction.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags, pteval, vpn;

	/*
	 * Skip the update when the fault is being handled on behalf of
	 * another mm (e.g. a debugger faulting in pages for a debuggee):
	 * the TLB entry would be tagged with the wrong current ASID.
	 */
	if (vma && current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	/* Set PTEH: virtual page number plus the current ASID. */
	vpn = (address & MMU_VPN_MASK) | get_asid();
	__raw_writel(vpn, MMU_PTEH);

	pteval = pte.pte_low;

	/* Set PTEA register */
#ifdef CONFIG_X2TLB
	/*
	 * For the extended mode (64-bit PTE) TLB the high word of the
	 * PTE goes straight out to PTEA; the rest of the PTE is shoved
	 * into PTEL below.
	 */
	__raw_writel(pte.pte_high, MMU_PTEA);
#else
	if (cpu_data->flags & CPU_HAS_PTEA) {
		/*
		 * Only some CPUs have PTEA; copy_ptea_attributes()
		 * extracts the PTEA timing control / space attribute
		 * bits from the low PTE word for it.
		 */
		__raw_writel(copy_ptea_attributes(pteval), MMU_PTEA);
	}
#endif

	/* Set PTEL: drop the software-only PTE flags the hardware must not see. */
	pteval &= _PAGE_FLAGS_HARDWARE_MASK;
#ifdef CONFIG_CACHE_WRITETHROUGH
	pteval |= _PAGE_WT;
#endif
	/* conveniently, we want the PCC bit to be 0 for WT mode */
	__raw_writel(pteval, MMU_PTEL);

	/* Load the entry described by PTEH/PTEL(/PTEA) into the TLB. */
	asm volatile("ldtlb": : : "memory");
	local_irq_restore(flags);
}
65
/*
 * Invalidate the single UTLB entry for (@asid, @page) on this CPU.
 *
 * Uses an associative write to the UTLB address array with the
 * association bit set: the hardware looks up the matching entry itself,
 * and because the written data has the valid bit clear, the match is
 * invalidated. The write must go through the uncached mapping
 * (jump_to_uncached/back_to_cached).
 *
 * NOTE(review): no local_irq_save() here — presumably callers run with
 * interrupts already disabled; confirm against the callers.
 */
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
	unsigned long addr, data;

	/* Associative write: hardware searches the UTLB for a match. */
	addr = MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT;
	data = page | asid; /* valid bit is off, so a match is invalidated */
	jump_to_uncached();
	__raw_writel(data, addr);
	back_to_cached();
}
82
/*
 * Flush the entire TLB on this CPU by zeroing every UTLB and ITLB
 * address-array entry (clearing an entry's valid bit invalidates it).
 * The whole walk runs with interrupts off and through the uncached
 * mapping, finishing with a control barrier before returning.
 */
void local_flush_tlb_all(void)
{
	unsigned long flags, status;
	int i;

	local_irq_save(flags);
	jump_to_uncached();

	/*
	 * MMUCR.URB bounds the usable UTLB entries (entries >= URB are
	 * wired); only the first URB entries need clearing. URB == 0
	 * means no entries are wired, so clear the full UTLB.
	 */
	status = __raw_readl(MMUCR);
	status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);

	if (status == 0)
		status = MMUCR_URB_NENTRIES;

	/* Entry index sits at bits [8..] of the address-array address. */
	for (i = 0; i < status; i++)
		__raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));

	/* The ITLB has 4 entries; clear them all. */
	for (i = 0; i < 4; i++)
		__raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));

	back_to_cached();
	ctrl_barrier();
	local_irq_restore(flags);
}
110