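/*
 * hw_mmu.h
 *
 * Types and API declarations for the OMAP hardware MMU module.
 */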
#ifndef _HW_MMU_H
#define _HW_MMU_H

#include <linux/types.h>

/*
 * hw_status, hw_endianism_t, hw_element_size_t and the HW_PAGE_SIZE*
 * constants used below are declared in hw_defs.h in the same tree.
 */
#include <hw_defs.h>

/* MMU interrupt event bits */
#define HW_MMU_TRANSLATION_FAULT 0x2
#define HW_MMU_ALL_INTERRUPTS 0x1F

/* Size in bytes of an L2 (coarse) page table: 256 four-byte entries */
#define HW_MMU_COARSE_PAGE_SIZE 0x400

/*
 * Source of the element size for a mixed-size mapping: taken from the
 * TLB entry (TLBES) or from the CPU access (CPUES).
 */
enum hw_mmu_mixed_size_t {
	HW_MMU_TLBES,
	HW_MMU_CPUES
};

/* Attributes describing how a region is mapped */
struct hw_mmu_map_attrs_t {
	enum hw_endianism_t endianism;
	enum hw_element_size_t element_size;
	enum hw_mmu_mixed_size_t mixed_size;
	bool donotlockmpupage;	/* if true, do not lock the backing MPU page */
};
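
/*
 * Illustrative initialization (a sketch only: the hw_endianism_t and
 * hw_element_size_t enumerator names below are assumed from hw_defs.h,
 * not defined in this header):
 *
 *	struct hw_mmu_map_attrs_t attrs = {
 *		.endianism = HW_LITTLE_ENDIAN,
 *		.element_size = HW_ELEM_SIZE16BIT,
 *		.mixed_size = HW_MMU_CPUES,
 *		.donotlockmpupage = false,
 *	};
 */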

/* Global MMU control */
extern hw_status hw_mmu_enable(void __iomem *base_address);

extern hw_status hw_mmu_disable(void __iomem *base_address);

/* TLB lock configuration: number of locked entries and victim index */
extern hw_status hw_mmu_num_locked_set(void __iomem *base_address,
				       u32 num_locked_entries);

extern hw_status hw_mmu_victim_num_set(void __iomem *base_address,
				       u32 victim_entry_num);

/* Interrupt event handling */
extern hw_status hw_mmu_event_ack(void __iomem *base_address,
				  u32 irq_mask);

extern hw_status hw_mmu_event_disable(void __iomem *base_address,
				      u32 irq_mask);

extern hw_status hw_mmu_event_enable(void __iomem *base_address,
				     u32 irq_mask);

extern hw_status hw_mmu_event_status(void __iomem *base_address,
				     u32 *irq_mask);

/* Read the virtual address that raised the last fault */
extern hw_status hw_mmu_fault_addr_read(void __iomem *base_address,
					u32 *addr);

/* Set the translation table base (physical address) */
extern hw_status hw_mmu_ttb_set(void __iomem *base_address,
				u32 ttb_phys_addr);

/* Hardware table-walking logic control */
extern hw_status hw_mmu_twl_enable(void __iomem *base_address);

extern hw_status hw_mmu_twl_disable(void __iomem *base_address);

/* Program a TLB entry directly */
extern hw_status hw_mmu_tlb_add(void __iomem *base_address,
				u32 physical_addr,
				u32 virtual_addr,
				u32 page_sz,
				u32 entry_num,
				struct hw_mmu_map_attrs_t *map_attrs,
				s8 preserved_bit, s8 valid_bit);

/* Set or clear a page-table entry in the table at pg_tbl_va */
extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
				u32 physical_addr,
				u32 virtual_addr,
				u32 page_sz,
				struct hw_mmu_map_attrs_t *map_attrs);

extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
				  u32 virtual_addr, u32 page_size);

void hw_mmu_tlb_flush_all(void __iomem *base);
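
/*
 * Typical bring-up sequence (a sketch only; 'mmu_base', 'ttb_phys',
 * 'pt_va', 'phys', 'virt' and 'attrs' are hypothetical caller-owned
 * values, and error checking is omitted):
 *
 *	hw_mmu_ttb_set(mmu_base, ttb_phys);
 *	hw_mmu_twl_enable(mmu_base);
 *	hw_mmu_pte_set(pt_va, phys, virt, HW_PAGE_SIZE4KB, &attrs);
 *	hw_mmu_enable(mmu_base);
 */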

/*
 * Address of the L1 descriptor that covers @va: the L1 index is
 * va[31:20] and each descriptor is 4 bytes wide.
 */
static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va)
{
	u32 pte_addr;
	u32 va31_to20;

	va31_to20 = va >> (20 - 2);	/* index << 2 gives the byte offset */
	va31_to20 &= 0xFFFFFFFCUL;	/* keep the offset word-aligned */
	pte_addr = l1_base + va31_to20;

	return pte_addr;
}

/*
 * Address of the L2 descriptor that covers @va: the L2 table is 1 KB
 * aligned, the index is va[19:12] and each descriptor is 4 bytes wide.
 */
static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va)
{
	u32 pte_addr;

	pte_addr = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC);

	return pte_addr;
}

/* Coarse page-table base address (bits [31:10]) held in an L1 descriptor */
static inline u32 hw_mmu_pte_coarse_l1(u32 pte_val)
{
	u32 pte_coarse;

	pte_coarse = pte_val & 0xFFFFFC00;

	return pte_coarse;
}

/*
 * Size mapped by an L1 descriptor, decoded from its type bits [1:0]:
 * 0x1 points to a coarse (L2) page table, 0x2 is a section, with bit 18
 * distinguishing a 16 MB supersection from a 1 MB section.
 */
static inline u32 hw_mmu_pte_size_l1(u32 pte_val)
{
	u32 pte_size = 0;

	if ((pte_val & 0x3) == 0x1)
		/* Descriptor points to an L2 page table */
		pte_size = HW_MMU_COARSE_PAGE_SIZE;

	if ((pte_val & 0x3) == 0x2) {
		if (pte_val & (1 << 18))
			pte_size = HW_PAGE_SIZE16MB;
		else
			pte_size = HW_PAGE_SIZE1MB;
	}

	return pte_size;
}

/*
 * Size mapped by an L2 descriptor: bit 1 marks a 4 KB small page,
 * otherwise bit 0 marks a 64 KB large page.
 */
static inline u32 hw_mmu_pte_size_l2(u32 pte_val)
{
	u32 pte_size = 0;

	if (pte_val & 0x2)
		pte_size = HW_PAGE_SIZE4KB;
	else if (pte_val & 0x1)
		pte_size = HW_PAGE_SIZE64KB;

	return pte_size;
}
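
#if 0	/* Illustrative only: how the helpers above chain together when
	 * walking a two-level table. 'l1_base_va' and 'va' are
	 * hypothetical, and descriptors are read through a plain u32
	 * pointer, as the u32 table addresses used by this header suggest.
	 */
static inline u32 hw_mmu_example_l2_pte_addr(u32 l1_base_va, u32 va)
{
	u32 l1_pte = *(u32 *)hw_mmu_pte_addr_l1(l1_base_va, va);

	/* Only coarse L1 descriptors point to an L2 table */
	if (hw_mmu_pte_size_l1(l1_pte) != HW_MMU_COARSE_PAGE_SIZE)
		return 0;

	return hw_mmu_pte_addr_l2(hw_mmu_pte_coarse_l1(l1_pte), va);
}
#endif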

#endif /* _HW_MMU_H */