1
2
3
4
5
6
7#include <common.h>
8#include <cpu_func.h>
9#include <log.h>
10#include <asm/system.h>
11#include <asm/cache.h>
12#include <linux/compiler.h>
13#include <asm/armv7_mpu.h>
14
15#if !(CONFIG_IS_ENABLED(SYS_ICACHE_OFF) && CONFIG_IS_ENABLED(SYS_DCACHE_OFF))
16
17DECLARE_GLOBAL_DATA_PTR;
18
19#ifdef CONFIG_SYS_ARM_MMU
/*
 * Weak hook called before the translation table is built; boards/SoCs
 * override this to do any preparation the MMU setup depends on.
 */
__weak void arm_init_before_mmu(void)
{
}
23
/*
 * Weak hook called after the domain access control register is written
 * (see mmu_setup()); boards override it to set their own domain policy.
 */
__weak void arm_init_domains(void)
{
}
27
28static void set_section_phys(int section, phys_addr_t phys,
29 enum dcache_option option)
30{
31#ifdef CONFIG_ARMV7_LPAE
32 u64 *page_table = (u64 *)gd->arch.tlb_addr;
33
34 u64 value = TTB_SECT_AP | TTB_SECT_AF;
35#else
36 u32 *page_table = (u32 *)gd->arch.tlb_addr;
37 u32 value = TTB_SECT_AP;
38#endif
39
40
41 value |= phys;
42
43
44 value |= option;
45
46
47 page_table[section] = value;
48}
49
50void set_section_dcache(int section, enum dcache_option option)
51{
52 set_section_phys(section, (u32)section << MMU_SECTION_SHIFT, option);
53}
54
/*
 * Weak default for the page-table maintenance hook. Platforms that run
 * with caches enabled must override this to flush the modified table
 * range [start, stop) so the MMU walker sees the new entries.
 */
__weak void mmu_page_table_flush(unsigned long start, unsigned long stop)
{
	debug("%s: Warning: not implemented\n", __func__);
}
59
/*
 * Change the cache attributes of the virtual region [start, start + size),
 * mapping it to the physical range starting at @phys, by rewriting the
 * affected section entries and flushing them out of the data cache.
 */
void mmu_set_region_dcache_behaviour_phys(phys_addr_t start, phys_addr_t phys,
					size_t size, enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
#endif
	unsigned long startpt, stoppt;
	unsigned long upto, end;

	/*
	 * Divide start and size by 2 *before* adding them so the sum
	 * cannot overflow phys_addr_t (e.g. a region reaching the top of
	 * a 32-bit address space); align to half a section and shift by
	 * one bit less to compensate.
	 */
	end = ALIGN((start / 2) + (size / 2), MMU_SECTION_SIZE / 2)
		>> (MMU_SECTION_SHIFT - 1);
	start = start >> MMU_SECTION_SHIFT;

#ifdef CONFIG_ARMV7_LPAE
	debug("%s: start=%pa, size=%zu, option=%llx\n", __func__, &start, size,
	      option);
#else
	debug("%s: start=%pa, size=%zu, option=0x%x\n", __func__, &start, size,
	      option);
#endif
	/* Rewrite one section entry per MMU section in the region */
	for (upto = start; upto < end; upto++, phys += MMU_SECTION_SIZE)
		set_section_phys(upto, phys, option);

	/*
	 * Flush the modified part of the page table out to memory.
	 * The flush range is widened to cache-line alignment, since cache
	 * maintenance operates on whole lines.
	 */
	startpt = (unsigned long)&page_table[start];
	startpt &= ~(CONFIG_SYS_CACHELINE_SIZE - 1);
	stoppt = (unsigned long)&page_table[end];
	stoppt = ALIGN(stoppt, CONFIG_SYS_CACHELINE_SIZE);
	mmu_page_table_flush(startpt, stoppt);
}
98
/*
 * Identity-mapped variant: set the cache attributes of a region whose
 * virtual and physical base addresses are the same.
 */
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	mmu_set_region_dcache_behaviour_phys(start, start, size, option);
}
104
105__weak void dram_bank_mmu_setup(int bank)
106{
107 struct bd_info *bd = gd->bd;
108 int i;
109
110
111 if ((gd->flags & GD_FLG_RELOC) == 0)
112 return;
113
114 debug("%s: bank: %d\n", __func__, bank);
115 for (i = bd->bi_dram[bank].start >> MMU_SECTION_SHIFT;
116 i < (bd->bi_dram[bank].start >> MMU_SECTION_SHIFT) +
117 (bd->bi_dram[bank].size >> MMU_SECTION_SHIFT);
118 i++)
119 set_section_dcache(i, DCACHE_DEFAULT_OPTION);
120}
121
122
/*
 * Build the translation table, program the cp15 translation registers
 * and turn the MMU on.
 */
static inline void mmu_setup(void)
{
	int i;
	u32 reg;

	arm_init_before_mmu();

	/* Start with an uncached identity mapping for the full 4GB space */
	for (i = 0; i < ((4096ULL * 1024 * 1024) >> MMU_SECTION_SHIFT); i++)
		set_section_dcache(i, DCACHE_OFF);

	/* Then re-mark the DRAM banks with the default cache option */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		dram_bank_mmu_setup(i);
	}

#if defined(CONFIG_ARMV7_LPAE) && __LINUX_ARM_ARCH__ != 4
	/*
	 * LPAE: build the first-level table of 4 entries (1GB each),
	 * each pointing at one of the 4KB second-level tables that
	 * precede it at tlb_addr.
	 */
	for (i = 0; i < 4; i++) {
		u64 *page_table = (u64 *)(gd->arch.tlb_addr + (4096 * 4));
		u64 tpt = gd->arch.tlb_addr + (4096 * i);
		page_table[i] = tpt | TTB_PAGETABLE;
	}

	/* Enable the extended address format plus the cache policy bits */
	reg = TTBCR_EAE;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBCR_ORGN0_WT | TTBCR_IRGN0_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBCR_ORGN0_WBWA | TTBCR_IRGN0_WBWA;
#else
	reg |= TTBCR_ORGN0_WBNWA | TTBCR_IRGN0_WBNWA;
#endif

	if (is_hyp()) {
		/* HTCR: hyp-mode translation control */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* HTTBR: 64-bit table base (first-level table) */
		asm volatile("mcrr p15, 4, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* HMAIR0: memory attribute indirection */
		asm volatile("mcr p15, 4, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	} else {
		/* TTBCR: translation control */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* 64-bit TTBR0: table base (first-level table) */
		asm volatile("mcrr p15, 0, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* MAIR0: memory attribute indirection */
		asm volatile("mcr p15, 0, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	}
#elif defined(CONFIG_CPU_V7A)
	if (is_hyp()) {
		/* HTCR = 0: short-descriptor format */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	} else {
		/* TTBCR = 0: short-descriptor format, TTBR0 only */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	}

	/* TTBR0: table base plus table-walk cacheability bits */
	reg = gd->arch.tlb_addr & TTBR0_BASE_ADDR_MASK;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBR0_RGN_WT | TTBR0_IRGN_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBR0_RGN_WBWA | TTBR0_IRGN_WBWA;
#else
	reg |= TTBR0_RGN_WB | TTBR0_IRGN_WB;
#endif
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		: : "r" (reg) : "memory");
#else
	/* Copy the page table address to cp15 TTBR0 */
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		: : "r" (gd->arch.tlb_addr) : "memory");
#endif

	/* DACR: set every domain to "manager" access (no permission checks) */
	asm volatile("mcr p15, 0, %0, c3, c0, 0"
		: : "r" (~0));

	arm_init_domains();

	/* Finally enable the MMU by setting the SCTLR M bit */
	reg = get_cr();
	set_cr(reg | CR_M);
}
215
216static int mmu_enabled(void)
217{
218 return get_cr() & CR_M;
219}
220#endif
221
222
/* Set the given SCTLR bit; cache_bit must be either CR_I or CR_C */
static void cache_enable(uint32_t cache_bit)
{
	uint32_t reg;

	/* The data cache is only usable once the MMU (or MPU) is on */
#ifdef CONFIG_SYS_ARM_MMU
	if ((cache_bit == CR_C) && !mmu_enabled())
		mmu_setup();
#elif defined(CONFIG_SYS_ARM_MPU)
	if ((cache_bit == CR_C) && !mpu_enabled()) {
		printf("Consider enabling MPU before enabling caches\n");
		return;
	}
#endif
	reg = get_cr();		/* read-modify-write the control register */
	set_cr(reg | cache_bit);
}
240
241
/* Clear the given SCTLR bit; cache_bit must be either CR_I or CR_C */
static void cache_disable(uint32_t cache_bit)
{
	uint32_t reg;

	reg = get_cr();

	if (cache_bit == CR_C) {
		/* If the data cache is already off there is nothing to do */
		if ((reg & CR_C) != CR_C)
			return;
#ifdef CONFIG_SYS_ARM_MMU
		/* When disabling the data cache, disable the MMU too */
		cache_bit |= CR_M;
#endif
	}
	reg = get_cr();

	/* Flush dirty data back to memory before the cache goes off */
#ifdef CONFIG_SYS_ARM_MMU
	if (cache_bit == (CR_C | CR_M))
#elif defined(CONFIG_SYS_ARM_MPU)
	if (cache_bit == CR_C)
#endif
		flush_dcache_all();
	set_cr(reg & ~cache_bit);
}
267#endif
268
269#if CONFIG_IS_ENABLED(SYS_ICACHE_OFF)
270void icache_enable(void)
271{
272 return;
273}
274
275void icache_disable(void)
276{
277 return;
278}
279
280int icache_status(void)
281{
282 return 0;
283}
284#else
285void icache_enable(void)
286{
287 cache_enable(CR_I);
288}
289
290void icache_disable(void)
291{
292 cache_disable(CR_I);
293}
294
295int icache_status(void)
296{
297 return (get_cr() & CR_I) != 0;
298}
299#endif
300
301#if CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
302void dcache_enable(void)
303{
304 return;
305}
306
307void dcache_disable(void)
308{
309 return;
310}
311
312int dcache_status(void)
313{
314 return 0;
315}
316#else
317void dcache_enable(void)
318{
319 cache_enable(CR_C);
320}
321
322void dcache_disable(void)
323{
324 cache_disable(CR_C);
325}
326
327int dcache_status(void)
328{
329 return (get_cr() & CR_C) != 0;
330}
331#endif
332