1
2
3
4
5
6
7#include <common.h>
8#include <cpu_func.h>
9#include <log.h>
10#include <asm/global_data.h>
11#include <asm/system.h>
12#include <asm/cache.h>
13#include <linux/compiler.h>
14#include <asm/armv7_mpu.h>
15
16#if !(CONFIG_IS_ENABLED(SYS_ICACHE_OFF) && CONFIG_IS_ENABLED(SYS_DCACHE_OFF))
17
18DECLARE_GLOBAL_DATA_PTR;
19
20#ifdef CONFIG_SYS_ARM_MMU
/*
 * Weak hook called before the MMU tables are written; boards override it
 * to quiesce anything (e.g. ongoing DMA) that must not run during setup.
 */
__weak void arm_init_before_mmu(void)
{
}
24
/*
 * Weak hook called after the translation table base is programmed;
 * boards override it to set up their own DACR domain configuration.
 */
__weak void arm_init_domains(void)
{
}
28
/*
 * Write one first-level section entry: map table index 'section' to
 * physical address 'phys' with cache attributes 'option'. The table
 * lives at gd->arch.tlb_addr; entry width depends on LPAE.
 */
static void set_section_phys(int section, phys_addr_t phys,
			     enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
	/* The Access Flag must be set or the first access faults */
	u64 value = TTB_SECT_AP | TTB_SECT_AF;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
	u32 value = TTB_SECT_AP;
#endif

	/* Add the page offset */
	value |= phys;

	/* Add caching bits */
	value |= option;

	/* Set PTE */
	page_table[section] = value;
}
50
51void set_section_dcache(int section, enum dcache_option option)
52{
53 set_section_phys(section, (u32)section << MMU_SECTION_SHIFT, option);
54}
55
/*
 * Weak default for flushing modified page-table entries out of the
 * data cache; real implementations are provided by per-CPU code.
 */
__weak void mmu_page_table_flush(unsigned long start, unsigned long stop)
{
	debug("%s: Warning: not implemented\n", __func__);
}
60
/*
 * Apply cache attributes 'option' to the virtual range
 * [start, start + size), mapped to physical address 'phys', one
 * MMU section at a time, then flush the modified page-table entries
 * so the table walker observes them.
 */
void mmu_set_region_dcache_behaviour_phys(phys_addr_t start, phys_addr_t phys,
					size_t size, enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
#endif
	unsigned long startpt, stoppt;
	unsigned long upto, end;

	/*
	 * Halve start and size before adding so that start + size cannot
	 * overflow phys_addr_t; shift one bit less to compensate.
	 */
	end = ALIGN((start / 2) + (size / 2), MMU_SECTION_SIZE / 2)
		>> (MMU_SECTION_SHIFT - 1);
	/* From here on 'start' holds the first section index, not an address */
	start = start >> MMU_SECTION_SHIFT;

#ifdef CONFIG_ARMV7_LPAE
	debug("%s: start=%pa, size=%zu, option=%llx\n", __func__, &start, size,
	      option);
#else
	debug("%s: start=%pa, size=%zu, option=0x%x\n", __func__, &start, size,
	      option);
#endif
	for (upto = start; upto < end; upto++, phys += MMU_SECTION_SIZE)
		set_section_phys(upto, phys, option);

	/*
	 * Round the touched PTE range out to whole cache lines and flush
	 * it to memory so the MMU (which may not snoop the dcache on all
	 * cores) sees the new entries.
	 */
	startpt = (unsigned long)&page_table[start];
	startpt &= ~(CONFIG_SYS_CACHELINE_SIZE - 1);
	stoppt = (unsigned long)&page_table[end];
	stoppt = ALIGN(stoppt, CONFIG_SYS_CACHELINE_SIZE);
	mmu_page_table_flush(startpt, stoppt);
}
99
/*
 * Identity-mapped convenience wrapper: apply 'option' to
 * [start, start + size) with physical address == virtual address.
 */
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	mmu_set_region_dcache_behaviour_phys(start, start, size, option);
}
105
106__weak void dram_bank_mmu_setup(int bank)
107{
108 struct bd_info *bd = gd->bd;
109 int i;
110
111
112 if ((gd->flags & GD_FLG_RELOC) == 0)
113 return;
114
115 debug("%s: bank: %d\n", __func__, bank);
116 for (i = bd->bi_dram[bank].start >> MMU_SECTION_SHIFT;
117 i < (bd->bi_dram[bank].start >> MMU_SECTION_SHIFT) +
118 (bd->bi_dram[bank].size >> MMU_SECTION_SHIFT);
119 i++)
120 set_section_dcache(i, DCACHE_DEFAULT_OPTION);
121}
122
123
/*
 * Build an identity-mapped translation table for the whole 4GB space,
 * mark DRAM banks cacheable, program the CP15 translation registers
 * and finally turn the MMU on.
 */
static inline void mmu_setup(void)
{
	int i;
	u32 reg;

	arm_init_before_mmu();

	/* Start with every section uncached */
	for (i = 0; i < ((4096ULL * 1024 * 1024) >> MMU_SECTION_SHIFT); i++)
		set_section_dcache(i, DCACHE_OFF);

	/* Then enable caching for each DRAM bank */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		dram_bank_mmu_setup(i);
	}

#if defined(CONFIG_ARMV7_LPAE) && __LINUX_ARM_ARCH__ != 4
	/*
	 * LPAE: the level-1 table (4 entries after the four 4KB level-2
	 * tables) points at the four 1GB page tables.
	 */
	for (i = 0; i < 4; i++) {
		u64 *page_table = (u64 *)(gd->arch.tlb_addr + (4096 * 4));
		u64 tpt = gd->arch.tlb_addr + (4096 * i);
		page_table[i] = tpt | TTB_PAGETABLE;
	}

	/* EAE selects the long-descriptor (LPAE) format */
	reg = TTBCR_EAE;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBCR_ORGN0_WT | TTBCR_IRGN0_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBCR_ORGN0_WBWA | TTBCR_IRGN0_WBWA;
#else
	reg |= TTBCR_ORGN0_WBNWA | TTBCR_IRGN0_WBNWA;
#endif

	if (is_hyp()) {
		/* HTCR: enable LPAE in HYP mode */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* HTTBR: 64-bit table base (low word = L1 table, high = 0) */
		asm volatile("mcrr p15, 4, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* HMAIR: memory attribute indirection */
		asm volatile("mcr p15, 4, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	} else {
		/* TTBCR: enable LPAE */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* TTBR0: 64-bit table base */
		asm volatile("mcrr p15, 0, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* MAIR: memory attribute indirection */
		asm volatile("mcr p15, 0, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	}
#elif defined(CONFIG_CPU_V7A)
	if (is_hyp()) {
		/* HTCR = 0: short-descriptor format */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	} else {
		/* TTBCR = 0: short-descriptor format, TTBR0 covers all */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	}
	/* TTBR0: table base plus table-walk cacheability bits */
	reg = gd->arch.tlb_addr & TTBR0_BASE_ADDR_MASK;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBR0_RGN_WT | TTBR0_IRGN_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBR0_RGN_WBWA | TTBR0_IRGN_WBWA;
#else
	reg |= TTBR0_RGN_WB | TTBR0_IRGN_WB;
#endif
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (reg) : "memory");
#else
	/* Pre-v7: just load the page table address into TTBR0 */
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (gd->arch.tlb_addr) : "memory");
#endif

	/* DACR = ~0: manager access for all 16 domains (no permission checks) */
	asm volatile("mcr p15, 0, %0, c3, c0, 0"
		     : : "r" (~0));

	arm_init_domains();

	/* And finally enable the MMU */
	reg = get_cr();
	set_cr(reg | CR_M);
}
216
217static int mmu_enabled(void)
218{
219 return get_cr() & CR_M;
220}
221#endif
222
223
/*
 * Set the given cache enable bit (CR_C or CR_I) in SCTLR. Enabling the
 * data cache requires a translation regime, so the MMU is set up (or the
 * MPU checked) first.
 */
static void cache_enable(uint32_t cache_bit)
{
	uint32_t reg;

#ifdef CONFIG_SYS_ARM_MMU
	/* The data cache cannot be enabled without the MMU */
	if ((cache_bit == CR_C) && !mmu_enabled())
		mmu_setup();
#elif defined(CONFIG_SYS_ARM_MPU)
	/* With an MPU we only warn and refuse rather than set it up here */
	if ((cache_bit == CR_C) && !mpu_enabled()) {
		printf("Consider enabling MPU before enabling caches\n");
		return;
	}
#endif
	reg = get_cr();
	set_cr(reg | cache_bit);
}
241
242
/*
 * Clear the given cache enable bit in SCTLR. Disabling the data cache
 * also disables the MMU (on MMU systems) and flushes dirty lines to
 * memory first so no data is lost.
 */
static void cache_disable(uint32_t cache_bit)
{
	uint32_t reg;

	reg = get_cr();

	if (cache_bit == CR_C) {
		/* If the dcache is already off there is nothing to do */
		if ((reg & CR_C) != CR_C)
			return;
#ifdef CONFIG_SYS_ARM_MMU
		/* When disabling the data cache, disable the MMU too */
		cache_bit |= CR_M;
#endif
	}
	reg = get_cr();

	/* Push dirty lines to memory before the dcache goes off */
#ifdef CONFIG_SYS_ARM_MMU
	if (cache_bit == (CR_C | CR_M))
#elif defined(CONFIG_SYS_ARM_MPU)
	if (cache_bit == CR_C)
#endif
		flush_dcache_all();
	set_cr(reg & ~cache_bit);
}
268#endif
269
270#if CONFIG_IS_ENABLED(SYS_ICACHE_OFF)
/* No-op: built with SYS_ICACHE_OFF, there is no icache support to enable. */
void icache_enable(void)
{
}
275
/* No-op: built with SYS_ICACHE_OFF, the icache is never enabled. */
void icache_disable(void)
{
}
280
/* Built with SYS_ICACHE_OFF: always reports the icache as disabled. */
int icache_status(void)
{
	const int enabled = 0;

	return enabled;
}
285#else
/* Enable the instruction cache (sets CR_I in SCTLR). */
void icache_enable(void)
{
	cache_enable(CR_I);
}
290
/* Disable the instruction cache (clears CR_I in SCTLR). */
void icache_disable(void)
{
	cache_disable(CR_I);
}
295
296int icache_status(void)
297{
298 return (get_cr() & CR_I) != 0;
299}
300#endif
301
302#if CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
/* No-op: built with SYS_DCACHE_OFF, there is no dcache support to enable. */
void dcache_enable(void)
{
}
307
/* No-op: built with SYS_DCACHE_OFF, the dcache is never enabled. */
void dcache_disable(void)
{
}
312
/* Built with SYS_DCACHE_OFF: always reports the dcache as disabled. */
int dcache_status(void)
{
	const int enabled = 0;

	return enabled;
}
317#else
/* Enable the data cache (sets CR_C; sets up the MMU first if needed). */
void dcache_enable(void)
{
	cache_enable(CR_C);
}
322
/* Disable the data cache (flushes it and, on MMU systems, disables the MMU). */
void dcache_disable(void)
{
	cache_disable(CR_C);
}
327
328int dcache_status(void)
329{
330 return (get_cr() & CR_C) != 0;
331}
332#endif
333