#include <common.h>
#include <asm/system.h>
#include <asm/cache.h>
#include <linux/compiler.h>

#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))

DECLARE_GLOBAL_DATA_PTR;

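/*
 * Weak hooks: boards and SoCs can override these to run extra setup
 * before the page tables are written (arm_init_before_mmu) and to
 * configure the access domains (arm_init_domains). The defaults do
 * nothing.
 */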
__weak void arm_init_before_mmu(void)
{
}

__weak void arm_init_domains(void)
{
}

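/*
 * The coprocessor seems to need a short delay between a CP15 control
 * register read and the following write, so spin for a few cycles.
 */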
static void cp_delay(void)
{
	volatile int i;

	for (i = 0; i < 100; i++)
		nop();
	asm volatile("" : : : "memory");	/* compiler barrier */
}

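/*
 * Write the first-level descriptor for one MMU section: section base
 * address, access permissions and the requested cacheability bits.
 */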
void set_section_dcache(int section, enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
	/* Set the Access Flag so the entry does not fault on first use */
	u64 value = TTB_SECT_AP | TTB_SECT_AF;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
	u32 value = TTB_SECT_AP;
#endif

	/* Add the section base address */
	value |= ((u32)section << MMU_SECTION_SHIFT);

	/* Add the caching bits */
	value |= option;

	/* Set the page table entry */
	page_table[section] = value;
}

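/*
 * Weak fallback: CPU-specific code is expected to override this to
 * clean the modified page-table range to memory and invalidate the TLB.
 */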
__weak void mmu_page_table_flush(unsigned long start, unsigned long stop)
{
	debug("%s: Warning: not implemented\n", __func__);
}

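/*
 * Change the cacheability of an address range one MMU section at a
 * time, then flush the affected page-table entries so the table walker
 * sees the update. The range is widened to whole sections.
 */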
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
	unsigned long upto, end;

	end = ALIGN(start + size, MMU_SECTION_SIZE) >> MMU_SECTION_SHIFT;
	start = start >> MMU_SECTION_SHIFT;
	debug("%s: start=%pa, size=%zu, option=%d\n", __func__, &start, size,
	      option);
	for (upto = start; upto < end; upto++)
		set_section_dcache(upto, option);
	mmu_page_table_flush((u32)&page_table[start], (u32)&page_table[end]);
}

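/*
 * Weak default: mark every section of a DRAM bank cacheable, using the
 * write policy selected at configuration time.
 */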
__weak void dram_bank_mmu_setup(int bank)
{
	bd_t *bd = gd->bd;
	int i;

	debug("%s: bank: %d\n", __func__, bank);
	for (i = bd->bi_dram[bank].start >> MMU_SECTION_SHIFT;
	     i < (bd->bi_dram[bank].start >> MMU_SECTION_SHIFT) +
		 (bd->bi_dram[bank].size >> MMU_SECTION_SHIFT);
	     i++) {
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
		set_section_dcache(i, DCACHE_WRITETHROUGH);
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
		set_section_dcache(i, DCACHE_WRITEALLOC);
#else
		set_section_dcache(i, DCACHE_WRITEBACK);
#endif
	}
}

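/* To activate the MMU we need to set up virtual memory using sections */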
static inline void mmu_setup(void)
{
	int i;
	u32 reg;

	arm_init_before_mmu();

	/* Set up an identity mapping for all 4GB, rw for everyone, uncached */
	for (i = 0; i < ((4096ULL * 1024 * 1024) >> MMU_SECTION_SHIFT); i++)
		set_section_dcache(i, DCACHE_OFF);

	/* Then mark the DRAM banks cacheable */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++)
		dram_bank_mmu_setup(i);

#ifdef CONFIG_ARMV7_LPAE
	/* Set up 4 PTE entries pointing to our 4 1GB page tables */
	for (i = 0; i < 4; i++) {
		u64 *page_table = (u64 *)(gd->arch.tlb_addr + (4096 * 4));
		u64 tpt = gd->arch.tlb_addr + (4096 * i);

		page_table[i] = tpt | TTB_PAGETABLE;
	}

	reg = TTBCR_EAE;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBCR_ORGN0_WT | TTBCR_IRGN0_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBCR_ORGN0_WBWA | TTBCR_IRGN0_WBWA;
#else
	reg |= TTBCR_ORGN0_WBNWA | TTBCR_IRGN0_WBNWA;
#endif

	if (is_hyp()) {
		/* Set HTCR to enable LPAE */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set HTTBR0 */
		asm volatile("mcrr p15, 4, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set HMAIR */
		asm volatile("mcr p15, 4, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	} else {
		/* Set TTBCR to enable LPAE */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set 64-bit TTBR0 */
		asm volatile("mcrr p15, 0, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set MAIR */
		asm volatile("mcr p15, 0, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	}
#elif defined(CONFIG_CPU_V7)
	/* Set TTBR0 with the translation-table walk cacheability bits */
	reg = gd->arch.tlb_addr & TTBR0_BASE_ADDR_MASK;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBR0_RGN_WT | TTBR0_IRGN_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBR0_RGN_WBWA | TTBR0_IRGN_WBWA;
#else
	reg |= TTBR0_RGN_WB | TTBR0_IRGN_WB;
#endif
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (reg) : "memory");
#else
	/* Copy the page table address to cp15 */
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (gd->arch.tlb_addr) : "memory");
#endif

	/* Set the domain access control: manager access for all domains */
	asm volatile("mcr p15, 0, %0, c3, c0, 0"
		     : : "r" (~0));

	arm_init_domains();

	/* And enable the MMU */
	reg = get_cr();
	cp_delay();
	set_cr(reg | CR_M);
}

static int mmu_enabled(void)
{
	return get_cr() & CR_M;
}

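/* cache_bit must be either CR_I or CR_C */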
static void cache_enable(uint32_t cache_bit)
{
	uint32_t reg;

	/* The data cache is not usable unless the MMU is enabled too */
	if ((cache_bit == CR_C) && !mmu_enabled())
		mmu_setup();
	reg = get_cr();
	cp_delay();
	set_cr(reg | cache_bit);
}

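/* cache_bit must be either CR_I or CR_C */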
static void cache_disable(uint32_t cache_bit)
{
	uint32_t reg;

	reg = get_cr();
	cp_delay();

	if (cache_bit == CR_C) {
		/* If the data cache is not enabled there is nothing to do */
		if ((reg & CR_C) != CR_C)
			return;
		/* When disabling the data cache, disable the MMU too */
		cache_bit |= CR_M;
	}
	reg = get_cr();
	cp_delay();
	if (cache_bit == (CR_C | CR_M))
		flush_dcache_all();
	set_cr(reg & ~cache_bit);
}
#endif

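/* Empty stubs for when the I-cache is disabled at build time */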
#ifdef CONFIG_SYS_ICACHE_OFF
void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}
#else
void icache_enable(void)
{
	cache_enable(CR_I);
}

void icache_disable(void)
{
	cache_disable(CR_I);
}

int icache_status(void)
{
	return (get_cr() & CR_I) != 0;
}
#endif

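/* Empty stubs for when the D-cache is disabled at build time */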
#ifdef CONFIG_SYS_DCACHE_OFF
void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}
#else
void dcache_enable(void)
{
	cache_enable(CR_C);
}

void dcache_disable(void)
{
	cache_disable(CR_C);
}

int dcache_status(void)
{
	return (get_cr() & CR_C) != 0;
}
#endif