/*
 * NOTE(review): the original file header (copyright/license block) was
 * lost in extraction and replaced by bare line numbers.  Restore it from
 * the upstream U-Boot sources (arch/powerpc/cpu/mpc85xx/tlb.c) before
 * committing.
 */
#include <common.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#ifdef CONFIG_ADDR_MAP
#include <addr_map.h>
#endif

DECLARE_GLOBAL_DATA_PTR;

35void invalidate_tlb(u8 tlb)
36{
37 if (tlb == 0)
38 mtspr(MMUCSR0, 0x4);
39 if (tlb == 1)
40 mtspr(MMUCSR0, 0x2);
41}
43void init_tlbs(void)
44{
45 int i;
46
47 for (i = 0; i < num_tlb_entries; i++) {
48 write_tlb(tlb_table[i].mas0,
49 tlb_table[i].mas1,
50 tlb_table[i].mas2,
51 tlb_table[i].mas3,
52 tlb_table[i].mas7);
53 }
54
55 return ;
56}

#ifndef CONFIG_NAND_SPL
59void set_tlb(u8 tlb, u32 epn, u64 rpn,
60 u8 perms, u8 wimge,
61 u8 ts, u8 esel, u8 tsize, u8 iprot)
62{
63 u32 _mas0, _mas1, _mas2, _mas3, _mas7;
64
65 _mas0 = FSL_BOOKE_MAS0(tlb, esel, 0);
66 _mas1 = FSL_BOOKE_MAS1(1, iprot, 0, ts, tsize);
67 _mas2 = FSL_BOOKE_MAS2(epn, wimge);
68 _mas3 = FSL_BOOKE_MAS3(rpn, 0, perms);
69 _mas7 = FSL_BOOKE_MAS7(rpn);
70
71 write_tlb(_mas0, _mas1, _mas2, _mas3, _mas7);
72
73#ifdef CONFIG_ADDR_MAP
74 if ((tlb == 1) && (gd->flags & GD_FLG_RELOC))
75 addrmap_set_entry(epn, rpn, (1UL << ((tsize * 2) + 10)), esel);
76#endif
77}
79void disable_tlb(u8 esel)
80{
81 u32 _mas0, _mas1, _mas2, _mas3, _mas7;
82
83 _mas0 = FSL_BOOKE_MAS0(1, esel, 0);
84 _mas1 = 0;
85 _mas2 = 0;
86 _mas3 = 0;
87 _mas7 = 0;
88
89 mtspr(MAS0, _mas0);
90 mtspr(MAS1, _mas1);
91 mtspr(MAS2, _mas2);
92 mtspr(MAS3, _mas3);
93#ifdef CONFIG_ENABLE_36BIT_PHYS
94 mtspr(MAS7, _mas7);
95#endif
96 asm volatile("isync;msync;tlbwe;isync");
97
98#ifdef CONFIG_ADDR_MAP
99 if (gd->flags & GD_FLG_RELOC)
100 addrmap_set_entry(0, 0, 0, esel);
101#endif
102}
104static void tlbsx (const volatile unsigned *addr)
105{
106 __asm__ __volatile__ ("tlbsx 0,%0" : : "r" (addr), "m" (*addr));
107}


110int find_tlb_idx(void *addr, u8 tlbsel)
111{
112 u32 _mas0, _mas1;
113
114
115 mtspr(MAS6, 0);
116
117 tlbsx(addr);
118
119 _mas0 = mfspr(MAS0);
120 _mas1 = mfspr(MAS1);
121
122
123 if ((MAS1_VALID & _mas1) &&
124 (MAS0_TLBSEL(tlbsel) == (_mas0 & MAS0_TLBSEL_MSK))) {
125 return ((_mas0 & MAS0_ESEL_MSK) >> 16);
126 }
127
128 return -1;
129}
131#ifdef CONFIG_ADDR_MAP
132void init_addr_map(void)
133{
134 int i;
135 unsigned int num_cam = mfspr(SPRN_TLB1CFG) & 0xfff;
136
137
138 for (i = 0; i < num_cam; i++) {
139 unsigned long epn;
140 u32 tsize, _mas1;
141 phys_addr_t rpn;
142
143 mtspr(MAS0, FSL_BOOKE_MAS0(1, i, 0));
144
145 asm volatile("tlbre;isync");
146 _mas1 = mfspr(MAS1);
147
148
149 if (!(_mas1 & MAS1_VALID))
150 continue;
151
152 tsize = (_mas1 >> 8) & 0xf;
153 epn = mfspr(MAS2) & MAS2_EPN;
154 rpn = mfspr(MAS3) & MAS3_RPN;
155#ifdef CONFIG_ENABLE_36BIT_PHYS
156 rpn |= ((phys_addr_t)mfspr(MAS7)) << 32;
157#endif
158
159 addrmap_set_entry(epn, rpn, (1UL << ((tsize * 2) + 10)), i);
160 }
161
162 return ;
163}
#endif

#ifndef CONFIG_SYS_DDR_TLB_START
#define CONFIG_SYS_DDR_TLB_START 8
#endif

170unsigned int setup_ddr_tlbs(unsigned int memsize_in_meg)
171{
172 unsigned int tlb_size;
173 unsigned int ram_tlb_index = CONFIG_SYS_DDR_TLB_START;
174 unsigned int ram_tlb_address = (unsigned int)CONFIG_SYS_DDR_SDRAM_BASE;
175 unsigned int max_cam = (mfspr(SPRN_TLB1CFG) >> 16) & 0xf;
176 u64 size, memsize = (u64)memsize_in_meg << 20;
177
178 size = min(memsize, CONFIG_MAX_MEM_MAPPED);
179
180
181 max_cam = max_cam * 2 + 10;
182
183 for (; size && ram_tlb_index < 16; ram_tlb_index++) {
184 u32 camsize = __ilog2_u64(size) & ~1U;
185 u32 align = __ilog2(ram_tlb_address) & ~1U;
186
187 if (align == -2) align = max_cam;
188 if (camsize > align)
189 camsize = align;
190
191 if (camsize > max_cam)
192 camsize = max_cam;
193
194 tlb_size = (camsize - 10) / 2;
195
196 set_tlb(1, ram_tlb_address, ram_tlb_address,
197 MAS3_SX|MAS3_SW|MAS3_SR, 0,
198 0, ram_tlb_index, tlb_size, 1);
199
200 size -= 1ULL << camsize;
201 memsize -= 1ULL << camsize;
202 ram_tlb_address += 1UL << camsize;
203 }
204
205 if (memsize)
206 print_size(memsize, " left unmapped\n");
207
208
209
210
211 return memsize_in_meg;
212}
#endif