// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU substantially follows the
 * architecture specification.  This includes the 6xx, 740 and 750 CPUs.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>

#include <asm/prom.h>
#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/code-patching.h>
#include <asm/sections.h>

#include <mm/mmu_decl.h>

u8 __initdata early_hash[SZ_256K] __aligned(SZ_256K) = {0};

static struct hash_pte __initdata *Hash = (struct hash_pte *)early_hash;
static unsigned long __initdata Hash_size, Hash_mask;
static unsigned int __initdata hash_mb, hash_mb2;
unsigned long __initdata _SDR1;

struct ppc_bat BATS[8][2];	/* 8 pairs of IBAT, DBAT */

static struct batrange {	/* stores address ranges mapped by BATs */
        unsigned long start;
        unsigned long limit;
        phys_addr_t phys;
} bat_addrs[8];

#ifdef CONFIG_SMP
unsigned long mmu_hash_lock;
#endif

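/*
 * Return the physical address that a BAT maps this virtual address
 * to, or 0 if the address is not block mapped.
 */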
phys_addr_t v_block_mapped(unsigned long va)
{
        int b;

        for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
                if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
                        return bat_addrs[b].phys + (va - bat_addrs[b].start);
        return 0;
}

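/*
 * Return the virtual address that maps the given physical address
 * through a BAT, or 0 if it is not block mapped.
 */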
unsigned long p_block_mapped(phys_addr_t pa)
{
        int b;

        for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
                if (pa >= bat_addrs[b].phys &&
                    pa < (bat_addrs[b].limit - bat_addrs[b].start) + bat_addrs[b].phys)
                        return bat_addrs[b].start + (pa - bat_addrs[b].phys);
        return 0;
}

static int find_free_bat(void)
{
        int b;
        int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;

        for (b = 0; b < n; b++) {
                struct ppc_bat *bat = BATS[b];

                if (!(bat[1].batu & 3))	/* BAT is free if Vs and Vp are both clear */
                        return b;
        }
        return -1;
}

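/*
 * Compute the size of the largest block usable to map the start of
 * an area, based on the start address and size of that area:
 * - the maximum block size is 256M on 6xx;
 * - the base address must be aligned to the block size, so the
 *   maximum usable alignment is given by the lowest bit set in the
 *   base address (e.g. 0x20000000 when base is 0x20003000);
 * - the block size must be a power of two, taken from the highest
 *   bit set in (top - base).
 */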
static unsigned int block_size(unsigned long base, unsigned long top)
{
        unsigned int max_size = SZ_256M;
        unsigned int base_shift = (ffs(base) - 1) & 31;
        unsigned int block_shift = (fls(top - base) - 1) & 31;

        return min3(max_size, 1U << base_shift, 1U << block_shift);
}

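/*
 * Set up one of the IBAT (instruction block address translation)
 * register pairs.  The parameters are not checked; in particular
 * size must be a power of 2 between 128k and 256M.
 */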
static void setibat(int index, unsigned long virt, phys_addr_t phys,
                    unsigned int size, pgprot_t prot)
{
        unsigned int bl = (size >> 17) - 1;
        int wimgxpp;
        struct ppc_bat *bat = BATS[index];
        unsigned long flags = pgprot_val(prot);

        if (!cpu_has_feature(CPU_FTR_NEED_COHERENT))
                flags &= ~_PAGE_COHERENT;

        wimgxpp = (flags & _PAGE_COHERENT) | (_PAGE_EXEC ? BPP_RX : BPP_XX);
        bat[0].batu = virt | (bl << 2) | 2;	/* Vs=1, Vp=0 */
        bat[0].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
        if (flags & _PAGE_USER)
                bat[0].batu |= 1;	/* Vp = 1 */
}

static void clearibat(int index)
{
        struct ppc_bat *bat = BATS[index];

        bat[0].batu = 0;
        bat[0].batl = 0;
}

static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long top)
{
        int idx;

        while ((idx = find_free_bat()) != -1 && base != top) {
                unsigned int size = block_size(base, top);

                if (size < 128 << 10)	/* 128k is the smallest BAT size */
                        break;
                setbat(idx, PAGE_OFFSET + base, base, size, PAGE_KERNEL_X);
                base += size;
        }

        return base;
}

unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
        unsigned long done;
        unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;

        if (debug_pagealloc_enabled() || __map_without_bats) {
                pr_debug_once("Read-Write memory mapped without BATs\n");
                if (base >= border)
                        return base;
                if (top >= border)
                        top = border;
        }

        if (!strict_kernel_rwx_enabled() || base >= border || top <= border)
                return __mmu_mapin_ram(base, top);

        /* Split the mapping at the border so text and RW data don't share a BAT */
        done = __mmu_mapin_ram(base, border);
        if (done != border)
                return done;

        return __mmu_mapin_ram(border, top);
}

static bool is_module_segment(unsigned long addr)
{
        if (!IS_ENABLED(CONFIG_MODULES))
                return false;
#ifdef MODULES_VADDR
        if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M))
                return false;
        if (addr > ALIGN(MODULES_END, SZ_256M) - 1)
                return false;
#else
        if (addr < ALIGN_DOWN(VMALLOC_START, SZ_256M))
                return false;
        if (addr > ALIGN(VMALLOC_END, SZ_256M) - 1)
                return false;
#endif
        return true;
}

void mmu_mark_initmem_nx(void)
{
        int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
        int i;
        unsigned long base = (unsigned long)_stext - PAGE_OFFSET;
        unsigned long top = (unsigned long)_etext - PAGE_OFFSET;
        unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
        unsigned long size;

        for (i = 0; i < nb - 1 && base < top && top - base > (128 << 10);) {
                size = block_size(base, top);
                setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
                base += size;
        }
        if (base < top) {
                size = block_size(base, top);
                size = max(size, 128UL << 10);
                if ((top - base) > size) {
                        size <<= 1;
                        if (strict_kernel_rwx_enabled() && base + size > border)
                                pr_warn("Some RW data is getting mapped X. "
                                        "Adjust CONFIG_DATA_SHIFT to avoid that.\n");
                }
                setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
                base += size;
        }
        for (; i < nb; i++)
                clearibat(i);

        update_bats();

        for (i = TASK_SIZE >> 28; i < 16; i++) {
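                /* Do not set NX on the VM space used by modules */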
                if (is_module_segment(i << 28))
                        continue;

                mtsr(mfsr(i << 28) | 0x10000000, i << 28);	/* set the N (no-execute) bit */
        }
}

void mmu_mark_rodata_ro(void)
{
        int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
        int i;

        for (i = 0; i < nb; i++) {
                struct ppc_bat *bat = BATS[i];

                if (bat_addrs[i].start < (unsigned long)__init_begin)
                        bat[1].batl = (bat[1].batl & ~BPP_RW) | BPP_RX;
        }

        update_bats();
}

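/*
 * Set up one of the I/D BAT (block address translation) register
 * pairs.  The parameters are not checked; in particular size must
 * be a power of 2 between 128k and 256M.
 */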
void __init setbat(int index, unsigned long virt, phys_addr_t phys,
                   unsigned int size, pgprot_t prot)
{
        unsigned int bl;
        int wimgxpp;
        struct ppc_bat *bat;
        unsigned long flags = pgprot_val(prot);

        if (index == -1)
                index = find_free_bat();
        if (index == -1) {
                pr_err("%s: no BAT available for mapping 0x%llx\n", __func__,
                       (unsigned long long)phys);
                return;
        }
        bat = BATS[index];

        if ((flags & _PAGE_NO_CACHE) ||
            (cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0))
                flags &= ~_PAGE_COHERENT;

        bl = (size >> 17) - 1;

        /* Do the DBAT first */
        wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
                           | _PAGE_COHERENT | _PAGE_GUARDED);
        wimgxpp |= (flags & _PAGE_RW) ? BPP_RW : BPP_RX;
        bat[1].batu = virt | (bl << 2) | 2;	/* Vs=1, Vp=0 */
        bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
        if (flags & _PAGE_USER)
                bat[1].batu |= 1;	/* Vp = 1 */
        if (flags & _PAGE_GUARDED) {
                /* G bit must be zero in IBATs */
                flags &= ~_PAGE_EXEC;
        }
        if (flags & _PAGE_EXEC)
                bat[0] = bat[1];
        else
                bat[0].batu = bat[0].batl = 0;

        bat_addrs[index].start = virt;
        bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
        bat_addrs[index].phys = phys;
}

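/*
 * Preload a translation in the hash table.
 */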
static void hash_preload(struct mm_struct *mm, unsigned long ea)
{
        pmd_t *pmd;

        if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
                return;
        pmd = pmd_off(mm, ea);
        if (!pmd_none(*pmd))
                add_hash_page(mm->context.id, ea, pmd_val(*pmd));
}

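/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the Linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated Linux PTE.
 *
 * This must always be called with the pte lock held.
 */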
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t *ptep)
{
        if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
                return;

        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(*ptep) || address >= TASK_SIZE)
                return;

        /* We have to test for regs NULL since init will get here first thing at boot */
        if (!current->thread.regs)
                return;

        /* We also avoid filling the hash if not coming from a fault (0x300 DSI, 0x400 ISI) */
        if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400)
                return;

        hash_preload(vma->vm_mm, address);
}

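/*
 * Set up the hash table; the assembly routines that use it are
 * patched to match in MMU_init_hw_patch().
 */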
void __init MMU_init_hw(void)
{
        unsigned int n_hpteg, lg_n_hpteg;

        if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
                return;

        if (ppc_md.progress)
                ppc_md.progress("hash:enter", 0x105);

#define LG_HPTEG_SIZE	6		/* 64 bytes per HPTEG */
#define SDR1_LOW_BITS	((n_hpteg - 1) >> 10)
#define MIN_N_HPTEG	1024		/* min 64kB hash table */

        /*
         * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
         * This is less than the recommended amount, but then
         * Linux ain't AIX.
         */
        n_hpteg = total_memory / (PAGE_SIZE * 8);
        if (n_hpteg < MIN_N_HPTEG)
                n_hpteg = MIN_N_HPTEG;
        lg_n_hpteg = __ilog2(n_hpteg);
        if (n_hpteg & (n_hpteg - 1)) {
                ++lg_n_hpteg;		/* round up if not a power of 2 */
                n_hpteg = 1 << lg_n_hpteg;
        }
        Hash_size = n_hpteg << LG_HPTEG_SIZE;

        /*
         * Find some memory for the hash table.
         */
        if (ppc_md.progress)
                ppc_md.progress("hash:find piece", 0x322);
        Hash = memblock_alloc(Hash_size, Hash_size);
        if (!Hash)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, Hash_size, Hash_size);
        _SDR1 = __pa(Hash) | SDR1_LOW_BITS;

        pr_info("Total memory = %lldMB; using %ldkB for hash table\n",
                (unsigned long long)(total_memory >> 20), Hash_size >> 10);

        /* Compute the mask and shifts used by the hash functions */
        Hash_mask = n_hpteg - 1;
        hash_mb2 = hash_mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
        if (lg_n_hpteg > 16)
                hash_mb2 = 16 - LG_HPTEG_SIZE;
}

void __init MMU_init_hw_patch(void)
{
        unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
        unsigned int hash = (unsigned int)Hash - PAGE_OFFSET;

        if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
                return;

        if (ppc_md.progress)
                ppc_md.progress("hash:patch", 0x345);
        if (ppc_md.progress)
                ppc_md.progress("hash:done", 0x205);

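        /* Patch the hash table address, mask and shifts into create_hpte */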
        modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
        modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
        modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
        modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
        modify_instruction_site(&patch__hash_page_C, 0xffff, hmask);

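        /* Patch the hash table address, mask and shifts into flush_hash_page */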
        modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
        modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6);
        modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6);
        modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                phys_addr_t first_memblock_size)
{
        /*
         * We don't currently support the first MEMBLOCK not mapping 0
         * physical on those processors.
         */
        BUG_ON(first_memblock_base != 0);

        /* The initial mapping covers at most 256M, the size of one BAT */
        memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_256M));
}

void __init print_system_hash_info(void)
{
        pr_info("Hash_size = 0x%lx\n", Hash_size);
        if (Hash_mask)
                pr_info("Hash_mask = 0x%lx\n", Hash_mask);
}

#ifdef CONFIG_PPC_KUEP
void __init setup_kuep(bool disabled)
{
        pr_info("Activating Kernel Userspace Execution Prevention\n");

        if (disabled)
                pr_warn("KUEP cannot be disabled yet on 6xx when compiled in\n");
}
#endif

#ifdef CONFIG_PPC_KUAP
void __init setup_kuap(bool disabled)
{
        pr_info("Activating Kernel Userspace Access Protection\n");

        if (disabled)
                pr_warn("KUAP cannot be disabled yet on 6xx when compiled in\n");
}
#endif

void __init early_init_mmu(void)
{
}