/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

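/* Statically allocated page tables used to map the fixmap region. */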
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (!pfn_valid(pfn))
                return pgprot_noncached(vma_prot);
        else if (file->f_flags & O_SYNC)
                return pgprot_writecombine(vma_prot);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

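/*
 * Allocate a zeroed page for an early page table, before the page allocator
 * is available.
 */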
static phys_addr_t __init early_pgtable_alloc(void)
{
        phys_addr_t phys;
        void *ptr;

        phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

        /*
         * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
         * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
         * the page table.
         */
        ptr = pte_set_fixmap(phys);

        memset(ptr, 0, PAGE_SIZE);

        /*
         * Implicit barriers also ensure the zeroed page is visible to the
         * page table walker.
         */
        pte_clear_fixmap();

        return phys;
}

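/*
 * Create PTE-level entries for [addr, end), allocating a new PTE table via
 * pgtable_alloc() if the PMD entry is empty.
 */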
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
                           unsigned long end, unsigned long pfn,
                           pgprot_t prot,
                           phys_addr_t (*pgtable_alloc)(void))
{
        pte_t *pte;

        BUG_ON(pmd_sect(*pmd));
        if (pmd_none(*pmd)) {
                phys_addr_t pte_phys;
                BUG_ON(!pgtable_alloc);
                pte_phys = pgtable_alloc();
                pte = pte_set_fixmap(pte_phys);
                __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
                pte_clear_fixmap();
        }
        BUG_ON(pmd_bad(*pmd));

        pte = pte_set_fixmap_offset(pmd, addr);
        do {
                set_pte(pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);

        pte_clear_fixmap();
}

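/*
 * Create PMD-level entries for [addr, end), using section (block) mappings
 * where the range and physical address are suitably aligned and block
 * mappings are permitted, and PTE tables otherwise.
 */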
static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
                           phys_addr_t phys, pgprot_t prot,
                           phys_addr_t (*pgtable_alloc)(void),
                           bool allow_block_mappings)
{
        pmd_t *pmd;
        unsigned long next;

        /*
         * Check for initial section mappings in the pgd/pud and remove them.
         */
        BUG_ON(pud_sect(*pud));
        if (pud_none(*pud)) {
                phys_addr_t pmd_phys;
                BUG_ON(!pgtable_alloc);
                pmd_phys = pgtable_alloc();
                pmd = pmd_set_fixmap(pmd_phys);
                __pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
                pmd_clear_fixmap();
        }
        BUG_ON(pud_bad(*pud));

        pmd = pmd_set_fixmap_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                /* try section mapping first */
                if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
                    allow_block_mappings) {
                        pmd_t old_pmd = *pmd;
                        pmd_set_huge(pmd, phys, prot);
                        /*
                         * Check for previous table entries created during
                         * boot (__create_page_tables) and flush them.
                         */
                        if (!pmd_none(old_pmd)) {
                                flush_tlb_all();
                                if (pmd_table(old_pmd)) {
                                        phys_addr_t table = pmd_page_paddr(old_pmd);
                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
                        alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
                                       prot, pgtable_alloc);
                }
                phys += next - addr;
        } while (pmd++, addr = next, addr != end);

        pmd_clear_fixmap();
}

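/*
 * A 1GiB block can only be mapped at the PUD level with the 4K granule
 * (PAGE_SHIFT == 12), and only if the range and physical address are
 * aligned to PUD_SIZE.
 */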
static inline bool use_1G_block(unsigned long addr, unsigned long next,
                                unsigned long phys)
{
        if (PAGE_SHIFT != 12)
                return false;

        if (((addr | next | phys) & ~PUD_MASK) != 0)
                return false;

        return true;
}

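/*
 * Create PUD-level entries for [addr, end), using 1GiB block mappings where
 * possible and descending to alloc_init_pmd() otherwise.
 */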
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
                           phys_addr_t phys, pgprot_t prot,
                           phys_addr_t (*pgtable_alloc)(void),
                           bool allow_block_mappings)
{
        pud_t *pud;
        unsigned long next;

        if (pgd_none(*pgd)) {
                phys_addr_t pud_phys;
                BUG_ON(!pgtable_alloc);
                pud_phys = pgtable_alloc();
                __pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
        }
        BUG_ON(pgd_bad(*pgd));

        pud = pud_set_fixmap_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);

                /*
                 * For 4K granule only, attempt to put down a 1GB block.
                 */
                if (use_1G_block(addr, next, phys) && allow_block_mappings) {
                        pud_t old_pud = *pud;
                        pud_set_huge(pud, phys, prot);

                        /*
                         * If we have an old value for a pud, it will
                         * be pointing to a pmd table that we no longer
                         * need (from swapper_pg_dir).
                         *
                         * Look up the old pmd table and free it.
                         */
                        if (!pud_none(old_pud)) {
                                flush_tlb_all();
                                if (pud_table(old_pud)) {
                                        phys_addr_t table = pud_page_paddr(old_pud);
                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
                        alloc_init_pmd(pud, addr, next, phys, prot,
                                       pgtable_alloc, allow_block_mappings);
                }
                phys += next - addr;
        } while (pud++, addr = next, addr != end);

        pud_clear_fixmap();
}

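/*
 * Create or update a mapping of [virt, virt + size) to phys in the page
 * tables rooted at pgdir, walking down from the PGD level. pgtable_alloc
 * may be NULL when no new table levels are expected to be needed.
 */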
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
                                 unsigned long virt, phys_addr_t size,
                                 pgprot_t prot,
                                 phys_addr_t (*pgtable_alloc)(void),
                                 bool allow_block_mappings)
{
        unsigned long addr, length, end, next;
        pgd_t *pgd = pgd_offset_raw(pgdir, virt);

        /*
         * If the virtual and physical address don't have the same offset
         * within a page, we cannot map the region as the caller expects.
         */
        if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
                return;

        phys &= PAGE_MASK;
        addr = virt & PAGE_MASK;
        length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

        end = addr + length;
        do {
                next = pgd_addr_end(addr, end);
                alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc,
                               allow_block_mappings);
                phys += next - addr;
        } while (pgd++, addr = next, addr != end);
}

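/*
 * Page-table allocator for mappings created once the page allocator is up;
 * allocates a constructed page-table page with __get_free_page().
 */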
static phys_addr_t pgd_pgtable_alloc(void)
{
        void *ptr = (void *)__get_free_page(PGALLOC_GFP);
        if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
                BUG();

        /* Ensure the zeroed page is visible to the page table walker */
        dsb(ishst);
        return __pa(ptr);
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
                                          phys_addr_t size, pgprot_t prot)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }
        __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, true);
}

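/*
 * Create a mapping in an mm other than the initial one. Page tables are
 * allocated with pgd_pgtable_alloc(), so this must not be called before the
 * page allocator is available.
 */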
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               unsigned long virt, phys_addr_t size,
                               pgprot_t prot, bool allow_block_mappings)
{
        BUG_ON(mm == &init_mm);

        __create_pgd_mapping(mm->pgd, phys, virt, size, prot,
                             pgd_pgtable_alloc, allow_block_mappings);
}

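/*
 * Update the permissions of an existing kernel mapping. No allocator is
 * passed, so no new table levels may be created.
 */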
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
                                phys_addr_t size, pgprot_t prot)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }

        __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
                             NULL, !debug_pagealloc_enabled());
}

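/*
 * Map a single memblock region into the linear mapping, giving the part that
 * overlaps the kernel image a separate, read-only treatment.
 */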
static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
{
        unsigned long kernel_start = __pa(_text);
        unsigned long kernel_end = __pa(__init_begin);

        /*
         * Take care not to create a writable alias for the
         * read-only text and rodata sections of the kernel image.
         */

        /* No overlap with the kernel text/rodata */
        if (end < kernel_start || start >= kernel_end) {
                __create_pgd_mapping(pgd, start, __phys_to_virt(start),
                                     end - start, PAGE_KERNEL,
                                     early_pgtable_alloc,
                                     !debug_pagealloc_enabled());
                return;
        }

        /*
         * This block overlaps the kernel text/rodata mappings.
         * Map the portion(s) which don't overlap.
         */
        if (start < kernel_start)
                __create_pgd_mapping(pgd, start,
                                     __phys_to_virt(start),
                                     kernel_start - start, PAGE_KERNEL,
                                     early_pgtable_alloc,
                                     !debug_pagealloc_enabled());
        if (kernel_end < end)
                __create_pgd_mapping(pgd, kernel_end,
                                     __phys_to_virt(kernel_end),
                                     end - kernel_end, PAGE_KERNEL,
                                     early_pgtable_alloc,
                                     !debug_pagealloc_enabled());

        /*
         * Map the linear alias of the [_text, __init_begin) interval as
         * read-only/non-executable. This makes the contents of the
         * region accessible to subsystems such as hibernate, but
         * protects it from inadvertent modification or execution.
         */
        __create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
                             kernel_end - kernel_start, PAGE_KERNEL_RO,
                             early_pgtable_alloc, !debug_pagealloc_enabled());
}

static void __init map_mem(pgd_t *pgd)
{
        struct memblock_region *reg;

        /* map all the memory banks */
        for_each_memblock(memory, reg) {
                phys_addr_t start = reg->base;
                phys_addr_t end = start + reg->size;

                if (start >= end)
                        break;
                if (memblock_is_nomap(reg))
                        continue;

                __map_memblock(pgd, start, end);
        }
}

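/*
 * Remap the kernel text as read-only/executable and .rodata as read-only,
 * now that boot-time modifications of those regions are complete.
 */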
void mark_rodata_ro(void)
{
        unsigned long section_size;

        section_size = (unsigned long)_etext - (unsigned long)_text;
        create_mapping_late(__pa(_text), (unsigned long)_text,
                            section_size, PAGE_KERNEL_ROX);

        /*
         * Mark .rodata as read-only. Use __init_begin rather than __end_rodata
         * so that the remapped region ends on a segment boundary.
         */
        section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
        create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata,
                            section_size, PAGE_KERNEL_RO);
}

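/*
 * Map a segment of the kernel image with the given protection and register
 * the corresponding vm_struct with the early vmalloc area list.
 */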
static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
                                      pgprot_t prot, struct vm_struct *vma)
{
        phys_addr_t pa_start = __pa(va_start);
        unsigned long size = va_end - va_start;

        BUG_ON(!PAGE_ALIGNED(pa_start));
        BUG_ON(!PAGE_ALIGNED(size));

        __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
                             early_pgtable_alloc, !debug_pagealloc_enabled());

        vma->addr = va_start;
        vma->phys_addr = pa_start;
        vma->size = size;
        vma->flags = VM_MAP;
        vma->caller = __builtin_return_address(0);

        vm_area_add_early(vma);
}

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgd)
{
        static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_init, vmlinux_data;

        map_kernel_segment(pgd, _text, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
        map_kernel_segment(pgd, __start_rodata, __init_begin, PAGE_KERNEL, &vmlinux_rodata);
        map_kernel_segment(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
                           &vmlinux_init);
        map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);

        if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
                /*
                 * The fixmap falls in a separate pgd to the kernel, and doesn't
                 * live in the carveout for the swapper_pg_dir. We can simply
                 * re-use the existing dir for the fixmap.
                 */
                set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
                        *pgd_offset_k(FIXADDR_START));
        } else if (CONFIG_PGTABLE_LEVELS > 3) {
                /*
                 * The fixmap shares its top level pgd entry with the kernel
                 * mapping. This can really only occur when we are running
                 * with 16k/4 levels, so we can simply reuse the pud level
                 * entry instead.
                 */
                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
                set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
                        __pud(__pa(bm_pmd) | PUD_TYPE_TABLE));
                pud_clear_fixmap();
        } else {
                BUG();
        }

        kasan_copy_shadow(pgd);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
        phys_addr_t pgd_phys = early_pgtable_alloc();
        pgd_t *pgd = pgd_set_fixmap(pgd_phys);

        map_kernel(pgd);
        map_mem(pgd);

        /*
         * We want to reuse the original swapper_pg_dir so we don't have to
         * communicate the new address to non-coherent secondaries in
         * secondary_entry, and so cpu_switch_mm can generate the address with
         * KASLR offset already taken into account.
         *
         * To do this we need to go via a temporary pgd.
         */
        cpu_replace_ttbr1(__va(pgd_phys));
        memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
        cpu_replace_ttbr1(swapper_pg_dir);

        pgd_clear_fixmap();
        memblock_free(pgd_phys, PAGE_SIZE);

        /*
         * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
         * allocated with it.
         */
        memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE,
                      SWAPPER_DIR_SIZE - PAGE_SIZE);
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if ((((long)addr) >> VA_BITS) != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        if (pud_sect(*pud))
                return pfn_valid(pud_pfn(*pud));

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_sect(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        return vmemmap_populate_basepages(start, end, node);
}
#else   /* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        unsigned long addr = start;
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        do {
                next = pmd_addr_end(addr, end);

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        void *p = NULL;

                        p = vmemmap_alloc_block_buf(PMD_SIZE, node);
                        if (!p)
                                return -ENOMEM;

                        set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
                } else
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
        } while (addr = next, addr != end);

        return 0;
}
#endif  /* !ARM64_SWAPPER_USES_SECTION_MAPS */

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif  /* CONFIG_SPARSEMEM_VMEMMAP */

static inline pud_t *fixmap_pud(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);

        BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

        return pud_offset_kimg(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
        pud_t *pud = fixmap_pud(addr);

        BUG_ON(pud_none(*pud) || pud_bad(*pud));

        return pmd_offset_kimg(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

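/*
 * Wire the statically allocated bm_pud/bm_pmd/bm_pte tables into the
 * initial page tables so that __set_fixmap() can be used before
 * paging_init() has run.
 */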
void __init early_fixmap_init(void)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr = FIXADDR_START;

        pgd = pgd_offset_k(addr);
        if (CONFIG_PGTABLE_LEVELS > 3 &&
            !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa(bm_pud))) {
                /*
                 * We only end up here if the kernel mapping and the fixmap
                 * share the top level pgd entry, which should only happen on
                 * 16k/4 levels configurations.
                 */
                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
                pud = pud_offset_kimg(pgd, addr);
        } else {
                pgd_populate(&init_mm, pgd, bm_pud);
                pud = fixmap_pud(addr);
        }
        pud_populate(&init_mm, pud, bm_pmd);
        pmd = fixmap_pmd(addr);
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

        if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
            || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                pr_warn("pmd %p != %p, %p\n",
                        pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
                        fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
                pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
        }
}

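/*
 * Install (or, if pgprot_val(flags) is zero, clear) the fixmap entry at the
 * given index.
 */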
void __set_fixmap(enum fixed_addresses idx,
                  phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

        pte = fixmap_pte(addr);

        if (pgprot_val(flags)) {
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        } else {
                pte_clear(&init_mm, addr, pte);
                flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
        }
}

void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
        const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
        int offset;
        void *dt_virt;

        /*
         * Check whether the physical FDT address is set and meets the minimum
         * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
         * at least 8 bytes so that we can always access the magic and size
         * fields of the FDT header after mapping the first chunk, double check
         * here if that is indeed the case.
         */
        BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
        if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
                return NULL;

        /*
         * Make sure that the FDT region can be mapped without the need to
         * allocate additional translation table pages, so that it is safe
         * to call create_mapping_noalloc() this early.
         *
         * On 64k pages, the FDT will be mapped using PTEs, so we need to
         * be in the same PMD as the rest of the fixmap.
         * On 4k pages, we'll use section mappings for the FDT so we only
         * have to be in the same PUD.
         */
        BUILD_BUG_ON(dt_virt_base % SZ_2M);

        BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
                     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

        offset = dt_phys % SWAPPER_BLOCK_SIZE;
        dt_virt = (void *)dt_virt_base + offset;

        /* map the first chunk so we can read the size from the header */
        create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
                               dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

        if (fdt_magic(dt_virt) != FDT_MAGIC)
                return NULL;

        *size = fdt_totalsize(dt_virt);
        if (*size > MAX_FDT_SIZE)
                return NULL;

        if (offset + *size > SWAPPER_BLOCK_SIZE)
                create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
                                       round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);

        return dt_virt;
}

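/*
 * Map the FDT through the fixmap and reserve its physical memory in
 * memblock.
 */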
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
        void *dt_virt;
        int size;

        dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
        if (!dt_virt)
                return NULL;

        memblock_reserve(dt_phys, size);
        return dt_virt;
}

int __init arch_ioremap_pud_supported(void)
{
        /* only 4k granule supports level 1 block mappings */
        return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
}

int __init arch_ioremap_pmd_supported(void)
{
        return 1;
}

int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
        BUG_ON(phys & ~PUD_MASK);
        set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
        return 1;
}

int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
        BUG_ON(phys & ~PMD_MASK);
        set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
        return 1;
}

int pud_clear_huge(pud_t *pud)
{
        if (!pud_sect(*pud))
                return 0;
        pud_clear(pud);
        return 1;
}

int pmd_clear_huge(pmd_t *pmd)
{
        if (!pmd_sect(*pmd))
                return 0;
        pmd_clear(pmd);
        return 1;
}