// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
#include <asm/tlbflush.h>

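/*
 * Flags passed down the __create_pgd_mapping() call chain to suppress the
 * use of block (section) mappings and/or the contiguous hint, e.g. for
 * regions whose permissions must later be changed at page granularity.
 */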
#define NO_BLOCK_MAPPINGS	BIT(0)
#define NO_CONT_MAPPINGS	BIT(1)

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;

u64 __section(".mmuoff.data.write") vabits_actual;
EXPORT_SYMBOL(vabits_actual);

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

static DEFINE_SPINLOCK(swapper_pgdir_lock);

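/*
 * swapper_pg_dir is mapped read-only in the kernel image, so updates to the
 * top-level swapper entries go through a dedicated fixmap slot, serialized
 * by swapper_pgdir_lock.
 */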
void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgd_t *fixmap_pgdp;

	spin_lock(&swapper_pgdir_lock);
	fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
	WRITE_ONCE(*fixmap_pgdp, pgd);
	/*
	 * We need dsb(ishst) here to ensure the page-table-walker sees
	 * our new entry before set_p?d() returns. The fixmap's
	 * flush_tlb_kernel_range() via clear_fixmap() does this for us.
	 */
	pgd_clear_fixmap();
	spin_unlock(&swapper_pgdir_lock);
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

static phys_addr_t __init early_pgtable_alloc(int shift)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!phys)
		panic("Failed to allocate page table page\n");

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker
	 */
	pte_clear_fixmap();

	return phys;
}

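/*
 * Decide whether a live page table entry can be rewritten from @old to @new
 * without a break-before-make sequence: only permission-style attribute
 * changes (or creating/tearing down an entry) qualify.
 */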
static bool pgattr_change_is_safe(u64 old, u64 new)
{
	/*
	 * The following mapping attributes may be updated in live
	 * kernel mappings without the need for break-before-make.
	 */
	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;

	/* creating or taking down mappings is always safe */
	if (old == 0 || new == 0)
		return true;

	/* live contiguous mappings may not be manipulated at all */
	if ((old | new) & PTE_CONT)
		return false;

	/* Transitioning from Non-Global to Global is unsafe */
	if (old & ~new & PTE_NG)
		return false;

	return ((old ^ new) & ~mask) == 0;
}

static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot)
{
	pte_t *ptep;

	ptep = pte_set_fixmap_offset(pmdp, addr);
	do {
		pte_t old_pte = READ_ONCE(*ptep);

		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));

		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
					      READ_ONCE(pte_val(*ptep))));

		phys += PAGE_SIZE;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int),
				int flags)
{
	unsigned long next;
	pmd_t pmd = READ_ONCE(*pmdp);

	BUG_ON(pmd_sect(pmd));
	if (pmd_none(pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc(PAGE_SHIFT);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
		pmd = READ_ONCE(*pmdp);
	}
	BUG_ON(pmd_bad(pmd));

	do {
		pgprot_t __prot = prot;

		next = pte_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pte(pmdp, addr, next, phys, __prot);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_set_fixmap_offset(pudp, addr);
	do {
		pmd_t old_pmd = READ_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pmd_set_huge(pmdp, phys, prot);

			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
						      READ_ONCE(pmd_val(*pmdp))));
		} else {
			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pmd_val(old_pmd) != 0 &&
			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
		}
		phys += next - addr;
	} while (pmdp++, addr = next, addr != end);

	pmd_clear_fixmap();
}

static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(int), int flags)
{
	unsigned long next;
	pud_t pud = READ_ONCE(*pudp);

	/*
	 * Check for initial section mappings in the pgd/pud.
	 */
	BUG_ON(pud_sect(pud));
	if (pud_none(pud)) {
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc(PMD_SHIFT);
		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
		pud = READ_ONCE(*pudp);
	}
	BUG_ON(pud_bad(pud));

	do {
		pgprot_t __prot = prot;

		next = pmd_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);

		phys += next - addr;
	} while (addr = next, addr != end);
}

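/*
 * A PUD-level block mapping covers 1GiB and is only available with the 4K
 * granule (PAGE_SHIFT == 12); the virtual range and the physical address
 * must all be 1GiB-aligned for the block to be usable.
 */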
static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(int),
			   int flags)
{
	unsigned long next;
	pud_t *pudp;
	pgd_t pgd = READ_ONCE(*pgdp);

	if (pgd_none(pgd)) {
		phys_addr_t pud_phys;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc(PUD_SHIFT);
		__pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE);
		pgd = READ_ONCE(*pgdp);
	}
	BUG_ON(pgd_bad(pgd));

	pudp = pud_set_fixmap_offset(pgdp, addr);
	do {
		pud_t old_pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys) &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pud_set_huge(pudp, phys, prot);

			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
						      READ_ONCE(pud_val(*pudp))));
		} else {
			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pud_val(old_pud) != 0 &&
			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
		}
		phys += next - addr;
	} while (pudp++, addr = next, addr != end);

	pud_clear_fixmap();
}

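/*
 * Top-level walker: map [virt, virt + size) to phys in pgdir, allocating
 * intermediate levels through pgtable_alloc() as needed. The flags argument
 * (NO_BLOCK_MAPPINGS/NO_CONT_MAPPINGS) restricts which entry types may be
 * used at each level.
 */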
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(int),
				 int flags)
{
	unsigned long addr, end, next;
	pgd_t *pgdp = pgd_offset_raw(pgdir, virt);

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	end = PAGE_ALIGN(virt + size);

	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
			       flags);
		phys += next - addr;
	} while (pgdp++, addr = next, addr != end);
}

static phys_addr_t __pgd_pgtable_alloc(int shift)
{
	void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

static phys_addr_t pgd_pgtable_alloc(int shift)
{
	phys_addr_t pa = __pgd_pgtable_alloc(shift);

	/*
	 * Call proper page table ctor in case later we need to
	 * call core mm functions like apply_to_page_range() on
	 * this pre-allocated page table.
	 *
	 * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is
	 * folded, and if so pgtable_pmd_page_ctor() becomes nop.
	 */
	if (shift == PAGE_SHIFT)
		BUG_ON(!pgtable_pte_page_ctor(phys_to_page(pa)));
	else if (shift == PMD_SHIFT)
		BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa)));

	return pa;
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
					  phys_addr_t size, pgprot_t prot)
{
	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);
}

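/*
 * Create a mapping in a non-init mm; the BUG_ON rejects init_mm, whose
 * mappings are managed via the helpers above. (The EFI runtime services mm
 * is the typical caller here.)
 */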
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only)
{
	int flags = 0;

	BUG_ON(mm == &init_mm);

	if (page_mappings_only)
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     pgd_pgtable_alloc, flags);
}

static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if ((virt >= PAGE_END) && (virt < VMALLOC_START)) {
		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_kernel_range(virt, virt + size);
}

static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
				  phys_addr_t end, pgprot_t prot, int flags)
{
	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
			     prot, early_pgtable_alloc, flags);
}

void __init mark_linear_text_alias_ro(void)
{
	/*
	 * Remove the write permissions from the linear alias of .text/.rodata
	 */
	update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
			    (unsigned long)__init_begin - (unsigned long)_text,
			    PAGE_KERNEL_RO);
}

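/*
 * Create the linear mapping of all usable (non-NOMAP) memblock regions.
 * The kernel image and, with kexec, the crash kernel region get special
 * treatment so that no inadvertently writable or block-mapped alias of
 * them is created.
 */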
static void __init map_mem(pgd_t *pgdp)
{
	phys_addr_t kernel_start = __pa_symbol(_text);
	phys_addr_t kernel_end = __pa_symbol(__init_begin);
	struct memblock_region *reg;
	int flags = 0;

	if (rodata_full || debug_pagealloc_enabled())
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 * So temporarily mark them as NOMAP to skip mappings in
	 * the following for-loop
	 */
	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
#ifdef CONFIG_KEXEC_CORE
	if (crashk_res.end)
		memblock_mark_nomap(crashk_res.start,
				    resource_size(&crashk_res));
#endif

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;

		__map_memblock(pgdp, start, end, PAGE_KERNEL, flags);
	}

	/*
	 * Map the linear alias of the [_text, __init_begin) interval
	 * as non-executable now, and remove the write permission in
	 * mark_linear_text_alias_ro() below (which will be called after
	 * alternative patching has completed). This makes the contents
	 * of the region accessible to subsystems such as hibernate,
	 * but protects it from inadvertent modification or execution.
	 * Note that contiguous mappings cannot be remapped in this way,
	 * so we should avoid them here.
	 */
	__map_memblock(pgdp, kernel_start, kernel_end,
		       PAGE_KERNEL, NO_CONT_MAPPINGS);
	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);

#ifdef CONFIG_KEXEC_CORE
	/*
	 * Use page-level mappings here so that we can shrink the region
	 * in page granularity and put back unused memory to buddy system
	 * through /sys/kernel/kexec_crash_size interface.
	 */
	if (crashk_res.end) {
		__map_memblock(pgdp, crashk_res.start, crashk_res.end + 1,
			       PAGE_KERNEL,
			       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
		memblock_clear_nomap(crashk_res.start,
				     resource_size(&crashk_res));
	}
#endif
}

void mark_rodata_ro(void)
{
	unsigned long section_size;

	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);

	debug_checkwx();
}

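/*
 * Map one segment of the kernel image with the given permissions and
 * register its VA range as an early vm_struct so the vmalloc allocator
 * treats the range as occupied.
 */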
static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
				      pgprot_t prot, struct vm_struct *vma,
				      int flags, unsigned long vm_flags)
{
	phys_addr_t pa_start = __pa_symbol(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc, flags);

	if (!(vm_flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	vma->addr	= va_start;
	vma->phys_addr	= pa_start;
	vma->size	= size;
	vma->flags	= VM_MAP | vm_flags;
	vma->caller	= __builtin_return_address(0);

	vm_area_add_early(vma);
}

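/*
 * "rodata=" command line handling: the usual boolean values toggle rodata
 * protection, while "full" additionally sets rodata_full, which forces the
 * linear map down to page granularity (see map_mem()) so that permissions
 * can be managed per page.
 */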
static int __init parse_rodata(char *arg)
{
	int ret = strtobool(arg, &rodata_enabled);
	if (!ret) {
		rodata_full = false;
		return 0;
	}

	/* permit 'full' in addition to boolean options */
	if (strcmp(arg, "full"))
		return -EINVAL;

	rodata_enabled = true;
	rodata_full = true;
	return 0;
}
early_param("rodata", parse_rodata);

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
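/*
 * With KPTI, the exception entry trampoline must be mapped both in the
 * trampoline page table used while running user space and, via fixmap
 * slots, in the kernel page table, so the entry code is reachable on both
 * sides of the TTBR1 switch.
 */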
static int __init map_entry_trampoline(void)
{
	pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

	/* The trampoline is always mapped and can therefore be global */
	pgprot_val(prot) &= ~PTE_NG;

	/* Map only the text into the trampoline page table */
	memset(tramp_pg_dir, 0, PGD_SIZE);
	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
			     prot, __pgd_pgtable_alloc, 0);

	/* Map both the text and data into the kernel page table */
	__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern char __entry_tramp_data_start[];

		__set_fixmap(FIX_ENTRY_TRAMP_DATA,
			     __pa_symbol(__entry_tramp_data_start),
			     PAGE_KERNEL_RO);
	}

	return 0;
}
core_initcall(map_entry_trampoline);
#endif

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgdp)
{
	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
				vmlinux_initdata, vmlinux_data;

	/*
	 * External debuggers may need to write directly to the text
	 * mapping to install SW breakpoints. Allow this (only) when
	 * explicitly requested with rodata=off.
	 */
	pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;

	/*
	 * Only rodata will be remapped with different permissions later on,
	 * all other segments are allowed to use contiguous mappings.
	 */
	map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0,
			   VM_NO_GUARD);
	map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
	map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
			   &vmlinux_inittext, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
			   &vmlinux_initdata, 0, VM_NO_GUARD);
	map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);

	if (!READ_ONCE(pgd_val(*pgd_offset_raw(pgdp, FIXADDR_START)))) {
		/*
		 * The fixmap falls in a separate pgd to the kernel, and doesn't
		 * live in the carveout for the swapper_pg_dir. We can simply
		 * re-use the existing dir for the fixmap.
		 */
		set_pgd(pgd_offset_raw(pgdp, FIXADDR_START),
			READ_ONCE(*pgd_offset_k(FIXADDR_START)));
	} else if (CONFIG_PGTABLE_LEVELS > 3) {
		pgd_t *bm_pgdp;
		pud_t *bm_pudp;
		/*
		 * The fixmap shares its top level pgd entry with the kernel
		 * mapping. This can really only occur when we are running
		 * with 16k/4 levels, so we can simply reuse the pud level
		 * entry instead.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		bm_pgdp = pgd_offset_raw(pgdp, FIXADDR_START);
		bm_pudp = pud_set_fixmap_offset(bm_pgdp, FIXADDR_START);
		pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
		pud_clear_fixmap();
	} else {
		BUG();
	}

	kasan_copy_shadow(pgdp);
}

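/*
 * Set up the final kernel page tables in swapper_pg_dir: map the kernel
 * image and all of memory, switch TTBR1 over to swapper_pg_dir, and free
 * the no-longer-needed boot-time init_pg_dir tables.
 */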
void __init paging_init(void)
{
	pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));

	map_kernel(pgdp);
	map_mem(pgdp);

	pgd_clear_fixmap();

	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
	init_mm.pgd = swapper_pg_dir;

	memblock_free(__pa_symbol(init_pg_dir),
		      __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));

	memblock_allow_resize();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgdp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep, pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return 0;

	pudp = pud_offset(pgdp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return 0;

	if (pud_sect(pud))
		return pfn_valid(pud_pfn(pud));

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return 0;

	if (pmd_sect(pmd))
		return pfn_valid(pmd_pfn(pmd));

	ptep = pte_offset_kernel(pmdp, addr);
	pte = READ_ONCE(*ptep);
	if (pte_none(pte))
		return 0;

	return pfn_valid(pte_pfn(pte));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;

	do {
		next = pmd_addr_end(addr, end);

		pgdp = vmemmap_pgd_populate(addr, node);
		if (!pgdp)
			return -ENOMEM;

		pudp = vmemmap_pud_populate(pgdp, addr, node);
		if (!pudp)
			return -ENOMEM;

		pmdp = pmd_offset(pudp, addr);
		if (pmd_none(READ_ONCE(*pmdp))) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmdp, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

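/*
 * Walkers for the fixmap tables. These go through the kernel-image mapping
 * (p*d_offset_kimg) rather than the linear map, as they are used before the
 * linear map is available; the pte level always lives in bm_pte.
 */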
static inline pud_t * fixmap_pud(unsigned long addr)
{
	pgd_t *pgdp = pgd_offset_k(addr);
	pgd_t pgd = READ_ONCE(*pgdp);

	BUG_ON(pgd_none(pgd) || pgd_bad(pgd));

	return pud_offset_kimg(pgdp, addr);
}

static inline pmd_t * fixmap_pmd(unsigned long addr)
{
	pud_t *pudp = fixmap_pud(addr);
	pud_t pud = READ_ONCE(*pudp);

	BUG_ON(pud_none(pud) || pud_bad(pud));

	return pmd_offset_kimg(pudp, addr);
}

static inline pte_t * fixmap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p(u|m)d). This function is called too early to
 * use lm_alias so __p*d_populate functions must be used to populate with the
 * physical address from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
	pgd_t *pgdp, pgd;
	pud_t *pudp;
	pmd_t *pmdp;
	unsigned long addr = FIXADDR_START;

	pgdp = pgd_offset_k(addr);
	pgd = READ_ONCE(*pgdp);
	if (CONFIG_PGTABLE_LEVELS > 3 &&
	    !(pgd_none(pgd) || pgd_page_paddr(pgd) == __pa_symbol(bm_pud))) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4 levels configurations.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		pudp = pud_offset_kimg(pgdp, addr);
	} else {
		if (pgd_none(pgd))
			__pgd_populate(pgdp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
		pudp = fixmap_pud(addr);
	}
	if (pud_none(READ_ONCE(*pudp)))
		__pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
	pmdp = fixmap_pmd(addr);
	__pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmdp %p != %p, %p\n",
			pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

/*
 * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
 * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
 */
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
	}
}

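/*
 * Map the flattened device tree through the FIX_FDT fixmap slot. The first
 * SWAPPER_BLOCK_SIZE chunk is mapped to read the header; if fdt_totalsize()
 * shows the blob spilling past that chunk, the mapping is extended. Returns
 * the virtual address of the FDT, or NULL if it is missing, misaligned or
 * too large.
 */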
void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	int offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the magic and size
	 * fields of the FDT header after mapping the first chunk, double check
	 * here if that is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping_noalloc() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
			       dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

	if (fdt_magic(dt_virt) != FDT_MAGIC)
		return NULL;

	*size = fdt_totalsize(dt_virt);
	if (*size > MAX_FDT_SIZE)
		return NULL;

	if (offset + *size > SWAPPER_BLOCK_SIZE)
		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
			       round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);

	return dt_virt;
}

int __init arch_ioremap_p4d_supported(void)
{
	return 0;
}

int __init arch_ioremap_pud_supported(void)
{
	/*
	 * Only 4k granule supports level 1 (1GiB) block mappings.
	 * SW table walks can't handle removal of intermediate entries.
	 */
	return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
	       !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}

int __init arch_ioremap_pmd_supported(void)
{
	/* See arch_ioremap_pud_supported() */
	return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
}

int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
	pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
				   pud_val(new_pud)))
		return 0;

	VM_BUG_ON(phys & ~PUD_MASK);
	set_pud(pudp, new_pud);
	return 1;
}

int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
	pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));

	/* Only allow permission changes for now */
	if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
				   pmd_val(new_pmd)))
		return 0;

	VM_BUG_ON(phys & ~PMD_MASK);
	set_pmd(pmdp, new_pmd);
	return 1;
}

int pud_clear_huge(pud_t *pudp)
{
	if (!pud_sect(READ_ONCE(*pudp)))
		return 0;
	pud_clear(pudp);
	return 1;
}

int pmd_clear_huge(pmd_t *pmdp)
{
	if (!pmd_sect(READ_ONCE(*pmdp)))
		return 0;
	pmd_clear(pmdp);
	return 1;
}

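/*
 * Helpers for huge vmap/ioremap support: tear down a table entry and free
 * the next-level table it pointed to, so that the caller can install a
 * block mapping in its place.
 */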
int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
{
	pte_t *table;
	pmd_t pmd;

	pmd = READ_ONCE(*pmdp);

	if (!pmd_table(pmd)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pte_offset_kernel(pmdp, addr);
	pmd_clear(pmdp);
	__flush_tlb_kernel_pgtable(addr);
	pte_free_kernel(NULL, table);
	return 1;
}

int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
{
	pmd_t *table;
	pmd_t *pmdp;
	pud_t pud;
	unsigned long next, end;

	pud = READ_ONCE(*pudp);

	if (!pud_table(pud)) {
		VM_WARN_ON(1);
		return 1;
	}

	table = pmd_offset(pudp, addr);
	pmdp = table;
	next = addr;
	end = addr + PUD_SIZE;
	do {
		pmd_free_pte_page(pmdp, next);
	} while (pmdp++, next += PMD_SIZE, next != end);

	pud_clear(pudp);
	__flush_tlb_kernel_pgtable(addr);
	pmd_free(NULL, table);
	return 1;
}

int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
	return 0;	/* Don't attempt a block mapping */
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_restrictions *restrictions)
{
	int flags = 0;

	if (rodata_full || debug_pagealloc_enabled())
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
			     size, PAGE_KERNEL, __pgd_pgtable_alloc, flags);

	memblock_clear_nomap(start, size);

	return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
			   restrictions);
}

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	/*
	 * FIXME: Cleanup page tables (also in arch_add_memory() in case
	 * adding fails). Until then, this function should only be used
	 * during memory hotplug (adding memory), not for memory
	 * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be
	 * used.
	 */
	__remove_pages(start_pfn, nr_pages, altmap);
}
#endif