linux/arch/riscv/mm/init.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/set_memory.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/soc.h>
#include <asm/io.h>
#include <asm/ptdump.h>

#include "../kernel/head.h"

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
                                                        __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

extern char _start[];
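/*
 * Early virtual address at which setup_vm() maps the flattened device tree
 * so it can be scanned before the regular fixmap is available.
 */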
#define DTB_EARLY_BASE_VA      PGDIR_SIZE
void *dtb_early_va __initdata;
uintptr_t dtb_early_pa __initdata;

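/*
 * Page table allocation helpers.  Three implementations are installed as
 * boot progresses: the *_early ops (MMU off, static tables), the *_fixmap
 * ops (MMU on, memblock allocations accessed through fixmap slots) and the
 * *_late ops (MMU on, buddy allocator).
 */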
struct pt_alloc_ops {
        pte_t *(*get_pte_virt)(phys_addr_t pa);
        phys_addr_t (*alloc_pte)(uintptr_t va);
#ifndef __PAGETABLE_PMD_FOLDED
        pmd_t *(*get_pmd_virt)(phys_addr_t pa);
        phys_addr_t (*alloc_pmd)(uintptr_t va);
#endif
};

static void __init zone_sizes_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };

#ifdef CONFIG_ZONE_DMA32
        max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
                        (unsigned long) PFN_PHYS(max_low_pfn)));
#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

        free_area_init(max_zone_pfns);
}

static void setup_zero_page(void)
{
        memset((void *)empty_zero_page, 0, PAGE_SIZE);
}

#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
static inline void print_mlk(char *name, unsigned long b, unsigned long t)
{
        pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld kB)\n", name, b, t,
                  (((t) - (b)) >> 10));
}

static inline void print_mlm(char *name, unsigned long b, unsigned long t)
{
        pr_notice("%12s : 0x%08lx - 0x%08lx   (%4ld MB)\n", name, b, t,
                  (((t) - (b)) >> 20));
}

static void print_vm_layout(void)
{
        pr_notice("Virtual kernel memory layout:\n");
        print_mlk("fixmap", (unsigned long)FIXADDR_START,
                  (unsigned long)FIXADDR_TOP);
        print_mlm("pci io", (unsigned long)PCI_IO_START,
                  (unsigned long)PCI_IO_END);
        print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
                  (unsigned long)VMEMMAP_END);
        print_mlm("vmalloc", (unsigned long)VMALLOC_START,
                  (unsigned long)VMALLOC_END);
        print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
                  (unsigned long)high_memory);
}
#else
static void print_vm_layout(void) { }
#endif /* CONFIG_MMU && CONFIG_DEBUG_VM */

void __init mem_init(void)
{
#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif /* CONFIG_FLATMEM */

        high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
        memblock_free_all();

        mem_init_print_info(NULL);
        print_vm_layout();
}

#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
{
        phys_addr_t start;
        unsigned long size;

        /* Ignore the virtual address computed during device tree parsing */
        initrd_start = initrd_end = 0;

        if (!phys_initrd_size)
                return;
        /*
         * Round the memory region to page boundaries, as per free_initrd_mem().
         * This allows us to detect whether the pages overlapping the initrd
         * are in use, but more importantly, reserves the entire set of pages
         * as we don't want these pages allocated for other purposes.
         */
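        /*
         * For example (hypothetical values), phys_initrd_start = 0x80a00200
         * and phys_initrd_size = 0x1000 yield start = 0x80a00000 and
         * size = 0x2000, covering both pages the initrd touches.
         */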
        start = round_down(phys_initrd_start, PAGE_SIZE);
        size = phys_initrd_size + (phys_initrd_start - start);
        size = round_up(size, PAGE_SIZE);

        if (!memblock_is_region_memory(start, size)) {
                pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
                       (u64)start, size);
                goto disable;
        }

        if (memblock_is_region_reserved(start, size)) {
                pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
                       (u64)start, size);
                goto disable;
        }

        memblock_reserve(start, size);
        /* Now convert initrd to virtual addresses */
        initrd_start = (unsigned long)__va(phys_initrd_start);
        initrd_end = initrd_start + phys_initrd_size;
        initrd_below_start_ok = 1;

        pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
                (void *)(initrd_start), size);
        return;
disable:
        pr_cont(" - disabling initrd\n");
        initrd_start = 0;
        initrd_end = 0;
}
#endif /* CONFIG_BLK_DEV_INITRD */

void __init setup_bootmem(void)
{
        phys_addr_t mem_start = 0;
        phys_addr_t start, end = 0;
        phys_addr_t vmlinux_end = __pa_symbol(&_end);
        phys_addr_t vmlinux_start = __pa_symbol(&_start);
        u64 i;

        /* Find the memory region containing the kernel */
        for_each_mem_range(i, &start, &end) {
                phys_addr_t size = end - start;
                if (!mem_start)
                        mem_start = start;
                if (start <= vmlinux_start && vmlinux_end <= end)
                        BUG_ON(size == 0);
        }

        /*
         * The maximal physical memory size is -PAGE_OFFSET.
         * Make sure that any memory beyond mem_start + (-PAGE_OFFSET) is
         * removed, as it is unusable by the kernel.
         */
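        /*
         * With the default 64-bit PAGE_OFFSET of 0xffffffe000000000 (Sv39),
         * for example, -PAGE_OFFSET is 0x2000000000, i.e. a 128 GiB limit
         * from mem_start.
         */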
        memblock_enforce_memory_limit(mem_start - PAGE_OFFSET);

        /* Reserve from the start of the kernel to the end of the kernel */
        memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

        max_pfn = PFN_DOWN(memblock_end_of_DRAM());
        max_low_pfn = max_pfn;
        set_max_mapnr(max_low_pfn);

#ifdef CONFIG_BLK_DEV_INITRD
        setup_initrd();
#endif /* CONFIG_BLK_DEV_INITRD */

        /*
         * Avoid using early_init_fdt_reserve_self() since __pa() does
         * not work for DTB pointers that are fixmap addresses
         */
        memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));

        early_init_fdt_scan_reserved_mem();
        memblock_allow_resize();
        memblock_dump_all();
}

#ifdef CONFIG_MMU
static struct pt_alloc_ops pt_ops;

unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);

pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;

#define MAX_EARLY_MAPPING_SIZE  SZ_128M

pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

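/*
 * Map (or, when pgprot_val(prot) is zero, unmap) a fixmap slot to the given
 * physical address in the shared fixmap_pte table and flush the local TLB.
 */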
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *ptep;

        BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

        ptep = &fixmap_pte[pte_index(addr)];

        if (pgprot_val(prot))
                set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
        else
                pte_clear(&init_mm, addr, ptep);
        local_flush_tlb_page(addr);
}

static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
{
        return (pte_t *)((uintptr_t)pa);
}

static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
{
        clear_fixmap(FIX_PTE);
        return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
}

static inline pte_t *get_pte_virt_late(phys_addr_t pa)
{
        return (pte_t *) __va(pa);
}

static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
{
        /*
         * We only create PMD or PGD early mappings so we
         * should never reach here with MMU disabled.
         */
        BUG();
}

static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
{
        return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pte_late(uintptr_t va)
{
        unsigned long vaddr;

        vaddr = __get_free_page(GFP_KERNEL);
        if (!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)))
                BUG();
        return __pa(vaddr);
}

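/*
 * Install a single PAGE_SIZE leaf PTE for va -> pa in the given PTE table,
 * unless an entry is already present.
 */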
static void __init create_pte_mapping(pte_t *ptep,
                                      uintptr_t va, phys_addr_t pa,
                                      phys_addr_t sz, pgprot_t prot)
{
        uintptr_t pte_idx = pte_index(va);

        BUG_ON(sz != PAGE_SIZE);

        if (pte_none(ptep[pte_idx]))
                ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
}

#ifndef __PAGETABLE_PMD_FOLDED

pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;

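/*
 * Statically sized pool of early PMD tables: a single table when
 * MAX_EARLY_MAPPING_SIZE fits in one PGD entry, otherwise one table per
 * PGDIR_SIZE covered plus one spare.
 */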
#if MAX_EARLY_MAPPING_SIZE < PGDIR_SIZE
#define NUM_EARLY_PMDS          1UL
#else
#define NUM_EARLY_PMDS          (1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
#endif
pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);
pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);

static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
        /* Before MMU is enabled */
        return (pmd_t *)((uintptr_t)pa);
}

static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
{
        clear_fixmap(FIX_PMD);
        return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
}

static pmd_t *get_pmd_virt_late(phys_addr_t pa)
{
        return (pmd_t *) __va(pa);
}

static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
        uintptr_t pmd_num;

        pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;
        BUG_ON(pmd_num >= NUM_EARLY_PMDS);
        return (uintptr_t)&early_pmd[pmd_num * PTRS_PER_PMD];
}

static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
{
        return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
}

static phys_addr_t alloc_pmd_late(uintptr_t va)
{
        unsigned long vaddr;

        vaddr = __get_free_page(GFP_KERNEL);
        BUG_ON(!vaddr);
        return __pa(vaddr);
}

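/*
 * Map va -> pa in the given PMD table: install a PMD-sized leaf when
 * sz == PMD_SIZE, otherwise descend (allocating and zeroing a PTE table if
 * needed) and let create_pte_mapping() install the PAGE_SIZE leaf.
 */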
static void __init create_pmd_mapping(pmd_t *pmdp,
                                      uintptr_t va, phys_addr_t pa,
                                      phys_addr_t sz, pgprot_t prot)
{
        pte_t *ptep;
        phys_addr_t pte_phys;
        uintptr_t pmd_idx = pmd_index(va);

        if (sz == PMD_SIZE) {
                if (pmd_none(pmdp[pmd_idx]))
                        pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
                return;
        }

        if (pmd_none(pmdp[pmd_idx])) {
                pte_phys = pt_ops.alloc_pte(va);
                pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
                ptep = pt_ops.get_pte_virt(pte_phys);
                memset(ptep, 0, PAGE_SIZE);
        } else {
                pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
                ptep = pt_ops.get_pte_virt(pte_phys);
        }

        create_pte_mapping(ptep, va, pa, sz, prot);
}

#define pgd_next_t              pmd_t
#define alloc_pgd_next(__va)    pt_ops.alloc_pmd(__va)
#define get_pgd_next_virt(__pa) pt_ops.get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)      \
        create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next         fixmap_pmd
#else
#define pgd_next_t              pte_t
#define alloc_pgd_next(__va)    pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa) pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot)      \
        create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next         fixmap_pte
#endif

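/*
 * Map va -> pa in the given PGD: install a PGDIR_SIZE leaf when
 * sz == PGDIR_SIZE, otherwise descend to the next level (PMD, or PTE when
 * the PMD is folded) via the pgd_next helpers above.
 */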
void __init create_pgd_mapping(pgd_t *pgdp,
                                      uintptr_t va, phys_addr_t pa,
                                      phys_addr_t sz, pgprot_t prot)
{
        pgd_next_t *nextp;
        phys_addr_t next_phys;
        uintptr_t pgd_idx = pgd_index(va);

        if (sz == PGDIR_SIZE) {
                if (pgd_val(pgdp[pgd_idx]) == 0)
                        pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
                return;
        }

        if (pgd_val(pgdp[pgd_idx]) == 0) {
                next_phys = alloc_pgd_next(va);
                pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
                nextp = get_pgd_next_virt(next_phys);
                memset(nextp, 0, PAGE_SIZE);
        } else {
                next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
                nextp = get_pgd_next_virt(next_phys);
        }

        create_pgd_next_mapping(nextp, va, pa, sz, prot);
}

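/*
 * Pick the largest mapping size usable for a region: PMD_SIZE when both the
 * base and the size are PMD-aligned, PAGE_SIZE otherwise.
 */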
static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{
        /* Upgrade to PMD_SIZE mappings whenever possible */
        if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
                return PAGE_SIZE;

        return PMD_SIZE;
}

/*
 * setup_vm() is called from head.S with the MMU off.
 *
 * The following requirements must be honoured for setup_vm() to work
 * correctly:
 * 1) It should use PC-relative addressing for accessing kernel symbols.
 *    To achieve this we always use GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE will not work for setup_vm(),
 *    so disable compiler instrumentation when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for init.o in mm/Makefile.
 */

#ifndef __riscv_cmodel_medany
#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
#endif

asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
        uintptr_t va, pa, end_va;
        uintptr_t load_pa = (uintptr_t)(&_start);
        uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
        uintptr_t map_size = best_map_size(load_pa, MAX_EARLY_MAPPING_SIZE);
#ifndef __PAGETABLE_PMD_FOLDED
        pmd_t fix_bmap_spmd, fix_bmap_epmd;
#endif

        va_pa_offset = PAGE_OFFSET - load_pa;
        pfn_base = PFN_DOWN(load_pa);

        /*
         * Enforce boot alignment requirements of RV32 and
         * RV64 by only allowing PMD or PGD mappings.
         */
        BUG_ON(map_size == PAGE_SIZE);

        /* Sanity check alignment and size */
        BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
        BUG_ON((load_pa % map_size) != 0);
        BUG_ON(load_sz > MAX_EARLY_MAPPING_SIZE);

        pt_ops.alloc_pte = alloc_pte_early;
        pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
        pt_ops.alloc_pmd = alloc_pmd_early;
        pt_ops.get_pmd_virt = get_pmd_virt_early;
#endif
        /* Setup early PGD for fixmap */
        create_pgd_mapping(early_pg_dir, FIXADDR_START,
                           (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);

#ifndef __PAGETABLE_PMD_FOLDED
        /* Setup fixmap PMD */
        create_pmd_mapping(fixmap_pmd, FIXADDR_START,
                           (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
        /* Setup trampoline PGD and PMD */
        create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
                           (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
        create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
                           load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
#else
        /* Setup trampoline PGD */
        create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
                           load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
#endif

        /*
         * Set up an early PGD covering the entire kernel, which allows us
         * to reach paging_init(). We map all memory banks later in
         * setup_vm_final() below.
         */
        end_va = PAGE_OFFSET + load_sz;
        for (va = PAGE_OFFSET; va < end_va; va += map_size)
                create_pgd_mapping(early_pg_dir, va,
                                   load_pa + (va - PAGE_OFFSET),
                                   map_size, PAGE_KERNEL_EXEC);

#ifndef __PAGETABLE_PMD_FOLDED
        /* Setup early PMD for DTB */
        create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
                           (uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE);
        /* Create two consecutive PMD mappings for FDT early scan */
        pa = dtb_pa & ~(PMD_SIZE - 1);
        create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
                           pa, PMD_SIZE, PAGE_KERNEL);
        create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
                           pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
        dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
#else
        /* Create two consecutive PGD mappings for FDT early scan */
        pa = dtb_pa & ~(PGDIR_SIZE - 1);
        create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
                           pa, PGDIR_SIZE, PAGE_KERNEL);
        create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
                           pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
        dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
#endif
        dtb_early_pa = dtb_pa;

        /*
         * The boot-time fixmap can only handle PMD_SIZE mappings, so the
         * boot-ioremap range cannot span multiple PMDs.
         */
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

#ifndef __PAGETABLE_PMD_FOLDED
        /*
         * The early ioremap fixmap is already created as it lies within the
         * first 2MB of the fixmap region. We always map PMD_SIZE, so both
         * FIX_BTMAP_END and FIX_BTMAP_BEGIN should lie in the same PMD.
         * Verify that and warn the user if not.
         */
        fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
        fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
        if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
                WARN_ON(1);
                pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
                        pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
                pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
        }
#endif
}

static void __init setup_vm_final(void)
{
        uintptr_t va, map_size;
        phys_addr_t pa, start, end;
        u64 i;

        /*
         * The MMU is enabled at this point, but the page table setup is not
         * complete yet, so the fixmap page table alloc functions should be
         * used here.
         */
        pt_ops.alloc_pte = alloc_pte_fixmap;
        pt_ops.get_pte_virt = get_pte_virt_fixmap;
#ifndef __PAGETABLE_PMD_FOLDED
        pt_ops.alloc_pmd = alloc_pmd_fixmap;
        pt_ops.get_pmd_virt = get_pmd_virt_fixmap;
#endif
        /* Setup swapper PGD for fixmap */
        create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
                           __pa_symbol(fixmap_pgd_next),
                           PGDIR_SIZE, PAGE_TABLE);

        /* Map all memory banks */
        for_each_mem_range(i, &start, &end) {
                if (start >= end)
                        break;
                if (start <= __pa(PAGE_OFFSET) &&
                    __pa(PAGE_OFFSET) < end)
                        start = __pa(PAGE_OFFSET);

                map_size = best_map_size(start, end - start);
                for (pa = start; pa < end; pa += map_size) {
                        va = (uintptr_t)__va(pa);
                        create_pgd_mapping(swapper_pg_dir, va, pa,
                                           map_size, PAGE_KERNEL_EXEC);
                }
        }

        /* Clear fixmap PTE and PMD mappings */
        clear_fixmap(FIX_PTE);
        clear_fixmap(FIX_PMD);

        /* Move to swapper page table */
        csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
        local_flush_tlb_all();

        /* Generic page allocation functions must be used to set up the page table */
        pt_ops.alloc_pte = alloc_pte_late;
        pt_ops.get_pte_virt = get_pte_virt_late;
#ifndef __PAGETABLE_PMD_FOLDED
        pt_ops.alloc_pmd = alloc_pmd_late;
        pt_ops.get_pmd_virt = get_pmd_virt_late;
#endif
}
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
#ifdef CONFIG_BUILTIN_DTB
        dtb_early_va = soc_lookup_builtin_dtb();
        if (!dtb_early_va) {
                /* Fall back to the first available built-in DTB */
                dtb_early_va = (void *) __dtb_start;
        }
#else
        dtb_early_va = (void *)dtb_pa;
#endif
        dtb_early_pa = dtb_pa;
}

static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_STRICT_KERNEL_RWX
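/*
 * Enforce the final kernel mapping permissions: kernel text becomes
 * read-only, rodata becomes read-only and non-executable, and the data
 * section through the end of lowmem becomes non-executable.
 */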
void mark_rodata_ro(void)
{
        unsigned long text_start = (unsigned long)_text;
        unsigned long text_end = (unsigned long)_etext;
        unsigned long rodata_start = (unsigned long)__start_rodata;
        unsigned long data_start = (unsigned long)_data;
        unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));

        set_memory_ro(text_start, (text_end - text_start) >> PAGE_SHIFT);
        set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
        set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
        set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);

        debug_checkwx();
}
#endif

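/*
 * Register each memblock memory region with the resource tree so that it is
 * reported (as "System RAM" or "reserved") in /proc/iomem.
 */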
static void __init resource_init(void)
{
        struct memblock_region *region;

        for_each_mem_region(region) {
                struct resource *res;

                res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
                if (!res)
                        panic("%s: Failed to allocate %zu bytes\n", __func__,
                              sizeof(struct resource));

                if (memblock_is_nomap(region)) {
                        res->name = "reserved";
                        res->flags = IORESOURCE_MEM;
                } else {
                        res->name = "System RAM";
                        res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
                }
                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

                request_resource(&iomem_resource, res);
        }
}

void __init paging_init(void)
{
        setup_vm_final();
        sparse_init();
        setup_zero_page();
        zone_sizes_init();
        resource_init();
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
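/* Populate the vmemmap range with base pages; no vmem_altmap is used here. */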
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                               struct vmem_altmap *altmap)
{
        return vmemmap_populate_basepages(start, end, node, NULL);
}
#endif