linux/arch/arm64/mm/mmu.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>
#include <asm/tlbflush.h>

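/*
 * Flags passed down through __create_pgd_mapping() and its helpers to
 * restrict the kinds of entries that may be created: NO_BLOCK_MAPPINGS
 * forbids section (block) entries, NO_CONT_MAPPINGS forbids PTE_CONT
 * contiguous ranges.
 */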
#define NO_BLOCK_MAPPINGS       BIT(0)
#define NO_CONT_MAPPINGS        BIT(1)

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
u64 idmap_ptrs_per_pgd = PTRS_PER_PGD;
u64 vabits_user __ro_after_init;
EXPORT_SYMBOL(vabits_user);

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

static DEFINE_SPINLOCK(swapper_pgdir_lock);

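/*
 * Update an entry in swapper_pg_dir. The swapper page tables may be mapped
 * read-only, so the new value is written through a fixmap alias of the pgd
 * page, serialised by swapper_pgdir_lock.
 */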
void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd)
{
        pgd_t *fixmap_pgdp;

        spin_lock(&swapper_pgdir_lock);
        fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp));
        WRITE_ONCE(*fixmap_pgdp, pgd);
        /*
         * We need dsb(ishst) here to ensure the page-table-walker sees
         * our new entry before set_p?d() returns. The fixmap's
         * flush_tlb_kernel_range() via clear_fixmap() does this for us.
         */
        pgd_clear_fixmap();
        spin_unlock(&swapper_pgdir_lock);
}

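/*
 * Select the pgprot for a physical-memory mapping (e.g. mmap() of
 * /dev/mem): addresses without valid struct pages are mapped Device
 * non-cached, O_SYNC requests get a write-combined mapping, and ordinary
 * RAM keeps the caller's attributes.
 */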
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (!pfn_valid(pfn))
                return pgprot_noncached(vma_prot);
        else if (file->f_flags & O_SYNC)
                return pgprot_writecombine(vma_prot);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

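/*
 * Allocate a zeroed page-table page from memblock during early boot,
 * before the linear map is available; the page is cleared through the
 * FIX_PTE fixmap slot and returned by physical address.
 */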
static phys_addr_t __init early_pgtable_alloc(int shift)
{
        phys_addr_t phys;
        void *ptr;

        phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!phys)
                panic("Failed to allocate page table page\n");

        /*
         * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
         * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
         * any level of table.
         */
        ptr = pte_set_fixmap(phys);

        memset(ptr, 0, PAGE_SIZE);

        /*
         * Implicit barriers also ensure the zeroed page is visible to the
         * page table walker.
         */
        pte_clear_fixmap();

        return phys;
}

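/*
 * Decide whether a live kernel mapping may be updated from 'old' to 'new'
 * without going through break-before-make. Only changes confined to the
 * permission/nG bits in 'mask' below are considered safe.
 */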
static bool pgattr_change_is_safe(u64 old, u64 new)
{
        /*
         * The following mapping attributes may be updated in live
         * kernel mappings without the need for break-before-make.
         */
        static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;

        /* creating or taking down mappings is always safe */
        if (old == 0 || new == 0)
                return true;

        /* live contiguous mappings may not be manipulated at all */
        if ((old | new) & PTE_CONT)
                return false;

        /* Transitioning from Non-Global to Global is unsafe */
        if (old & ~new & PTE_NG)
                return false;

        return ((old ^ new) & ~mask) == 0;
}

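/*
 * Fill in the PTEs covering [addr, end) with 'phys' under 'prot'. The
 * table is accessed through the fixmap, as it may not be reachable via
 * the linear map yet.
 */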
static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
                     phys_addr_t phys, pgprot_t prot)
{
        pte_t *ptep;

        ptep = pte_set_fixmap_offset(pmdp, addr);
        do {
                pte_t old_pte = READ_ONCE(*ptep);

                set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));

                /*
                 * After the PTE entry has been populated once, we
                 * only allow updates to the permission attributes.
                 */
                BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
                                              READ_ONCE(pte_val(*ptep))));

                phys += PAGE_SIZE;
        } while (ptep++, addr += PAGE_SIZE, addr != end);

        pte_clear_fixmap();
}

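/*
 * Populate the PTE level below *pmdp, allocating a table if none is
 * present, and using PTE_CONT for suitably aligned CONT_PTE_SIZE chunks
 * unless NO_CONT_MAPPINGS is set.
 */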
static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
                                unsigned long end, phys_addr_t phys,
                                pgprot_t prot,
                                phys_addr_t (*pgtable_alloc)(int),
                                int flags)
{
        unsigned long next;
        pmd_t pmd = READ_ONCE(*pmdp);

        BUG_ON(pmd_sect(pmd));
        if (pmd_none(pmd)) {
                phys_addr_t pte_phys;
                BUG_ON(!pgtable_alloc);
                pte_phys = pgtable_alloc(PAGE_SHIFT);
                __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
                pmd = READ_ONCE(*pmdp);
        }
        BUG_ON(pmd_bad(pmd));

        do {
                pgprot_t __prot = prot;

                next = pte_cont_addr_end(addr, end);

                /* use a contiguous mapping if the range is suitably aligned */
                if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
                    (flags & NO_CONT_MAPPINGS) == 0)
                        __prot = __pgprot(pgprot_val(prot) | PTE_CONT);

                init_pte(pmdp, addr, next, phys, __prot);

                phys += next - addr;
        } while (addr = next, addr != end);
}

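/*
 * Fill in the PMD entries covering [addr, end), using section mappings
 * where alignment and NO_BLOCK_MAPPINGS allow, and recursing to the PTE
 * level otherwise.
 */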
static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
                     phys_addr_t phys, pgprot_t prot,
                     phys_addr_t (*pgtable_alloc)(int), int flags)
{
        unsigned long next;
        pmd_t *pmdp;

        pmdp = pmd_set_fixmap_offset(pudp, addr);
        do {
                pmd_t old_pmd = READ_ONCE(*pmdp);

                next = pmd_addr_end(addr, end);

                /* try section mapping first */
                if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
                    (flags & NO_BLOCK_MAPPINGS) == 0) {
                        pmd_set_huge(pmdp, phys, prot);

                        /*
                         * After the PMD entry has been populated once, we
                         * only allow updates to the permission attributes.
                         */
                        BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
                                                      READ_ONCE(pmd_val(*pmdp))));
                } else {
                        alloc_init_cont_pte(pmdp, addr, next, phys, prot,
                                            pgtable_alloc, flags);

                        BUG_ON(pmd_val(old_pmd) != 0 &&
                               pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
                }
                phys += next - addr;
        } while (pmdp++, addr = next, addr != end);

        pmd_clear_fixmap();
}

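/*
 * Populate the PMD level below *pudp, allocating a table if none is
 * present, and applying PTE_CONT to suitably aligned CONT_PMD_SIZE chunks
 * unless NO_CONT_MAPPINGS is set.
 */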
static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
                                unsigned long end, phys_addr_t phys,
                                pgprot_t prot,
                                phys_addr_t (*pgtable_alloc)(int), int flags)
{
        unsigned long next;
        pud_t pud = READ_ONCE(*pudp);

        /*
         * Check for initial section mappings in the pgd/pud.
         */
        BUG_ON(pud_sect(pud));
        if (pud_none(pud)) {
                phys_addr_t pmd_phys;
                BUG_ON(!pgtable_alloc);
                pmd_phys = pgtable_alloc(PMD_SHIFT);
                __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
                pud = READ_ONCE(*pudp);
        }
        BUG_ON(pud_bad(pud));

        do {
                pgprot_t __prot = prot;

                next = pmd_cont_addr_end(addr, end);

                /* use a contiguous mapping if the range is suitably aligned */
                if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
                    (flags & NO_CONT_MAPPINGS) == 0)
                        __prot = __pgprot(pgprot_val(prot) | PTE_CONT);

                init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);

                phys += next - addr;
        } while (addr = next, addr != end);
}

static inline bool use_1G_block(unsigned long addr, unsigned long next,
                        unsigned long phys)
{
        if (PAGE_SHIFT != 12)
                return false;

        if (((addr | next | phys) & ~PUD_MASK) != 0)
                return false;

        return true;
}

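/*
 * Populate the PUD level below *pgdp, using 1GB block mappings where
 * use_1G_block() and the flags allow, and recursing to the PMD level
 * otherwise.
 */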
static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
                           phys_addr_t phys, pgprot_t prot,
                           phys_addr_t (*pgtable_alloc)(int),
                           int flags)
{
        unsigned long next;
        pud_t *pudp;
        pgd_t pgd = READ_ONCE(*pgdp);

        if (pgd_none(pgd)) {
                phys_addr_t pud_phys;
                BUG_ON(!pgtable_alloc);
                pud_phys = pgtable_alloc(PUD_SHIFT);
                __pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE);
                pgd = READ_ONCE(*pgdp);
        }
        BUG_ON(pgd_bad(pgd));

        pudp = pud_set_fixmap_offset(pgdp, addr);
        do {
                pud_t old_pud = READ_ONCE(*pudp);

                next = pud_addr_end(addr, end);

                /*
                 * For 4K granule only, attempt to put down a 1GB block
                 */
                if (use_1G_block(addr, next, phys) &&
                    (flags & NO_BLOCK_MAPPINGS) == 0) {
                        pud_set_huge(pudp, phys, prot);

                        /*
                         * After the PUD entry has been populated once, we
                         * only allow updates to the permission attributes.
                         */
                        BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
                                                      READ_ONCE(pud_val(*pudp))));
                } else {
                        alloc_init_cont_pmd(pudp, addr, next, phys, prot,
                                            pgtable_alloc, flags);

                        BUG_ON(pud_val(old_pud) != 0 &&
                               pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
                }
                phys += next - addr;
        } while (pudp++, addr = next, addr != end);

        pud_clear_fixmap();
}

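/*
 * Map [virt, virt + size) to 'phys' with 'prot' in the page tables rooted
 * at 'pgdir', allocating intermediate tables via 'pgtable_alloc'. Callers
 * that guarantee no new tables are needed may pass a NULL allocator.
 */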
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
                                 unsigned long virt, phys_addr_t size,
                                 pgprot_t prot,
                                 phys_addr_t (*pgtable_alloc)(int),
                                 int flags)
{
        unsigned long addr, length, end, next;
        pgd_t *pgdp = pgd_offset_raw(pgdir, virt);

        /*
         * If the virtual and physical address don't have the same offset
         * within a page, we cannot map the region as the caller expects.
         */
        if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
                return;

        phys &= PAGE_MASK;
        addr = virt & PAGE_MASK;
        length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

        end = addr + length;
        do {
                next = pgd_addr_end(addr, end);
                alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
                               flags);
                phys += next - addr;
        } while (pgdp++, addr = next, addr != end);
}

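/*
 * Runtime page-table allocators: __pgd_pgtable_alloc() returns a zeroed
 * page, while pgd_pgtable_alloc() additionally runs the page-table
 * constructor so the page can later be handled by core mm code.
 */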
static phys_addr_t __pgd_pgtable_alloc(int shift)
{
        void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
        BUG_ON(!ptr);

        /* Ensure the zeroed page is visible to the page table walker */
        dsb(ishst);
        return __pa(ptr);
}

static phys_addr_t pgd_pgtable_alloc(int shift)
{
        phys_addr_t pa = __pgd_pgtable_alloc(shift);

        /*
         * Call proper page table ctor in case later we need to
         * call core mm functions like apply_to_page_range() on
         * this pre-allocated page table.
         *
         * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is
         * folded, and if so pgtable_pmd_page_ctor() becomes nop.
         */
        if (shift == PAGE_SHIFT)
                BUG_ON(!pgtable_page_ctor(phys_to_page(pa)));
        else if (shift == PMD_SHIFT)
                BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa)));

        return pa;
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
                                  phys_addr_t size, pgprot_t prot)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }
        __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
                             NO_CONT_MAPPINGS);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               unsigned long virt, phys_addr_t size,
                               pgprot_t prot, bool page_mappings_only)
{
        int flags = 0;

        BUG_ON(mm == &init_mm);

        if (page_mappings_only)
                flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

        __create_pgd_mapping(mm->pgd, phys, virt, size, prot,
                             pgd_pgtable_alloc, flags);
}

static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
                                phys_addr_t size, pgprot_t prot)
{
        if (virt < VMALLOC_START) {
                pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
                        &phys, virt);
                return;
        }

        __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
                             NO_CONT_MAPPINGS);

        /* flush the TLBs after updating live kernel mappings */
        flush_tlb_kernel_range(virt, virt + size);
}

static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
                                  phys_addr_t end, pgprot_t prot, int flags)
{
        __create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
                             prot, early_pgtable_alloc, flags);
}

void __init mark_linear_text_alias_ro(void)
{
        /*
         * Remove the write permissions from the linear alias of .text/.rodata
         */
        update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
                            (unsigned long)__init_begin - (unsigned long)_text,
                            PAGE_KERNEL_RO);
}

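/*
 * Create the linear mapping of all usable memory. The kernel image alias
 * and (if set) the crash kernel region need stricter mapping flags, so
 * they are temporarily marked NOMAP to keep them out of the main loop and
 * are mapped separately afterwards.
 */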
static void __init map_mem(pgd_t *pgdp)
{
        phys_addr_t kernel_start = __pa_symbol(_text);
        phys_addr_t kernel_end = __pa_symbol(__init_begin);
        struct memblock_region *reg;
        int flags = 0;

        if (rodata_full || debug_pagealloc_enabled())
                flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

        /*
         * Take care not to create a writable alias for the
         * read-only text and rodata sections of the kernel image.
         * So temporarily mark them as NOMAP to skip mappings in
         * the following for-loop.
         */
        memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
#ifdef CONFIG_KEXEC_CORE
        if (crashk_res.end)
                memblock_mark_nomap(crashk_res.start,
                                    resource_size(&crashk_res));
#endif

        /* map all the memory banks */
        for_each_memblock(memory, reg) {
                phys_addr_t start = reg->base;
                phys_addr_t end = start + reg->size;

                if (start >= end)
                        break;
                if (memblock_is_nomap(reg))
                        continue;

                __map_memblock(pgdp, start, end, PAGE_KERNEL, flags);
        }

        /*
         * Map the linear alias of the [_text, __init_begin) interval
         * as non-executable now, and remove the write permission in
         * mark_linear_text_alias_ro() below (which will be called after
         * alternative patching has completed). This makes the contents
         * of the region accessible to subsystems such as hibernate,
         * but protects it from inadvertent modification or execution.
         * Note that contiguous mappings cannot be remapped in this way,
         * so we should avoid them here.
         */
        __map_memblock(pgdp, kernel_start, kernel_end,
                       PAGE_KERNEL, NO_CONT_MAPPINGS);
        memblock_clear_nomap(kernel_start, kernel_end - kernel_start);

#ifdef CONFIG_KEXEC_CORE
        /*
         * Use page-level mappings here so that we can shrink the region
         * in page granularity and put back unused memory to buddy system
         * through /sys/kernel/kexec_crash_size interface.
         */
        if (crashk_res.end) {
                __map_memblock(pgdp, crashk_res.start, crashk_res.end + 1,
                               PAGE_KERNEL,
                               NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
                memblock_clear_nomap(crashk_res.start,
                                     resource_size(&crashk_res));
        }
#endif
}

void mark_rodata_ro(void)
{
        unsigned long section_size;

        /*
         * mark .rodata as read only. Use __init_begin rather than __end_rodata
         * to cover NOTES and EXCEPTION_TABLE.
         */
        section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
        update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
                            section_size, PAGE_KERNEL_RO);

        debug_checkwx();
}

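/*
 * Map a segment of the kernel image at its link-time virtual address and
 * register a vm_struct for it via vm_area_add_early(), so the region is
 * accounted for in the vmalloc area (with an optional guard page).
 */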
static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
                                      pgprot_t prot, struct vm_struct *vma,
                                      int flags, unsigned long vm_flags)
{
        phys_addr_t pa_start = __pa_symbol(va_start);
        unsigned long size = va_end - va_start;

        BUG_ON(!PAGE_ALIGNED(pa_start));
        BUG_ON(!PAGE_ALIGNED(size));

        __create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
                             early_pgtable_alloc, flags);

        if (!(vm_flags & VM_NO_GUARD))
                size += PAGE_SIZE;

        vma->addr       = va_start;
        vma->phys_addr  = pa_start;
        vma->size       = size;
        vma->flags      = VM_MAP | vm_flags;
        vma->caller     = __builtin_return_address(0);

        vm_area_add_early(vma);
}

static int __init parse_rodata(char *arg)
{
        int ret = strtobool(arg, &rodata_enabled);
        if (!ret) {
                rodata_full = false;
                return 0;
        }

        /* permit 'full' in addition to boolean options */
        if (strcmp(arg, "full"))
                return -EINVAL;

        rodata_enabled = true;
        rodata_full = true;
        return 0;
}
early_param("rodata", parse_rodata);

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __init map_entry_trampoline(void)
{
        pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
        phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

        /* The trampoline is always mapped and can therefore be global */
        pgprot_val(prot) &= ~PTE_NG;

        /* Map only the text into the trampoline page table */
        memset(tramp_pg_dir, 0, PGD_SIZE);
        __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
                             prot, __pgd_pgtable_alloc, 0);

        /* Map both the text and data into the kernel page table */
        __set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
        if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
                extern char __entry_tramp_data_start[];

                __set_fixmap(FIX_ENTRY_TRAMP_DATA,
                             __pa_symbol(__entry_tramp_data_start),
                             PAGE_KERNEL_RO);
        }

        return 0;
}
core_initcall(map_entry_trampoline);
#endif

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgdp)
{
        static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
                                vmlinux_initdata, vmlinux_data;

        /*
         * External debuggers may need to write directly to the text
         * mapping to install SW breakpoints. Allow this (only) when
         * explicitly requested with rodata=off.
         */
        pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;

        /*
         * Only rodata will be remapped with different permissions later on,
         * all other segments are allowed to use contiguous mappings.
         */
        map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0,
                           VM_NO_GUARD);
        map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
                           &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
        map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
                           &vmlinux_inittext, 0, VM_NO_GUARD);
        map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
                           &vmlinux_initdata, 0, VM_NO_GUARD);
        map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);

        if (!READ_ONCE(pgd_val(*pgd_offset_raw(pgdp, FIXADDR_START)))) {
                /*
                 * The fixmap falls in a separate pgd to the kernel, and doesn't
                 * live in the carveout for the swapper_pg_dir. We can simply
                 * re-use the existing dir for the fixmap.
                 */
                set_pgd(pgd_offset_raw(pgdp, FIXADDR_START),
                        READ_ONCE(*pgd_offset_k(FIXADDR_START)));
        } else if (CONFIG_PGTABLE_LEVELS > 3) {
                /*
                 * The fixmap shares its top level pgd entry with the kernel
                 * mapping. This can really only occur when we are running
                 * with 16k/4 levels, so we can simply reuse the pud level
                 * entry instead.
                 */
                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
                pud_populate(&init_mm,
                             pud_set_fixmap_offset(pgdp, FIXADDR_START),
                             lm_alias(bm_pmd));
                pud_clear_fixmap();
        } else {
                BUG();
        }

        kasan_copy_shadow(pgdp);
}

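/*
 * Set up the final page tables: populate swapper_pg_dir (through the
 * fixmap, since it is mapped read-only) with the kernel and linear
 * mappings, switch TTBR1 over to it, and free the initial tables in
 * init_pg_dir.
 */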
void __init paging_init(void)
{
        pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir));

        map_kernel(pgdp);
        map_mem(pgdp);

        pgd_clear_fixmap();

        cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
        init_mm.pgd = swapper_pg_dir;

        memblock_free(__pa_symbol(init_pg_dir),
                      __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));

        memblock_allow_resize();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
        pgd_t *pgdp;
        pud_t *pudp, pud;
        pmd_t *pmdp, pmd;
        pte_t *ptep, pte;

        if ((((long)addr) >> VA_BITS) != -1UL)
                return 0;

        pgdp = pgd_offset_k(addr);
        if (pgd_none(READ_ONCE(*pgdp)))
                return 0;

        pudp = pud_offset(pgdp, addr);
        pud = READ_ONCE(*pudp);
        if (pud_none(pud))
                return 0;

        if (pud_sect(pud))
                return pfn_valid(pud_pfn(pud));

        pmdp = pmd_offset(pudp, addr);
        pmd = READ_ONCE(*pmdp);
        if (pmd_none(pmd))
                return 0;

        if (pmd_sect(pmd))
                return pfn_valid(pmd_pfn(pmd));

        ptep = pte_offset_kernel(pmdp, addr);
        pte = READ_ONCE(*ptep);
        if (pte_none(pte))
                return 0;

        return pfn_valid(pte_pfn(pte));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                struct vmem_altmap *altmap)
{
        return vmemmap_populate_basepages(start, end, node);
}
#else   /* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                struct vmem_altmap *altmap)
{
        unsigned long addr = start;
        unsigned long next;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;

        do {
                next = pmd_addr_end(addr, end);

                pgdp = vmemmap_pgd_populate(addr, node);
                if (!pgdp)
                        return -ENOMEM;

                pudp = vmemmap_pud_populate(pgdp, addr, node);
                if (!pudp)
                        return -ENOMEM;

                pmdp = pmd_offset(pudp, addr);
                if (pmd_none(READ_ONCE(*pmdp))) {
                        void *p = NULL;

                        p = vmemmap_alloc_block_buf(PMD_SIZE, node);
                        if (!p)
                                return -ENOMEM;

                        pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
                } else
                        vmemmap_verify((pte_t *)pmdp, node, addr, next);
        } while (addr = next, addr != end);

        return 0;
}
#endif  /* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end,
                struct vmem_altmap *altmap)
{
}
#endif  /* CONFIG_SPARSEMEM_VMEMMAP */

static inline pud_t * fixmap_pud(unsigned long addr)
{
        pgd_t *pgdp = pgd_offset_k(addr);
        pgd_t pgd = READ_ONCE(*pgdp);

        BUG_ON(pgd_none(pgd) || pgd_bad(pgd));

        return pud_offset_kimg(pgdp, addr);
}

static inline pmd_t * fixmap_pmd(unsigned long addr)
{
        pud_t *pudp = fixmap_pud(addr);
        pud_t pud = READ_ONCE(*pudp);

        BUG_ON(pud_none(pud) || pud_bad(pud));

        return pmd_offset_kimg(pudp, addr);
}

static inline pte_t * fixmap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). This function is called too early to use
 * lm_alias so __p*d_populate functions must be used to populate with the
 * physical address from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
        pgd_t *pgdp, pgd;
        pud_t *pudp;
        pmd_t *pmdp;
        unsigned long addr = FIXADDR_START;

        pgdp = pgd_offset_k(addr);
        pgd = READ_ONCE(*pgdp);
        if (CONFIG_PGTABLE_LEVELS > 3 &&
            !(pgd_none(pgd) || pgd_page_paddr(pgd) == __pa_symbol(bm_pud))) {
                /*
                 * We only end up here if the kernel mapping and the fixmap
                 * share the top level pgd entry, which should only happen on
                 * 16k/4 levels configurations.
                 */
                BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
                pudp = pud_offset_kimg(pgdp, addr);
        } else {
                if (pgd_none(pgd))
                        __pgd_populate(pgdp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
                pudp = fixmap_pud(addr);
        }
        if (pud_none(READ_ONCE(*pudp)))
                __pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
        pmdp = fixmap_pmd(addr);
        __pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

        if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
             || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                pr_warn("pmdp %p != %p, %p\n",
                        pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
                        fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
                pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
        }
}

/*
 * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
 * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
 */
void __set_fixmap(enum fixed_addresses idx,
                               phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *ptep;

        BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

        ptep = fixmap_pte(addr);

        if (pgprot_val(flags)) {
                set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
        } else {
                pte_clear(&init_mm, addr, ptep);
                flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
        }
}

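/*
 * Map the flattened device tree through the FIX_FDT fixmap slot so it can
 * be parsed before the linear map exists. The first chunk is mapped to
 * read the header, then the mapping is extended if the blob crosses a
 * SWAPPER_BLOCK_SIZE boundary.
 */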
void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
        const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
        int offset;
        void *dt_virt;

        /*
         * Check whether the physical FDT address is set and meets the minimum
         * alignment requirement. We rely on MIN_FDT_ALIGN being at least
         * 8 bytes so that the magic and size fields of the FDT header are
         * always accessible after mapping the first chunk; double-check
         * that here.
         */
        BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
        if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
                return NULL;

        /*
         * Make sure that the FDT region can be mapped without the need to
         * allocate additional translation table pages, so that it is safe
         * to call create_mapping_noalloc() this early.
         *
         * On 64k pages, the FDT will be mapped using PTEs, so we need to
         * be in the same PMD as the rest of the fixmap.
         * On 4k pages, we'll use section mappings for the FDT so we only
         * have to be in the same PUD.
         */
        BUILD_BUG_ON(dt_virt_base % SZ_2M);

        BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
                     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

        offset = dt_phys % SWAPPER_BLOCK_SIZE;
        dt_virt = (void *)dt_virt_base + offset;

        /* map the first chunk so we can read the size from the header */
        create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
                        dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

        if (fdt_magic(dt_virt) != FDT_MAGIC)
                return NULL;

        *size = fdt_totalsize(dt_virt);
        if (*size > MAX_FDT_SIZE)
                return NULL;

        if (offset + *size > SWAPPER_BLOCK_SIZE)
                create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
                               round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);

        return dt_virt;
}

void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
        void *dt_virt;
        int size;

        dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
        if (!dt_virt)
                return NULL;

        memblock_reserve(dt_phys, size);
        return dt_virt;
}

int __init arch_ioremap_p4d_supported(void)
{
        return 0;
}

int __init arch_ioremap_pud_supported(void)
{
        /*
         * Only 4k granule supports level 1 block mappings.
         * SW table walks can't handle removal of intermediate entries.
         */
        return IS_ENABLED(CONFIG_ARM64_4K_PAGES) &&
               !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
}

int __init arch_ioremap_pmd_supported(void)
{
        /* See arch_ioremap_pud_supported() */
        return !IS_ENABLED(CONFIG_ARM64_PTDUMP_DEBUGFS);
}

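/*
 * Install block (huge) entries at PUD/PMD level, used both by the mapping
 * code above and by huge-ioremap. Anything that would require
 * break-before-make is refused, as checked by pgattr_change_is_safe().
 */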
int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
{
        pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot));

        /* Only allow permission changes for now */
        if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)),
                                   pud_val(new_pud)))
                return 0;

        VM_BUG_ON(phys & ~PUD_MASK);
        set_pud(pudp, new_pud);
        return 1;
}

int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
{
        pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot));

        /* Only allow permission changes for now */
        if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)),
                                   pmd_val(new_pmd)))
                return 0;

        VM_BUG_ON(phys & ~PMD_MASK);
        set_pmd(pmdp, new_pmd);
        return 1;
}

int pud_clear_huge(pud_t *pudp)
{
        if (!pud_sect(READ_ONCE(*pudp)))
                return 0;
        pud_clear(pudp);
        return 1;
}

int pmd_clear_huge(pmd_t *pmdp)
{
        if (!pmd_sect(READ_ONCE(*pmdp)))
                return 0;
        pmd_clear(pmdp);
        return 1;
}

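/*
 * Free the table one level below a PMD/PUD entry, clearing the entry and
 * flushing the TLB walk caches first. Used on the huge-ioremap path when
 * a table entry is about to be replaced by a block mapping.
 */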
int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
{
        pte_t *table;
        pmd_t pmd;

        pmd = READ_ONCE(*pmdp);

        if (!pmd_table(pmd)) {
                VM_WARN_ON(1);
                return 1;
        }

        table = pte_offset_kernel(pmdp, addr);
        pmd_clear(pmdp);
        __flush_tlb_kernel_pgtable(addr);
        pte_free_kernel(NULL, table);
        return 1;
}

int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
{
        pmd_t *table;
        pmd_t *pmdp;
        pud_t pud;
        unsigned long next, end;

        pud = READ_ONCE(*pudp);

        if (!pud_table(pud)) {
                VM_WARN_ON(1);
                return 1;
        }

        table = pmd_offset(pudp, addr);
        pmdp = table;
        next = addr;
        end = addr + PUD_SIZE;
        do {
                pmd_free_pte_page(pmdp, next);
        } while (pmdp++, next += PMD_SIZE, next != end);

        pud_clear(pudp);
        __flush_tlb_kernel_pgtable(addr);
        pmd_free(NULL, table);
        return 1;
}

int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
{
        return 0;       /* Don't attempt a block mapping */
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size,
                        struct mhp_restrictions *restrictions)
{
        int flags = 0;

        if (rodata_full || debug_pagealloc_enabled())
                flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

        __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
                             size, PAGE_KERNEL, __pgd_pgtable_alloc, flags);

        return __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT,
                           restrictions);
}

void arch_remove_memory(int nid, u64 start, u64 size,
                        struct vmem_altmap *altmap)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct zone *zone;

        /*
         * FIXME: Cleanup page tables (also in arch_add_memory() in case
         * adding fails). Until then, this function should only be used
         * during memory hotplug (adding memory), not for memory
         * unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be
         * unlocked yet.
         */
        zone = page_zone(pfn_to_page(start_pfn));
        __remove_pages(zone, start_pfn, nr_pages, altmap);
}
#endif