linux/arch/x86/mm/pgtable.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>

#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
EXPORT_SYMBOL(physical_mask);
#endif

#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

        pte = alloc_pages(__userpte_alloc_gfp, 0);
        if (!pte)
                return NULL;
        if (!pgtable_page_ctor(pte)) {
                __free_page(pte);
                return NULL;
        }
        return pte;
}

static int __init setup_userpte(char *arg)
{
        if (!arg)
                return -EINVAL;

        /*
         * "userpte=nohigh" disables allocation of user pagetables in
         * high memory.
         */
        if (strcmp(arg, "nohigh") == 0)
                __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
        else
                return -EINVAL;
        return 0;
}
early_param("userpte", setup_userpte);
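
/*
 * Illustrative example (not part of the original file): on a CONFIG_HIGHPTE
 * kernel, booting with
 *
 *      userpte=nohigh
 *
 * on the command line clears __GFP_HIGHMEM from __userpte_alloc_gfp above,
 * so pte_alloc_one() serves user page-table pages from lowmem only.
 */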

void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
        pgtable_page_dtor(pte);
        paravirt_release_pte(page_to_pfn(pte));
        paravirt_tlb_remove_table(tlb, pte);
}

#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
        struct page *page = virt_to_page(pmd);
        paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
        /*
         * NOTE! For PAE, any changes to the top page-directory-pointer-table
         * entries need a full cr3 reload to flush.
         */
#ifdef CONFIG_X86_PAE
        tlb->need_flush_all = 1;
#endif
        pgtable_pmd_page_dtor(page);
        paravirt_tlb_remove_table(tlb, page);
}

#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
        paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
        paravirt_tlb_remove_table(tlb, virt_to_page(pud));
}

#if CONFIG_PGTABLE_LEVELS > 4
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
{
        paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
        paravirt_tlb_remove_table(tlb, virt_to_page(p4d));
}
#endif  /* CONFIG_PGTABLE_LEVELS > 4 */
#endif  /* CONFIG_PGTABLE_LEVELS > 3 */
#endif  /* CONFIG_PGTABLE_LEVELS > 2 */

static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD                           \
        (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
#define MAX_UNSHARED_PTRS_PER_PGD                       \
        max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)


static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
        virt_to_page(pgd)->pt_mm = mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
        return page->pt_mm;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
        /* If the pgd points to a shared pagetable level (either the
           ptes in non-PAE, or shared PMD in PAE), then just copy the
           references from swapper_pg_dir. */
        if (CONFIG_PGTABLE_LEVELS == 2 ||
            (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
            CONFIG_PGTABLE_LEVELS >= 4) {
                clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
                                swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                                KERNEL_PGD_PTRS);
        }

        /* list required to sync kernel mapping updates */
        if (!SHARED_KERNEL_PMD) {
                pgd_set_mm(pgd, mm);
                pgd_list_add(pgd);
        }
}

static void pgd_dtor(pgd_t *pgd)
{
        if (SHARED_KERNEL_PMD)
                return;

        spin_lock(&pgd_lock);
        pgd_list_del(pgd);
        spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- nyc
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS       UNSHARED_PTRS_PER_PGD
#define MAX_PREALLOCATED_PMDS   MAX_UNSHARED_PTRS_PER_PGD

/*
 * We allocate separate PMDs for the kernel part of the user page-table
 * when PTI is enabled. We need them to map the per-process LDT into the
 * user-space page-table.
 */
#define PREALLOCATED_USER_PMDS   (static_cpu_has(X86_FEATURE_PTI) ? \
                                        KERNEL_PGD_PTRS : 0)
#define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
        paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

        /* Note: almost everything apart from _PAGE_PRESENT is
           reserved at the pmd (PDPT) level. */
        set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

        /*
         * According to Intel App note "TLBs, Paging-Structure Caches,
         * and Their Invalidation", April 2007, document 317080-001,
         * section 8.1: in PAE mode we explicitly have to flush the
         * TLB via cr3 if the top-level pgd is changed...
         */
        flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS       0
#define MAX_PREALLOCATED_PMDS   0
#define PREALLOCATED_USER_PMDS   0
#define MAX_PREALLOCATED_USER_PMDS 0
#endif  /* CONFIG_X86_PAE */

static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
        int i;

        for (i = 0; i < count; i++)
                if (pmds[i]) {
                        pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
                        free_page((unsigned long)pmds[i]);
                        mm_dec_nr_pmds(mm);
                }
}

static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
        int i;
        bool failed = false;
        gfp_t gfp = PGALLOC_GFP;

        if (mm == &init_mm)
                gfp &= ~__GFP_ACCOUNT;

        for (i = 0; i < count; i++) {
                pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
                if (!pmd)
                        failed = true;
                if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
                        free_page((unsigned long)pmd);
                        pmd = NULL;
                        failed = true;
                }
                if (pmd)
                        mm_inc_nr_pmds(mm);
                pmds[i] = pmd;
        }

        if (failed) {
                free_pmds(mm, pmds, count);
                return -ENOMEM;
        }

        return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
{
        pgd_t pgd = *pgdp;

        if (pgd_val(pgd) != 0) {
                pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

                pgd_clear(pgdp);

                paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
                pmd_free(mm, pmd);
                mm_dec_nr_pmds(mm);
        }
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
        int i;

        for (i = 0; i < PREALLOCATED_PMDS; i++)
                mop_up_one_pmd(mm, &pgdp[i]);

#ifdef CONFIG_PAGE_TABLE_ISOLATION

        if (!static_cpu_has(X86_FEATURE_PTI))
                return;

        pgdp = kernel_to_user_pgdp(pgdp);

        for (i = 0; i < PREALLOCATED_USER_PMDS; i++)
                mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]);
#endif
}

static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
        p4d_t *p4d;
        pud_t *pud;
        int i;

        if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
                return;

        p4d = p4d_offset(pgd, 0);
        pud = pud_offset(p4d, 0);

        for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
                pmd_t *pmd = pmds[i];

                if (i >= KERNEL_PGD_BOUNDARY)
                        memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
                               sizeof(pmd_t) * PTRS_PER_PMD);

                pud_populate(mm, pud, pmd);
        }
}

#ifdef CONFIG_PAGE_TABLE_ISOLATION
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
                                     pgd_t *k_pgd, pmd_t *pmds[])
{
        pgd_t *s_pgd = kernel_to_user_pgdp(swapper_pg_dir);
        pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
        p4d_t *u_p4d;
        pud_t *u_pud;
        int i;

        u_p4d = p4d_offset(u_pgd, 0);
        u_pud = pud_offset(u_p4d, 0);

        s_pgd += KERNEL_PGD_BOUNDARY;
        u_pud += KERNEL_PGD_BOUNDARY;

        for (i = 0; i < PREALLOCATED_USER_PMDS; i++, u_pud++, s_pgd++) {
                pmd_t *pmd = pmds[i];

                memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd),
                       sizeof(pmd_t) * PTRS_PER_PMD);

                pud_populate(mm, u_pud, pmd);
        }

}
#else
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
                                     pgd_t *k_pgd, pmd_t *pmds[])
{
}
#endif
/*
 * Xen paravirt assumes that the pgd table fits in one page, and the 64-bit
 * kernel makes the same assumption.
 *
 * A kernel with PAE paging that is not running as a Xen domain, however,
 * only needs to allocate 32 bytes for its pgd instead of one page.
 */
#ifdef CONFIG_X86_PAE

#include <linux/slab.h>

#define PGD_SIZE        (PTRS_PER_PGD * sizeof(pgd_t))
#define PGD_ALIGN       32

static struct kmem_cache *pgd_cache;

static int __init pgd_cache_init(void)
{
        /*
         * When a PAE kernel runs as a Xen domain, it does not use a shared
         * kernel pmd, which requires a whole page for the pgd.
         */
        if (!SHARED_KERNEL_PMD)
                return 0;

        /*
         * When a PAE kernel is not running as a Xen domain, it uses a
         * shared kernel pmd, which does not require a whole page for the
         * pgd: 32 bytes (PTRS_PER_PGD * sizeof(pgd_t) = 4 * 8) are enough.
         * At boot we therefore create a 32-byte slab cache for pgd
         * allocations.
         */
        pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
                                      SLAB_PANIC, NULL);
        return 0;
}
core_initcall(pgd_cache_init);

static inline pgd_t *_pgd_alloc(void)
{
        /*
         * Without SHARED_KERNEL_PMD the PAE kernel is running as a Xen
         * domain, so allocate a whole page for the pgd.
         */
        if (!SHARED_KERNEL_PMD)
                return (pgd_t *)__get_free_pages(PGALLOC_GFP,
                                                 PGD_ALLOCATION_ORDER);

        /*
         * Otherwise the PAE kernel is not running as a Xen domain and a
         * 32-byte slab object is enough for the pgd, which saves memory.
         */
        return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
}

static inline void _pgd_free(pgd_t *pgd)
{
        if (!SHARED_KERNEL_PMD)
                free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
        else
                kmem_cache_free(pgd_cache, pgd);
}
#else

static inline pgd_t *_pgd_alloc(void)
{
        return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
}

static inline void _pgd_free(pgd_t *pgd)
{
        free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
}
#endif /* CONFIG_X86_PAE */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd;
        pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
        pmd_t *pmds[MAX_PREALLOCATED_PMDS];

        pgd = _pgd_alloc();

        if (pgd == NULL)
                goto out;

        mm->pgd = pgd;

        if (preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0)
                goto out_free_pgd;

        if (preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0)
                goto out_free_pmds;

        if (paravirt_pgd_alloc(mm) != 0)
                goto out_free_user_pmds;

        /*
         * Make sure that pre-populating the pmds is atomic with
         * respect to anything walking the pgd_list, so that they
         * never see a partially populated pgd.
         */
        spin_lock(&pgd_lock);

        pgd_ctor(mm, pgd);
        pgd_prepopulate_pmd(mm, pgd, pmds);
        pgd_prepopulate_user_pmd(mm, pgd, u_pmds);

        spin_unlock(&pgd_lock);

        return pgd;

out_free_user_pmds:
        free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS);
out_free_pmds:
        free_pmds(mm, pmds, PREALLOCATED_PMDS);
out_free_pgd:
        _pgd_free(pgd);
out:
        return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        pgd_mop_up_pmds(mm, pgd);
        pgd_dtor(pgd);
        paravirt_pgd_free(mm, pgd);
        _pgd_free(pgd);
}
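
/*
 * Usage sketch (illustrative, not taken from this file): generic mm code
 * pairs these two helpers around the lifetime of an address space, roughly:
 *
 *      mm->pgd = pgd_alloc(mm);        at mm creation
 *      ...
 *      pgd_free(mm, mm->pgd);          at mm teardown
 *
 * In the kernel this pairing is done by mm_alloc_pgd()/mm_free_pgd() in
 * kernel/fork.c.
 */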

/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        int changed = !pte_same(*ptep, entry);

        if (changed && dirty)
                set_pte(ptep, entry);

        return changed;
}
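
/*
 * Illustrative example (assumed caller, not from this file): the write-
 * protection fault path builds a writable, dirty entry and passes it in,
 * roughly:
 *
 *      entry = pte_mkyoung(pte_mkdirty(pte_mkwrite(orig_pte)));
 *      ptep_set_access_flags(vma, address, ptep, entry, 1);
 *
 * Because dirty == 1 and the entry differs from *ptep, the PTE is written;
 * from then on the hardware maintains the accessed and dirty bits itself.
 */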

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)
{
        int changed = !pmd_same(*pmdp, entry);

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

        if (changed && dirty) {
                set_pmd(pmdp, entry);
                /*
                 * We had a write-protection fault here and changed the pmd
                 * to be more permissive. No need to flush the TLB for that,
                 * #PF is architecturally guaranteed to do that and in the
                 * worst-case we'll generate a spurious fault.
                 */
        }

        return changed;
}

int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
                          pud_t *pudp, pud_t entry, int dirty)
{
        int changed = !pud_same(*pudp, entry);

        VM_BUG_ON(address & ~HPAGE_PUD_MASK);

        if (changed && dirty) {
                set_pud(pudp, entry);
                /*
                 * We had a write-protection fault here and changed the pud
                 * to be more permissive. No need to flush the TLB for that,
                 * #PF is architecturally guaranteed to do that and in the
                 * worst-case we'll generate a spurious fault.
                 */
        }

        return changed;
}
#endif

int ptep_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pte_t *ptep)
{
        int ret = 0;

        if (pte_young(*ptep))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *) &ptep->pte);

        return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pmd_t *pmdp)
{
        int ret = 0;

        if (pmd_young(*pmdp))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *)pmdp);

        return ret;
}
int pudp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pud_t *pudp)
{
        int ret = 0;

        if (pud_young(*pudp))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *)pudp);

        return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
        /*
         * On x86 CPUs, clearing the accessed bit without a TLB flush
         * doesn't cause data corruption. [ It could cause incorrect
         * page aging and the (mistaken) reclaim of hot pages, but the
         * chance of that should be relatively low. ]
         *
         * So as a performance optimization don't flush the TLB when
         * clearing the accessed bit, it will eventually be flushed by
         * a context switch or a VM operation anyway. [ In the rare
         * event of it not getting flushed for a long time the delay
         * shouldn't really matter because there's no real memory
         * pressure for swapout to react to. ]
         */
        return ptep_test_and_clear_young(vma, address, ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp)
{
        int young;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

        young = pmdp_test_and_clear_young(vma, address, pmdp);
        if (young)
                flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

        return young;
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
        BUG_ON(fixmaps_set > 0);
        __FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
        printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
               -reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}

int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
        unsigned long address = __fix_to_virt(idx);

#ifdef CONFIG_X86_64
        /*
         * Ensure that the static initial page tables are covering the
         * fixmap completely.
         */
        BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
                     (FIXMAP_PMD_NUM * PTRS_PER_PTE));
#endif

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        set_pte_vaddr(address, pte);
        fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
                       pgprot_t flags)
{
        /* Sanitize 'prot' against any unsupported bits: */
        pgprot_val(flags) &= __default_kernel_pte_mask;

        __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}
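
/*
 * Illustrative example (assumes the generic helpers from
 * asm-generic/fixmap.h): a typical user maps a physical page into a fixed
 * virtual slot and then accesses it through fix_to_virt():
 *
 *      set_fixmap(FIX_EXAMPLE, phys_addr);
 *      ptr = (void *)fix_to_virt(FIX_EXAMPLE);
 *
 * FIX_EXAMPLE stands for a hypothetical enum fixed_addresses entry;
 * on bare metal set_fixmap() ends up in native_set_fixmap() above.
 */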

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
#ifdef CONFIG_X86_5LEVEL
/**
 * p4d_set_huge - setup kernel P4D mapping
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
        return 0;
}

/**
 * p4d_clear_huge - clear kernel P4D mapping when it is set
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_clear_huge(p4d_t *p4d)
{
        return 0;
}
#endif

/**
 * pud_set_huge - setup kernel PUD mapping
 *
 * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
 * function sets up a huge page only if any of the following conditions are met:
 *
 * - MTRRs are disabled, or
 *
 * - MTRRs are enabled and the range is completely covered by a single MTRR, or
 *
 * - MTRRs are enabled and the corresponding MTRR memory type is WB, which
 *   has no effect on the requested PAT memory type.
 *
 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
 * page mapping attempt fails.
 *
 * Returns 1 on success and 0 on failure.
 */
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
        u8 mtrr, uniform;

        mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
        if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
            (mtrr != MTRR_TYPE_WRBACK))
                return 0;

        /* Bail out if we are on a populated non-leaf entry: */
        if (pud_present(*pud) && !pud_huge(*pud))
                return 0;

        prot = pgprot_4k_2_large(prot);

        set_pte((pte_t *)pud, pfn_pte(
                (u64)addr >> PAGE_SHIFT,
                __pgprot(pgprot_val(prot) | _PAGE_PSE)));

        return 1;
}
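
/*
 * Caller sketch (illustrative; details elided): as the comment above
 * pud_set_huge() says, a mapper is expected to fall back to smaller page
 * sizes when the huge mapping is refused, roughly:
 *
 *      if (!pud_set_huge(pud, phys, prot)) {
 *              pmd = pmd_alloc(&init_mm, pud, addr);
 *              if (!pmd || !pmd_set_huge(pmd, phys, prot))
 *                      ... map the range with 4K ptes ...
 *      }
 *
 * The real fallback logic lives in the ioremap/vmalloc mapping code, not
 * in this file.
 */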

/**
 * pmd_set_huge - setup kernel PMD mapping
 *
 * See the comment above pud_set_huge().
 *
 * Returns 1 on success and 0 on failure.
 */
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
        u8 mtrr, uniform;

        mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
        if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
            (mtrr != MTRR_TYPE_WRBACK)) {
                pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
                             __func__, addr, addr + PMD_SIZE);
                return 0;
        }

        /* Bail out if we are on a populated non-leaf entry: */
        if (pmd_present(*pmd) && !pmd_huge(*pmd))
                return 0;

        prot = pgprot_4k_2_large(prot);

        set_pte((pte_t *)pmd, pfn_pte(
                (u64)addr >> PAGE_SHIFT,
                __pgprot(pgprot_val(prot) | _PAGE_PSE)));

        return 1;
}

/**
 * pud_clear_huge - clear kernel PUD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PUD map is found).
 */
int pud_clear_huge(pud_t *pud)
{
        if (pud_large(*pud)) {
                pud_clear(pud);
                return 1;
        }

        return 0;
}

/**
 * pmd_clear_huge - clear kernel PMD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PMD map is found).
 */
int pmd_clear_huge(pmd_t *pmd)
{
        if (pmd_large(*pmd)) {
                pmd_clear(pmd);
                return 1;
        }

        return 0;
}

#ifdef CONFIG_X86_64
/**
 * pud_free_pmd_page - Clear pud entry and free pmd page.
 * @pud: Pointer to a PUD.
 * @addr: Virtual address associated with pud.
 *
 * Context: The pud range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 *
 * NOTE: Callers must allow a single page allocation.
 */
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
        pmd_t *pmd, *pmd_sv;
        pte_t *pte;
        int i;

        if (pud_none(*pud))
                return 1;

        pmd = (pmd_t *)pud_page_vaddr(*pud);
        pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
        if (!pmd_sv)
                return 0;

        for (i = 0; i < PTRS_PER_PMD; i++) {
                pmd_sv[i] = pmd[i];
                if (!pmd_none(pmd[i]))
                        pmd_clear(&pmd[i]);
        }

        pud_clear(pud);

        /* INVLPG to clear all paging-structure caches */
        flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

        for (i = 0; i < PTRS_PER_PMD; i++) {
                if (!pmd_none(pmd_sv[i])) {
                        pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
                        free_page((unsigned long)pte);
                }
        }

        free_page((unsigned long)pmd_sv);
        free_page((unsigned long)pmd);

        return 1;
}

/**
 * pmd_free_pte_page - Clear pmd entry and free pte page.
 * @pmd: Pointer to a PMD.
 * @addr: Virtual address associated with pmd.
 *
 * Context: The pmd range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
        pte_t *pte;

        if (pmd_none(*pmd))
                return 1;

        pte = (pte_t *)pmd_page_vaddr(*pmd);
        pmd_clear(pmd);

        /* INVLPG to clear all paging-structure caches */
        flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

        free_page((unsigned long)pte);

        return 1;
}

#else /* !CONFIG_X86_64 */

int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
        return pud_none(*pud);
}

/*
 * Disable free page handling on x86-PAE. This assures that ioremap()
 * does not update sync'd pmd entries. See vmalloc_sync_one().
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
        return pmd_none(*pmd);
}

#endif /* CONFIG_X86_64 */
#endif  /* CONFIG_HAVE_ARCH_HUGE_VMAP */