linux/arch/x86/mm/pgtable.c
#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>

#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

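/*
 * Allocate a page for a kernel page table.  Kernel page tables are
 * never allocated from highmem, so PGALLOC_GFP is used directly.
 */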
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        return (pte_t *)__get_free_page(PGALLOC_GFP);
}

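/*
 * Allocate a page for a user page table.  The page may come from
 * highmem (see __userpte_alloc_gfp) and must pass pgtable_page_ctor()
 * before it is handed out, so that the split page-table lock is
 * initialized.
 */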
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

        pte = alloc_pages(__userpte_alloc_gfp, 0);
        if (!pte)
                return NULL;
        if (!pgtable_page_ctor(pte)) {
                __free_page(pte);
                return NULL;
        }
        return pte;
}

static int __init setup_userpte(char *arg)
{
        if (!arg)
                return -EINVAL;

        /*
         * "userpte=nohigh" disables allocation of user pagetables in
         * high memory.
         */
        if (strcmp(arg, "nohigh") == 0)
                __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
        else
                return -EINVAL;
        return 0;
}
early_param("userpte", setup_userpte);

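/*
 * Free a user page-table page as part of an mmu_gather batch: run the
 * page-table destructor, tell any paravirt backend that the pfn no
 * longer holds a page table, and let the gather machinery free the
 * page once the TLB has been flushed.
 */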
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
        pgtable_page_dtor(pte);
        paravirt_release_pte(page_to_pfn(pte));
        tlb_remove_page(tlb, pte);
}

#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
        struct page *page = virt_to_page(pmd);
        paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
        /*
         * NOTE! For PAE, any changes to the top page-directory-pointer-table
         * entries need a full cr3 reload to flush.
         */
#ifdef CONFIG_X86_PAE
        tlb->need_flush_all = 1;
#endif
        pgtable_pmd_page_dtor(page);
        tlb_remove_page(tlb, page);
}

#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
        paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
        tlb_remove_page(tlb, virt_to_page(pud));
}
#endif  /* CONFIG_PGTABLE_LEVELS > 3 */
#endif  /* CONFIG_PGTABLE_LEVELS > 2 */

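/*
 * When the kernel pmd is not shared, every pgd in the system is kept on
 * pgd_list (protected by pgd_lock) so that changes to the kernel part
 * of the address space can be propagated to all page tables.
 */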
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_del(&page->lru);
}

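/*
 * Number of pgd entries that are private to each page table.  When the
 * kernel pmd is shared, only the user entries (those below
 * KERNEL_PGD_BOUNDARY) are unshared; otherwise every entry is.
 */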
#define UNSHARED_PTRS_PER_PGD                           \
        (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)


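/*
 * The pgd page's page->index field is used to remember which mm_struct
 * owns the pgd, so that code walking pgd_list can get back to the
 * owning mm via pgd_page_get_mm().
 */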
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
        BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
        virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
        return (struct mm_struct *)page->index;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
        /* If the pgd points to a shared pagetable level (either the
           ptes in non-PAE, or shared PMD in PAE), then just copy the
           references from swapper_pg_dir. */
        if (CONFIG_PGTABLE_LEVELS == 2 ||
            (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
            CONFIG_PGTABLE_LEVELS == 4) {
                clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
                                swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                                KERNEL_PGD_PTRS);
        }

        /* list required to sync kernel mapping updates */
        if (!SHARED_KERNEL_PMD) {
                pgd_set_mm(pgd, mm);
                pgd_list_add(pgd);
        }
}

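/*
 * Undo pgd_ctor(): if the pgd was put on pgd_list, take it off again
 * under pgd_lock before the page is freed.
 */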
static void pgd_dtor(pgd_t *pgd)
{
        if (SHARED_KERNEL_PMD)
                return;

        spin_lock(&pgd_lock);
        pgd_list_del(pgd);
        spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- nyc
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS       UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
        paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

        /* Note: almost everything apart from _PAGE_PRESENT is
           reserved at the pmd (PDPT) level. */
        set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

        /*
         * According to Intel App note "TLBs, Paging-Structure Caches,
         * and Their Invalidation", April 2007, document 317080-001,
         * section 8.1: in PAE mode we explicitly have to flush the
         * TLB via cr3 if the top-level pgd is changed...
         */
        flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS       0

#endif  /* CONFIG_X86_PAE */

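/*
 * preallocate_pmds() allocates one pmd page for each of the
 * PREALLOCATED_PMDS pgd slots and runs the pmd constructor on it;
 * free_pmds() is the matching cleanup path used when allocation (or a
 * later step in pgd_alloc()) fails.
 */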
static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
        int i;

        for(i = 0; i < PREALLOCATED_PMDS; i++)
                if (pmds[i]) {
                        pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
                        free_page((unsigned long)pmds[i]);
                        mm_dec_nr_pmds(mm);
                }
}

static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
        int i;
        bool failed = false;

        for(i = 0; i < PREALLOCATED_PMDS; i++) {
                pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
                if (!pmd)
                        failed = true;
                if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
                        free_page((unsigned long)pmd);
                        pmd = NULL;
                        failed = true;
                }
                if (pmd)
                        mm_inc_nr_pmds(mm);
                pmds[i] = pmd;
        }

        if (failed) {
                free_pmds(mm, pmds);
                return -ENOMEM;
        }

        return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
        int i;

        for(i = 0; i < PREALLOCATED_PMDS; i++) {
                pgd_t pgd = pgdp[i];

                if (pgd_val(pgd) != 0) {
                        pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

                        pgdp[i] = native_make_pgd(0);

                        paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
                        pmd_free(mm, pmd);
                        mm_dec_nr_pmds(mm);
                }
        }
}

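/*
 * Install the preallocated pmds into the new pgd's pud entries.  For
 * the slots that cover kernel space (i >= KERNEL_PGD_BOUNDARY) the pmd
 * contents are copied from swapper_pg_dir first, so the new page table
 * starts out with the kernel mappings in place.
 */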
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
        pud_t *pud;
        int i;

        if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
                return;

        pud = pud_offset(pgd, 0);

        for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
                pmd_t *pmd = pmds[i];

                if (i >= KERNEL_PGD_BOUNDARY)
                        memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
                               sizeof(pmd_t) * PTRS_PER_PMD);

                pud_populate(mm, pud, pmd);
        }
}

/*
 * Xen paravirt assumes that the pgd table occupies a whole page, and the
 * 64-bit kernel makes the same assumption.
 *
 * A kernel with PAE paging that is not running as a Xen domain, however,
 * only needs 32 bytes for the pgd instead of a whole page.
 */
#ifdef CONFIG_X86_PAE

#include <linux/slab.h>

#define PGD_SIZE        (PTRS_PER_PGD * sizeof(pgd_t))
#define PGD_ALIGN       32

static struct kmem_cache *pgd_cache;

static int __init pgd_cache_init(void)
{
        /*
         * When a PAE kernel is running as a Xen domain, it does not use
         * a shared kernel pmd, which requires a whole page for the pgd,
         * so no slab cache is needed.
         */
        if (!SHARED_KERNEL_PMD)
                return 0;

        /*
         * When a PAE kernel is not running as a Xen domain, it uses a
         * shared kernel pmd, so the pgd does not need a whole page:
         * 32 bytes are enough.  Create a 32-byte slab cache at boot
         * time for pgd allocations.
         */
        pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
                                      SLAB_PANIC, NULL);
        if (!pgd_cache)
                return -ENOMEM;

        return 0;
}
core_initcall(pgd_cache_init);

static inline pgd_t *_pgd_alloc(void)
{
        /*
         * If SHARED_KERNEL_PMD is not set, the PAE kernel is running as
         * a Xen domain and the pgd needs a whole page.
         */
        if (!SHARED_KERNEL_PMD)
                return (pgd_t *)__get_free_page(PGALLOC_GFP);

        /*
         * Otherwise the PAE kernel is not running as a Xen domain and
         * the pgd can come from the 32-byte slab cache, saving memory.
         */
        return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
}

static inline void _pgd_free(pgd_t *pgd)
{
        if (!SHARED_KERNEL_PMD)
                free_page((unsigned long)pgd);
        else
                kmem_cache_free(pgd_cache, pgd);
}
#else
static inline pgd_t *_pgd_alloc(void)
{
        return (pgd_t *)__get_free_page(PGALLOC_GFP);
}

static inline void _pgd_free(pgd_t *pgd)
{
        free_page((unsigned long)pgd);
}
#endif /* CONFIG_X86_PAE */

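/*
 * Allocate and initialize a new pgd for an mm: allocate the pgd itself,
 * preallocate the pmds it needs (PAE only), run the paravirt hook, and
 * then, under pgd_lock, copy the kernel mappings and install the
 * preallocated pmds so that walkers of pgd_list never see a partially
 * populated pgd.
 */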
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd;
        pmd_t *pmds[PREALLOCATED_PMDS];

        pgd = _pgd_alloc();

        if (pgd == NULL)
                goto out;

        mm->pgd = pgd;

        if (preallocate_pmds(mm, pmds) != 0)
                goto out_free_pgd;

        if (paravirt_pgd_alloc(mm) != 0)
                goto out_free_pmds;

        /*
         * Make sure that pre-populating the pmds is atomic with
         * respect to anything walking the pgd_list, so that they
         * never see a partially populated pgd.
         */
        spin_lock(&pgd_lock);

        pgd_ctor(mm, pgd);
        pgd_prepopulate_pmd(mm, pgd, pmds);

        spin_unlock(&pgd_lock);

        return pgd;

out_free_pmds:
        free_pmds(mm, pmds);
out_free_pgd:
        _pgd_free(pgd);
out:
        return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        pgd_mop_up_pmds(mm, pgd);
        pgd_dtor(pgd);
        paravirt_pgd_free(mm, pgd);
        _pgd_free(pgd);
}

/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        int changed = !pte_same(*ptep, entry);

        if (changed && dirty) {
                *ptep = entry;
                pte_update(vma->vm_mm, address, ptep);
        }

        return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)
{
        int changed = !pmd_same(*pmdp, entry);

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

        if (changed && dirty) {
                *pmdp = entry;
                /*
                 * We had a write-protection fault here and changed the pmd
                 * to be more permissive. No need to flush the TLB for that,
                 * #PF is architecturally guaranteed to do that and in the
                 * worst-case we'll generate a spurious fault.
                 */
        }

        return changed;
}
#endif

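/*
 * Atomically clear the Accessed bit of a pte and report whether it was
 * set.  pte_update() lets paravirt backends see the change; no TLB
 * flush is done here (see ptep_clear_flush_young() below for why).
 */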
int ptep_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pte_t *ptep)
{
        int ret = 0;

        if (pte_young(*ptep))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *) &ptep->pte);

        if (ret)
                pte_update(vma->vm_mm, addr, ptep);

        return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pmd_t *pmdp)
{
        int ret = 0;

        if (pmd_young(*pmdp))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *)pmdp);

        return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
        /*
         * On x86 CPUs, clearing the accessed bit without a TLB flush
         * doesn't cause data corruption. [ It could cause incorrect
         * page aging and the (mistaken) reclaim of hot pages, but the
         * chance of that should be relatively low. ]
         *
         * So as a performance optimization don't flush the TLB when
         * clearing the accessed bit, it will eventually be flushed by
         * a context switch or a VM operation anyway. [ In the rare
         * event of it not getting flushed for a long time the delay
         * shouldn't really matter because there's no real memory
         * pressure for swapout to react to. ]
         */
        return ptep_test_and_clear_young(vma, address, ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp)
{
        int young;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

        young = pmdp_test_and_clear_young(vma, address, pmdp);
        if (young)
                flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

        return young;
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
        BUG_ON(fixmaps_set > 0);
        __FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
        printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
               -reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}

int fixmaps_set;

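/*
 * Install @pte at the virtual address of fixmap slot @idx; an
 * out-of-range index is a kernel bug.  native_set_fixmap() below is
 * the convenience wrapper that builds the pte from a physical address
 * and protection bits.
 */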
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        set_pte_vaddr(address, pte);
        fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
                       pgprot_t flags)
{
        __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
/**
 * pud_set_huge - setup kernel PUD mapping
 *
 * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
 * function sets up a huge page only if any of the following conditions are met:
 *
 * - MTRRs are disabled, or
 *
 * - MTRRs are enabled and the range is completely covered by a single MTRR, or
 *
 * - MTRRs are enabled and the corresponding MTRR memory type is WB, which
 *   has no effect on the requested PAT memory type.
 *
 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
 * page mapping attempt fails.
 *
 * Returns 1 on success and 0 on failure.
 */
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
        u8 mtrr, uniform;

        mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
        if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
            (mtrr != MTRR_TYPE_WRBACK))
                return 0;

        prot = pgprot_4k_2_large(prot);

        set_pte((pte_t *)pud, pfn_pte(
                (u64)addr >> PAGE_SHIFT,
                __pgprot(pgprot_val(prot) | _PAGE_PSE)));

        return 1;
}

/**
 * pmd_set_huge - setup kernel PMD mapping
 *
 * See text over pud_set_huge() above.
 *
 * Returns 1 on success and 0 on failure.
 */
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
        u8 mtrr, uniform;

        mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
        if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
            (mtrr != MTRR_TYPE_WRBACK)) {
                pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
                             __func__, addr, addr + PMD_SIZE);
                return 0;
        }

        prot = pgprot_4k_2_large(prot);

        set_pte((pte_t *)pmd, pfn_pte(
                (u64)addr >> PAGE_SHIFT,
                __pgprot(pgprot_val(prot) | _PAGE_PSE)));

        return 1;
}

/**
 * pud_clear_huge - clear kernel PUD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PUD map is found).
 */
int pud_clear_huge(pud_t *pud)
{
        if (pud_large(*pud)) {
                pud_clear(pud);
                return 1;
        }

        return 0;
}

/**
 * pmd_clear_huge - clear kernel PMD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PMD map is found).
 */
int pmd_clear_huge(pmd_t *pmd)
{
        if (pmd_large(*pmd)) {
                pmd_clear(pmd);
                return 1;
        }

        return 0;
}
#endif  /* CONFIG_HAVE_ARCH_HUGE_VMAP */