/* linux/arch/x86/mm/pgtable.c */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

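/*
 * GFP flags for user page-table pages.  Kernel page tables are always
 * allocated from lowmem; user PTE pages start from PGALLOC_GFP plus
 * __GFP_HIGHMEM when CONFIG_HIGHPTE=y, and the "userpte=nohigh" boot
 * option (handled by setup_userpte() below) clears the highmem bit again.
 */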
gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        return (pte_t *)__get_free_page(PGALLOC_GFP);
}

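/*
 * Allocate a PTE page for a user mapping.  PGALLOC_GFP includes
 * __GFP_ZERO, so the page comes back zeroed, and pgtable_page_ctor()
 * initializes it for use as a page table (split-ptlock setup and
 * accounting).
 */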
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

        pte = alloc_pages(__userpte_alloc_gfp, 0);
        if (pte)
                pgtable_page_ctor(pte);
        return pte;
}

static int __init setup_userpte(char *arg)
{
        if (!arg)
                return -EINVAL;

        /*
         * "userpte=nohigh" disables allocation of user pagetables in
         * high memory.
         */
        if (strcmp(arg, "nohigh") == 0)
                __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
        else
                return -EINVAL;
        return 0;
}
early_param("userpte", setup_userpte);

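/*
 * The ___p*_free_tlb() helpers tear down page-table pages that are being
 * removed from a live mm: undo the ctor, tell any paravirt backend that
 * the page is no longer a page table, and hand it to the mmu_gather so it
 * is only freed after the TLBs have been flushed.
 */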
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
        pgtable_page_dtor(pte);
        paravirt_release_pte(page_to_pfn(pte));
        tlb_remove_page(tlb, pte);
}

#if PAGETABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
        paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
        /*
         * NOTE! For PAE, any changes to the top page-directory-pointer-table
         * entries need a full cr3 reload to flush.
         */
#ifdef CONFIG_X86_PAE
        tlb->need_flush_all = 1;
#endif
        tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
        paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
        tlb_remove_page(tlb, virt_to_page(pud));
}
#endif  /* PAGETABLE_LEVELS > 3 */
#endif  /* PAGETABLE_LEVELS > 2 */

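/*
 * pgd_list links together every pgd whose kernel mappings are not shared
 * via a common kernel pmd, so that updates to the kernel portion of the
 * address space can be propagated to all of them.  Additions and removals
 * are serialized by pgd_lock.
 */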
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD                           \
        (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

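/*
 * Stash a pointer to the owning mm in the pgd's struct page (reusing the
 * otherwise unused page->index field), so that code walking pgd_list can
 * map a pgd page back to its mm_struct via pgd_page_get_mm().
 */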
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
        BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
        virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
        return (struct mm_struct *)page->index;
}

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
        /*
         * If the pgd points to a shared pagetable level (either the
         * ptes in non-PAE, or shared PMD in PAE), then just copy the
         * references from swapper_pg_dir.
         */
        if (PAGETABLE_LEVELS == 2 ||
            (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
            PAGETABLE_LEVELS == 4) {
                clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
                                swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                                KERNEL_PGD_PTRS);
        }

        /* list required to sync kernel mapping updates */
        if (!SHARED_KERNEL_PMD) {
                pgd_set_mm(pgd, mm);
                pgd_list_add(pgd);
        }
}

static void pgd_dtor(pgd_t *pgd)
{
        if (SHARED_KERNEL_PMD)
                return;

        spin_lock(&pgd_lock);
        pgd_list_del(pgd);
        spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- nyc
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS       UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
        paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

        /*
         * Note: almost everything apart from _PAGE_PRESENT is
         * reserved at the pmd (PDPT) level.
         */
        set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

        /*
         * According to Intel App note "TLBs, Paging-Structure Caches,
         * and Their Invalidation", April 2007, document 317080-001,
         * section 8.1: in PAE mode we explicitly have to flush the
         * TLB via cr3 if the top-level pgd is changed...
         */
        flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS       0

#endif  /* CONFIG_X86_PAE */

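/*
 * Helpers for the pmd pages that are preallocated for each new pgd on
 * PAE (PREALLOCATED_PMDS is 0 otherwise): preallocate_pmds() grabs them
 * all up front and free_pmds() releases whatever was allocated if any
 * allocation failed, so pgd_alloc() sees an all-or-nothing result.
 */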
static void free_pmds(pmd_t *pmds[])
{
        int i;

        for (i = 0; i < PREALLOCATED_PMDS; i++)
                if (pmds[i])
                        free_page((unsigned long)pmds[i]);
}

static int preallocate_pmds(pmd_t *pmds[])
{
        int i;
        bool failed = false;

        for (i = 0; i < PREALLOCATED_PMDS; i++) {
                pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
                if (pmd == NULL)
                        failed = true;
                pmds[i] = pmd;
        }

        if (failed) {
                free_pmds(pmds);
                return -ENOMEM;
        }

        return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
        int i;

        for (i = 0; i < PREALLOCATED_PMDS; i++) {
                pgd_t pgd = pgdp[i];

                if (pgd_val(pgd) != 0) {
                        pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

                        pgdp[i] = native_make_pgd(0);

                        paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
                        pmd_free(mm, pmd);
                }
        }
}

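/*
 * Populate the new pgd with the preallocated pmds.  For entries that
 * cover the kernel portion of the address space (at or above
 * KERNEL_PGD_BOUNDARY), first copy the kernel pmd contents from
 * swapper_pg_dir so the new mm sees the current kernel mappings.
 */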
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
        pud_t *pud;
        int i;

        if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
                return;

        pud = pud_offset(pgd, 0);

        for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
                pmd_t *pmd = pmds[i];

                if (i >= KERNEL_PGD_BOUNDARY)
                        memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
                               sizeof(pmd_t) * PTRS_PER_PMD);

                pud_populate(mm, pud, pmd);
        }
}

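/*
 * Allocate and set up a new pgd: allocate the page, preallocate any pmds
 * needed on PAE, run the paravirt hook, and then wire everything up under
 * pgd_lock so walkers of pgd_list never observe a partially populated pgd.
 */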
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd;
        pmd_t *pmds[PREALLOCATED_PMDS];

        pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);

        if (pgd == NULL)
                goto out;

        mm->pgd = pgd;

        if (preallocate_pmds(pmds) != 0)
                goto out_free_pgd;

        if (paravirt_pgd_alloc(mm) != 0)
                goto out_free_pmds;

        /*
         * Make sure that pre-populating the pmds is atomic with
         * respect to anything walking the pgd_list, so that they
         * never see a partially populated pgd.
         */
        spin_lock(&pgd_lock);

        pgd_ctor(mm, pgd);
        pgd_prepopulate_pmd(mm, pgd, pmds);

        spin_unlock(&pgd_lock);

        return pgd;

out_free_pmds:
        free_pmds(pmds);
out_free_pgd:
        free_page((unsigned long)pgd);
out:
        return NULL;
}

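/*
 * Tear down a pgd in the reverse order of pgd_alloc(): release any
 * preallocated pmds still attached to it, drop it from pgd_list, let the
 * paravirt backend clean up, and finally free the page itself.
 */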
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        pgd_mop_up_pmds(mm, pgd);
        pgd_dtor(pgd);
        paravirt_pgd_free(mm, pgd);
        free_page((unsigned long)pgd);
}

/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        int changed = !pte_same(*ptep, entry);

        if (changed && dirty) {
                *ptep = entry;
                pte_update_defer(vma->vm_mm, address, ptep);
        }

        return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)
{
        int changed = !pmd_same(*pmdp, entry);

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

        if (changed && dirty) {
                *pmdp = entry;
                pmd_update_defer(vma->vm_mm, address, pmdp);
                /*
                 * We had a write-protection fault here and changed the pmd
                 * to a more permissive one. No need to flush the TLB for
                 * that, as a #PF is architecturally guaranteed to do the
                 * flush, and in the worst case we'll generate a spurious
                 * fault.
                 */
        }

        return changed;
}
#endif

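/*
 * Atomically test and clear the accessed bit in a pte (or pmd, for the
 * huge-page variant).  No TLB flush is done here; callers that need other
 * CPUs to notice the cleared bit use the *_clear_flush_young() variants
 * below.
 */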
int ptep_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pte_t *ptep)
{
        int ret = 0;

        if (pte_young(*ptep))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *) &ptep->pte);

        if (ret)
                pte_update(vma->vm_mm, addr, ptep);

        return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                              unsigned long addr, pmd_t *pmdp)
{
        int ret = 0;

        if (pmd_young(*pmdp))
                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                         (unsigned long *)pmdp);

        if (ret)
                pmd_update(vma->vm_mm, addr, pmdp);

        return ret;
}
#endif

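/*
 * Flushing variants: clear the accessed bit and, if it was set, flush the
 * TLB for the page (or huge-page range) so other CPUs drop their cached
 * copy of the old entry.
 */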
int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
        int young;

        young = ptep_test_and_clear_young(vma, address, ptep);
        if (young)
                flush_tlb_page(vma, address);

        return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp)
{
        int young;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);

        young = pmdp_test_and_clear_young(vma, address, pmdp);
        if (young)
                flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

        return young;
}

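/*
 * Mark a huge pmd as being split by setting _PAGE_BIT_SPLITTING on it.
 * The TLB flush below is only there to serialize against the lockless
 * gup-fast path, as the inline comment notes.
 */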
void pmdp_splitting_flush(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp)
{
        int set;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
                                (unsigned long *)pmdp);
        if (set) {
                pmd_update(vma->vm_mm, address, pmdp);
                /* need tlb flush only to serialize against gup-fast */
                flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
        BUG_ON(fixmaps_set > 0);
        printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
               (int)-reserve);
        __FIXADDR_TOP = -reserve - PAGE_SIZE;
#endif
}

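/*
 * Fixmap plumbing: fixmaps_set counts how many fixmap slots have been
 * installed (reserve_top_address() refuses to move the fixmap once any
 * are set).  __native_set_fixmap() installs a ready-made pte for a fixmap
 * index, while native_set_fixmap() builds the pte from a physical address
 * and pgprot first.
 */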
int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        set_pte_vaddr(address, pte);
        fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
                       pgprot_t flags)
{
        __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}