linux/arch/powerpc/mm/hugetlbpage.c
   1/*
   2 * PPC Huge TLB Page Support for Kernel.
   3 *
   4 * Copyright (C) 2003 David Gibson, IBM Corporation.
   5 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
   6 *
   7 * Based on the IA-32 version:
   8 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
   9 */
  10
  11#include <linux/mm.h>
  12#include <linux/io.h>
  13#include <linux/slab.h>
  14#include <linux/hugetlb.h>
  15#include <linux/export.h>
  16#include <linux/of_fdt.h>
  17#include <linux/memblock.h>
  18#include <linux/bootmem.h>
  19#include <linux/moduleparam.h>
  20#include <asm/pgtable.h>
  21#include <asm/pgalloc.h>
  22#include <asm/tlb.h>
  23#include <asm/setup.h>
  24#include <asm/hugetlb.h>
  25
  26#ifdef CONFIG_HUGETLB_PAGE
  27
  28#define PAGE_SHIFT_64K  16
  29#define PAGE_SHIFT_16M  24
  30#define PAGE_SHIFT_16G  34
  31
  32unsigned int HPAGE_SHIFT;
  33
  34/*
  35 * Tracks gpages after the device tree is scanned and before the
  36 * huge_boot_pages list is ready.  On non-Freescale implementations, this is
  37 * just used to track 16G pages and so is a single array.  FSL-based
  38 * implementations may have more than one gpage size, so we need multiple
  39 * arrays
  40 */
  41#ifdef CONFIG_PPC_FSL_BOOK3E
  42#define MAX_NUMBER_GPAGES       128
  43struct psize_gpages {
  44        u64 gpage_list[MAX_NUMBER_GPAGES];
  45        unsigned int nr_gpages;
  46};
  47static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
  48#else
  49#define MAX_NUMBER_GPAGES       1024
  50static u64 gpage_freearray[MAX_NUMBER_GPAGES];
  51static unsigned nr_gpages;
  52#endif
  53
  54#define hugepd_none(hpd)        ((hpd).pd == 0)
  55
  56#ifdef CONFIG_PPC_BOOK3S_64
  57/*
  58 * At this point we do the placement change only for BOOK3S 64. This would
  59 * possibly work on other subarchs.
  60 */
  61
  62/*
   63 * We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can have a
   64 * 16GB hugepage pte in the PGD and a 16MB hugepage pte at the PMD level.
  65 */
  66int pmd_huge(pmd_t pmd)
  67{
  68        /*
  69         * leaf pte for huge page, bottom two bits != 00
  70         */
  71        return ((pmd_val(pmd) & 0x3) != 0x0);
  72}
  73
  74int pud_huge(pud_t pud)
  75{
  76        /*
  77         * leaf pte for huge page, bottom two bits != 00
  78         */
  79        return ((pud_val(pud) & 0x3) != 0x0);
  80}
  81
  82int pgd_huge(pgd_t pgd)
  83{
  84        /*
  85         * leaf pte for huge page, bottom two bits != 00
  86         */
  87        return ((pgd_val(pgd) & 0x3) != 0x0);
  88}
  89#else
  90int pmd_huge(pmd_t pmd)
  91{
  92        return 0;
  93}
  94
  95int pud_huge(pud_t pud)
  96{
  97        return 0;
  98}
  99
 100int pgd_huge(pgd_t pgd)
 101{
 102        return 0;
 103}
 104#endif
 105
 106pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 107{
 108        /* Only called for hugetlbfs pages, hence can ignore THP */
 109        return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
 110}
 111
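/*
 * Allocate a hugepte table from the appropriate kmem cache and install it in
 * the hugepd entry (or, on FSL Book3E, in each of the several hugepd entries
 * that cover one huge page).  Returns 0 on success and -ENOMEM on allocation
 * failure; losing the race to another thread that populated the entry first
 * is not treated as an error.
 */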
 112static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
 113                           unsigned long address, unsigned pdshift, unsigned pshift)
 114{
 115        struct kmem_cache *cachep;
 116        pte_t *new;
 117
 118#ifdef CONFIG_PPC_FSL_BOOK3E
 119        int i;
 120        int num_hugepd = 1 << (pshift - pdshift);
 121        cachep = hugepte_cache;
 122#else
 123        cachep = PGT_CACHE(pdshift - pshift);
 124#endif
 125
 126        new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);
 127
 128        BUG_ON(pshift > HUGEPD_SHIFT_MASK);
 129        BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
 130
  131        if (!new)
 132                return -ENOMEM;
 133
 134        spin_lock(&mm->page_table_lock);
 135#ifdef CONFIG_PPC_FSL_BOOK3E
 136        /*
 137         * We have multiple higher-level entries that point to the same
 138         * actual pte location.  Fill in each as we go and backtrack on error.
 139         * We need all of these so the DTLB pgtable walk code can find the
 140         * right higher-level entry without knowing if it's a hugepage or not.
 141         */
 142        for (i = 0; i < num_hugepd; i++, hpdp++) {
 143                if (unlikely(!hugepd_none(*hpdp)))
 144                        break;
 145                else
 146                        /* We use the old format for PPC_FSL_BOOK3E */
 147                        hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
 148        }
  149        /* If we bailed out of the for loop early, an error occurred; clean up */
 150        if (i < num_hugepd) {
 151                for (i = i - 1 ; i >= 0; i--, hpdp--)
 152                        hpdp->pd = 0;
 153                kmem_cache_free(cachep, new);
 154        }
 155#else
 156        if (!hugepd_none(*hpdp))
 157                kmem_cache_free(cachep, new);
 158        else {
 159#ifdef CONFIG_PPC_BOOK3S_64
 160                hpdp->pd = (unsigned long)new |
 161                            (shift_to_mmu_psize(pshift) << 2);
 162#else
 163                hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
 164#endif
 165        }
 166#endif
 167        spin_unlock(&mm->page_table_lock);
 168        return 0;
 169}
 170
 171/*
 172 * These macros define how to determine which level of the page table holds
 173 * the hpdp.
 174 */
 175#ifdef CONFIG_PPC_FSL_BOOK3E
 176#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
 177#define HUGEPD_PUD_SHIFT PUD_SHIFT
 178#else
 179#define HUGEPD_PGD_SHIFT PUD_SHIFT
 180#define HUGEPD_PUD_SHIFT PMD_SHIFT
 181#endif
 182
 183#ifdef CONFIG_PPC_BOOK3S_64
 184/*
 185 * At this point we do the placement change only for BOOK3S 64. This would
 186 * possibly work on other subarchs.
 187 */
 188pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
 189{
 190        pgd_t *pg;
 191        pud_t *pu;
 192        pmd_t *pm;
 193        hugepd_t *hpdp = NULL;
 194        unsigned pshift = __ffs(sz);
 195        unsigned pdshift = PGDIR_SHIFT;
 196
 197        addr &= ~(sz-1);
 198        pg = pgd_offset(mm, addr);
 199
 200        if (pshift == PGDIR_SHIFT)
 201                /* 16GB huge page */
 202                return (pte_t *) pg;
 203        else if (pshift > PUD_SHIFT)
 204                /*
  205                 * We need to use a hugepd table
 206                 */
 207                hpdp = (hugepd_t *)pg;
 208        else {
 209                pdshift = PUD_SHIFT;
 210                pu = pud_alloc(mm, pg, addr);
 211                if (pshift == PUD_SHIFT)
 212                        return (pte_t *)pu;
 213                else if (pshift > PMD_SHIFT)
 214                        hpdp = (hugepd_t *)pu;
 215                else {
 216                        pdshift = PMD_SHIFT;
 217                        pm = pmd_alloc(mm, pu, addr);
 218                        if (pshift == PMD_SHIFT)
 219                                /* 16MB hugepage */
 220                                return (pte_t *)pm;
 221                        else
 222                                hpdp = (hugepd_t *)pm;
 223                }
 224        }
 225        if (!hpdp)
 226                return NULL;
 227
 228        BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));
 229
 230        if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
 231                return NULL;
 232
 233        return hugepte_offset(hpdp, addr, pdshift);
 234}
 235
 236#else
 237
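/*
 * Non-BOOK3S-64 variant: huge pages always sit behind a hugepd table, which
 * may hang off the PGD, PUD or PMD level as determined by HUGEPD_PGD_SHIFT
 * and HUGEPD_PUD_SHIFT for this subarch.
 */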
 238pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
 239{
 240        pgd_t *pg;
 241        pud_t *pu;
 242        pmd_t *pm;
 243        hugepd_t *hpdp = NULL;
 244        unsigned pshift = __ffs(sz);
 245        unsigned pdshift = PGDIR_SHIFT;
 246
 247        addr &= ~(sz-1);
 248
 249        pg = pgd_offset(mm, addr);
 250
 251        if (pshift >= HUGEPD_PGD_SHIFT) {
 252                hpdp = (hugepd_t *)pg;
 253        } else {
 254                pdshift = PUD_SHIFT;
 255                pu = pud_alloc(mm, pg, addr);
 256                if (pshift >= HUGEPD_PUD_SHIFT) {
 257                        hpdp = (hugepd_t *)pu;
 258                } else {
 259                        pdshift = PMD_SHIFT;
 260                        pm = pmd_alloc(mm, pu, addr);
 261                        hpdp = (hugepd_t *)pm;
 262                }
 263        }
 264
 265        if (!hpdp)
 266                return NULL;
 267
 268        BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));
 269
 270        if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
 271                return NULL;
 272
 273        return hugepte_offset(hpdp, addr, pdshift);
 274}
 275#endif
 276
 277#ifdef CONFIG_PPC_FSL_BOOK3E
 278/* Build list of addresses of gigantic pages.  This function is used in early
  279 * boot before the buddy or bootmem allocator is set up.
 280 */
 281void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
 282{
 283        unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
 284        int i;
 285
 286        if (addr == 0)
 287                return;
 288
 289        gpage_freearray[idx].nr_gpages = number_of_pages;
 290
 291        for (i = 0; i < number_of_pages; i++) {
 292                gpage_freearray[idx].gpage_list[i] = addr;
 293                addr += page_size;
 294        }
 295}
 296
 297/*
 298 * Moves the gigantic page addresses from the temporary list to the
 299 * huge_boot_pages list.
 300 */
 301int alloc_bootmem_huge_page(struct hstate *hstate)
 302{
 303        struct huge_bootmem_page *m;
 304        int idx = shift_to_mmu_psize(huge_page_shift(hstate));
 305        int nr_gpages = gpage_freearray[idx].nr_gpages;
 306
 307        if (nr_gpages == 0)
 308                return 0;
 309
 310#ifdef CONFIG_HIGHMEM
 311        /*
 312         * If gpages can be in highmem we can't use the trick of storing the
 313         * data structure in the page; allocate space for this
 314         */
 315        m = alloc_bootmem(sizeof(struct huge_bootmem_page));
 316        m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
 317#else
 318        m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
 319#endif
 320
 321        list_add(&m->list, &huge_boot_pages);
 322        gpage_freearray[idx].nr_gpages = nr_gpages;
 323        gpage_freearray[idx].gpage_list[nr_gpages] = 0;
 324        m->hstate = hstate;
 325
 326        return 1;
 327}
 328/*
 329 * Scan the command line hugepagesz= options for gigantic pages; store those in
 330 * a list that we use to allocate the memory once all options are parsed.
 331 */
 332
 333unsigned long gpage_npages[MMU_PAGE_COUNT];
 334
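/*
 * For example (sizes purely illustrative), a command line containing
 * "hugepagesz=4G hugepages=4 hugepagesz=1G hugepages=16" would record 4 and
 * 16 in the gpage_npages slots for the 4G and 1G page sizes respectively,
 * provided those sizes are supported by the MMU.
 */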
 335static int __init do_gpage_early_setup(char *param, char *val,
 336                                       const char *unused)
 337{
 338        static phys_addr_t size;
 339        unsigned long npages;
 340
 341        /*
 342         * The hugepagesz and hugepages cmdline options are interleaved.  We
 343         * use the size variable to keep track of whether or not this was done
 344         * properly and skip over instances where it is incorrect.  Other
 345         * command-line parsing code will issue warnings, so we don't need to.
 346         *
 347         */
 348        if ((strcmp(param, "default_hugepagesz") == 0) ||
 349            (strcmp(param, "hugepagesz") == 0)) {
 350                size = memparse(val, NULL);
 351        } else if (strcmp(param, "hugepages") == 0) {
 352                if (size != 0) {
 353                        if (sscanf(val, "%lu", &npages) <= 0)
 354                                npages = 0;
 355                        gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
 356                        size = 0;
 357                }
 358        }
 359        return 0;
 360}
 361
 362
 363/*
 364 * This function allocates physical space for pages that are larger than the
 365 * buddy allocator can handle.  We want to allocate these in highmem because
 366 * the amount of lowmem is limited.  This means that this function MUST be
  367 * called before lowmem_end_addr is set up in MMU_init() in order for the
  368 * lmb (memblock) allocation to be able to grab highmem.
 369 */
 370void __init reserve_hugetlb_gpages(void)
 371{
 372        static __initdata char cmdline[COMMAND_LINE_SIZE];
 373        phys_addr_t size, base;
 374        int i;
 375
 376        strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
 377        parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0,
 378                        &do_gpage_early_setup);
 379
 380        /*
 381         * Walk gpage list in reverse, allocating larger page sizes first.
 382         * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
 383         * When we reach the point in the list where pages are no longer
 384         * considered gpages, we're done.
 385         */
 386        for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
 387                if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
 388                        continue;
 389                else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
 390                        break;
 391
 392                size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
 393                base = memblock_alloc_base(size * gpage_npages[i], size,
 394                                           MEMBLOCK_ALLOC_ANYWHERE);
 395                add_gpage(base, size, gpage_npages[i]);
 396        }
 397}
 398
 399#else /* !PPC_FSL_BOOK3E */
 400
 401/* Build list of addresses of gigantic pages.  This function is used in early
  402 * boot before the buddy or bootmem allocator is set up.
 403 */
 404void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
 405{
 406        if (!addr)
 407                return;
 408        while (number_of_pages > 0) {
 409                gpage_freearray[nr_gpages] = addr;
 410                nr_gpages++;
 411                number_of_pages--;
 412                addr += page_size;
 413        }
 414}
 415
 416/* Moves the gigantic page addresses from the temporary list to the
 417 * huge_boot_pages list.
 418 */
 419int alloc_bootmem_huge_page(struct hstate *hstate)
 420{
 421        struct huge_bootmem_page *m;
 422        if (nr_gpages == 0)
 423                return 0;
 424        m = phys_to_virt(gpage_freearray[--nr_gpages]);
 425        gpage_freearray[nr_gpages] = 0;
 426        list_add(&m->list, &huge_boot_pages);
 427        m->hstate = hstate;
 428        return 1;
 429}
 430#endif
 431
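/*
 * Hugepage PMD sharing is not implemented on powerpc, so there is never
 * anything to unshare.
 */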
 432int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 433{
 434        return 0;
 435}
 436
 437#ifdef CONFIG_PPC_FSL_BOOK3E
 438#define HUGEPD_FREELIST_SIZE \
 439        ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))
 440
 441struct hugepd_freelist {
 442        struct rcu_head rcu;
 443        unsigned int index;
 444        void *ptes[0];
 445};
 446
 447static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);
 448
 449static void hugepd_free_rcu_callback(struct rcu_head *head)
 450{
 451        struct hugepd_freelist *batch =
 452                container_of(head, struct hugepd_freelist, rcu);
 453        unsigned int i;
 454
 455        for (i = 0; i < batch->index; i++)
 456                kmem_cache_free(hugepte_cache, batch->ptes[i]);
 457
 458        free_page((unsigned long)batch);
 459}
 460
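/*
 * Free a hugepte table.  If other CPUs may still be walking the page tables,
 * batch the tables up and free them from an RCU-sched callback so that a
 * concurrent lockless walker never sees the memory reused underneath it;
 * otherwise free back to the kmem cache immediately.
 */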
 461static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
 462{
 463        struct hugepd_freelist **batchp;
 464
 465        batchp = &__get_cpu_var(hugepd_freelist_cur);
 466
 467        if (atomic_read(&tlb->mm->mm_users) < 2 ||
 468            cpumask_equal(mm_cpumask(tlb->mm),
 469                          cpumask_of(smp_processor_id()))) {
 470                kmem_cache_free(hugepte_cache, hugepte);
 471                return;
 472        }
 473
 474        if (*batchp == NULL) {
 475                *batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
 476                (*batchp)->index = 0;
 477        }
 478
 479        (*batchp)->ptes[(*batchp)->index++] = hugepte;
 480        if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
 481                call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
 482                *batchp = NULL;
 483        }
 484}
 485#endif
 486
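/*
 * Clear the hugepd entry (or, on FSL, all of the entries that map this huge
 * page) and free the hugepte table it pointed to, provided the surrounding
 * [floor, ceiling) range allows us to do so.
 */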
 487static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
 488                              unsigned long start, unsigned long end,
 489                              unsigned long floor, unsigned long ceiling)
 490{
 491        pte_t *hugepte = hugepd_page(*hpdp);
 492        int i;
 493
 494        unsigned long pdmask = ~((1UL << pdshift) - 1);
 495        unsigned int num_hugepd = 1;
 496
 497#ifdef CONFIG_PPC_FSL_BOOK3E
 498        /* Note: On fsl the hpdp may be the first of several */
 499        num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
 500#else
 501        unsigned int shift = hugepd_shift(*hpdp);
 502#endif
 503
 504        start &= pdmask;
 505        if (start < floor)
 506                return;
 507        if (ceiling) {
 508                ceiling &= pdmask;
  509                if (!ceiling)
 510                        return;
 511        }
 512        if (end - 1 > ceiling - 1)
 513                return;
 514
 515        for (i = 0; i < num_hugepd; i++, hpdp++)
 516                hpdp->pd = 0;
 517
 518        tlb->need_flush = 1;
 519
 520#ifdef CONFIG_PPC_FSL_BOOK3E
 521        hugepd_free(tlb, hugepte);
 522#else
 523        pgtable_free_tlb(tlb, hugepte, pdshift - shift);
 524#endif
 525}
 526
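/*
 * The following two walkers mirror free_pmd_range()/free_pud_range() in
 * mm/memory.c, but at each level they must also recognise hugepd entries and
 * hand those to free_hugepd_range() instead of descending further.
 */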
 527static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 528                                   unsigned long addr, unsigned long end,
 529                                   unsigned long floor, unsigned long ceiling)
 530{
 531        pmd_t *pmd;
 532        unsigned long next;
 533        unsigned long start;
 534
 535        start = addr;
 536        do {
 537                pmd = pmd_offset(pud, addr);
 538                next = pmd_addr_end(addr, end);
 539                if (!is_hugepd(pmd)) {
 540                        /*
  541                         * If it is not a hugepd pointer, we should have
  542                         * already found it cleared.
 543                         */
 544                        WARN_ON(!pmd_none_or_clear_bad(pmd));
 545                        continue;
 546                }
 547#ifdef CONFIG_PPC_FSL_BOOK3E
 548                /*
 549                 * Increment next by the size of the huge mapping since
 550                 * there may be more than one entry at this level for a
 551                 * single hugepage, but all of them point to
 552                 * the same kmem cache that holds the hugepte.
 553                 */
 554                next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
 555#endif
 556                free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
 557                                  addr, next, floor, ceiling);
 558        } while (addr = next, addr != end);
 559
 560        start &= PUD_MASK;
 561        if (start < floor)
 562                return;
 563        if (ceiling) {
 564                ceiling &= PUD_MASK;
 565                if (!ceiling)
 566                        return;
 567        }
 568        if (end - 1 > ceiling - 1)
 569                return;
 570
 571        pmd = pmd_offset(pud, start);
 572        pud_clear(pud);
 573        pmd_free_tlb(tlb, pmd, start);
 574}
 575
 576static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 577                                   unsigned long addr, unsigned long end,
 578                                   unsigned long floor, unsigned long ceiling)
 579{
 580        pud_t *pud;
 581        unsigned long next;
 582        unsigned long start;
 583
 584        start = addr;
 585        do {
 586                pud = pud_offset(pgd, addr);
 587                next = pud_addr_end(addr, end);
 588                if (!is_hugepd(pud)) {
 589                        if (pud_none_or_clear_bad(pud))
 590                                continue;
 591                        hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
 592                                               ceiling);
 593                } else {
 594#ifdef CONFIG_PPC_FSL_BOOK3E
 595                        /*
 596                         * Increment next by the size of the huge mapping since
 597                         * there may be more than one entry at this level for a
 598                         * single hugepage, but all of them point to
 599                         * the same kmem cache that holds the hugepte.
 600                         */
 601                        next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
 602#endif
 603                        free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
 604                                          addr, next, floor, ceiling);
 605                }
 606        } while (addr = next, addr != end);
 607
 608        start &= PGDIR_MASK;
 609        if (start < floor)
 610                return;
 611        if (ceiling) {
 612                ceiling &= PGDIR_MASK;
 613                if (!ceiling)
 614                        return;
 615        }
 616        if (end - 1 > ceiling - 1)
 617                return;
 618
 619        pud = pud_offset(pgd, start);
 620        pgd_clear(pgd);
 621        pud_free_tlb(tlb, pud, start);
 622}
 623
 624/*
 625 * This function frees user-level page tables of a process.
 626 *
 627 * Must be called with pagetable lock held.
 628 */
 629void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 630                            unsigned long addr, unsigned long end,
 631                            unsigned long floor, unsigned long ceiling)
 632{
 633        pgd_t *pgd;
 634        unsigned long next;
 635
 636        /*
 637         * Because there are a number of different possible pagetable
 638         * layouts for hugepage ranges, we limit knowledge of how
 639         * things should be laid out to the allocation path
 640         * (huge_pte_alloc(), above).  Everything else works out the
 641         * structure as it goes from information in the hugepd
 642         * pointers.  That means that we can't here use the
 643         * optimization used in the normal page free_pgd_range(), of
 644         * checking whether we're actually covering a large enough
 645         * range to have to do anything at the top level of the walk
 646         * instead of at the bottom.
 647         *
 648         * To make sense of this, you should probably go read the big
 649         * block comment at the top of the normal free_pgd_range(),
 650         * too.
 651         */
 652
 653        do {
 654                next = pgd_addr_end(addr, end);
 655                pgd = pgd_offset(tlb->mm, addr);
 656                if (!is_hugepd(pgd)) {
 657                        if (pgd_none_or_clear_bad(pgd))
 658                                continue;
 659                        hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
 660                } else {
 661#ifdef CONFIG_PPC_FSL_BOOK3E
 662                        /*
 663                         * Increment next by the size of the huge mapping since
 664                         * there may be more than one entry at the pgd level
 665                         * for a single hugepage, but all of them point to the
 666                         * same kmem cache that holds the hugepte.
 667                         */
 668                        next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
 669#endif
 670                        free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
 671                                          addr, next, floor, ceiling);
 672                }
 673        } while (addr = next, addr != end);
 674}
 675
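/*
 * Look up the struct page backing a huge mapping at @address.  Transparent
 * huge pages are rejected here (generic code handles those); for a genuine
 * hugetlb pte we return the subpage that corresponds to @address within the
 * huge page.
 */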
 676struct page *
 677follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 678{
 679        pte_t *ptep;
 680        struct page *page;
 681        unsigned shift;
 682        unsigned long mask;
 683        /*
 684         * Transparent hugepages are handled by generic code. We can skip them
 685         * here.
 686         */
 687        ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
 688
 689        /* Verify it is a huge page else bail. */
 690        if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep))
 691                return ERR_PTR(-EINVAL);
 692
 693        mask = (1UL << shift) - 1;
 694        page = pte_page(*ptep);
 695        if (page)
 696                page += (address & mask) / PAGE_SIZE;
 697
 698        return page;
 699}
 700
 701struct page *
 702follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 703                pmd_t *pmd, int write)
 704{
 705        BUG();
 706        return NULL;
 707}
 708
 709static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
 710                                      unsigned long sz)
 711{
 712        unsigned long __boundary = (addr + sz) & ~(sz-1);
 713        return (__boundary - 1 < end - 1) ? __boundary : end;
 714}
 715
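/*
 * get_user_pages_fast() helper: walk every hugepte underneath one hugepd
 * entry for the range [addr, end) and let gup_hugepte() take the page
 * references.  Returns 0 as soon as any pte fails, 1 if the whole range was
 * covered.
 */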
 716int gup_hugepd(hugepd_t *hugepd, unsigned pdshift,
 717               unsigned long addr, unsigned long end,
 718               int write, struct page **pages, int *nr)
 719{
 720        pte_t *ptep;
 721        unsigned long sz = 1UL << hugepd_shift(*hugepd);
 722        unsigned long next;
 723
 724        ptep = hugepte_offset(hugepd, addr, pdshift);
 725        do {
 726                next = hugepte_addr_end(addr, end, sz);
 727                if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
 728                        return 0;
 729        } while (ptep++, addr = next, addr != end);
 730
 731        return 1;
 732}
 733
 734#ifdef CONFIG_PPC_MM_SLICES
 735unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 736                                        unsigned long len, unsigned long pgoff,
 737                                        unsigned long flags)
 738{
 739        struct hstate *hstate = hstate_file(file);
 740        int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
 741
 742        return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
 743}
 744#endif
 745
 746unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
 747{
 748#ifdef CONFIG_PPC_MM_SLICES
 749        unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
 750
 751        return 1UL << mmu_psize_to_shift(psize);
 752#else
 753        if (!is_vm_hugetlb_page(vma))
 754                return PAGE_SIZE;
 755
 756        return huge_page_size(hstate_vma(vma));
 757#endif
 758}
 759
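/*
 * The FSL Book3E TLB encodes page sizes as powers of 4, i.e. even powers of
 * two: 4K, 64K, 1M, 16M, 256M, ... qualify, whereas e.g. 8K or 32K do not.
 */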
 760static inline bool is_power_of_4(unsigned long x)
 761{
 762        if (is_power_of_2(x))
 763                return (__ilog2(x) % 2) ? false : true;
 764        return false;
 765}
 766
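/*
 * Validate a requested huge page size against what the hardware and the
 * pagetable/slice layout can support, then register an hstate for it if one
 * does not already exist.
 */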
 767static int __init add_huge_page_size(unsigned long long size)
 768{
 769        int shift = __ffs(size);
 770        int mmu_psize;
 771
 772        /* Check that it is a page size supported by the hardware and
 773         * that it fits within pagetable and slice limits. */
 774#ifdef CONFIG_PPC_FSL_BOOK3E
 775        if ((size < PAGE_SIZE) || !is_power_of_4(size))
 776                return -EINVAL;
 777#else
 778        if (!is_power_of_2(size)
 779            || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
 780                return -EINVAL;
 781#endif
 782
 783        if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
 784                return -EINVAL;
 785
 786#ifdef CONFIG_SPU_FS_64K_LS
 787        /* Disable support for 64K huge pages when 64K SPU local store
 788         * support is enabled as the current implementation conflicts.
 789         */
 790        if (shift == PAGE_SHIFT_64K)
 791                return -EINVAL;
 792#endif /* CONFIG_SPU_FS_64K_LS */
 793
 794        BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);
 795
 796        /* Return if huge page size has already been setup */
 797        if (size_to_hstate(size))
 798                return 0;
 799
 800        hugetlb_add_hstate(shift - PAGE_SHIFT);
 801
 802        return 0;
 803}
 804
 805static int __init hugepage_setup_sz(char *str)
 806{
 807        unsigned long long size;
 808
 809        size = memparse(str, &str);
 810
 811        if (add_huge_page_size(size) != 0)
  812                printk(KERN_WARNING "Invalid huge page size specified (%llu)\n", size);
 813
 814        return 1;
 815}
 816__setup("hugepagesz=", hugepage_setup_sz);
 817
 818#ifdef CONFIG_PPC_FSL_BOOK3E
 819struct kmem_cache *hugepte_cache;
 820static int __init hugetlbpage_init(void)
 821{
 822        int psize;
 823
 824        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
 825                unsigned shift;
 826
 827                if (!mmu_psize_defs[psize].shift)
 828                        continue;
 829
 830                shift = mmu_psize_to_shift(psize);
 831
 832                /* Don't treat normal page sizes as huge... */
 833                if (shift != PAGE_SHIFT)
 834                        if (add_huge_page_size(1ULL << shift) < 0)
 835                                continue;
 836        }
 837
 838        /*
 839         * Create a kmem cache for hugeptes.  The bottom bits in the pte have
 840         * size information encoded in them, so align them to allow this
 841         */
  842        hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t),
 843                                           HUGEPD_SHIFT_MASK + 1, 0, NULL);
 844        if (hugepte_cache == NULL)
 845                panic("%s: Unable to create kmem cache for hugeptes\n",
 846                      __func__);
 847
 848        /* Default hpage size = 4M */
 849        if (mmu_psize_defs[MMU_PAGE_4M].shift)
 850                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
 851        else
 852                panic("%s: Unable to set default huge page size\n", __func__);
 853
 854
 855        return 0;
 856}
 857#else
 858static int __init hugetlbpage_init(void)
 859{
 860        int psize;
 861
 862        if (!mmu_has_feature(MMU_FTR_16M_PAGE))
 863                return -ENODEV;
 864
 865        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
 866                unsigned shift;
 867                unsigned pdshift;
 868
 869                if (!mmu_psize_defs[psize].shift)
 870                        continue;
 871
 872                shift = mmu_psize_to_shift(psize);
 873
 874                if (add_huge_page_size(1ULL << shift) < 0)
 875                        continue;
 876
 877                if (shift < PMD_SHIFT)
 878                        pdshift = PMD_SHIFT;
 879                else if (shift < PUD_SHIFT)
 880                        pdshift = PUD_SHIFT;
 881                else
 882                        pdshift = PGDIR_SHIFT;
 883                /*
  884                 * If pdshift and shift are equal, we don't use the
  885                 * pgtable cache for the hugepd.
 886                 */
 887                if (pdshift != shift) {
 888                        pgtable_cache_add(pdshift - shift, NULL);
 889                        if (!PGT_CACHE(pdshift - shift))
 890                                panic("hugetlbpage_init(): could not create "
 891                                      "pgtable cache for %d bit pagesize\n", shift);
 892                }
 893        }
 894
 895        /* Set default large page size. Currently, we pick 16M or 1M
 896         * depending on what is available
 897         */
 898        if (mmu_psize_defs[MMU_PAGE_16M].shift)
 899                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
 900        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
 901                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
 902
 903        return 0;
 904}
 905#endif
 906module_init(hugetlbpage_init);
 907
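/*
 * Flush the data and instruction caches for every subpage of a compound huge
 * page, mapping each subpage with kmap_atomic() when it may live in highmem.
 */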
 908void flush_dcache_icache_hugepage(struct page *page)
 909{
 910        int i;
 911        void *start;
 912
 913        BUG_ON(!PageCompound(page));
 914
 915        for (i = 0; i < (1UL << compound_order(page)); i++) {
 916                if (!PageHighMem(page)) {
 917                        __flush_dcache_icache(page_address(page+i));
 918                } else {
 919                        start = kmap_atomic(page+i);
 920                        __flush_dcache_icache(start);
 921                        kunmap_atomic(start);
 922                }
 923        }
 924}
 925
 926#endif /* CONFIG_HUGETLB_PAGE */
 927
 928/*
 929 * We have 4 cases for pgds and pmds:
 930 * (1) invalid (all zeroes)
 931 * (2) pointer to next table, as normal; bottom 6 bits == 0
 932 * (3) leaf pte for huge page, bottom two bits != 00
 933 * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
 934 *
  935 * So long as we atomically load page table pointers we are safe against
  936 * teardown, and we can follow the address down to the page and take a ref on it.
 937 */
 938
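/*
 * Walk the page tables for @ea and return a pointer to the (huge) pte, with
 * *shift set to the mapping's page-size shift (0 for a normal page).  The
 * caller must prevent the tables from being freed while the walk and any use
 * of the returned pointer are in progress; as the comments below note, this
 * is done by running with interrupts disabled, which holds off the RCU
 * page-table free.
 */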
 939pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift)
 940{
 941        pgd_t pgd, *pgdp;
 942        pud_t pud, *pudp;
 943        pmd_t pmd, *pmdp;
 944        pte_t *ret_pte;
 945        hugepd_t *hpdp = NULL;
 946        unsigned pdshift = PGDIR_SHIFT;
 947
 948        if (shift)
 949                *shift = 0;
 950
 951        pgdp = pgdir + pgd_index(ea);
 952        pgd  = ACCESS_ONCE(*pgdp);
 953        /*
  954         * Always operate on the local stack value. This makes sure the
  955         * value doesn't get updated by a parallel THP split/collapse,
  956         * page fault or page unmap. The returned pte_t * is still not
  957         * stable, so the caller must re-check it for the above conditions.
 958         */
 959        if (pgd_none(pgd))
 960                return NULL;
 961        else if (pgd_huge(pgd)) {
 962                ret_pte = (pte_t *) pgdp;
 963                goto out;
 964        } else if (is_hugepd(&pgd))
 965                hpdp = (hugepd_t *)&pgd;
 966        else {
 967                /*
  968                 * Even if we end up with an unmap, the pgtable will not
  969                 * be freed, because we do an RCU free and we are running
  970                 * here with interrupts disabled.
 971                 */
 972                pdshift = PUD_SHIFT;
 973                pudp = pud_offset(&pgd, ea);
 974                pud  = ACCESS_ONCE(*pudp);
 975
 976                if (pud_none(pud))
 977                        return NULL;
 978                else if (pud_huge(pud)) {
 979                        ret_pte = (pte_t *) pudp;
 980                        goto out;
 981                } else if (is_hugepd(&pud))
 982                        hpdp = (hugepd_t *)&pud;
 983                else {
 984                        pdshift = PMD_SHIFT;
 985                        pmdp = pmd_offset(&pud, ea);
 986                        pmd  = ACCESS_ONCE(*pmdp);
 987                        /*
  988                         * A hugepage collapse is captured by pmd_none,
  989                         * because it marks the pmd none and does an hpte
  990                         * invalidate.
  991                         *
  992                         * A hugepage split is captured by pmd_trans_splitting,
  993                         * because we mark the pmd as trans splitting and do
  994                         * an hpte invalidate.
 995                         */
 996                        if (pmd_none(pmd) || pmd_trans_splitting(pmd))
 997                                return NULL;
 998
 999                        if (pmd_huge(pmd) || pmd_large(pmd)) {
1000                                ret_pte = (pte_t *) pmdp;
1001                                goto out;
1002                        } else if (is_hugepd(&pmd))
1003                                hpdp = (hugepd_t *)&pmd;
1004                        else
1005                                return pte_offset_kernel(&pmd, ea);
1006                }
1007        }
1008        if (!hpdp)
1009                return NULL;
1010
1011        ret_pte = hugepte_offset(hpdp, ea, pdshift);
1012        pdshift = hugepd_shift(*hpdp);
1013out:
1014        if (shift)
1015                *shift = pdshift;
1016        return ret_pte;
1017}
1018EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
1019
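/*
 * Lockless get_user_pages_fast() core for one hugepte: check that the pte is
 * present (and writable if need be), take a speculative reference on the
 * head page covering every subpage in the range, then re-check that the pte
 * did not change underneath us and back out if it did.
 */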
1020int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
1021                unsigned long end, int write, struct page **pages, int *nr)
1022{
1023        unsigned long mask;
1024        unsigned long pte_end;
1025        struct page *head, *page, *tail;
1026        pte_t pte;
1027        int refs;
1028
1029        pte_end = (addr + sz) & ~(sz-1);
1030        if (pte_end < end)
1031                end = pte_end;
1032
1033        pte = ACCESS_ONCE(*ptep);
1034        mask = _PAGE_PRESENT | _PAGE_USER;
1035        if (write)
1036                mask |= _PAGE_RW;
1037
1038        if ((pte_val(pte) & mask) != mask)
1039                return 0;
1040
1041#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1042        /*
1043         * check for splitting here
1044         */
1045        if (pmd_trans_splitting(pte_pmd(pte)))
1046                return 0;
1047#endif
1048
1049        /* hugepages are never "special" */
1050        VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
1051
1052        refs = 0;
1053        head = pte_page(pte);
1054
1055        page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
1056        tail = page;
1057        do {
1058                VM_BUG_ON(compound_head(page) != head);
1059                pages[*nr] = page;
1060                (*nr)++;
1061                page++;
1062                refs++;
1063        } while (addr += PAGE_SIZE, addr != end);
1064
1065        if (!page_cache_add_speculative(head, refs)) {
1066                *nr -= refs;
1067                return 0;
1068        }
1069
1070        if (unlikely(pte_val(pte) != pte_val(*ptep))) {
1071                /* Could be optimized better */
1072                *nr -= refs;
1073                while (refs--)
1074                        put_page(head);
1075                return 0;
1076        }
1077
1078        /*
 1079         * Any tail pages need their mapcount reference taken before we
1080         * return.
1081         */
1082        while (refs--) {
1083                if (PageTail(tail))
1084                        get_huge_page_tail(tail);
1085                tail++;
1086        }
1087
1088        return 1;
1089}
1090