linux/arch/tile/mm/hugetlbpage.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * TILE Huge TLB Page Support for Kernel.
 * Taken from i386 hugetlb implementation:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>

#ifdef CONFIG_HUGETLB_SUPER_PAGES

/*
 * Provide an additional huge page size (in addition to the regular default
 * huge page size) if no "hugepagesz" arguments are specified.
 * Note that it must be smaller than the default huge page size so
 * that it's possible to allocate them on demand from the buddy allocator.
 * You can change this to 64K (on a 16K build), 256K, 1M, or 4M,
 * or not define it at all.
 */
#define ADDITIONAL_HUGE_SIZE (1024 * 1024UL)

/* "Extra" page-size multipliers, one per level of the page table. */
int huge_shift[HUGE_SHIFT_ENTRIES] = {
#ifdef ADDITIONAL_HUGE_SIZE
#define ADDITIONAL_HUGE_SHIFT __builtin_ctzl(ADDITIONAL_HUGE_SIZE / PAGE_SIZE)
        [HUGE_SHIFT_PAGE] = ADDITIONAL_HUGE_SHIFT
#endif
};

#endif

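/*
 * Allocate (if necessary) and return the page-table slot that should hold
 * a huge PTE of size "sz" mapping "addr": the PUD slot for PGDIR-sized
 * (and super-PGDIR) pages, the PMD slot for PMD-sized (and super-PMD)
 * pages, and an ordinary PTE slot for smaller "super" page sizes.
 */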
pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;

        addr &= -sz;   /* Mask off any low bits in the address. */

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);

#ifdef CONFIG_HUGETLB_SUPER_PAGES
        if (sz >= PGDIR_SIZE) {
                BUG_ON(sz != PGDIR_SIZE &&
                       sz != PGDIR_SIZE << huge_shift[HUGE_SHIFT_PGDIR]);
                return (pte_t *)pud;
        } else {
                pmd_t *pmd = pmd_alloc(mm, pud, addr);
                if (sz >= PMD_SIZE) {
                        BUG_ON(sz != PMD_SIZE &&
                               sz != (PMD_SIZE << huge_shift[HUGE_SHIFT_PMD]));
                        return (pte_t *)pmd;
                } else {
                        if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
                                panic("Unexpected page size %#lx\n", sz);
                        return pte_alloc_map(mm, NULL, pmd, addr);
                }
        }
#else
        BUG_ON(sz != PMD_SIZE);
        return (pte_t *) pmd_alloc(mm, pud, addr);
#endif
}

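/*
 * Return the PTE at "index", but if that entry is not present and this
 * level has an extra "super" shift configured, look instead at the first
 * entry of the enclosing super-page group; if that entry is a present
 * super PTE, it maps this address too, so return it instead.
 */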
static pte_t *get_pte(pte_t *base, int index, int level)
{
        pte_t *ptep = base + index;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
        if (!pte_present(*ptep) && huge_shift[level] != 0) {
                unsigned long mask = -1UL << huge_shift[level];
                pte_t *super_ptep = base + (index & mask);
                pte_t pte = *super_ptep;
                if (pte_present(pte) && pte_super(pte))
                        ptep = super_ptep;
        }
#endif
        return ptep;
}

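/*
 * Walk the page table for "addr" and return a pointer to the huge PTE
 * mapping it, or NULL if the address is not covered by a huge page.
 * Each level is probed via get_pte() so that "super" entries are found
 * along with the natural PGDIR- and PMD-sized huge entries.
 */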
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
        pte_t *pte;
#endif

        /* Get the top-level page table entry. */
        pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);

        /* We don't have four levels. */
        pud = pud_offset(pgd, addr);
#ifndef __PAGETABLE_PUD_FOLDED
# error support fourth page table level
#endif
        if (!pud_present(*pud))
                return NULL;

        /* Check for an L0 huge PTE, if we have three levels. */
#ifndef __PAGETABLE_PMD_FOLDED
        if (pud_huge(*pud))
                return (pte_t *)pud;

        pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
                               pmd_index(addr), 1);
        if (!pmd_present(*pmd))
                return NULL;
#else
        pmd = pmd_offset(pud, addr);
#endif

        /* Check for an L1 huge PTE. */
        if (pmd_huge(*pmd))
                return (pte_t *)pmd;

#ifdef CONFIG_HUGETLB_SUPER_PAGES
        /* Check for an L2 huge PTE. */
        pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
        if (!pte_present(*pte))
                return NULL;
        if (pte_super(*pte))
                return pte;
#endif

        return NULL;
}

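/*
 * tile does not reserve a special virtual address range for huge pages,
 * so follow_huge_addr() just reports failure and the caller proceeds
 * with the normal page-table walk.
 */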
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write)
{
        return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
}

int pud_huge(pud_t pud)
{
        return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}

int pmd_huge_support(void)
{
        return 1;
}

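/*
 * Given a huge PMD or PUD entry, return the struct page of the base page
 * within that huge page which contains "address".
 */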
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pmd);
        if (page)
                page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
        return page;
}

struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                             pud_t *pud, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pud);
        if (page)
                page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
        return page;
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}

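/*
 * Arch-specific unmapped-area search for huge pages: scan bottom-up or
 * top-down to mirror the mm's normal mmap layout, using align_mask to
 * force huge-page alignment of the returned address.
 */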
#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;
        unsigned long addr;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = current->mm->mmap_base;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

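/*
 * Top-level entry point: validate the length, honor MAP_FIXED and any
 * usable hint address, then dispatch to the bottom-up or top-down search
 * depending on which layout this mm uses.
 */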
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (current->mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}
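
/*
 * For reference, this path is reached when user space asks for a huge-page
 * mapping, e.g. (illustrative user-space sketch; assumes huge pages have
 * been reserved, for instance via /proc/sys/vm/nr_hugepages):
 *
 *      size_t len = 16UL * 1024 * 1024;
 *      void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 *
 * The hugetlbfs file backing that mapping routes its get_unmapped_area()
 * call to the function above, which picks a suitably aligned address.
 */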
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_SUPER_PAGES
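/*
 * Register one huge page size from a "hugepagesz=" boot argument.  The
 * size must be a power of four and no larger than 64 GB, and it is mapped
 * onto a page-table level: PUD for sizes >= PUD_SIZE, PMD for sizes >=
 * PMD_SIZE, and PTE for sizes between PAGE_SIZE and PMD_SIZE.  A size
 * other than the level's natural size also needs hypervisor super-page
 * support, enabled via hv_set_pte_super_shift().
 */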
static __init int __setup_hugepagesz(unsigned long ps)
{
        int log_ps = __builtin_ctzl(ps);
        int level, base_shift;

        if ((1UL << log_ps) != ps || (log_ps & 1) != 0) {
                pr_warn("Not enabling %ld byte huge pages;"
                        " must be a power of four.\n", ps);
                return -EINVAL;
        }

        if (ps > 64*1024*1024*1024UL) {
                pr_warn("Not enabling %ld MB huge pages;"
                        " largest legal value is 64 GB.\n", ps >> 20);
                return -EINVAL;
        } else if (ps >= PUD_SIZE) {
                static long hv_jpage_size;
                if (hv_jpage_size == 0)
                        hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO);
                if (hv_jpage_size != PUD_SIZE) {
                        pr_warn("Not enabling >= %ld MB huge pages:"
                                " hypervisor reports size %ld\n",
                                PUD_SIZE >> 20, hv_jpage_size);
                        return -EINVAL;
                }
                level = 0;
                base_shift = PUD_SHIFT;
        } else if (ps >= PMD_SIZE) {
                level = 1;
                base_shift = PMD_SHIFT;
        } else if (ps > PAGE_SIZE) {
                level = 2;
                base_shift = PAGE_SHIFT;
        } else {
                pr_err("hugepagesz: huge page size %ld too small\n", ps);
                return -EINVAL;
        }

        if (log_ps != base_shift) {
                int shift_val = log_ps - base_shift;
                if (huge_shift[level] != 0) {
                        int old_shift = base_shift + huge_shift[level];
                        pr_warn("Not enabling %ld MB huge pages;"
                                " already have size %ld MB.\n",
                                ps >> 20, (1UL << old_shift) >> 20);
                        return -EINVAL;
                }
                if (hv_set_pte_super_shift(level, shift_val) != 0) {
                        pr_warn("Not enabling %ld MB huge pages;"
                                " no hypervisor support.\n", ps >> 20);
                        return -EINVAL;
                }
                printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20);
                huge_shift[level] = shift_val;
        }

        hugetlb_add_hstate(log_ps - PAGE_SHIFT);

        return 0;
}

static bool saw_hugepagesz;

static __init int setup_hugepagesz(char *opt)
{
        if (!saw_hugepagesz) {
                saw_hugepagesz = true;
                memset(huge_shift, 0, sizeof(huge_shift));
        }
        return __setup_hugepagesz(memparse(opt, NULL));
}
__setup("hugepagesz=", setup_hugepagesz);
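
/*
 * Example (illustrative): booting with "hugepagesz=16M hugepages=4" would
 * register a 16 MB huge page size in addition to the default, provided
 * 16 MB is larger than the base page size and, if it is not a natural
 * page-table level size, the hypervisor accepts the super-page shift.
 */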

#ifdef ADDITIONAL_HUGE_SIZE
/*
 * Provide an additional huge page size if no "hugepagesz" args are given.
 * In that case, all the cores have properly set up their hv super_shift
 * already, but we need to notify the hugetlb code to enable the
 * new huge page size from the Linux point of view.
 */
static __init int add_default_hugepagesz(void)
{
        if (!saw_hugepagesz) {
                BUILD_BUG_ON(ADDITIONAL_HUGE_SIZE >= PMD_SIZE ||
                             ADDITIONAL_HUGE_SIZE <= PAGE_SIZE);
                BUILD_BUG_ON((PAGE_SIZE << ADDITIONAL_HUGE_SHIFT) !=
                             ADDITIONAL_HUGE_SIZE);
                BUILD_BUG_ON(ADDITIONAL_HUGE_SHIFT & 1);
                hugetlb_add_hstate(ADDITIONAL_HUGE_SHIFT);
        }
        return 0;
}
arch_initcall(add_default_hugepagesz);
#endif

#endif /* CONFIG_HUGETLB_SUPER_PAGES */