linux/arch/x86/mm/hugetlbpage.c
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#if 0   /* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        unsigned long vpfn;
        pte_t *pte;
        struct page *page;
        struct vm_area_struct *vma;

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address);

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        /* Index into the compound page by base-page frame within the huge page. */
        vpfn = address / PAGE_SIZE;
        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE / PAGE_SIZE)];

        WARN_ON(!PageHead(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

#else

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb-related entry, that is, a
 * normal hugetlb entry or a non-present (migration or hwpoisoned)
 * hugetlb entry.  Otherwise, returns 0.
 */
int pmd_huge(pmd_t pmd)
{
        return !pmd_none(pmd) &&
                (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}
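
/*
 * For illustration: a present 2MB mapping has both _PAGE_PRESENT and
 * _PAGE_PSE set, so the masked value differs from _PAGE_PRESENT and
 * pmd_huge() returns 1.  A present pointer to a next-level page table
 * has only _PAGE_PRESENT set and returns 0, while a non-none migration
 * or hwpoison entry has _PAGE_PRESENT clear and also returns 1.
 */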

int pud_huge(pud_t pud)
{
        return !!(pud_val(pud) & _PAGE_PSE);
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.low_limit = current->mm->mmap_legacy_base;
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}
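
/*
 * A note on the alignment math above, assuming a 2MB hstate for
 * concreteness: huge_page_mask(h) is ~(2MB - 1), so
 * PAGE_MASK & ~huge_page_mask(h) is 0x1ff000, i.e. exactly the
 * page-granular bits that must be zero for vm_unmapped_area() to
 * return a 2MB-aligned address.
 */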

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;
        unsigned long addr;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = current->mm->mmap_base;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here.  This scenario
         * can happen with large stack limits and large mmap()
         * allocations.  vm_unmapped_area() returns a page-aligned
         * address on success and a negative error code on failure,
         * so a value with low bits set can only be an error.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}
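
/*
 * Sketch of how this path is reached from userspace (the mount point
 * and sizes below are illustrative only):
 *
 *      fd = open("/mnt/huge/file", O_CREAT | O_RDWR, 0600);
 *      p = mmap(NULL, 4UL << 21, PROT_READ | PROT_WRITE,
 *               MAP_SHARED, fd, 0);
 *
 * With a NULL hint and no MAP_FIXED, mmap() invokes the hugetlbfs
 * ->get_unmapped_area hook, i.e. hugetlb_get_unmapped_area() above,
 * which picks the bottom-up or top-down search to match the process's
 * regular mmap layout.
 */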
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
{
        unsigned long ps = memparse(opt, &opt);
        if (ps == PMD_SIZE) {
                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        } else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        } else {
                hugetlb_bad_size();
                printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
                        ps >> 20);
                return 0;
        }
        return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
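
/*
 * Example (kernel command line): "hugepagesz=1G hugepages=4" makes the
 * parser above register the 1GB (PUD-sized) hstate, provided the CPU
 * advertises GB pages; the subsequent "hugepages=4" reservation is then
 * handled by the generic hugetlb code.  "hugepagesz=2M" selects the
 * PMD-sized hstate instead.
 */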

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
static __init int gigantic_pages_init(void)
{
        /* With compaction or CMA we can allocate gigantic pages at runtime */
        if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        return 0;
}
arch_initcall(gigantic_pages_init);
#endif
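
/*
 * Without compaction or CMA the kernel has no way to assemble a
 * physically contiguous 1GB run at runtime, so gigantic pages can then
 * only be reserved at boot via "hugepagesz=1G hugepages=N" above.
 */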
#endif /* CONFIG_X86_64 */