linux/arch/x86/mm/hugetlbpage.c
// SPDX-License-Identifier: GPL-2.0
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>
#include <asm/mpx.h>

#if 0   /* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        struct page *page;
        struct vm_area_struct *vma;
        pte_t *pte;

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

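        /*
         * Index into the compound page: the offset of @address within
         * the huge page, counted in base pages.
         */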
        page = &pte_page(*pte)[(address & ~HPAGE_MASK) >> PAGE_SHIFT];

        WARN_ON(!PageHead(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

#else

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb related entry, that is, a
 * normal hugetlb entry or a non-present (migration or hwpoisoned)
 * hugetlb entry. Otherwise, it returns 0.
 */
int pmd_huge(pmd_t pmd)
{
        return !pmd_none(pmd) &&
                (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

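/*
 * pud_huge() returns 1 if @pud is a hugetlb entry, i.e. a 1GB page
 * mapping with the PSE bit set.
 */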
int pud_huge(pud_t pud)
{
        return !!(pud_val(pud) & _PAGE_PSE);
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
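        /* get_mmap_base(1) returns the legacy (bottom-up) mmap base. */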
        info.low_limit = get_mmap_base(1);

        /*
         * If the hint address is above DEFAULT_MAP_WINDOW, look for an
         * unmapped area in the full address space.
         */
        info.high_limit = in_compat_syscall() ?
                task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);

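        /* Require the returned address to be huge-page aligned. */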
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = get_mmap_base(0);

        /*
         * If the hint address is above DEFAULT_MAP_WINDOW, look for an
         * unmapped area in the full address space.
         */
        if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall())
                info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = TASK_SIZE_LOW;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;

        addr = mpx_unmapped_area_check(addr, len, flags);
        if (IS_ERR_VALUE(addr))
                return addr;

        if (len > TASK_SIZE)
                return -ENOMEM;

        /* No address checking. See comment at mmap_address_hint_valid() */
        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

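        /*
         * Honor the hint address if possible: align it down to the huge
         * page size and use it when the range is not already mapped.
         */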
        if (addr) {
                addr &= huge_page_mask(h);
                if (!mmap_address_hint_valid(addr, len))
                        goto get_unmapped_area;

                vma = find_vma(mm, addr);
                if (!vma || addr + len <= vm_start_gap(vma))
                        return addr;
        }

get_unmapped_area:
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
{
        unsigned long ps = memparse(opt, &opt);

        if (ps == PMD_SIZE) {
                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        } else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        } else {
                hugetlb_bad_size();
                printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
                        ps >> 20);
                return 0;
        }
        return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
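/*
 * Example (kernel command line): "hugepagesz=2M hugepages=64" reserves
 * 64 2MB pages at boot; "hugepagesz=1G" is accepted only on CPUs with
 * GB-page support (X86_FEATURE_GBPAGES).
 */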

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
static __init int gigantic_pages_init(void)
{
        /* With compaction or CMA we can allocate gigantic pages at runtime */
        if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        return 0;
}
arch_initcall(gigantic_pages_init);
#endif
#endif /* CONFIG_X86_64 */