linux/arch/ia64/mm/hugetlbpage.c
/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add NUMA support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/* log2 of the active huge page size; may be overridden at boot by hugetlb_setup_sz() below */
unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
EXPORT_SYMBOL(hpage_shift);

/*
 * Allocate (if necessary) and return the pte that maps the huge page
 * at @addr.  The address is first scaled down into page-sized units
 * (htlbpage_to_page) so that the standard pgd/pud/pmd walk builds the
 * right intermediate tables.
 */
pte_t *
huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        unsigned long taddr = htlbpage_to_page(addr);
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, taddr);
        pud = pud_alloc(mm, pgd, taddr);
        if (pud) {
                pmd = pmd_alloc(mm, pud, taddr);
                if (pmd)
                        pte = pte_alloc_map(mm, pmd, taddr);
        }
        return pte;
}

/*
 * Look up, without allocating, the pte for the huge page at @addr;
 * returns NULL if any level of the (scaled) walk is not present.
 */
pte_t *
huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        unsigned long taddr = htlbpage_to_page(addr);
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, taddr);
        if (pgd_present(*pgd)) {
                pud = pud_offset(pgd, taddr);
                if (pud_present(*pud)) {
                        pmd = pmd_offset(pud, taddr);
                        if (pmd_present(*pmd))
                                pte = pte_offset_map(pmd, taddr);
                }
        }

        return pte;
}

/* ia64 never shares huge-page pmds, so there is nothing to unshare */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}

/* mark the pte present; _PAGE_P is the ia64 present bit */
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * No actual preparation is needed; just check that the length and
 * address are huge-page aligned and that the address lies in the
 * huge-page region.
 */
int prepare_hugepage_range(struct file *file,
                        unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (addr & ~HPAGE_MASK)
                return -EINVAL;
        if (REGION_NUMBER(addr) != RGN_HPAGE)
                return -EINVAL;

        return 0;
}
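
/*
 * Worked example (illustrative, assuming the default 256 MB huge pages,
 * HPAGE_SHIFT = 28): addr = 0x8000000010000000 lies in RGN_HPAGE and is
 * 256 MB aligned, so a 256 MB request there passes; the same request at
 * 0x8000000008000000 fails with -EINVAL because that address is not
 * HPAGE_SIZE aligned.
 */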

/*
 * Translate a huge-page virtual address into its struct page, stepping
 * to the base page within the huge page that actually contains @addr.
 */
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
        struct page *page;
        pte_t *ptep;

        if (REGION_NUMBER(addr) != RGN_HPAGE)
                return ERR_PTR(-EINVAL);

        ptep = huge_pte_offset(mm, addr);
        if (!ptep || pte_none(*ptep))
                return NULL;
        page = pte_page(*ptep);
        page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
        return page;
}
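
/*
 * Worked example for the sub-page arithmetic above (illustrative,
 * assuming 16 KB base pages, PAGE_SHIFT = 14): for an address three
 * base pages into a huge page, (addr & ~HPAGE_MASK) >> PAGE_SHIFT == 3,
 * so the function returns the huge page's head page plus three.
 */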

/*
 * Huge pages on ia64 live in their own region and are mapped with
 * ordinary page-sized ptes at a scaled address, so the generic walkers
 * never encounter huge pmds or puds.
 */
int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
        return NULL;
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                        unsigned long addr, unsigned long end,
                        unsigned long floor, unsigned long ceiling)
{
        /*
         * This is called to free hugetlb page tables.
         *
         * The offset of these addresses from the base of the hugetlb
         * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
         * the standard free_pgd_range will free the right page tables.
         *
         * If floor and ceiling are also in the hugetlb region, they
         * must likewise be scaled down; but if outside, left unchanged.
         */
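
        /*
         * Worked example (illustrative, assuming 16 KB base pages,
         * PAGE_SHIFT = 14, and the default 256 MB huge pages,
         * HPAGE_SHIFT = 28): an address one huge page above
         * HPAGE_REGION_BASE has region offset 1 << 28;
         * htlbpage_to_page() shifts that right by 28 - 14 = 14, leaving
         * an offset of 1 << 14 -- exactly one base page, which is what
         * the scaled-down page tables expect.
         */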

        addr = htlbpage_to_page(addr);
        end  = htlbpage_to_page(end);
        if (REGION_NUMBER(floor) == RGN_HPAGE)
                floor = htlbpage_to_page(floor);
        if (REGION_NUMBER(ceiling) == RGN_HPAGE)
                ceiling = htlbpage_to_page(ceiling);

        free_pgd_range(tlb, addr, end, floor, ceiling);
}

/*
 * First-fit search for a free, HPAGE_SIZE-aligned range inside the
 * huge-page region, walking the mm's vma list linearly.
 */
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct vm_area_struct *vmm;

        if (len > RGN_MAP_LIMIT)
                return -ENOMEM;
        if (len & ~HPAGE_MASK)
                return -EINVAL;

        /* Handle MAP_FIXED */
        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        /* This code assumes that RGN_HPAGE != 0. */
        if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
                addr = HPAGE_REGION_BASE;
        else
                addr = ALIGN(addr, HPAGE_SIZE);
        for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
                /* At this point:  (!vmm || addr < vmm->vm_end). */
                if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
                        return -ENOMEM;
                if (!vmm || (addr + len) <= vmm->vm_start)
                        return addr;
                addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
        }
}
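
/*
 * Illustrative userspace sketch (hypothetical mount point) of how this
 * path is reached: mapping a file on a mounted hugetlbfs ends up here,
 * and the address handed back lies in RGN_HPAGE:
 *
 *        int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0644);
 *        void *p = mmap(NULL, 256UL << 20, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, 0);
 *        // on success p is HPAGE_SIZE aligned inside the huge-page region
 */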

static int __init hugetlb_setup_sz(char *str)
{
        u64 tr_pages;
        unsigned long long size;

        if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
                /*
                 * shouldn't happen, but just in case fall back to the
                 * architected set of supported translation sizes.
                 */
                tr_pages = 0x15557000UL;

        size = memparse(str, &str);
        if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
                size <= PAGE_SIZE ||
                size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
                printk(KERN_WARNING "Invalid huge page size specified\n");
                return 1;
        }

        hpage_shift = __ffs(size);
        /*
         * The boot cpu already executed ia64_mmu_init and set up the
         * huge-page region with HPAGE_SHIFT_DEFAULT; override the
         * region register here with the new page shift.
         */
        ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
        return 0;
}
early_param("hugepagesz", hugetlb_setup_sz);
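
/*
 * Illustrative usage (an assumption-laden sketch, not from this file):
 * booting with "hugepagesz=1G" on the kernel command line selects 1 GB
 * huge pages (hpage_shift = 30) in place of the default 256 MB,
 * provided PAL reports a 1 GB translation size and the size fits under
 * 1 << PAGE_SHIFT << MAX_ORDER; memparse() also accepts forms such as
 * "hugepagesz=256M" or a plain byte count like "hugepagesz=268435456".
 */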