linux/arch/sparc/mm/hugetlbpage.c
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff
 */
#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

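/* Bottom-up search used when the mm lays out mappings bottom-up
 * (64-bit tasks by default).  The search is first confined below
 * VA_EXCLUDE_START to stay clear of the hole in the sparc64 virtual
 * address space; if that fails and the task's address space extends
 * past the hole, we retry above VA_EXCLUDE_END.  The align_mask of
 * PAGE_MASK & ~HPAGE_MASK asks vm_unmapped_area() for an
 * HPAGE_SIZE-aligned result.
 */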
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
                                                        unsigned long addr,
                                                        unsigned long len,
                                                        unsigned long pgoff,
                                                        unsigned long flags)
{
        unsigned long task_size = TASK_SIZE;
        struct vm_unmapped_area_info info;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = min(task_size, VA_EXCLUDE_START);
        info.align_mask = PAGE_MASK & ~HPAGE_MASK;
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
                VM_BUG_ON(addr != -ENOMEM);
                info.low_limit = VA_EXCLUDE_END;
                info.high_limit = task_size;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

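/* Top-down variant, searching down from mm->mmap_base.  Only 32-bit
 * tasks use the top-down layout on sparc64, hence the BUG_ON below.
 */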
static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                                  const unsigned long len,
                                  const unsigned long pgoff,
                                  const unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;

        /* This should only ever run for 32-bit processes.  */
        BUG_ON(!test_thread_flag(TIF_32BIT));

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = mm->mmap_base;
        info.align_mask = PAGE_MASK & ~HPAGE_MASK;
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = STACK_TOP32;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

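/* Arch hook for hugetlbfs mappings: validate the request, honour
 * MAP_FIXED and a usable hint address, then fall through to the
 * bottom-up or top-down search depending on which layout this mm uses.
 */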
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long task_size = TASK_SIZE;

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;

        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (len > task_size)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}

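/* A hugepage here is not a single huge PMD/PUD entry but a run of
 * (1 << HUGETLB_PAGE_ORDER) ordinary PTEs (512 of them with the usual
 * sparc64 8KB base page and 4MB hugepage).  Allocate the page table
 * levels down to the PTE page and hand back a pointer to the first
 * sub-pte of the range.
 */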
pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        /* We must align the address, because our caller will run
         * set_huge_pte_at() on whatever we return, which writes out
         * all of the sub-ptes for the hugepage range.  So we have
         * to give it the first such sub-pte.
         */
        addr &= HPAGE_MASK;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (pud) {
                pmd = pmd_alloc(mm, pud, addr);
                if (pmd)
                        pte = pte_alloc_map(mm, NULL, pmd, addr);
        }
        return pte;
}

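/* Lookup-only counterpart of huge_pte_alloc(): walk the existing page
 * tables for the hugepage-aligned address and return the first sub-pte,
 * or NULL if any level of the walk is missing.
 */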
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte = NULL;

        addr &= HPAGE_MASK;

        pgd = pgd_offset(mm, addr);
        if (!pgd_none(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd))
                                pte = pte_offset_map(pmd, addr);
                }
        }
        return pte;
}

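/* sparc64 does not share hugepage page tables between processes, so
 * there is never anything to unshare; returning 0 tells the generic
 * hugetlb code the PMD was left in place.
 */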
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}

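/* Install a hugepage mapping by replicating the entry into every base
 * PTE that backs the huge range, stepping the physical address by
 * PAGE_SIZE for each sub-pte.  The huge_pte_count bookkeeping feeds
 * the per-mm hugepage TSB sizing done elsewhere in the sparc64 MM code.
 */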
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
{
        int i;

        if (!pte_present(*ptep) && pte_present(entry))
                mm->context.huge_pte_count++;

        addr &= HPAGE_MASK;
        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                set_pte_at(mm, addr, ptep, entry);
                ptep++;
                addr += PAGE_SIZE;
                pte_val(entry) += PAGE_SIZE;
        }
}

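/* Tear-down counterpart of set_huge_pte_at(): clear every sub-pte of
 * the huge range and return the first one as the representative entry
 * for the whole hugepage.
 */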
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        pte_t entry;
        int i;

        entry = *ptep;
        if (pte_present(entry))
                mm->context.huge_pte_count--;

        addr &= HPAGE_MASK;

        for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
                pte_clear(mm, addr, ptep);
                addr += PAGE_SIZE;
                ptep++;
        }

        return entry;
}

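/* Because hugepages are represented as runs of ordinary PTEs rather
 * than huge PMD or PUD entries, the generic follow_page()/GUP paths
 * never see a huge entry at those levels; these stubs just say so.
 */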
struct page *follow_huge_addr(struct mm_struct *mm,
                              unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        return NULL;
}