linux/arch/tile/mm/hugetlbpage.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * TILE Huge TLB Page Support for Kernel.
 * Taken from i386 hugetlb implementation:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

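/*
 * Allocate (or find) the page-table slot that will hold a huge PTE for
 * "addr".  On TILE, huge pages are mapped at the PMD level, so this walks
 * (and allocates) pgd -> pud and then returns the PMD slot cast to a
 * pte_t pointer; only the PMD_SIZE huge page size is supported here.
 */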
pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pte_t *pte = NULL;

        /* We do not yet support multiple huge page sizes. */
        BUG_ON(sz != PMD_SIZE);

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (pud)
                pte = (pte_t *) pmd_alloc(mm, pud, addr);
        BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

        return pte;
}

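/*
 * Look up the huge PTE for "addr" without allocating anything: walk
 * pgd -> pud and return the PMD entry (as a pte_t pointer), or NULL if
 * any level of the walk is not present.
 */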
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd_present(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (pud_present(*pud))
                        pmd = pmd_offset(pud, addr);
        }
        return (pte_t *) pmd;
}

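/*
 * The HUGETLB_TEST variants below are stubs used only for testing; the
 * real implementations live in the #else branch further down.
 */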
#ifdef HUGETLB_TEST
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write)
{
        pte_t *pte;
        struct page *page;
        struct vm_area_struct *vma;

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address);

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        /* Index to the small page within the huge page covering "address". */
        page = &pte_page(*pte)[(address >> PAGE_SHIFT) %
                               (HPAGE_SIZE / PAGE_SIZE)];

        WARN_ON(!PageHead(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

int pud_huge(pud_t pud)
{
        return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        return NULL;
}

#else

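/*
 * TILE has no special kernel address range for huge pages, so there is
 * nothing for follow_huge_addr() to resolve; just report an error.
 */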
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write)
{
        return ERR_PTR(-EINVAL);
}

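/* A PMD or PUD entry maps a huge page iff its _PAGE_HUGE_PAGE bit is set. */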
int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
}

int pud_huge(pud_t pud)
{
        return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}

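/*
 * Given a huge PMD (or PUD) entry, return the struct page for the small
 * page within the huge page that contains "address".
 */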
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                             pmd_t *pmd, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pmd);
        if (page)
                page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
        return page;
}

struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                             pud_t *pud, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pud);
        if (page)
                page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
        return page;
}

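/* PMD sharing of hugetlb mappings is not implemented on TILE. */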
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        return 0;
}

#endif

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
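/*
 * Bottom-up search for a free, huge-page-aligned range of "len" bytes:
 * start from the cached free-area hint (or TASK_UNMAPPED_BASE) and walk
 * the VMA list upward, remembering the largest hole seen so the cache
 * can short-circuit future searches.
 */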
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;

        if (len > mm->cached_hole_size) {
                start_addr = mm->free_area_cache;
        } else {
                start_addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

full_search:
        addr = ALIGN(start_addr, huge_page_size(h));

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = ALIGN(vma->vm_end, huge_page_size(h));
        }
}

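/*
 * Top-down search: start just below mm->mmap_base and walk downward,
 * looking for a huge-page-aligned hole of "len" bytes between existing
 * VMAs.  If the cached hint leaves no room, retry from the base, and as
 * a last resort fall back to the bottom-up allocator.
 */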
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev_vma;
        unsigned long base = mm->mmap_base, addr = addr0;
        unsigned long largest_hole = mm->cached_hole_size;
        int first_time = 1;

        /* don't allow allocations above current base */
        if (mm->free_area_cache > base)
                mm->free_area_cache = base;

        if (len <= largest_hole) {
                largest_hole = 0;
                mm->free_area_cache = base;
        }
try_again:
        /* make sure it can fit in the remaining address space */
        if (mm->free_area_cache < len)
                goto fail;

        /* either no address requested or can't fit in requested address hole */
        addr = (mm->free_area_cache - len) & huge_page_mask(h);
        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * i.e. return with success:
                 */
                vma = find_vma_prev(mm, addr, &prev_vma);
                if (!vma)
                        return addr;

                /*
                 * new region fits between prev_vma->vm_end and
                 * vma->vm_start, use it:
                 */
                if (addr + len <= vma->vm_start &&
                            (!prev_vma || (addr >= prev_vma->vm_end))) {
                        /* remember the address as a hint for next time */
                        mm->cached_hole_size = largest_hole;
                        mm->free_area_cache = addr;
                        return addr;
                } else {
                        /* pull free_area_cache down to the first hole */
                        if (mm->free_area_cache == vma->vm_end) {
                                mm->free_area_cache = vma->vm_start;
                                mm->cached_hole_size = largest_hole;
                        }
                }

                /* remember the largest hole we saw so far */
                if (addr + largest_hole < vma->vm_start)
                        largest_hole = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = (vma->vm_start - len) & huge_page_mask(h);

        } while (len <= vma->vm_start);

fail:
        /*
         * if hint left us with no space for the requested
         * mapping then try again:
         */
        if (first_time) {
                mm->free_area_cache = base;
                largest_hole = 0;
                first_time = 0;
                goto try_again;
        }
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;
        addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
                        len, pgoff, flags);

        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = base;
        mm->cached_hole_size = ~0UL;

        return addr;
}

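/*
 * Arch hook for hugetlb mmap(): validate length and alignment, honor
 * MAP_FIXED and any address hint, then delegate to the bottom-up or
 * top-down allocator depending on the process's mmap layout.
 */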
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (current->mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}

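/*
 * Parse the "hugepagesz=" boot parameter and register the corresponding
 * hstate; only PMD_SIZE and PUD_SIZE huge pages are recognized here.
 */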
static __init int setup_hugepagesz(char *opt)
{
        unsigned long ps = memparse(opt, &opt);
        if (ps == PMD_SIZE) {
                hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
        } else if (ps == PUD_SIZE) {
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        } else {
                pr_err("hugepagesz: Unsupported page size %lu M\n",
                        ps >> 20);
                return 0;
        }
        return 1;
}
__setup("hugepagesz=", setup_hugepagesz);

#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */