linux/arch/tile/mm/hugetlbpage.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * TILE Huge TLB Page Support for Kernel.
 * Taken from i386 hugetlb implementation:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>

#ifdef CONFIG_HUGETLB_SUPER_PAGES

/*
 * Provide an additional huge page size (in addition to the regular default
 * huge page size) if no "hugepagesz" arguments are specified.
 * Note that it must be smaller than the default huge page size so
 * that pages of this size can be allocated on demand from the buddy
 * allocator.  You can change this to 64K (on a 16K build), 256K, 1M,
 * or 4M, or not define it at all.
 */
#define ADDITIONAL_HUGE_SIZE (1024 * 1024UL)

/* "Extra" page-size multipliers, one per level of the page table. */
int huge_shift[HUGE_SHIFT_ENTRIES] = {
#ifdef ADDITIONAL_HUGE_SIZE
#define ADDITIONAL_HUGE_SHIFT __builtin_ctzl(ADDITIONAL_HUGE_SIZE / PAGE_SIZE)
        [HUGE_SHIFT_PAGE] = ADDITIONAL_HUGE_SHIFT
#endif
};
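
/*
 * Worked example: with the default ADDITIONAL_HUGE_SIZE of 1 MB on a
 * 16K page build, ADDITIONAL_HUGE_SHIFT is __builtin_ctzl(1M / 16K) = 6,
 * so one such huge page covers 2^6 = 64 consecutive PTEs.  The shift
 * must be even, since tile page sizes are powers of four (see the
 * BUILD_BUG_ON checks in add_default_hugepagesz() below).
 */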

#endif

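/*
 * Allocate (if necessary) and return a pointer to the page-table entry
 * that will map a huge page of size "sz" at "addr".  Depending on the
 * size, the returned pointer may actually be a PGD, PMD, or PTE slot.
 */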
pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;

        addr &= -sz;   /* Mask off any low bits in the address. */

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);

#ifdef CONFIG_HUGETLB_SUPER_PAGES
        if (sz >= PGDIR_SIZE) {
                BUG_ON(sz != PGDIR_SIZE &&
                       sz != PGDIR_SIZE << huge_shift[HUGE_SHIFT_PGDIR]);
                return (pte_t *)pud;
        } else {
                pmd_t *pmd = pmd_alloc(mm, pud, addr);
                if (sz >= PMD_SIZE) {
                        BUG_ON(sz != PMD_SIZE &&
                               sz != (PMD_SIZE << huge_shift[HUGE_SHIFT_PMD]));
                        return (pte_t *)pmd;
                } else {
                        if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
                                panic("Unexpected page size %#lx\n", sz);
                        return pte_alloc_map(mm, pmd, addr);
                }
        }
#else
        BUG_ON(sz != PMD_SIZE);
        return (pte_t *) pmd_alloc(mm, pud, addr);
#endif
}

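/*
 * Look up the page-table entry at "index".  If that entry is not
 * present and super pages are enabled at this level, fall back to the
 * first entry of the naturally aligned 2^huge_shift[level] run, which
 * holds the "super" PTE describing the whole range, if one is mapped.
 */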
static pte_t *get_pte(pte_t *base, int index, int level)
{
        pte_t *ptep = base + index;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
        if (!pte_present(*ptep) && huge_shift[level] != 0) {
                unsigned long mask = -1UL << huge_shift[level];
                pte_t *super_ptep = base + (index & mask);
                pte_t pte = *super_ptep;
                if (pte_present(pte) && pte_super(pte))
                        ptep = super_ptep;
        }
#endif
        return ptep;
}

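/*
 * Walk the page table for "addr" and return a pointer to the PTE that
 * maps it as a huge page, or NULL if no huge mapping is present.  The
 * result may point into any level of the page table.
 */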
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
        pte_t *pte;
#endif

        /* Get the top-level page table entry. */
        pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);

        /* We don't have four levels. */
        pud = pud_offset(pgd, addr);
#ifndef __PAGETABLE_PUD_FOLDED
# error support fourth page table level
#endif
        if (!pud_present(*pud))
                return NULL;

        /* Check for an L0 huge PTE, if we have three levels. */
#ifndef __PAGETABLE_PMD_FOLDED
        if (pud_huge(*pud))
                return (pte_t *)pud;

        pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
                               pmd_index(addr), 1);
        if (!pmd_present(*pmd))
                return NULL;
#else
        pmd = pmd_offset(pud, addr);
#endif

        /* Check for an L1 huge PTE. */
        if (pmd_huge(*pmd))
                return (pte_t *)pmd;

#ifdef CONFIG_HUGETLB_SUPER_PAGES
        /* Check for an L2 huge PTE. */
        pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
        if (!pte_present(*pte))
                return NULL;
        if (pte_super(*pte))
                return pte;
#endif

        return NULL;
}

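/*
 * Report whether a PMD or PUD entry maps a huge page, i.e. whether
 * its _PAGE_HUGE_PAGE bit is set.
 */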
int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
}

int pud_huge(pud_t pud)
{
        return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
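/*
 * Search bottom-up, from TASK_UNMAPPED_BASE toward TASK_SIZE, for a
 * free region of "len" bytes aligned to the huge page size.
 */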
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}

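/*
 * Search top-down, from the mm's mmap base toward low addresses,
 * falling back to a bottom-up search if the top-down window is
 * exhausted (see the comment on the fallback below).
 */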
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;
        unsigned long addr;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = current->mm->mmap_base;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

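/*
 * Validate "addr"/"len" and choose an unmapped area for a huge
 * mapping, honoring MAP_FIXED and any address hint before delegating
 * to the bottom-up or top-down search to match the mm's mmap layout.
 */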
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (current->mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_SUPER_PAGES
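/*
 * Validate and enable one huge page size.  The size must be a power
 * of four (so its log2 is even), no larger than 64 GB, and must map
 * to a page-table level: level 0 (PUD) sizes additionally require
 * hypervisor "jumbo" page support, and any non-native size at a level
 * requires hv_set_pte_super_shift() to accept the extra shift.
 */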
static __init int __setup_hugepagesz(unsigned long ps)
{
        int log_ps = __builtin_ctzl(ps);
        int level, base_shift;

        if ((1UL << log_ps) != ps || (log_ps & 1) != 0) {
                pr_warn("Not enabling %ld byte huge pages; must be a power of four\n",
                        ps);
                return -EINVAL;
        }

        if (ps > 64*1024*1024*1024UL) {
                pr_warn("Not enabling %ld MB huge pages; largest legal value is 64 GB\n",
                        ps >> 20);
                return -EINVAL;
        } else if (ps >= PUD_SIZE) {
                static long hv_jpage_size;
                if (hv_jpage_size == 0)
                        hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO);
                if (hv_jpage_size != PUD_SIZE) {
                        pr_warn("Not enabling >= %ld MB huge pages: hypervisor reports size %ld\n",
                                PUD_SIZE >> 20, hv_jpage_size);
                        return -EINVAL;
                }
                level = 0;
                base_shift = PUD_SHIFT;
        } else if (ps >= PMD_SIZE) {
                level = 1;
                base_shift = PMD_SHIFT;
        } else if (ps > PAGE_SIZE) {
                level = 2;
                base_shift = PAGE_SHIFT;
        } else {
                pr_err("hugepagesz: huge page size %ld too small\n", ps);
                return -EINVAL;
        }

        if (log_ps != base_shift) {
                int shift_val = log_ps - base_shift;
                if (huge_shift[level] != 0) {
                        int old_shift = base_shift + huge_shift[level];
                        pr_warn("Not enabling %ld MB huge pages; already have size %ld MB\n",
                                ps >> 20, (1UL << old_shift) >> 20);
                        return -EINVAL;
                }
                if (hv_set_pte_super_shift(level, shift_val) != 0) {
                        pr_warn("Not enabling %ld MB huge pages; no hypervisor support\n",
                                ps >> 20);
                        return -EINVAL;
                }
                printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20);
                huge_shift[level] = shift_val;
        }

        hugetlb_add_hstate(log_ps - PAGE_SHIFT);

        return 0;
}

static bool saw_hugepagesz;

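/*
 * Handle "hugepagesz=" on the kernel command line.  The first use
 * clears the compiled-in ADDITIONAL_HUGE_SIZE default, so only sizes
 * named explicitly on the command line end up enabled.
 */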
static __init int setup_hugepagesz(char *opt)
{
        int rc;

        if (!saw_hugepagesz) {
                saw_hugepagesz = true;
                memset(huge_shift, 0, sizeof(huge_shift));
        }
        rc = __setup_hugepagesz(memparse(opt, NULL));
        if (rc)
                hugetlb_bad_size();
        return rc;
}
__setup("hugepagesz=", setup_hugepagesz);
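
/*
 * Example (hypothetical command line): booting with
 * "hugepagesz=256m hugepages=4 hugepagesz=16m hugepages=16" would
 * enable 256 MB and 16 MB huge page sizes (subject to the checks
 * above) and preallocate 4 and 16 pages of each, respectively; the
 * sizes are parsed by memparse(), which accepts k/m/g suffixes.
 */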

#ifdef ADDITIONAL_HUGE_SIZE
/*
 * Provide an additional huge page size if no "hugepagesz" args are given.
 * In that case, all the cores have properly set up their hv super_shift
 * already, but we need to notify the hugetlb code to enable the
 * new huge page size from the Linux point of view.
 */
static __init int add_default_hugepagesz(void)
{
        if (!saw_hugepagesz) {
                BUILD_BUG_ON(ADDITIONAL_HUGE_SIZE >= PMD_SIZE ||
                             ADDITIONAL_HUGE_SIZE <= PAGE_SIZE);
                BUILD_BUG_ON((PAGE_SIZE << ADDITIONAL_HUGE_SHIFT) !=
                             ADDITIONAL_HUGE_SIZE);
                BUILD_BUG_ON(ADDITIONAL_HUGE_SHIFT & 1);
                hugetlb_add_hstate(ADDITIONAL_HUGE_SHIFT);
        }
        return 0;
}
arch_initcall(add_default_hugepagesz);
#endif

#endif /* CONFIG_HUGETLB_SUPER_PAGES */