linux/include/linux/hugetlb.h
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
        spinlock_t lock;
        long count;
        long max_hpages;        /* Maximum huge pages or -1 if no maximum. */
        long used_hpages;       /* Used count against maximum, includes */
                                /* both alloced and reserved pages. */
        struct hstate *hstate;
        long min_hpages;        /* Minimum huge pages or -1 if no minimum. */
        long rsv_hpages;        /* Pages reserved against global pool to */
                                /* satisfy minimum size. */
};

struct resv_map {
        struct kref refs;
        spinlock_t lock;
        struct list_head regions;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
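
/*
 * Lifetime sketch (illustrative, not new API): a resv_map is refcounted
 * through its embedded kref, with resv_map_release() as the release
 * callback:
 *
 *      struct resv_map *resv = resv_map_alloc();
 *
 *      if (!resv)
 *              return -ENOMEM;
 *      ...
 *      kref_put(&resv->refs, resv_map_release);
 */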

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
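
/*
 * Example (hypothetical diagnostic, not part of this header): walk every
 * registered huge page size:
 *
 *      struct hstate *h;
 *
 *      for_each_hstate(h)
 *              pr_info("%s: order %u\n", h->name, huge_page_order(h));
 */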

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                                long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
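
/*
 * Usage sketch (modelled on the hugetlbfs mount path; error handling
 * abbreviated): create a subpool capped at max_hpages with no minimum,
 * and drop the reference when done:
 *
 *      struct hugepage_subpool *spool;
 *
 *      spool = hugepage_new_subpool(&default_hstate, max_hpages, 0);
 *      if (!spool)
 *              return -ENOMEM;
 *      ...
 *      hugepage_put_subpool(spool);
 */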

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
                         struct page **, struct vm_area_struct **,
                         unsigned long *, unsigned long *, long, unsigned int);
void unmap_hugepage_range(struct vm_area_struct *,
                          unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                          struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                                unsigned long start, unsigned long end,
                                struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, unsigned int flags);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
                                                struct vm_area_struct *vma,
                                                vm_flags_t vm_flags);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
#endif

extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                                pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                                pud_t *pud, int flags);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
        return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)   ({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)       ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)  ({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)     0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pmd(mm, addr, pmd, flags)   NULL
#define follow_huge_pud(mm, addr, pud, flags)   NULL
#define prepare_hugepage_range(file, addr, len) (-EINVAL)
#define pmd_huge(x)     0
#define pud_huge(x)     0
#define is_hugepage_only_range(mm, addr, len)   0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)     ({ BUG(); 0; })
#define huge_pte_offset(mm, address)    0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
        return 0;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
        return false;
}
#define putback_active_hugepage(p)      do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot)
{
        return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * Hugepages at the page global directory. If an arch supports hugepages
 * at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)     0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
        BUG();
        return 0;
}
#endif

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
        BUG();
        return 0;
}
#endif

#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format that can
 * support multiple hugepage sizes. For example, commit a4fe3ce76
 * "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
 * introduced this on powerpc, allowing a more flexible hugepage
 * pagetable layout.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
                              unsigned pdshift, unsigned long end,
                              int write, struct page **pages, int *nr)
{
        return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
                       unsigned pdshift, unsigned long end,
                       int write, struct page **pages, int *nr);
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
        /*
         * The file will be used as a shm file so shmfs accounting rules
         * apply.
         */
        HUGETLB_SHMFS_INODE     = 1,
        /*
         * The file is being created on the internal vfs mount and shmfs
         * accounting rules do not apply.
         */
        HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
        long    max_inodes;   /* inodes allowed */
        long    free_inodes;  /* inodes free */
        spinlock_t      stat_lock;
        struct hstate *hstate;
        struct hugepage_subpool *spool;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
                                struct user_struct **user, int creat_flags,
                                int page_size_log);
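
/*
 * Usage sketch (modelled on the mmap(MAP_HUGETLB) path; surrounding
 * declarations and error handling abbreviated): back an anonymous
 * mapping with a hugetlbfs file on the internal mount:
 *
 *      file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
 *                                &user, HUGETLB_ANONHUGE_INODE,
 *                                (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
 *      if (IS_ERR(file))
 *              return PTR_ERR(file);
 */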

static inline int is_file_hugepages(struct file *file)
{
        if (file->f_op == &hugetlbfs_file_operations)
                return 1;
        if (is_file_shm_hugepages(file))
                return 1;

        return 0;
}

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)                 0
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
                struct user_struct **user, int creat_flags,
                int page_size_log)
{
        return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
        int next_nid_to_alloc;
        int next_nid_to_free;
        unsigned int order;
        unsigned long mask;
        unsigned long max_huge_pages;
        unsigned long nr_huge_pages;
        unsigned long free_huge_pages;
        unsigned long resv_huge_pages;
        unsigned long surplus_huge_pages;
        unsigned long nr_overcommit_huge_pages;
        struct list_head hugepage_activelist;
        struct list_head hugepage_freelists[MAX_NUMNODES];
        unsigned int nr_huge_pages_node[MAX_NUMNODES];
        unsigned int free_huge_pages_node[MAX_NUMNODES];
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
        /* cgroup control files */
        struct cftype cgroup_files[5];
#endif
        char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
        struct list_head list;
        struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
        phys_addr_t phys;
#endif
};

struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
                                unsigned long addr, int avoid_reserve);

/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
        struct hugetlbfs_sb_info *hsb;
        hsb = HUGETLBFS_SB(i->i_sb);
        return hsb->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
        return hstate_inode(file_inode(f));
}

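/*
 * Map a log2 page size (e.g. the value encoded in mmap()'s MAP_HUGE_SHIFT
 * bits) to an hstate; 0 selects the default huge page size.
 */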
static inline struct hstate *hstate_sizelog(int page_size_log)
{
        if (!page_size_log)
                return &default_hstate;

        return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return hstate_file(vma->vm_file);
}

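/*
 * For example, with 4 KB base pages an order-9 hstate describes 2 MB huge
 * pages: huge_page_size() is 2 MB, huge_page_shift() is 21 and
 * pages_per_huge_page() is 512.
 */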
static inline unsigned long huge_page_size(struct hstate *h)
{
        return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
        return h->order + PAGE_SHIFT;
}

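/*
 * "Gigantic" hstates are those whose pages exceed the buddy allocator's
 * maximum order; such pages cannot be allocated from the page allocator
 * at runtime and must come from boot-time or contiguous-range allocation.
 */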
static inline bool hstate_is_gigantic(struct hstate *h)
{
        return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1 << h->order;
}

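/* Number of 512-byte sectors spanned by a huge page. */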
static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
        return huge_page_size(h) / 512;
}

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                                       struct page *page, int writable)
{
        return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHuge(page), page);
        return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
        return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
        if (!PageCompound(page))
                return page->index;

        return __basepage_index(page);
}

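/*
 * Used by memory hotplug: free any unused pre-allocated huge pages in the
 * given PFN range so the range can be offlined.
 */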
extern void dissolve_free_huge_pages(unsigned long start_pfn,
                                     unsigned long end_pfn);
static inline int hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
        return huge_page_shift(h) == PMD_SHIFT;
#else
        return 0;
#endif
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        if (huge_page_size(h) == PMD_SIZE)
                return pmd_lockptr(mm, (pmd_t *) pte);
        VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
        return &mm->page_table_lock;
}

static inline bool hugepages_supported(void)
{
        /*
         * Some platforms decide at boot time whether they support huge
         * pages. On these (such as powerpc), HPAGE_SHIFT is set to 0
         * when there is no such support.
         */
        return HPAGE_SHIFT != 0;
}

#else   /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0

static inline pgoff_t basepage_index(struct page *page)
{
        return page->index;
}
#define dissolve_free_huge_pages(s, e)  do {} while (0)
#define hugepage_migration_supported(h) 0

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        return &mm->page_table_lock;
}
#endif  /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl;

        ptl = huge_pte_lockptr(h, mm, pte);
        spin_lock(ptl);
        return ptl;
}
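
/*
 * Locking sketch (illustrative; mirrors how hugetlb fault handling uses
 * these helpers): take the per-huge-PTE lock, operate on the entry, then
 * unlock the returned spinlock:
 *
 *      spinlock_t *ptl;
 *
 *      ptl = huge_pte_lock(h, mm, ptep);
 *      ...
 *      spin_unlock(ptl);
 */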

#endif /* _LINUX_HUGETLB_H */