linux/include/linux/hugetlb.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both alloced and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);
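
/*
 * Illustrative sketch (not kernel API documentation): a reservation
 * map is refcounted through 'refs'. resv_map_alloc() hands back a map
 * holding one reference; the final kref_put() invokes
 * resv_map_release() to free it:
 *
 *	struct resv_map *map = resv_map_alloc();
 *
 *	if (!map)
 *		return -ENOMEM;
 *	...
 *	kref_put(&map->refs, resv_map_release);
 */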

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
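
/*
 * Illustrative sketch: for_each_hstate() walks every registered huge
 * page size, e.g. to dump per-size counters (field names as defined in
 * struct hstate below):
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */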

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
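
/*
 * Illustrative sketch: a subpool is typically created when a hugetlbfs
 * superblock is set up and released when it is torn down:
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(h, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */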

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
				pgoff_t idx, unsigned long address);
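
/*
 * Illustrative sketch: fault paths serialize on a hashed mutex so that
 * concurrent faults on the same file offset do not race allocating the
 * same page:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(h, mapping, idx, address);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */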

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
					pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define follow_huge_pgd(mm, addr, pgd, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
				src_addr, pagep)	({ BUG(); 0; })
#define huge_pte_offset(mm, address, sz)	0

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)
#define move_hugetlb_state(old, new, reason)	do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
				struct vm_area_struct *vma, unsigned long address,
				unsigned int flags)
{
	BUG();
	return 0;
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file, so shmfs accounting
	 * rules apply.
	 */
	HUGETLB_SHMFS_INODE	= 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE	= 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}
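
/*
 * Illustrative worked example: page_size_log is the log2 page size
 * encoded in mmap()/shmget() flags, with 0 meaning "use the default
 * hstate". MAP_HUGE_2MB encodes 21, so hstate_sizelog(21) looks up
 * size_to_hstate(1UL << 21), i.e. the 2 MB hstate, if registered.
 */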

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}
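
/*
 * Illustrative worked example: with 4 KB base pages and a MAX_ORDER of
 * 11, a 2 MB hstate has order 9 (not gigantic, allocatable from the
 * buddy allocator), while a 1 GB hstate has order 18 >= MAX_ORDER and
 * is therefore gigantic.
 */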

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
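
/*
 * Illustrative worked example: for a 2 MB hstate on a 4 KB base page
 * system, order == 9, so huge_page_size() == 4 KB << 9 == 2 MB,
 * pages_per_huge_page() == 512, and blocks_per_huge_page() ==
 * 2 MB / 512 == 4096 512-byte sectors.
 */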

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}
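
/*
 * Illustrative worked example: the head page of a 2 MB huge page at
 * file offset 4 MB has page->index == 2 in huge page units;
 * basepage_index() rescales that to 1024 in 4 KB PAGE_SIZE units.
 */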

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check differs from the migration check: it decides
 * whether a huge page should be placed in a movable zone. Movability
 * only makes sense for sizes that support migration in the first
 * place; there is no reason for a huge page to be movable if it is
 * not migratable to start with. The page must also be small enough
 * that migrating it out of a movable zone remains feasible; merely
 * residing in a movable zone does not make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable, because it is not feasible
 * to migrate them from a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide at boot time whether they support huge pages.
 * Some of them, such as powerpc, set HPAGE_SHIFT to 0 when there is
 * no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif
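
/*
 * Illustrative sketch: setup paths bail out early on platforms that
 * booted without huge page support:
 *
 *	if (!hugepages_supported())
 *		return 0;
 */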

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
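
/*
 * Illustrative sketch (assumes the generic huge_pte_modify() helper):
 * protection changes use the start/commit pair so an architecture can
 * hide or exploit the transient cleared state in between:
 *
 *	pte_t old_pte, new_pte;
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	new_pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 */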

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
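
/*
 * Illustrative sketch: callers pair huge_pte_lock() with spin_unlock()
 * around huge PTE updates:
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *
 *	set_huge_pte_at(mm, addr, ptep, entry);
 *	spin_unlock(ptl);
 */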

#endif /* _LINUX_HUGETLB_H */