linux/include/linux/hugetlb.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
        spinlock_t lock;
        long count;
        long max_hpages;        /* Maximum huge pages or -1 if no maximum. */
        long used_hpages;       /* Used count against maximum, includes */
                                /* both alloced and reserved pages. */
        struct hstate *hstate;
        long min_hpages;        /* Minimum huge pages or -1 if no minimum. */
        long rsv_hpages;        /* Pages reserved against global pool to */
                                /* satisfy minimum size. */
};
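
/*
 * Illustrative lifecycle (a sketch, not kernel-verbatim): hugetlbfs
 * creates one subpool per mount and drops it again at unmount, roughly:
 *
 *      spool = hugepage_new_subpool(hstate, max_hpages, min_hpages);
 *      ...
 *      hugepage_put_subpool(spool);
 */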

struct resv_map {
        struct kref refs;
        spinlock_t lock;
        struct list_head regions;
        long adds_in_progress;
        struct list_head region_cache;
        long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
        for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
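
/*
 * Usage sketch (illustrative): iterate over every registered huge page
 * size, e.g. to dump per-hstate counters:
 *
 *      struct hstate *h;
 *
 *      for_each_hstate(h)
 *              pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */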

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                                long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
                                        void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
                         struct page **, struct vm_area_struct **,
                         unsigned long *, unsigned long *, long, unsigned int,
                         int *);
void unmap_hugepage_range(struct vm_area_struct *,
                          unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                          struct vm_area_struct *vma,
                          unsigned long start, unsigned long end,
                          struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                                unsigned long start, unsigned long end,
                                struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
                                struct vm_area_struct *dst_vma,
                                unsigned long dst_addr,
                                unsigned long src_addr,
                                struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
                                                struct vm_area_struct *vma,
                                                vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                                                long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
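
/*
 * Caller pattern (sketch): the hash selects one mutex from
 * hugetlb_fault_mutex_table so that concurrent faults on the same
 * (mapping, index) pair serialize:
 *
 *      hash = hugetlb_fault_mutex_hash(mapping, idx);
 *      mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *      ...
 *      mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */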

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
                              int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
                            unsigned long address, hugepd_t hpd,
                            int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                                pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                                pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
                             pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
        return 0;
}

static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
                                        pte_t *ptep)
{
        return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
                                struct vm_area_struct *vma,
                                unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
                        struct vm_area_struct *vma, struct page **pages,
                        struct vm_area_struct **vmas, unsigned long *position,
                        unsigned long *nr_pages, long i, unsigned int flags,
                        int *nonblocking)
{
        BUG();
        return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
                                        unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
                        struct mm_struct *src, struct vm_area_struct *vma)
{
        BUG();
        return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(int nid, char *buf)
{
        return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
                                unsigned long address, hugepd_t hpd, int flags,
                                int pdshift)
{
        return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
                                unsigned long address, pmd_t *pmd, int flags)
{
        return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
                                unsigned long address, pud_t *pud, int flags)
{
        return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
                                unsigned long address, pgd_t *pgd, int flags)
{
        return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
                                unsigned long addr, unsigned long len)
{
        return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
        return 0;
}

static inline int pud_huge(pud_t pud)
{
        return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
                                        unsigned long addr, unsigned long len)
{
        return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                                unsigned long addr, unsigned long end,
                                unsigned long floor, unsigned long ceiling)
{
        BUG();
}

static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
                                                pte_t *dst_pte,
                                                struct vm_area_struct *dst_vma,
                                                unsigned long dst_addr,
                                                unsigned long src_addr,
                                                struct page **pagep)
{
        BUG();
        return 0;
}

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
                                        unsigned long sz)
{
        return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
        return false;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
                                        struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned long end, pgprot_t newprot)
{
        return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
                        struct vm_area_struct *vma, unsigned long start,
                        unsigned long end, struct page *ref_page)
{
        BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
                        struct vm_area_struct *vma, unsigned long address,
                        unsigned int flags)
{
        BUG();
        return 0;
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an architecture supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)     0
#endif
#ifndef p4d_huge
#define p4d_huge(x)     0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
        BUG();
        return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
        /*
         * The file will be used as an shm file so shmfs accounting rules
         * apply
         */
        HUGETLB_SHMFS_INODE     = 1,
        /*
         * The file is being created on the internal vfs mount and shmfs
         * accounting rules do not apply
         */
        HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
        long    max_inodes;   /* inodes allowed */
        long    free_inodes;  /* inodes free */
        spinlock_t      stat_lock;
        struct hstate *hstate;
        struct hugepage_subpool *spool;
        kuid_t  uid;
        kgid_t  gid;
        umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
        struct shared_policy policy;
        struct inode vfs_inode;
        unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
        return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
                                struct user_struct **user, int creat_flags,
                                int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
        if (file->f_op == &hugetlbfs_file_operations)
                return true;

        return is_file_shm_hugepages(file);
}


#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)                 false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
                struct user_struct **user, int creat_flags,
                int page_size_log)
{
        return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
        int next_nid_to_alloc;
        int next_nid_to_free;
        unsigned int order;
        unsigned long mask;
        unsigned long max_huge_pages;
        unsigned long nr_huge_pages;
        unsigned long free_huge_pages;
        unsigned long resv_huge_pages;
        unsigned long surplus_huge_pages;
        unsigned long nr_overcommit_huge_pages;
        struct list_head hugepage_activelist;
        struct list_head hugepage_freelists[MAX_NUMNODES];
        unsigned int nr_huge_pages_node[MAX_NUMNODES];
        unsigned int free_huge_pages_node[MAX_NUMNODES];
        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
        /* cgroup control files */
        struct cftype cgroup_files[5];
#endif
        char name[HSTATE_NAME_LEN];
};
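
/*
 * Illustrative numbers: on x86-64 with 4 KiB base pages, the 2 MiB
 * hstate has order 9 (4 KiB << 9 == 2 MiB) and the 1 GiB hstate has
 * order 18; the nr/free/resv/surplus counters are all in units of
 * whole huge pages.
 */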

struct huge_bootmem_page {
        struct list_head list;
        struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
                                unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
                                nodemask_t *nmask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
                                unsigned long address);
struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
                                     int nid, nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
                        pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
        return HUGETLBFS_SB(i->i_sb)->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
        return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        if (!page_size_log)
                return &default_hstate;

        return size_to_hstate(1UL << page_size_log);
}
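
/*
 * Worked example (illustrative): a caller requesting 2 MiB pages passes
 * page_size_log == 21, so this returns size_to_hstate(1UL << 21), i.e.
 * the 2 MiB hstate if one is registered, or NULL otherwise;
 * page_size_log == 0 selects the default huge page size.
 */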

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return (unsigned long)PAGE_SIZE << h->order;
}
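
/*
 * Worked example: with PAGE_SIZE == 4 KiB and h->order == 9 this yields
 * 4096 << 9 == 2 MiB.
 */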

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
        return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return huge_page_order(h) >= MAX_ORDER;
}
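
/*
 * "Gigantic" means too large for the buddy allocator, whose largest
 * block has order MAX_ORDER - 1; e.g. with 4 KiB base pages and
 * MAX_ORDER == 11, a 1 GiB page (order 18) is gigantic while a 2 MiB
 * page (order 9) is not.
 */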

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
        return huge_page_size(h) / 512;
}
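
/*
 * The result is in 512-byte sector units; e.g. a 2 MiB huge page spans
 * 2097152 / 512 == 4096 blocks.
 */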

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                                       struct page *page, int writable)
{
        return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
        VM_BUG_ON_PAGE(!PageHuge(page), page);
        return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
        return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
        if (!PageCompound(page))
                return page->index;

        return __basepage_index(page);
}
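
/*
 * Worked example (illustrative): for a tail page of a 2 MiB compound
 * page (512 base pages), __basepage_index() scales the head page's
 * index by 512 and adds the tail's offset within the compound page.
 */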

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
                                    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        if ((huge_page_shift(h) == PMD_SHIFT) ||
                (huge_page_shift(h) == PUD_SHIFT) ||
                        (huge_page_shift(h) == PGDIR_SHIFT))
                return true;
        else
                return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
        return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return arch_hugetlb_migration_supported(h);
}
/*
 * The movability check is distinct from the migration check: it
 * determines whether a huge page should be placed in a movable zone.
 * Movability only matters for huge page sizes that support migration;
 * there is no reason for a huge page to be movable if it is not
 * migratable to start with. The page must also be small enough that
 * migrating it out of a movable zone remains feasible; mere presence
 * in a movable zone does not by itself make migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable, because it is not feasible
 * to migrate them out of a movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
        if (!hugepage_migration_supported(h))
                return false;

        if (hstate_is_gigantic(h))
                return false;
        return true;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        if (huge_page_size(h) == PMD_SIZE)
                return pmd_lockptr(mm, (pmd_t *) pte);
        VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
        return &mm->page_table_lock;
}
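
/*
 * Split page table lock: PMD-sized huge pages can use the per-page pmd
 * lock; every other size falls back to the coarse, mm-wide
 * page_table_lock.
 */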

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
        atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
        atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte, unsigned long sz)
{
        set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep)
{
        return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
                                                unsigned long addr, pte_t *ptep,
                                                pte_t old_pte, pte_t pte)
{
        set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

#else   /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
                                           unsigned long addr,
                                           int avoid_reserve)
{
        return NULL;
}

static inline struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
        return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask)
{
        return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
                                               struct vm_area_struct *vma,
                                               unsigned long address)
{
        return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
        return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
        return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
        return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
        return NULL;
}

static inline struct hstate *hstate_inode(struct inode *i)
{
        return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
        return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
        return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
        return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
        return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
        return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
        return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
        return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
        return 0;
}

static inline int hstate_index(struct hstate *h)
{
        return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
        return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
        return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
                                           unsigned long end_pfn)
{
        return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
        return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
        return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                           struct mm_struct *mm, pte_t *pte)
{
        return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif  /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
                                        struct mm_struct *mm, pte_t *pte)
{
        spinlock_t *ptl;

        ptl = huge_pte_lockptr(h, mm, pte);
        spin_lock(ptl);
        return ptl;
}
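
/*
 * Typical caller pattern (sketch): pair with spin_unlock() around the
 * PTE access, e.g.
 *
 *      ptl = huge_pte_lock(h, mm, ptep);
 *      entry = huge_ptep_get(ptep);
 *      ...
 *      spin_unlock(ptl);
 */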

#endif /* _LINUX_HUGETLB_H */