linux/include/linux/hugetlb.h
   1#ifndef _LINUX_HUGETLB_H
   2#define _LINUX_HUGETLB_H
   3
   4#include <linux/mm_types.h>
   5#include <linux/mmdebug.h>
   6#include <linux/fs.h>
   7#include <linux/hugetlb_inline.h>
   8#include <linux/cgroup.h>
   9#include <linux/list.h>
  10#include <linux/kref.h>
  11#include <asm/pgtable.h>
  12
  13struct ctl_table;
  14struct user_struct;
  15struct mmu_gather;
  16
  17#ifndef is_hugepd
   18/*
   19 * Some architectures require a hugepage directory format in order to
   20 * support multiple hugepage sizes. For example,
   21 * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
   22 * introduced this on powerpc, allowing a more flexible hugepage
   23 * pagetable layout.
   24 */
  25typedef struct { unsigned long pd; } hugepd_t;
  26#define is_hugepd(hugepd) (0)
  27#define __hugepd(x) ((hugepd_t) { (x) })
  28static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
  29                              unsigned pdshift, unsigned long end,
  30                              int write, struct page **pages, int *nr)
  31{
  32        return 0;
  33}
  34#else
  35extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
  36                       unsigned pdshift, unsigned long end,
  37                       int write, struct page **pages, int *nr);
  38#endif
  39
  40
  41#ifdef CONFIG_HUGETLB_PAGE
  42
  43#include <linux/mempolicy.h>
  44#include <linux/shm.h>
  45#include <asm/tlbflush.h>
  46
  47struct hugepage_subpool {
  48        spinlock_t lock;
  49        long count;
  50        long max_hpages;        /* Maximum huge pages or -1 if no maximum. */
   51        long used_hpages;       /* Used count against maximum, includes */
   52                                /* both allocated and reserved pages. */
  53        struct hstate *hstate;
  54        long min_hpages;        /* Minimum huge pages or -1 if no minimum. */
   55        long rsv_hpages;        /* Pages reserved against global pool to */
   56                                /* satisfy minimum size. */
  57};
  58
  59struct resv_map {
  60        struct kref refs;
  61        spinlock_t lock;
  62        struct list_head regions;
  63        long adds_in_progress;
  64        struct list_head region_cache;
  65        long region_cache_count;
  66};
  67extern struct resv_map *resv_map_alloc(void);
  68void resv_map_release(struct kref *ref);
  69
  70extern spinlock_t hugetlb_lock;
  71extern int hugetlb_max_hstate __read_mostly;
  72#define for_each_hstate(h) \
  73        for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
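/*
 * Usage sketch (illustrative only, not part of this header): walking every
 * registered hstate with for_each_hstate(); the pr_info() call is purely
 * for demonstration.
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("hstate %s: order %u\n", h->name, huge_page_order(h));
 */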
  74
  75struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
  76                                                long min_hpages);
  77void hugepage_put_subpool(struct hugepage_subpool *spool);
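/*
 * Illustrative sketch (hugetlbfs does something similar per superblock):
 * create a subpool against a given hstate and drop it again.  max_hpages
 * and min_hpages below are placeholder values, and error handling is
 * elided.
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(&default_hstate, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */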
  78
  79void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
  80int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
  81int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
  82int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
  83
  84#ifdef CONFIG_NUMA
  85int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
  86                                        void __user *, size_t *, loff_t *);
  87#endif
  88
  89int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
  90long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
  91                         struct page **, struct vm_area_struct **,
  92                         unsigned long *, unsigned long *, long, unsigned int,
  93                         int *);
  94void unmap_hugepage_range(struct vm_area_struct *,
  95                          unsigned long, unsigned long, struct page *);
  96void __unmap_hugepage_range_final(struct mmu_gather *tlb,
  97                          struct vm_area_struct *vma,
  98                          unsigned long start, unsigned long end,
  99                          struct page *ref_page);
 100void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 101                                unsigned long start, unsigned long end,
 102                                struct page *ref_page);
 103void hugetlb_report_meminfo(struct seq_file *);
 104int hugetlb_report_node_meminfo(int, char *);
 105void hugetlb_show_meminfo(void);
 106unsigned long hugetlb_total_pages(void);
 107int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 108                        unsigned long address, unsigned int flags);
 109int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
 110                                struct vm_area_struct *dst_vma,
 111                                unsigned long dst_addr,
 112                                unsigned long src_addr,
 113                                struct page **pagep);
 114int hugetlb_reserve_pages(struct inode *inode, long from, long to,
 115                                                struct vm_area_struct *vma,
 116                                                vm_flags_t vm_flags);
 117long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
 118                                                long freed);
 119bool isolate_huge_page(struct page *page, struct list_head *list);
 120void putback_active_hugepage(struct page *page);
 121void free_huge_page(struct page *page);
 122void hugetlb_fix_reserve_counts(struct inode *inode);
 123extern struct mutex *hugetlb_fault_mutex_table;
 124u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
 125                                struct vm_area_struct *vma,
 126                                struct address_space *mapping,
 127                                pgoff_t idx, unsigned long address);
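/*
 * The fault mutex table serializes hugetlb faults that hash to the same
 * mapping/index, so racing faults do not double-allocate or double-consume
 * reservations; hugetlb_fault_mutex_hash() selects the mutex to take for a
 * given fault.
 */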
 128
 129pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
 130
 131extern int hugepages_treat_as_movable;
 132extern int sysctl_hugetlb_shm_group;
 133extern struct list_head huge_boot_pages;
 134
 135/* arch callbacks */
 136
 137pte_t *huge_pte_alloc(struct mm_struct *mm,
 138                        unsigned long addr, unsigned long sz);
 139pte_t *huge_pte_offset(struct mm_struct *mm,
 140                       unsigned long addr, unsigned long sz);
 141int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
 142struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
 143                              int write);
 144struct page *follow_huge_pd(struct vm_area_struct *vma,
 145                            unsigned long address, hugepd_t hpd,
 146                            int flags, int pdshift);
 147struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 148                                pmd_t *pmd, int flags);
 149struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
 150                                pud_t *pud, int flags);
 151struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
 152                             pgd_t *pgd, int flags);
 153
 154int pmd_huge(pmd_t pmd);
 155int pud_huge(pud_t pud);
 156unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 157                unsigned long address, unsigned long end, pgprot_t newprot);
 158
 159bool is_hugetlb_entry_migration(pte_t pte);
 160#else /* !CONFIG_HUGETLB_PAGE */
 161
 162static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 163{
 164}
 165
 166static inline unsigned long hugetlb_total_pages(void)
 167{
 168        return 0;
 169}
 170
 171#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
 172#define follow_huge_addr(mm, addr, write)       ERR_PTR(-EINVAL)
 173#define copy_hugetlb_page_range(src, dst, vma)  ({ BUG(); 0; })
 174static inline void hugetlb_report_meminfo(struct seq_file *m)
 175{
 176}
 177#define hugetlb_report_node_meminfo(n, buf)     0
 178static inline void hugetlb_show_meminfo(void)
 179{
 180}
 181#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
 182#define follow_huge_pmd(mm, addr, pmd, flags)   NULL
 183#define follow_huge_pud(mm, addr, pud, flags)   NULL
 184#define follow_huge_pgd(mm, addr, pgd, flags)   NULL
 185#define prepare_hugepage_range(file, addr, len) (-EINVAL)
 186#define pmd_huge(x)     0
 187#define pud_huge(x)     0
 188#define is_hugepage_only_range(mm, addr, len)   0
 189#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
 190#define hugetlb_fault(mm, vma, addr, flags)     ({ BUG(); 0; })
 191#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
 192                                src_addr, pagep)        ({ BUG(); 0; })
 193#define huge_pte_offset(mm, address, sz)        0
 194
 195static inline bool isolate_huge_page(struct page *page, struct list_head *list)
 196{
 197        return false;
 198}
 199#define putback_active_hugepage(p)      do {} while (0)
 200
 201static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 202                unsigned long address, unsigned long end, pgprot_t newprot)
 203{
 204        return 0;
 205}
 206
 207static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 208                        struct vm_area_struct *vma, unsigned long start,
 209                        unsigned long end, struct page *ref_page)
 210{
 211        BUG();
 212}
 213
 214static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
 215                        struct vm_area_struct *vma, unsigned long start,
 216                        unsigned long end, struct page *ref_page)
 217{
 218        BUG();
 219}
 220
 221#endif /* !CONFIG_HUGETLB_PAGE */
  222/*
  223 * hugepages at the page global directory. If an arch supports
  224 * hugepages at the pgd level, it needs to define this.
  225 */
 226#ifndef pgd_huge
 227#define pgd_huge(x)     0
 228#endif
 229#ifndef p4d_huge
 230#define p4d_huge(x)     0
 231#endif
 232
 233#ifndef pgd_write
 234static inline int pgd_write(pgd_t pgd)
 235{
 236        BUG();
 237        return 0;
 238}
 239#endif
 240
 241#ifndef pud_write
 242static inline int pud_write(pud_t pud)
 243{
 244        BUG();
 245        return 0;
 246}
 247#endif
 248
 249#define HUGETLB_ANON_FILE "anon_hugepage"
 250
 251enum {
 252        /*
  253         * The file will be used as a shm file, so shmfs accounting rules
  254         * apply.
 255         */
 256        HUGETLB_SHMFS_INODE     = 1,
 257        /*
 258         * The file is being created on the internal vfs mount and shmfs
 259         * accounting rules do not apply
 260         */
 261        HUGETLB_ANONHUGE_INODE  = 2,
 262};
 263
 264#ifdef CONFIG_HUGETLBFS
 265struct hugetlbfs_sb_info {
 266        long    max_inodes;   /* inodes allowed */
 267        long    free_inodes;  /* inodes free */
 268        spinlock_t      stat_lock;
 269        struct hstate *hstate;
 270        struct hugepage_subpool *spool;
 271        kuid_t  uid;
 272        kgid_t  gid;
 273        umode_t mode;
 274};
 275
 276static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
 277{
 278        return sb->s_fs_info;
 279}
 280
 281extern const struct file_operations hugetlbfs_file_operations;
 282extern const struct vm_operations_struct hugetlb_vm_ops;
 283struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
 284                                struct user_struct **user, int creat_flags,
 285                                int page_size_log);
 286
 287static inline bool is_file_hugepages(struct file *file)
 288{
 289        if (file->f_op == &hugetlbfs_file_operations)
 290                return true;
 291
 292        return is_file_shm_hugepages(file);
 293}
 294
 295
 296#else /* !CONFIG_HUGETLBFS */
 297
 298#define is_file_hugepages(file)                 false
 299static inline struct file *
 300hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
 301                struct user_struct **user, int creat_flags,
 302                int page_size_log)
 303{
 304        return ERR_PTR(-ENOSYS);
 305}
 306
 307#endif /* !CONFIG_HUGETLBFS */
 308
 309#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 310unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 311                                        unsigned long len, unsigned long pgoff,
 312                                        unsigned long flags);
 313#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
 314
 315#ifdef CONFIG_HUGETLB_PAGE
 316
 317#define HSTATE_NAME_LEN 32
 318/* Defines one hugetlb page size */
 319struct hstate {
 320        int next_nid_to_alloc;
 321        int next_nid_to_free;
 322        unsigned int order;
 323        unsigned long mask;
 324        unsigned long max_huge_pages;
 325        unsigned long nr_huge_pages;
 326        unsigned long free_huge_pages;
 327        unsigned long resv_huge_pages;
 328        unsigned long surplus_huge_pages;
 329        unsigned long nr_overcommit_huge_pages;
 330        struct list_head hugepage_activelist;
 331        struct list_head hugepage_freelists[MAX_NUMNODES];
 332        unsigned int nr_huge_pages_node[MAX_NUMNODES];
 333        unsigned int free_huge_pages_node[MAX_NUMNODES];
 334        unsigned int surplus_huge_pages_node[MAX_NUMNODES];
 335#ifdef CONFIG_CGROUP_HUGETLB
 336        /* cgroup control files */
 337        struct cftype cgroup_files[5];
 338#endif
 339        char name[HSTATE_NAME_LEN];
 340};
 341
 342struct huge_bootmem_page {
 343        struct list_head list;
 344        struct hstate *hstate;
 345#ifdef CONFIG_HIGHMEM
 346        phys_addr_t phys;
 347#endif
 348};
 349
 350struct page *alloc_huge_page(struct vm_area_struct *vma,
 351                                unsigned long addr, int avoid_reserve);
 352struct page *alloc_huge_page_node(struct hstate *h, int nid);
 353struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
 354                                unsigned long addr, int avoid_reserve);
 355struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
 356                                nodemask_t *nmask);
 357int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 358                        pgoff_t idx);
 359
 360/* arch callback */
 361int __init alloc_bootmem_huge_page(struct hstate *h);
 362
 363void __init hugetlb_bad_size(void);
 364void __init hugetlb_add_hstate(unsigned order);
 365struct hstate *size_to_hstate(unsigned long size);
 366
 367#ifndef HUGE_MAX_HSTATE
 368#define HUGE_MAX_HSTATE 1
 369#endif
 370
 371extern struct hstate hstates[HUGE_MAX_HSTATE];
 372extern unsigned int default_hstate_idx;
 373
 374#define default_hstate (hstates[default_hstate_idx])
 375
 376static inline struct hstate *hstate_inode(struct inode *i)
 377{
 378        return HUGETLBFS_SB(i->i_sb)->hstate;
 379}
 380
 381static inline struct hstate *hstate_file(struct file *f)
 382{
 383        return hstate_inode(file_inode(f));
 384}
 385
 386static inline struct hstate *hstate_sizelog(int page_size_log)
 387{
 388        if (!page_size_log)
 389                return &default_hstate;
 390
 391        return size_to_hstate(1UL << page_size_log);
 392}
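/*
 * page_size_log is the base-2 log of the requested huge page size, as
 * passed down from the mmap()/shmget() huge-page flag encoding.  For
 * example, assuming a 2 MiB hstate is configured, hstate_sizelog(21)
 * returns that hstate, while hstate_sizelog(0) falls back to
 * default_hstate.
 */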
 393
 394static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
 395{
 396        return hstate_file(vma->vm_file);
 397}
 398
 399static inline unsigned long huge_page_size(struct hstate *h)
 400{
 401        return (unsigned long)PAGE_SIZE << h->order;
 402}
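/*
 * Example: with 4 KiB base pages (PAGE_SIZE == 4096) and a 2 MiB hstate
 * (order == 9), huge_page_size() returns 4096 << 9 == 2 MiB and
 * pages_per_huge_page() below returns 512.
 */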
 403
 404extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);
 405
 406extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
 407
 408static inline unsigned long huge_page_mask(struct hstate *h)
 409{
 410        return h->mask;
 411}
 412
 413static inline unsigned int huge_page_order(struct hstate *h)
 414{
 415        return h->order;
 416}
 417
 418static inline unsigned huge_page_shift(struct hstate *h)
 419{
 420        return h->order + PAGE_SHIFT;
 421}
 422
 423static inline bool hstate_is_gigantic(struct hstate *h)
 424{
 425        return huge_page_order(h) >= MAX_ORDER;
 426}
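/*
 * An hstate is "gigantic" when its pages are too big to come from the buddy
 * allocator.  For example, with 4 KiB base pages and the common MAX_ORDER
 * of 11, a 1 GiB hstate (order 18) is gigantic while a 2 MiB hstate
 * (order 9) is not.
 */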
 427
 428static inline unsigned int pages_per_huge_page(struct hstate *h)
 429{
 430        return 1 << h->order;
 431}
 432
 433static inline unsigned int blocks_per_huge_page(struct hstate *h)
 434{
 435        return huge_page_size(h) / 512;
 436}
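/*
 * blocks_per_huge_page() counts 512-byte sectors, e.g. a 2 MiB huge page
 * spans 2097152 / 512 == 4096 blocks.
 */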
 437
 438#include <asm/hugetlb.h>
 439
 440#ifndef arch_make_huge_pte
 441static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
 442                                       struct page *page, int writable)
 443{
 444        return entry;
 445}
 446#endif
 447
 448static inline struct hstate *page_hstate(struct page *page)
 449{
 450        VM_BUG_ON_PAGE(!PageHuge(page), page);
 451        return size_to_hstate(PAGE_SIZE << compound_order(page));
 452}
 453
 454static inline unsigned hstate_index_to_shift(unsigned index)
 455{
 456        return hstates[index].order + PAGE_SHIFT;
 457}
 458
 459static inline int hstate_index(struct hstate *h)
 460{
 461        return h - hstates;
 462}
 463
 464pgoff_t __basepage_index(struct page *page);
 465
 466/* Return page->index in PAGE_SIZE units */
 467static inline pgoff_t basepage_index(struct page *page)
 468{
 469        if (!PageCompound(page))
 470                return page->index;
 471
 472        return __basepage_index(page);
 473}
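/*
 * Example: for a hugetlbfs page whose head page->index is 0 (indexed in
 * huge-page-sized units), the tail page at compound offset 2 yields
 * basepage_index() == 2, i.e. an offset of two base pages into the file.
 */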
 474
 475extern int dissolve_free_huge_page(struct page *page);
 476extern int dissolve_free_huge_pages(unsigned long start_pfn,
 477                                    unsigned long end_pfn);
 478static inline bool hugepage_migration_supported(struct hstate *h)
 479{
 480#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 481        if ((huge_page_shift(h) == PMD_SHIFT) ||
 482                (huge_page_shift(h) == PGDIR_SHIFT))
 483                return true;
 484        else
 485                return false;
 486#else
 487        return false;
 488#endif
 489}
 490
 491static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
 492                                           struct mm_struct *mm, pte_t *pte)
 493{
 494        if (huge_page_size(h) == PMD_SIZE)
 495                return pmd_lockptr(mm, (pmd_t *) pte);
 496        VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
 497        return &mm->page_table_lock;
 498}
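/*
 * Lock granularity: PMD-sized huge pages can use the split PMD page table
 * lock (when enabled), while larger sizes fall back to the single
 * mm->page_table_lock.  huge_pte_lock() at the end of this header wraps
 * this lookup and actually takes the lock.
 */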
 499
 500#ifndef hugepages_supported
  501/*
  502 * Some platforms decide whether they support huge pages at boot
  503 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
  504 * when there is no such support.
  505 */
 506#define hugepages_supported() (HPAGE_SHIFT != 0)
 507#endif
 508
 509void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
 510
 511static inline void hugetlb_count_add(long l, struct mm_struct *mm)
 512{
 513        atomic_long_add(l, &mm->hugetlb_usage);
 514}
 515
 516static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
 517{
 518        atomic_long_sub(l, &mm->hugetlb_usage);
 519}
 520
 521#ifndef set_huge_swap_pte_at
 522static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
 523                                        pte_t *ptep, pte_t pte, unsigned long sz)
 524{
 525        set_huge_pte_at(mm, addr, ptep, pte);
 526}
 527#endif
 528#else   /* CONFIG_HUGETLB_PAGE */
 529struct hstate {};
 530#define alloc_huge_page(v, a, r) NULL
 531#define alloc_huge_page_node(h, nid) NULL
 532#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
 533#define alloc_huge_page_noerr(v, a, r) NULL
 534#define alloc_bootmem_huge_page(h) NULL
 535#define hstate_file(f) NULL
 536#define hstate_sizelog(s) NULL
 537#define hstate_vma(v) NULL
 538#define hstate_inode(i) NULL
 539#define page_hstate(page) NULL
 540#define huge_page_size(h) PAGE_SIZE
 541#define huge_page_mask(h) PAGE_MASK
 542#define vma_kernel_pagesize(v) PAGE_SIZE
 543#define vma_mmu_pagesize(v) PAGE_SIZE
 544#define huge_page_order(h) 0
 545#define huge_page_shift(h) PAGE_SHIFT
 546static inline bool hstate_is_gigantic(struct hstate *h)
 547{
 548        return false;
 549}
 550
 551static inline unsigned int pages_per_huge_page(struct hstate *h)
 552{
 553        return 1;
 554}
 555
 556static inline unsigned hstate_index_to_shift(unsigned index)
 557{
 558        return 0;
 559}
 560
 561static inline int hstate_index(struct hstate *h)
 562{
 563        return 0;
 564}
 565
 566static inline pgoff_t basepage_index(struct page *page)
 567{
 568        return page->index;
 569}
 570
 571static inline int dissolve_free_huge_page(struct page *page)
 572{
 573        return 0;
 574}
 575
 576static inline int dissolve_free_huge_pages(unsigned long start_pfn,
 577                                           unsigned long end_pfn)
 578{
 579        return 0;
 580}
 581
 582static inline bool hugepage_migration_supported(struct hstate *h)
 583{
 584        return false;
 585}
 586
 587static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
 588                                           struct mm_struct *mm, pte_t *pte)
 589{
 590        return &mm->page_table_lock;
 591}
 592
 593static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
 594{
 595}
 596
 597static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
 598{
 599}
 600
 601static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
 602                                        pte_t *ptep, pte_t pte, unsigned long sz)
 603{
 604}
 605#endif  /* CONFIG_HUGETLB_PAGE */
 606
 607static inline spinlock_t *huge_pte_lock(struct hstate *h,
 608                                        struct mm_struct *mm, pte_t *pte)
 609{
 610        spinlock_t *ptl;
 611
 612        ptl = huge_pte_lockptr(h, mm, pte);
 613        spin_lock(ptl);
 614        return ptl;
 615}
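/*
 * Usage sketch (illustrative only, error handling elided): look up the huge
 * PTE, take its lock, operate on the entry, then unlock.
 *
 *	pte_t *ptep = huge_pte_offset(mm, addr, huge_page_size(h));
 *	spinlock_t *ptl;
 *
 *	if (ptep) {
 *		ptl = huge_pte_lock(h, mm, ptep);
 *		... examine or update the entry ...
 *		spin_unlock(ptl);
 *	}
 */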
 616
 617#endif /* _LINUX_HUGETLB_H */
 618