/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format that differs
 * from the usual page-table layout in order to support multiple
 * hugepage sizes (powerpc introduced this with "powerpc/mm: Allow more
 * flexible layouts for hugepage pagetables"). Generic code falls back
 * to these no-op definitions.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
			      unsigned pdshift, unsigned long end,
			      int write, struct page **pages, int *nr)
{
	return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		       unsigned pdshift, unsigned long end,
		       int write, struct page **pages, int *nr);
#endif


#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes
				 * both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to
				 * satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
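
/*
 * Illustrative sketch (not from the original header): for_each_hstate()
 * visits every registered hugetlb page size, e.g. to dump pool counters:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free of %lu\n", h->name,
 *			h->free_huge_pages, h->nr_huge_pages);
 */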

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
			  struct vm_area_struct *vma,
			  vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
			     long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
				pgoff_t idx, unsigned long address);
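
/*
 * Illustrative pairing (a sketch, mirroring how the fault path uses the
 * table): hash the faulting (mapping, index, address) tuple, then
 * serialize on the resulting mutex slot:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(h, mapping, idx, address);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */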

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
					pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pd(vma, addr, hpd, flags, pdshift)	NULL
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define follow_huge_pgd(mm, addr, pgd, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling)	({ BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
				src_addr, pagep)	({ BUG(); 0; })
#define huge_pte_offset(mm, address, sz)	0

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)
#define move_hugetlb_state(old, new, reason)	do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

#endif /* !CONFIG_HUGETLB_PAGE */

/*
 * hugepages at the page global directory. If an arch supports hugepages
 * at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE	"anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file, so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}


#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32

/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}
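
/*
 * Illustrative sketch (not part of the original header): mmap()-style
 * callers pass the MAP_HUGE_* size encoding, so for MAP_HUGE_2MB
 * page_size_log is 21 and the 2 MB hstate is returned (if registered):
 *
 *	struct hstate *h = hstate_sizelog((flags >> MAP_HUGE_SHIFT) &
 *					  MAP_HUGE_MASK);
 */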

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
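
/*
 * Worked example (illustrative, assuming x86-64 with 2 MB hugepages,
 * i.e. order == 9 and PAGE_SIZE == 4 KB): huge_page_size() is
 * 4 KB << 9 = 2 MB, huge_page_shift() is 9 + 12 = 21,
 * pages_per_huge_page() is 1 << 9 = 512, and blocks_per_huge_page()
 * is 2 MB / 512 = 4096 512-byte sectors.
 */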

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check differs from the migration check above: it
 * decides whether a huge page may be placed in a movable zone. That
 * only makes sense for sizes that are migratable at all, and the page
 * must also be small enough that migrating it out of a movable zone
 * stays feasible; mere presence in a movable zone does not make
 * migration feasible. So gigantic sizes (e.g. 1 GB on x86), even when
 * migration is supported for them, are never placed in movable zones;
 * CMA is the preferred choice for huge pages that require migration
 * under that constraint.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot time.
 * Some of them, such as powerpc, set HPAGE_SHIFT to 0 when there is
 * no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
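
/*
 * Illustrative sketch (not from the original header): the two hooks
 * above act as a start/commit pair on a single huge PTE, e.g. in a
 * protection-change walk:
 *
 *	pte_t old_pte, pte;
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
 */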

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
#define alloc_huge_page_vma(h, vma, address) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
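
/*
 * Typical usage (a sketch, not part of the original header): take the
 * per-huge-PTE lock, read or update the entry, then drop the lock:
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *	pte_t entry = huge_ptep_get(ptep);
 *	...
 *	spin_unlock(ptl);
 */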

#endif /* _LINUX_HUGETLB_H */