/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * For HugeTLB pages there is more metadata to store than fits in the head
 * struct page, so selected tail struct pages are used as well.  To avoid
 * conflicts as more tail pages are put to use, the indexes of those tail
 * pages are gathered into this enumeration so that every access to the
 * extra metadata is explicit about which subpage it touches.
 */
enum {
	SUBPAGE_INDEX_SUBPOOL = 1,	/* subpool pointer in page[1].private */
#ifdef CONFIG_CGROUP_HUGETLB
	SUBPAGE_INDEX_CGROUP,		/* hugetlb cgroup pointer */
	SUBPAGE_INDEX_CGROUP_RSVD,	/* reservation hugetlb cgroup pointer */
	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
#endif
	__NR_USED_SUBPAGE,
};

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes
				 * both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to
				 * satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here.  If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  Note that the to element is the first index
 * past the end of the region, so the arithmetic 4(to) - 0(from) gives the
 * 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map.  These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
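
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * walking every registered huge page size with for_each_hstate, assuming
 * hstates have already been set up during boot:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("hstate %s: %lu byte pages\n",
 *			h->name, huge_page_size(h));
 */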

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
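
/*
 * Illustrative sketch (hypothetical caller, mirroring what hugetlbfs does at
 * mount time): create a subpool capped at max_hpages and guaranteeing
 * min_hpages from the global pool, then drop the reference on teardown:
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(&default_hstate, 1024, 16);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */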

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				enum mcopy_atomic_mode mode,
				struct page **pagep);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
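
/*
 * Illustrative sketch (hypothetical caller): the fault mutex table is keyed
 * by a hash of the mapping and huge page index, serializing faults on the
 * same page.  Users follow the pattern seen in mm/hugetlb.c:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... fault in or populate the page at idx ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */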

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long *addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *nonblocking)
{
	BUG();
	return 0;
}

static inline struct page *follow_huge_addr(struct mm_struct *mm,
					unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
			struct mm_struct *src, struct vm_area_struct *vma)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo(void)
{
}

static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
				unsigned long address, hugepd_t hpd, int flags,
				int pdshift)
{
	return NULL;
}

static inline struct page *follow_huge_pmd(struct mm_struct *mm,
				unsigned long address, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pud(struct mm_struct *mm,
				unsigned long address, pud_t *pud, int flags)
{
	return NULL;
}

static inline struct page *follow_huge_pgd(struct mm_struct *mm,
				unsigned long address, pgd_t *pgd, int flags)
{
	return NULL;
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
						pte_t *dst_pte,
						struct vm_area_struct *dst_vma,
						unsigned long dst_addr,
						unsigned long src_addr,
						enum mcopy_atomic_mode mode,
						struct page **pagep)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}

static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
{
	return 0;
}

static inline void putback_active_hugepage(struct page *page)
{
}

static inline void move_hugetlb_state(struct page *oldpage,
					struct page *newpage, int reason)
{
}

static inline unsigned long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */

/*
 * Hugepages at the page global directory.  If an arch supports hugepages at
 * the pgd level, it needs to define these itself.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;	/* inodes allowed */
	long	free_inodes;	/* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct ucounts **ucounts, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)		false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct ucounts **ucounts, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

/*
 * hugetlb page specific state flags.  These flags are located in page.private
 * of the hugetlb head page.  Functions created via the macros below should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time.  Cleared when page is fully instantiated.  Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization:  Examined or modified by code that knows it has
 *	the only reference to the page, i.e. after allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables.  Indicates the page is a candidate for
 *	migration.
 *	Synchronization:  Initially set after new page allocation with no
 *	locking.  When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator.  Typically used for migration target pages when no pages
 *	are available in the pool.  The hugetlb free page path checks this
 *	flag and frees directly to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has the only reference.  All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	HPG_vmemmap_optimized,
	__NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void ClearHPage##uname(struct page *page)	\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)				\

/*
 * Create functions associated with hugetlb page flags.
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
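
/*
 * Illustrative sketch: each HPAGEFLAG() line above expands into three static
 * inlines.  HPAGEFLAG(Freed, freed), for example, yields HPageFreed(),
 * SetHPageFreed() and ClearHPageFreed(), so pool code can do:
 *
 *	SetHPageFreed(page);		// page placed on a free list
 *	if (HPageFreed(page))
 *		...
 *	ClearHPageFreed(page);		// page taken off the free list
 */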

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
	unsigned int nr_free_vmemmap_pages;
#endif
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[7];
	struct cftype cgroup_files_legacy[9];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address, struct page *page);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

/*
 * hugetlb page subpool pointer located in hpage[1].private
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
					struct hugepage_subpool *subpool)
{
	set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);
}
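
/*
 * Illustrative sketch: the subpool pointer rides in the private field of the
 * huge page's first tail page (index SUBPAGE_INDEX_SUBPOOL), so a round trip
 * looks like:
 *
 *	hugetlb_set_page_subpool(page, spool);
 *	...
 *	struct hugepage_subpool *spool = hugetlb_page_subpool(page);
 */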

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}
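
/*
 * Illustrative sketch: hstate_sizelog() maps the MAP_HUGE_* / SHM_HUGE_*
 * style log2 page-size encoding onto an hstate.  On a configuration with
 * 2MB huge pages, for instance:
 *
 *	hstate_sizelog(0);	// 0 means the default huge page size
 *	hstate_sizelog(21);	// 1UL << 21 == 2MB -> that hstate, or NULL
 *				// if no 2MB hstate is registered
 */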

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
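
/*
 * Illustrative sketch of the geometry helpers for a 2MB hstate on a 4KB
 * PAGE_SIZE kernel (order 9):
 *
 *	huge_page_order(h)	== 9
 *	huge_page_shift(h)	== 9 + 12 == 21
 *	huge_page_size(h)	== 4096UL << 9 == 2MB
 *	pages_per_huge_page(h)	== 1 << 9 == 512 base pages
 *	blocks_per_huge_page(h)	== 2MB / 512 == 4096 sectors (512B units)
 */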

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
				       vm_flags_t flags)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(page_size(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check differs from the migration check: it decides whether
 * a huge page may be placed in a movable zone at all.  That only makes sense
 * for sizes that are migratable to begin with, and the page must also be
 * practical to migrate out of the zone on demand; mere presence in a movable
 * zone does not make migration feasible.
 *
 * So even though gigantic page sizes are technically migratable, they are
 * not treated as movable: evicting them from a movable zone is not feasible
 * in practice.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}
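
/*
 * Illustrative sketch (hypothetical caller): a migration path that must stay
 * on one node and wants allocation failures to be silent would derive its
 * mask like so before calling alloc_huge_page_nodemask():
 *
 *	gfp_t gfp = htlb_modify_alloc_mask(h, __GFP_THISNODE | __GFP_NOWARN);
 *	struct page *page = alloc_huge_page_nodemask(h, nid, NULL, gfp);
 */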

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot time.
 * Some of them, such as powerpc, set HPAGE_SHIFT to 0 when there is no
 * such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
{
	return -ENOMEM;
}

static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct page *
alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct page *alloc_huge_page_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
extern bool hugetlb_free_vmemmap_enabled;
#else
#define hugetlb_free_vmemmap_enabled	false
#endif

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
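
/*
 * Illustrative sketch (hypothetical caller): huge_pte_lock() returns the
 * lock it took, so the usual pattern is lock, operate, unlock:
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *
 *	... examine or update the huge PTE at ptep ...
 *	spin_unlock(ptl);
 */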

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
extern void __init hugetlb_cma_check(void);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
static inline __init void hugetlb_cma_check(void)
{
}
#endif
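
/*
 * Illustrative sketch: architectures reserve the hugetlb CMA area early in
 * boot, passing the order of a gigantic page; arm64 and x86, for example,
 * call it as:
 *
 *	hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
 */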

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries
 * can implement this.  Otherwise the generic flush_tlb_range() is used.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

#endif	/* _LINUX_HUGETLB_H */