#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;
#ifndef is_hugepd
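/*
 * Some architectures (powerpc, for example) require a hugepage
 * directory (hugepd) page-table format to support multiple hugepage
 * sizes.  This is the generic fallback: is_hugepd() is always false,
 * so the stub gup_huge_pd() below is never reached.
 */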
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd)	(0)
#define __hugepd(x)		((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
			      unsigned pdshift, unsigned long end,
			      int write, struct page **pages, int *nr)
{
	return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		       unsigned pdshift, unsigned long end,
		       int write, struct page **pages, int *nr);
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

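/*
 * A subpool is a per-hugetlbfs-mount pool of huge pages.  max_hpages
 * caps allocations (-1 means no maximum); used_hpages counts both
 * allocated and reserved pages against that cap.  min_hpages reserves
 * a minimum against the global pool (-1 means no minimum), with
 * rsv_hpages tracking pages still reserved to satisfy it.
 */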
struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;
	long used_hpages;

	struct hstate *hstate;
	long min_hpages;
	long rsv_hpages;
};

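/*
 * A resv_map tracks huge page reservations for a mapping as a list of
 * file regions.  region_cache holds preallocated region entries
 * (counted by region_cache_count) so that additions already in
 * progress (adds_in_progress) cannot fail for lack of memory.
 */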
struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
					      long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
				     void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		  unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr,
			     unsigned long src_addr,
			     struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
			  struct vm_area_struct *vma,
			  vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
			     long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
			     struct vm_area_struct *vma,
			     struct address_space *mapping,
			     pgoff_t idx, unsigned long address);

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

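/* arch callbacks */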
pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pd(vma, addr, hpd, flags, pdshift)	NULL
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define follow_huge_pgd(mm, addr, pgd, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling)	({ BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
				src_addr, pagep)	({ BUG(); 0; })
#define huge_pte_offset(mm, address, sz)	0

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */

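/*
 * hugepages at page global directory. If arch supports
 * hugepages at pgd level, it needs to define this.
 */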
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE	"anon_hugepage"

enum {
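	/*
	 * The file will be used as a shm file so shmfs accounting rules
	 * apply.
	 */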
	HUGETLB_SHMFS_INODE = 1,
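	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */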
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long max_inodes;
	long free_inodes;
	spinlock_t stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
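/* Defines one hugetlb page size */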
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
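	/* cgroup control files */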
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;
#endif
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
			     unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				   unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				      nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			   pgoff_t idx);

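/* arch callback */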
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

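/* Return page->index in PAGE_SIZE units */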
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);
static inline bool hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	if ((huge_page_shift(h) == PMD_SHIFT) ||
	    (huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
#else
	return false;
#endif
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
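/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */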
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif
#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

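/*
 * Take and return the page-table lock covering a huge PTE.  Callers
 * release it with spin_unlock(), e.g.:
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	... examine or update *ptep ...
 *	spin_unlock(ptl);
 */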
static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

#endif /* _LINUX_HUGETLB_H */