#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

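/*
 * A hugepage_subpool carries huge page accounting for one hugetlbfs
 * mount: max_hpages is the mount's page limit (-1 if unlimited) and
 * used_hpages counts pages charged against it; count is a reference
 * count, and the subpool is freed once it drops to zero. (Descriptive
 * summary inferred from the subpool helpers declared below.)
 */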
struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages, used_hpages;
};

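/*
 * Every supported huge page size has an entry in the global hstates[]
 * array (defined later in this header); hugetlb_max_hstate is the
 * number of entries currently registered.
 */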
extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)

struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
void hugepage_put_subpool(struct hugepage_subpool *spool);

int PageHuge(struct page *page);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
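/* sysctl handlers backing /proc/sys/vm/nr_hugepages and friends. */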
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
				     void __user *, size_t *, loff_t *);
#endif

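/*
 * Entry points used by the generic mm code (fork, munmap, get_user_pages)
 * when it encounters a hugetlb VMA.
 */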
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		  unsigned long address, unsigned int flags);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
			  struct vm_area_struct *vma,
			  vm_flags_t vm_flags);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
bool is_hugepage_active(struct page *page);
void copy_huge_page(struct page *dst, struct page *src);

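/*
 * With CONFIG_ARCH_WANT_HUGE_PMD_SHARE, PMD page table pages backing
 * PUD-aligned shared hugetlb mappings can themselves be shared across
 * processes; huge_pmd_share() implements the lookup-or-share step.
 */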
#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
#endif

extern unsigned long hugepages_treat_as_movable;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
			     pud_t *pud, int write);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

#else /* !CONFIG_HUGETLB_PAGE */

static inline int PageHuge(struct page *page)
{
	return 0;
}

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pmd(mm, addr, pmd, write)	NULL
#define follow_huge_pud(mm, addr, pud, write)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define huge_pte_offset(mm, address)	0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
	return 0;
}

#define isolate_huge_page(p, l) false
#define putback_active_hugepage(p)	do {} while (0)
#define is_hugepage_active(x)	false
static inline void copy_huge_page(struct page *dst, struct page *src)
{
}

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */

#define HUGETLB_ANON_FILE	"anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

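/* Per-superblock state for a hugetlbfs mount. */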
#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long max_inodes;	/* inodes allowed */
	long free_inodes;	/* inodes free */
	spinlock_t stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
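/*
 * Create an unlinked file on the internal hugetlbfs mount; used to back
 * MAP_HUGETLB and SHM_HUGETLB mappings. A page_size_log of 0 selects
 * the default huge page size.
 */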
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline int is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return 1;
	if (is_file_shm_hugepages(file))
		return 1;

	return 0;
}

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			0
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

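/*
 * An hstate describes one supported huge page size: its allocation
 * order and address mask, global and per-node page counts, and the
 * free/active lists. next_nid_to_alloc and next_nid_to_free implement
 * round-robin NUMA interleaving when the pool grows or shrinks.
 */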
#define HSTATE_NAME_LEN 32
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;
#endif
};

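/*
 * Huge pages too large for the buddy allocator are carved out of
 * bootmem early in boot; each reservation is recorded in
 * huge_boot_pages and finalized once the page allocator is up.
 */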
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);

/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

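/* Map an inode, file, VMA or size log back to its hstate. */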
static inline struct hstate *hstate_inode(struct inode *i)
{
	struct hugetlbfs_sb_info *hsb;
	hsb = HUGETLBFS_SB(i->i_sb);
	return hsb->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;
	/* 1UL avoids overflowing a plain int for page size logs >= 32 */
	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

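/* Geometry helpers derived from the hstate's allocation order. */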
static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

/* Huge page size expressed in 512-byte blocks */
static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

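/*
 * Index of the base page within its mapping, valid even when @page is
 * a tail page of a compound (huge) page.
 */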
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

extern void dissolve_free_huge_pages(unsigned long start_pfn,
				     unsigned long end_pfn);
int pmd_huge_support(void);
/*
 * Currently hugepage migration is enabled only for pmd-based hugepages.
 * This function will be updated when hugepage migration is more widely
 * supported.
 */
static inline int hugepage_migration_support(struct hstate *h)
{
	return pmd_huge_support() && (huge_page_shift(h) == PMD_SHIFT);
}

#else /* !CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}
#define dissolve_free_huge_pages(s, e)	do {} while (0)
#define pmd_huge_support()	0
#define hugepage_migration_support(h)	0
#endif /* !CONFIG_HUGETLB_PAGE */

#endif /* _LINUX_HUGETLB_H */