#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/* Accounting pool that limits the huge pages available to a hugetlbfs mount. */
struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages, or -1 if no maximum */
	long used_hpages;	/* Pages charged against max_hpages */
};
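
/*
 * Lifecycle sketch (an assumption based on how hugetlbfs typically uses this
 * pool, not something defined in this header):
 *
 *	spool = hugepage_new_subpool(nr_blocks);	// at mount time, size= given
 *	...
 *	hugepage_put_subpool(spool);			// at unmount
 */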

/* Reservation map: a kref-counted list of reserved file regions for a mapping. */
struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
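
/*
 * Example (sketch, not part of the original header): walking every registered
 * huge page size under CONFIG_HUGETLB_PAGE.
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */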

struct hugepage_subpool *hugepage_new_subpool(long nr_blocks);
void hugepage_put_subpool(struct hugepage_subpool *spool);

int PageHuge(struct page *page);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
			struct vm_area_struct *vma,
			vm_flags_t vm_flags);
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
bool is_hugepage_active(struct page *page);
void free_huge_page(struct page *page);

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
#endif

extern unsigned long hugepages_treat_as_movable;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int write);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int write);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

#else	/* !CONFIG_HUGETLB_PAGE */

static inline int PageHuge(struct page *page)
{
	return 0;
}

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pmd(mm, addr, pmd, write)	NULL
#define follow_huge_pud(mm, addr, pud, write)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling)	({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define huge_pte_offset(mm, address)	0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
	return 0;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)
#define is_hugepage_active(x)	false

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

#endif	/* !CONFIG_HUGETLB_PAGE */

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long max_inodes;	/* inodes allowed */
	long free_inodes;	/* inodes free */
	spinlock_t stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

/* True for hugetlbfs-backed files and SysV shm segments created with SHM_HUGETLB. */
static inline int is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return 1;
	if (is_file_shm_hugepages(file))
		return 1;

	return 0;
}

#else	/* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)	0
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif	/* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32

/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;
#endif
};

struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);

/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	struct hugetlbfs_sb_info *hsb;
	hsb = HUGETLBFS_SB(i->i_sb);
	return hsb->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

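/*
 * page_size_log is the huge page shift requested by userspace; 0 means
 * "use the default huge page size".  (Assumption: callers such as mmap()
 * with MAP_HUGETLB and shmget() with SHM_HUGETLB encode the desired shift
 * in their flags and pass it down here.)
 */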
static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

/* Huge page size expressed in 512-byte disk blocks */
static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

extern void dissolve_free_huge_pages(unsigned long start_pfn,
				     unsigned long end_pfn);
static inline int hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	return huge_page_shift(h) == PMD_SHIFT;
#else
	return 0;
#endif
}

/*
 * pmd-sized huge pages use the split pmd page table lock; larger sizes
 * fall back to the per-mm page_table_lock.
 */
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

static inline bool hugepages_supported(void)
{
	/*
	 * Some platforms decide whether they support huge pages at boot
	 * time; on those (such as powerpc), HPAGE_SHIFT is set to 0 when
	 * there is no such support.
	 */
	return HPAGE_SHIFT != 0;
}
#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}
#define dissolve_free_huge_pages(s, e) do {} while (0)
#define hugepage_migration_supported(h) 0

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
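
/*
 * Typical locking pattern (a sketch, assuming a caller that already looked up
 * a huge PTE with huge_pte_offset(); not a definitive example):
 *
 *	spinlock_t *ptl;
 *	pte_t *ptep = huge_pte_offset(mm, address);
 *
 *	if (ptep) {
 *		ptl = huge_pte_lock(h, mm, ptep);
 *		... examine or update the entry ...
 *		spin_unlock(ptl);
 *	}
 */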

#endif /* _LINUX_HUGETLB_H */