#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes
				 * both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to
				 * satisfy minimum size. */
};

/*
 * A resv_map records which ranges of a mapping hold huge page
 * reservations, as a list of [from, to) regions plus a small cache of
 * spare region entries.
 */
struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
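/*
 * Illustrative sketch (not part of the upstream header): for_each_hstate()
 * visits every registered huge page size, e.g.:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */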

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);
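/*
 * Usage sketch (hedged; modelled on how a hugetlbfs mount would be
 * expected to manage its subpool, not a verbatim excerpt):
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(h, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);	(drops the initial reference)
 */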

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
				struct vm_area_struct *vma,
				struct address_space *mapping,
				pgoff_t idx, unsigned long address);
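/*
 * Illustrative sketch (hedged): fault handlers are expected to serialize
 * on a hashed mutex from the table above, along the lines of:
 *
 *	u32 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */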

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
#endif

extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);
#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define huge_pte_offset(mm, address)	0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
	return 0;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */

/*
 * hugepages at page global directory. If an arch supports
 * hugepages at pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
	BUG();
	return 0;
}
#endif

#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format that can
 * support multiple hugepage sizes. For example, commit a4fe3ce76
 * ("powerpc/mm: Allow more flexible layouts for hugepage pagetables")
 * introduced this on powerpc, allowing more flexible hugepage
 * pagetable layouts.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
			      unsigned pdshift, unsigned long end,
			      int write, struct page **pages, int *nr)
{
	return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		       unsigned pdshift, unsigned long end,
		       int write, struct page **pages, int *nr);
#endif

#define HUGETLB_ANON_FILE	"anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE     = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline int is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return 1;
	if (is_file_shm_hugepages(file))
		return 1;

	return 0;
}

#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			0
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;
#endif
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	struct hugetlbfs_sb_info *hsb;
	hsb = HUGETLBFS_SB(i->i_sb);
	return hsb->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}
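/*
 * Example (illustrative, not from this file): page_size_log is the
 * MAP_HUGE_SHIFT-encoded size from mmap()/shmget(), so a 2 MB request
 * arrives as page_size_log == 21 and hstate_sizelog() looks up
 * size_to_hstate(1UL << 21), i.e. size_to_hstate(0x200000).
 */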

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
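/*
 * Worked example (illustrative): with 4 KB base pages, a 2 MB hstate has
 * order 9, so huge_page_size() = 4096 << 9 = 2 MB, pages_per_huge_page()
 * = 1 << 9 = 512, and blocks_per_huge_page() = 2 MB / 512 = 4096
 * 512-byte sectors.
 */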

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}
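/*
 * Example (illustrative): for the third 4 KB subpage of a 2 MB huge page
 * whose head sits at index 0, basepage_index() yields 2, i.e. the offset
 * expressed in base-page units rather than huge-page units.
 */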

extern void dissolve_free_huge_pages(unsigned long start_pfn,
				     unsigned long end_pfn);
static inline int hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	return huge_page_shift(h) == PMD_SHIFT;
#else
	return 0;
#endif
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}
#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}
#define dissolve_free_huge_pages(s, e)	do {} while (0)
#define hugepage_migration_supported(h)	0

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
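/*
 * Usage sketch (hedged; mirrors the common page-table-walker pattern
 * rather than any one caller):
 *
 *	spinlock_t *ptl;
 *	pte_t *ptep = huge_pte_offset(mm, address & huge_page_mask(h));
 *
 *	if (ptep) {
 *		ptl = huge_pte_lock(h, mm, ptep);
 *		... inspect or modify the huge PTE ...
 *		spin_unlock(ptl);
 *	}
 */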

#endif /* _LINUX_HUGETLB_H */