/*
 * Generic hugetlb support.
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
DEFINE_SPINLOCK(hugetlb_lock);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
	bool free = (spool->count == 0) && (spool->used_hpages == 0);

	spin_unlock(&spool->lock);

	/*
	 * If no pages are used and no other handles to the subpool
	 * remain, free the subpool.
	 */
	if (free)
		kfree(spool);
}

struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
{
	struct hugepage_subpool *spool;

	spool = kmalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = nr_blocks;
	spool->used_hpages = 0;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	spin_lock(&spool->lock);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool);
}

static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				      long delta)
{
	int ret = 0;

	if (!spool)
		return 0;

	spin_lock(&spool->lock);
	if ((spool->used_hpages + delta) <= spool->max_hpages) {
		spool->used_hpages += delta;
	} else {
		ret = -ENOMEM;
	}
	spin_unlock(&spool->lock);

	return ret;
}

static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	if (!spool)
		return;

	spin_lock(&spool->lock);
	spool->used_hpages -= delta;

	/* The final put may also need to free the subpool itself. */
	unlock_or_release_subpool(spool);
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}
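
/*
 * Usage sketch (illustrative only, not a new API): subpool charges are
 * taken before a page is handed out and returned when the page is freed:
 *
 *	if (hugepage_subpool_get_pages(spool, 1))	// may fail: -ENOMEM
 *		return ERR_PTR(-ENOSPC);
 *	...
 *	hugepage_subpool_put_pages(spool, 1);		// on free/error paths
 *
 * alloc_huge_page() and free_huge_page() below follow exactly this pattern.
 */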

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are protected by a combination of the mmap_sem
 * and the hugetlb_instantiation_mutex.  To access or modify a region the
 * caller must hold the hugetlb_instantiation_mutex:
 *
 *	down_write(&mm->mmap_sem);
 * or
 *	down_read(&mm->mmap_sem);
 *	mutex_lock(&hugetlb_instantiation_mutex);
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/*
		 * If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it.
		 */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/*
	 * If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation.
	 */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/*
		 * We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation.
		 */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}
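
/*
 * Example of the two-phase reservation protocol built on the helpers above
 * (a sketch of what vma_needs_reservation()/vma_commit_reservation() do
 * further down; struct resv_map is defined below):
 *
 *	chg = region_chg(&resv->regions, idx, idx + 1);	// count + placeholder
 *	if (chg < 0)
 *		return chg;				// -ENOMEM
 *	...charge 'chg' pages against the subpool/cgroup...
 *	region_add(&resv->regions, idx, idx + 1);	// commit, cannot fail
 *
 * region_chg() pre-allocates any list entry it might need, so the later
 * region_add() over the same range never has to allocate memory.
 */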

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

static long region_count(struct list_head *head, long f, long t)
{
	struct file_region *rg;
	long chg = 0;

	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}

/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	struct hstate *hstate;

	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	hstate = hstate_vma(vma);

	return 1UL << (hstate->order + PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA. In the majority
 * of cases, the page size used by the kernel matches the MMU size. On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}
#endif

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}
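
/*
 * Illustration (assumes kmalloc alignment leaves the low two bits of a
 * resv_map pointer clear, which the flag definitions above rely on):
 *
 *	unsigned long v = get_vma_private_data(vma);
 *	struct resv_map *map = (struct resv_map *)(v & ~HPAGE_RESV_MASK);
 *	int owner = (v & HPAGE_RESV_OWNER) != 0;
 *
 * This is exactly the decoding performed by vma_resv_map() and
 * is_vma_resv_set() below.
 */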

struct resv_map {
	struct kref refs;
	struct list_head regions;
};

static struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	if (!resv_map)
		return NULL;

	kref_init(&resv_map->refs);
	INIT_LIST_HEAD(&resv_map->regions);

	return resv_map;
}

static void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);

	/* Clear out any active regions before we release the map. */
	region_truncate(&resv_map->regions, 0);
	kfree(resv_map);
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	return NULL;
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Decrement the reserved pages in the hugepage pool by one */
static void decrement_hugepage_resv_vma(struct hstate *h,
			struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NORESERVE)
		return;

	if (vma->vm_flags & VM_MAYSHARE) {
		/* Shared mappings always use reserves */
		h->resv_huge_pages--;
	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Only the process that called mmap() has reserves for
		 * private mappings.
		 */
		h->resv_huge_pages--;
	}
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON(!is_vm_hugetlb_page(vma));
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_MAYSHARE)
		return 1;
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return 1;
	return 0;
}

static void copy_gigantic_page(struct page *dst, struct page *src)
{
	int i;
	struct hstate *h = page_hstate(src);
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < pages_per_huge_page(h); ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	struct hstate *h = page_hstate(src);

	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
		copy_gigantic_page(dst, src);
		return;
	}

	might_sleep();
	for (i = 0; i < pages_per_huge_page(h); i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (list_empty(&h->hugepage_freelists[nid]))
		return NULL;
	page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
	list_move(&page->lru, &h->hugepage_activelist);
	set_page_refcounted(page);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	return page;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve)
{
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	unsigned int cpuset_mems_cookie;

retry_cpuset:
	cpuset_mems_cookie = get_mems_allowed();
	zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask, &mpol, &nodemask);
	/*
	 * A child process with MAP_PRIVATE mappings created by their parent
	 * have no page reserves. This check ensures that reservations are
	 * not "stolen". The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						MAX_NR_ZONES - 1, nodemask) {
		if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
			page = dequeue_huge_page_node(h, zone_to_nid(zone));
			if (page) {
				if (!avoid_reserve)
					decrement_hugepage_resv_vma(h, vma);
				break;
			}
		}
	}

	mpol_cond_put(mpol);
	if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
		goto retry_cpuset;
	return page;

err:
	mpol_cond_put(mpol);
	return NULL;
}

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	VM_BUG_ON(h->order >= MAX_ORDER);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	VM_BUG_ON(hugetlb_cgroup_from_page(page));
	set_compound_page_dtor(page, NULL);
	set_page_refcounted(page);
	arch_release_hugepage(page);
	__free_pages(page, huge_page_order(h));
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

static void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct hugepage_subpool *spool =
		(struct hugepage_subpool *)page_private(page);

	set_page_private(page, 0);
	page->mapping = NULL;
	BUG_ON(page_count(page));
	BUG_ON(page_mapcount(page));

	spin_lock(&hugetlb_lock);
	hugetlb_cgroup_uncharge_page(hstate_index(h),
				     pages_per_huge_page(h), page);
	if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
		/* remove the page from active list */
		list_del(&page->lru);
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		arch_clear_hugepage_flags(page);
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
	hugepage_subpool_put_pages(spool, 1);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	INIT_LIST_HEAD(&page->lru);
	set_compound_page_dtor(page, free_huge_page);
	spin_lock(&hugetlb_lock);
	set_hugetlb_cgroup(page, NULL);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
	put_page(page); /* free it into the hugepage allocator */
}

static void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		__SetPageTail(p);
		set_page_count(p, 0);
		p->first_page = page;
	}
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	compound_page_dtor *dtor;

	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	dtor = get_compound_page_dtor(page);

	return dtor == free_huge_page;
}
EXPORT_SYMBOL_GPL(PageHuge);
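
/*
 * Example (illustrative): PageHuge() may be handed any page, including a
 * tail page, since it resolves the compound head itself:
 *
 *	if (PageHuge(page))
 *		h = page_hstate(compound_head(page));
 *
 * page_hstate() expects a head page, hence the explicit compound_head()
 * in the sketch above.
 */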

static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	if (h->order >= MAX_ORDER)
		return NULL;

	page = alloc_pages_exact_node(nid,
		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
						__GFP_REPEAT|__GFP_NOWARN,
		huge_page_order(h));
	if (page) {
		if (arch_prepare_hugepage(page)) {
			__free_pages(page, huge_page_order(h));
			return NULL;
		}
		prep_new_huge_page(h, page, nid);
	}

	return page;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node(nid, *nodes_allowed);
	if (nid == MAX_NUMNODES)
		nid = first_node(*nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
	struct page *page;
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	next_nid = start_nid;

	do {
		page = alloc_fresh_huge_page_node(h, next_nid);
		if (page) {
			ret = 1;
			break;
		}
		next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	} while (next_nid != start_nid);

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
							 bool acct_surplus)
{
	int start_nid;
	int next_nid;
	int ret = 0;

	start_nid = hstate_next_node_to_free(h, nodes_allowed);
	next_nid = start_nid;

	do {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
		    !list_empty(&h->hugepage_freelists[next_nid])) {
			struct page *page =
				list_entry(h->hugepage_freelists[next_nid].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[next_nid]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[next_nid]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
		next_nid = hstate_next_node_to_free(h, nodes_allowed);
	} while (next_nid != start_nid);

	return ret;
}

static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
{
	struct page *page;
	unsigned int r_nid;

	if (h->order >= MAX_ORDER)
		return NULL;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, to satisfy
	 * B's allocation when the surplus count is in excess of the
	 * pool.
	 *
	 * The counters are racy, but that is ok: the window in which
	 * processes see each other's transient over-commitment is small,
	 * and the worst case is that the pool temporarily grows larger
	 * than it should.  We accept this in exchange for a simpler
	 * implementation: adjust the global counters up front and roll
	 * them back below if the allocation fails.
	 */
	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		h->nr_huge_pages++;
		h->surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	if (nid == NUMA_NO_NODE)
		page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
				   __GFP_REPEAT|__GFP_NOWARN,
				   huge_page_order(h));
	else
		page = alloc_pages_exact_node(nid,
			htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
			__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));

	if (page && arch_prepare_hugepage(page)) {
		__free_pages(page, huge_page_order(h));
		page = NULL;
	}

	spin_lock(&hugetlb_lock);
	if (page) {
		INIT_LIST_HEAD(&page->lru);
		r_nid = page_to_nid(page);
		set_compound_page_dtor(page, free_huge_page);
		set_hugetlb_cgroup(page, NULL);
		/*
		 * We incremented the global counters already
		 */
		h->nr_huge_pages_node[r_nid]++;
		h->surplus_huge_pages_node[r_nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		h->nr_huge_pages--;
		h->surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}

/*
 * This allocation function is useful in the context where vma is irrelevant.
 * E.g. soft-offlining uses this function because it only cares about the
 * physical address of the error page.
 */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_node(h, nid);
	spin_unlock(&hugetlb_lock);

	if (!page)
		page = alloc_buddy_huge_page(h, nid);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;
	bool alloc_ok = true;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		if (!page) {
			alloc_ok = false;
			break;
		}
		list_add(&page->lru, &surplus_list);
	}
	allocated += i;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;
		/*
		 * We were not able to allocate enough pages to
		 * satisfy the entire reservation so we free what
		 * we've allocated so far.
		 */
		goto free;
	}
	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the needed number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;

	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		/*
		 * This page is now managed by the hugetlb allocator and
		 * has no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON(page_count(page));
		enqueue_huge_page(h, page);
	}
free:
	spin_unlock(&hugetlb_lock);

	/* Free unnecessary surplus pages to the buddy allocator */
	if (!list_empty(&surplus_list)) {
		list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
			put_page(page);
		}
	}
	spin_lock(&hugetlb_lock);

	return ret;
}

/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they
 * were never used.
 * Called with hugetlb_lock held.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;

	/* Uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;

	/* Cannot return gigantic pages currently */
	if (h->order >= MAX_ORDER)
		return;

	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory. Iterate across these nodes
	 * until we can no longer free unreserved surplus pages. This occurs
	 * when the nodes with surplus pages have no free pages.
	 * free_pool_huge_page() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 */
	while (nr_pages--) {
		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
			break;
	}
}

/*
 * Determine if the huge page at addr within the vma has an associated
 * reservation.  Where it does not we will need to logically increase
 * reservation and actually increase subpool usage before an allocation
 * can occur.  Where any new reservation would be required the
 * reservation change is prepared, but not committed.  Once the page
 * has been allocated it is added to the reservation map and committed,
 * via vma_commit_reservation().  This makes the reservation effectively
 * atomic with respect to the reservation map.
 */
static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		return region_chg(&inode->i_mapping->private_list,
							idx, idx + 1);

	} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		return 1;

	} else {
		long err;
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		err = region_chg(&reservations->regions, idx, idx + 1);
		if (err < 0)
			return err;
		return 0;
	}
}
static void vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;

	if (vma->vm_flags & VM_MAYSHARE) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		region_add(&inode->i_mapping->private_list, idx, idx + 1);

	} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		pgoff_t idx = vma_hugecache_offset(h, vma, addr);
		struct resv_map *reservations = vma_resv_map(vma);

		/* Mark this page used in the map. */
		region_add(&reservations->regions, idx, idx + 1);
	}
}

static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hugepage_subpool *spool = subpool_vma(vma);
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	long chg;
	int ret, idx;
	struct hugetlb_cgroup *h_cg;

	idx = hstate_index(h);
	/*
	 * Processes that did not create the mapping have no reserves and
	 * have not been accounted against the subpool limit.  Check that
	 * the subpool limit can be met before satisfying the allocation.
	 * MAP_NORESERVE mappings may also need pages and subpool limit
	 * allocated if no reserved mapping overlaps.
	 */
	chg = vma_needs_reservation(h, vma, addr);
	if (chg < 0)
		return ERR_PTR(-ENOMEM);
	if (chg)
		if (hugepage_subpool_get_pages(spool, chg))
			return ERR_PTR(-ENOSPC);

	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
	if (ret) {
		hugepage_subpool_put_pages(spool, chg);
		return ERR_PTR(-ENOSPC);
	}
	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
	if (page) {
		/* update page cgroup details */
		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
					     h_cg, page);
		spin_unlock(&hugetlb_lock);
	} else {
		spin_unlock(&hugetlb_lock);
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		if (!page) {
			hugetlb_cgroup_uncharge_cgroup(idx,
						       pages_per_huge_page(h),
						       h_cg);
			hugepage_subpool_put_pages(spool, chg);
			return ERR_PTR(-ENOSPC);
		}
		spin_lock(&hugetlb_lock);
		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
					     h_cg, page);
		list_move(&page->lru, &h->hugepage_activelist);
		spin_unlock(&hugetlb_lock);
	}

	set_page_private(page, (unsigned long)spool);

	vma_commit_reservation(h, vma, addr);
	return page;
}
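
/*
 * Caller-side sketch (this is how hugetlb_no_page()/hugetlb_cow() below
 * consume alloc_huge_page(); shown here only for orientation):
 *
 *	page = alloc_huge_page(vma, address, 0);
 *	if (IS_ERR(page))
 *		return (PTR_ERR(page) == -ENOMEM) ? VM_FAULT_OOM
 *						  : VM_FAULT_SIGBUS;
 *
 * i.e. the return value is a page pointer or an ERR_PTR(), never NULL.
 */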
1179
1180int __weak alloc_bootmem_huge_page(struct hstate *h)
1181{
1182 struct huge_bootmem_page *m;
1183 int nr_nodes = nodes_weight(node_states[N_MEMORY]);
1184
1185 while (nr_nodes) {
1186 void *addr;
1187
1188 addr = __alloc_bootmem_node_nopanic(
1189 NODE_DATA(hstate_next_node_to_alloc(h,
1190 &node_states[N_MEMORY])),
1191 huge_page_size(h), huge_page_size(h), 0);
1192
1193 if (addr) {
1194
1195
1196
1197
1198
1199 m = addr;
1200 goto found;
1201 }
1202 nr_nodes--;
1203 }
1204 return 0;
1205
1206found:
1207 BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
1208
1209 list_add(&m->list, &huge_boot_pages);
1210 m->hstate = h;
1211 return 1;
1212}
1213
1214static void prep_compound_huge_page(struct page *page, int order)
1215{
1216 if (unlikely(order > (MAX_ORDER - 1)))
1217 prep_compound_gigantic_page(page, order);
1218 else
1219 prep_compound_page(page, order);
1220}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct hstate *h = m->hstate;
		struct page *page;

#ifdef CONFIG_HIGHMEM
		page = pfn_to_page(m->phys >> PAGE_SHIFT);
		free_bootmem_late((unsigned long)m,
				  sizeof(struct huge_bootmem_page));
#else
		page = virt_to_page(m);
#endif
		__ClearPageReserved(page);
		WARN_ON(page_count(page) != 1);
		prep_compound_huge_page(page, h->order);
		prep_new_huge_page(h, page, page_to_nid(page));
		/*
		 * If we had gigantic hugepages allocated at boot time, we need
		 * to restore the 'stolen' pages to totalram_pages in order to
		 * fix confusing memory reports from free(1) and other
		 * side-effects, like CommitLimit going negative.
		 */
		if (h->order > (MAX_ORDER - 1))
			totalram_pages += 1 << h->order;
	}
}

static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (h->order >= MAX_ORDER) {
			if (!alloc_bootmem_huge_page(h))
				break;
		} else if (!alloc_fresh_huge_page(h,
					 &node_states[N_MEMORY]))
			break;
	}
	h->max_huge_pages = i;
}

static void __init hugetlb_init_hstates(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		/* oversize hugepages were init'ed in early boot */
		if (h->order < MAX_ORDER)
			hugetlb_hstate_alloc_pages(h);
	}
}

static char * __init memfmt(char *buf, unsigned long n)
{
	if (n >= (1UL << 30))
		sprintf(buf, "%lu GB", n >> 30);
	else if (n >= (1UL << 20))
		sprintf(buf, "%lu MB", n >> 20);
	else
		sprintf(buf, "%lu KB", n >> 10);
	return buf;
}

static void __init report_hugepages(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		char buf[32];
		pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
			memfmt(buf, huge_page_size(h)),
			h->free_huge_pages);
	}
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	int i;

	if (h->order >= MAX_ORDER)
		return;

	for_each_node_mask(i, *nodes_allowed) {
		struct page *page, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(page, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(h, page);
			h->free_huge_pages--;
			h->free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
}
#endif

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
				int delta)
{
	int start_nid, next_nid;
	int ret = 0;

	VM_BUG_ON(delta != -1 && delta != 1);

	if (delta < 0)
		start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
	else
		start_nid = hstate_next_node_to_free(h, nodes_allowed);
	next_nid = start_nid;

	do {
		int nid = next_nid;
		if (delta < 0)  {
			/*
			 * To shrink on this node, there must be a surplus page
			 */
			if (!h->surplus_huge_pages_node[nid]) {
				next_nid = hstate_next_node_to_alloc(h,
								nodes_allowed);
				continue;
			}
		}
		if (delta > 0) {
			/*
			 * Surplus cannot exceed the total number of pages
			 */
			if (h->surplus_huge_pages_node[nid] >=
						h->nr_huge_pages_node[nid]) {
				next_nid = hstate_next_node_to_free(h,
								nodes_allowed);
				continue;
			}
		}

		h->surplus_huge_pages += delta;
		h->surplus_huge_pages_node[nid] += delta;
		ret = 1;
		break;
	} while (next_nid != start_nid);

	return ret;
}

#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	unsigned long min_count, ret;

	if (h->order >= MAX_ORDER)
		return h->max_huge_pages;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, -1))
			break;
	}

	while (count > persistent_huge_pages(h)) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);
		ret = alloc_fresh_huge_page(h, nodes_allowed);
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

		/* Bail for signals. Probably ctrl-c from user */
		if (signal_pending(current))
			goto out;
	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(h, min_count, nodes_allowed);
	while (min_count < persistent_huge_pages(h)) {
		if (!free_pool_huge_page(h, nodes_allowed, 0))
			break;
	}
	while (count < persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, 1))
			break;
	}
out:
	ret = persistent_huge_pages(h);
	spin_unlock(&hugetlb_lock);
	return ret;
}
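
/*
 * set_max_huge_pages() is what ultimately services the user-visible pool
 * size knobs.  For example (assuming the default 2 MB hstate on x86-64):
 *
 *	echo 128 > /proc/sys/vm/nr_hugepages
 *	echo 128 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *
 * both end up here via the sysctl handler and sysfs store methods below.
 */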

#define HSTATE_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define HSTATE_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)

static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);

static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
{
	int i;

	for (i = 0; i < HUGE_MAX_HSTATE; i++)
		if (hstate_kobjs[i] == kobj) {
			if (nidp)
				*nidp = NUMA_NO_NODE;
			return &hstates[i];
		}

	return kobj_to_node_hstate(kobj, nidp);
}

static ssize_t nr_hugepages_show_common(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long nr_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		nr_huge_pages = h->nr_huge_pages;
	else
		nr_huge_pages = h->nr_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", nr_huge_pages);
}

static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
			struct kobject *kobj, struct kobj_attribute *attr,
			const char *buf, size_t len)
{
	int err;
	int nid;
	unsigned long count;
	struct hstate *h;
	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);

	err = strict_strtoul(buf, 10, &count);
	if (err)
		goto out;

	h = kobj_to_hstate(kobj, &nid);
	if (h->order >= MAX_ORDER) {
		err = -EINVAL;
		goto out;
	}

	if (nid == NUMA_NO_NODE) {
		/*
		 * global hstate attribute
		 */
		if (!(obey_mempolicy &&
				init_nodemask_of_mempolicy(nodes_allowed))) {
			NODEMASK_FREE(nodes_allowed);
			nodes_allowed = &node_states[N_MEMORY];
		}
	} else if (nodes_allowed) {
		/*
		 * per node hstate attribute: adjust global count to
		 * include this node's count.  restrict alloc/free to
		 * the specified node.
		 */
		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
		init_nodemask_of_node(nodes_allowed, nid);
	} else
		nodes_allowed = &node_states[N_MEMORY];

	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);

	if (nodes_allowed != &node_states[N_MEMORY])
		NODEMASK_FREE(nodes_allowed);

	return len;
out:
	NODEMASK_FREE(nodes_allowed);
	return err;
}

static ssize_t nr_hugepages_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(false, kobj, attr, buf, len);
}
HSTATE_ATTR(nr_hugepages);

#ifdef CONFIG_NUMA

/*
 * hstate attribute for optionally mempolicy-based constraint on persistent
 * huge page alloc/free.
 */
static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(true, kobj, attr, buf, len);
}
HSTATE_ATTR(nr_hugepages_mempolicy);
#endif

static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}

static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	unsigned long input;
	struct hstate *h = kobj_to_hstate(kobj, NULL);

	if (h->order >= MAX_ORDER)
		return -EINVAL;

	err = strict_strtoul(buf, 10, &input);
	if (err)
		return err;

	spin_lock(&hugetlb_lock);
	h->nr_overcommit_huge_pages = input;
	spin_unlock(&hugetlb_lock);

	return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);

static ssize_t free_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long free_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		free_huge_pages = h->free_huge_pages;
	else
		free_huge_pages = h->free_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);

static ssize_t resv_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sprintf(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);

static ssize_t surplus_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long surplus_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		surplus_huge_pages = h->surplus_huge_pages;
	else
		surplus_huge_pages = h->surplus_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);
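
/*
 * The attributes above appear per hstate under sysfs, e.g. for 2 MB pages:
 *
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/resv_hugepages
 *	/sys/kernel/mm/hugepages/hugepages-2048kB/surplus_hugepages
 *
 * (the directory name comes from h->name, set in hugetlb_add_hstate()).
 */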

static struct attribute *hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&nr_overcommit_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&resv_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
#ifdef CONFIG_NUMA
	&nr_hugepages_mempolicy_attr.attr,
#endif
	NULL,
};

static struct attribute_group hstate_attr_group = {
	.attrs = hstate_attrs,
};

static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
				    struct kobject **hstate_kobjs,
				    struct attribute_group *hstate_attr_group)
{
	int retval;
	int hi = hstate_index(h);

	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
	if (!hstate_kobjs[hi])
		return -ENOMEM;

	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
	if (retval)
		kobject_put(hstate_kobjs[hi]);

	return retval;
}

static void __init hugetlb_sysfs_init(void)
{
	struct hstate *h;
	int err;

	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
	if (!hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
					 hstate_kobjs, &hstate_attr_group);
		if (err)
			pr_err("Hugetlb: Unable to add hstate %s", h->name);
	}
}

#ifdef CONFIG_NUMA

/*
 * node_hstate/s - associate per node hstate attributes, via their kobjects,
 * with node devices in node_devices[] using a parallel array.  The array
 * index of a node device or _hstate == node id.
 * This is here to avoid any static dependency of the node device driver, in
 * the base kernel, on the hugetlb module.
 */
struct node_hstate {
	struct kobject		*hugepages_kobj;
	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
};
struct node_hstate node_hstates[MAX_NUMNODES];

/*
 * A subset of global hstate attributes for node devices
 */
static struct attribute *per_node_hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
	NULL,
};

static struct attribute_group per_node_hstate_attr_group = {
	.attrs = per_node_hstate_attrs,
};

/*
 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
 * Returns node id via non-NULL nidp.
 */
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	int nid;

	for (nid = 0; nid < nr_node_ids; nid++) {
		struct node_hstate *nhs = &node_hstates[nid];
		int i;
		for (i = 0; i < HUGE_MAX_HSTATE; i++)
			if (nhs->hstate_kobjs[i] == kobj) {
				if (nidp)
					*nidp = nid;
				return &hstates[i];
			}
	}

	BUG();
	return NULL;
}

/*
 * Unregister hstate attributes from a single node device.
 * No-op if no hstate attributes attached.
 */
void hugetlb_unregister_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->dev.id];

	if (!nhs->hugepages_kobj)
		return;		/* no hstate attributes */

	for_each_hstate(h) {
		int idx = hstate_index(h);
		if (nhs->hstate_kobjs[idx]) {
			kobject_put(nhs->hstate_kobjs[idx]);
			nhs->hstate_kobjs[idx] = NULL;
		}
	}

	kobject_put(nhs->hugepages_kobj);
	nhs->hugepages_kobj = NULL;
}

/*
 * hugetlb module exit:  unregister hstate attributes from node devices
 * that have them.
 */
static void hugetlb_unregister_all_nodes(void)
{
	int nid;

	/*
	 * disable node device registrations.
	 */
	register_hugetlbfs_with_node(NULL, NULL);

	/*
	 * remove hstate attributes from any nodes that have them.
	 */
	for (nid = 0; nid < nr_node_ids; nid++)
		hugetlb_unregister_node(node_devices[nid]);
}

/*
 * Register hstate attributes for a single node device.
 * No-op if attributes already registered.
 */
void hugetlb_register_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->dev.id];
	int err;

	if (nhs->hugepages_kobj)
		return;		/* already allocated */

	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
							&node->dev.kobj);
	if (!nhs->hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
						nhs->hstate_kobjs,
						&per_node_hstate_attr_group);
		if (err) {
			pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
				h->name, node->dev.id);
			hugetlb_unregister_node(node);
			break;
		}
	}
}

/*
 * hugetlb init time:  register hstate attributes for all registered node
 * devices of nodes that have memory.  All on-line nodes should have
 * registered their associated device by this time.
 */
static void hugetlb_register_all_nodes(void)
{
	int nid;

	for_each_node_state(nid, N_MEMORY) {
		struct node *node = node_devices[nid];
		if (node->dev.id == nid)
			hugetlb_register_node(node);
	}

	/*
	 * Let the node device driver know we're here so it can
	 * register hstate attributes for hot-plugged nodes.
	 */
	register_hugetlbfs_with_node(hugetlb_register_node,
				     hugetlb_unregister_node);
}
#else	/* !CONFIG_NUMA */

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	BUG();
	if (nidp)
		*nidp = -1;
	return NULL;
}

static void hugetlb_unregister_all_nodes(void) { }

static void hugetlb_register_all_nodes(void) { }

#endif

static void __exit hugetlb_exit(void)
{
	struct hstate *h;

	hugetlb_unregister_all_nodes();

	for_each_hstate(h) {
		kobject_put(hstate_kobjs[hstate_index(h)]);
	}

	kobject_put(hugepages_kobj);
}
module_exit(hugetlb_exit);

static int __init hugetlb_init(void)
{
	/*
	 * Some platforms decide whether they support huge pages at boot
	 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0 when
	 * there is no such support.
	 */
	if (HPAGE_SHIFT == 0)
		return 0;

	if (!size_to_hstate(default_hstate_size)) {
		default_hstate_size = HPAGE_SIZE;
		if (!size_to_hstate(default_hstate_size))
			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
	}
	default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
	if (default_hstate_max_huge_pages)
		default_hstate.max_huge_pages = default_hstate_max_huge_pages;

	hugetlb_init_hstates();
	gather_bootmem_prealloc();
	report_hugepages();

	hugetlb_sysfs_init();
	hugetlb_register_all_nodes();
	hugetlb_cgroup_file_init();

	return 0;
}
module_init(hugetlb_init);

/* Should be called on processing a hugepagesz=... option */
void __init hugetlb_add_hstate(unsigned order)
{
	struct hstate *h;
	unsigned long i;

	if (size_to_hstate(PAGE_SIZE << order)) {
		pr_warning("hugepagesz= specified twice, ignoring\n");
		return;
	}
	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
	BUG_ON(order == 0);
	h = &hstates[hugetlb_max_hstate++];
	h->order = order;
	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
	h->nr_huge_pages = 0;
	h->free_huge_pages = 0;
	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
	INIT_LIST_HEAD(&h->hugepage_activelist);
	h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
	h->next_nid_to_free = first_node(node_states[N_MEMORY]);
	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
					huge_page_size(h)/1024);

	parsed_hstate = h;
}

static int __init hugetlb_nrpages_setup(char *s)
{
	unsigned long *mhp;
	static unsigned long *last_mhp;

	/*
	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
	 * yet, so this hugepages= parameter goes to the "default hstate".
	 */
	if (!hugetlb_max_hstate)
		mhp = &default_hstate_max_huge_pages;
	else
		mhp = &parsed_hstate->max_huge_pages;

	if (mhp == last_mhp) {
		pr_warning("hugepages= specified twice without "
			   "interleaving hugepagesz=, ignoring\n");
		return 1;
	}

	if (sscanf(s, "%lu", mhp) <= 0)
		*mhp = 0;

	/*
	 * Global state is always initialized later in hugetlb_init.
	 * But we need to allocate >= MAX_ORDER hstates here early to still
	 * use the bootmem allocator.
	 */
	if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
		hugetlb_hstate_alloc_pages(parsed_hstate);

	last_mhp = mhp;

	return 1;
}
__setup("hugepages=", hugetlb_nrpages_setup);

static int __init hugetlb_default_setup(char *s)
{
	default_hstate_size = memparse(s, &s);
	return 1;
}
__setup("default_hugepagesz=", hugetlb_default_setup);
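
/*
 * Boot-time example: the __setup() handlers above parse command lines such
 * as
 *
 *	hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512
 *	default_hugepagesz=1G
 *
 * where each hugepages= count applies to the most recent hugepagesz= (or
 * to the default huge page size if none has been given yet).  hugepagesz=
 * itself is parsed by architecture code, which calls hugetlb_add_hstate().
 */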

static unsigned int cpuset_mems_nr(unsigned int *array)
{
	int node;
	unsigned int nr = 0;

	for_each_node_mask(node, cpuset_current_mems_allowed)
		nr += array[node];

	return nr;
}
#ifdef CONFIG_SYSCTL
static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
			 struct ctl_table *table, int write,
			 void __user *buffer, size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp;
	int ret;

	tmp = h->max_huge_pages;

	if (write && h->order >= MAX_ORDER)
		return -EINVAL;

	table->data = &tmp;
	table->maxlen = sizeof(unsigned long);
	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
	if (ret)
		goto out;

	if (write) {
		NODEMASK_ALLOC(nodemask_t, nodes_allowed,
						GFP_KERNEL | __GFP_NORETRY);
		if (!(obey_mempolicy &&
			       init_nodemask_of_mempolicy(nodes_allowed))) {
			NODEMASK_FREE(nodes_allowed);
			nodes_allowed = &node_states[N_MEMORY];
		}
		h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);

		if (nodes_allowed != &node_states[N_MEMORY])
			NODEMASK_FREE(nodes_allowed);
	}
out:
	return ret;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			  void __user *buffer, size_t *length, loff_t *ppos)
{

	return hugetlb_sysctl_handler_common(false, table, write,
							buffer, length, ppos);
}

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
			  void __user *buffer, size_t *length, loff_t *ppos)
{
	return hugetlb_sysctl_handler_common(true, table, write,
							buffer, length, ppos);
}
#endif /* CONFIG_NUMA */

int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
			void __user *buffer,
			size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, buffer, length, ppos);
	if (hugepages_treat_as_movable)
		htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
	else
		htlb_alloc_mask = GFP_HIGHUSER;
	return 0;
}

int hugetlb_overcommit_handler(struct ctl_table *table, int write,
			void __user *buffer,
			size_t *length, loff_t *ppos)
{
	struct hstate *h = &default_hstate;
	unsigned long tmp;
	int ret;

	tmp = h->nr_overcommit_huge_pages;

	if (write && h->order >= MAX_ORDER)
		return -EINVAL;

	table->data = &tmp;
	table->maxlen = sizeof(unsigned long);
	ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
	if (ret)
		goto out;

	if (write) {
		spin_lock(&hugetlb_lock);
		h->nr_overcommit_huge_pages = tmp;
		spin_unlock(&hugetlb_lock);
	}
out:
	return ret;
}

#endif /* CONFIG_SYSCTL */

void hugetlb_report_meminfo(struct seq_file *m)
{
	struct hstate *h = &default_hstate;
	seq_printf(m,
			"HugePages_Total: %5lu\n"
			"HugePages_Free: %5lu\n"
			"HugePages_Rsvd: %5lu\n"
			"HugePages_Surp: %5lu\n"
			"Hugepagesize: %8lu kB\n",
			h->nr_huge_pages,
			h->free_huge_pages,
			h->resv_huge_pages,
			h->surplus_huge_pages,
			1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	struct hstate *h = &default_hstate;
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free: %5u\n"
		"Node %d HugePages_Surp: %5u\n",
		nid, h->nr_huge_pages_node[nid],
		nid, h->free_huge_pages_node[nid],
		nid, h->surplus_huge_pages_node[nid]);
}

/* Return the number of huge pages of memory we physically have, in PAGE_SIZE units */
unsigned long hugetlb_total_pages(void)
{
	struct hstate *h;
	unsigned long nr_total_pages = 0;

	for_each_hstate(h)
		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
	return nr_total_pages;
}

static int hugetlb_acct_memory(struct hstate *h, long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * reservation is completely rubbish in the presence of cpuset because
	 * the reservation is not checked against page availability for the
	 * current cpuset. Application can still potentially OOM'ed by kernel
	 * with lack of free htlb page in cpuset that the task is in.
	 * Attempt to enforce strict accounting with cpuset is almost
	 * impossible (or too ugly) because cpuset is too fluid that
	 * task or memory node can be dynamically moved between cpusets.
	 *
	 * The change of semantics for shared hugetlb mapping with cpuset is
	 * undesirable. However, to preserve some of the semantics, we fall
	 * back to check against current free page availability as a best
	 * attempt and hopefully to minimize the impact of changing semantics
	 * that cpuset has.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(h, delta) < 0)
			goto out;

		if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
			return_unused_surplus_pages(h, delta);
			goto out;
		}
	}

	ret = 0;
	if (delta < 0)
		return_unused_surplus_pages(h, (unsigned long) -delta);

out:
	spin_unlock(&hugetlb_lock);
	return ret;
}

static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{
	struct resv_map *reservations = vma_resv_map(vma);

	/*
	 * This new VMA should share its siblings reservation map if present.
	 * The VMA will only ever have a valid reservation map pointer where
	 * it is being copied for another still existing VMA.  As that VMA
	 * has a reference to the reservation map it cannot disappear until
	 * after this open call completes.  It is therefore safe to take a
	 * new reference here without additional locking.
	 */
	if (reservations)
		kref_get(&reservations->refs);
}

static void resv_map_put(struct vm_area_struct *vma)
{
	struct resv_map *reservations = vma_resv_map(vma);

	if (!reservations)
		return;
	kref_put(&reservations->refs, resv_map_release);
}

static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
	struct hstate *h = hstate_vma(vma);
	struct resv_map *reservations = vma_resv_map(vma);
	struct hugepage_subpool *spool = subpool_vma(vma);
	unsigned long reserve;
	unsigned long start;
	unsigned long end;

	if (reservations) {
		start = vma_hugecache_offset(h, vma, vma->vm_start);
		end = vma_hugecache_offset(h, vma, vma->vm_end);

		reserve = (end - start) -
			region_count(&reservations->regions, start, end);

		resv_map_put(vma);

		if (reserve) {
			hugetlb_acct_memory(h, -reserve);
			hugepage_subpool_put_pages(spool, reserve);
		}
	}
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

const struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
	.open = hugetlb_vm_op_open,
	.close = hugetlb_vm_op_close,
};

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);
	entry = arch_make_huge_pte(entry, vma, page, writable);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
		update_mmu_cache(vma, address, ptep);
}

int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr, sz);
		if (!dst_pte)
			goto nomem;

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
			continue;

		spin_lock(&dst->page_table_lock);
		spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
		if (!huge_pte_none(huge_ptep_get(src_pte))) {
			if (cow)
				huge_ptep_set_wrprotect(src, addr, src_pte);
			entry = huge_ptep_get(src_pte);
			ptepage = pte_page(entry);
			get_page(ptepage);
			page_dup_rmap(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

static int is_hugetlb_entry_migration(pte_t pte)
{
	swp_entry_t swp;

	if (huge_pte_none(pte) || pte_present(pte))
		return 0;
	swp = pte_to_swp_entry(pte);
	if (non_swap_entry(swp) && is_migration_entry(swp))
		return 1;
	else
		return 0;
}

static int is_hugetlb_entry_hwpoisoned(pte_t pte)
{
	swp_entry_t swp;

	if (huge_pte_none(pte) || pte_present(pte))
		return 0;
	swp = pte_to_swp_entry(pte);
	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
		return 1;
	else
		return 0;
}

void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page)
{
	int force_flush = 0;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	const unsigned long mmun_start = start;	/* For mmu_notifiers */
	const unsigned long mmun_end   = end;	/* For mmu_notifiers */

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~huge_page_mask(h));
	BUG_ON(end & ~huge_page_mask(h));

	tlb_start_vma(tlb, vma);
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
again:
	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += sz) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get(ptep);
		if (huge_pte_none(pte))
			continue;

		/*
		 * HWPoisoned hugepage is already unmapped and dropped reference
		 */
		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
			pte_clear(mm, address, ptep);
			continue;
		}

		page = pte_page(pte);
		/*
		 * If a reference page is supplied, it is because a specific
		 * page is being unmapped, not a range. Ensure the page we
		 * are about to unmap is the actual page of interest.
		 */
		if (ref_page) {
			if (page != ref_page)
				continue;

			/*
			 * Mark the VMA as having unmapped its page so that
			 * future faults in this VMA will fail rather than
			 * looking like data was lost
			 */
			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
		}

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		tlb_remove_tlb_entry(tlb, ptep, address);
		if (pte_dirty(pte))
			set_page_dirty(page);

		page_remove_rmap(page);
		force_flush = !__tlb_remove_page(tlb, page);
		if (force_flush)
			break;
		/* Bail out after unmapping reference page if supplied */
		if (ref_page)
			break;
	}
	spin_unlock(&mm->page_table_lock);
	/*
	 * mmu_gather ran out of room to batch pages, we break out of
	 * the PTE lock to avoid doing the potential expensive TLB invalidate
	 * and page-free while holding it.
	 */
	if (force_flush) {
		force_flush = 0;
		tlb_flush_mmu(tlb);
		if (address < end && !ref_page)
			goto again;
	}
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	tlb_end_vma(tlb, vma);
}

void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page)
{
	__unmap_hugepage_range(tlb, vma, start, end, ref_page);

	/*
	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
	 * test will fail on a vma being torn down, and not grab a page table
	 * on its way out.  We're lucky that the flag has such an appropriate
	 * name, and can in fact be safely cleared here.  We could clear it
	 * before the __unmap_hugepage_range above, but all that's necessary
	 * is to clear it before releasing the i_mmap_mutex.  This works
	 * because in the contexts this is called, the VMA is about to be
	 * destroyed and the i_mmap_mutex is held.
	 */
	vma->vm_flags &= ~VM_MAYSHARE;
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page)
{
	struct mm_struct *mm;
	struct mmu_gather tlb;

	mm = vma->vm_mm;

	tlb_gather_mmu(&tlb, mm, 0);
	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
	tlb_finish_mmu(&tlb, start, end);
}

/*
 * This is called when the original mapper is failing to COW a MAP_PRIVATE
 * mapping it owns the reserve page for. The intention is to unmap the page
 * from other VMAs and let the children be SIGKILLed if they are faulting the
 * same region.
 */
static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
				struct page *page, unsigned long address)
{
	struct hstate *h = hstate_vma(vma);
	struct vm_area_struct *iter_vma;
	struct address_space *mapping;
	pgoff_t pgoff;

	/*
	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
	 * from page cache lookup which is in huge page units.
	 */
	address = address & huge_page_mask(h);
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	mapping = file_inode(vma->vm_file)->i_mapping;

	/*
	 * Take the mapping lock for the duration of the table walk. As
	 * this mapping should be shared between all the VMAs,
	 * __unmap_hugepage_range() is called as the lock is already held
	 */
	mutex_lock(&mapping->i_mmap_mutex);
	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
		/* Do not unmap the current VMA */
		if (iter_vma == vma)
			continue;

		/*
		 * Unmap the page from other VMAs without their own reserves.
		 * They get marked to be SIGKILLed if they fault in these
		 * areas. This is because a future no-page fault on this VMA
		 * could insert a zeroed page instead of the data existing
		 * from the time of fork. This would look like data corruption
		 */
		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
			unmap_hugepage_range(iter_vma, address,
					     address + huge_page_size(h), page);
	}
	mutex_unlock(&mapping->i_mmap_mutex);

	return 1;
}

/*
 * Hugetlb_cow() should be called with page lock of the original hugepage held.
 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
 * cannot race with other handlers or page migration.
 * Keep the pte_same checks anyway to make transition from the mutex easier.
 */
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte,
			struct page *pagecache_page)
{
	struct hstate *h = hstate_vma(vma);
	struct page *old_page, *new_page;
	int avoidcopy;
	int outside_reserve = 0;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	old_page = pte_page(pte);

retry_avoidcopy:
	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_mapcount(old_page) == 1);
	if (avoidcopy) {
		if (PageAnon(old_page))
			page_move_anon_rmap(old_page, vma, address);
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	/*
	 * If the process that created a MAP_PRIVATE mapping is about to
	 * perform a COW due to a shared page count, attempt to satisfy
	 * the allocation without using the existing reserves. The pagecache
	 * page is used to determine if the reserve at this address was
	 * consumed or not. If reserves were used, a partial faulted mapping
	 * at the time of fork() could consume its reserves on COW instead
	 * of the full address range.
	 */
	if (!(vma->vm_flags & VM_MAYSHARE) &&
			is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
			old_page != pagecache_page)
		outside_reserve = 1;

	page_cache_get(old_page);

	/* Drop page_table_lock as buddy allocator may be called */
	spin_unlock(&mm->page_table_lock);
	new_page = alloc_huge_page(vma, address, outside_reserve);

	if (IS_ERR(new_page)) {
		long err = PTR_ERR(new_page);
		page_cache_release(old_page);

		/*
		 * If a process owning a MAP_PRIVATE mapping fails to COW,
		 * it is due to references held by a child and an insufficient
		 * huge page pool. To guarantee the original mappers
		 * reliability, unmap the page from child processes. The child
		 * may get SIGKILLed if it later faults.
		 */
		if (outside_reserve) {
			BUG_ON(huge_pte_none(pte));
			if (unmap_ref_private(mm, vma, old_page, address)) {
				BUG_ON(huge_pte_none(pte));
				spin_lock(&mm->page_table_lock);
				ptep = huge_pte_offset(mm, address & huge_page_mask(h));
				if (likely(pte_same(huge_ptep_get(ptep), pte)))
					goto retry_avoidcopy;
				/*
				 * race occurs while re-acquiring page_table_lock, and
				 * our job is done.
				 */
				return 0;
			}
			WARN_ON_ONCE(1);
		}

		/* Caller expects lock to be held */
		spin_lock(&mm->page_table_lock);
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		else
			return VM_FAULT_SIGBUS;
	}

	/*
	 * When the original hugepage is shared one, it does not have
	 * anon_vma prepared.
	 */
	if (unlikely(anon_vma_prepare(vma))) {
		page_cache_release(new_page);
		page_cache_release(old_page);
		/* Caller expects lock to be held */
		spin_lock(&mm->page_table_lock);
		return VM_FAULT_OOM;
	}

	copy_user_huge_page(new_page, old_page, address, vma,
			    pages_per_huge_page(h));
	__SetPageUptodate(new_page);

	mmun_start = address & huge_page_mask(h);
	mmun_end = mmun_start + huge_page_size(h);
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	/*
	 * Retake the page_table_lock to check for racing updates
	 * before the page tables are altered
	 */
	spin_lock(&mm->page_table_lock);
	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
	if (likely(pte_same(huge_ptep_get(ptep), pte))) {
		/* Break COW */
		huge_ptep_clear_flush(vma, address, ptep);
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		page_remove_rmap(old_page);
		hugepage_add_new_anon_rmap(new_page, vma, address);
		/* Make the old page be freed below */
		new_page = old_page;
	}
	spin_unlock(&mm->page_table_lock);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	/* Caller expects lock to be held */
	spin_lock(&mm->page_table_lock);
	page_cache_release(new_page);
	page_cache_release(old_page);
	return 0;
}
2644
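/*
 * Design note with an illustrative sketch (not part of this file): the
 * drop-lock / allocate / retake-lock / revalidate shape used above is the
 * standard way to sleep in an allocator while a fault handler holds a
 * spinlock.  In miniature:
 *
 *	spin_unlock(&mm->page_table_lock);
 *	new_page = alloc_huge_page(vma, address, 0);	(may sleep)
 *	spin_lock(&mm->page_table_lock);
 *	if (!pte_same(huge_ptep_get(ptep), pte))
 *		back out - another thread handled the fault meanwhile;
 *
 * The pte_same() recheck is what makes dropping the lock safe.
 */
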
/* Return the pagecache page at a given address within a VMA */
static struct page *hugetlbfs_pagecache_page(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	return find_lock_page(mapping, idx);
}

/*
 * Return whether there is a pagecache page to back the given address within
 * the VMA.  The caller, follow_hugetlb_page(), holds page_table_lock, so we
 * cannot lock_page() here; take and drop a reference instead.
 */
static bool hugetlbfs_pagecache_present(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;
	struct page *page;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	page = find_get_page(mapping, idx);
	if (page)
		put_page(page);
	return page != NULL;
}

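/*
 * Usage contrast for the two pagecache helpers above (illustrative): both
 * compute the same huge-page index; they differ only in locking.  A caller
 * that will operate on the page takes the locked variant:
 *
 *	page = hugetlbfs_pagecache_page(h, vma, addr);
 *	if (page) {
 *		... use page ...
 *		unlock_page(page);
 *		put_page(page);
 *	}
 *
 * whereas follow_hugetlb_page() only needs an existence test while holding
 * page_table_lock, where sleeping on the page lock is forbidden, so it uses
 * hugetlbfs_pagecache_present().
 */
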
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, unsigned int flags)
{
	struct hstate *h = hstate_vma(vma);
	int ret = VM_FAULT_SIGBUS;
	int anon_rmap = 0;
	pgoff_t idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	/*
	 * Currently, we are forced to kill the process in the event the
	 * original mapper has unmapped pages from the child due to a failed
	 * COW.  Warn that such a situation has occurred, as it may not be
	 * obvious.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
		pr_warning("PID %d killed due to inadequate hugepage pool\n",
			   current->pid);
		return ret;
	}

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	/*
	 * Use the page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address, 0);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			if (ret == -ENOMEM)
				ret = VM_FAULT_OOM;
			else
				ret = VM_FAULT_SIGBUS;
			goto out;
		}
		clear_huge_page(page, address, pages_per_huge_page(h));
		__SetPageUptodate(page);

		if (vma->vm_flags & VM_MAYSHARE) {
			int err;
			struct inode *inode = mapping->host;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}

			spin_lock(&inode->i_lock);
			inode->i_blocks += blocks_per_huge_page(h);
			spin_unlock(&inode->i_lock);
		} else {
			lock_page(page);
			if (unlikely(anon_vma_prepare(vma))) {
				ret = VM_FAULT_OOM;
				goto backout_unlocked;
			}
			anon_rmap = 1;
		}
	} else {
		/*
		 * If a memory error occurs between mmap() and fault, some
		 * process may not have an hwpoisoned swap entry for the
		 * errored virtual address, so we must block hugepage faults
		 * with the PG_hwpoison bit check.
		 */
		if (unlikely(PageHWPoison(page))) {
			ret = VM_FAULT_HWPOISON |
				VM_FAULT_SET_HINDEX(hstate_index(h));
			goto backout_unlocked;
		}
	}

	/*
	 * If we are going to COW a private mapping later, we examine the
	 * pending reservations for this page now.  This will ensure that
	 * any allocations necessary to record that reservation occur outside
	 * the spinlock.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
		if (vma_needs_reservation(h, vma, address) < 0) {
			ret = VM_FAULT_OOM;
			goto backout_unlocked;
		}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> huge_page_shift(h);
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!huge_pte_none(huge_ptep_get(ptep)))
		goto backout;

	if (anon_rmap)
		hugepage_add_new_anon_rmap(page, vma, address);
	else
		page_dup_rmap(page);
	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
backout_unlocked:
	unlock_page(page);
	put_page(page);
	goto out;
}

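/*
 * Worked example of the truncation race handled above (illustrative): the
 * file size is read in huge-page units, so with 2MB pages and i_size == 6MB,
 * size == 3 and only idx 0..2 are valid.  The size is checked once before
 * allocating (to fail fast) and rechecked under page_table_lock, because a
 * concurrent truncate may shrink i_size between the two reads; the second
 * check is the authoritative one.
 */
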
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	struct page *page = NULL;
	struct page *pagecache_page = NULL;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);
	struct hstate *h = hstate_vma(vma);

	address &= huge_page_mask(h);

	ptep = huge_pte_offset(mm, address);
	if (ptep) {
		entry = huge_ptep_get(ptep);
		if (unlikely(is_hugetlb_entry_migration(entry))) {
			migration_entry_wait(mm, (pmd_t *)ptep, address);
			return 0;
		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
			return VM_FAULT_HWPOISON_LARGE |
				VM_FAULT_SET_HINDEX(hstate_index(h));
	}

	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = huge_ptep_get(ptep);
	if (huge_pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, flags);
		goto out_mutex;
	}

	ret = 0;

	/*
	 * If we are going to COW the mapping later, we examine the pending
	 * reservations for this page now.  This will ensure that any
	 * allocations necessary to record that reservation occur outside
	 * the spinlock.  For private mappings, we also look up the pagecache
	 * page now, as it is used to determine if a reservation has been
	 * consumed.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
		if (vma_needs_reservation(h, vma, address) < 0) {
			ret = VM_FAULT_OOM;
			goto out_mutex;
		}

		if (!(vma->vm_flags & VM_MAYSHARE))
			pagecache_page = hugetlbfs_pagecache_page(h,
								vma, address);
	}

	/*
	 * hugetlb_cow() requires the page locks of pte_page(entry) and
	 * pagecache_page, so here we need to take the former one
	 * when page != pagecache_page or !pagecache_page.
	 * Note that the locking order is always pagecache_page -> page,
	 * so there is no deadlock.
	 */
	page = pte_page(entry);
	get_page(page);
	if (page != pagecache_page)
		lock_page(page);

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
		goto out_page_table_lock;

	if (flags & FAULT_FLAG_WRITE) {
		if (!pte_write(entry)) {
			ret = hugetlb_cow(mm, vma, address, ptep, entry,
							pagecache_page);
			goto out_page_table_lock;
		}
		entry = pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
						flags & FAULT_FLAG_WRITE))
		update_mmu_cache(vma, address, ptep);

out_page_table_lock:
	spin_unlock(&mm->page_table_lock);

	if (pagecache_page) {
		unlock_page(pagecache_page);
		put_page(pagecache_page);
	}
	if (page != pagecache_page)
		unlock_page(page);
	put_page(page);

out_mutex:
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

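/*
 * Illustrative sketch (not part of this file): the generic fault path
 * dispatches hugetlb VMAs here before walking ordinary page tables, roughly
 * as handle_mm_fault() in mm/memory.c of this era does:
 *
 *	if (unlikely(is_vm_hugetlb_page(vma)))
 *		return hugetlb_fault(mm, vma, address, flags);
 *
 * so everything below that check in the generic handler can assume
 * small pages.
 */
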
/* Can be overridden by architectures */
__attribute__((weak)) struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
	       pud_t *pud, int write)
{
	BUG();
	return NULL;
}

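/*
 * Note on the weak stub above: an architecture with huge-page-sized PUD
 * entries supplies its own follow_huge_pud() and the linker prefers that
 * definition over the weak one.  Sketch of the shape such an override takes
 * (hypothetical, modelled on the pmd-level lookups used elsewhere):
 *
 *	struct page *follow_huge_pud(struct mm_struct *mm, unsigned long addr,
 *				     pud_t *pud, int write)
 *	{
 *		return pte_page(*(pte_t *)pud) +
 *			((addr & ~PUD_MASK) >> PAGE_SHIFT);
 *	}
 */
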
long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			 struct page **pages, struct vm_area_struct **vmas,
			 unsigned long *position, unsigned long *nr_pages,
			 long i, unsigned int flags)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	unsigned long remainder = *nr_pages;
	struct hstate *h = hstate_vma(vma);

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		int absent;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
		absent = !pte || huge_pte_none(huge_ptep_get(pte));

		/*
		 * When coredumping, it suits get_dump_page if we just return
		 * an error where there's an empty slot with no huge pagecache
		 * to back it: this way we skip hard faults on holes instead
		 * of faulting in pages just to dump them.
		 */
		if (absent && (flags & FOLL_DUMP) &&
		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
			remainder = 0;
			break;
		}

		/*
		 * The is_swap_pte() test covers both hugepages under
		 * migration and hwpoisoned hugepages: either way the entry
		 * is not a present mapping and must go through
		 * hugetlb_fault(), which waits for the migration or reports
		 * the poison, rather than being followed directly.
		 */
		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
		    ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr,
				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
			spin_lock(&mm->page_table_lock);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			break;
		}

		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
		page = pte_page(huge_ptep_get(pte));
same_page:
		if (pages) {
			pages[i] = mem_map_offset(page, pfn_offset);
			get_page(pages[i]);
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < pages_per_huge_page(h)) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this huge page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*nr_pages = remainder;
	*position = vaddr;

	return i ? i : -EFAULT;
}

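/*
 * Illustrative sketch (not part of this file): __get_user_pages() in
 * mm/memory.c hands whole hugetlb VMAs to the function above instead of
 * walking their page tables itself, in roughly this shape:
 *
 *	if (is_vm_hugetlb_page(vma)) {
 *		i = follow_hugetlb_page(mm, vma, pages, vmas,
 *					&start, &nr_pages, i, gup_flags);
 *		continue;
 *	}
 *
 * Note the in/out parameters: *position and *nr_pages are advanced past
 * whatever was pinned, so the caller can resume at the next VMA.
 */
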
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;
	struct hstate *h = hstate_vma(vma);
	unsigned long pages = 0;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += huge_page_size(h)) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep)) {
			pages++;
			continue;
		}
		if (!huge_pte_none(huge_ptep_get(ptep))) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			pte = arch_make_huge_pte(pte, vma, NULL, 0);
			set_huge_pte_at(mm, address, ptep, pte);
			pages++;
		}
	}
	spin_unlock(&mm->page_table_lock);
	/*
	 * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
	 * may have cleared our pud entry and done put_page on the page table:
	 * once we release i_mmap_mutex, another task can do the final put_page
	 * and that page table be reused and filled with junk.
	 */
	flush_tlb_range(vma, start, end);
	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);

	return pages << h->order;
}

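/*
 * Worked example for the return value above (illustrative): the counter
 * "pages" is in huge pages, but the caller expects base pages, hence the
 * shift by h->order.  With 2MB huge pages on x86 (order 9), changing the
 * protection of 3 huge pages returns 3 << 9 == 1536 base pages, matching
 * what change_protection() would report for a small-page range of the same
 * size.
 */
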
int hugetlb_reserve_pages(struct inode *inode,
					long from, long to,
					struct vm_area_struct *vma,
					vm_flags_t vm_flags)
{
	long ret, chg;
	struct hstate *h = hstate_inode(inode);
	struct hugepage_subpool *spool = subpool_inode(inode);

	/*
	 * Only apply the hugepage reservation if asked.  At fault time, an
	 * attempt will be made for VM_NORESERVE to allocate a page
	 * without using reserves.
	 */
	if (vm_flags & VM_NORESERVE)
		return 0;

	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file.  Private mappings need
	 * to reserve the full area even if read-only, as mprotect() may be
	 * called to make the mapping read-write.  Assume !vma is a shm
	 * mapping.
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE)
		chg = region_chg(&inode->i_mapping->private_list, from, to);
	else {
		struct resv_map *resv_map = resv_map_alloc();
		if (!resv_map)
			return -ENOMEM;

		chg = to - from;

		set_vma_resv_map(vma, resv_map);
		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
	}

	if (chg < 0) {
		ret = chg;
		goto out_err;
	}

	/* There must be enough pages in the subpool for the mapping */
	if (hugepage_subpool_get_pages(spool, chg)) {
		ret = -ENOSPC;
		goto out_err;
	}

	/*
	 * Check that enough hugepages are available for the reservation.
	 * Hand the pages back to the subpool if there are not.
	 */
	ret = hugetlb_acct_memory(h, chg);
	if (ret < 0) {
		hugepage_subpool_put_pages(spool, chg);
		goto out_err;
	}

	/*
	 * Account for the reservations made.  Shared mappings record regions
	 * that have reservations, as they are shared by multiple VMAs:
	 * when the last VMA disappears, the region map says how much
	 * the reservation was and the page cache tells how much of
	 * the reservation was consumed.  Private mappings are per-VMA, so
	 * only the owning VMA needs to track its reservations, which is why
	 * the resv_map was attached to the VMA above rather than recorded
	 * in the inode's region list.
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE)
		region_add(&inode->i_mapping->private_list, from, to);
	return 0;
out_err:
	if (vma)
		resv_map_put(vma);
	return ret;
}

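/*
 * Worked example of the reservation arithmetic (illustrative): mapping the
 * first 4 huge pages of a file gives from == 0, to == 4.  For a shared
 * mapping where pages 1 and 2 are already reserved by another mapping,
 * region_chg() returns chg == 2 (only the uncovered pages), so the subpool
 * and the huge page pool are each charged 2, and region_add() then extends
 * the inode's region list to cover [0, 4).  A private mapping always
 * charges the full chg == to - from == 4.
 */
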
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	struct hstate *h = hstate_inode(inode);
	long chg = region_truncate(&inode->i_mapping->private_list, offset);
	struct hugepage_subpool *spool = subpool_inode(inode);

	spin_lock(&inode->i_lock);
	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
	spin_unlock(&inode->i_lock);

	hugepage_subpool_put_pages(spool, (chg - freed));
	hugetlb_acct_memory(h, -(chg - freed));
}

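/*
 * Worked example for the unreserve path (illustrative): if truncation cuts
 * the region list back to "offset" and region_truncate() reports chg == 5
 * reserved pages beyond that point, while only freed == 3 pages were
 * actually instantiated and freed, then chg - freed == 2 reservations were
 * never consumed; those 2 are returned to the subpool here and subtracted
 * from the hstate's reserve accounting.
 */
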
#ifdef CONFIG_MEMORY_FAILURE

/* Should be called with hugetlb_lock held */
static int is_hugepage_on_freelist(struct page *hpage)
{
	struct page *page;
	struct page *tmp;
	struct hstate *h = page_hstate(hpage);
	int nid = page_to_nid(hpage);

	list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
		if (page == hpage)
			return 1;
	return 0;
}

/*
 * This function is called from the memory failure code.
 * Assume the caller holds the page lock of the head page.
 */
int dequeue_hwpoisoned_huge_page(struct page *hpage)
{
	struct hstate *h = page_hstate(hpage);
	int nid = page_to_nid(hpage);
	int ret = -EBUSY;

	spin_lock(&hugetlb_lock);
	if (is_hugepage_on_freelist(hpage)) {
		/*
		 * An hwpoisoned hugepage isn't linked to the activelist or
		 * freelist, but a dangling hpage->lru can trigger list-debug
		 * warnings (this happens when unpoison_memory() is called on
		 * it), so make it point to itself with list_del_init().
		 */
		list_del_init(&hpage->lru);
		set_page_refcounted(hpage);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		ret = 0;
	}
	spin_unlock(&hugetlb_lock);
	return ret;
}
#endif