// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>
#include <linux/llist.h>
#include <linux/cma.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_owner.h>
#include "internal.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

#ifdef CONFIG_CMA
static struct cma *hugetlb_cma[MAX_NUMNODES];
#endif
static unsigned long hugetlb_cma_size __initdata;

/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */
static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
	bool free = (spool->count == 0) && (spool->used_hpages == 0);

	spin_unlock(&spool->lock);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (free) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	spin_lock(&spool->lock);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				      long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those available.
			 * Satisfy all reserves but note this situation.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock(&spool->lock);
	return ret;
}

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return delta;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool);

	return ret;
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

231
232
233
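/*
 * Helper that removes a struct file_region from the cache and returns it
 * for use.  Must be called with resv->lock held.
 */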
234static struct file_region *
235get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
236{
237 struct file_region *nrg = NULL;
238
239 VM_BUG_ON(resv->region_cache_count <= 0);
240
241 resv->region_cache_count--;
242 nrg = list_first_entry(&resv->region_cache, struct file_region, link);
243 list_del(&nrg->link);
244
245 nrg->from = from;
246 nrg->to = to;
247
248 return nrg;
249}
250
251static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
252 struct file_region *rg)
253{
254#ifdef CONFIG_CGROUP_HUGETLB
255 nrg->reservation_counter = rg->reservation_counter;
256 nrg->css = rg->css;
257 if (rg->css)
258 css_get(rg->css);
259#endif
260}
261
262
263static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
264 struct hstate *h,
265 struct resv_map *resv,
266 struct file_region *nrg)
267{
268#ifdef CONFIG_CGROUP_HUGETLB
269 if (h_cg) {
270 nrg->reservation_counter =
271 &h_cg->rsvd_hugepage[hstate_index(h)];
272 nrg->css = &h_cg->css;
273 if (!resv->pages_per_hpage)
274 resv->pages_per_hpage = pages_per_huge_page(h);
275
276
277
278 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
279 } else {
280 nrg->reservation_counter = NULL;
281 nrg->css = NULL;
282 }
283#endif
284}
285
286static bool has_same_uncharge_info(struct file_region *rg,
287 struct file_region *org)
288{
289#ifdef CONFIG_CGROUP_HUGETLB
290 return rg && org &&
291 rg->reservation_counter == org->reservation_counter &&
292 rg->css == org->css;
293
294#else
295 return true;
296#endif
297}
298
299static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
300{
301 struct file_region *nrg = NULL, *prg = NULL;
302
303 prg = list_prev_entry(rg, link);
304 if (&prg->link != &resv->regions && prg->to == rg->from &&
305 has_same_uncharge_info(prg, rg)) {
306 prg->to = rg->to;
307
308 list_del(&rg->link);
309 kfree(rg);
310
311 rg = prg;
312 }
313
314 nrg = list_next_entry(rg, link);
315 if (&nrg->link != &resv->regions && nrg->from == rg->to &&
316 has_same_uncharge_info(nrg, rg)) {
317 nrg->from = rg->from;
318
319 list_del(&rg->link);
320 kfree(rg);
321 }
322}
323
324
325
326
327
328
329
330
331
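/*
 * Add the ranges in [f, t) that are not already covered by the reserve map
 * to the map.  Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added but will not modify the linked list.  regions_needed will
 * indicate how many file_regions are needed in the cache to carry out the
 * add for this range.
 */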
332static long add_reservation_in_range(struct resv_map *resv, long f, long t,
333 struct hugetlb_cgroup *h_cg,
334 struct hstate *h, long *regions_needed)
335{
336 long add = 0;
337 struct list_head *head = &resv->regions;
338 long last_accounted_offset = f;
339 struct file_region *rg = NULL, *trg = NULL, *nrg = NULL;
340
341 if (regions_needed)
342 *regions_needed = 0;
343
344
345
346
347
348 list_for_each_entry_safe(rg, trg, head, link) {
349
350 if (rg->from < f) {
351
352
353
354 if (rg->to > last_accounted_offset)
355 last_accounted_offset = rg->to;
356 continue;
357 }
358
359
360
361
362 if (rg->from > t)
363 break;
364
365
366
367
368 if (rg->from > last_accounted_offset) {
369 add += rg->from - last_accounted_offset;
370 if (!regions_needed) {
371 nrg = get_file_region_entry_from_cache(
372 resv, last_accounted_offset, rg->from);
373 record_hugetlb_cgroup_uncharge_info(h_cg, h,
374 resv, nrg);
375 list_add(&nrg->link, rg->link.prev);
376 coalesce_file_region(resv, nrg);
377 } else
378 *regions_needed += 1;
379 }
380
381 last_accounted_offset = rg->to;
382 }
383
384
385
386
387 if (last_accounted_offset < t) {
388 add += t - last_accounted_offset;
389 if (!regions_needed) {
390 nrg = get_file_region_entry_from_cache(
391 resv, last_accounted_offset, t);
392 record_hugetlb_cgroup_uncharge_info(h_cg, h, resv, nrg);
393 list_add(&nrg->link, rg->link.prev);
394 coalesce_file_region(resv, nrg);
395 } else
396 *regions_needed += 1;
397 }
398
399 VM_BUG_ON(add < 0);
400 return add;
401}
402
403
404
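/*
 * Must be called with resv->lock acquired.  Will drop lock to allocate
 * entries.
 */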
405static int allocate_file_region_entries(struct resv_map *resv,
406 int regions_needed)
407 __must_hold(&resv->lock)
408{
409 struct list_head allocated_regions;
410 int to_allocate = 0, i = 0;
411 struct file_region *trg = NULL, *rg = NULL;
412
413 VM_BUG_ON(regions_needed < 0);
414
415 INIT_LIST_HEAD(&allocated_regions);
416
417
418
419
420
421
422
423
424
425
426 while (resv->region_cache_count <
427 (resv->adds_in_progress + regions_needed)) {
428 to_allocate = resv->adds_in_progress + regions_needed -
429 resv->region_cache_count;
430
431
432
433
434
435 VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
436
437 spin_unlock(&resv->lock);
438 for (i = 0; i < to_allocate; i++) {
439 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
440 if (!trg)
441 goto out_of_memory;
442 list_add(&trg->link, &allocated_regions);
443 }
444
445 spin_lock(&resv->lock);
446
447 list_splice(&allocated_regions, &resv->region_cache);
448 resv->region_cache_count += to_allocate;
449 }
450
451 return 0;
452
453out_of_memory:
454 list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
455 list_del(&rg->link);
456 kfree(rg);
457 }
458 return -ENOMEM;
459}
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
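/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will
 * not have sufficient entries due to races with other code doing region_add
 * or region_del.  The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map.  This number is
 * greater than or equal to zero.  If file_region entries needed to be
 * allocated for this operation and the operation could not proceed, -ENOMEM
 * will be returned.
 */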
478static long region_add(struct resv_map *resv, long f, long t,
479 long in_regions_needed, struct hstate *h,
480 struct hugetlb_cgroup *h_cg)
481{
482 long add = 0, actual_regions_needed = 0;
483
484 spin_lock(&resv->lock);
485retry:
486
487
488 add_reservation_in_range(resv, f, t, NULL, NULL,
489 &actual_regions_needed);
490
491
492
493
494
495
496
497
498
499
500 if (actual_regions_needed > in_regions_needed &&
501 resv->region_cache_count <
502 resv->adds_in_progress +
503 (actual_regions_needed - in_regions_needed)) {
504
505
506
507 VM_BUG_ON(t - f <= 1);
508
509 if (allocate_file_region_entries(
510 resv, actual_regions_needed - in_regions_needed)) {
511 return -ENOMEM;
512 }
513
514 goto retry;
515 }
516
517 add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
518
519 resv->adds_in_progress -= in_regions_needed;
520
521 spin_unlock(&resv->lock);
522 VM_BUG_ON(add < 0);
523 return add;
524}
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
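/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  A number of new file_region structures is added to the cache as a
 * placeholder, for the subsequent region_add call to use.
 *
 * out_regions_needed is the number of regions added to the
 * resv->adds_in_progress.  This value needs to be provided to a follow up
 * call to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater or equal to
 * zero.  -ENOMEM is returned if a new file_region structure or cache entry
 * is needed and can not be allocated.
 */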
546static long region_chg(struct resv_map *resv, long f, long t,
547 long *out_regions_needed)
548{
549 long chg = 0;
550
551 spin_lock(&resv->lock);
552
553
554 chg = add_reservation_in_range(resv, f, t, NULL, NULL,
555 out_regions_needed);
556
557 if (*out_regions_needed == 0)
558 *out_regions_needed = 1;
559
560 if (allocate_file_region_entries(resv, *out_regions_needed))
561 return -ENOMEM;
562
563 resv->adds_in_progress += *out_regions_needed;
564
565 spin_unlock(&resv->lock);
566 return chg;
567}
568
569
570
571
572
573
574
575
576
577
578
579
580
581
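/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter.  regions_needed
 * is the value returned by the region_chg call, it is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * it is always passed the range which was actually adjusted.
 */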
582static void region_abort(struct resv_map *resv, long f, long t,
583 long regions_needed)
584{
585 spin_lock(&resv->lock);
586 VM_BUG_ON(!resv->region_cache_count);
587 resv->adds_in_progress -= regions_needed;
588 spin_unlock(&resv->lock);
589}
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
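/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */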
605static long region_del(struct resv_map *resv, long f, long t)
606{
607 struct list_head *head = &resv->regions;
608 struct file_region *rg, *trg;
609 struct file_region *nrg = NULL;
610 long del = 0;
611
612retry:
613 spin_lock(&resv->lock);
614 list_for_each_entry_safe(rg, trg, head, link) {
615
616
617
618
619
620
621
622 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
623 continue;
624
625 if (rg->from >= t)
626 break;
627
628 if (f > rg->from && t < rg->to) {
629
630
631
632
633 if (!nrg &&
634 resv->region_cache_count > resv->adds_in_progress) {
635 nrg = list_first_entry(&resv->region_cache,
636 struct file_region,
637 link);
638 list_del(&nrg->link);
639 resv->region_cache_count--;
640 }
641
642 if (!nrg) {
643 spin_unlock(&resv->lock);
644 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
645 if (!nrg)
646 return -ENOMEM;
647 goto retry;
648 }
649
650 del += t - f;
651 hugetlb_cgroup_uncharge_file_region(
652 resv, rg, t - f);
653
654
655 nrg->from = t;
656 nrg->to = rg->to;
657
658 copy_hugetlb_cgroup_uncharge_info(nrg, rg);
659
660 INIT_LIST_HEAD(&nrg->link);
661
662
663 rg->to = f;
664
665 list_add(&nrg->link, &rg->link);
666 nrg = NULL;
667 break;
668 }
669
670 if (f <= rg->from && t >= rg->to) {
671 del += rg->to - rg->from;
672 hugetlb_cgroup_uncharge_file_region(resv, rg,
673 rg->to - rg->from);
674 list_del(&rg->link);
675 kfree(rg);
676 continue;
677 }
678
679 if (f <= rg->from) {
680 hugetlb_cgroup_uncharge_file_region(resv, rg,
681 t - rg->from);
682
683 del += t - rg->from;
684 rg->from = t;
685 } else {
686 hugetlb_cgroup_uncharge_file_region(resv, rg,
687 rg->to - f);
688
689 del += rg->to - f;
690 rg->to = f;
691 }
692 }
693
694 spin_unlock(&resv->lock);
695 kfree(nrg);
696 return del;
697}
698
699
700
701
702
703
704
705
706
707
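/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was free'ed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */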
708void hugetlb_fix_reserve_counts(struct inode *inode)
709{
710 struct hugepage_subpool *spool = subpool_inode(inode);
711 long rsv_adjust;
712
713 rsv_adjust = hugepage_subpool_get_pages(spool, 1);
714 if (rsv_adjust) {
715 struct hstate *h = hstate_inode(inode);
716
717 hugetlb_acct_memory(h, 1);
718 }
719}
720
721
722
723
724
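/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */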
725static long region_count(struct resv_map *resv, long f, long t)
726{
727 struct list_head *head = &resv->regions;
728 struct file_region *rg;
729 long chg = 0;
730
731 spin_lock(&resv->lock);
732
733 list_for_each_entry(rg, head, link) {
734 long seg_from;
735 long seg_to;
736
737 if (rg->to <= f)
738 continue;
739 if (rg->from >= t)
740 break;
741
742 seg_from = max(rg->from, f);
743 seg_to = min(rg->to, t);
744
745 chg += seg_to - seg_from;
746 }
747 spin_unlock(&resv->lock);
748
749 return chg;
750}
751
752
753
754
755
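/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */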
756static pgoff_t vma_hugecache_offset(struct hstate *h,
757 struct vm_area_struct *vma, unsigned long address)
758{
759 return ((address - vma->vm_start) >> huge_page_shift(h)) +
760 (vma->vm_pgoff >> huge_page_order(h));
761}
762
763pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
764 unsigned long address)
765{
766 return vma_hugecache_offset(hstate_vma(vma), vma, address);
767}
768EXPORT_SYMBOL_GPL(linear_hugepage_index);
769
770
771
772
773
774unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
775{
776 if (vma->vm_ops && vma->vm_ops->pagesize)
777 return vma->vm_ops->pagesize(vma);
778 return PAGE_SIZE;
779}
780EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
781
782
783
784
785
786
787
788__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
789{
790 return vma_kernel_pagesize(vma);
791}
792
793
794
795
796
797
798#define HPAGE_RESV_OWNER (1UL << 0)
799#define HPAGE_RESV_UNMAPPED (1UL << 1)
800#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821static unsigned long get_vma_private_data(struct vm_area_struct *vma)
822{
823 return (unsigned long)vma->vm_private_data;
824}
825
826static void set_vma_private_data(struct vm_area_struct *vma,
827 unsigned long value)
828{
829 vma->vm_private_data = (void *)value;
830}
831
832static void
833resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
834 struct hugetlb_cgroup *h_cg,
835 struct hstate *h)
836{
837#ifdef CONFIG_CGROUP_HUGETLB
838 if (!h_cg || !h) {
839 resv_map->reservation_counter = NULL;
840 resv_map->pages_per_hpage = 0;
841 resv_map->css = NULL;
842 } else {
843 resv_map->reservation_counter =
844 &h_cg->rsvd_hugepage[hstate_index(h)];
845 resv_map->pages_per_hpage = pages_per_huge_page(h);
846 resv_map->css = &h_cg->css;
847 }
848#endif
849}
850
851struct resv_map *resv_map_alloc(void)
852{
853 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
854 struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
855
856 if (!resv_map || !rg) {
857 kfree(resv_map);
858 kfree(rg);
859 return NULL;
860 }
861
862 kref_init(&resv_map->refs);
863 spin_lock_init(&resv_map->lock);
864 INIT_LIST_HEAD(&resv_map->regions);
865
866 resv_map->adds_in_progress = 0;
867
868
869
870
871
872
873 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);
874
875 INIT_LIST_HEAD(&resv_map->region_cache);
876 list_add(&rg->link, &resv_map->region_cache);
877 resv_map->region_cache_count = 1;
878
879 return resv_map;
880}
881
882void resv_map_release(struct kref *ref)
883{
884 struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
885 struct list_head *head = &resv_map->region_cache;
886 struct file_region *rg, *trg;
887
888
889 region_del(resv_map, 0, LONG_MAX);
890
891
892 list_for_each_entry_safe(rg, trg, head, link) {
893 list_del(&rg->link);
894 kfree(rg);
895 }
896
897 VM_BUG_ON(resv_map->adds_in_progress);
898
899 kfree(resv_map);
900}
901
902static inline struct resv_map *inode_resv_map(struct inode *inode)
903{
904
905
906
907
908
909
910
911
912 return (struct resv_map *)(&inode->i_data)->private_data;
913}
914
915static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
916{
917 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
918 if (vma->vm_flags & VM_MAYSHARE) {
919 struct address_space *mapping = vma->vm_file->f_mapping;
920 struct inode *inode = mapping->host;
921
922 return inode_resv_map(inode);
923
924 } else {
925 return (struct resv_map *)(get_vma_private_data(vma) &
926 ~HPAGE_RESV_MASK);
927 }
928}
929
930static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
931{
932 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
933 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
934
935 set_vma_private_data(vma, (get_vma_private_data(vma) &
936 HPAGE_RESV_MASK) | (unsigned long)map);
937}
938
939static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
940{
941 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
942 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
943
944 set_vma_private_data(vma, get_vma_private_data(vma) | flags);
945}
946
947static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
948{
949 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
950
951 return (get_vma_private_data(vma) & flag) != 0;
952}
953
954
955void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
956{
957 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
958 if (!(vma->vm_flags & VM_MAYSHARE))
959 vma->vm_private_data = (void *)0;
960}
961
962
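/* Returns true if the VMA has associated reserve pages */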
963static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
964{
965 if (vma->vm_flags & VM_NORESERVE) {
966
967
968
969
970
971
972
973
974
975 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
976 return true;
977 else
978 return false;
979 }
980
981
982 if (vma->vm_flags & VM_MAYSHARE) {
983
984
985
986
987
988
989
990 if (chg)
991 return false;
992 else
993 return true;
994 }
995
996
997
998
999
1000 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016 if (chg)
1017 return false;
1018 else
1019 return true;
1020 }
1021
1022 return false;
1023}
1024
1025static void enqueue_huge_page(struct hstate *h, struct page *page)
1026{
1027 int nid = page_to_nid(page);
1028 list_move(&page->lru, &h->hugepage_freelists[nid]);
1029 h->free_huge_pages++;
1030 h->free_huge_pages_node[nid]++;
1031}
1032
1033static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
1034{
1035 struct page *page;
1036 bool nocma = !!(current->flags & PF_MEMALLOC_NOCMA);
1037
1038 list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
1039 if (nocma && is_migrate_cma_page(page))
1040 continue;
1041
1042 if (PageHWPoison(page))
1043 continue;
1044
1045 list_move(&page->lru, &h->hugepage_activelist);
1046 set_page_refcounted(page);
1047 h->free_huge_pages--;
1048 h->free_huge_pages_node[nid]--;
1049 return page;
1050 }
1051
1052 return NULL;
1053}
1054
1055static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
1056 nodemask_t *nmask)
1057{
1058 unsigned int cpuset_mems_cookie;
1059 struct zonelist *zonelist;
1060 struct zone *zone;
1061 struct zoneref *z;
1062 int node = NUMA_NO_NODE;
1063
1064 zonelist = node_zonelist(nid, gfp_mask);
1065
1066retry_cpuset:
1067 cpuset_mems_cookie = read_mems_allowed_begin();
1068 for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
1069 struct page *page;
1070
1071 if (!cpuset_zone_allowed(zone, gfp_mask))
1072 continue;
1073
1074
1075
1076
1077 if (zone_to_nid(zone) == node)
1078 continue;
1079 node = zone_to_nid(zone);
1080
1081 page = dequeue_huge_page_node_exact(h, node);
1082 if (page)
1083 return page;
1084 }
1085 if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
1086 goto retry_cpuset;
1087
1088 return NULL;
1089}
1090
1091static struct page *dequeue_huge_page_vma(struct hstate *h,
1092 struct vm_area_struct *vma,
1093 unsigned long address, int avoid_reserve,
1094 long chg)
1095{
1096 struct page *page;
1097 struct mempolicy *mpol;
1098 gfp_t gfp_mask;
1099 nodemask_t *nodemask;
1100 int nid;
1101
1102
1103
1104
1105
1106
1107 if (!vma_has_reserves(vma, chg) &&
1108 h->free_huge_pages - h->resv_huge_pages == 0)
1109 goto err;
1110
1111
1112 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
1113 goto err;
1114
1115 gfp_mask = htlb_alloc_mask(h);
1116 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1117 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
1118 if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
1119 SetPagePrivate(page);
1120 h->resv_huge_pages--;
1121 }
1122
1123 mpol_cond_put(mpol);
1124 return page;
1125
1126err:
1127 return NULL;
1128}
1129
1130
1131
1132
1133
1134
1135
1136
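/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */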
1137static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
1138{
1139 nid = next_node_in(nid, *nodes_allowed);
1140 VM_BUG_ON(nid >= MAX_NUMNODES);
1141
1142 return nid;
1143}
1144
1145static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
1146{
1147 if (!node_isset(nid, *nodes_allowed))
1148 nid = next_node_allowed(nid, nodes_allowed);
1149 return nid;
1150}
1151
1152
1153
1154
1155
1156
1157
1158static int hstate_next_node_to_alloc(struct hstate *h,
1159 nodemask_t *nodes_allowed)
1160{
1161 int nid;
1162
1163 VM_BUG_ON(!nodes_allowed);
1164
1165 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1166 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1167
1168 return nid;
1169}
1170
1171
1172
1173
1174
1175
1176
1177static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1178{
1179 int nid;
1180
1181 VM_BUG_ON(!nodes_allowed);
1182
1183 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1184 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1185
1186 return nid;
1187}
1188
1189#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
1190 for (nr_nodes = nodes_weight(*mask); \
1191 nr_nodes > 0 && \
1192 ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
1193 nr_nodes--)
1194
1195#define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
1196 for (nr_nodes = nodes_weight(*mask); \
1197 nr_nodes > 0 && \
1198 ((node = hstate_next_node_to_free(hs, mask)) || 1); \
1199 nr_nodes--)
1200
1201#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1202static void destroy_compound_gigantic_page(struct page *page,
1203 unsigned int order)
1204{
1205 int i;
1206 int nr_pages = 1 << order;
1207 struct page *p = page + 1;
1208
1209 atomic_set(compound_mapcount_ptr(page), 0);
1210 if (hpage_pincount_available(page))
1211 atomic_set(compound_pincount_ptr(page), 0);
1212
1213 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1214 clear_compound_head(p);
1215 set_page_refcounted(p);
1216 }
1217
1218 set_compound_order(page, 0);
1219 page[1].compound_nr = 0;
1220 __ClearPageHead(page);
1221}
1222
1223static void free_gigantic_page(struct page *page, unsigned int order)
1224{
1225
1226
1227
1228
1229#ifdef CONFIG_CMA
1230 if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
1231 return;
1232#endif
1233
1234 free_contig_range(page_to_pfn(page), 1 << order);
1235}
1236
1237#ifdef CONFIG_CONTIG_ALLOC
1238static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1239 int nid, nodemask_t *nodemask)
1240{
1241 unsigned long nr_pages = 1UL << huge_page_order(h);
1242 if (nid == NUMA_NO_NODE)
1243 nid = numa_mem_id();
1244
1245#ifdef CONFIG_CMA
1246 {
1247 struct page *page;
1248 int node;
1249
1250 if (hugetlb_cma[nid]) {
1251 page = cma_alloc(hugetlb_cma[nid], nr_pages,
1252 huge_page_order(h), true);
1253 if (page)
1254 return page;
1255 }
1256
1257 if (!(gfp_mask & __GFP_THISNODE)) {
1258 for_each_node_mask(node, *nodemask) {
1259 if (node == nid || !hugetlb_cma[node])
1260 continue;
1261
1262 page = cma_alloc(hugetlb_cma[node], nr_pages,
1263 huge_page_order(h), true);
1264 if (page)
1265 return page;
1266 }
1267 }
1268 }
1269#endif
1270
1271 return alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
1272}
1273
1274static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1275static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1276#else
1277static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1278 int nid, nodemask_t *nodemask)
1279{
1280 return NULL;
1281}
1282#endif
1283
1284#else
1285static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1286 int nid, nodemask_t *nodemask)
1287{
1288 return NULL;
1289}
1290static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1291static inline void destroy_compound_gigantic_page(struct page *page,
1292 unsigned int order) { }
1293#endif
1294
1295static void update_and_free_page(struct hstate *h, struct page *page)
1296{
1297 int i;
1298
1299 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1300 return;
1301
1302 h->nr_huge_pages--;
1303 h->nr_huge_pages_node[page_to_nid(page)]--;
1304 for (i = 0; i < pages_per_huge_page(h); i++) {
1305 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1306 1 << PG_referenced | 1 << PG_dirty |
1307 1 << PG_active | 1 << PG_private |
1308 1 << PG_writeback);
1309 }
1310 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1311 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);
1312 set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1313 set_page_refcounted(page);
1314 if (hstate_is_gigantic(h)) {
1315
1316
1317
1318
1319 spin_unlock(&hugetlb_lock);
1320 destroy_compound_gigantic_page(page, huge_page_order(h));
1321 free_gigantic_page(page, huge_page_order(h));
1322 spin_lock(&hugetlb_lock);
1323 } else {
1324 __free_pages(page, huge_page_order(h));
1325 }
1326}
1327
1328struct hstate *size_to_hstate(unsigned long size)
1329{
1330 struct hstate *h;
1331
1332 for_each_hstate(h) {
1333 if (huge_page_size(h) == size)
1334 return h;
1335 }
1336 return NULL;
1337}
1338
1339
1340
1341
1342
1343
1344
1345bool page_huge_active(struct page *page)
1346{
1347 VM_BUG_ON_PAGE(!PageHuge(page), page);
1348 return PageHead(page) && PagePrivate(&page[1]);
1349}
1350
1351
1352static void set_page_huge_active(struct page *page)
1353{
1354 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1355 SetPagePrivate(&page[1]);
1356}
1357
1358static void clear_page_huge_active(struct page *page)
1359{
1360 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1361 ClearPagePrivate(&page[1]);
1362}
1363
1364
1365
1366
1367
1368static inline bool PageHugeTemporary(struct page *page)
1369{
1370 if (!PageHuge(page))
1371 return false;
1372
1373 return (unsigned long)page[2].mapping == -1U;
1374}
1375
1376static inline void SetPageHugeTemporary(struct page *page)
1377{
1378 page[2].mapping = (void *)-1U;
1379}
1380
1381static inline void ClearPageHugeTemporary(struct page *page)
1382{
1383 page[2].mapping = NULL;
1384}
1385
1386static void __free_huge_page(struct page *page)
1387{
1388
1389
1390
1391
1392 struct hstate *h = page_hstate(page);
1393 int nid = page_to_nid(page);
1394 struct hugepage_subpool *spool =
1395 (struct hugepage_subpool *)page_private(page);
1396 bool restore_reserve;
1397
1398 VM_BUG_ON_PAGE(page_count(page), page);
1399 VM_BUG_ON_PAGE(page_mapcount(page), page);
1400
1401 set_page_private(page, 0);
1402 page->mapping = NULL;
1403 restore_reserve = PagePrivate(page);
1404 ClearPagePrivate(page);
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414 if (!restore_reserve) {
1415
1416
1417
1418
1419
1420
1421 if (hugepage_subpool_put_pages(spool, 1) == 0)
1422 restore_reserve = true;
1423 }
1424
1425 spin_lock(&hugetlb_lock);
1426 clear_page_huge_active(page);
1427 hugetlb_cgroup_uncharge_page(hstate_index(h),
1428 pages_per_huge_page(h), page);
1429 hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
1430 pages_per_huge_page(h), page);
1431 if (restore_reserve)
1432 h->resv_huge_pages++;
1433
1434 if (PageHugeTemporary(page)) {
1435 list_del(&page->lru);
1436 ClearPageHugeTemporary(page);
1437 update_and_free_page(h, page);
1438 } else if (h->surplus_huge_pages_node[nid]) {
1439
1440 list_del(&page->lru);
1441 update_and_free_page(h, page);
1442 h->surplus_huge_pages--;
1443 h->surplus_huge_pages_node[nid]--;
1444 } else {
1445 arch_clear_hugepage_flags(page);
1446 enqueue_huge_page(h, page);
1447 }
1448 spin_unlock(&hugetlb_lock);
1449}
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
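/*
 * free_huge_page() can end up being called from contexts where taking
 * hugetlb_lock directly is not safe (e.g. via put_page() in non-task
 * context).  In that case the page is queued on a lockless llist and the
 * actual freeing is deferred to a workqueue.  Since page->mapping is
 * cleared in __free_huge_page() anyway, it is reused here as the
 * llist_node of the deferred-free list.
 */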
1461static LLIST_HEAD(hpage_freelist);
1462
1463static void free_hpage_workfn(struct work_struct *work)
1464{
1465 struct llist_node *node;
1466 struct page *page;
1467
1468 node = llist_del_all(&hpage_freelist);
1469
1470 while (node) {
1471 page = container_of((struct address_space **)node,
1472 struct page, mapping);
1473 node = node->next;
1474 __free_huge_page(page);
1475 }
1476}
1477static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1478
1479void free_huge_page(struct page *page)
1480{
1481
1482
1483
1484 if (!in_task()) {
1485
1486
1487
1488
1489
1490 if (llist_add((struct llist_node *)&page->mapping,
1491 &hpage_freelist))
1492 schedule_work(&free_hpage_work);
1493 return;
1494 }
1495
1496 __free_huge_page(page);
1497}
1498
1499static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1500{
1501 INIT_LIST_HEAD(&page->lru);
1502 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1503 set_hugetlb_cgroup(page, NULL);
1504 set_hugetlb_cgroup_rsvd(page, NULL);
1505 spin_lock(&hugetlb_lock);
1506 h->nr_huge_pages++;
1507 h->nr_huge_pages_node[nid]++;
1508 spin_unlock(&hugetlb_lock);
1509}
1510
1511static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1512{
1513 int i;
1514 int nr_pages = 1 << order;
1515 struct page *p = page + 1;
1516
1517
1518 set_compound_order(page, order);
1519 __ClearPageReserved(page);
1520 __SetPageHead(page);
1521 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534 __ClearPageReserved(p);
1535 set_page_count(p, 0);
1536 set_compound_head(p, page);
1537 }
1538 atomic_set(compound_mapcount_ptr(page), -1);
1539
1540 if (hpage_pincount_available(page))
1541 atomic_set(compound_pincount_ptr(page), 0);
1542}
1543
1544
1545
1546
1547
1548
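/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */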
1549int PageHuge(struct page *page)
1550{
1551 if (!PageCompound(page))
1552 return 0;
1553
1554 page = compound_head(page);
1555 return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1556}
1557EXPORT_SYMBOL_GPL(PageHuge);
1558
1559
1560
1561
1562
1563int PageHeadHuge(struct page *page_head)
1564{
1565 if (!PageHead(page_head))
1566 return 0;
1567
1568 return page_head[1].compound_dtor == HUGETLB_PAGE_DTOR;
1569}
1570
1571
1572
1573
1574
1575
1576
1577
1578struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
1579{
1580 struct address_space *mapping = page_mapping(hpage);
1581
1582 if (!mapping)
1583 return mapping;
1584
1585 if (i_mmap_trylock_write(mapping))
1586 return mapping;
1587
1588 return NULL;
1589}
1590
1591pgoff_t __basepage_index(struct page *page)
1592{
1593 struct page *page_head = compound_head(page);
1594 pgoff_t index = page_index(page_head);
1595 unsigned long compound_idx;
1596
1597 if (!PageHuge(page_head))
1598 return page_index(page);
1599
1600 if (compound_order(page_head) >= MAX_ORDER)
1601 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1602 else
1603 compound_idx = page - page_head;
1604
1605 return (index << compound_order(page_head)) + compound_idx;
1606}
1607
1608static struct page *alloc_buddy_huge_page(struct hstate *h,
1609 gfp_t gfp_mask, int nid, nodemask_t *nmask,
1610 nodemask_t *node_alloc_noretry)
1611{
1612 int order = huge_page_order(h);
1613 struct page *page;
1614 bool alloc_try_hard = true;
1615
1616
1617
1618
1619
1620
1621
1622
1623 if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
1624 alloc_try_hard = false;
1625 gfp_mask |= __GFP_COMP|__GFP_NOWARN;
1626 if (alloc_try_hard)
1627 gfp_mask |= __GFP_RETRY_MAYFAIL;
1628 if (nid == NUMA_NO_NODE)
1629 nid = numa_mem_id();
1630 page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
1631 if (page)
1632 __count_vm_event(HTLB_BUDDY_PGALLOC);
1633 else
1634 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1635
1636
1637
1638
1639
1640
1641 if (node_alloc_noretry && page && !alloc_try_hard)
1642 node_clear(nid, *node_alloc_noretry);
1643
1644
1645
1646
1647
1648
1649 if (node_alloc_noretry && !page && alloc_try_hard)
1650 node_set(nid, *node_alloc_noretry);
1651
1652 return page;
1653}
1654
1655
1656
1657
1658
1659static struct page *alloc_fresh_huge_page(struct hstate *h,
1660 gfp_t gfp_mask, int nid, nodemask_t *nmask,
1661 nodemask_t *node_alloc_noretry)
1662{
1663 struct page *page;
1664
1665 if (hstate_is_gigantic(h))
1666 page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
1667 else
1668 page = alloc_buddy_huge_page(h, gfp_mask,
1669 nid, nmask, node_alloc_noretry);
1670 if (!page)
1671 return NULL;
1672
1673 if (hstate_is_gigantic(h))
1674 prep_compound_gigantic_page(page, huge_page_order(h));
1675 prep_new_huge_page(h, page, page_to_nid(page));
1676
1677 return page;
1678}
1679
1680
1681
1682
1683
1684static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1685 nodemask_t *node_alloc_noretry)
1686{
1687 struct page *page;
1688 int nr_nodes, node;
1689 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1690
1691 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1692 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
1693 node_alloc_noretry);
1694 if (page)
1695 break;
1696 }
1697
1698 if (!page)
1699 return 0;
1700
1701 put_page(page);
1702
1703 return 1;
1704}
1705
1706
1707
1708
1709
1710
1711
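/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * If acct_surplus is set, a page is freed only from a node that has
 * surplus pages, and the surplus accounting is adjusted accordingly.
 * Called with hugetlb_lock locked.
 */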
1712static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1713 bool acct_surplus)
1714{
1715 int nr_nodes, node;
1716 int ret = 0;
1717
1718 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1719
1720
1721
1722
1723 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1724 !list_empty(&h->hugepage_freelists[node])) {
1725 struct page *page =
1726 list_entry(h->hugepage_freelists[node].next,
1727 struct page, lru);
1728 list_del(&page->lru);
1729 h->free_huge_pages--;
1730 h->free_huge_pages_node[node]--;
1731 if (acct_surplus) {
1732 h->surplus_huge_pages--;
1733 h->surplus_huge_pages_node[node]--;
1734 }
1735 update_and_free_page(h, page);
1736 ret = 1;
1737 break;
1738 }
1739 }
1740
1741 return ret;
1742}
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
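/*
 * Dissolve a given free hugepage into free buddy pages.  This function
 * does nothing for in-use hugepages and non-hugepages.
 * This function returns values like below:
 *
 *  -EBUSY: failed to dissolve free hugepages or the hugepage is in-use
 *          (allocated or reserved.)
 *       0: successfully dissolved free hugepages or the page is not a
 *          hugepage (considered as already dissolved)
 */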
1754int dissolve_free_huge_page(struct page *page)
1755{
1756 int rc = -EBUSY;
1757
1758
1759 if (!PageHuge(page))
1760 return 0;
1761
1762 spin_lock(&hugetlb_lock);
1763 if (!PageHuge(page)) {
1764 rc = 0;
1765 goto out;
1766 }
1767
1768 if (!page_count(page)) {
1769 struct page *head = compound_head(page);
1770 struct hstate *h = page_hstate(head);
1771 int nid = page_to_nid(head);
1772 if (h->free_huge_pages - h->resv_huge_pages == 0)
1773 goto out;
1774
1775
1776
1777
1778 if (PageHWPoison(head) && page != head) {
1779 SetPageHWPoison(page);
1780 ClearPageHWPoison(head);
1781 }
1782 list_del(&head->lru);
1783 h->free_huge_pages--;
1784 h->free_huge_pages_node[nid]--;
1785 h->max_huge_pages--;
1786 update_and_free_page(h, head);
1787 rc = 0;
1788 }
1789out:
1790 spin_unlock(&hugetlb_lock);
1791 return rc;
1792}
1793
1794
1795
1796
1797
1798
1799
1800
1801
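/*
 * Dissolve free hugepages in a given pfn range.  Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that this will dissolve a free gigantic hugepage completely, if any
 * part of it lies within the given range.
 * Also note that if dissolve_free_huge_page() returns with an error, all
 * free hugepages that were dissolved before that error are lost.
 */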
1802int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1803{
1804 unsigned long pfn;
1805 struct page *page;
1806 int rc = 0;
1807
1808 if (!hugepages_supported())
1809 return rc;
1810
1811 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1812 page = pfn_to_page(pfn);
1813 rc = dissolve_free_huge_page(page);
1814 if (rc)
1815 break;
1816 }
1817
1818 return rc;
1819}
1820
1821
1822
1823
1824static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
1825 int nid, nodemask_t *nmask)
1826{
1827 struct page *page = NULL;
1828
1829 if (hstate_is_gigantic(h))
1830 return NULL;
1831
1832 spin_lock(&hugetlb_lock);
1833 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
1834 goto out_unlock;
1835 spin_unlock(&hugetlb_lock);
1836
1837 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
1838 if (!page)
1839 return NULL;
1840
1841 spin_lock(&hugetlb_lock);
1842
1843
1844
1845
1846
1847
1848
1849 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1850 SetPageHugeTemporary(page);
1851 spin_unlock(&hugetlb_lock);
1852 put_page(page);
1853 return NULL;
1854 } else {
1855 h->surplus_huge_pages++;
1856 h->surplus_huge_pages_node[page_to_nid(page)]++;
1857 }
1858
1859out_unlock:
1860 spin_unlock(&hugetlb_lock);
1861
1862 return page;
1863}
1864
1865static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
1866 int nid, nodemask_t *nmask)
1867{
1868 struct page *page;
1869
1870 if (hstate_is_gigantic(h))
1871 return NULL;
1872
1873 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
1874 if (!page)
1875 return NULL;
1876
1877
1878
1879
1880
1881 SetPageHugeTemporary(page);
1882
1883 return page;
1884}
1885
1886
1887
1888
1889static
1890struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
1891 struct vm_area_struct *vma, unsigned long addr)
1892{
1893 struct page *page;
1894 struct mempolicy *mpol;
1895 gfp_t gfp_mask = htlb_alloc_mask(h);
1896 int nid;
1897 nodemask_t *nodemask;
1898
1899 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
1900 page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
1901 mpol_cond_put(mpol);
1902
1903 return page;
1904}
1905
1906
1907struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
1908 nodemask_t *nmask, gfp_t gfp_mask)
1909{
1910 spin_lock(&hugetlb_lock);
1911 if (h->free_huge_pages - h->resv_huge_pages > 0) {
1912 struct page *page;
1913
1914 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
1915 if (page) {
1916 spin_unlock(&hugetlb_lock);
1917 return page;
1918 }
1919 }
1920 spin_unlock(&hugetlb_lock);
1921
1922 return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
1923}
1924
1925
1926struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
1927 unsigned long address)
1928{
1929 struct mempolicy *mpol;
1930 nodemask_t *nodemask;
1931 struct page *page;
1932 gfp_t gfp_mask;
1933 int node;
1934
1935 gfp_mask = htlb_alloc_mask(h);
1936 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1937 page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
1938 mpol_cond_put(mpol);
1939
1940 return page;
1941}
1942
1943
1944
1945
1946
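/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.  Called with hugetlb_lock held; the lock is dropped
 * and re-taken around the surplus page allocations.
 */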
1947static int gather_surplus_pages(struct hstate *h, int delta)
1948 __must_hold(&hugetlb_lock)
1949{
1950 struct list_head surplus_list;
1951 struct page *page, *tmp;
1952 int ret, i;
1953 int needed, allocated;
1954 bool alloc_ok = true;
1955
1956 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1957 if (needed <= 0) {
1958 h->resv_huge_pages += delta;
1959 return 0;
1960 }
1961
1962 allocated = 0;
1963 INIT_LIST_HEAD(&surplus_list);
1964
1965 ret = -ENOMEM;
1966retry:
1967 spin_unlock(&hugetlb_lock);
1968 for (i = 0; i < needed; i++) {
1969 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
1970 NUMA_NO_NODE, NULL);
1971 if (!page) {
1972 alloc_ok = false;
1973 break;
1974 }
1975 list_add(&page->lru, &surplus_list);
1976 cond_resched();
1977 }
1978 allocated += i;
1979
1980
1981
1982
1983
1984 spin_lock(&hugetlb_lock);
1985 needed = (h->resv_huge_pages + delta) -
1986 (h->free_huge_pages + allocated);
1987 if (needed > 0) {
1988 if (alloc_ok)
1989 goto retry;
1990
1991
1992
1993
1994
1995 goto free;
1996 }
1997
1998
1999
2000
2001
2002
2003
2004
2005 needed += allocated;
2006 h->resv_huge_pages += delta;
2007 ret = 0;
2008
2009
2010 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
2011 if ((--needed) < 0)
2012 break;
2013
2014
2015
2016
2017 put_page_testzero(page);
2018 VM_BUG_ON_PAGE(page_count(page), page);
2019 enqueue_huge_page(h, page);
2020 }
2021free:
2022 spin_unlock(&hugetlb_lock);
2023
2024
2025 list_for_each_entry_safe(page, tmp, &surplus_list, lru)
2026 put_page(page);
2027 spin_lock(&hugetlb_lock);
2028
2029 return ret;
2030}
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
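/*
 * This routine has two main purposes:
 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
 *    in unused_resv_pages.  This corresponds to the prior adjustments made
 *    by the caller, so take these pages off the reservation count.
 * 2) Free any unused surplus pages that may have been allocated to satisfy
 *    the reservation.  As many as unused_resv_pages may be freed.
 */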
2046static void return_unused_surplus_pages(struct hstate *h,
2047 unsigned long unused_resv_pages)
2048{
2049 unsigned long nr_pages;
2050
2051
2052 if (hstate_is_gigantic(h))
2053 goto out;
2054
2055
2056
2057
2058
2059 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2060
2061
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072
2073 while (nr_pages--) {
2074 h->resv_huge_pages--;
2075 unused_resv_pages--;
2076 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
2077 goto out;
2078 cond_resched_lock(&hugetlb_lock);
2079 }
2080
2081out:
2082
2083 h->resv_huge_pages -= unused_resv_pages;
2084}
2085
2086
2087
2088
2089
2090
2091
2092
2093
2094
2095
2096
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111enum vma_resv_mode {
2112 VMA_NEEDS_RESV,
2113 VMA_COMMIT_RESV,
2114 VMA_END_RESV,
2115 VMA_ADD_RESV,
2116};
2117static long __vma_reservation_common(struct hstate *h,
2118 struct vm_area_struct *vma, unsigned long addr,
2119 enum vma_resv_mode mode)
2120{
2121 struct resv_map *resv;
2122 pgoff_t idx;
2123 long ret;
2124 long dummy_out_regions_needed;
2125
2126 resv = vma_resv_map(vma);
2127 if (!resv)
2128 return 1;
2129
2130 idx = vma_hugecache_offset(h, vma, addr);
2131 switch (mode) {
2132 case VMA_NEEDS_RESV:
2133 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2134
2135
2136
2137
2138 VM_BUG_ON(dummy_out_regions_needed != 1);
2139 break;
2140 case VMA_COMMIT_RESV:
2141 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2142
2143 VM_BUG_ON(ret < 0);
2144 break;
2145 case VMA_END_RESV:
2146 region_abort(resv, idx, idx + 1, 1);
2147 ret = 0;
2148 break;
2149 case VMA_ADD_RESV:
2150 if (vma->vm_flags & VM_MAYSHARE) {
2151 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2152
2153 VM_BUG_ON(ret < 0);
2154 } else {
2155 region_abort(resv, idx, idx + 1, 1);
2156 ret = region_del(resv, idx, idx + 1);
2157 }
2158 break;
2159 default:
2160 BUG();
2161 }
2162
2163 if (vma->vm_flags & VM_MAYSHARE)
2164 return ret;
2165 else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179 if (ret)
2180 return 0;
2181 else
2182 return 1;
2183 }
2184 else
2185 return ret < 0 ? ret : 0;
2186}
2187
2188static long vma_needs_reservation(struct hstate *h,
2189 struct vm_area_struct *vma, unsigned long addr)
2190{
2191 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2192}
2193
2194static long vma_commit_reservation(struct hstate *h,
2195 struct vm_area_struct *vma, unsigned long addr)
2196{
2197 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2198}
2199
2200static void vma_end_reservation(struct hstate *h,
2201 struct vm_area_struct *vma, unsigned long addr)
2202{
2203 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2204}
2205
2206static long vma_add_reservation(struct hstate *h,
2207 struct vm_area_struct *vma, unsigned long addr)
2208{
2209 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2210}
2211
2212
2213
2214
2215
2216
2217
2218
2219
2220
2221
2222
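/*
 * This routine is called to restore a reservation on error paths.  In the
 * specific error paths, a huge page was allocated (via alloc_huge_page)
 * and is about to be freed.  If a reservation for the page existed,
 * alloc_huge_page would have consumed the reservation and set PagePrivate
 * in the newly allocated page.  When the page is freed via free_huge_page,
 * the global reservation count will be incremented if PagePrivate is set.
 * However, free_huge_page can not adjust the reserve map.  Adjust the
 * reserve map here to be consistent with global reserve count adjustments
 * to be made by free_huge_page.
 */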
2223static void restore_reserve_on_error(struct hstate *h,
2224 struct vm_area_struct *vma, unsigned long address,
2225 struct page *page)
2226{
2227 if (unlikely(PagePrivate(page))) {
2228 long rc = vma_needs_reservation(h, vma, address);
2229
2230 if (unlikely(rc < 0)) {
2231
2232
2233
2234
2235
2236
2237
2238
2239
2240
2241
2242 ClearPagePrivate(page);
2243 } else if (rc) {
2244 rc = vma_add_reservation(h, vma, address);
2245 if (unlikely(rc < 0))
2246
2247
2248
2249
2250 ClearPagePrivate(page);
2251 } else
2252 vma_end_reservation(h, vma, address);
2253 }
2254}
2255
2256struct page *alloc_huge_page(struct vm_area_struct *vma,
2257 unsigned long addr, int avoid_reserve)
2258{
2259 struct hugepage_subpool *spool = subpool_vma(vma);
2260 struct hstate *h = hstate_vma(vma);
2261 struct page *page;
2262 long map_chg, map_commit;
2263 long gbl_chg;
2264 int ret, idx;
2265 struct hugetlb_cgroup *h_cg;
2266 bool deferred_reserve;
2267
2268 idx = hstate_index(h);
2269
2270
2271
2272
2273
2274 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2275 if (map_chg < 0)
2276 return ERR_PTR(-ENOMEM);
2277
2278
2279
2280
2281
2282
2283
2284
2285 if (map_chg || avoid_reserve) {
2286 gbl_chg = hugepage_subpool_get_pages(spool, 1);
2287 if (gbl_chg < 0) {
2288 vma_end_reservation(h, vma, addr);
2289 return ERR_PTR(-ENOSPC);
2290 }
2291
2292
2293
2294
2295
2296
2297
2298
2299
2300 if (avoid_reserve)
2301 gbl_chg = 1;
2302 }
2303
2304
2305
2306 deferred_reserve = map_chg || avoid_reserve || !vma_resv_map(vma);
2307 if (deferred_reserve) {
2308 ret = hugetlb_cgroup_charge_cgroup_rsvd(
2309 idx, pages_per_huge_page(h), &h_cg);
2310 if (ret)
2311 goto out_subpool_put;
2312 }
2313
2314 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2315 if (ret)
2316 goto out_uncharge_cgroup_reservation;
2317
2318 spin_lock(&hugetlb_lock);
2319
2320
2321
2322
2323
2324 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2325 if (!page) {
2326 spin_unlock(&hugetlb_lock);
2327 page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
2328 if (!page)
2329 goto out_uncharge_cgroup;
2330 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2331 SetPagePrivate(page);
2332 h->resv_huge_pages--;
2333 }
2334 spin_lock(&hugetlb_lock);
2335 list_add(&page->lru, &h->hugepage_activelist);
2336
2337 }
2338 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2339
2340
2341
2342 if (deferred_reserve) {
2343 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
2344 h_cg, page);
2345 }
2346
2347 spin_unlock(&hugetlb_lock);
2348
2349 set_page_private(page, (unsigned long)spool);
2350
2351 map_commit = vma_commit_reservation(h, vma, addr);
2352 if (unlikely(map_chg > map_commit)) {
2353
2354
2355
2356
2357
2358
2359
2360
2361
2362 long rsv_adjust;
2363
2364 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2365 hugetlb_acct_memory(h, -rsv_adjust);
2366 if (deferred_reserve)
2367 hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h),
2368 pages_per_huge_page(h), page);
2369 }
2370 return page;
2371
2372out_uncharge_cgroup:
2373 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2374out_uncharge_cgroup_reservation:
2375 if (deferred_reserve)
2376 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
2377 h_cg);
2378out_subpool_put:
2379 if (map_chg || avoid_reserve)
2380 hugepage_subpool_put_pages(spool, 1);
2381 vma_end_reservation(h, vma, addr);
2382 return ERR_PTR(-ENOSPC);
2383}
2384
2385int alloc_bootmem_huge_page(struct hstate *h)
2386 __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2387int __alloc_bootmem_huge_page(struct hstate *h)
2388{
2389 struct huge_bootmem_page *m;
2390 int nr_nodes, node;
2391
2392 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2393 void *addr;
2394
2395 addr = memblock_alloc_try_nid_raw(
2396 huge_page_size(h), huge_page_size(h),
2397 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
2398 if (addr) {
2399
2400
2401
2402
2403
2404 m = addr;
2405 goto found;
2406 }
2407 }
2408 return 0;
2409
2410found:
2411 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2412
2413 INIT_LIST_HEAD(&m->list);
2414 list_add(&m->list, &huge_boot_pages);
2415 m->hstate = h;
2416 return 1;
2417}
2418
2419static void __init prep_compound_huge_page(struct page *page,
2420 unsigned int order)
2421{
2422 if (unlikely(order > (MAX_ORDER - 1)))
2423 prep_compound_gigantic_page(page, order);
2424 else
2425 prep_compound_page(page, order);
2426}
2427
2428
2429static void __init gather_bootmem_prealloc(void)
2430{
2431 struct huge_bootmem_page *m;
2432
2433 list_for_each_entry(m, &huge_boot_pages, list) {
2434 struct page *page = virt_to_page(m);
2435 struct hstate *h = m->hstate;
2436
2437 WARN_ON(page_count(page) != 1);
2438 prep_compound_huge_page(page, h->order);
2439 WARN_ON(PageReserved(page));
2440 prep_new_huge_page(h, page, page_to_nid(page));
2441 put_page(page);
2442
2443
2444
2445
2446
2447
2448
2449 if (hstate_is_gigantic(h))
2450 adjust_managed_page_count(page, 1 << h->order);
2451 cond_resched();
2452 }
2453}
2454
2455static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2456{
2457 unsigned long i;
2458 nodemask_t *node_alloc_noretry;
2459
2460 if (!hstate_is_gigantic(h)) {
2461
2462
2463
2464
2465
2466
2467 node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
2468 GFP_KERNEL);
2469 } else {
2470
2471 node_alloc_noretry = NULL;
2472 }
2473
2474
2475 if (node_alloc_noretry)
2476 nodes_clear(*node_alloc_noretry);
2477
2478 for (i = 0; i < h->max_huge_pages; ++i) {
2479 if (hstate_is_gigantic(h)) {
2480 if (hugetlb_cma_size) {
2481 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
2482 break;
2483 }
2484 if (!alloc_bootmem_huge_page(h))
2485 break;
2486 } else if (!alloc_pool_huge_page(h,
2487 &node_states[N_MEMORY],
2488 node_alloc_noretry))
2489 break;
2490 cond_resched();
2491 }
2492 if (i < h->max_huge_pages) {
2493 char buf[32];
2494
2495 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2496 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
2497 h->max_huge_pages, buf, i);
2498 h->max_huge_pages = i;
2499 }
2500
2501 kfree(node_alloc_noretry);
2502}
2503
2504static void __init hugetlb_init_hstates(void)
2505{
2506 struct hstate *h;
2507
2508 for_each_hstate(h) {
2509 if (minimum_order > huge_page_order(h))
2510 minimum_order = huge_page_order(h);
2511
2512
2513 if (!hstate_is_gigantic(h))
2514 hugetlb_hstate_alloc_pages(h);
2515 }
2516 VM_BUG_ON(minimum_order == UINT_MAX);
2517}
2518
2519static void __init report_hugepages(void)
2520{
2521 struct hstate *h;
2522
2523 for_each_hstate(h) {
2524 char buf[32];
2525
2526 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2527 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2528 buf, h->free_huge_pages);
2529 }
2530}
2531
2532#ifdef CONFIG_HIGHMEM
2533static void try_to_free_low(struct hstate *h, unsigned long count,
2534 nodemask_t *nodes_allowed)
2535{
2536 int i;
2537
2538 if (hstate_is_gigantic(h))
2539 return;
2540
2541 for_each_node_mask(i, *nodes_allowed) {
2542 struct page *page, *next;
2543 struct list_head *freel = &h->hugepage_freelists[i];
2544 list_for_each_entry_safe(page, next, freel, lru) {
2545 if (count >= h->nr_huge_pages)
2546 return;
2547 if (PageHighMem(page))
2548 continue;
2549 list_del(&page->lru);
2550 update_and_free_page(h, page);
2551 h->free_huge_pages--;
2552 h->free_huge_pages_node[page_to_nid(page)]--;
2553 }
2554 }
2555}
2556#else
2557static inline void try_to_free_low(struct hstate *h, unsigned long count,
2558 nodemask_t *nodes_allowed)
2559{
2560}
2561#endif
2562
2563
2564
2565
2566
2567
2568static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2569 int delta)
2570{
2571 int nr_nodes, node;
2572
2573 VM_BUG_ON(delta != -1 && delta != 1);
2574
2575 if (delta < 0) {
2576 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2577 if (h->surplus_huge_pages_node[node])
2578 goto found;
2579 }
2580 } else {
2581 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2582 if (h->surplus_huge_pages_node[node] <
2583 h->nr_huge_pages_node[node])
2584 goto found;
2585 }
2586 }
2587 return 0;
2588
2589found:
2590 h->surplus_huge_pages += delta;
2591 h->surplus_huge_pages_node[node] += delta;
2592 return 1;
2593}
2594
2595#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2596static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
2597 nodemask_t *nodes_allowed)
2598{
2599 unsigned long min_count, ret;
2600 NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
2601
2602
2603
2604
2605
2606
2607 if (node_alloc_noretry)
2608 nodes_clear(*node_alloc_noretry);
2609 else
2610 return -ENOMEM;
2611
2612 spin_lock(&hugetlb_lock);
2613
2614
2615
2616
2617
2618
2619
2620 if (nid != NUMA_NO_NODE) {
2621 unsigned long old_count = count;
2622
2623 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2624
2625
2626
2627
2628
2629
2630 if (count < old_count)
2631 count = ULONG_MAX;
2632 }
2633
2634
2635
2636
2637
2638
2639
2640
2641 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
2642 if (count > persistent_huge_pages(h)) {
2643 spin_unlock(&hugetlb_lock);
2644 NODEMASK_FREE(node_alloc_noretry);
2645 return -EINVAL;
2646 }
2647
2648 }
2649
2650
2651
2652
2653
2654
2655
2656
2657
2658
2659
2660
2661 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2662 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2663 break;
2664 }
2665
2666 while (count > persistent_huge_pages(h)) {
2667
2668
2669
2670
2671
2672 spin_unlock(&hugetlb_lock);
2673
2674
2675 cond_resched();
2676
2677 ret = alloc_pool_huge_page(h, nodes_allowed,
2678 node_alloc_noretry);
2679 spin_lock(&hugetlb_lock);
2680 if (!ret)
2681 goto out;
2682
2683
2684 if (signal_pending(current))
2685 goto out;
2686 }
2687
2688
2689
2690
2691
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2704 min_count = max(count, min_count);
2705 try_to_free_low(h, min_count, nodes_allowed);
2706 while (min_count < persistent_huge_pages(h)) {
2707 if (!free_pool_huge_page(h, nodes_allowed, 0))
2708 break;
2709 cond_resched_lock(&hugetlb_lock);
2710 }
2711 while (count < persistent_huge_pages(h)) {
2712 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2713 break;
2714 }
2715out:
2716 h->max_huge_pages = persistent_huge_pages(h);
2717 spin_unlock(&hugetlb_lock);
2718
2719 NODEMASK_FREE(node_alloc_noretry);
2720
2721 return 0;
2722}
2723
2724#define HSTATE_ATTR_RO(_name) \
2725 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2726
2727#define HSTATE_ATTR(_name) \
2728 static struct kobj_attribute _name##_attr = \
2729 __ATTR(_name, 0644, _name##_show, _name##_store)
2730
2731static struct kobject *hugepages_kobj;
2732static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2733
2734static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2735
2736static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2737{
2738 int i;
2739
2740 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2741 if (hstate_kobjs[i] == kobj) {
2742 if (nidp)
2743 *nidp = NUMA_NO_NODE;
2744 return &hstates[i];
2745 }
2746
2747 return kobj_to_node_hstate(kobj, nidp);
2748}
2749
2750static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2751 struct kobj_attribute *attr, char *buf)
2752{
2753 struct hstate *h;
2754 unsigned long nr_huge_pages;
2755 int nid;
2756
2757 h = kobj_to_hstate(kobj, &nid);
2758 if (nid == NUMA_NO_NODE)
2759 nr_huge_pages = h->nr_huge_pages;
2760 else
2761 nr_huge_pages = h->nr_huge_pages_node[nid];
2762
2763 return sprintf(buf, "%lu\n", nr_huge_pages);
2764}
2765
2766static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2767 struct hstate *h, int nid,
2768 unsigned long count, size_t len)
2769{
2770 int err;
2771 nodemask_t nodes_allowed, *n_mask;
2772
2773 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
2774 return -EINVAL;
2775
2776 if (nid == NUMA_NO_NODE) {
2777
2778
2779
2780 if (!(obey_mempolicy &&
2781 init_nodemask_of_mempolicy(&nodes_allowed)))
2782 n_mask = &node_states[N_MEMORY];
2783 else
2784 n_mask = &nodes_allowed;
2785 } else {
2786
2787
2788
2789
2790 init_nodemask_of_node(&nodes_allowed, nid);
2791 n_mask = &nodes_allowed;
2792 }
2793
2794 err = set_max_huge_pages(h, count, nid, n_mask);
2795
2796 return err ? err : len;
2797}
2798
2799static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2800 struct kobject *kobj, const char *buf,
2801 size_t len)
2802{
2803 struct hstate *h;
2804 unsigned long count;
2805 int nid;
2806 int err;
2807
2808 err = kstrtoul(buf, 10, &count);
2809 if (err)
2810 return err;
2811
2812 h = kobj_to_hstate(kobj, &nid);
2813 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2814}
2815
2816static ssize_t nr_hugepages_show(struct kobject *kobj,
2817 struct kobj_attribute *attr, char *buf)
2818{
2819 return nr_hugepages_show_common(kobj, attr, buf);
2820}
2821
2822static ssize_t nr_hugepages_store(struct kobject *kobj,
2823 struct kobj_attribute *attr, const char *buf, size_t len)
2824{
2825 return nr_hugepages_store_common(false, kobj, buf, len);
2826}
2827HSTATE_ATTR(nr_hugepages);
2828
2829#ifdef CONFIG_NUMA
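/*
 * hstate attribute for optionally mempolicy-based constraint on persistent
 * huge pages alloc/free.
 */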
2830
2831
2832
2833
2834
2835static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2836 struct kobj_attribute *attr, char *buf)
2837{
2838 return nr_hugepages_show_common(kobj, attr, buf);
2839}
2840
2841static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2842 struct kobj_attribute *attr, const char *buf, size_t len)
2843{
2844 return nr_hugepages_store_common(true, kobj, buf, len);
2845}
2846HSTATE_ATTR(nr_hugepages_mempolicy);
2847#endif
2848
2849
2850static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2851 struct kobj_attribute *attr, char *buf)
2852{
2853 struct hstate *h = kobj_to_hstate(kobj, NULL);
2854 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2855}
2856
2857static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2858 struct kobj_attribute *attr, const char *buf, size_t count)
2859{
2860 int err;
2861 unsigned long input;
2862 struct hstate *h = kobj_to_hstate(kobj, NULL);
2863
2864 if (hstate_is_gigantic(h))
2865 return -EINVAL;
2866
2867 err = kstrtoul(buf, 10, &input);
2868 if (err)
2869 return err;
2870
2871 spin_lock(&hugetlb_lock);
2872 h->nr_overcommit_huge_pages = input;
2873 spin_unlock(&hugetlb_lock);
2874
2875 return count;
2876}
2877HSTATE_ATTR(nr_overcommit_hugepages);
2878
2879static ssize_t free_hugepages_show(struct kobject *kobj,
2880 struct kobj_attribute *attr, char *buf)
2881{
2882 struct hstate *h;
2883 unsigned long free_huge_pages;
2884 int nid;
2885
2886 h = kobj_to_hstate(kobj, &nid);
2887 if (nid == NUMA_NO_NODE)
2888 free_huge_pages = h->free_huge_pages;
2889 else
2890 free_huge_pages = h->free_huge_pages_node[nid];
2891
2892 return sprintf(buf, "%lu\n", free_huge_pages);
2893}
2894HSTATE_ATTR_RO(free_hugepages);
2895
2896static ssize_t resv_hugepages_show(struct kobject *kobj,
2897 struct kobj_attribute *attr, char *buf)
2898{
2899 struct hstate *h = kobj_to_hstate(kobj, NULL);
2900 return sprintf(buf, "%lu\n", h->resv_huge_pages);
2901}
2902HSTATE_ATTR_RO(resv_hugepages);
2903
2904static ssize_t surplus_hugepages_show(struct kobject *kobj,
2905 struct kobj_attribute *attr, char *buf)
2906{
2907 struct hstate *h;
2908 unsigned long surplus_huge_pages;
2909 int nid;
2910
2911 h = kobj_to_hstate(kobj, &nid);
2912 if (nid == NUMA_NO_NODE)
2913 surplus_huge_pages = h->surplus_huge_pages;
2914 else
2915 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2916
2917 return sprintf(buf, "%lu\n", surplus_huge_pages);
2918}
2919HSTATE_ATTR_RO(surplus_hugepages);
2920
2921static struct attribute *hstate_attrs[] = {
2922 &nr_hugepages_attr.attr,
2923 &nr_overcommit_hugepages_attr.attr,
2924 &free_hugepages_attr.attr,
2925 &resv_hugepages_attr.attr,
2926 &surplus_hugepages_attr.attr,
2927#ifdef CONFIG_NUMA
2928 &nr_hugepages_mempolicy_attr.attr,
2929#endif
2930 NULL,
2931};
2932
2933static const struct attribute_group hstate_attr_group = {
2934 .attrs = hstate_attrs,
2935};
2936
2937static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2938 struct kobject **hstate_kobjs,
2939 const struct attribute_group *hstate_attr_group)
2940{
2941 int retval;
2942 int hi = hstate_index(h);
2943
2944 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2945 if (!hstate_kobjs[hi])
2946 return -ENOMEM;
2947
2948 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2949 if (retval)
2950 kobject_put(hstate_kobjs[hi]);
2951
2952 return retval;
2953}
2954
2955static void __init hugetlb_sysfs_init(void)
2956{
2957 struct hstate *h;
2958 int err;
2959
2960 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2961 if (!hugepages_kobj)
2962 return;
2963
2964 for_each_hstate(h) {
2965 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2966 hstate_kobjs, &hstate_attr_group);
2967 if (err)
2968 pr_err("HugeTLB: Unable to add hstate %s", h->name);
2969 }
2970}
2971
2972#ifdef CONFIG_NUMA
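/*
 * node_hstate/s - associate per node hstate attributes, via their kobjects,
 * with node devices in node_devices[] using a parallel array.  The array
 * index of a node device or _hstate == node id.
 * This avoids any static dependency of the node device driver, in the base
 * kernel, on the hugetlb module.
 */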
2973
2974
2975
2976
2977
2978
2979
2980
2981struct node_hstate {
2982 struct kobject *hugepages_kobj;
2983 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2984};
2985static struct node_hstate node_hstates[MAX_NUMNODES];
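/*
 * A subset of global hstate attributes for node devices
 */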
2986
2987
2988
2989
2990static struct attribute *per_node_hstate_attrs[] = {
2991 &nr_hugepages_attr.attr,
2992 &free_hugepages_attr.attr,
2993 &surplus_hugepages_attr.attr,
2994 NULL,
2995};
2996
2997static const struct attribute_group per_node_hstate_attr_group = {
2998 .attrs = per_node_hstate_attrs,
2999};
3000
3001
3002
3003
3004
3005static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
3006{
3007 int nid;
3008
3009 for (nid = 0; nid < nr_node_ids; nid++) {
3010 struct node_hstate *nhs = &node_hstates[nid];
3011 int i;
3012 for (i = 0; i < HUGE_MAX_HSTATE; i++)
3013 if (nhs->hstate_kobjs[i] == kobj) {
3014 if (nidp)
3015 *nidp = nid;
3016 return &hstates[i];
3017 }
3018 }
3019
3020 BUG();
3021 return NULL;
3022}
3023
3024
3025
3026
3027
3028static void hugetlb_unregister_node(struct node *node)
3029{
3030 struct hstate *h;
3031 struct node_hstate *nhs = &node_hstates[node->dev.id];
3032
3033 if (!nhs->hugepages_kobj)
3034 return;
3035
3036 for_each_hstate(h) {
3037 int idx = hstate_index(h);
3038 if (nhs->hstate_kobjs[idx]) {
3039 kobject_put(nhs->hstate_kobjs[idx]);
3040 nhs->hstate_kobjs[idx] = NULL;
3041 }
3042 }
3043
3044 kobject_put(nhs->hugepages_kobj);
3045 nhs->hugepages_kobj = NULL;
3046}
3047
3048
3049
3050
3051
3052
3053static void hugetlb_register_node(struct node *node)
3054{
3055 struct hstate *h;
3056 struct node_hstate *nhs = &node_hstates[node->dev.id];
3057 int err;
3058
3059 if (nhs->hugepages_kobj)
3060 return;
3061
3062 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
3063 &node->dev.kobj);
3064 if (!nhs->hugepages_kobj)
3065 return;
3066
3067 for_each_hstate(h) {
3068 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
3069 nhs->hstate_kobjs,
3070 &per_node_hstate_attr_group);
3071 if (err) {
3072 pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
3073 h->name, node->dev.id);
3074 hugetlb_unregister_node(node);
3075 break;
3076 }
3077 }
3078}
3079
3080
3081
3082
3083
3084
3085static void __init hugetlb_register_all_nodes(void)
3086{
3087 int nid;
3088
3089 for_each_node_state(nid, N_MEMORY) {
3090 struct node *node = node_devices[nid];
3091 if (node->dev.id == nid)
3092 hugetlb_register_node(node);
3093 }
3094
3095
3096
3097
3098
3099 register_hugetlbfs_with_node(hugetlb_register_node,
3100 hugetlb_unregister_node);
3101}
3102#else
3103
3104static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
3105{
3106 BUG();
3107 if (nidp)
3108 *nidp = -1;
3109 return NULL;
3110}
3111
3112static void hugetlb_register_all_nodes(void) { }
3113
3114#endif
3115
3116static int __init hugetlb_init(void)
3117{
3118 int i;
3119
3120 if (!hugepages_supported()) {
3121 if (hugetlb_max_hstate || default_hstate_max_huge_pages)
3122 pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
3123 return 0;
3124 }
3125
3126
3127
3128
3129
3130 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
3131 if (!parsed_default_hugepagesz) {
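		/*
		 * If we did not parse a default huge page size, set
		 * default_hstate_idx to the HPAGE_SIZE hstate. And, if the
		 * number of huge pages for the default size was implicitly
		 * specified, set that here as well.
		 * Note that the implicit setting will be overwritten by an
		 * explicit setting if one is specified.
		 */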
3132
3133
3134
3135
3136
3137
3138
3139
3140 default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
3141 if (default_hstate_max_huge_pages) {
3142 if (default_hstate.max_huge_pages) {
3143 char buf[32];
3144
3145 string_get_size(huge_page_size(&default_hstate),
3146 1, STRING_UNITS_2, buf, 32);
3147 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
3148 default_hstate.max_huge_pages, buf);
3149 pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
3150 default_hstate_max_huge_pages);
3151 }
3152 default_hstate.max_huge_pages =
3153 default_hstate_max_huge_pages;
3154 }
3155 }
3156
3157 hugetlb_cma_check();
3158 hugetlb_init_hstates();
3159 gather_bootmem_prealloc();
3160 report_hugepages();
3161
3162 hugetlb_sysfs_init();
3163 hugetlb_register_all_nodes();
3164 hugetlb_cgroup_file_init();
3165
3166#ifdef CONFIG_SMP
3167 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
3168#else
3169 num_fault_mutexes = 1;
3170#endif
3171 hugetlb_fault_mutex_table =
3172 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
3173 GFP_KERNEL);
3174 BUG_ON(!hugetlb_fault_mutex_table);
3175
3176 for (i = 0; i < num_fault_mutexes; i++)
3177 mutex_init(&hugetlb_fault_mutex_table[i]);
3178 return 0;
3179}
3180subsys_initcall(hugetlb_init);
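/*
 * Default (weak) implementation: only the architecture's base huge page
 * size is valid.  Architectures supporting multiple huge page sizes
 * override this.
 */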
3181
3182
3183bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
3184{
3185 return size == HPAGE_SIZE;
3186}
3187
3188void __init hugetlb_add_hstate(unsigned int order)
3189{
3190 struct hstate *h;
3191 unsigned long i;
3192
	/* hstate for this page size is already set up; nothing to do. */
	if (size_to_hstate(PAGE_SIZE << order))
		return;
3196 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
3197 BUG_ON(order == 0);
3198 h = &hstates[hugetlb_max_hstate++];
3199 h->order = order;
3200 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
3201 h->nr_huge_pages = 0;
3202 h->free_huge_pages = 0;
3203 for (i = 0; i < MAX_NUMNODES; ++i)
3204 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
3205 INIT_LIST_HEAD(&h->hugepage_activelist);
3206 h->next_nid_to_alloc = first_memory_node;
3207 h->next_nid_to_free = first_memory_node;
3208 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
3209 huge_page_size(h)/1024);
3210
3211 parsed_hstate = h;
3212}
3213
3214
3215
3216
3217
3218
3219
3220
3221static int __init hugepages_setup(char *s)
3222{
3223 unsigned long *mhp;
3224 static unsigned long *last_mhp;
3225
3226 if (!parsed_valid_hugepagesz) {
3227 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
3228 parsed_valid_hugepagesz = true;
3229 return 0;
3230 }
3231
3232
3233
3234
3235
3236
3237
3238 else if (!hugetlb_max_hstate)
3239 mhp = &default_hstate_max_huge_pages;
3240 else
3241 mhp = &parsed_hstate->max_huge_pages;
3242
3243 if (mhp == last_mhp) {
3244 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
3245 return 0;
3246 }
3247
3248 if (sscanf(s, "%lu", mhp) <= 0)
3249 *mhp = 0;
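	/*
	 * Global state is always initialized later in hugetlb_init.
	 * But we need to allocate gigantic hstates here early to still
	 * use the bootmem allocator.
	 */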
3250
3251
3252
3253
3254
3255
3256 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
3257 hugetlb_hstate_alloc_pages(parsed_hstate);
3258
3259 last_mhp = mhp;
3260
3261 return 1;
3262}
3263__setup("hugepages=", hugepages_setup);
3264
3265
3266
3267
3268
3269
3270
3271
3272static int __init hugepagesz_setup(char *s)
3273{
3274 unsigned long size;
3275 struct hstate *h;
3276
3277 parsed_valid_hugepagesz = false;
3278 size = (unsigned long)memparse(s, NULL);
3279
3280 if (!arch_hugetlb_valid_size(size)) {
3281 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
3282 return 0;
3283 }
3284
3285 h = size_to_hstate(size);
3286 if (h) {
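		/*
		 * hstate for this size already exists.  This is normally
		 * an error, but is allowed if the existing hstate is the
		 * default hstate.  More specifically, it is only allowed if
		 * the number of huge pages for the default hstate was not
		 * previously specified.
		 */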
3287
3288
3289
3290
3291
3292
3293
3294 if (!parsed_default_hugepagesz || h != &default_hstate ||
3295 default_hstate.max_huge_pages) {
3296 pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
3297 return 0;
3298 }
3299
3300
3301
3302
3303
3304
3305 parsed_hstate = h;
3306 parsed_valid_hugepagesz = true;
3307 return 1;
3308 }
3309
3310 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
3311 parsed_valid_hugepagesz = true;
3312 return 1;
3313}
3314__setup("hugepagesz=", hugepagesz_setup);
3315
3316
3317
3318
3319
3320static int __init default_hugepagesz_setup(char *s)
3321{
3322 unsigned long size;
3323
3324 parsed_valid_hugepagesz = false;
3325 if (parsed_default_hugepagesz) {
3326 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
3327 return 0;
3328 }
3329
3330 size = (unsigned long)memparse(s, NULL);
3331
3332 if (!arch_hugetlb_valid_size(size)) {
3333 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
3334 return 0;
3335 }
3336
3337 hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
3338 parsed_valid_hugepagesz = true;
3339 parsed_default_hugepagesz = true;
3340 default_hstate_idx = hstate_index(size_to_hstate(size));
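	/*
	 * The number of default huge pages (for this size) could have been
	 * specified as the first hugetlb parameter: hugepages=X.  If so,
	 * then default_hstate_max_huge_pages is set.  If the default huge
	 * page size is gigantic, the pages must be allocated here from the
	 * bootmem allocator.
	 */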
3341
3342
3343
3344
3345
3346
3347
3348
3349 if (default_hstate_max_huge_pages) {
3350 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
3351 if (hstate_is_gigantic(&default_hstate))
3352 hugetlb_hstate_alloc_pages(&default_hstate);
3353 default_hstate_max_huge_pages = 0;
3354 }
3355
3356 return 1;
3357}
3358__setup("default_hugepagesz=", default_hugepagesz_setup);
3359
3360static unsigned int allowed_mems_nr(struct hstate *h)
3361{
3362 int node;
3363 unsigned int nr = 0;
3364 nodemask_t *mpol_allowed;
3365 unsigned int *array = h->free_huge_pages_node;
3366 gfp_t gfp_mask = htlb_alloc_mask(h);
3367
3368 mpol_allowed = policy_nodemask_current(gfp_mask);
3369
3370 for_each_node_mask(node, cpuset_current_mems_allowed) {
		if (!mpol_allowed || node_isset(node, *mpol_allowed))
3373 nr += array[node];
3374 }
3375
3376 return nr;
3377}
3378
3379#ifdef CONFIG_SYSCTL
3380static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
3381 void *buffer, size_t *length,
3382 loff_t *ppos, unsigned long *out)
3383{
3384 struct ctl_table dup_table;
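	/*
	 * To avoid races with proc_doulongvec_minmax() writing through
	 * table->data, operate on a stack duplicate of @table with ->data
	 * redirected to the caller supplied variable.
	 */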
3385
3386
3387
3388
3389
3390 dup_table = *table;
3391 dup_table.data = out;
3392
3393 return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
3394}
3395
3396static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
3397 struct ctl_table *table, int write,
3398 void *buffer, size_t *length, loff_t *ppos)
3399{
3400 struct hstate *h = &default_hstate;
3401 unsigned long tmp = h->max_huge_pages;
3402 int ret;
3403
3404 if (!hugepages_supported())
3405 return -EOPNOTSUPP;
3406
3407 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
3408 &tmp);
3409 if (ret)
3410 goto out;
3411
3412 if (write)
3413 ret = __nr_hugepages_store_common(obey_mempolicy, h,
3414 NUMA_NO_NODE, tmp, *length);
3415out:
3416 return ret;
3417}
3418
3419int hugetlb_sysctl_handler(struct ctl_table *table, int write,
3420 void *buffer, size_t *length, loff_t *ppos)
3421{
3422
3423 return hugetlb_sysctl_handler_common(false, table, write,
3424 buffer, length, ppos);
3425}
3426
3427#ifdef CONFIG_NUMA
3428int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
3429 void *buffer, size_t *length, loff_t *ppos)
3430{
3431 return hugetlb_sysctl_handler_common(true, table, write,
3432 buffer, length, ppos);
3433}
3434#endif
3435
3436int hugetlb_overcommit_handler(struct ctl_table *table, int write,
3437 void *buffer, size_t *length, loff_t *ppos)
3438{
3439 struct hstate *h = &default_hstate;
3440 unsigned long tmp;
3441 int ret;
3442
3443 if (!hugepages_supported())
3444 return -EOPNOTSUPP;
3445
3446 tmp = h->nr_overcommit_huge_pages;
3447
3448 if (write && hstate_is_gigantic(h))
3449 return -EINVAL;
3450
3451 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
3452 &tmp);
3453 if (ret)
3454 goto out;
3455
3456 if (write) {
3457 spin_lock(&hugetlb_lock);
3458 h->nr_overcommit_huge_pages = tmp;
3459 spin_unlock(&hugetlb_lock);
3460 }
3461out:
3462 return ret;
3463}
3464
3465#endif
3466
3467void hugetlb_report_meminfo(struct seq_file *m)
3468{
3469 struct hstate *h;
3470 unsigned long total = 0;
3471
3472 if (!hugepages_supported())
3473 return;
3474
3475 for_each_hstate(h) {
3476 unsigned long count = h->nr_huge_pages;
3477
3478 total += (PAGE_SIZE << huge_page_order(h)) * count;
3479
3480 if (h == &default_hstate)
3481 seq_printf(m,
3482 "HugePages_Total: %5lu\n"
3483 "HugePages_Free: %5lu\n"
3484 "HugePages_Rsvd: %5lu\n"
3485 "HugePages_Surp: %5lu\n"
3486 "Hugepagesize: %8lu kB\n",
3487 count,
3488 h->free_huge_pages,
3489 h->resv_huge_pages,
3490 h->surplus_huge_pages,
3491 (PAGE_SIZE << huge_page_order(h)) / 1024);
3492 }
3493
3494 seq_printf(m, "Hugetlb: %8lu kB\n", total / 1024);
3495}
3496
3497int hugetlb_report_node_meminfo(char *buf, int len, int nid)
3498{
3499 struct hstate *h = &default_hstate;
3500
3501 if (!hugepages_supported())
3502 return 0;
3503
3504 return sysfs_emit_at(buf, len,
3505 "Node %d HugePages_Total: %5u\n"
3506 "Node %d HugePages_Free: %5u\n"
3507 "Node %d HugePages_Surp: %5u\n",
3508 nid, h->nr_huge_pages_node[nid],
3509 nid, h->free_huge_pages_node[nid],
3510 nid, h->surplus_huge_pages_node[nid]);
3511}
3512
3513void hugetlb_show_meminfo(void)
3514{
3515 struct hstate *h;
3516 int nid;
3517
3518 if (!hugepages_supported())
3519 return;
3520
3521 for_each_node_state(nid, N_MEMORY)
3522 for_each_hstate(h)
3523 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3524 nid,
3525 h->nr_huge_pages_node[nid],
3526 h->free_huge_pages_node[nid],
3527 h->surplus_huge_pages_node[nid],
3528 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3529}
3530
3531void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3532{
3533 seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3534 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3535}
3536
3537
3538unsigned long hugetlb_total_pages(void)
3539{
3540 struct hstate *h;
3541 unsigned long nr_total_pages = 0;
3542
3543 for_each_hstate(h)
3544 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3545 return nr_total_pages;
3546}
3547
3548static int hugetlb_acct_memory(struct hstate *h, long delta)
3549{
3550 int ret = -ENOMEM;
3551
3552 spin_lock(&hugetlb_lock);
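	/*
	 * When cpuset is configured, it breaks strict hugetlb page
	 * reservation as the accounting is done on a global variable, and
	 * the reservation is not checked against page availability for the
	 * current cpuset.  Enforcing strict accounting with cpuset is nearly
	 * impossible as tasks and memory nodes can move between cpusets at
	 * any time.  As a best effort, fall back to checking the current
	 * free page availability (allowed_mems_nr()) instead.  The current
	 * task's mempolicy is taken into account the same way.
	 */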
3553
3554
3555
3556
3557
3558
3559
3560
3561
3562
3563
3564
3565
3566
3567
3568
3569
3570
3571
3572
3573
3574
3575
3576 if (delta > 0) {
3577 if (gather_surplus_pages(h, delta) < 0)
3578 goto out;
3579
3580 if (delta > allowed_mems_nr(h)) {
3581 return_unused_surplus_pages(h, delta);
3582 goto out;
3583 }
3584 }
3585
3586 ret = 0;
3587 if (delta < 0)
3588 return_unused_surplus_pages(h, (unsigned long) -delta);
3589
3590out:
3591 spin_unlock(&hugetlb_lock);
3592 return ret;
3593}
3594
3595static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3596{
3597 struct resv_map *resv = vma_resv_map(vma);
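	/*
	 * This new VMA should share its siblings reservation map if present.
	 * The VMA will only ever have a valid reservation map pointer where
	 * it is being copied for another still existing VMA.  As that VMA
	 * has a reference to the reservation map it cannot disappear until
	 * after this open call completes.  It is therefore safe to take a
	 * new reference here without additional locking.
	 */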
3598
3599
3600
3601
3602
3603
3604
3605
3606
3607 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3608 kref_get(&resv->refs);
3609}
3610
3611static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3612{
3613 struct hstate *h = hstate_vma(vma);
3614 struct resv_map *resv = vma_resv_map(vma);
3615 struct hugepage_subpool *spool = subpool_vma(vma);
3616 unsigned long reserve, start, end;
3617 long gbl_reserve;
3618
3619 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3620 return;
3621
3622 start = vma_hugecache_offset(h, vma, vma->vm_start);
3623 end = vma_hugecache_offset(h, vma, vma->vm_end);
3624
3625 reserve = (end - start) - region_count(resv, start, end);
3626 hugetlb_cgroup_uncharge_counter(resv, start, end);
3627 if (reserve) {
3628
3629
3630
3631
3632 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3633 hugetlb_acct_memory(h, -gbl_reserve);
3634 }
3635
3636 kref_put(&resv->refs, resv_map_release);
3637}
3638
3639static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3640{
3641 if (addr & ~(huge_page_mask(hstate_vma(vma))))
3642 return -EINVAL;
3643 return 0;
3644}
3645
3646static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
3647{
3648 struct hstate *hstate = hstate_vma(vma);
3649
3650 return 1UL << huge_page_shift(hstate);
3651}
3652
3653
3654
3655
3656
3657
3658
3659static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
3660{
3661 BUG();
3662 return 0;
3663}
3664
3665
3666
3667
3668
3669
3670
3671
3672const struct vm_operations_struct hugetlb_vm_ops = {
3673 .fault = hugetlb_vm_op_fault,
3674 .open = hugetlb_vm_op_open,
3675 .close = hugetlb_vm_op_close,
3676 .split = hugetlb_vm_op_split,
3677 .pagesize = hugetlb_vm_op_pagesize,
3678};
3679
3680static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3681 int writable)
3682{
3683 pte_t entry;
3684
3685 if (writable) {
3686 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3687 vma->vm_page_prot)));
3688 } else {
3689 entry = huge_pte_wrprotect(mk_huge_pte(page,
3690 vma->vm_page_prot));
3691 }
3692 entry = pte_mkyoung(entry);
3693 entry = pte_mkhuge(entry);
3694 entry = arch_make_huge_pte(entry, vma, page, writable);
3695
3696 return entry;
3697}
3698
3699static void set_huge_ptep_writable(struct vm_area_struct *vma,
3700 unsigned long address, pte_t *ptep)
3701{
3702 pte_t entry;
3703
3704 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3705 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3706 update_mmu_cache(vma, address, ptep);
3707}
3708
3709bool is_hugetlb_entry_migration(pte_t pte)
3710{
3711 swp_entry_t swp;
3712
3713 if (huge_pte_none(pte) || pte_present(pte))
3714 return false;
3715 swp = pte_to_swp_entry(pte);
3716 if (is_migration_entry(swp))
3717 return true;
3718 else
3719 return false;
3720}
3721
3722static bool is_hugetlb_entry_hwpoisoned(pte_t pte)
3723{
3724 swp_entry_t swp;
3725
3726 if (huge_pte_none(pte) || pte_present(pte))
3727 return false;
3728 swp = pte_to_swp_entry(pte);
3729 if (is_hwpoison_entry(swp))
3730 return true;
3731 else
3732 return false;
3733}
3734
3735int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3736 struct vm_area_struct *vma)
3737{
3738 pte_t *src_pte, *dst_pte, entry, dst_entry;
3739 struct page *ptepage;
3740 unsigned long addr;
3741 int cow;
3742 struct hstate *h = hstate_vma(vma);
3743 unsigned long sz = huge_page_size(h);
3744 struct address_space *mapping = vma->vm_file->f_mapping;
3745 struct mmu_notifier_range range;
3746 int ret = 0;
3747
3748 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3749
3750 if (cow) {
3751 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
3752 vma->vm_start,
3753 vma->vm_end);
3754 mmu_notifier_invalidate_range_start(&range);
3755 } else {
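		/*
		 * For shared mappings i_mmap_rwsem must be held to call
		 * huge_pte_alloc, otherwise the returned ptep could go
		 * away if part of a shared pmd and another thread calls
		 * huge_pmd_unshare.
		 */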
3756
3757
3758
3759
3760
3761
3762 i_mmap_lock_read(mapping);
3763 }
3764
3765 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3766 spinlock_t *src_ptl, *dst_ptl;
3767 src_pte = huge_pte_offset(src, addr, sz);
3768 if (!src_pte)
3769 continue;
3770 dst_pte = huge_pte_alloc(dst, addr, sz);
3771 if (!dst_pte) {
3772 ret = -ENOMEM;
3773 break;
3774 }
3775
3776
3777
3778
3779
3780
3781
3782
3783
3784
3785 dst_entry = huge_ptep_get(dst_pte);
3786 if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
3787 continue;
3788
3789 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3790 src_ptl = huge_pte_lockptr(h, src, src_pte);
3791 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3792 entry = huge_ptep_get(src_pte);
3793 dst_entry = huge_ptep_get(dst_pte);
3794 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
3795
3796
3797
3798
3799
3800 ;
3801 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3802 is_hugetlb_entry_hwpoisoned(entry))) {
3803 swp_entry_t swp_entry = pte_to_swp_entry(entry);
3804
3805 if (is_write_migration_entry(swp_entry) && cow) {
3806
3807
3808
3809
3810 make_migration_entry_read(&swp_entry);
3811 entry = swp_entry_to_pte(swp_entry);
3812 set_huge_swap_pte_at(src, addr, src_pte,
3813 entry, sz);
3814 }
3815 set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
3816 } else {
3817 if (cow) {
3818
3819
3820
3821
3822
3823
3824
3825 huge_ptep_set_wrprotect(src, addr, src_pte);
3826 }
3827 entry = huge_ptep_get(src_pte);
3828 ptepage = pte_page(entry);
3829 get_page(ptepage);
3830 page_dup_rmap(ptepage, true);
3831 set_huge_pte_at(dst, addr, dst_pte, entry);
3832 hugetlb_count_add(pages_per_huge_page(h), dst);
3833 }
3834 spin_unlock(src_ptl);
3835 spin_unlock(dst_ptl);
3836 }
3837
3838 if (cow)
3839 mmu_notifier_invalidate_range_end(&range);
3840 else
3841 i_mmap_unlock_read(mapping);
3842
3843 return ret;
3844}
3845
3846void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3847 unsigned long start, unsigned long end,
3848 struct page *ref_page)
3849{
3850 struct mm_struct *mm = vma->vm_mm;
3851 unsigned long address;
3852 pte_t *ptep;
3853 pte_t pte;
3854 spinlock_t *ptl;
3855 struct page *page;
3856 struct hstate *h = hstate_vma(vma);
3857 unsigned long sz = huge_page_size(h);
3858 struct mmu_notifier_range range;
3859
3860 WARN_ON(!is_vm_hugetlb_page(vma));
3861 BUG_ON(start & ~huge_page_mask(h));
3862 BUG_ON(end & ~huge_page_mask(h));
3863
3864
3865
3866
3867
3868 tlb_change_page_size(tlb, sz);
3869 tlb_start_vma(tlb, vma);
3870
3871
3872
3873
3874 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
3875 end);
3876 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
3877 mmu_notifier_invalidate_range_start(&range);
3878 address = start;
3879 for (; address < end; address += sz) {
3880 ptep = huge_pte_offset(mm, address, sz);
3881 if (!ptep)
3882 continue;
3883
3884 ptl = huge_pte_lock(h, mm, ptep);
3885 if (huge_pmd_unshare(mm, vma, &address, ptep)) {
3886 spin_unlock(ptl);
3887
3888
3889
3890
3891 continue;
3892 }
3893
3894 pte = huge_ptep_get(ptep);
3895 if (huge_pte_none(pte)) {
3896 spin_unlock(ptl);
3897 continue;
3898 }
3899
3900
3901
3902
3903
3904 if (unlikely(!pte_present(pte))) {
3905 huge_pte_clear(mm, address, ptep, sz);
3906 spin_unlock(ptl);
3907 continue;
3908 }
3909
3910 page = pte_page(pte);
3911
3912
3913
3914
3915
3916 if (ref_page) {
3917 if (page != ref_page) {
3918 spin_unlock(ptl);
3919 continue;
3920 }
3921
3922
3923
3924
3925
3926 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3927 }
3928
3929 pte = huge_ptep_get_and_clear(mm, address, ptep);
3930 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
3931 if (huge_pte_dirty(pte))
3932 set_page_dirty(page);
3933
3934 hugetlb_count_sub(pages_per_huge_page(h), mm);
3935 page_remove_rmap(page, true);
3936
3937 spin_unlock(ptl);
3938 tlb_remove_page_size(tlb, page, huge_page_size(h));
3939
3940
3941
3942 if (ref_page)
3943 break;
3944 }
3945 mmu_notifier_invalidate_range_end(&range);
3946 tlb_end_vma(tlb, vma);
3947}
3948
3949void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3950 struct vm_area_struct *vma, unsigned long start,
3951 unsigned long end, struct page *ref_page)
3952{
3953 __unmap_hugepage_range(tlb, vma, start, end, ref_page);
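	/*
	 * Clear this flag so that huge_pmd_share()/vma_shareable() will fail
	 * on a vma being torn down, and not grab a page table on its way
	 * out.  All that's necessary is to clear it before releasing the
	 * i_mmap_rwsem; in the contexts this is called, the VMA is about to
	 * be destroyed and the i_mmap_rwsem is held.
	 */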
3954
3955
3956
3957
3958
3959
3960
3961
3962
3963
3964
3965 vma->vm_flags &= ~VM_MAYSHARE;
3966}
3967
3968void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3969 unsigned long end, struct page *ref_page)
3970{
3971 struct mm_struct *mm;
3972 struct mmu_gather tlb;
3973 unsigned long tlb_start = start;
3974 unsigned long tlb_end = end;
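	/*
	 * If shared PMDs were possibly used within this vma range, adjust
	 * start/end for worst case tlb flushing.
	 * Note that we cannot be sure if PMDs are shared until we try to
	 * unmap pages.  However, we want to make sure TLB flushing covers
	 * the largest possible range.
	 */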
3975
3976
3977
3978
3979
3980
3981
3982
3983 adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
3984
3985 mm = vma->vm_mm;
3986
3987 tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
3988 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3989 tlb_finish_mmu(&tlb, tlb_start, tlb_end);
3990}
3991
3992
3993
3994
3995
3996
3997
3998static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3999 struct page *page, unsigned long address)
4000{
4001 struct hstate *h = hstate_vma(vma);
4002 struct vm_area_struct *iter_vma;
4003 struct address_space *mapping;
4004 pgoff_t pgoff;
4005
4006
4007
4008
4009
4010 address = address & huge_page_mask(h);
4011 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
4012 vma->vm_pgoff;
4013 mapping = vma->vm_file->f_mapping;
4014
4015
4016
4017
4018
4019
4020 i_mmap_lock_write(mapping);
4021 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
4022
4023 if (iter_vma == vma)
4024 continue;
4025
4026
4027
4028
4029
4030
4031 if (iter_vma->vm_flags & VM_MAYSHARE)
4032 continue;
4033
4034
4035
4036
4037
4038
4039
4040
4041 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
4042 unmap_hugepage_range(iter_vma, address,
4043 address + huge_page_size(h), page);
4044 }
4045 i_mmap_unlock_write(mapping);
4046}
4047
4048
4049
4050
4051
4052
4053
4054static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
4055 unsigned long address, pte_t *ptep,
4056 struct page *pagecache_page, spinlock_t *ptl)
4057{
4058 pte_t pte;
4059 struct hstate *h = hstate_vma(vma);
4060 struct page *old_page, *new_page;
4061 int outside_reserve = 0;
4062 vm_fault_t ret = 0;
4063 unsigned long haddr = address & huge_page_mask(h);
4064 struct mmu_notifier_range range;
4065
4066 pte = huge_ptep_get(ptep);
4067 old_page = pte_page(pte);
4068
4069retry_avoidcopy:
4070
4071
4072 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
4073 page_move_anon_rmap(old_page, vma);
4074 set_huge_ptep_writable(vma, haddr, ptep);
4075 return 0;
4076 }
4077
4078
4079
4080
4081
4082
4083
4084
4085
4086
4087 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
4088 old_page != pagecache_page)
4089 outside_reserve = 1;
4090
4091 get_page(old_page);
4092
4093
4094
4095
4096
4097 spin_unlock(ptl);
4098 new_page = alloc_huge_page(vma, haddr, outside_reserve);
4099
4100 if (IS_ERR(new_page)) {
4101
4102
4103
4104
4105
4106
4107
4108 if (outside_reserve) {
4109 put_page(old_page);
4110 BUG_ON(huge_pte_none(pte));
4111 unmap_ref_private(mm, vma, old_page, haddr);
4112 BUG_ON(huge_pte_none(pte));
4113 spin_lock(ptl);
4114 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4115 if (likely(ptep &&
4116 pte_same(huge_ptep_get(ptep), pte)))
4117 goto retry_avoidcopy;
4118
4119
4120
4121
4122 return 0;
4123 }
4124
4125 ret = vmf_error(PTR_ERR(new_page));
4126 goto out_release_old;
4127 }
4128
4129
4130
4131
4132
4133 if (unlikely(anon_vma_prepare(vma))) {
4134 ret = VM_FAULT_OOM;
4135 goto out_release_all;
4136 }
4137
4138 copy_user_huge_page(new_page, old_page, address, vma,
4139 pages_per_huge_page(h));
4140 __SetPageUptodate(new_page);
4141
4142 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
4143 haddr + huge_page_size(h));
4144 mmu_notifier_invalidate_range_start(&range);
4145
4146
4147
4148
4149
4150 spin_lock(ptl);
4151 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4152 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
4153 ClearPagePrivate(new_page);
4154
4155
4156 huge_ptep_clear_flush(vma, haddr, ptep);
4157 mmu_notifier_invalidate_range(mm, range.start, range.end);
4158 set_huge_pte_at(mm, haddr, ptep,
4159 make_huge_pte(vma, new_page, 1));
4160 page_remove_rmap(old_page, true);
4161 hugepage_add_new_anon_rmap(new_page, vma, haddr);
4162 set_page_huge_active(new_page);
4163
4164 new_page = old_page;
4165 }
4166 spin_unlock(ptl);
4167 mmu_notifier_invalidate_range_end(&range);
4168out_release_all:
4169 restore_reserve_on_error(h, vma, haddr, new_page);
4170 put_page(new_page);
4171out_release_old:
4172 put_page(old_page);
4173
4174 spin_lock(ptl);
4175 return ret;
4176}
4177
4178
4179static struct page *hugetlbfs_pagecache_page(struct hstate *h,
4180 struct vm_area_struct *vma, unsigned long address)
4181{
4182 struct address_space *mapping;
4183 pgoff_t idx;
4184
4185 mapping = vma->vm_file->f_mapping;
4186 idx = vma_hugecache_offset(h, vma, address);
4187
4188 return find_lock_page(mapping, idx);
4189}
4190
4191
4192
4193
4194
4195static bool hugetlbfs_pagecache_present(struct hstate *h,
4196 struct vm_area_struct *vma, unsigned long address)
4197{
4198 struct address_space *mapping;
4199 pgoff_t idx;
4200 struct page *page;
4201
4202 mapping = vma->vm_file->f_mapping;
4203 idx = vma_hugecache_offset(h, vma, address);
4204
4205 page = find_get_page(mapping, idx);
4206 if (page)
4207 put_page(page);
4208 return page != NULL;
4209}
4210
4211int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
4212 pgoff_t idx)
4213{
4214 struct inode *inode = mapping->host;
4215 struct hstate *h = hstate_inode(inode);
4216 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
4217
4218 if (err)
4219 return err;
4220 ClearPagePrivate(page);
4221
4222
4223
4224
4225
4226 set_page_dirty(page);
4227
4228 spin_lock(&inode->i_lock);
4229 inode->i_blocks += blocks_per_huge_page(h);
4230 spin_unlock(&inode->i_lock);
4231 return 0;
4232}
4233
4234static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
4235 struct vm_area_struct *vma,
4236 struct address_space *mapping, pgoff_t idx,
4237 unsigned long address, pte_t *ptep, unsigned int flags)
4238{
4239 struct hstate *h = hstate_vma(vma);
4240 vm_fault_t ret = VM_FAULT_SIGBUS;
4241 int anon_rmap = 0;
4242 unsigned long size;
4243 struct page *page;
4244 pte_t new_pte;
4245 spinlock_t *ptl;
4246 unsigned long haddr = address & huge_page_mask(h);
4247 bool new_page = false;
4248
4249
4250
4251
4252
4253
4254 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
4255 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
4256 current->pid);
4257 return ret;
4258 }
4259
4260
4261
4262
4263
4264
4265 size = i_size_read(mapping->host) >> huge_page_shift(h);
4266 if (idx >= size)
4267 goto out;
4268
4269retry:
4270 page = find_lock_page(mapping, idx);
4271 if (!page) {
4272
4273
4274
4275 if (userfaultfd_missing(vma)) {
4276 u32 hash;
4277 struct vm_fault vmf = {
4278 .vma = vma,
4279 .address = haddr,
4280 .flags = flags,
4281
4282
4283
4284
4285
4286
4287
4288 };
4289
4290
4291
4292
4293
4294
4295 hash = hugetlb_fault_mutex_hash(mapping, idx);
4296 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4297 i_mmap_unlock_read(mapping);
4298 ret = handle_userfault(&vmf, VM_UFFD_MISSING);
4299 i_mmap_lock_read(mapping);
4300 mutex_lock(&hugetlb_fault_mutex_table[hash]);
4301 goto out;
4302 }
4303
4304 page = alloc_huge_page(vma, haddr, 0);
4305 if (IS_ERR(page)) {
4306
4307
4308
4309
4310
4311
4312
4313
4314
4315
4316
4317
4318 ptl = huge_pte_lock(h, mm, ptep);
4319 if (!huge_pte_none(huge_ptep_get(ptep))) {
4320 ret = 0;
4321 spin_unlock(ptl);
4322 goto out;
4323 }
4324 spin_unlock(ptl);
4325 ret = vmf_error(PTR_ERR(page));
4326 goto out;
4327 }
4328 clear_huge_page(page, address, pages_per_huge_page(h));
4329 __SetPageUptodate(page);
4330 new_page = true;
4331
4332 if (vma->vm_flags & VM_MAYSHARE) {
4333 int err = huge_add_to_page_cache(page, mapping, idx);
4334 if (err) {
4335 put_page(page);
4336 if (err == -EEXIST)
4337 goto retry;
4338 goto out;
4339 }
4340 } else {
4341 lock_page(page);
4342 if (unlikely(anon_vma_prepare(vma))) {
4343 ret = VM_FAULT_OOM;
4344 goto backout_unlocked;
4345 }
4346 anon_rmap = 1;
4347 }
4348 } else {
4349
4350
4351
4352
4353
4354 if (unlikely(PageHWPoison(page))) {
4355 ret = VM_FAULT_HWPOISON |
4356 VM_FAULT_SET_HINDEX(hstate_index(h));
4357 goto backout_unlocked;
4358 }
4359 }
4360
4361
4362
4363
4364
4365
4366
4367 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
4368 if (vma_needs_reservation(h, vma, haddr) < 0) {
4369 ret = VM_FAULT_OOM;
4370 goto backout_unlocked;
4371 }
4372
4373 vma_end_reservation(h, vma, haddr);
4374 }
4375
4376 ptl = huge_pte_lock(h, mm, ptep);
4377 ret = 0;
4378 if (!huge_pte_none(huge_ptep_get(ptep)))
4379 goto backout;
4380
4381 if (anon_rmap) {
4382 ClearPagePrivate(page);
4383 hugepage_add_new_anon_rmap(page, vma, haddr);
4384 } else
4385 page_dup_rmap(page, true);
4386 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
4387 && (vma->vm_flags & VM_SHARED)));
4388 set_huge_pte_at(mm, haddr, ptep, new_pte);
4389
4390 hugetlb_count_add(pages_per_huge_page(h), mm);
4391 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
4392
4393 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
4394 }
4395
4396 spin_unlock(ptl);
4397
4398
4399
4400
4401
4402
4403 if (new_page)
4404 set_page_huge_active(page);
4405
4406 unlock_page(page);
4407out:
4408 return ret;
4409
4410backout:
4411 spin_unlock(ptl);
4412backout_unlocked:
4413 unlock_page(page);
4414 restore_reserve_on_error(h, vma, haddr, page);
4415 put_page(page);
4416 goto out;
4417}
4418
4419#ifdef CONFIG_SMP
4420u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
4421{
4422 unsigned long key[2];
4423 u32 hash;
4424
4425 key[0] = (unsigned long) mapping;
4426 key[1] = idx;
4427
4428 hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
4429
4430 return hash & (num_fault_mutexes - 1);
4431}
4432#else
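/*
 * For uniprocessor systems we always use a single mutex, so just
 * return 0 and avoid the hashing overhead.
 */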
4433
4434
4435
4436
4437u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
4438{
4439 return 0;
4440}
4441#endif
4442
4443vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
4444 unsigned long address, unsigned int flags)
4445{
4446 pte_t *ptep, entry;
4447 spinlock_t *ptl;
4448 vm_fault_t ret;
4449 u32 hash;
4450 pgoff_t idx;
4451 struct page *page = NULL;
4452 struct page *pagecache_page = NULL;
4453 struct hstate *h = hstate_vma(vma);
4454 struct address_space *mapping;
4455 int need_wait_lock = 0;
4456 unsigned long haddr = address & huge_page_mask(h);
4457
4458 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4459 if (ptep) {
4460
4461
4462
4463
4464
4465 entry = huge_ptep_get(ptep);
4466 if (unlikely(is_hugetlb_entry_migration(entry))) {
4467 migration_entry_wait_huge(vma, mm, ptep);
4468 return 0;
4469 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
4470 return VM_FAULT_HWPOISON_LARGE |
4471 VM_FAULT_SET_HINDEX(hstate_index(h));
4472 }
4473
4474
4475
4476
4477
4478
4479
4480
4481
4482
4483
4484
4485 mapping = vma->vm_file->f_mapping;
4486 i_mmap_lock_read(mapping);
4487 ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
4488 if (!ptep) {
4489 i_mmap_unlock_read(mapping);
4490 return VM_FAULT_OOM;
4491 }
4492
4493
4494
4495
4496
4497
4498 idx = vma_hugecache_offset(h, vma, haddr);
4499 hash = hugetlb_fault_mutex_hash(mapping, idx);
4500 mutex_lock(&hugetlb_fault_mutex_table[hash]);
4501
4502 entry = huge_ptep_get(ptep);
4503 if (huge_pte_none(entry)) {
4504 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
4505 goto out_mutex;
4506 }
4507
4508 ret = 0;
4509
4510
4511
4512
4513
4514
4515
4516
4517 if (!pte_present(entry))
4518 goto out_mutex;
4519
4520
4521
4522
4523
4524
4525
4526
4527
4528 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
4529 if (vma_needs_reservation(h, vma, haddr) < 0) {
4530 ret = VM_FAULT_OOM;
4531 goto out_mutex;
4532 }
4533
4534 vma_end_reservation(h, vma, haddr);
4535
4536 if (!(vma->vm_flags & VM_MAYSHARE))
4537 pagecache_page = hugetlbfs_pagecache_page(h,
4538 vma, haddr);
4539 }
4540
4541 ptl = huge_pte_lock(h, mm, ptep);
4542
4543
4544 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
4545 goto out_ptl;
4546
4547
4548
4549
4550
4551
4552 page = pte_page(entry);
4553 if (page != pagecache_page)
4554 if (!trylock_page(page)) {
4555 need_wait_lock = 1;
4556 goto out_ptl;
4557 }
4558
4559 get_page(page);
4560
4561 if (flags & FAULT_FLAG_WRITE) {
4562 if (!huge_pte_write(entry)) {
4563 ret = hugetlb_cow(mm, vma, address, ptep,
4564 pagecache_page, ptl);
4565 goto out_put_page;
4566 }
4567 entry = huge_pte_mkdirty(entry);
4568 }
4569 entry = pte_mkyoung(entry);
4570 if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
4571 flags & FAULT_FLAG_WRITE))
4572 update_mmu_cache(vma, haddr, ptep);
4573out_put_page:
4574 if (page != pagecache_page)
4575 unlock_page(page);
4576 put_page(page);
4577out_ptl:
4578 spin_unlock(ptl);
4579
4580 if (pagecache_page) {
4581 unlock_page(pagecache_page);
4582 put_page(pagecache_page);
4583 }
4584out_mutex:
4585 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4586 i_mmap_unlock_read(mapping);
4587
4588
4589
4590
4591
4592
4593
4594 if (need_wait_lock)
4595 wait_on_page_locked(page);
4596 return ret;
4597}
4598
4599
4600
4601
4602
4603int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4604 pte_t *dst_pte,
4605 struct vm_area_struct *dst_vma,
4606 unsigned long dst_addr,
4607 unsigned long src_addr,
4608 struct page **pagep)
4609{
4610 struct address_space *mapping;
4611 pgoff_t idx;
4612 unsigned long size;
4613 int vm_shared = dst_vma->vm_flags & VM_SHARED;
4614 struct hstate *h = hstate_vma(dst_vma);
4615 pte_t _dst_pte;
4616 spinlock_t *ptl;
4617 int ret;
4618 struct page *page;
4619
4620 if (!*pagep) {
4621 ret = -ENOMEM;
4622 page = alloc_huge_page(dst_vma, dst_addr, 0);
4623 if (IS_ERR(page))
4624 goto out;
4625
4626 ret = copy_huge_page_from_user(page,
4627 (const void __user *) src_addr,
4628 pages_per_huge_page(h), false);
4629
4630
4631 if (unlikely(ret)) {
4632 ret = -ENOENT;
4633 *pagep = page;
4634
4635 goto out;
4636 }
4637 } else {
4638 page = *pagep;
4639 *pagep = NULL;
4640 }
4641
4642
4643
4644
4645
4646
4647 __SetPageUptodate(page);
4648
4649 mapping = dst_vma->vm_file->f_mapping;
4650 idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4651
4652
4653
4654
4655 if (vm_shared) {
4656 size = i_size_read(mapping->host) >> huge_page_shift(h);
4657 ret = -EFAULT;
4658 if (idx >= size)
4659 goto out_release_nounlock;
4660
4661
4662
4663
4664
4665
4666
4667 ret = huge_add_to_page_cache(page, mapping, idx);
4668 if (ret)
4669 goto out_release_nounlock;
4670 }
4671
4672 ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
4673 spin_lock(ptl);
4674
4675
4676
4677
4678
4679
4680
4681
4682
4683
4684 size = i_size_read(mapping->host) >> huge_page_shift(h);
4685 ret = -EFAULT;
4686 if (idx >= size)
4687 goto out_release_unlock;
4688
4689 ret = -EEXIST;
4690 if (!huge_pte_none(huge_ptep_get(dst_pte)))
4691 goto out_release_unlock;
4692
4693 if (vm_shared) {
4694 page_dup_rmap(page, true);
4695 } else {
4696 ClearPagePrivate(page);
4697 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
4698 }
4699
4700 _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
4701 if (dst_vma->vm_flags & VM_WRITE)
4702 _dst_pte = huge_pte_mkdirty(_dst_pte);
4703 _dst_pte = pte_mkyoung(_dst_pte);
4704
4705 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
4706
4707 (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
4708 dst_vma->vm_flags & VM_WRITE);
4709 hugetlb_count_add(pages_per_huge_page(h), dst_mm);
4710
4711
4712 update_mmu_cache(dst_vma, dst_addr, dst_pte);
4713
4714 spin_unlock(ptl);
4715 set_page_huge_active(page);
4716 if (vm_shared)
4717 unlock_page(page);
4718 ret = 0;
4719out:
4720 return ret;
4721out_release_unlock:
4722 spin_unlock(ptl);
4723 if (vm_shared)
4724 unlock_page(page);
4725out_release_nounlock:
4726 put_page(page);
4727 goto out;
4728}
4729
4730long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4731 struct page **pages, struct vm_area_struct **vmas,
4732 unsigned long *position, unsigned long *nr_pages,
4733 long i, unsigned int flags, int *locked)
4734{
4735 unsigned long pfn_offset;
4736 unsigned long vaddr = *position;
4737 unsigned long remainder = *nr_pages;
4738 struct hstate *h = hstate_vma(vma);
4739 int err = -EFAULT;
4740
4741 while (vaddr < vma->vm_end && remainder) {
4742 pte_t *pte;
4743 spinlock_t *ptl = NULL;
4744 int absent;
4745 struct page *page;
4746
4747
4748
4749
4750
4751 if (fatal_signal_pending(current)) {
4752 remainder = 0;
4753 break;
4754 }
4755
4756
4757
4758
4759
4760
4761
4762
4763 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
4764 huge_page_size(h));
4765 if (pte)
4766 ptl = huge_pte_lock(h, mm, pte);
4767 absent = !pte || huge_pte_none(huge_ptep_get(pte));
4768
4769
4770
4771
4772
4773
4774
4775
4776 if (absent && (flags & FOLL_DUMP) &&
4777 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
4778 if (pte)
4779 spin_unlock(ptl);
4780 remainder = 0;
4781 break;
4782 }
4783
4784
4785
4786
4787
4788
4789
4790
4791
4792
4793
4794 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4795 ((flags & FOLL_WRITE) &&
4796 !huge_pte_write(huge_ptep_get(pte)))) {
4797 vm_fault_t ret;
4798 unsigned int fault_flags = 0;
4799
4800 if (pte)
4801 spin_unlock(ptl);
4802 if (flags & FOLL_WRITE)
4803 fault_flags |= FAULT_FLAG_WRITE;
4804 if (locked)
4805 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4806 FAULT_FLAG_KILLABLE;
4807 if (flags & FOLL_NOWAIT)
4808 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4809 FAULT_FLAG_RETRY_NOWAIT;
4810 if (flags & FOLL_TRIED) {
4811
4812
4813
4814
4815 fault_flags |= FAULT_FLAG_TRIED;
4816 }
4817 ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
4818 if (ret & VM_FAULT_ERROR) {
4819 err = vm_fault_to_errno(ret, flags);
4820 remainder = 0;
4821 break;
4822 }
4823 if (ret & VM_FAULT_RETRY) {
4824 if (locked &&
4825 !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
4826 *locked = 0;
4827 *nr_pages = 0;
4828
4829
4830
4831
4832
4833
4834
4835
4836
4837 return i;
4838 }
4839 continue;
4840 }
4841
4842 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4843 page = pte_page(huge_ptep_get(pte));
4844
4845
4846
4847
4848
4849 if (!pages && !vmas && !pfn_offset &&
4850 (vaddr + huge_page_size(h) < vma->vm_end) &&
4851 (remainder >= pages_per_huge_page(h))) {
4852 vaddr += huge_page_size(h);
4853 remainder -= pages_per_huge_page(h);
4854 i += pages_per_huge_page(h);
4855 spin_unlock(ptl);
4856 continue;
4857 }
4858
4859same_page:
4860 if (pages) {
4861 pages[i] = mem_map_offset(page, pfn_offset);
4862
4863
4864
4865
4866
4867
4868
4869
4870
4871
4872 if (WARN_ON_ONCE(!try_grab_page(pages[i], flags))) {
4873 spin_unlock(ptl);
4874 remainder = 0;
4875 err = -ENOMEM;
4876 break;
4877 }
4878 }
4879
4880 if (vmas)
4881 vmas[i] = vma;
4882
4883 vaddr += PAGE_SIZE;
4884 ++pfn_offset;
4885 --remainder;
4886 ++i;
4887 if (vaddr < vma->vm_end && remainder &&
4888 pfn_offset < pages_per_huge_page(h)) {
4889
4890
4891
4892
4893 goto same_page;
4894 }
4895 spin_unlock(ptl);
4896 }
4897 *nr_pages = remainder;
4898
4899
4900
4901
4902
4903 *position = vaddr;
4904
4905 return i ? i : err;
4906}
4907
4908#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
4909
4910
4911
4912
4913#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
4914#endif
4915
4916unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4917 unsigned long address, unsigned long end, pgprot_t newprot)
4918{
4919 struct mm_struct *mm = vma->vm_mm;
4920 unsigned long start = address;
4921 pte_t *ptep;
4922 pte_t pte;
4923 struct hstate *h = hstate_vma(vma);
4924 unsigned long pages = 0;
4925 bool shared_pmd = false;
4926 struct mmu_notifier_range range;
4927
4928
4929
4930
4931
4932
4933 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
4934 0, vma, mm, start, end);
4935 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
4936
4937 BUG_ON(address >= end);
4938 flush_cache_range(vma, range.start, range.end);
4939
4940 mmu_notifier_invalidate_range_start(&range);
4941 i_mmap_lock_write(vma->vm_file->f_mapping);
4942 for (; address < end; address += huge_page_size(h)) {
4943 spinlock_t *ptl;
4944 ptep = huge_pte_offset(mm, address, huge_page_size(h));
4945 if (!ptep)
4946 continue;
4947 ptl = huge_pte_lock(h, mm, ptep);
4948 if (huge_pmd_unshare(mm, vma, &address, ptep)) {
4949 pages++;
4950 spin_unlock(ptl);
4951 shared_pmd = true;
4952 continue;
4953 }
4954 pte = huge_ptep_get(ptep);
4955 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
4956 spin_unlock(ptl);
4957 continue;
4958 }
4959 if (unlikely(is_hugetlb_entry_migration(pte))) {
4960 swp_entry_t entry = pte_to_swp_entry(pte);
4961
4962 if (is_write_migration_entry(entry)) {
4963 pte_t newpte;
4964
4965 make_migration_entry_read(&entry);
4966 newpte = swp_entry_to_pte(entry);
4967 set_huge_swap_pte_at(mm, address, ptep,
4968 newpte, huge_page_size(h));
4969 pages++;
4970 }
4971 spin_unlock(ptl);
4972 continue;
4973 }
4974 if (!huge_pte_none(pte)) {
4975 pte_t old_pte;
4976
4977 old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
4978 pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
4979 pte = arch_make_huge_pte(pte, vma, NULL, 0);
4980 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
4981 pages++;
4982 }
4983 spin_unlock(ptl);
4984 }
4985
4986
4987
4988
4989
4990
4991
4992 if (shared_pmd)
4993 flush_hugetlb_tlb_range(vma, range.start, range.end);
4994 else
4995 flush_hugetlb_tlb_range(vma, start, end);
4996
4997
4998
4999
5000
5001
5002 i_mmap_unlock_write(vma->vm_file->f_mapping);
5003 mmu_notifier_invalidate_range_end(&range);
5004
5005 return pages << h->order;
5006}
5007
5008int hugetlb_reserve_pages(struct inode *inode,
5009 long from, long to,
5010 struct vm_area_struct *vma,
5011 vm_flags_t vm_flags)
5012{
5013 long ret, chg, add = -1;
5014 struct hstate *h = hstate_inode(inode);
5015 struct hugepage_subpool *spool = subpool_inode(inode);
5016 struct resv_map *resv_map;
5017 struct hugetlb_cgroup *h_cg = NULL;
5018 long gbl_reserve, regions_needed = 0;
5019
5020
5021 if (from > to) {
5022 VM_WARN(1, "%s called with a negative range\n", __func__);
5023 return -EINVAL;
5024 }
5025
5026
5027
5028
5029
5030
5031 if (vm_flags & VM_NORESERVE)
5032 return 0;
5033
5034
5035
5036
5037
5038
5039
5040 if (!vma || vma->vm_flags & VM_MAYSHARE) {
5041
5042
5043
5044
5045
5046 resv_map = inode_resv_map(inode);
5047
5048 chg = region_chg(resv_map, from, to, ®ions_needed);
5049
5050 } else {
5051
5052 resv_map = resv_map_alloc();
5053 if (!resv_map)
5054 return -ENOMEM;
5055
5056 chg = to - from;
5057
5058 set_vma_resv_map(vma, resv_map);
5059 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
5060 }
5061
5062 if (chg < 0) {
5063 ret = chg;
5064 goto out_err;
5065 }
5066
5067 ret = hugetlb_cgroup_charge_cgroup_rsvd(
5068 hstate_index(h), chg * pages_per_huge_page(h), &h_cg);
5069
5070 if (ret < 0) {
5071 ret = -ENOMEM;
5072 goto out_err;
5073 }
5074
5075 if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
5076
5077
5078
5079 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
5080 }
5081
5082
5083
5084
5085
5086
5087 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
5088 if (gbl_reserve < 0) {
5089 ret = -ENOSPC;
5090 goto out_uncharge_cgroup;
5091 }
5092
5093
5094
5095
5096
5097 ret = hugetlb_acct_memory(h, gbl_reserve);
5098 if (ret < 0) {
5099 goto out_put_pages;
5100 }
5101
5102
5103
5104
5105
5106
5107
5108
5109
5110
5111
5112
5113 if (!vma || vma->vm_flags & VM_MAYSHARE) {
5114 add = region_add(resv_map, from, to, regions_needed, h, h_cg);
5115
5116 if (unlikely(add < 0)) {
5117 hugetlb_acct_memory(h, -gbl_reserve);
5118 goto out_put_pages;
5119 } else if (unlikely(chg > add)) {
5120
5121
5122
5123
5124
5125
5126
5127 long rsv_adjust;
5128
5129 hugetlb_cgroup_uncharge_cgroup_rsvd(
5130 hstate_index(h),
5131 (chg - add) * pages_per_huge_page(h), h_cg);
5132
5133 rsv_adjust = hugepage_subpool_put_pages(spool,
5134 chg - add);
5135 hugetlb_acct_memory(h, -rsv_adjust);
5136 }
5137 }
5138 return 0;
5139out_put_pages:
5140
5141 (void)hugepage_subpool_put_pages(spool, chg);
5142out_uncharge_cgroup:
5143 hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
5144 chg * pages_per_huge_page(h), h_cg);
5145out_err:
5146 if (!vma || vma->vm_flags & VM_MAYSHARE)
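		/*
		 * Only call region_abort() if the region_chg() above
		 * succeeded and the region_add() failed or was not reached.
		 */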
5147
5148
5149
5150 if (chg >= 0 && add < 0)
5151 region_abort(resv_map, from, to, regions_needed);
5152 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
5153 kref_put(&resv_map->refs, resv_map_release);
5154 return ret;
5155}
5156
5157long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
5158 long freed)
5159{
5160 struct hstate *h = hstate_inode(inode);
5161 struct resv_map *resv_map = inode_resv_map(inode);
5162 long chg = 0;
5163 struct hugepage_subpool *spool = subpool_inode(inode);
5164 long gbl_reserve;
5165
5166
5167
5168
5169
5170 if (resv_map) {
5171 chg = region_del(resv_map, start, end);
5172
5173
5174
5175
5176
5177 if (chg < 0)
5178 return chg;
5179 }
5180
5181 spin_lock(&inode->i_lock);
5182 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
5183 spin_unlock(&inode->i_lock);
5184
5185
5186
5187
5188
5189 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
5190 hugetlb_acct_memory(h, -gbl_reserve);
5191
5192 return 0;
5193}
5194
5195#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
5196static unsigned long page_table_shareable(struct vm_area_struct *svma,
5197 struct vm_area_struct *vma,
5198 unsigned long addr, pgoff_t idx)
5199{
5200 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
5201 svma->vm_start;
5202 unsigned long sbase = saddr & PUD_MASK;
5203 unsigned long s_end = sbase + PUD_SIZE;
5204
5205
5206 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
5207 unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
5208
5209
5210
5211
5212
5213 if (pmd_index(addr) != pmd_index(saddr) ||
5214 vm_flags != svm_flags ||
5215 sbase < svma->vm_start || svma->vm_end < s_end)
5216 return 0;
5217
5218 return saddr;
5219}
5220
5221static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
5222{
5223 unsigned long base = addr & PUD_MASK;
5224 unsigned long end = base + PUD_SIZE;
5225
5226
5227
5228
5229 if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
5230 return true;
5231 return false;
5232}
5233
5234
5235
5236
5237
5238
5239void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
5240 unsigned long *start, unsigned long *end)
5241{
5242 unsigned long a_start, a_end;
5243
5244 if (!(vma->vm_flags & VM_MAYSHARE))
5245 return;
5246
5247
5248 a_start = ALIGN_DOWN(*start, PUD_SIZE);
5249 a_end = ALIGN(*end, PUD_SIZE);
5250
5251
5252
5253
5254
5255 *start = max(vma->vm_start, a_start);
5256 *end = min(vma->vm_end, a_end);
5257}
5258
5259
5260
5261
5262
5263
5264
5265
5266
5267
5268
5269
5270
5271
5272
5273
5274
5275
5276pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
5277{
5278 struct vm_area_struct *vma = find_vma(mm, addr);
5279 struct address_space *mapping = vma->vm_file->f_mapping;
5280 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
5281 vma->vm_pgoff;
5282 struct vm_area_struct *svma;
5283 unsigned long saddr;
5284 pte_t *spte = NULL;
5285 pte_t *pte;
5286 spinlock_t *ptl;
5287
5288 if (!vma_shareable(vma, addr))
5289 return (pte_t *)pmd_alloc(mm, pud, addr);
5290
5291 i_mmap_assert_locked(mapping);
5292 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
5293 if (svma == vma)
5294 continue;
5295
5296 saddr = page_table_shareable(svma, vma, addr, idx);
5297 if (saddr) {
5298 spte = huge_pte_offset(svma->vm_mm, saddr,
5299 vma_mmu_pagesize(svma));
5300 if (spte) {
5301 get_page(virt_to_page(spte));
5302 break;
5303 }
5304 }
5305 }
5306
5307 if (!spte)
5308 goto out;
5309
5310 ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
5311 if (pud_none(*pud)) {
5312 pud_populate(mm, pud,
5313 (pmd_t *)((unsigned long)spte & PAGE_MASK));
5314 mm_inc_nr_pmds(mm);
5315 } else {
5316 put_page(virt_to_page(spte));
5317 }
5318 spin_unlock(ptl);
5319out:
5320 pte = (pte_t *)pmd_alloc(mm, pud, addr);
5321 return pte;
5322}
5323
5324
5325
5326
5327
5328
5329
5330
5331
5332
5333
5334
5335
5336int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
5337 unsigned long *addr, pte_t *ptep)
5338{
5339 pgd_t *pgd = pgd_offset(mm, *addr);
5340 p4d_t *p4d = p4d_offset(pgd, *addr);
5341 pud_t *pud = pud_offset(p4d, *addr);
5342
5343 i_mmap_assert_write_locked(vma->vm_file->f_mapping);
5344 BUG_ON(page_count(virt_to_page(ptep)) == 0);
5345 if (page_count(virt_to_page(ptep)) == 1)
5346 return 0;
5347
5348 pud_clear(pud);
5349 put_page(virt_to_page(ptep));
5350 mm_dec_nr_pmds(mm);
5351 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
5352 return 1;
5353}
5354#define want_pmd_share() (1)
5355#else
5356pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
5357{
5358 return NULL;
5359}
5360
5361int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
5362 unsigned long *addr, pte_t *ptep)
5363{
5364 return 0;
5365}
5366
5367void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
5368 unsigned long *start, unsigned long *end)
5369{
5370}
5371#define want_pmd_share() (0)
5372#endif
5373
5374#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (pud) {
		if (sz == PUD_SIZE) {
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			if (want_pmd_share() && pud_none(*pud))
				pte = huge_pmd_share(mm, addr, pud);
			else
				pte = (pte_t *)pmd_alloc(mm, pud, addr);
		}
	}
	BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));

	return pte;
}

/*
 * huge_pte_offset() - Walk the page table to resolve the hugepage
 * entry at address @addr
 *
 * Return: Pointer to page table entry (PUD or PMD) for
 * address @addr, or NULL if a !p*d_present() entry is found and the
 * size @sz doesn't match the hugepage size at this level of the page
 * table.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (sz == PUD_SIZE)
		/* must be pud huge, non-present or none */
		return (pte_t *)pud;
	if (!pud_present(*pud))
		return NULL;
	/* must have a valid entry and size to go further */

	pmd = pmd_offset(pud, addr);
	/* must be pmd huge, non-present or none */
	return (pte_t *)pmd;
}

#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */

/*
 * These functions are overwritable if your architecture needs its own
 * behavior.
 */
struct page * __weak
follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}

struct page * __weak
follow_huge_pd(struct vm_area_struct *vma,
	       unsigned long address, hugepd_t hpd, int flags, int pdshift)
{
	WARN(1, "hugepd follow called with no support for hugepage directory format\n");
	return NULL;
}

struct page * __weak
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int flags)
{
	struct page *page = NULL;
	spinlock_t *ptl;
	pte_t pte;

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return NULL;

retry:
	ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	/*
	 * make sure that the address range covered by this pmd is not
	 * unmapped from other threads.
	 */
	if (!pmd_huge(*pmd))
		goto out;
	pte = huge_ptep_get((pte_t *)pmd);
	if (pte_present(pte)) {
		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
		/*
		 * try_grab_page() should always succeed here, because: a) we
		 * hold the pmd (ptl) lock, and b) we've just checked that the
		 * huge pmd (head) page is present in the page tables. The ptl
		 * prevents the head page and tail pages from being rearranged
		 * in any way. So this page must be available at this point,
		 * unless the page refcount overflowed:
		 */
		if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
			page = NULL;
			goto out;
		}
	} else {
		if (is_hugetlb_entry_migration(pte)) {
			spin_unlock(ptl);
			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
			goto retry;
		}
		/*
		 * hwpoisoned entry is treated as no_page_table in
		 * follow_page_mask().
		 */
	}
out:
	spin_unlock(ptl);
	return page;
}

struct page * __weak
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int flags)
{
	if (flags & (FOLL_GET | FOLL_PIN))
		return NULL;

	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
}

struct page * __weak
follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
{
	if (flags & (FOLL_GET | FOLL_PIN))
		return NULL;

	return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
}

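/*
 * Isolate an active huge page for migration: take a reference and move it
 * from the active list onto @list.  Returns false if the page is not
 * marked active or its refcount has already dropped to zero.
 */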
bool isolate_huge_page(struct page *page, struct list_head *list)
{
	bool ret = true;

	VM_BUG_ON_PAGE(!PageHead(page), page);
	spin_lock(&hugetlb_lock);
	if (!page_huge_active(page) || !get_page_unless_zero(page)) {
		ret = false;
		goto unlock;
	}
	clear_page_huge_active(page);
	list_move_tail(&page->lru, list);
unlock:
	spin_unlock(&hugetlb_lock);
	return ret;
}

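/*
 * Put an isolated huge page back on the active list and drop the reference
 * taken by isolate_huge_page().
 */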
void putback_active_hugepage(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	spin_lock(&hugetlb_lock);
	set_page_huge_active(page);
	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
	put_page(page);
}

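/*
 * Transfer hugetlb-specific state (cgroup charge, page owner migrate reason,
 * and the temporary/surplus accounting below) from @oldpage to @newpage
 * after migration.
 */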
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
{
	struct hstate *h = page_hstate(oldpage);

	hugetlb_cgroup_migrate(oldpage, newpage);
	set_page_owner_migrate_reason(newpage, reason);

	/*
	 * transfer temporary state of the new huge page. This is
	 * reverse to other transitions because the newpage is going to
	 * be final while the old one will be freed so it takes over
	 * the temporary status.
	 *
	 * Also note that we have to transfer the per-node surplus state
	 * here as well otherwise the global surplus count will not match
	 * the per-node's.
	 */
	if (PageHugeTemporary(newpage)) {
		int old_nid = page_to_nid(oldpage);
		int new_nid = page_to_nid(newpage);

		SetPageHugeTemporary(oldpage);
		ClearPageHugeTemporary(newpage);

		spin_lock(&hugetlb_lock);
		if (h->surplus_huge_pages_node[old_nid]) {
			h->surplus_huge_pages_node[old_nid]--;
			h->surplus_huge_pages_node[new_nid]++;
		}
		spin_unlock(&hugetlb_lock);
	}
}

#ifdef CONFIG_CMA
static bool cma_reserve_called __initdata;

static int __init cmdline_parse_hugetlb_cma(char *p)
{
	hugetlb_cma_size = memparse(p, &p);
	return 0;
}

early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);

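/*
 * Reserve CMA areas for gigantic pages, splitting the requested
 * hugetlb_cma= size roughly evenly across the online nodes.  Called from
 * arch-specific setup code early during boot with the gigantic page order.
 */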
void __init hugetlb_cma_reserve(int order)
{
	unsigned long size, reserved, per_node;
	int nid;

	cma_reserve_called = true;

	if (!hugetlb_cma_size)
		return;

	if (hugetlb_cma_size < (PAGE_SIZE << order)) {
		pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
			(PAGE_SIZE << order) / SZ_1M);
		return;
	}

	/*
	 * If 3 GB area is requested on a machine with 4 numa nodes,
	 * let's allocate 1 GB on first three nodes and ignore the last one.
	 */
	per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
	pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
		hugetlb_cma_size / SZ_1M, per_node / SZ_1M);

	reserved = 0;
	for_each_node_state(nid, N_ONLINE) {
		int res;
		char name[CMA_MAX_NAME];

		size = min(per_node, hugetlb_cma_size - reserved);
		size = round_up(size, PAGE_SIZE << order);

		snprintf(name, sizeof(name), "hugetlb%d", nid);
		res = cma_declare_contiguous_nid(0, size, 0, PAGE_SIZE << order,
						 0, false, name,
						 &hugetlb_cma[nid], nid);
		if (res) {
			pr_warn("hugetlb_cma: reservation failed: err %d, node %d\n",
				res, nid);
			continue;
		}

		reserved += size;
		pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
			size / SZ_1M, nid);

		if (reserved >= hugetlb_cma_size)
			break;
	}
}

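/*
 * Warn if hugetlb_cma= was given on the command line but the architecture
 * never called hugetlb_cma_reserve(), i.e. the reservation was ignored.
 */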
void __init hugetlb_cma_check(void)
{
	if (!hugetlb_cma_size || cma_reserve_called)
		return;

	pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
}

#endif /* CONFIG_CMA */
