#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/memblock.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>
#include <linux/numa.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_owner.h>
#include "internal.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

/*
 * Minimum page order among the possible hugepage sizes; set to its real
 * value during hstate initialization (see hugetlb_init_hstates()).
 */
static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;
static bool __initdata parsed_valid_hugepagesz = true;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Hash table of mutexes used to serialize hugetlb faults on the same page
 * index, sized at boot in hugetlb_init().
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
74
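/*
 * Unlock the subpool and, if it has been marked unused (no remaining
 * references and no pages in use), release its minimum size reservation
 * and free it.  Must be called with spool->lock held.
 */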
75static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
76{
77 bool free = (spool->count == 0) && (spool->used_hpages == 0);
78
79 spin_unlock(&spool->lock);
80
81
82
83
84 if (free) {
85 if (spool->min_hpages != -1)
86 hugetlb_acct_memory(spool->hstate,
87 -spool->min_hpages);
88 kfree(spool);
89 }
90}
91
92struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
93 long min_hpages)
94{
95 struct hugepage_subpool *spool;
96
97 spool = kzalloc(sizeof(*spool), GFP_KERNEL);
98 if (!spool)
99 return NULL;
100
101 spin_lock_init(&spool->lock);
102 spool->count = 1;
103 spool->max_hpages = max_hpages;
104 spool->hstate = h;
105 spool->min_hpages = min_hpages;
106
107 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
108 kfree(spool);
109 return NULL;
110 }
111 spool->rsv_hpages = min_hpages;
112
113 return spool;
114}
115
116void hugepage_put_subpool(struct hugepage_subpool *spool)
117{
118 spin_lock(&spool->lock);
119 BUG_ON(!spool->count);
120 spool->count--;
121 unlock_or_release_subpool(spool);
122}
123
124
125
126
127
128
129
130
131
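/*
 * Subpool accounting for allocating and reserving pages.
 * Returns -ENOMEM if the subpool's maximum size would be exceeded,
 * otherwise the number of pages that must still be charged to the global
 * pools: the part of @delta not already covered by the subpool's minimum
 * size reservation.  A return of 0 means the reservation already covers
 * the request.
 */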
132static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
133 long delta)
134{
135 long ret = delta;
136
137 if (!spool)
138 return ret;
139
140 spin_lock(&spool->lock);
141
142 if (spool->max_hpages != -1) {
143 if ((spool->used_hpages + delta) <= spool->max_hpages)
144 spool->used_hpages += delta;
145 else {
146 ret = -ENOMEM;
147 goto unlock_ret;
148 }
149 }
150
151
152 if (spool->min_hpages != -1 && spool->rsv_hpages) {
153 if (delta > spool->rsv_hpages) {
154
155
156
157
158 ret = delta - spool->rsv_hpages;
159 spool->rsv_hpages = 0;
160 } else {
161 ret = 0;
162 spool->rsv_hpages -= delta;
163 }
164 }
165
166unlock_ret:
167 spin_unlock(&spool->lock);
168 return ret;
169}
170
171
172
173
174
175
176
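/*
 * Subpool accounting for freeing and unreserving pages.
 * Returns the number of global pool pages the caller should adjust;
 * pages handed back to the subpool's minimum size reservation are not
 * returned to the global pools.  unlock_or_release_subpool() is used on
 * exit, so an otherwise unreferenced subpool may be freed here.
 */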
177static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
178 long delta)
179{
180 long ret = delta;
181
182 if (!spool)
183 return delta;
184
185 spin_lock(&spool->lock);
186
187 if (spool->max_hpages != -1)
188 spool->used_hpages -= delta;
189
190
191 if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
192 if (spool->rsv_hpages + delta <= spool->min_hpages)
193 ret = 0;
194 else
195 ret = spool->rsv_hpages + delta - spool->min_hpages;
196
197 spool->rsv_hpages += delta;
198 if (spool->rsv_hpages > spool->min_hpages)
199 spool->rsv_hpages = spool->min_hpages;
200 }
201
202
203
204
205
206 unlock_or_release_subpool(spool);
207
208 return ret;
209}
210
211static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
212{
213 return HUGETLBFS_SB(inode->i_sb)->spool;
214}
215
216static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
217{
218 return subpool_inode(file_inode(vma->vm_file));
219}
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
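/*
 * Region tracking -- ranges of huge pages reserved in a mapping are kept
 * in a resv_map as a list of file_region structs ordered by file offset
 * and protected by resv_map->lock.  Because new entries may be needed
 * while the lock is held, a small cache of pre-allocated entries
 * (resv_map->region_cache) is maintained alongside the list.
 */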
240struct file_region {
241 struct list_head link;
242 long from;
243 long to;
244};
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
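/*
 * Add the huge page range [f, t) to the reserve map.  Existing regions
 * that overlap the range are expanded or merged.  A matching region_chg()
 * call must have been made beforehand so that a cached file_region entry
 * is guaranteed to be available if a new one is needed; region_add()
 * therefore cannot fail.  Returns the number of pages newly added.
 */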
260static long region_add(struct resv_map *resv, long f, long t)
261{
262 struct list_head *head = &resv->regions;
263 struct file_region *rg, *nrg, *trg;
264 long add = 0;
265
266 spin_lock(&resv->lock);
267
268 list_for_each_entry(rg, head, link)
269 if (f <= rg->to)
270 break;
271
272
273
274
275
276
277
278 if (&rg->link == head || t < rg->from) {
279 VM_BUG_ON(resv->region_cache_count <= 0);
280
281 resv->region_cache_count--;
282 nrg = list_first_entry(&resv->region_cache, struct file_region,
283 link);
284 list_del(&nrg->link);
285
286 nrg->from = f;
287 nrg->to = t;
288 list_add(&nrg->link, rg->link.prev);
289
290 add += t - f;
291 goto out_locked;
292 }
293
294
295 if (f > rg->from)
296 f = rg->from;
297
298
299 nrg = rg;
300 list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
301 if (&rg->link == head)
302 break;
303 if (rg->from > t)
304 break;
305
306
307
308
309 if (rg->to > t)
310 t = rg->to;
311 if (rg != nrg) {
312
313
314
315
316 add -= (rg->to - rg->from);
317 list_del(&rg->link);
318 kfree(rg);
319 }
320 }
321
322 add += (nrg->from - f);
323 nrg->from = f;
324 add += t - nrg->to;
325 nrg->to = t;
326
327out_locked:
328 resv->adds_in_progress--;
329 spin_unlock(&resv->lock);
330 VM_BUG_ON(add < 0);
331 return add;
332}
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
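/*
 * Examine the reserve map and return the number of huge pages in [f, t)
 * that are not already reserved (the "charge").  The existing regions are
 * not modified; the actual insertion happens later via region_add(), or
 * the operation is undone via region_abort().  As a side effect, enough
 * file_region entries are cached (allocating with the lock dropped when
 * necessary) so that the later region_add() cannot fail.
 */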
356static long region_chg(struct resv_map *resv, long f, long t)
357{
358 struct list_head *head = &resv->regions;
359 struct file_region *rg, *nrg = NULL;
360 long chg = 0;
361
362retry:
363 spin_lock(&resv->lock);
364retry_locked:
365 resv->adds_in_progress++;
366
367
368
369
370
371 if (resv->adds_in_progress > resv->region_cache_count) {
372 struct file_region *trg;
373
374 VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
375
376 resv->adds_in_progress--;
377 spin_unlock(&resv->lock);
378
379 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
380 if (!trg) {
381 kfree(nrg);
382 return -ENOMEM;
383 }
384
385 spin_lock(&resv->lock);
386 list_add(&trg->link, &resv->region_cache);
387 resv->region_cache_count++;
388 goto retry_locked;
389 }
390
391
392 list_for_each_entry(rg, head, link)
393 if (f <= rg->to)
394 break;
395
396
397
398
399 if (&rg->link == head || t < rg->from) {
400 if (!nrg) {
401 resv->adds_in_progress--;
402 spin_unlock(&resv->lock);
403 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
404 if (!nrg)
405 return -ENOMEM;
406
407 nrg->from = f;
408 nrg->to = f;
409 INIT_LIST_HEAD(&nrg->link);
410 goto retry;
411 }
412
413 list_add(&nrg->link, rg->link.prev);
414 chg = t - f;
415 goto out_nrg;
416 }
417
418
419 if (f > rg->from)
420 f = rg->from;
421 chg = t - f;
422
423
424 list_for_each_entry(rg, rg->link.prev, link) {
425 if (&rg->link == head)
426 break;
427 if (rg->from > t)
428 goto out;
429
430
431
432
433 if (rg->to > t) {
434 chg += rg->to - t;
435 t = rg->to;
436 }
437 chg -= rg->to - rg->from;
438 }
439
440out:
441 spin_unlock(&resv->lock);
442
443 kfree(nrg);
444 return chg;
445out_nrg:
446 spin_unlock(&resv->lock);
447 return chg;
448}
449
450
451
452
453
454
455
456
457
458
459
460
461static void region_abort(struct resv_map *resv, long f, long t)
462{
463 spin_lock(&resv->lock);
464 VM_BUG_ON(!resv->region_cache_count);
465 resv->adds_in_progress--;
466 spin_unlock(&resv->lock);
467}
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
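/*
 * Delete the range [f, t) from the reserve map.  A range strictly inside
 * an existing entry requires splitting that entry in two, which needs a
 * spare file_region; if none is cached and none can be allocated,
 * -ENOMEM is returned and the caller must fix up the reservation counts
 * (see hugetlb_fix_reserve_counts()).  Otherwise returns the number of
 * pages removed from the map.
 */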
483static long region_del(struct resv_map *resv, long f, long t)
484{
485 struct list_head *head = &resv->regions;
486 struct file_region *rg, *trg;
487 struct file_region *nrg = NULL;
488 long del = 0;
489
490retry:
491 spin_lock(&resv->lock);
492 list_for_each_entry_safe(rg, trg, head, link) {
493
494
495
496
497
498
499
500 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
501 continue;
502
503 if (rg->from >= t)
504 break;
505
506 if (f > rg->from && t < rg->to) {
507
508
509
510
511 if (!nrg &&
512 resv->region_cache_count > resv->adds_in_progress) {
513 nrg = list_first_entry(&resv->region_cache,
514 struct file_region,
515 link);
516 list_del(&nrg->link);
517 resv->region_cache_count--;
518 }
519
520 if (!nrg) {
521 spin_unlock(&resv->lock);
522 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
523 if (!nrg)
524 return -ENOMEM;
525 goto retry;
526 }
527
528 del += t - f;
529
530
531 nrg->from = t;
532 nrg->to = rg->to;
533 INIT_LIST_HEAD(&nrg->link);
534
535
536 rg->to = f;
537
538 list_add(&nrg->link, &rg->link);
539 nrg = NULL;
540 break;
541 }
542
543 if (f <= rg->from && t >= rg->to) {
544 del += rg->to - rg->from;
545 list_del(&rg->link);
546 kfree(rg);
547 continue;
548 }
549
550 if (f <= rg->from) {
551 del += t - rg->from;
552 rg->from = t;
553 } else {
554 del += rg->to - f;
555 rg->to = f;
556 }
557 }
558
559 spin_unlock(&resv->lock);
560 kfree(nrg);
561 return del;
562}
563
564
565
566
567
568
569
570
571
572
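/*
 * Called when a region_del() split fails with -ENOMEM: the reservation
 * could not be removed from the map, so charge one extra page to the
 * subpool (and, if needed, the global pool) to keep the reserve counters
 * consistent with the map.
 */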
573void hugetlb_fix_reserve_counts(struct inode *inode)
574{
575 struct hugepage_subpool *spool = subpool_inode(inode);
576 long rsv_adjust;
577
578 rsv_adjust = hugepage_subpool_get_pages(spool, 1);
579 if (rsv_adjust) {
580 struct hstate *h = hstate_inode(inode);
581
582 hugetlb_acct_memory(h, 1);
583 }
584}
585
586
587
588
589
590static long region_count(struct resv_map *resv, long f, long t)
591{
592 struct list_head *head = &resv->regions;
593 struct file_region *rg;
594 long chg = 0;
595
596 spin_lock(&resv->lock);
597
598 list_for_each_entry(rg, head, link) {
599 long seg_from;
600 long seg_to;
601
602 if (rg->to <= f)
603 continue;
604 if (rg->from >= t)
605 break;
606
607 seg_from = max(rg->from, f);
608 seg_to = min(rg->to, t);
609
610 chg += seg_to - seg_from;
611 }
612 spin_unlock(&resv->lock);
613
614 return chg;
615}
616
617
618
619
620
621static pgoff_t vma_hugecache_offset(struct hstate *h,
622 struct vm_area_struct *vma, unsigned long address)
623{
624 return ((address - vma->vm_start) >> huge_page_shift(h)) +
625 (vma->vm_pgoff >> huge_page_order(h));
626}
627
628pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
629 unsigned long address)
630{
631 return vma_hugecache_offset(hstate_vma(vma), vma, address);
632}
633EXPORT_SYMBOL_GPL(linear_hugepage_index);
634
635
636
637
638
639unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
640{
641 if (vma->vm_ops && vma->vm_ops->pagesize)
642 return vma->vm_ops->pagesize(vma);
643 return PAGE_SIZE;
644}
645EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
646
647
648
649
650
651
652
653__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
654{
655 return vma_kernel_pagesize(vma);
656}
657
658
659
660
661
662
663#define HPAGE_RESV_OWNER (1UL << 0)
664#define HPAGE_RESV_UNMAPPED (1UL << 1)
665#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
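/*
 * For shared mappings the reserve map hangs off the inode.  For private
 * mappings a pointer to the resv_map is stored in vma->vm_private_data,
 * with the HPAGE_RESV_* flags above encoded in the low bits of that
 * value; the helpers below access the raw value.
 */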
686static unsigned long get_vma_private_data(struct vm_area_struct *vma)
687{
688 return (unsigned long)vma->vm_private_data;
689}
690
691static void set_vma_private_data(struct vm_area_struct *vma,
692 unsigned long value)
693{
694 vma->vm_private_data = (void *)value;
695}
696
697struct resv_map *resv_map_alloc(void)
698{
699 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
700 struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
701
702 if (!resv_map || !rg) {
703 kfree(resv_map);
704 kfree(rg);
705 return NULL;
706 }
707
708 kref_init(&resv_map->refs);
709 spin_lock_init(&resv_map->lock);
710 INIT_LIST_HEAD(&resv_map->regions);
711
712 resv_map->adds_in_progress = 0;
713
714 INIT_LIST_HEAD(&resv_map->region_cache);
715 list_add(&rg->link, &resv_map->region_cache);
716 resv_map->region_cache_count = 1;
717
718 return resv_map;
719}
720
721void resv_map_release(struct kref *ref)
722{
723 struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
724 struct list_head *head = &resv_map->region_cache;
725 struct file_region *rg, *trg;
726
727
728 region_del(resv_map, 0, LONG_MAX);
729
730
731 list_for_each_entry_safe(rg, trg, head, link) {
732 list_del(&rg->link);
733 kfree(rg);
734 }
735
736 VM_BUG_ON(resv_map->adds_in_progress);
737
738 kfree(resv_map);
739}
740
741static inline struct resv_map *inode_resv_map(struct inode *inode)
742{
743 return inode->i_mapping->private_data;
744}
745
746static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
747{
748 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
749 if (vma->vm_flags & VM_MAYSHARE) {
750 struct address_space *mapping = vma->vm_file->f_mapping;
751 struct inode *inode = mapping->host;
752
753 return inode_resv_map(inode);
754
755 } else {
756 return (struct resv_map *)(get_vma_private_data(vma) &
757 ~HPAGE_RESV_MASK);
758 }
759}
760
761static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
762{
763 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
764 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
765
766 set_vma_private_data(vma, (get_vma_private_data(vma) &
767 HPAGE_RESV_MASK) | (unsigned long)map);
768}
769
770static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
771{
772 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
773 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
774
775 set_vma_private_data(vma, get_vma_private_data(vma) | flags);
776}
777
778static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
779{
780 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
781
782 return (get_vma_private_data(vma) & flag) != 0;
783}
784
785
786void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
787{
788 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
789 if (!(vma->vm_flags & VM_MAYSHARE))
790 vma->vm_private_data = (void *)0;
791}
792
793
794static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
795{
796 if (vma->vm_flags & VM_NORESERVE) {
797
798
799
800
801
802
803
804
805
806 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
807 return true;
808 else
809 return false;
810 }
811
812
813 if (vma->vm_flags & VM_MAYSHARE) {
814
815
816
817
818
819
820
821 if (chg)
822 return false;
823 else
824 return true;
825 }
826
827
828
829
830
831 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847 if (chg)
848 return false;
849 else
850 return true;
851 }
852
853 return false;
854}
855
856static void enqueue_huge_page(struct hstate *h, struct page *page)
857{
858 int nid = page_to_nid(page);
859 list_move(&page->lru, &h->hugepage_freelists[nid]);
860 h->free_huge_pages++;
861 h->free_huge_pages_node[nid]++;
862}
863
864static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
865{
866 struct page *page;
867
868 list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
869 if (!PageHWPoison(page))
870 break;
871
872
873
874
875 if (&h->hugepage_freelists[nid] == &page->lru)
876 return NULL;
877 list_move(&page->lru, &h->hugepage_activelist);
878 set_page_refcounted(page);
879 h->free_huge_pages--;
880 h->free_huge_pages_node[nid]--;
881 return page;
882}
883
884static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
885 nodemask_t *nmask)
886{
887 unsigned int cpuset_mems_cookie;
888 struct zonelist *zonelist;
889 struct zone *zone;
890 struct zoneref *z;
891 int node = NUMA_NO_NODE;
892
893 zonelist = node_zonelist(nid, gfp_mask);
894
895retry_cpuset:
896 cpuset_mems_cookie = read_mems_allowed_begin();
897 for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
898 struct page *page;
899
900 if (!cpuset_zone_allowed(zone, gfp_mask))
901 continue;
902
903
904
905
906 if (zone_to_nid(zone) == node)
907 continue;
908 node = zone_to_nid(zone);
909
910 page = dequeue_huge_page_node_exact(h, node);
911 if (page)
912 return page;
913 }
914 if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
915 goto retry_cpuset;
916
917 return NULL;
918}
919
920
921static inline gfp_t htlb_alloc_mask(struct hstate *h)
922{
923 if (hugepage_movable_supported(h))
924 return GFP_HIGHUSER_MOVABLE;
925 else
926 return GFP_HIGHUSER;
927}
928
929static struct page *dequeue_huge_page_vma(struct hstate *h,
930 struct vm_area_struct *vma,
931 unsigned long address, int avoid_reserve,
932 long chg)
933{
934 struct page *page;
935 struct mempolicy *mpol;
936 gfp_t gfp_mask;
937 nodemask_t *nodemask;
938 int nid;
939
940
941
942
943
944
945 if (!vma_has_reserves(vma, chg) &&
946 h->free_huge_pages - h->resv_huge_pages == 0)
947 goto err;
948
949
950 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
951 goto err;
952
953 gfp_mask = htlb_alloc_mask(h);
954 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
955 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
956 if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
957 SetPagePrivate(page);
958 h->resv_huge_pages--;
959 }
960
961 mpol_cond_put(mpol);
962 return page;
963
964err:
965 return NULL;
966}
967
968
969
970
971
972
973
974
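/*
 * Helpers for the hstate_next_node_to_{alloc,free} round robin: return
 * the next node in @nodes_allowed after @nid, wrapping around the mask,
 * and validate that a starting node is actually in the mask (the pool
 * may previously have been adjusted with a different mask).
 */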
975static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
976{
977 nid = next_node_in(nid, *nodes_allowed);
978 VM_BUG_ON(nid >= MAX_NUMNODES);
979
980 return nid;
981}
982
983static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
984{
985 if (!node_isset(nid, *nodes_allowed))
986 nid = next_node_allowed(nid, nodes_allowed);
987 return nid;
988}
989
990
991
992
993
994
995
996static int hstate_next_node_to_alloc(struct hstate *h,
997 nodemask_t *nodes_allowed)
998{
999 int nid;
1000
1001 VM_BUG_ON(!nodes_allowed);
1002
1003 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1004 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1005
1006 return nid;
1007}
1008
1009
1010
1011
1012
1013
1014
1015static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1016{
1017 int nid;
1018
1019 VM_BUG_ON(!nodes_allowed);
1020
1021 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1022 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1023
1024 return nid;
1025}
1026
1027#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
1028 for (nr_nodes = nodes_weight(*mask); \
1029 nr_nodes > 0 && \
1030 ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
1031 nr_nodes--)
1032
1033#define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
1034 for (nr_nodes = nodes_weight(*mask); \
1035 nr_nodes > 0 && \
1036 ((node = hstate_next_node_to_free(hs, mask)) || 1); \
1037 nr_nodes--)
1038
1039#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1040static void destroy_compound_gigantic_page(struct page *page,
1041 unsigned int order)
1042{
1043 int i;
1044 int nr_pages = 1 << order;
1045 struct page *p = page + 1;
1046
1047 atomic_set(compound_mapcount_ptr(page), 0);
1048 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1049 clear_compound_head(p);
1050 set_page_refcounted(p);
1051 }
1052
1053 set_compound_order(page, 0);
1054 __ClearPageHead(page);
1055}
1056
1057static void free_gigantic_page(struct page *page, unsigned int order)
1058{
1059 free_contig_range(page_to_pfn(page), 1 << order);
1060}
1061
1062static int __alloc_gigantic_page(unsigned long start_pfn,
1063 unsigned long nr_pages, gfp_t gfp_mask)
1064{
1065 unsigned long end_pfn = start_pfn + nr_pages;
1066 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
1067 gfp_mask);
1068}
1069
1070static bool pfn_range_valid_gigantic(struct zone *z,
1071 unsigned long start_pfn, unsigned long nr_pages)
1072{
1073 unsigned long i, end_pfn = start_pfn + nr_pages;
1074 struct page *page;
1075
1076 for (i = start_pfn; i < end_pfn; i++) {
1077 if (!pfn_valid(i))
1078 return false;
1079
1080 page = pfn_to_page(i);
1081
1082 if (page_zone(page) != z)
1083 return false;
1084
1085 if (PageReserved(page))
1086 return false;
1087
1088 if (page_count(page) > 0)
1089 return false;
1090
1091 if (PageHuge(page))
1092 return false;
1093 }
1094
1095 return true;
1096}
1097
1098static bool zone_spans_last_pfn(const struct zone *zone,
1099 unsigned long start_pfn, unsigned long nr_pages)
1100{
1101 unsigned long last_pfn = start_pfn + nr_pages - 1;
1102 return zone_spans_pfn(zone, last_pfn);
1103}
1104
1105static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1106 int nid, nodemask_t *nodemask)
1107{
1108 unsigned int order = huge_page_order(h);
1109 unsigned long nr_pages = 1 << order;
1110 unsigned long ret, pfn, flags;
1111 struct zonelist *zonelist;
1112 struct zone *zone;
1113 struct zoneref *z;
1114
1115 zonelist = node_zonelist(nid, gfp_mask);
1116 for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
1117 spin_lock_irqsave(&zone->lock, flags);
1118
1119 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
1120 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
1121 if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
1122
1123
1124
1125
1126
1127
1128
1129 spin_unlock_irqrestore(&zone->lock, flags);
1130 ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
1131 if (!ret)
1132 return pfn_to_page(pfn);
1133 spin_lock_irqsave(&zone->lock, flags);
1134 }
1135 pfn += nr_pages;
1136 }
1137
1138 spin_unlock_irqrestore(&zone->lock, flags);
1139 }
1140
1141 return NULL;
1142}
1143
1144static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1145static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1146
1147#else
1148static inline bool gigantic_page_supported(void) { return false; }
1149static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1150 int nid, nodemask_t *nodemask) { return NULL; }
1151static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1152static inline void destroy_compound_gigantic_page(struct page *page,
1153 unsigned int order) { }
1154#endif
1155
1156static void update_and_free_page(struct hstate *h, struct page *page)
1157{
1158 int i;
1159
1160 if (hstate_is_gigantic(h) && !gigantic_page_supported())
1161 return;
1162
1163 h->nr_huge_pages--;
1164 h->nr_huge_pages_node[page_to_nid(page)]--;
1165 for (i = 0; i < pages_per_huge_page(h); i++) {
1166 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1167 1 << PG_referenced | 1 << PG_dirty |
1168 1 << PG_active | 1 << PG_private |
1169 1 << PG_writeback);
1170 }
1171 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1172 set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1173 set_page_refcounted(page);
1174 if (hstate_is_gigantic(h)) {
1175 destroy_compound_gigantic_page(page, huge_page_order(h));
1176 free_gigantic_page(page, huge_page_order(h));
1177 } else {
1178 __free_pages(page, huge_page_order(h));
1179 }
1180}
1181
1182struct hstate *size_to_hstate(unsigned long size)
1183{
1184 struct hstate *h;
1185
1186 for_each_hstate(h) {
1187 if (huge_page_size(h) == size)
1188 return h;
1189 }
1190 return NULL;
1191}
1192
1193
1194
1195
1196
1197
1198
1199bool page_huge_active(struct page *page)
1200{
1201 VM_BUG_ON_PAGE(!PageHuge(page), page);
1202 return PageHead(page) && PagePrivate(&page[1]);
1203}
1204
1205
1206static void set_page_huge_active(struct page *page)
1207{
1208 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1209 SetPagePrivate(&page[1]);
1210}
1211
1212static void clear_page_huge_active(struct page *page)
1213{
1214 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1215 ClearPagePrivate(&page[1]);
1216}
1217
1218
1219
1220
1221
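/*
 * PageHugeTemporary marks a huge page allocated as a temporary migration
 * target that is not accounted as a surplus page.  The flag is internal
 * to hugetlb and is encoded in page[2].mapping.
 */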
1222static inline bool PageHugeTemporary(struct page *page)
1223{
1224 if (!PageHuge(page))
1225 return false;
1226
1227 return (unsigned long)page[2].mapping == -1U;
1228}
1229
1230static inline void SetPageHugeTemporary(struct page *page)
1231{
1232 page[2].mapping = (void *)-1U;
1233}
1234
1235static inline void ClearPageHugeTemporary(struct page *page)
1236{
1237 page[2].mapping = NULL;
1238}
1239
1240void free_huge_page(struct page *page)
1241{
1242
1243
1244
1245
1246 struct hstate *h = page_hstate(page);
1247 int nid = page_to_nid(page);
1248 struct hugepage_subpool *spool =
1249 (struct hugepage_subpool *)page_private(page);
1250 bool restore_reserve;
1251
1252 VM_BUG_ON_PAGE(page_count(page), page);
1253 VM_BUG_ON_PAGE(page_mapcount(page), page);
1254
1255 set_page_private(page, 0);
1256 page->mapping = NULL;
1257 restore_reserve = PagePrivate(page);
1258 ClearPagePrivate(page);
1259
1260
1261
1262
1263
1264
1265 if (hugepage_subpool_put_pages(spool, 1) == 0)
1266 restore_reserve = true;
1267
1268 spin_lock(&hugetlb_lock);
1269 clear_page_huge_active(page);
1270 hugetlb_cgroup_uncharge_page(hstate_index(h),
1271 pages_per_huge_page(h), page);
1272 if (restore_reserve)
1273 h->resv_huge_pages++;
1274
1275 if (PageHugeTemporary(page)) {
1276 list_del(&page->lru);
1277 ClearPageHugeTemporary(page);
1278 update_and_free_page(h, page);
1279 } else if (h->surplus_huge_pages_node[nid]) {
1280
1281 list_del(&page->lru);
1282 update_and_free_page(h, page);
1283 h->surplus_huge_pages--;
1284 h->surplus_huge_pages_node[nid]--;
1285 } else {
1286 arch_clear_hugepage_flags(page);
1287 enqueue_huge_page(h, page);
1288 }
1289 spin_unlock(&hugetlb_lock);
1290}
1291
1292static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1293{
1294 INIT_LIST_HEAD(&page->lru);
1295 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1296 spin_lock(&hugetlb_lock);
1297 set_hugetlb_cgroup(page, NULL);
1298 h->nr_huge_pages++;
1299 h->nr_huge_pages_node[nid]++;
1300 spin_unlock(&hugetlb_lock);
1301}
1302
1303static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1304{
1305 int i;
1306 int nr_pages = 1 << order;
1307 struct page *p = page + 1;
1308
1309
1310 set_compound_order(page, order);
1311 __ClearPageReserved(page);
1312 __SetPageHead(page);
1313 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326 __ClearPageReserved(p);
1327 set_page_count(p, 0);
1328 set_compound_head(p, page);
1329 }
1330 atomic_set(compound_mapcount_ptr(page), -1);
1331}
1332
1333
1334
1335
1336
1337
1338int PageHuge(struct page *page)
1339{
1340 if (!PageCompound(page))
1341 return 0;
1342
1343 page = compound_head(page);
1344 return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1345}
1346EXPORT_SYMBOL_GPL(PageHuge);
1347
1348
1349
1350
1351
1352int PageHeadHuge(struct page *page_head)
1353{
1354 if (!PageHead(page_head))
1355 return 0;
1356
1357 return get_compound_page_dtor(page_head) == free_huge_page;
1358}
1359
1360pgoff_t __basepage_index(struct page *page)
1361{
1362 struct page *page_head = compound_head(page);
1363 pgoff_t index = page_index(page_head);
1364 unsigned long compound_idx;
1365
1366 if (!PageHuge(page_head))
1367 return page_index(page);
1368
1369 if (compound_order(page_head) >= MAX_ORDER)
1370 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1371 else
1372 compound_idx = page - page_head;
1373
1374 return (index << compound_order(page_head)) + compound_idx;
1375}
1376
1377static struct page *alloc_buddy_huge_page(struct hstate *h,
1378 gfp_t gfp_mask, int nid, nodemask_t *nmask)
1379{
1380 int order = huge_page_order(h);
1381 struct page *page;
1382
1383 gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
1384 if (nid == NUMA_NO_NODE)
1385 nid = numa_mem_id();
1386 page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
1387 if (page)
1388 __count_vm_event(HTLB_BUDDY_PGALLOC);
1389 else
1390 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1391
1392 return page;
1393}
1394
1395
1396
1397
1398
1399static struct page *alloc_fresh_huge_page(struct hstate *h,
1400 gfp_t gfp_mask, int nid, nodemask_t *nmask)
1401{
1402 struct page *page;
1403
1404 if (hstate_is_gigantic(h))
1405 page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
1406 else
1407 page = alloc_buddy_huge_page(h, gfp_mask,
1408 nid, nmask);
1409 if (!page)
1410 return NULL;
1411
1412 if (hstate_is_gigantic(h))
1413 prep_compound_gigantic_page(page, huge_page_order(h));
1414 prep_new_huge_page(h, page, page_to_nid(page));
1415
1416 return page;
1417}
1418
1419
1420
1421
1422
1423static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1424{
1425 struct page *page;
1426 int nr_nodes, node;
1427 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1428
1429 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1430 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed);
1431 if (page)
1432 break;
1433 }
1434
1435 if (!page)
1436 return 0;
1437
1438 put_page(page);
1439
1440 return 1;
1441}
1442
1443
1444
1445
1446
1447
1448
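/*
 * Free one pool huge page, taken from the next node in the round robin
 * that has a free page available.  When @acct_surplus is set, only nodes
 * with surplus pages are considered and the surplus counters are
 * decremented as well.  Returns 1 if a page was freed, 0 otherwise.
 */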
1449static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1450 bool acct_surplus)
1451{
1452 int nr_nodes, node;
1453 int ret = 0;
1454
1455 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1456
1457
1458
1459
1460 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1461 !list_empty(&h->hugepage_freelists[node])) {
1462 struct page *page =
1463 list_entry(h->hugepage_freelists[node].next,
1464 struct page, lru);
1465 list_del(&page->lru);
1466 h->free_huge_pages--;
1467 h->free_huge_pages_node[node]--;
1468 if (acct_surplus) {
1469 h->surplus_huge_pages--;
1470 h->surplus_huge_pages_node[node]--;
1471 }
1472 update_and_free_page(h, page);
1473 ret = 1;
1474 break;
1475 }
1476 }
1477
1478 return ret;
1479}
1480
1481
1482
1483
1484
1485
1486
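/*
 * Dissolve a free huge page back into free buddy pages.  Fails with
 * -EBUSY if the page is in use or if freeing it would leave outstanding
 * reservations without a free page to back them.
 */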
1487int dissolve_free_huge_page(struct page *page)
1488{
1489 int rc = -EBUSY;
1490
1491 spin_lock(&hugetlb_lock);
1492 if (PageHuge(page) && !page_count(page)) {
1493 struct page *head = compound_head(page);
1494 struct hstate *h = page_hstate(head);
1495 int nid = page_to_nid(head);
1496 if (h->free_huge_pages - h->resv_huge_pages == 0)
1497 goto out;
1498
1499
1500
1501
1502 if (PageHWPoison(head) && page != head) {
1503 SetPageHWPoison(page);
1504 ClearPageHWPoison(head);
1505 }
1506 list_del(&head->lru);
1507 h->free_huge_pages--;
1508 h->free_huge_pages_node[nid]--;
1509 h->max_huge_pages--;
1510 update_and_free_page(h, head);
1511 rc = 0;
1512 }
1513out:
1514 spin_unlock(&hugetlb_lock);
1515 return rc;
1516}
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1527{
1528 unsigned long pfn;
1529 struct page *page;
1530 int rc = 0;
1531
1532 if (!hugepages_supported())
1533 return rc;
1534
1535 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1536 page = pfn_to_page(pfn);
1537 if (PageHuge(page) && !page_count(page)) {
1538 rc = dissolve_free_huge_page(page);
1539 if (rc)
1540 break;
1541 }
1542 }
1543
1544 return rc;
1545}
1546
1547
1548
1549
1550static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
1551 int nid, nodemask_t *nmask)
1552{
1553 struct page *page = NULL;
1554
1555 if (hstate_is_gigantic(h))
1556 return NULL;
1557
1558 spin_lock(&hugetlb_lock);
1559 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
1560 goto out_unlock;
1561 spin_unlock(&hugetlb_lock);
1562
1563 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
1564 if (!page)
1565 return NULL;
1566
1567 spin_lock(&hugetlb_lock);
1568
1569
1570
1571
1572
1573
1574
1575 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1576 SetPageHugeTemporary(page);
1577 put_page(page);
1578 page = NULL;
1579 } else {
1580 h->surplus_huge_pages++;
1581 h->surplus_huge_pages_node[page_to_nid(page)]++;
1582 }
1583
1584out_unlock:
1585 spin_unlock(&hugetlb_lock);
1586
1587 return page;
1588}
1589
1590struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
1591 int nid, nodemask_t *nmask)
1592{
1593 struct page *page;
1594
1595 if (hstate_is_gigantic(h))
1596 return NULL;
1597
1598 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
1599 if (!page)
1600 return NULL;
1601
1602
1603
1604
1605
1606 SetPageHugeTemporary(page);
1607
1608 return page;
1609}
1610
1611
1612
1613
1614static
1615struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
1616 struct vm_area_struct *vma, unsigned long addr)
1617{
1618 struct page *page;
1619 struct mempolicy *mpol;
1620 gfp_t gfp_mask = htlb_alloc_mask(h);
1621 int nid;
1622 nodemask_t *nodemask;
1623
1624 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
1625 page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
1626 mpol_cond_put(mpol);
1627
1628 return page;
1629}
1630
1631
1632struct page *alloc_huge_page_node(struct hstate *h, int nid)
1633{
1634 gfp_t gfp_mask = htlb_alloc_mask(h);
1635 struct page *page = NULL;
1636
1637 if (nid != NUMA_NO_NODE)
1638 gfp_mask |= __GFP_THISNODE;
1639
1640 spin_lock(&hugetlb_lock);
1641 if (h->free_huge_pages - h->resv_huge_pages > 0)
1642 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
1643 spin_unlock(&hugetlb_lock);
1644
1645 if (!page)
1646 page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
1647
1648 return page;
1649}
1650
1651
1652struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
1653 nodemask_t *nmask)
1654{
1655 gfp_t gfp_mask = htlb_alloc_mask(h);
1656
1657 spin_lock(&hugetlb_lock);
1658 if (h->free_huge_pages - h->resv_huge_pages > 0) {
1659 struct page *page;
1660
1661 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
1662 if (page) {
1663 spin_unlock(&hugetlb_lock);
1664 return page;
1665 }
1666 }
1667 spin_unlock(&hugetlb_lock);
1668
1669 return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
1670}
1671
1672
1673struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
1674 unsigned long address)
1675{
1676 struct mempolicy *mpol;
1677 nodemask_t *nodemask;
1678 struct page *page;
1679 gfp_t gfp_mask;
1680 int node;
1681
1682 gfp_mask = htlb_alloc_mask(h);
1683 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1684 page = alloc_huge_page_nodemask(h, node, nodemask);
1685 mpol_cond_put(mpol);
1686
1687 return page;
1688}
1689
1690
1691
1692
1693
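/*
 * Try to grow the pool with surplus pages so that it can accommodate a
 * reservation of @delta additional huge pages.  Called and returns with
 * hugetlb_lock held, but drops the lock while allocating.  Returns 0 on
 * success, -ENOMEM if not enough surplus pages could be allocated.
 */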
1694static int gather_surplus_pages(struct hstate *h, int delta)
1695{
1696 struct list_head surplus_list;
1697 struct page *page, *tmp;
1698 int ret, i;
1699 int needed, allocated;
1700 bool alloc_ok = true;
1701
1702 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1703 if (needed <= 0) {
1704 h->resv_huge_pages += delta;
1705 return 0;
1706 }
1707
1708 allocated = 0;
1709 INIT_LIST_HEAD(&surplus_list);
1710
1711 ret = -ENOMEM;
1712retry:
1713 spin_unlock(&hugetlb_lock);
1714 for (i = 0; i < needed; i++) {
1715 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
1716 NUMA_NO_NODE, NULL);
1717 if (!page) {
1718 alloc_ok = false;
1719 break;
1720 }
1721 list_add(&page->lru, &surplus_list);
1722 cond_resched();
1723 }
1724 allocated += i;
1725
1726
1727
1728
1729
1730 spin_lock(&hugetlb_lock);
1731 needed = (h->resv_huge_pages + delta) -
1732 (h->free_huge_pages + allocated);
1733 if (needed > 0) {
1734 if (alloc_ok)
1735 goto retry;
1736
1737
1738
1739
1740
1741 goto free;
1742 }
1743
1744
1745
1746
1747
1748
1749
1750
1751 needed += allocated;
1752 h->resv_huge_pages += delta;
1753 ret = 0;
1754
1755
1756 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1757 if ((--needed) < 0)
1758 break;
1759
1760
1761
1762
1763 put_page_testzero(page);
1764 VM_BUG_ON_PAGE(page_count(page), page);
1765 enqueue_huge_page(h, page);
1766 }
1767free:
1768 spin_unlock(&hugetlb_lock);
1769
1770
1771 list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1772 put_page(page);
1773 spin_lock(&hugetlb_lock);
1774
1775 return ret;
1776}
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
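/*
 * A reservation of @unused_resv_pages huge pages is no longer needed.
 * Drop the reservation and free any surplus pages it was keeping alive
 * back to the buddy allocator, one page at a time (taking a breather via
 * cond_resched_lock() between pages).  Called with hugetlb_lock held.
 */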
1792static void return_unused_surplus_pages(struct hstate *h,
1793 unsigned long unused_resv_pages)
1794{
1795 unsigned long nr_pages;
1796
1797
1798 if (hstate_is_gigantic(h))
1799 goto out;
1800
1801
1802
1803
1804
1805 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819 while (nr_pages--) {
1820 h->resv_huge_pages--;
1821 unused_resv_pages--;
1822 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1823 goto out;
1824 cond_resched_lock(&hugetlb_lock);
1825 }
1826
1827out:
1828
1829 h->resv_huge_pages -= unused_resv_pages;
1830}
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
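/*
 * vma_needs_reservation(), vma_commit_reservation(), vma_end_reservation()
 * and vma_add_reservation() below are thin wrappers around
 * __vma_reservation_common() and differ only in how they manipulate the
 * reserve map entry for the page at @addr: check whether a reservation
 * exists, commit one after a successful allocation, abort an in-progress
 * one, or add an entry when cleaning up after an error.  For private
 * mappings the return value is additionally adjusted for the
 * HPAGE_RESV_OWNER case.
 */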
1857enum vma_resv_mode {
1858 VMA_NEEDS_RESV,
1859 VMA_COMMIT_RESV,
1860 VMA_END_RESV,
1861 VMA_ADD_RESV,
1862};
1863static long __vma_reservation_common(struct hstate *h,
1864 struct vm_area_struct *vma, unsigned long addr,
1865 enum vma_resv_mode mode)
1866{
1867 struct resv_map *resv;
1868 pgoff_t idx;
1869 long ret;
1870
1871 resv = vma_resv_map(vma);
1872 if (!resv)
1873 return 1;
1874
1875 idx = vma_hugecache_offset(h, vma, addr);
1876 switch (mode) {
1877 case VMA_NEEDS_RESV:
1878 ret = region_chg(resv, idx, idx + 1);
1879 break;
1880 case VMA_COMMIT_RESV:
1881 ret = region_add(resv, idx, idx + 1);
1882 break;
1883 case VMA_END_RESV:
1884 region_abort(resv, idx, idx + 1);
1885 ret = 0;
1886 break;
1887 case VMA_ADD_RESV:
1888 if (vma->vm_flags & VM_MAYSHARE)
1889 ret = region_add(resv, idx, idx + 1);
1890 else {
1891 region_abort(resv, idx, idx + 1);
1892 ret = region_del(resv, idx, idx + 1);
1893 }
1894 break;
1895 default:
1896 BUG();
1897 }
1898
1899 if (vma->vm_flags & VM_MAYSHARE)
1900 return ret;
1901 else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915 if (ret)
1916 return 0;
1917 else
1918 return 1;
1919 }
1920 else
1921 return ret < 0 ? ret : 0;
1922}
1923
1924static long vma_needs_reservation(struct hstate *h,
1925 struct vm_area_struct *vma, unsigned long addr)
1926{
1927 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1928}
1929
1930static long vma_commit_reservation(struct hstate *h,
1931 struct vm_area_struct *vma, unsigned long addr)
1932{
1933 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1934}
1935
1936static void vma_end_reservation(struct hstate *h,
1937 struct vm_area_struct *vma, unsigned long addr)
1938{
1939 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1940}
1941
1942static long vma_add_reservation(struct hstate *h,
1943 struct vm_area_struct *vma, unsigned long addr)
1944{
1945 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
1946}
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957
1958
1959static void restore_reserve_on_error(struct hstate *h,
1960 struct vm_area_struct *vma, unsigned long address,
1961 struct page *page)
1962{
1963 if (unlikely(PagePrivate(page))) {
1964 long rc = vma_needs_reservation(h, vma, address);
1965
1966 if (unlikely(rc < 0)) {
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978 ClearPagePrivate(page);
1979 } else if (rc) {
1980 rc = vma_add_reservation(h, vma, address);
1981 if (unlikely(rc < 0))
1982
1983
1984
1985
1986 ClearPagePrivate(page);
1987 } else
1988 vma_end_reservation(h, vma, address);
1989 }
1990}
1991
1992struct page *alloc_huge_page(struct vm_area_struct *vma,
1993 unsigned long addr, int avoid_reserve)
1994{
1995 struct hugepage_subpool *spool = subpool_vma(vma);
1996 struct hstate *h = hstate_vma(vma);
1997 struct page *page;
1998 long map_chg, map_commit;
1999 long gbl_chg;
2000 int ret, idx;
2001 struct hugetlb_cgroup *h_cg;
2002
2003 idx = hstate_index(h);
2004
2005
2006
2007
2008
2009 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2010 if (map_chg < 0)
2011 return ERR_PTR(-ENOMEM);
2012
2013
2014
2015
2016
2017
2018
2019
2020 if (map_chg || avoid_reserve) {
2021 gbl_chg = hugepage_subpool_get_pages(spool, 1);
2022 if (gbl_chg < 0) {
2023 vma_end_reservation(h, vma, addr);
2024 return ERR_PTR(-ENOSPC);
2025 }
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035 if (avoid_reserve)
2036 gbl_chg = 1;
2037 }
2038
2039 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2040 if (ret)
2041 goto out_subpool_put;
2042
2043 spin_lock(&hugetlb_lock);
2044
2045
2046
2047
2048
2049 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2050 if (!page) {
2051 spin_unlock(&hugetlb_lock);
2052 page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
2053 if (!page)
2054 goto out_uncharge_cgroup;
2055 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2056 SetPagePrivate(page);
2057 h->resv_huge_pages--;
2058 }
2059 spin_lock(&hugetlb_lock);
2060 list_move(&page->lru, &h->hugepage_activelist);
2061
2062 }
2063 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2064 spin_unlock(&hugetlb_lock);
2065
2066 set_page_private(page, (unsigned long)spool);
2067
2068 map_commit = vma_commit_reservation(h, vma, addr);
2069 if (unlikely(map_chg > map_commit)) {
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079 long rsv_adjust;
2080
2081 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2082 hugetlb_acct_memory(h, -rsv_adjust);
2083 }
2084 return page;
2085
2086out_uncharge_cgroup:
2087 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2088out_subpool_put:
2089 if (map_chg || avoid_reserve)
2090 hugepage_subpool_put_pages(spool, 1);
2091 vma_end_reservation(h, vma, addr);
2092 return ERR_PTR(-ENOSPC);
2093}
2094
2095int alloc_bootmem_huge_page(struct hstate *h)
2096 __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2097int __alloc_bootmem_huge_page(struct hstate *h)
2098{
2099 struct huge_bootmem_page *m;
2100 int nr_nodes, node;
2101
2102 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2103 void *addr;
2104
2105 addr = memblock_alloc_try_nid_raw(
2106 huge_page_size(h), huge_page_size(h),
2107 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
2108 if (addr) {
2109
2110
2111
2112
2113
2114 m = addr;
2115 goto found;
2116 }
2117 }
2118 return 0;
2119
2120found:
2121 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2122
2123 INIT_LIST_HEAD(&m->list);
2124 list_add(&m->list, &huge_boot_pages);
2125 m->hstate = h;
2126 return 1;
2127}
2128
2129static void __init prep_compound_huge_page(struct page *page,
2130 unsigned int order)
2131{
2132 if (unlikely(order > (MAX_ORDER - 1)))
2133 prep_compound_gigantic_page(page, order);
2134 else
2135 prep_compound_page(page, order);
2136}
2137
2138
2139static void __init gather_bootmem_prealloc(void)
2140{
2141 struct huge_bootmem_page *m;
2142
2143 list_for_each_entry(m, &huge_boot_pages, list) {
2144 struct page *page = virt_to_page(m);
2145 struct hstate *h = m->hstate;
2146
2147 WARN_ON(page_count(page) != 1);
2148 prep_compound_huge_page(page, h->order);
2149 WARN_ON(PageReserved(page));
2150 prep_new_huge_page(h, page, page_to_nid(page));
2151 put_page(page);
2152
2153
2154
2155
2156
2157
2158
2159 if (hstate_is_gigantic(h))
2160 adjust_managed_page_count(page, 1 << h->order);
2161 cond_resched();
2162 }
2163}
2164
2165static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2166{
2167 unsigned long i;
2168
2169 for (i = 0; i < h->max_huge_pages; ++i) {
2170 if (hstate_is_gigantic(h)) {
2171 if (!alloc_bootmem_huge_page(h))
2172 break;
2173 } else if (!alloc_pool_huge_page(h,
2174 &node_states[N_MEMORY]))
2175 break;
2176 cond_resched();
2177 }
2178 if (i < h->max_huge_pages) {
2179 char buf[32];
2180
2181 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2182 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
2183 h->max_huge_pages, buf, i);
2184 h->max_huge_pages = i;
2185 }
2186}
2187
2188static void __init hugetlb_init_hstates(void)
2189{
2190 struct hstate *h;
2191
2192 for_each_hstate(h) {
2193 if (minimum_order > huge_page_order(h))
2194 minimum_order = huge_page_order(h);
2195
2196
2197 if (!hstate_is_gigantic(h))
2198 hugetlb_hstate_alloc_pages(h);
2199 }
2200 VM_BUG_ON(minimum_order == UINT_MAX);
2201}
2202
2203static void __init report_hugepages(void)
2204{
2205 struct hstate *h;
2206
2207 for_each_hstate(h) {
2208 char buf[32];
2209
2210 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2211 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2212 buf, h->free_huge_pages);
2213 }
2214}
2215
2216#ifdef CONFIG_HIGHMEM
2217static void try_to_free_low(struct hstate *h, unsigned long count,
2218 nodemask_t *nodes_allowed)
2219{
2220 int i;
2221
2222 if (hstate_is_gigantic(h))
2223 return;
2224
2225 for_each_node_mask(i, *nodes_allowed) {
2226 struct page *page, *next;
2227 struct list_head *freel = &h->hugepage_freelists[i];
2228 list_for_each_entry_safe(page, next, freel, lru) {
2229 if (count >= h->nr_huge_pages)
2230 return;
2231 if (PageHighMem(page))
2232 continue;
2233 list_del(&page->lru);
2234 update_and_free_page(h, page);
2235 h->free_huge_pages--;
2236 h->free_huge_pages_node[page_to_nid(page)]--;
2237 }
2238 }
2239}
2240#else
2241static inline void try_to_free_low(struct hstate *h, unsigned long count,
2242 nodemask_t *nodes_allowed)
2243{
2244}
2245#endif
2246
2247
2248
2249
2250
2251
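/*
 * Increment or decrement surplus_huge_pages by one.  Keep the per-node
 * counters balanced by picking the next node in the round robin that can
 * legally be adjusted.  Returns 1 if an adjustment was made, 0 otherwise.
 * Called with hugetlb_lock held.
 */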
2252static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2253 int delta)
2254{
2255 int nr_nodes, node;
2256
2257 VM_BUG_ON(delta != -1 && delta != 1);
2258
2259 if (delta < 0) {
2260 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2261 if (h->surplus_huge_pages_node[node])
2262 goto found;
2263 }
2264 } else {
2265 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2266 if (h->surplus_huge_pages_node[node] <
2267 h->nr_huge_pages_node[node])
2268 goto found;
2269 }
2270 }
2271 return 0;
2272
2273found:
2274 h->surplus_huge_pages += delta;
2275 h->surplus_huge_pages_node[node] += delta;
2276 return 1;
2277}
2278
2279#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2280static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
2281 nodemask_t *nodes_allowed)
2282{
2283 unsigned long min_count, ret;
2284
2285 if (hstate_is_gigantic(h) && !gigantic_page_supported())
2286 return h->max_huge_pages;
2287
2288
2289
2290
2291
2292
2293
2294
2295
2296
2297
2298
2299 spin_lock(&hugetlb_lock);
2300 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2301 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2302 break;
2303 }
2304
2305 while (count > persistent_huge_pages(h)) {
2306
2307
2308
2309
2310
2311 spin_unlock(&hugetlb_lock);
2312
2313
2314 cond_resched();
2315
2316 ret = alloc_pool_huge_page(h, nodes_allowed);
2317 spin_lock(&hugetlb_lock);
2318 if (!ret)
2319 goto out;
2320
2321
2322 if (signal_pending(current))
2323 goto out;
2324 }
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339
2340
2341 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2342 min_count = max(count, min_count);
2343 try_to_free_low(h, min_count, nodes_allowed);
2344 while (min_count < persistent_huge_pages(h)) {
2345 if (!free_pool_huge_page(h, nodes_allowed, 0))
2346 break;
2347 cond_resched_lock(&hugetlb_lock);
2348 }
2349 while (count < persistent_huge_pages(h)) {
2350 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2351 break;
2352 }
2353out:
2354 ret = persistent_huge_pages(h);
2355 spin_unlock(&hugetlb_lock);
2356 return ret;
2357}
2358
2359#define HSTATE_ATTR_RO(_name) \
2360 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2361
2362#define HSTATE_ATTR(_name) \
2363 static struct kobj_attribute _name##_attr = \
2364 __ATTR(_name, 0644, _name##_show, _name##_store)
2365
2366static struct kobject *hugepages_kobj;
2367static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2368
2369static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2370
2371static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2372{
2373 int i;
2374
2375 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2376 if (hstate_kobjs[i] == kobj) {
2377 if (nidp)
2378 *nidp = NUMA_NO_NODE;
2379 return &hstates[i];
2380 }
2381
2382 return kobj_to_node_hstate(kobj, nidp);
2383}
2384
2385static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2386 struct kobj_attribute *attr, char *buf)
2387{
2388 struct hstate *h;
2389 unsigned long nr_huge_pages;
2390 int nid;
2391
2392 h = kobj_to_hstate(kobj, &nid);
2393 if (nid == NUMA_NO_NODE)
2394 nr_huge_pages = h->nr_huge_pages;
2395 else
2396 nr_huge_pages = h->nr_huge_pages_node[nid];
2397
2398 return sprintf(buf, "%lu\n", nr_huge_pages);
2399}
2400
2401static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2402 struct hstate *h, int nid,
2403 unsigned long count, size_t len)
2404{
2405 int err;
2406 NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2407
2408 if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2409 err = -EINVAL;
2410 goto out;
2411 }
2412
2413 if (nid == NUMA_NO_NODE) {
2414
2415
2416
2417 if (!(obey_mempolicy &&
2418 init_nodemask_of_mempolicy(nodes_allowed))) {
2419 NODEMASK_FREE(nodes_allowed);
2420 nodes_allowed = &node_states[N_MEMORY];
2421 }
2422 } else if (nodes_allowed) {
2423
2424
2425
2426
2427 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2428 init_nodemask_of_node(nodes_allowed, nid);
2429 } else
2430 nodes_allowed = &node_states[N_MEMORY];
2431
2432 h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2433
2434 if (nodes_allowed != &node_states[N_MEMORY])
2435 NODEMASK_FREE(nodes_allowed);
2436
2437 return len;
2438out:
2439 NODEMASK_FREE(nodes_allowed);
2440 return err;
2441}
2442
2443static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2444 struct kobject *kobj, const char *buf,
2445 size_t len)
2446{
2447 struct hstate *h;
2448 unsigned long count;
2449 int nid;
2450 int err;
2451
2452 err = kstrtoul(buf, 10, &count);
2453 if (err)
2454 return err;
2455
2456 h = kobj_to_hstate(kobj, &nid);
2457 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2458}
2459
2460static ssize_t nr_hugepages_show(struct kobject *kobj,
2461 struct kobj_attribute *attr, char *buf)
2462{
2463 return nr_hugepages_show_common(kobj, attr, buf);
2464}
2465
2466static ssize_t nr_hugepages_store(struct kobject *kobj,
2467 struct kobj_attribute *attr, const char *buf, size_t len)
2468{
2469 return nr_hugepages_store_common(false, kobj, buf, len);
2470}
2471HSTATE_ATTR(nr_hugepages);
2472
2473#ifdef CONFIG_NUMA
2474
2475
2476
2477
2478
2479static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2480 struct kobj_attribute *attr, char *buf)
2481{
2482 return nr_hugepages_show_common(kobj, attr, buf);
2483}
2484
2485static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2486 struct kobj_attribute *attr, const char *buf, size_t len)
2487{
2488 return nr_hugepages_store_common(true, kobj, buf, len);
2489}
2490HSTATE_ATTR(nr_hugepages_mempolicy);
2491#endif
2492
2493
2494static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2495 struct kobj_attribute *attr, char *buf)
2496{
2497 struct hstate *h = kobj_to_hstate(kobj, NULL);
2498 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2499}
2500
2501static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2502 struct kobj_attribute *attr, const char *buf, size_t count)
2503{
2504 int err;
2505 unsigned long input;
2506 struct hstate *h = kobj_to_hstate(kobj, NULL);
2507
2508 if (hstate_is_gigantic(h))
2509 return -EINVAL;
2510
2511 err = kstrtoul(buf, 10, &input);
2512 if (err)
2513 return err;
2514
2515 spin_lock(&hugetlb_lock);
2516 h->nr_overcommit_huge_pages = input;
2517 spin_unlock(&hugetlb_lock);
2518
2519 return count;
2520}
2521HSTATE_ATTR(nr_overcommit_hugepages);
2522
2523static ssize_t free_hugepages_show(struct kobject *kobj,
2524 struct kobj_attribute *attr, char *buf)
2525{
2526 struct hstate *h;
2527 unsigned long free_huge_pages;
2528 int nid;
2529
2530 h = kobj_to_hstate(kobj, &nid);
2531 if (nid == NUMA_NO_NODE)
2532 free_huge_pages = h->free_huge_pages;
2533 else
2534 free_huge_pages = h->free_huge_pages_node[nid];
2535
2536 return sprintf(buf, "%lu\n", free_huge_pages);
2537}
2538HSTATE_ATTR_RO(free_hugepages);
2539
2540static ssize_t resv_hugepages_show(struct kobject *kobj,
2541 struct kobj_attribute *attr, char *buf)
2542{
2543 struct hstate *h = kobj_to_hstate(kobj, NULL);
2544 return sprintf(buf, "%lu\n", h->resv_huge_pages);
2545}
2546HSTATE_ATTR_RO(resv_hugepages);
2547
2548static ssize_t surplus_hugepages_show(struct kobject *kobj,
2549 struct kobj_attribute *attr, char *buf)
2550{
2551 struct hstate *h;
2552 unsigned long surplus_huge_pages;
2553 int nid;
2554
2555 h = kobj_to_hstate(kobj, &nid);
2556 if (nid == NUMA_NO_NODE)
2557 surplus_huge_pages = h->surplus_huge_pages;
2558 else
2559 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2560
2561 return sprintf(buf, "%lu\n", surplus_huge_pages);
2562}
2563HSTATE_ATTR_RO(surplus_hugepages);
2564
2565static struct attribute *hstate_attrs[] = {
2566 &nr_hugepages_attr.attr,
2567 &nr_overcommit_hugepages_attr.attr,
2568 &free_hugepages_attr.attr,
2569 &resv_hugepages_attr.attr,
2570 &surplus_hugepages_attr.attr,
2571#ifdef CONFIG_NUMA
2572 &nr_hugepages_mempolicy_attr.attr,
2573#endif
2574 NULL,
2575};
2576
2577static const struct attribute_group hstate_attr_group = {
2578 .attrs = hstate_attrs,
2579};
2580
2581static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2582 struct kobject **hstate_kobjs,
2583 const struct attribute_group *hstate_attr_group)
2584{
2585 int retval;
2586 int hi = hstate_index(h);
2587
2588 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2589 if (!hstate_kobjs[hi])
2590 return -ENOMEM;
2591
2592 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2593 if (retval)
2594 kobject_put(hstate_kobjs[hi]);
2595
2596 return retval;
2597}
2598
2599static void __init hugetlb_sysfs_init(void)
2600{
2601 struct hstate *h;
2602 int err;
2603
2604 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2605 if (!hugepages_kobj)
2606 return;
2607
2608 for_each_hstate(h) {
2609 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2610 hstate_kobjs, &hstate_attr_group);
2611 if (err)
2612 pr_err("Hugetlb: Unable to add hstate %s", h->name);
2613 }
2614}
2615
2616#ifdef CONFIG_NUMA
2617
2618
2619
2620
2621
2622
2623
2624
2625struct node_hstate {
2626 struct kobject *hugepages_kobj;
2627 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2628};
2629static struct node_hstate node_hstates[MAX_NUMNODES];
2630
2631
2632
2633
2634static struct attribute *per_node_hstate_attrs[] = {
2635 &nr_hugepages_attr.attr,
2636 &free_hugepages_attr.attr,
2637 &surplus_hugepages_attr.attr,
2638 NULL,
2639};
2640
2641static const struct attribute_group per_node_hstate_attr_group = {
2642 .attrs = per_node_hstate_attrs,
2643};
2644
2645
2646
2647
2648
2649static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2650{
2651 int nid;
2652
2653 for (nid = 0; nid < nr_node_ids; nid++) {
2654 struct node_hstate *nhs = &node_hstates[nid];
2655 int i;
2656 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2657 if (nhs->hstate_kobjs[i] == kobj) {
2658 if (nidp)
2659 *nidp = nid;
2660 return &hstates[i];
2661 }
2662 }
2663
2664 BUG();
2665 return NULL;
2666}
2667
2668
2669
2670
2671
2672static void hugetlb_unregister_node(struct node *node)
2673{
2674 struct hstate *h;
2675 struct node_hstate *nhs = &node_hstates[node->dev.id];
2676
2677 if (!nhs->hugepages_kobj)
2678 return;
2679
2680 for_each_hstate(h) {
2681 int idx = hstate_index(h);
2682 if (nhs->hstate_kobjs[idx]) {
2683 kobject_put(nhs->hstate_kobjs[idx]);
2684 nhs->hstate_kobjs[idx] = NULL;
2685 }
2686 }
2687
2688 kobject_put(nhs->hugepages_kobj);
2689 nhs->hugepages_kobj = NULL;
2690}
2691
2692
2693
2694
2695
2696
2697static void hugetlb_register_node(struct node *node)
2698{
2699 struct hstate *h;
2700 struct node_hstate *nhs = &node_hstates[node->dev.id];
2701 int err;
2702
2703 if (nhs->hugepages_kobj)
2704 return;
2705
2706 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2707 &node->dev.kobj);
2708 if (!nhs->hugepages_kobj)
2709 return;
2710
2711 for_each_hstate(h) {
2712 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2713 nhs->hstate_kobjs,
2714 &per_node_hstate_attr_group);
2715 if (err) {
2716 pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2717 h->name, node->dev.id);
2718 hugetlb_unregister_node(node);
2719 break;
2720 }
2721 }
2722}
2723
2724
2725
2726
2727
2728
2729static void __init hugetlb_register_all_nodes(void)
2730{
2731 int nid;
2732
2733 for_each_node_state(nid, N_MEMORY) {
2734 struct node *node = node_devices[nid];
2735 if (node->dev.id == nid)
2736 hugetlb_register_node(node);
2737 }
2738
	/*
	 * Let the node device driver know we're here so it can
	 * [un]register hstate attributes on node hotplug.
	 */
2743 register_hugetlbfs_with_node(hugetlb_register_node,
2744 hugetlb_unregister_node);
2745}
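
/*
 * With CONFIG_NUMA the same attributes are also exposed per node, e.g.
 * (illustrative):
 *
 *   /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 */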
2746#else
2747
2748static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2749{
2750 BUG();
2751 if (nidp)
2752 *nidp = -1;
2753 return NULL;
2754}
2755
2756static void hugetlb_register_all_nodes(void) { }
2757
2758#endif
2759
2760static int __init hugetlb_init(void)
2761{
2762 int i;
2763
2764 if (!hugepages_supported())
2765 return 0;
2766
2767 if (!size_to_hstate(default_hstate_size)) {
2768 if (default_hstate_size != 0) {
2769 pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n",
2770 default_hstate_size, HPAGE_SIZE);
2771 }
2772
2773 default_hstate_size = HPAGE_SIZE;
2774 if (!size_to_hstate(default_hstate_size))
2775 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2776 }
2777 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2778 if (default_hstate_max_huge_pages) {
2779 if (!default_hstate.max_huge_pages)
2780 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2781 }
2782
2783 hugetlb_init_hstates();
2784 gather_bootmem_prealloc();
2785 report_hugepages();
2786
2787 hugetlb_sysfs_init();
2788 hugetlb_register_all_nodes();
2789 hugetlb_cgroup_file_init();
2790
2791#ifdef CONFIG_SMP
2792 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2793#else
2794 num_fault_mutexes = 1;
2795#endif
2796 hugetlb_fault_mutex_table =
2797 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
2798 GFP_KERNEL);
2799 BUG_ON(!hugetlb_fault_mutex_table);
2800
2801 for (i = 0; i < num_fault_mutexes; i++)
2802 mutex_init(&hugetlb_fault_mutex_table[i]);
2803 return 0;
2804}
2805subsys_initcall(hugetlb_init);

/* Should be called on processing a hugepagesz=... option */
2808void __init hugetlb_bad_size(void)
2809{
2810 parsed_valid_hugepagesz = false;
2811}
2812
2813void __init hugetlb_add_hstate(unsigned int order)
2814{
2815 struct hstate *h;
2816 unsigned long i;
2817
2818 if (size_to_hstate(PAGE_SIZE << order)) {
2819 pr_warn("hugepagesz= specified twice, ignoring\n");
2820 return;
2821 }
2822 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2823 BUG_ON(order == 0);
2824 h = &hstates[hugetlb_max_hstate++];
2825 h->order = order;
2826 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2827 h->nr_huge_pages = 0;
2828 h->free_huge_pages = 0;
2829 for (i = 0; i < MAX_NUMNODES; ++i)
2830 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2831 INIT_LIST_HEAD(&h->hugepage_activelist);
2832 h->next_nid_to_alloc = first_memory_node;
2833 h->next_nid_to_free = first_memory_node;
2834 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2835 huge_page_size(h)/1024);
2836
2837 parsed_hstate = h;
2838}
2839
2840static int __init hugetlb_nrpages_setup(char *s)
2841{
2842 unsigned long *mhp;
2843 static unsigned long *last_mhp;
2844
2845 if (!parsed_valid_hugepagesz) {
		pr_warn("hugepages = %s preceded by an unsupported hugepagesz, ignoring\n",
			s);
		parsed_valid_hugepagesz = true;
		return 1;
	}
	/*
	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
	 * yet, so this hugepages= parameter goes to the "default hstate".
	 */
2855 else if (!hugetlb_max_hstate)
2856 mhp = &default_hstate_max_huge_pages;
2857 else
2858 mhp = &parsed_hstate->max_huge_pages;
2859
2860 if (mhp == last_mhp) {
2861 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2862 return 1;
2863 }
2864
2865 if (sscanf(s, "%lu", mhp) <= 0)
2866 *mhp = 0;

	/*
	 * Global state is always initialized later in hugetlb_init.
	 * But we need to allocate >= MAX_ORDER hstates here early to still
	 * use the bootmem allocator.
	 */
2873 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2874 hugetlb_hstate_alloc_pages(parsed_hstate);
2875
2876 last_mhp = mhp;
2877
2878 return 1;
2879}
2880__setup("hugepages=", hugetlb_nrpages_setup);
2881
2882static int __init hugetlb_default_setup(char *s)
2883{
2884 default_hstate_size = memparse(s, &s);
2885 return 1;
2886}
2887__setup("default_hugepagesz=", hugetlb_default_setup);
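
/*
 * Example boot command line (illustrative) -- note that each hugepages=
 * applies to the most recently parsed hugepagesz= that precedes it:
 *
 *   default_hugepagesz=1G hugepagesz=1G hugepages=2 hugepagesz=2M hugepages=512
 */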
2888
2889static unsigned int cpuset_mems_nr(unsigned int *array)
2890{
2891 int node;
2892 unsigned int nr = 0;
2893
2894 for_each_node_mask(node, cpuset_current_mems_allowed)
2895 nr += array[node];
2896
2897 return nr;
2898}
2899
2900#ifdef CONFIG_SYSCTL
2901static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2902 struct ctl_table *table, int write,
2903 void __user *buffer, size_t *length, loff_t *ppos)
2904{
2905 struct hstate *h = &default_hstate;
2906 unsigned long tmp = h->max_huge_pages;
2907 int ret;
2908
2909 if (!hugepages_supported())
2910 return -EOPNOTSUPP;
2911
2912 table->data = &tmp;
2913 table->maxlen = sizeof(unsigned long);
2914 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2915 if (ret)
2916 goto out;
2917
2918 if (write)
2919 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2920 NUMA_NO_NODE, tmp, *length);
2921out:
2922 return ret;
2923}
2924
2925int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2926 void __user *buffer, size_t *length, loff_t *ppos)
2927{
2928
2929 return hugetlb_sysctl_handler_common(false, table, write,
2930 buffer, length, ppos);
2931}
2932
2933#ifdef CONFIG_NUMA
2934int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2935 void __user *buffer, size_t *length, loff_t *ppos)
2936{
2937 return hugetlb_sysctl_handler_common(true, table, write,
2938 buffer, length, ppos);
2939}
2940#endif
2941
2942int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2943 void __user *buffer,
2944 size_t *length, loff_t *ppos)
2945{
2946 struct hstate *h = &default_hstate;
2947 unsigned long tmp;
2948 int ret;
2949
2950 if (!hugepages_supported())
2951 return -EOPNOTSUPP;
2952
2953 tmp = h->nr_overcommit_huge_pages;
2954
2955 if (write && hstate_is_gigantic(h))
2956 return -EINVAL;
2957
2958 table->data = &tmp;
2959 table->maxlen = sizeof(unsigned long);
2960 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2961 if (ret)
2962 goto out;
2963
2964 if (write) {
2965 spin_lock(&hugetlb_lock);
2966 h->nr_overcommit_huge_pages = tmp;
2967 spin_unlock(&hugetlb_lock);
2968 }
2969out:
2970 return ret;
2971}
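
/*
 * Sketch of how these sysctls are typically exercised from userspace
 * (illustrative):
 *
 *   echo 256 > /proc/sys/vm/nr_hugepages
 *   echo 64  > /proc/sys/vm/nr_overcommit_hugepages
 */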
2972
2973#endif
2974
2975void hugetlb_report_meminfo(struct seq_file *m)
2976{
2977 struct hstate *h;
2978 unsigned long total = 0;
2979
2980 if (!hugepages_supported())
2981 return;
2982
2983 for_each_hstate(h) {
2984 unsigned long count = h->nr_huge_pages;
2985
2986 total += (PAGE_SIZE << huge_page_order(h)) * count;
2987
2988 if (h == &default_hstate)
2989 seq_printf(m,
2990 "HugePages_Total: %5lu\n"
2991 "HugePages_Free: %5lu\n"
2992 "HugePages_Rsvd: %5lu\n"
2993 "HugePages_Surp: %5lu\n"
2994 "Hugepagesize: %8lu kB\n",
2995 count,
2996 h->free_huge_pages,
2997 h->resv_huge_pages,
2998 h->surplus_huge_pages,
2999 (PAGE_SIZE << huge_page_order(h)) / 1024);
3000 }
3001
3002 seq_printf(m, "Hugetlb: %8lu kB\n", total / 1024);
3003}
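
/*
 * Example /proc/meminfo output produced above (illustrative values):
 *
 *   HugePages_Total:      64
 *   HugePages_Free:       64
 *   HugePages_Rsvd:        0
 *   HugePages_Surp:        0
 *   Hugepagesize:       2048 kB
 *   Hugetlb:          131072 kB
 */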
3004
3005int hugetlb_report_node_meminfo(int nid, char *buf)
3006{
3007 struct hstate *h = &default_hstate;
3008 if (!hugepages_supported())
3009 return 0;
3010 return sprintf(buf,
3011 "Node %d HugePages_Total: %5u\n"
3012 "Node %d HugePages_Free: %5u\n"
3013 "Node %d HugePages_Surp: %5u\n",
3014 nid, h->nr_huge_pages_node[nid],
3015 nid, h->free_huge_pages_node[nid],
3016 nid, h->surplus_huge_pages_node[nid]);
3017}
3018
3019void hugetlb_show_meminfo(void)
3020{
3021 struct hstate *h;
3022 int nid;
3023
3024 if (!hugepages_supported())
3025 return;
3026
3027 for_each_node_state(nid, N_MEMORY)
3028 for_each_hstate(h)
3029 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3030 nid,
3031 h->nr_huge_pages_node[nid],
3032 h->free_huge_pages_node[nid],
3033 h->surplus_huge_pages_node[nid],
3034 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3035}
3036
3037void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3038{
3039 seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3040 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3041}
3042
/* Return the number pages of memory we physically have, in PAGE_SIZE units */
3044unsigned long hugetlb_total_pages(void)
3045{
3046 struct hstate *h;
3047 unsigned long nr_total_pages = 0;
3048
3049 for_each_hstate(h)
3050 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3051 return nr_total_pages;
3052}
3053
3054static int hugetlb_acct_memory(struct hstate *h, long delta)
3055{
3056 int ret = -ENOMEM;
3057
3058 spin_lock(&hugetlb_lock);

	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * reservation is completely rubbish in the presence of cpuset because
	 * the reservation is not checked against page availability for the
	 * current cpuset. Application can still potentially OOM'ed by kernel
	 * with lack of free htlb page in cpuset that the task is in.
	 * Attempt to enforce strict accounting with cpuset is almost
	 * impossible (or too ugly) because cpuset is too fluid that
	 * task or memory node can be dynamically moved between cpusets.
	 *
	 * The change of semantics for shared hugetlb mapping with cpuset is
	 * undesirable. However, to preserve soft reservation semantics, we
	 * fall back to check against current free page availability as
	 * a best attempt and hopefully to minimize the impact of changing
	 * semantics that cpuset has.
	 */
3076 if (delta > 0) {
3077 if (gather_surplus_pages(h, delta) < 0)
3078 goto out;
3079
3080 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
3081 return_unused_surplus_pages(h, delta);
3082 goto out;
3083 }
3084 }
3085
3086 ret = 0;
3087 if (delta < 0)
3088 return_unused_surplus_pages(h, (unsigned long) -delta);
3089
3090out:
3091 spin_unlock(&hugetlb_lock);
3092 return ret;
3093}
3094
3095static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3096{
3097 struct resv_map *resv = vma_resv_map(vma);
3098
	/*
	 * This new VMA should share its siblings reservation map if present.
	 * The VMA will only ever have a valid reservation map pointer where
	 * it is being copied for another still existing VMA.  As that VMA
	 * has a reference to the reservation map it cannot disappear until
	 * after this open call completes.  It is therefore safe to take a
	 * new reference here without additional locking.
	 */
3107 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3108 kref_get(&resv->refs);
3109}
3110
3111static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3112{
3113 struct hstate *h = hstate_vma(vma);
3114 struct resv_map *resv = vma_resv_map(vma);
3115 struct hugepage_subpool *spool = subpool_vma(vma);
3116 unsigned long reserve, start, end;
3117 long gbl_reserve;
3118
3119 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3120 return;
3121
3122 start = vma_hugecache_offset(h, vma, vma->vm_start);
3123 end = vma_hugecache_offset(h, vma, vma->vm_end);
3124
3125 reserve = (end - start) - region_count(resv, start, end);
3126
3127 kref_put(&resv->refs, resv_map_release);
3128
3129 if (reserve) {
		/*
		 * Decrement reserve counts.  The global reserve count may be
		 * adjusted if the subpool has a minimum size.
		 */
3134 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3135 hugetlb_acct_memory(h, -gbl_reserve);
3136 }
3137}
3138
3139static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3140{
3141 if (addr & ~(huge_page_mask(hstate_vma(vma))))
3142 return -EINVAL;
3143 return 0;
3144}
3145
3146static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
3147{
3148 struct hstate *hstate = hstate_vma(vma);
3149
3150 return 1UL << huge_page_shift(hstate);
3151}
3152
/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG() if we
 * get this far.
 */
3159static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
3160{
3161 BUG();
3162 return 0;
3163}
3164
/*
 * When a new function is introduced to vm_operations_struct and added
 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
 * This is because under System V memory model, mappings created via
 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
 * otherwise mappings created via shmget/shmat use the vm_operations_struct
 * defined in shm.c and would hit a NULL pointer dereference.
 */
3172const struct vm_operations_struct hugetlb_vm_ops = {
3173 .fault = hugetlb_vm_op_fault,
3174 .open = hugetlb_vm_op_open,
3175 .close = hugetlb_vm_op_close,
3176 .split = hugetlb_vm_op_split,
3177 .pagesize = hugetlb_vm_op_pagesize,
3178};
3179
3180static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3181 int writable)
3182{
3183 pte_t entry;
3184
3185 if (writable) {
3186 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3187 vma->vm_page_prot)));
3188 } else {
3189 entry = huge_pte_wrprotect(mk_huge_pte(page,
3190 vma->vm_page_prot));
3191 }
3192 entry = pte_mkyoung(entry);
3193 entry = pte_mkhuge(entry);
3194 entry = arch_make_huge_pte(entry, vma, page, writable);
3195
3196 return entry;
3197}
3198
3199static void set_huge_ptep_writable(struct vm_area_struct *vma,
3200 unsigned long address, pte_t *ptep)
3201{
3202 pte_t entry;
3203
3204 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3205 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3206 update_mmu_cache(vma, address, ptep);
3207}
3208
3209bool is_hugetlb_entry_migration(pte_t pte)
3210{
3211 swp_entry_t swp;
3212
3213 if (huge_pte_none(pte) || pte_present(pte))
3214 return false;
3215 swp = pte_to_swp_entry(pte);
3216 if (non_swap_entry(swp) && is_migration_entry(swp))
3217 return true;
3218 else
3219 return false;
3220}
3221
3222static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3223{
3224 swp_entry_t swp;
3225
3226 if (huge_pte_none(pte) || pte_present(pte))
3227 return 0;
3228 swp = pte_to_swp_entry(pte);
3229 if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3230 return 1;
3231 else
3232 return 0;
3233}
3234
3235int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3236 struct vm_area_struct *vma)
3237{
3238 pte_t *src_pte, *dst_pte, entry, dst_entry;
3239 struct page *ptepage;
3240 unsigned long addr;
3241 int cow;
3242 struct hstate *h = hstate_vma(vma);
3243 unsigned long sz = huge_page_size(h);
3244 struct mmu_notifier_range range;
3245 int ret = 0;
3246
3247 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3248
3249 if (cow) {
3250 mmu_notifier_range_init(&range, src, vma->vm_start,
3251 vma->vm_end);
3252 mmu_notifier_invalidate_range_start(&range);
3253 }
3254
3255 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3256 spinlock_t *src_ptl, *dst_ptl;
3257 src_pte = huge_pte_offset(src, addr, sz);
3258 if (!src_pte)
3259 continue;
3260 dst_pte = huge_pte_alloc(dst, addr, sz);
3261 if (!dst_pte) {
3262 ret = -ENOMEM;
3263 break;
3264 }
3265
		/*
		 * If the pagetables are shared don't copy or take references.
		 * dst_pte == src_pte is the common case of src/dest sharing.
		 *
		 * However, src could have 'unshared' and dst shares with
		 * another vma.  If dst_pte !none, this implies sharing.
		 * Check here before taking page table lock, and once again
		 * after taking the lock below.
		 */
3275 dst_entry = huge_ptep_get(dst_pte);
3276 if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
3277 continue;
3278
3279 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3280 src_ptl = huge_pte_lockptr(h, src, src_pte);
3281 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3282 entry = huge_ptep_get(src_pte);
3283 dst_entry = huge_ptep_get(dst_pte);
3284 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
3285
3286
3287
3288
3289
3290 ;
3291 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3292 is_hugetlb_entry_hwpoisoned(entry))) {
3293 swp_entry_t swp_entry = pte_to_swp_entry(entry);
3294
3295 if (is_write_migration_entry(swp_entry) && cow) {
3296
3297
3298
3299
3300 make_migration_entry_read(&swp_entry);
3301 entry = swp_entry_to_pte(swp_entry);
3302 set_huge_swap_pte_at(src, addr, src_pte,
3303 entry, sz);
3304 }
3305 set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
3306 } else {
3307 if (cow) {
				/*
				 * No need to notify as we are downgrading page
				 * table protection not changing it to point
				 * to a new page.
				 *
				 * See Documentation/vm/mmu_notifier.rst
				 */
3315 huge_ptep_set_wrprotect(src, addr, src_pte);
3316 }
3317 entry = huge_ptep_get(src_pte);
3318 ptepage = pte_page(entry);
3319 get_page(ptepage);
3320 page_dup_rmap(ptepage, true);
3321 set_huge_pte_at(dst, addr, dst_pte, entry);
3322 hugetlb_count_add(pages_per_huge_page(h), dst);
3323 }
3324 spin_unlock(src_ptl);
3325 spin_unlock(dst_ptl);
3326 }
3327
3328 if (cow)
3329 mmu_notifier_invalidate_range_end(&range);
3330
3331 return ret;
3332}
3333
3334void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3335 unsigned long start, unsigned long end,
3336 struct page *ref_page)
3337{
3338 struct mm_struct *mm = vma->vm_mm;
3339 unsigned long address;
3340 pte_t *ptep;
3341 pte_t pte;
3342 spinlock_t *ptl;
3343 struct page *page;
3344 struct hstate *h = hstate_vma(vma);
3345 unsigned long sz = huge_page_size(h);
3346 struct mmu_notifier_range range;
3347
3348 WARN_ON(!is_vm_hugetlb_page(vma));
3349 BUG_ON(start & ~huge_page_mask(h));
3350 BUG_ON(end & ~huge_page_mask(h));
3351
3352
3353
3354
3355
3356 tlb_remove_check_page_size_change(tlb, sz);
3357 tlb_start_vma(tlb, vma);
3358
3359
3360
3361
3362 mmu_notifier_range_init(&range, mm, start, end);
3363 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
3364 mmu_notifier_invalidate_range_start(&range);
3365 address = start;
3366 for (; address < end; address += sz) {
3367 ptep = huge_pte_offset(mm, address, sz);
3368 if (!ptep)
3369 continue;
3370
3371 ptl = huge_pte_lock(h, mm, ptep);
3372 if (huge_pmd_unshare(mm, &address, ptep)) {
3373 spin_unlock(ptl);
3374
3375
3376
3377
3378 continue;
3379 }
3380
3381 pte = huge_ptep_get(ptep);
3382 if (huge_pte_none(pte)) {
3383 spin_unlock(ptl);
3384 continue;
3385 }
3386
3387
3388
3389
3390
3391 if (unlikely(!pte_present(pte))) {
3392 huge_pte_clear(mm, address, ptep, sz);
3393 spin_unlock(ptl);
3394 continue;
3395 }
3396
3397 page = pte_page(pte);
3398
3399
3400
3401
3402
3403 if (ref_page) {
3404 if (page != ref_page) {
3405 spin_unlock(ptl);
3406 continue;
3407 }
3408
3409
3410
3411
3412
3413 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3414 }
3415
3416 pte = huge_ptep_get_and_clear(mm, address, ptep);
3417 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
3418 if (huge_pte_dirty(pte))
3419 set_page_dirty(page);
3420
3421 hugetlb_count_sub(pages_per_huge_page(h), mm);
3422 page_remove_rmap(page, true);
3423
3424 spin_unlock(ptl);
3425 tlb_remove_page_size(tlb, page, huge_page_size(h));
3426
3427
3428
3429 if (ref_page)
3430 break;
3431 }
3432 mmu_notifier_invalidate_range_end(&range);
3433 tlb_end_vma(tlb, vma);
3434}
3435
3436void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3437 struct vm_area_struct *vma, unsigned long start,
3438 unsigned long end, struct page *ref_page)
3439{
3440 __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3441
	/*
	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
	 * test will fail on a vma being torn down, and not grab a page table
	 * on its way out.  We're lucky that the flag has such an appropriate
	 * name, and can in fact be safely cleared here.  We could clear it
	 * before the __unmap_hugepage_range above, but all that's necessary
	 * is to clear it before releasing the i_mmap_rwsem.  This works
	 * because in the contexts this is called, the VMA is about to be
	 * destroyed and the i_mmap_rwsem is held.
	 */
3452 vma->vm_flags &= ~VM_MAYSHARE;
3453}
3454
3455void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3456 unsigned long end, struct page *ref_page)
3457{
3458 struct mm_struct *mm;
3459 struct mmu_gather tlb;
3460 unsigned long tlb_start = start;
3461 unsigned long tlb_end = end;
3462
	/*
	 * If shared PMDs were possibly used within this vma range, adjust
	 * start/end for worst case tlb flushing.
	 * Note that we can not be sure if PMDs are shared until we try to
	 * unmap pages.  However, we want to make sure TLB flushing covers
	 * the largest possible range.
	 */
3470 adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
3471
3472 mm = vma->vm_mm;
3473
3474 tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
3475 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3476 tlb_finish_mmu(&tlb, tlb_start, tlb_end);
3477}
3478
/*
 * This is called when the original mapper is failing to COW a MAP_PRIVATE
 * mapping it owns the reserve page for. The intention is to unmap the page
 * from other VMAs and let the children be SIGKILLed if they are faulting the
 * same region.
 */
3485static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3486 struct page *page, unsigned long address)
3487{
3488 struct hstate *h = hstate_vma(vma);
3489 struct vm_area_struct *iter_vma;
3490 struct address_space *mapping;
3491 pgoff_t pgoff;
3492
3493
3494
3495
3496
3497 address = address & huge_page_mask(h);
3498 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3499 vma->vm_pgoff;
3500 mapping = vma->vm_file->f_mapping;
3501
3502
3503
3504
3505
3506
3507 i_mmap_lock_write(mapping);
3508 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3509
3510 if (iter_vma == vma)
3511 continue;
3512
3513
3514
3515
3516
3517
3518 if (iter_vma->vm_flags & VM_MAYSHARE)
3519 continue;
3520
		/*
		 * Unmap the page from other VMAs without their own reserves.
		 * They get marked to be SIGKILLed if they fault in these
		 * areas. This is because a future no-page fault on this VMA
		 * could insert a zeroed page instead of the data existing
		 * from the time of fork. This would look like data corruption.
		 */
3528 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3529 unmap_hugepage_range(iter_vma, address,
3530 address + huge_page_size(h), page);
3531 }
3532 i_mmap_unlock_write(mapping);
3533}
3534
/*
 * hugetlb_cow() should be called with page lock of the original hugepage held.
 * Called with the hugetlb fault mutex held and pte_page locked so we
 * cannot race with other handlers or page migration.
 * Keep the pte_same checks anyway to make transition from the mutex easier.
 */
3541static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3542 unsigned long address, pte_t *ptep,
3543 struct page *pagecache_page, spinlock_t *ptl)
3544{
3545 pte_t pte;
3546 struct hstate *h = hstate_vma(vma);
3547 struct page *old_page, *new_page;
3548 int outside_reserve = 0;
3549 vm_fault_t ret = 0;
3550 unsigned long haddr = address & huge_page_mask(h);
3551 struct mmu_notifier_range range;
3552
3553 pte = huge_ptep_get(ptep);
3554 old_page = pte_page(pte);
3555
3556retry_avoidcopy:
	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
3559 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3560 page_move_anon_rmap(old_page, vma);
3561 set_huge_ptep_writable(vma, haddr, ptep);
3562 return 0;
3563 }
3564
	/*
	 * If the process that created a MAP_PRIVATE mapping is about to
	 * perform a COW due to a shared page count, attempt to satisfy
	 * the allocation without using the existing reserves. The pagecache
	 * page is used to determine if the reserve at this address was
	 * consumed or not. If reserves were used, a partial faulted mapping
	 * at the time of fork() could consume its reserves on COW instead
	 * of the full address range.
	 */
3574 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3575 old_page != pagecache_page)
3576 outside_reserve = 1;
3577
3578 get_page(old_page);
3579
	/*
	 * Drop the page table lock before allocating a new huge page; the
	 * allocator may sleep.  The lock is retaken before returning to the
	 * caller, and the pte is re-checked under it below.
	 */
3584 spin_unlock(ptl);
3585 new_page = alloc_huge_page(vma, haddr, outside_reserve);
3586
3587 if (IS_ERR(new_page)) {
		/*
		 * If a process owning a MAP_PRIVATE mapping fails to COW,
		 * it is due to references held by a child and an insufficient
		 * huge page pool. To guarantee the original mappers
		 * reliability, unmap the page from child processes. The child
		 * may get SIGKILLed if it later faults.
		 */
3595 if (outside_reserve) {
3596 put_page(old_page);
3597 BUG_ON(huge_pte_none(pte));
3598 unmap_ref_private(mm, vma, old_page, haddr);
3599 BUG_ON(huge_pte_none(pte));
3600 spin_lock(ptl);
3601 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3602 if (likely(ptep &&
3603 pte_same(huge_ptep_get(ptep), pte)))
3604 goto retry_avoidcopy;
3605
3606
3607
3608
3609 return 0;
3610 }
3611
3612 ret = vmf_error(PTR_ERR(new_page));
3613 goto out_release_old;
3614 }
3615
	/*
	 * When the original hugepage is shared one, it does not have
	 * anon_vma prepared.
	 */
3620 if (unlikely(anon_vma_prepare(vma))) {
3621 ret = VM_FAULT_OOM;
3622 goto out_release_all;
3623 }
3624
3625 copy_user_huge_page(new_page, old_page, address, vma,
3626 pages_per_huge_page(h));
3627 __SetPageUptodate(new_page);
3628
3629 mmu_notifier_range_init(&range, mm, haddr, haddr + huge_page_size(h));
3630 mmu_notifier_invalidate_range_start(&range);
3631
3632
3633
3634
3635
3636 spin_lock(ptl);
3637 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3638 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3639 ClearPagePrivate(new_page);
3640
3641
3642 huge_ptep_clear_flush(vma, haddr, ptep);
3643 mmu_notifier_invalidate_range(mm, range.start, range.end);
3644 set_huge_pte_at(mm, haddr, ptep,
3645 make_huge_pte(vma, new_page, 1));
3646 page_remove_rmap(old_page, true);
3647 hugepage_add_new_anon_rmap(new_page, vma, haddr);
3648 set_page_huge_active(new_page);
3649
3650 new_page = old_page;
3651 }
3652 spin_unlock(ptl);
3653 mmu_notifier_invalidate_range_end(&range);
3654out_release_all:
3655 restore_reserve_on_error(h, vma, haddr, new_page);
3656 put_page(new_page);
3657out_release_old:
3658 put_page(old_page);

	spin_lock(ptl);	/* Caller expects lock to be held */
3661 return ret;
3662}
3663
/* Return the pagecache page at a given address within a VMA */
3665static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3666 struct vm_area_struct *vma, unsigned long address)
3667{
3668 struct address_space *mapping;
3669 pgoff_t idx;
3670
3671 mapping = vma->vm_file->f_mapping;
3672 idx = vma_hugecache_offset(h, vma, address);
3673
3674 return find_lock_page(mapping, idx);
3675}
3676
/*
 * Return whether there is a pagecache page to back given address within VMA.
 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
 */
3681static bool hugetlbfs_pagecache_present(struct hstate *h,
3682 struct vm_area_struct *vma, unsigned long address)
3683{
3684 struct address_space *mapping;
3685 pgoff_t idx;
3686 struct page *page;
3687
3688 mapping = vma->vm_file->f_mapping;
3689 idx = vma_hugecache_offset(h, vma, address);
3690
3691 page = find_get_page(mapping, idx);
3692 if (page)
3693 put_page(page);
3694 return page != NULL;
3695}
3696
3697int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3698 pgoff_t idx)
3699{
3700 struct inode *inode = mapping->host;
3701 struct hstate *h = hstate_inode(inode);
3702 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3703
3704 if (err)
3705 return err;
3706 ClearPagePrivate(page);
3707
3708
3709
3710
3711
3712 set_page_dirty(page);
3713
3714 spin_lock(&inode->i_lock);
3715 inode->i_blocks += blocks_per_huge_page(h);
3716 spin_unlock(&inode->i_lock);
3717 return 0;
3718}
3719
3720static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
3721 struct vm_area_struct *vma,
3722 struct address_space *mapping, pgoff_t idx,
3723 unsigned long address, pte_t *ptep, unsigned int flags)
3724{
3725 struct hstate *h = hstate_vma(vma);
3726 vm_fault_t ret = VM_FAULT_SIGBUS;
3727 int anon_rmap = 0;
3728 unsigned long size;
3729 struct page *page;
3730 pte_t new_pte;
3731 spinlock_t *ptl;
3732 unsigned long haddr = address & huge_page_mask(h);
3733 bool new_page = false;
3734
	/*
	 * Currently, we are forced to kill the process in the event the
	 * original mapper has unmapped pages from the child due to a failed
	 * COW. Warn that such a situation has occurred as it may not be obvious.
	 */
3740 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3741 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3742 current->pid);
3743 return ret;
3744 }
3745
3746
3747
3748
3749
3750retry:
3751 page = find_lock_page(mapping, idx);
3752 if (!page) {
3753 size = i_size_read(mapping->host) >> huge_page_shift(h);
3754 if (idx >= size)
3755 goto out;
3756
3757
3758
3759
3760 if (userfaultfd_missing(vma)) {
3761 u32 hash;
3762 struct vm_fault vmf = {
3763 .vma = vma,
3764 .address = haddr,
3765 .flags = flags,
3766
3767
3768
3769
3770
3771
3772
3773 };
3774
			/*
			 * hugetlb_fault_mutex must be dropped before
			 * handling userfault.  Reacquire after handling
			 * fault to make calling code simpler.
			 */
3780 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
3781 idx, haddr);
3782 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3783 ret = handle_userfault(&vmf, VM_UFFD_MISSING);
3784 mutex_lock(&hugetlb_fault_mutex_table[hash]);
3785 goto out;
3786 }
3787
3788 page = alloc_huge_page(vma, haddr, 0);
3789 if (IS_ERR(page)) {
3790 ret = vmf_error(PTR_ERR(page));
3791 goto out;
3792 }
3793 clear_huge_page(page, address, pages_per_huge_page(h));
3794 __SetPageUptodate(page);
3795 new_page = true;
3796
3797 if (vma->vm_flags & VM_MAYSHARE) {
3798 int err = huge_add_to_page_cache(page, mapping, idx);
3799 if (err) {
3800 put_page(page);
3801 if (err == -EEXIST)
3802 goto retry;
3803 goto out;
3804 }
3805 } else {
3806 lock_page(page);
3807 if (unlikely(anon_vma_prepare(vma))) {
3808 ret = VM_FAULT_OOM;
3809 goto backout_unlocked;
3810 }
3811 anon_rmap = 1;
3812 }
3813 } else {
		/*
		 * If a memory error occurred between mmap() and fault, the
		 * process may not have a hwpoisoned swap entry for the errored
		 * virtual address, so block the hugepage fault with a
		 * PG_hwpoison check here.
		 */
3819 if (unlikely(PageHWPoison(page))) {
3820 ret = VM_FAULT_HWPOISON |
3821 VM_FAULT_SET_HINDEX(hstate_index(h));
3822 goto backout_unlocked;
3823 }
3824 }
3825
	/*
	 * If we are going to COW a private mapping later, we examine the
	 * pending reservations for this page now. This will ensure that
	 * any allocations necessary to record that reservation occur outside
	 * the spinlock.
	 */
3832 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3833 if (vma_needs_reservation(h, vma, haddr) < 0) {
3834 ret = VM_FAULT_OOM;
3835 goto backout_unlocked;
3836 }
3837
3838 vma_end_reservation(h, vma, haddr);
3839 }
3840
3841 ptl = huge_pte_lock(h, mm, ptep);
3842 size = i_size_read(mapping->host) >> huge_page_shift(h);
3843 if (idx >= size)
3844 goto backout;
3845
3846 ret = 0;
3847 if (!huge_pte_none(huge_ptep_get(ptep)))
3848 goto backout;
3849
3850 if (anon_rmap) {
3851 ClearPagePrivate(page);
3852 hugepage_add_new_anon_rmap(page, vma, haddr);
3853 } else
3854 page_dup_rmap(page, true);
3855 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3856 && (vma->vm_flags & VM_SHARED)));
3857 set_huge_pte_at(mm, haddr, ptep, new_pte);
3858
3859 hugetlb_count_add(pages_per_huge_page(h), mm);
3860 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3861
3862 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
3863 }
3864
3865 spin_unlock(ptl);
3866
	/*
	 * Only make newly allocated pages active.  Existing pages found
	 * in the pagecache could be !page_huge_active() if they have been
	 * isolated for migration.
	 */
3872 if (new_page)
3873 set_page_huge_active(page);
3874
3875 unlock_page(page);
3876out:
3877 return ret;
3878
3879backout:
3880 spin_unlock(ptl);
3881backout_unlocked:
3882 unlock_page(page);
3883 restore_reserve_on_error(h, vma, haddr, page);
3884 put_page(page);
3885 goto out;
3886}
3887
3888#ifdef CONFIG_SMP
3889u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3890 struct vm_area_struct *vma,
3891 struct address_space *mapping,
3892 pgoff_t idx, unsigned long address)
3893{
3894 unsigned long key[2];
3895 u32 hash;
3896
3897 if (vma->vm_flags & VM_SHARED) {
3898 key[0] = (unsigned long) mapping;
3899 key[1] = idx;
3900 } else {
3901 key[0] = (unsigned long) mm;
3902 key[1] = address >> huge_page_shift(h);
3903 }
3904
3905 hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3906
3907 return hash & (num_fault_mutexes - 1);
3908}
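
/*
 * The hash returned above is used to pick one mutex out of
 * hugetlb_fault_mutex_table, e.g. (as done in hugetlb_fault() below,
 * illustrative):
 *
 *	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 */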
3909#else
/*
 * For uniprocessor systems we always use a single mutex, so just
 * return 0 and avoid the hashing overhead.
 */
3914u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3915 struct vm_area_struct *vma,
3916 struct address_space *mapping,
3917 pgoff_t idx, unsigned long address)
3918{
3919 return 0;
3920}
3921#endif
3922
3923vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3924 unsigned long address, unsigned int flags)
3925{
3926 pte_t *ptep, entry;
3927 spinlock_t *ptl;
3928 vm_fault_t ret;
3929 u32 hash;
3930 pgoff_t idx;
3931 struct page *page = NULL;
3932 struct page *pagecache_page = NULL;
3933 struct hstate *h = hstate_vma(vma);
3934 struct address_space *mapping;
3935 int need_wait_lock = 0;
3936 unsigned long haddr = address & huge_page_mask(h);
3937
3938 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3939 if (ptep) {
3940 entry = huge_ptep_get(ptep);
3941 if (unlikely(is_hugetlb_entry_migration(entry))) {
3942 migration_entry_wait_huge(vma, mm, ptep);
3943 return 0;
3944 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3945 return VM_FAULT_HWPOISON_LARGE |
3946 VM_FAULT_SET_HINDEX(hstate_index(h));
3947 } else {
3948 ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
3949 if (!ptep)
3950 return VM_FAULT_OOM;
3951 }
3952
3953 mapping = vma->vm_file->f_mapping;
3954 idx = vma_hugecache_offset(h, vma, haddr);
3955
	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
3961 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
3962 mutex_lock(&hugetlb_fault_mutex_table[hash]);
3963
3964 entry = huge_ptep_get(ptep);
3965 if (huge_pte_none(entry)) {
3966 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3967 goto out_mutex;
3968 }
3969
3970 ret = 0;

	/*
	 * entry could be a migration/hwpoison entry at this point, so this
	 * check prevents the kernel from going below assuming that we have
	 * an active hugepage in pagecache. This goto expects the 2nd page
	 * fault, and is_hugetlb_entry_(migration|hwpoisoned) check will
	 * properly handle it.
	 */
3979 if (!pte_present(entry))
3980 goto out_mutex;
3981
	/*
	 * If we are going to COW the mapping later, we examine the pending
	 * reservations for this page now. This will ensure that any
	 * allocations necessary to record that reservation occur outside the
	 * spinlock. For private mappings, we also lookup the pagecache
	 * page now as it is used to determine if a reservation has been
	 * consumed.
	 */
3990 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3991 if (vma_needs_reservation(h, vma, haddr) < 0) {
3992 ret = VM_FAULT_OOM;
3993 goto out_mutex;
3994 }
3995
3996 vma_end_reservation(h, vma, haddr);
3997
3998 if (!(vma->vm_flags & VM_MAYSHARE))
3999 pagecache_page = hugetlbfs_pagecache_page(h,
4000 vma, haddr);
4001 }
4002
4003 ptl = huge_pte_lock(h, mm, ptep);
4004
4005
4006 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
4007 goto out_ptl;
4008
	/*
	 * hugetlb_cow() requires page locks of pte_page(entry) and
	 * pagecache_page, so here we need take the former one
	 * when page != pagecache_page or !pagecache_page.
	 */
4014 page = pte_page(entry);
4015 if (page != pagecache_page)
4016 if (!trylock_page(page)) {
4017 need_wait_lock = 1;
4018 goto out_ptl;
4019 }
4020
4021 get_page(page);
4022
4023 if (flags & FAULT_FLAG_WRITE) {
4024 if (!huge_pte_write(entry)) {
4025 ret = hugetlb_cow(mm, vma, address, ptep,
4026 pagecache_page, ptl);
4027 goto out_put_page;
4028 }
4029 entry = huge_pte_mkdirty(entry);
4030 }
4031 entry = pte_mkyoung(entry);
4032 if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
4033 flags & FAULT_FLAG_WRITE))
4034 update_mmu_cache(vma, haddr, ptep);
4035out_put_page:
4036 if (page != pagecache_page)
4037 unlock_page(page);
4038 put_page(page);
4039out_ptl:
4040 spin_unlock(ptl);
4041
4042 if (pagecache_page) {
4043 unlock_page(pagecache_page);
4044 put_page(pagecache_page);
4045 }
4046out_mutex:
4047 mutex_unlock(&hugetlb_fault_mutex_table[hash]);

	/*
	 * Generally it's safe to hold refcount during waiting page lock. But
	 * here we just wait to defer the next page fault to avoid busy loop and
	 * the page is not used after unlocked before returning from the current
	 * page fault. So we are safe from accessing freed page, even if we wait
	 * for its state to change.
	 */
4055 if (need_wait_lock)
4056 wait_on_page_locked(page);
4057 return ret;
4058}
4059
/*
 * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
 * modifications for huge pages.
 */
4064int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4065 pte_t *dst_pte,
4066 struct vm_area_struct *dst_vma,
4067 unsigned long dst_addr,
4068 unsigned long src_addr,
4069 struct page **pagep)
4070{
4071 struct address_space *mapping;
4072 pgoff_t idx;
4073 unsigned long size;
4074 int vm_shared = dst_vma->vm_flags & VM_SHARED;
4075 struct hstate *h = hstate_vma(dst_vma);
4076 pte_t _dst_pte;
4077 spinlock_t *ptl;
4078 int ret;
4079 struct page *page;
4080
4081 if (!*pagep) {
4082 ret = -ENOMEM;
4083 page = alloc_huge_page(dst_vma, dst_addr, 0);
4084 if (IS_ERR(page))
4085 goto out;
4086
4087 ret = copy_huge_page_from_user(page,
4088 (const void __user *) src_addr,
4089 pages_per_huge_page(h), false);
4090
4091
4092 if (unlikely(ret)) {
4093 ret = -ENOENT;
4094 *pagep = page;
4095
4096 goto out;
4097 }
4098 } else {
4099 page = *pagep;
4100 *pagep = NULL;
4101 }
4102
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
4108 __SetPageUptodate(page);
4109
4110 mapping = dst_vma->vm_file->f_mapping;
4111 idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4112
4113
4114
4115
4116 if (vm_shared) {
4117 size = i_size_read(mapping->host) >> huge_page_shift(h);
4118 ret = -EFAULT;
4119 if (idx >= size)
4120 goto out_release_nounlock;
4121
		/*
		 * Serialization between remove_inode_hugepages() and
		 * huge_add_to_page_cache() below happens through the
		 * hugetlb_fault_mutex_table that must be held by the caller.
		 */
4128 ret = huge_add_to_page_cache(page, mapping, idx);
4129 if (ret)
4130 goto out_release_nounlock;
4131 }
4132
4133 ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
4134 spin_lock(ptl);
4135
	/*
	 * Recheck the i_size after holding PT lock to make sure not
	 * to leave any page mapped (as page_mapped()) beyond the end
	 * of the i_size (remove_inode_hugepages() is strict about
	 * enforcing that). If we bail out here, we'll also leave a
	 * page in the radix tree in the vm_shared case beyond the end
	 * of the i_size, but remove_inode_hugepages() will take care
	 * of it as soon as we drop the hugetlb_fault_mutex_table.
	 */
4145 size = i_size_read(mapping->host) >> huge_page_shift(h);
4146 ret = -EFAULT;
4147 if (idx >= size)
4148 goto out_release_unlock;
4149
4150 ret = -EEXIST;
4151 if (!huge_pte_none(huge_ptep_get(dst_pte)))
4152 goto out_release_unlock;
4153
4154 if (vm_shared) {
4155 page_dup_rmap(page, true);
4156 } else {
4157 ClearPagePrivate(page);
4158 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
4159 }
4160
4161 _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
4162 if (dst_vma->vm_flags & VM_WRITE)
4163 _dst_pte = huge_pte_mkdirty(_dst_pte);
4164 _dst_pte = pte_mkyoung(_dst_pte);
4165
4166 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
4167
4168 (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
4169 dst_vma->vm_flags & VM_WRITE);
4170 hugetlb_count_add(pages_per_huge_page(h), dst_mm);
4171
4172
4173 update_mmu_cache(dst_vma, dst_addr, dst_pte);
4174
4175 spin_unlock(ptl);
4176 set_page_huge_active(page);
4177 if (vm_shared)
4178 unlock_page(page);
4179 ret = 0;
4180out:
4181 return ret;
4182out_release_unlock:
4183 spin_unlock(ptl);
4184 if (vm_shared)
4185 unlock_page(page);
4186out_release_nounlock:
4187 put_page(page);
4188 goto out;
4189}
4190
4191long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4192 struct page **pages, struct vm_area_struct **vmas,
4193 unsigned long *position, unsigned long *nr_pages,
4194 long i, unsigned int flags, int *nonblocking)
4195{
4196 unsigned long pfn_offset;
4197 unsigned long vaddr = *position;
4198 unsigned long remainder = *nr_pages;
4199 struct hstate *h = hstate_vma(vma);
4200 int err = -EFAULT;
4201
4202 while (vaddr < vma->vm_end && remainder) {
4203 pte_t *pte;
4204 spinlock_t *ptl = NULL;
4205 int absent;
4206 struct page *page;
4207
4208
4209
4210
4211
4212 if (fatal_signal_pending(current)) {
4213 remainder = 0;
4214 break;
4215 }
4216
		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 *
		 * Note that page table lock is not held when pte is null.
		 */
4224 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
4225 huge_page_size(h));
4226 if (pte)
4227 ptl = huge_pte_lock(h, mm, pte);
4228 absent = !pte || huge_pte_none(huge_ptep_get(pte));
4229
4230
4231
4232
4233
4234
4235
4236
4237 if (absent && (flags & FOLL_DUMP) &&
4238 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
4239 if (pte)
4240 spin_unlock(ptl);
4241 remainder = 0;
4242 break;
4243 }
4244
		/*
		 * We need to call hugetlb_fault for both hugepages under
		 * migration (in which case hugetlb_fault waits for the
		 * migration,) and hwpoisoned hugepages (in which case we need
		 * to prevent the caller from accessing them.)  To do this,
		 * is_swap_pte is used instead of is_hugetlb_entry_migration
		 * and is_hugetlb_entry_hwpoisoned, because it simply covers
		 * both cases, and because we can't follow correct pages
		 * directly from any kind of swap entries.
		 */
4255 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4256 ((flags & FOLL_WRITE) &&
4257 !huge_pte_write(huge_ptep_get(pte)))) {
4258 vm_fault_t ret;
4259 unsigned int fault_flags = 0;
4260
4261 if (pte)
4262 spin_unlock(ptl);
4263 if (flags & FOLL_WRITE)
4264 fault_flags |= FAULT_FLAG_WRITE;
4265 if (nonblocking)
4266 fault_flags |= FAULT_FLAG_ALLOW_RETRY;
4267 if (flags & FOLL_NOWAIT)
4268 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4269 FAULT_FLAG_RETRY_NOWAIT;
4270 if (flags & FOLL_TRIED) {
4271 VM_WARN_ON_ONCE(fault_flags &
4272 FAULT_FLAG_ALLOW_RETRY);
4273 fault_flags |= FAULT_FLAG_TRIED;
4274 }
4275 ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
4276 if (ret & VM_FAULT_ERROR) {
4277 err = vm_fault_to_errno(ret, flags);
4278 remainder = 0;
4279 break;
4280 }
4281 if (ret & VM_FAULT_RETRY) {
4282 if (nonblocking &&
4283 !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
4284 *nonblocking = 0;
4285 *nr_pages = 0;
				/*
				 * VM_FAULT_RETRY must not return an
				 * error, it will return zero
				 * instead.
				 *
				 * No need to update "position" as the
				 * caller will not check it after
				 * *nr_pages is set to 0.
				 */
4295 return i;
4296 }
4297 continue;
4298 }
4299
4300 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4301 page = pte_page(huge_ptep_get(pte));
4302
4303
4304
4305
4306
4307 if (unlikely(page_count(page) <= 0)) {
4308 if (pages) {
4309 spin_unlock(ptl);
4310 remainder = 0;
4311 err = -ENOMEM;
4312 break;
4313 }
4314 }
4315same_page:
4316 if (pages) {
4317 pages[i] = mem_map_offset(page, pfn_offset);
4318 get_page(pages[i]);
4319 }
4320
4321 if (vmas)
4322 vmas[i] = vma;
4323
4324 vaddr += PAGE_SIZE;
4325 ++pfn_offset;
4326 --remainder;
4327 ++i;
4328 if (vaddr < vma->vm_end && remainder &&
4329 pfn_offset < pages_per_huge_page(h)) {
4330
4331
4332
4333
4334 goto same_page;
4335 }
4336 spin_unlock(ptl);
4337 }
4338 *nr_pages = remainder;
4339
4340
4341
4342
4343
4344 *position = vaddr;
4345
4346 return i ? i : err;
4347}
4348
4349#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * Architectures with special requirements for evicting HUGETLB backing TLB
 * entries can implement flush_hugetlb_tlb_range; everyone else falls back
 * to the generic flush_tlb_range().
 */
4354#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
4355#endif
4356
4357unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4358 unsigned long address, unsigned long end, pgprot_t newprot)
4359{
4360 struct mm_struct *mm = vma->vm_mm;
4361 unsigned long start = address;
4362 pte_t *ptep;
4363 pte_t pte;
4364 struct hstate *h = hstate_vma(vma);
4365 unsigned long pages = 0;
4366 bool shared_pmd = false;
4367 struct mmu_notifier_range range;
4368
	/*
	 * In the case of shared PMDs, the area to flush could be beyond
	 * start/end.  Set range.start/range.end to cover the maximum possible
	 * range if PMD sharing is possible.
	 */
4374 mmu_notifier_range_init(&range, mm, start, end);
4375 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
4376
4377 BUG_ON(address >= end);
4378 flush_cache_range(vma, range.start, range.end);
4379
4380 mmu_notifier_invalidate_range_start(&range);
4381 i_mmap_lock_write(vma->vm_file->f_mapping);
4382 for (; address < end; address += huge_page_size(h)) {
4383 spinlock_t *ptl;
4384 ptep = huge_pte_offset(mm, address, huge_page_size(h));
4385 if (!ptep)
4386 continue;
4387 ptl = huge_pte_lock(h, mm, ptep);
4388 if (huge_pmd_unshare(mm, &address, ptep)) {
4389 pages++;
4390 spin_unlock(ptl);
4391 shared_pmd = true;
4392 continue;
4393 }
4394 pte = huge_ptep_get(ptep);
4395 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
4396 spin_unlock(ptl);
4397 continue;
4398 }
4399 if (unlikely(is_hugetlb_entry_migration(pte))) {
4400 swp_entry_t entry = pte_to_swp_entry(pte);
4401
4402 if (is_write_migration_entry(entry)) {
4403 pte_t newpte;
4404
4405 make_migration_entry_read(&entry);
4406 newpte = swp_entry_to_pte(entry);
4407 set_huge_swap_pte_at(mm, address, ptep,
4408 newpte, huge_page_size(h));
4409 pages++;
4410 }
4411 spin_unlock(ptl);
4412 continue;
4413 }
4414 if (!huge_pte_none(pte)) {
4415 pte_t old_pte;
4416
4417 old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
4418 pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
4419 pte = arch_make_huge_pte(pte, vma, NULL, 0);
4420 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
4421 pages++;
4422 }
4423 spin_unlock(ptl);
4424 }
4425
	/*
	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
	 * may have cleared our pud entry and done put_page on the page table:
	 * once we release i_mmap_rwsem, another task can do the final put_page
	 * and that page table be reused and filled with junk.  If we actually
	 * did unshare a page of pmds, flush the range corresponding to the pud.
	 */
4432 if (shared_pmd)
4433 flush_hugetlb_tlb_range(vma, range.start, range.end);
4434 else
4435 flush_hugetlb_tlb_range(vma, start, end);
4436
	/*
	 * No need to call mmu_notifier_invalidate_range(): we are only
	 * downgrading page table protection, not pointing it at a new page.
	 *
	 * See Documentation/vm/mmu_notifier.rst
	 */
4442 i_mmap_unlock_write(vma->vm_file->f_mapping);
4443 mmu_notifier_invalidate_range_end(&range);
4444
4445 return pages << h->order;
4446}
4447
4448int hugetlb_reserve_pages(struct inode *inode,
4449 long from, long to,
4450 struct vm_area_struct *vma,
4451 vm_flags_t vm_flags)
4452{
4453 long ret, chg;
4454 struct hstate *h = hstate_inode(inode);
4455 struct hugepage_subpool *spool = subpool_inode(inode);
4456 struct resv_map *resv_map;
4457 long gbl_reserve;
4458
4459
4460 if (from > to) {
4461 VM_WARN(1, "%s called with a negative range\n", __func__);
4462 return -EINVAL;
4463 }
4464
	/*
	 * Only apply hugepage reservation if asked. At fault time, an
	 * attempt will be made for VM_NORESERVE to allocate a page
	 * without using reserves.
	 */
4470 if (vm_flags & VM_NORESERVE)
4471 return 0;
4472
	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file. Private mappings need
	 * to reserve the full area even if read-only as mprotect() may be
	 * called to make the mapping read-write. Assume !vma is a shm mapping.
	 */
4479 if (!vma || vma->vm_flags & VM_MAYSHARE) {
4480 resv_map = inode_resv_map(inode);
4481
4482 chg = region_chg(resv_map, from, to);
4483
4484 } else {
4485 resv_map = resv_map_alloc();
4486 if (!resv_map)
4487 return -ENOMEM;
4488
4489 chg = to - from;
4490
4491 set_vma_resv_map(vma, resv_map);
4492 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4493 }
4494
4495 if (chg < 0) {
4496 ret = chg;
4497 goto out_err;
4498 }
4499
	/*
	 * There must be enough pages in the subpool for the mapping. If
	 * the subpool has a minimum size, there may be some global
	 * reservations already in place (gbl_reserve).
	 */
4505 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4506 if (gbl_reserve < 0) {
4507 ret = -ENOSPC;
4508 goto out_err;
4509 }
4510
	/*
	 * Check enough hugepages are available for the reservation.
	 * Hand the pages back to the subpool if there are not.
	 */
4515 ret = hugetlb_acct_memory(h, gbl_reserve);
4516 if (ret < 0) {
4517
4518 (void)hugepage_subpool_put_pages(spool, chg);
4519 goto out_err;
4520 }
4521
	/*
	 * Account for the reservations made. Shared mappings record regions
	 * that have reservations as they are shared by multiple VMAs.
	 * When the last VMA disappears, the region map says how much
	 * the reservation was and the page cache tells how much of
	 * the reservation was consumed. Private mappings are per-VMA and
	 * only the consumed reservations are tracked. When the VMA
	 * disappears, the original reservation is the VMA size and the
	 * consumed reservations are stored in the map. Hence, nothing
	 * else has to be done for private mappings here.
	 */
4533 if (!vma || vma->vm_flags & VM_MAYSHARE) {
4534 long add = region_add(resv_map, from, to);
4535
4536 if (unlikely(chg > add)) {
			/*
			 * pages in this range were added to the reserve
			 * map between region_chg and region_add.  This
			 * indicates a race with alloc_huge_page.  Adjust
			 * for the difference.
			 */
4544 long rsv_adjust;
4545
4546 rsv_adjust = hugepage_subpool_put_pages(spool,
4547 chg - add);
4548 hugetlb_acct_memory(h, -rsv_adjust);
4549 }
4550 }
4551 return 0;
4552out_err:
4553 if (!vma || vma->vm_flags & VM_MAYSHARE)
4554
4555 if (chg >= 0)
4556 region_abort(resv_map, from, to);
4557 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4558 kref_put(&resv_map->refs, resv_map_release);
4559 return ret;
4560}
4561
4562long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4563 long freed)
4564{
4565 struct hstate *h = hstate_inode(inode);
4566 struct resv_map *resv_map = inode_resv_map(inode);
4567 long chg = 0;
4568 struct hugepage_subpool *spool = subpool_inode(inode);
4569 long gbl_reserve;
4570
4571 if (resv_map) {
4572 chg = region_del(resv_map, start, end);
		/*
		 * region_del() can fail in the rare case where a region
		 * must be split and another region descriptor can not be
		 * allocated.  If end == LONG_MAX, it will not fail.
		 */
4578 if (chg < 0)
4579 return chg;
4580 }
4581
4582 spin_lock(&inode->i_lock);
4583 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4584 spin_unlock(&inode->i_lock);
4585
	/*
	 * If the subpool has a minimum size, the number of global
	 * reservations to be released may be adjusted.
	 */
4590 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4591 hugetlb_acct_memory(h, -gbl_reserve);
4592
4593 return 0;
4594}
4595
4596#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
4597static unsigned long page_table_shareable(struct vm_area_struct *svma,
4598 struct vm_area_struct *vma,
4599 unsigned long addr, pgoff_t idx)
4600{
4601 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4602 svma->vm_start;
4603 unsigned long sbase = saddr & PUD_MASK;
4604 unsigned long s_end = sbase + PUD_SIZE;
4605
	/* Allow segments to share if only one is marked locked */
4607 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4608 unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4609
	/*
	 * match the virtual addresses, permission and the alignment of the
	 * page table page.
	 */
4614 if (pmd_index(addr) != pmd_index(saddr) ||
4615 vm_flags != svm_flags ||
4616 sbase < svma->vm_start || svma->vm_end < s_end)
4617 return 0;
4618
4619 return saddr;
4620}
4621
4622static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4623{
4624 unsigned long base = addr & PUD_MASK;
4625 unsigned long end = base + PUD_SIZE;
4626
4627
4628
4629
4630 if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
4631 return true;
4632 return false;
4633}
4634
/*
 * Determine if start,end range within vma could be mapped by shared pmd.
 * If yes, adjust start and end to cover range associated with possible
 * shared pmd mappings.
 */
4640void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4641 unsigned long *start, unsigned long *end)
4642{
4643 unsigned long check_addr = *start;
4644
4645 if (!(vma->vm_flags & VM_MAYSHARE))
4646 return;
4647
4648 for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
4649 unsigned long a_start = check_addr & PUD_MASK;
4650 unsigned long a_end = a_start + PUD_SIZE;
4651
4652
4653
4654
4655 if (range_in_vma(vma, a_start, a_end)) {
4656 if (a_start < *start)
4657 *start = a_start;
4658 if (a_end > *end)
4659 *end = a_end;
4660 }
4661 }
4662}
4663
/*
 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
 * and returns the corresponding pte. While this is not necessary for the
 * !shared pmd case because we can allocate the pmd later as well, it makes the
 * code much cleaner. pmd allocation is essential for the shared case because
 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
 * pmd already populated.
 */
4673pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4674{
4675 struct vm_area_struct *vma = find_vma(mm, addr);
4676 struct address_space *mapping = vma->vm_file->f_mapping;
4677 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4678 vma->vm_pgoff;
4679 struct vm_area_struct *svma;
4680 unsigned long saddr;
4681 pte_t *spte = NULL;
4682 pte_t *pte;
4683 spinlock_t *ptl;
4684
4685 if (!vma_shareable(vma, addr))
4686 return (pte_t *)pmd_alloc(mm, pud, addr);
4687
4688 i_mmap_lock_write(mapping);
4689 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4690 if (svma == vma)
4691 continue;
4692
4693 saddr = page_table_shareable(svma, vma, addr, idx);
4694 if (saddr) {
4695 spte = huge_pte_offset(svma->vm_mm, saddr,
4696 vma_mmu_pagesize(svma));
4697 if (spte) {
4698 get_page(virt_to_page(spte));
4699 break;
4700 }
4701 }
4702 }
4703
4704 if (!spte)
4705 goto out;
4706
4707 ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
4708 if (pud_none(*pud)) {
4709 pud_populate(mm, pud,
4710 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4711 mm_inc_nr_pmds(mm);
4712 } else {
4713 put_page(virt_to_page(spte));
4714 }
4715 spin_unlock(ptl);
4716out:
4717 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4718 i_mmap_unlock_write(mapping);
4719 return pte;
4720}
4721
/*
 * unmap huge page backed by shared pte.
 *
 * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
 * indicated by page_count > 1, unmap is achieved by clearing pud and
 * decrementing the ref count. If count == 1, the pte page is not shared.
 *
 * called with page table lock held.
 *
 * returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
4734int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4735{
4736 pgd_t *pgd = pgd_offset(mm, *addr);
4737 p4d_t *p4d = p4d_offset(pgd, *addr);
4738 pud_t *pud = pud_offset(p4d, *addr);
4739
4740 BUG_ON(page_count(virt_to_page(ptep)) == 0);
4741 if (page_count(virt_to_page(ptep)) == 1)
4742 return 0;
4743
4744 pud_clear(pud);
4745 put_page(virt_to_page(ptep));
4746 mm_dec_nr_pmds(mm);
4747 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4748 return 1;
4749}
4750#define want_pmd_share() (1)
4751#else
4752pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4753{
4754 return NULL;
4755}
4756
4757int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4758{
4759 return 0;
4760}
4761
4762void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4763 unsigned long *start, unsigned long *end)
4764{
4765}
4766#define want_pmd_share() (0)
4767#endif
4768
4769#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
4770pte_t *huge_pte_alloc(struct mm_struct *mm,
4771 unsigned long addr, unsigned long sz)
4772{
4773 pgd_t *pgd;
4774 p4d_t *p4d;
4775 pud_t *pud;
4776 pte_t *pte = NULL;
4777
4778 pgd = pgd_offset(mm, addr);
4779 p4d = p4d_alloc(mm, pgd, addr);
4780 if (!p4d)
4781 return NULL;
4782 pud = pud_alloc(mm, p4d, addr);
4783 if (pud) {
4784 if (sz == PUD_SIZE) {
4785 pte = (pte_t *)pud;
4786 } else {
4787 BUG_ON(sz != PMD_SIZE);
4788 if (want_pmd_share() && pud_none(*pud))
4789 pte = huge_pmd_share(mm, addr, pud);
4790 else
4791 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4792 }
4793 }
4794 BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
4795
4796 return pte;
4797}
4798
/*
 * huge_pte_offset() - Walk the page table to resolve the hugepage
 * entry at address @addr
 *
 * Return: Pointer to page table entry (PUD or PMD) for address @addr, or
 * NULL if the entry is not present and the size @sz doesn't match the
 * hugepage size at this level of the page table.
 */
4808pte_t *huge_pte_offset(struct mm_struct *mm,
4809 unsigned long addr, unsigned long sz)
4810{
4811 pgd_t *pgd;
4812 p4d_t *p4d;
4813 pud_t *pud;
4814 pmd_t *pmd;
4815
4816 pgd = pgd_offset(mm, addr);
4817 if (!pgd_present(*pgd))
4818 return NULL;
4819 p4d = p4d_offset(pgd, addr);
4820 if (!p4d_present(*p4d))
4821 return NULL;
4822
4823 pud = pud_offset(p4d, addr);
4824 if (sz != PUD_SIZE && pud_none(*pud))
4825 return NULL;
4826
4827 if (pud_huge(*pud) || !pud_present(*pud))
4828 return (pte_t *)pud;
4829
4830 pmd = pmd_offset(pud, addr);
4831 if (sz != PMD_SIZE && pmd_none(*pmd))
4832 return NULL;
4833
4834 if (pmd_huge(*pmd) || !pmd_present(*pmd))
4835 return (pte_t *)pmd;
4836
4837 return NULL;
4838}
4839
4840#endif
4841
/*
 * These functions are overwritable if your architecture needs its own
 * behavior.
 */
4846struct page * __weak
4847follow_huge_addr(struct mm_struct *mm, unsigned long address,
4848 int write)
4849{
4850 return ERR_PTR(-EINVAL);
4851}
4852
4853struct page * __weak
4854follow_huge_pd(struct vm_area_struct *vma,
4855 unsigned long address, hugepd_t hpd, int flags, int pdshift)
4856{
4857 WARN(1, "hugepd follow called with no support for hugepage directory format\n");
4858 return NULL;
4859}
4860
4861struct page * __weak
4862follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4863 pmd_t *pmd, int flags)
4864{
4865 struct page *page = NULL;
4866 spinlock_t *ptl;
4867 pte_t pte;
4868retry:
4869 ptl = pmd_lockptr(mm, pmd);
4870 spin_lock(ptl);

	/*
	 * make sure that the address range covered by this pmd is not
	 * unmapped from other threads.
	 */
4875 if (!pmd_huge(*pmd))
4876 goto out;
4877 pte = huge_ptep_get((pte_t *)pmd);
4878 if (pte_present(pte)) {
4879 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4880 if (flags & FOLL_GET)
4881 get_page(page);
4882 } else {
4883 if (is_hugetlb_entry_migration(pte)) {
4884 spin_unlock(ptl);
4885 __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4886 goto retry;
4887 }
4888
4889
4890
4891
4892 }
4893out:
4894 spin_unlock(ptl);
4895 return page;
4896}
4897
4898struct page * __weak
4899follow_huge_pud(struct mm_struct *mm, unsigned long address,
4900 pud_t *pud, int flags)
4901{
4902 if (flags & FOLL_GET)
4903 return NULL;
4904
4905 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4906}
4907
4908struct page * __weak
4909follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
4910{
4911 if (flags & FOLL_GET)
4912 return NULL;
4913
4914 return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
4915}
4916
4917bool isolate_huge_page(struct page *page, struct list_head *list)
4918{
4919 bool ret = true;
4920
4921 VM_BUG_ON_PAGE(!PageHead(page), page);
4922 spin_lock(&hugetlb_lock);
4923 if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4924 ret = false;
4925 goto unlock;
4926 }
4927 clear_page_huge_active(page);
4928 list_move_tail(&page->lru, list);
4929unlock:
4930 spin_unlock(&hugetlb_lock);
4931 return ret;
4932}
4933
4934void putback_active_hugepage(struct page *page)
4935{
4936 VM_BUG_ON_PAGE(!PageHead(page), page);
4937 spin_lock(&hugetlb_lock);
4938 set_page_huge_active(page);
4939 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4940 spin_unlock(&hugetlb_lock);
4941 put_page(page);
4942}
4943
4944void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
4945{
4946 struct hstate *h = page_hstate(oldpage);
4947
4948 hugetlb_cgroup_migrate(oldpage, newpage);
4949 set_page_owner_migrate_reason(newpage, reason);
4950
	/*
	 * transfer temporary state of the new huge page. This is
	 * reverse to other transitions because the newpage is going to
	 * be final while the old one will be freed so it takes over
	 * the temporary status.
	 *
	 * Also note that we have to transfer the per-node surplus state
	 * here as well otherwise the global surplus count will not match
	 * the per-node's.
	 */
4961 if (PageHugeTemporary(newpage)) {
4962 int old_nid = page_to_nid(oldpage);
4963 int new_nid = page_to_nid(newpage);
4964
4965 SetPageHugeTemporary(oldpage);
4966 ClearPageHugeTemporary(newpage);
4967
4968 spin_lock(&hugetlb_lock);
4969 if (h->surplus_huge_pages_node[old_nid]) {
4970 h->surplus_huge_pages_node[old_nid]--;
4971 h->surplus_huge_pages_node[new_nid]++;
4972 }
4973 spin_unlock(&hugetlb_lock);
4974 }
4975}
4976