// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
6#include <linux/list.h>
7#include <linux/init.h>
8#include <linux/mm.h>
9#include <linux/seq_file.h>
10#include <linux/sysctl.h>
11#include <linux/highmem.h>
12#include <linux/mmu_notifier.h>
13#include <linux/nodemask.h>
14#include <linux/pagemap.h>
15#include <linux/mempolicy.h>
16#include <linux/compiler.h>
17#include <linux/cpuset.h>
18#include <linux/mutex.h>
19#include <linux/memblock.h>
20#include <linux/sysfs.h>
21#include <linux/slab.h>
22#include <linux/mmdebug.h>
23#include <linux/sched/signal.h>
24#include <linux/rmap.h>
25#include <linux/string_helpers.h>
26#include <linux/swap.h>
27#include <linux/swapops.h>
28#include <linux/jhash.h>
29#include <linux/numa.h>
30
31#include <asm/page.h>
32#include <asm/pgtable.h>
33#include <asm/tlb.h>
34
35#include <linux/io.h>
36#include <linux/hugetlb.h>
37#include <linux/hugetlb_cgroup.h>
38#include <linux/node.h>
39#include <linux/userfaultfd_k.h>
40#include <linux/page_owner.h>
41#include "internal.h"
42
43int hugetlb_max_hstate __read_mostly;
44unsigned int default_hstate_idx;
45struct hstate hstates[HUGE_MAX_HSTATE];

/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot.
 */
50static unsigned int minimum_order __read_mostly = UINT_MAX;
51
52__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
55static struct hstate * __initdata parsed_hstate;
56static unsigned long __initdata default_hstate_max_huge_pages;
57static unsigned long __initdata default_hstate_size;
58static bool __initdata parsed_valid_hugepagesz = true;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
64DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
70static int num_fault_mutexes;
71struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
74static int hugetlb_acct_memory(struct hstate *h, long delta);
75
76static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
77{
78 bool free = (spool->count == 0) && (spool->used_hpages == 0);
79
80 spin_unlock(&spool->lock);

	/*
	 * If no pages are used and no other handles to the subpool remain,
	 * give up any reservations based on the minimum size and free the
	 * subpool.
	 */
85 if (free) {
86 if (spool->min_hpages != -1)
87 hugetlb_acct_memory(spool->hstate,
88 -spool->min_hpages);
89 kfree(spool);
90 }
91}
92
93struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
94 long min_hpages)
95{
96 struct hugepage_subpool *spool;
97
98 spool = kzalloc(sizeof(*spool), GFP_KERNEL);
99 if (!spool)
100 return NULL;
101
102 spin_lock_init(&spool->lock);
103 spool->count = 1;
104 spool->max_hpages = max_hpages;
105 spool->hstate = h;
106 spool->min_hpages = min_hpages;
107
108 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
109 kfree(spool);
110 return NULL;
111 }
112 spool->rsv_hpages = min_hpages;
113
114 return spool;
115}
116
117void hugepage_put_subpool(struct hugepage_subpool *spool)
118{
119 spin_lock(&spool->lock);
120 BUG_ON(!spool->count);
121 spool->count--;
122 unlock_or_release_subpool(spool);
123}
124
/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
133static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
134 long delta)
135{
136 long ret = delta;
137
138 if (!spool)
139 return ret;
140
141 spin_lock(&spool->lock);
142
143 if (spool->max_hpages != -1) {
144 if ((spool->used_hpages + delta) <= spool->max_hpages)
145 spool->used_hpages += delta;
146 else {
147 ret = -ENOMEM;
148 goto unlock_ret;
149 }
150 }
151
152
153 if (spool->min_hpages != -1 && spool->rsv_hpages) {
154 if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those available.
			 * Take available and request the rest from the
			 * global pool.
			 */
159 ret = delta - spool->rsv_hpages;
160 spool->rsv_hpages = 0;
161 } else {
162 ret = 0;
163 spool->rsv_hpages -= delta;
164 }
165 }
166
167unlock_ret:
168 spin_unlock(&spool->lock);
169 return ret;
170}
171
/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
178static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
179 long delta)
180{
181 long ret = delta;
182
183 if (!spool)
184 return delta;
185
186 spin_lock(&spool->lock);
187
188 if (spool->max_hpages != -1)
189 spool->used_hpages -= delta;
190
191
192 if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
193 if (spool->rsv_hpages + delta <= spool->min_hpages)
194 ret = 0;
195 else
196 ret = spool->rsv_hpages + delta - spool->min_hpages;
197
198 spool->rsv_hpages += delta;
199 if (spool->rsv_hpages > spool->min_hpages)
200 spool->rsv_hpages = spool->min_hpages;
201 }
202
	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
207 unlock_or_release_subpool(spool);
208
209 return ret;
210}
211
212static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
213{
214 return HUGETLBFS_SB(inode->i_sb)->spool;
215}
216
217static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
218{
219 return subpool_inode(file_inode(vma->vm_file));
220}
221
/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by the resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file_region with from == 0 and to == 4 represents four huge
 * pages in a mapping.  A region with from == to is an empty placeholder and
 * tracks no pages.
 */
241struct file_region {
242 struct list_head link;
243 long from;
244 long to;
245};

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  In the normal case, existing regions will be expanded
 * to accommodate the specified range.  Sufficient regions should
 * exist for expansion due to the previous call to region_chg
 * with the same range.  However, it is possible that region_del
 * could have been called after region_chg and modified the map
 * in such a way that no region exists to be expanded.  In this
 * case, pull a region descriptor from the cache associated with
 * the map and use that for the new range.
 *
 * Return the number of new huge pages added to the map.  This
 * number is greater than or equal to zero.
 */
261static long region_add(struct resv_map *resv, long f, long t)
262{
263 struct list_head *head = &resv->regions;
264 struct file_region *rg, *nrg, *trg;
265 long add = 0;
266
267 spin_lock(&resv->lock);
268
269 list_for_each_entry(rg, head, link)
270 if (f <= rg->to)
271 break;
272
	/*
	 * If no region exists which can be expanded to include the
	 * specified range, pull a region descriptor from the cache
	 * and use it for this range.
	 */
279 if (&rg->link == head || t < rg->from) {
280 VM_BUG_ON(resv->region_cache_count <= 0);
281
282 resv->region_cache_count--;
283 nrg = list_first_entry(&resv->region_cache, struct file_region,
284 link);
285 list_del(&nrg->link);
286
287 nrg->from = f;
288 nrg->to = t;
289 list_add(&nrg->link, rg->link.prev);
290
291 add += t - f;
292 goto out_locked;
293 }
294
295
296 if (f > rg->from)
297 f = rg->from;
298
299
300 nrg = rg;
301 list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
302 if (&rg->link == head)
303 break;
304 if (rg->from > t)
305 break;
306
307
308
309
310 if (rg->to > t)
311 t = rg->to;
312 if (rg != nrg) {
313
314
315
316
317 add -= (rg->to - rg->from);
318 list_del(&rg->link);
319 kfree(rg);
320 }
321 }
322
323 add += (nrg->from - f);
324 nrg->from = f;
325 add += t - nrg->to;
326 nrg->to = t;
327
328out_locked:
329 resv->adds_in_progress--;
330 spin_unlock(&resv->lock);
331 VM_BUG_ON(add < 0);
332 return add;
333}

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  However, if the existing regions in the map can not
 * be expanded to represent the new range, a new file_region
 * structure is added to the map as a placeholder.  In addition,
 * adds_in_progress is incremented and the region descriptor
 * cache may be refilled so that a later region_add can not fail
 * for lack of memory.
 *
 * Returns the number of huge pages that need to be added to the
 * existing reservation map for the range [f, t).  This number is
 * greater than or equal to zero.  -ENOMEM is returned if a new
 * file_region structure or cache entry is needed and can not be
 * allocated.
 */
357static long region_chg(struct resv_map *resv, long f, long t)
358{
359 struct list_head *head = &resv->regions;
360 struct file_region *rg, *nrg = NULL;
361 long chg = 0;
362
363retry:
364 spin_lock(&resv->lock);
365retry_locked:
366 resv->adds_in_progress++;
367
368
369
370
371
372 if (resv->adds_in_progress > resv->region_cache_count) {
373 struct file_region *trg;
374
375 VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
376
377 resv->adds_in_progress--;
378 spin_unlock(&resv->lock);
379
380 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
381 if (!trg) {
382 kfree(nrg);
383 return -ENOMEM;
384 }
385
386 spin_lock(&resv->lock);
387 list_add(&trg->link, &resv->region_cache);
388 resv->region_cache_count++;
389 goto retry_locked;
390 }
391
392
393 list_for_each_entry(rg, head, link)
394 if (f <= rg->to)
395 break;
396
397
398
399
400 if (&rg->link == head || t < rg->from) {
401 if (!nrg) {
402 resv->adds_in_progress--;
403 spin_unlock(&resv->lock);
404 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
405 if (!nrg)
406 return -ENOMEM;
407
408 nrg->from = f;
409 nrg->to = f;
410 INIT_LIST_HEAD(&nrg->link);
411 goto retry;
412 }
413
414 list_add(&nrg->link, rg->link.prev);
415 chg = t - f;
416 goto out_nrg;
417 }
418
419
420 if (f > rg->from)
421 f = rg->from;
422 chg = t - f;
423
424
425 list_for_each_entry(rg, rg->link.prev, link) {
426 if (&rg->link == head)
427 break;
428 if (rg->from > t)
429 goto out;
430
431
432
433
434 if (rg->to > t) {
435 chg += rg->to - t;
436 t = rg->to;
437 }
438 chg -= rg->to - rg->from;
439 }
440
441out:
442 spin_unlock(&resv->lock);
443
444 kfree(nrg);
445 return chg;
446out_nrg:
447 spin_unlock(&resv->lock);
448 return chg;
449}

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * it is always passed the range of interest.
 */
462static void region_abort(struct resv_map *resv, long f, long t)
463{
464 spin_lock(&resv->lock);
465 VM_BUG_ON(!resv->region_cache_count);
466 resv->adds_in_progress--;
467 spin_unlock(&resv->lock);
468}

/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
484static long region_del(struct resv_map *resv, long f, long t)
485{
486 struct list_head *head = &resv->regions;
487 struct file_region *rg, *trg;
488 struct file_region *nrg = NULL;
489 long del = 0;
490
491retry:
492 spin_lock(&resv->lock);
493 list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
501 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
502 continue;
503
504 if (rg->from >= t)
505 break;
506
507 if (f > rg->from && t < rg->to) {
508
509
510
511
512 if (!nrg &&
513 resv->region_cache_count > resv->adds_in_progress) {
514 nrg = list_first_entry(&resv->region_cache,
515 struct file_region,
516 link);
517 list_del(&nrg->link);
518 resv->region_cache_count--;
519 }
520
521 if (!nrg) {
522 spin_unlock(&resv->lock);
523 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
524 if (!nrg)
525 return -ENOMEM;
526 goto retry;
527 }
528
529 del += t - f;
530
531
532 nrg->from = t;
533 nrg->to = rg->to;
534 INIT_LIST_HEAD(&nrg->link);
535
536
537 rg->to = f;
538
539 list_add(&nrg->link, &rg->link);
540 nrg = NULL;
541 break;
542 }
543
544 if (f <= rg->from && t >= rg->to) {
545 del += rg->to - rg->from;
546 list_del(&rg->link);
547 kfree(rg);
548 continue;
549 }
550
551 if (f <= rg->from) {
552 del += t - rg->from;
553 rg->from = t;
554 } else {
555 del += rg->to - f;
556 rg->to = f;
557 }
558 }
559
560 spin_unlock(&resv->lock);
561 kfree(nrg);
562 return del;
563}

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
574void hugetlb_fix_reserve_counts(struct inode *inode)
575{
576 struct hugepage_subpool *spool = subpool_inode(inode);
577 long rsv_adjust;
578
579 rsv_adjust = hugepage_subpool_get_pages(spool, 1);
580 if (rsv_adjust) {
581 struct hstate *h = hstate_inode(inode);
582
583 hugetlb_acct_memory(h, 1);
584 }
585}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
591static long region_count(struct resv_map *resv, long f, long t)
592{
593 struct list_head *head = &resv->regions;
594 struct file_region *rg;
595 long chg = 0;
596
597 spin_lock(&resv->lock);
598
599 list_for_each_entry(rg, head, link) {
600 long seg_from;
601 long seg_to;
602
603 if (rg->to <= f)
604 continue;
605 if (rg->from >= t)
606 break;
607
608 seg_from = max(rg->from, f);
609 seg_to = min(rg->to, t);
610
611 chg += seg_to - seg_from;
612 }
613 spin_unlock(&resv->lock);
614
615 return chg;
616}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
622static pgoff_t vma_hugecache_offset(struct hstate *h,
623 struct vm_area_struct *vma, unsigned long address)
624{
625 return ((address - vma->vm_start) >> huge_page_shift(h)) +
626 (vma->vm_pgoff >> huge_page_order(h));
627}
628
629pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
630 unsigned long address)
631{
632 return vma_hugecache_offset(hstate_vma(vma), vma, address);
633}
634EXPORT_SYMBOL_GPL(linear_hugepage_index);
635
/*
 * Return the size of the pages allocated when backing a VMA.  In the
 * majority of cases this will be the same size as used by the page table
 * entries.
 */
640unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
641{
642 if (vma->vm_ops && vma->vm_ops->pagesize)
643 return vma->vm_ops->pagesize(vma);
644 return PAGE_SIZE;
645}
646EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
647
/*
 * Return the page size being used by the MMU to back a VMA.  In the majority
 * of cases, the page size used by the kernel matches the MMU size.  On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
654__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
655{
656 return vma_kernel_pagesize(vma);
657}
658
/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
664#define HPAGE_RESV_OWNER (1UL << 0)
665#define HPAGE_RESV_UNMAPPED (1UL << 1)
666#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping.  Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at
 * fork(), the reservation state is updated with hugetlb_lock held.
 *
 * A shared mapping keeps a region map on the underlying file; it records
 * which file offsets have ever had a reservation and persists after pages
 * are instantiated.  A private mapping keeps a region map on the original
 * mmap() (attached to every VMA referencing it); it records which offsets
 * have already consumed their reservation.
 */
687static unsigned long get_vma_private_data(struct vm_area_struct *vma)
688{
689 return (unsigned long)vma->vm_private_data;
690}
691
692static void set_vma_private_data(struct vm_area_struct *vma,
693 unsigned long value)
694{
695 vma->vm_private_data = (void *)value;
696}
697
698struct resv_map *resv_map_alloc(void)
699{
700 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
701 struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
702
703 if (!resv_map || !rg) {
704 kfree(resv_map);
705 kfree(rg);
706 return NULL;
707 }
708
709 kref_init(&resv_map->refs);
710 spin_lock_init(&resv_map->lock);
711 INIT_LIST_HEAD(&resv_map->regions);
712
713 resv_map->adds_in_progress = 0;
714
715 INIT_LIST_HEAD(&resv_map->region_cache);
716 list_add(&rg->link, &resv_map->region_cache);
717 resv_map->region_cache_count = 1;
718
719 return resv_map;
720}
721
722void resv_map_release(struct kref *ref)
723{
724 struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
725 struct list_head *head = &resv_map->region_cache;
726 struct file_region *rg, *trg;
727
728
729 region_del(resv_map, 0, LONG_MAX);
730
731
732 list_for_each_entry_safe(rg, trg, head, link) {
733 list_del(&rg->link);
734 kfree(rg);
735 }
736
737 VM_BUG_ON(resv_map->adds_in_progress);
738
739 kfree(resv_map);
740}
741
742static inline struct resv_map *inode_resv_map(struct inode *inode)
743{
	/*
	 * At inode evict time, i_mapping may not point to the original
	 * address space within the inode.  This original address space
	 * contains the pointer to the resv_map.  So, always use the
	 * address space embedded within the inode.
	 * The VERY common case is inode->mapping == &inode->i_data but,
	 * this may not be true for device special inodes.
	 */
752 return (struct resv_map *)(&inode->i_data)->private_data;
753}
754
755static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
756{
757 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
758 if (vma->vm_flags & VM_MAYSHARE) {
759 struct address_space *mapping = vma->vm_file->f_mapping;
760 struct inode *inode = mapping->host;
761
762 return inode_resv_map(inode);
763
764 } else {
765 return (struct resv_map *)(get_vma_private_data(vma) &
766 ~HPAGE_RESV_MASK);
767 }
768}
769
770static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
771{
772 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
773 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
774
775 set_vma_private_data(vma, (get_vma_private_data(vma) &
776 HPAGE_RESV_MASK) | (unsigned long)map);
777}
778
779static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
780{
781 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
782 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
783
784 set_vma_private_data(vma, get_vma_private_data(vma) | flags);
785}
786
787static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
788{
789 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
790
791 return (get_vma_private_data(vma) & flag) != 0;
792}
793
/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
795void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
796{
797 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
798 if (!(vma->vm_flags & VM_MAYSHARE))
799 vma->vm_private_data = (void *)0;
800}
801
/* Returns true if the VMA has associated reserve pages */
803static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
804{
805 if (vma->vm_flags & VM_NORESERVE) {
806
807
808
809
810
811
812
813
814
815 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
816 return true;
817 else
818 return false;
819 }
820
821
822 if (vma->vm_flags & VM_MAYSHARE) {
823
824
825
826
827
828
829
830 if (chg)
831 return false;
832 else
833 return true;
834 }
835
836
837
838
839
840 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856 if (chg)
857 return false;
858 else
859 return true;
860 }
861
862 return false;
863}
864
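/* Move the page to its node's free list; caller must hold hugetlb_lock. */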
865static void enqueue_huge_page(struct hstate *h, struct page *page)
866{
867 int nid = page_to_nid(page);
868 list_move(&page->lru, &h->hugepage_freelists[nid]);
869 h->free_huge_pages++;
870 h->free_huge_pages_node[nid]++;
871}
872
873static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
874{
875 struct page *page;
876
877 list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
878 if (!PageHWPoison(page))
879 break;
880
881
882
883
884 if (&h->hugepage_freelists[nid] == &page->lru)
885 return NULL;
886 list_move(&page->lru, &h->hugepage_activelist);
887 set_page_refcounted(page);
888 h->free_huge_pages--;
889 h->free_huge_pages_node[nid]--;
890 return page;
891}
892
893static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
894 nodemask_t *nmask)
895{
896 unsigned int cpuset_mems_cookie;
897 struct zonelist *zonelist;
898 struct zone *zone;
899 struct zoneref *z;
900 int node = NUMA_NO_NODE;
901
902 zonelist = node_zonelist(nid, gfp_mask);
903
904retry_cpuset:
905 cpuset_mems_cookie = read_mems_allowed_begin();
906 for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
907 struct page *page;
908
909 if (!cpuset_zone_allowed(zone, gfp_mask))
910 continue;
911
912
913
914
915 if (zone_to_nid(zone) == node)
916 continue;
917 node = zone_to_nid(zone);
918
919 page = dequeue_huge_page_node_exact(h, node);
920 if (page)
921 return page;
922 }
923 if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
924 goto retry_cpuset;
925
926 return NULL;
927}
928
/* Movability of hugepages depends on migration support. */
930static inline gfp_t htlb_alloc_mask(struct hstate *h)
931{
932 if (hugepage_movable_supported(h))
933 return GFP_HIGHUSER_MOVABLE;
934 else
935 return GFP_HIGHUSER;
936}
937
938static struct page *dequeue_huge_page_vma(struct hstate *h,
939 struct vm_area_struct *vma,
940 unsigned long address, int avoid_reserve,
941 long chg)
942{
943 struct page *page;
944 struct mempolicy *mpol;
945 gfp_t gfp_mask;
946 nodemask_t *nodemask;
947 int nid;
948
	/*
	 * A child process with MAP_PRIVATE mappings created by their parent
	 * have no page reserves.  This check ensures that reservations are
	 * not "stolen".  The child may still get SIGKILLed.
	 */
954 if (!vma_has_reserves(vma, chg) &&
955 h->free_huge_pages - h->resv_huge_pages == 0)
956 goto err;
957
	/* If reserves cannot be used, ensure enough pages are in the pool */
959 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
960 goto err;
961
962 gfp_mask = htlb_alloc_mask(h);
963 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
964 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
965 if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
966 SetPagePrivate(page);
967 h->resv_huge_pages--;
968 }
969
970 mpol_cond_put(mpol);
971 return page;
972
973err:
974 return NULL;
975}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
984static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
985{
986 nid = next_node_in(nid, *nodes_allowed);
987 VM_BUG_ON(nid >= MAX_NUMNODES);
988
989 return nid;
990}
991
992static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
993{
994 if (!node_isset(nid, *nodes_allowed))
995 nid = next_node_allowed(nid, nodes_allowed);
996 return nid;
997}
998
999
1000
1001
1002
1003
1004
1005static int hstate_next_node_to_alloc(struct hstate *h,
1006 nodemask_t *nodes_allowed)
1007{
1008 int nid;
1009
1010 VM_BUG_ON(!nodes_allowed);
1011
1012 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1013 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1014
1015 return nid;
1016}
1017
1018
1019
1020
1021
1022
1023
1024static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1025{
1026 int nid;
1027
1028 VM_BUG_ON(!nodes_allowed);
1029
1030 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1031 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1032
1033 return nid;
1034}
1035
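/*
 * Iterate over the allowed nodes in round-robin order, visiting each node
 * at most once per walk while advancing the hstate's next-node cursor.
 */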
1036#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
1037 for (nr_nodes = nodes_weight(*mask); \
1038 nr_nodes > 0 && \
1039 ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
1040 nr_nodes--)
1041
1042#define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
1043 for (nr_nodes = nodes_weight(*mask); \
1044 nr_nodes > 0 && \
1045 ((node = hstate_next_node_to_free(hs, mask)) || 1); \
1046 nr_nodes--)
1047
1048#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1049static void destroy_compound_gigantic_page(struct page *page,
1050 unsigned int order)
1051{
1052 int i;
1053 int nr_pages = 1 << order;
1054 struct page *p = page + 1;
1055
1056 atomic_set(compound_mapcount_ptr(page), 0);
1057 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1058 clear_compound_head(p);
1059 set_page_refcounted(p);
1060 }
1061
1062 set_compound_order(page, 0);
1063 __ClearPageHead(page);
1064}
1065
1066static void free_gigantic_page(struct page *page, unsigned int order)
1067{
1068 free_contig_range(page_to_pfn(page), 1 << order);
1069}
1070
1071#ifdef CONFIG_CONTIG_ALLOC
1072static int __alloc_gigantic_page(unsigned long start_pfn,
1073 unsigned long nr_pages, gfp_t gfp_mask)
1074{
1075 unsigned long end_pfn = start_pfn + nr_pages;
1076 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
1077 gfp_mask);
1078}
1079
1080static bool pfn_range_valid_gigantic(struct zone *z,
1081 unsigned long start_pfn, unsigned long nr_pages)
1082{
1083 unsigned long i, end_pfn = start_pfn + nr_pages;
1084 struct page *page;
1085
1086 for (i = start_pfn; i < end_pfn; i++) {
1087 if (!pfn_valid(i))
1088 return false;
1089
1090 page = pfn_to_page(i);
1091
1092 if (page_zone(page) != z)
1093 return false;
1094
1095 if (PageReserved(page))
1096 return false;
1097
1098 if (page_count(page) > 0)
1099 return false;
1100
1101 if (PageHuge(page))
1102 return false;
1103 }
1104
1105 return true;
1106}
1107
1108static bool zone_spans_last_pfn(const struct zone *zone,
1109 unsigned long start_pfn, unsigned long nr_pages)
1110{
1111 unsigned long last_pfn = start_pfn + nr_pages - 1;
1112 return zone_spans_pfn(zone, last_pfn);
1113}
1114
1115static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1116 int nid, nodemask_t *nodemask)
1117{
1118 unsigned int order = huge_page_order(h);
1119 unsigned long nr_pages = 1 << order;
1120 unsigned long ret, pfn, flags;
1121 struct zonelist *zonelist;
1122 struct zone *zone;
1123 struct zoneref *z;
1124
1125 zonelist = node_zonelist(nid, gfp_mask);
1126 for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
1127 spin_lock_irqsave(&zone->lock, flags);
1128
1129 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
1130 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
1131 if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
1132
1133
1134
1135
1136
1137
1138
1139 spin_unlock_irqrestore(&zone->lock, flags);
1140 ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
1141 if (!ret)
1142 return pfn_to_page(pfn);
1143 spin_lock_irqsave(&zone->lock, flags);
1144 }
1145 pfn += nr_pages;
1146 }
1147
1148 spin_unlock_irqrestore(&zone->lock, flags);
1149 }
1150
1151 return NULL;
1152}
1153
1154static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1155static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1156#else
1157static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1158 int nid, nodemask_t *nodemask)
1159{
1160 return NULL;
1161}
1162#endif
1163
1164#else
1165static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1166 int nid, nodemask_t *nodemask)
1167{
1168 return NULL;
1169}
1170static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1171static inline void destroy_compound_gigantic_page(struct page *page,
1172 unsigned int order) { }
1173#endif
1174
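/* Undo hstate accounting for a huge page and hand it back to the allocator. */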
1175static void update_and_free_page(struct hstate *h, struct page *page)
1176{
1177 int i;
1178
1179 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1180 return;
1181
1182 h->nr_huge_pages--;
1183 h->nr_huge_pages_node[page_to_nid(page)]--;
1184 for (i = 0; i < pages_per_huge_page(h); i++) {
1185 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1186 1 << PG_referenced | 1 << PG_dirty |
1187 1 << PG_active | 1 << PG_private |
1188 1 << PG_writeback);
1189 }
1190 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1191 set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1192 set_page_refcounted(page);
1193 if (hstate_is_gigantic(h)) {
1194 destroy_compound_gigantic_page(page, huge_page_order(h));
1195 free_gigantic_page(page, huge_page_order(h));
1196 } else {
1197 __free_pages(page, huge_page_order(h));
1198 }
1199}
1200
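/* Look up the hstate for a given huge page size in bytes, or NULL if none. */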
1201struct hstate *size_to_hstate(unsigned long size)
1202{
1203 struct hstate *h;
1204
1205 for_each_hstate(h) {
1206 if (huge_page_size(h) == size)
1207 return h;
1208 }
1209 return NULL;
1210}

/*
 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
 * to hstate->hugepage_activelist.)
 *
 * This function can be called for tail pages, but never returns true for them.
 */
1218bool page_huge_active(struct page *page)
1219{
1220 VM_BUG_ON_PAGE(!PageHuge(page), page);
1221 return PageHead(page) && PagePrivate(&page[1]);
1222}
1223
1224
1225static void set_page_huge_active(struct page *page)
1226{
1227 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1228 SetPagePrivate(&page[1]);
1229}
1230
1231static void clear_page_huge_active(struct page *page)
1232{
1233 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1234 ClearPagePrivate(&page[1]);
1235}

/*
 * Internal hugetlb specific page flag.  Do not use outside of the hugetlb
 * code.
 */
1241static inline bool PageHugeTemporary(struct page *page)
1242{
1243 if (!PageHuge(page))
1244 return false;
1245
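	/* The mapping field of the third tail page is (ab)used as the marker. */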
1246 return (unsigned long)page[2].mapping == -1U;
1247}
1248
1249static inline void SetPageHugeTemporary(struct page *page)
1250{
1251 page[2].mapping = (void *)-1U;
1252}
1253
1254static inline void ClearPageHugeTemporary(struct page *page)
1255{
1256 page[2].mapping = NULL;
1257}
1258
1259void free_huge_page(struct page *page)
1260{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
1265 struct hstate *h = page_hstate(page);
1266 int nid = page_to_nid(page);
1267 struct hugepage_subpool *spool =
1268 (struct hugepage_subpool *)page_private(page);
1269 bool restore_reserve;
1270
1271 VM_BUG_ON_PAGE(page_count(page), page);
1272 VM_BUG_ON_PAGE(page_mapcount(page), page);
1273
1274 set_page_private(page, 0);
1275 page->mapping = NULL;
1276 restore_reserve = PagePrivate(page);
1277 ClearPagePrivate(page);
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287 if (!restore_reserve) {
1288
1289
1290
1291
1292
1293
1294 if (hugepage_subpool_put_pages(spool, 1) == 0)
1295 restore_reserve = true;
1296 }
1297
1298 spin_lock(&hugetlb_lock);
1299 clear_page_huge_active(page);
1300 hugetlb_cgroup_uncharge_page(hstate_index(h),
1301 pages_per_huge_page(h), page);
1302 if (restore_reserve)
1303 h->resv_huge_pages++;
1304
1305 if (PageHugeTemporary(page)) {
1306 list_del(&page->lru);
1307 ClearPageHugeTemporary(page);
1308 update_and_free_page(h, page);
1309 } else if (h->surplus_huge_pages_node[nid]) {
1310
1311 list_del(&page->lru);
1312 update_and_free_page(h, page);
1313 h->surplus_huge_pages--;
1314 h->surplus_huge_pages_node[nid]--;
1315 } else {
1316 arch_clear_hugepage_flags(page);
1317 enqueue_huge_page(h, page);
1318 }
1319 spin_unlock(&hugetlb_lock);
1320}
1321
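/* Account a freshly allocated huge page in its hstate and set its destructor. */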
1322static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1323{
1324 INIT_LIST_HEAD(&page->lru);
1325 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1326 spin_lock(&hugetlb_lock);
1327 set_hugetlb_cgroup(page, NULL);
1328 h->nr_huge_pages++;
1329 h->nr_huge_pages_node[nid]++;
1330 spin_unlock(&hugetlb_lock);
1331}
1332
1333static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1334{
1335 int i;
1336 int nr_pages = 1 << order;
1337 struct page *p = page + 1;
1338
1339
1340 set_compound_order(page, order);
1341 __ClearPageReserved(page);
1342 __SetPageHead(page);
1343 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356 __ClearPageReserved(p);
1357 set_page_count(p, 0);
1358 set_compound_head(p, page);
1359 }
1360 atomic_set(compound_mapcount_ptr(page), -1);
1361}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
1368int PageHuge(struct page *page)
1369{
1370 if (!PageCompound(page))
1371 return 0;
1372
1373 page = compound_head(page);
1374 return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1375}
1376EXPORT_SYMBOL_GPL(PageHuge);
1377
1378
1379
1380
1381
1382int PageHeadHuge(struct page *page_head)
1383{
1384 if (!PageHead(page_head))
1385 return 0;
1386
1387 return get_compound_page_dtor(page_head) == free_huge_page;
1388}
1389
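/* Convert a page within a huge page to its pagecache index in base pages. */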
1390pgoff_t __basepage_index(struct page *page)
1391{
1392 struct page *page_head = compound_head(page);
1393 pgoff_t index = page_index(page_head);
1394 unsigned long compound_idx;
1395
1396 if (!PageHuge(page_head))
1397 return page_index(page);
1398
1399 if (compound_order(page_head) >= MAX_ORDER)
1400 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1401 else
1402 compound_idx = page - page_head;
1403
1404 return (index << compound_order(page_head)) + compound_idx;
1405}
1406
1407static struct page *alloc_buddy_huge_page(struct hstate *h,
1408 gfp_t gfp_mask, int nid, nodemask_t *nmask)
1409{
1410 int order = huge_page_order(h);
1411 struct page *page;
1412
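	/* Compound allocation that may fail without triggering OOM warnings. */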
1413 gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
1414 if (nid == NUMA_NO_NODE)
1415 nid = numa_mem_id();
1416 page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
1417 if (page)
1418 __count_vm_event(HTLB_BUDDY_PGALLOC);
1419 else
1420 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1421
1422 return page;
1423}

/*
 * Common helper to allocate a fresh hugetlb page.  All specific allocators
 * should use this function to get new hugetlb pages.
 */
1429static struct page *alloc_fresh_huge_page(struct hstate *h,
1430 gfp_t gfp_mask, int nid, nodemask_t *nmask)
1431{
1432 struct page *page;
1433
1434 if (hstate_is_gigantic(h))
1435 page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
1436 else
1437 page = alloc_buddy_huge_page(h, gfp_mask,
1438 nid, nmask);
1439 if (!page)
1440 return NULL;
1441
1442 if (hstate_is_gigantic(h))
1443 prep_compound_gigantic_page(page, huge_page_order(h));
1444 prep_new_huge_page(h, page, page_to_nid(page));
1445
1446 return page;
1447}

/*
 * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
 * manner.
 */
1453static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1454{
1455 struct page *page;
1456 int nr_nodes, node;
1457 gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1458
1459 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1460 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed);
1461 if (page)
1462 break;
1463 }
1464
1465 if (!page)
1466 return 0;
1467
1468 put_page(page);
1469
1470 return 1;
1471}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
1479static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1480 bool acct_surplus)
1481{
1482 int nr_nodes, node;
1483 int ret = 0;
1484
1485 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1486
1487
1488
1489
1490 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1491 !list_empty(&h->hugepage_freelists[node])) {
1492 struct page *page =
1493 list_entry(h->hugepage_freelists[node].next,
1494 struct page, lru);
1495 list_del(&page->lru);
1496 h->free_huge_pages--;
1497 h->free_huge_pages_node[node]--;
1498 if (acct_surplus) {
1499 h->surplus_huge_pages--;
1500 h->surplus_huge_pages_node[node]--;
1501 }
1502 update_and_free_page(h, page);
1503 ret = 1;
1504 break;
1505 }
1506 }
1507
1508 return ret;
1509}

/*
 * Dissolve a given free hugepage into free buddy pages.  This function does
 * nothing for in-use hugepages and non-hugepages.
 * This function returns values like below:
 *
 *  -EBUSY: failed to dissolve free hugepages or the hugepage is in-use
 *          (allocated or reserved.)
 *       0: successfully dissolved free hugepages or the page is not a
 *          hugepage (considered as already dissolved)
 */
1521int dissolve_free_huge_page(struct page *page)
1522{
1523 int rc = -EBUSY;
1524
1525
1526 if (!PageHuge(page))
1527 return 0;
1528
1529 spin_lock(&hugetlb_lock);
1530 if (!PageHuge(page)) {
1531 rc = 0;
1532 goto out;
1533 }
1534
1535 if (!page_count(page)) {
1536 struct page *head = compound_head(page);
1537 struct hstate *h = page_hstate(head);
1538 int nid = page_to_nid(head);
1539 if (h->free_huge_pages - h->resv_huge_pages == 0)
1540 goto out;
1541
1542
1543
1544
1545 if (PageHWPoison(head) && page != head) {
1546 SetPageHWPoison(page);
1547 ClearPageHWPoison(head);
1548 }
1549 list_del(&head->lru);
1550 h->free_huge_pages--;
1551 h->free_huge_pages_node[nid]--;
1552 h->max_huge_pages--;
1553 update_and_free_page(h, head);
1554 rc = 0;
1555 }
1556out:
1557 spin_unlock(&hugetlb_lock);
1558 return rc;
1559}

/*
 * Dissolve free hugepages in a given pfn range.  Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that this will dissolve a free gigantic hugepage completely, if any
 * part of it lies within the given range.
 * Also note that if dissolve_free_huge_page() returns with an error, all
 * free hugepages that are dissolved before that error are lost.
 */
1569int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1570{
1571 unsigned long pfn;
1572 struct page *page;
1573 int rc = 0;
1574
1575 if (!hugepages_supported())
1576 return rc;
1577
1578 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1579 page = pfn_to_page(pfn);
1580 rc = dissolve_free_huge_page(page);
1581 if (rc)
1582 break;
1583 }
1584
1585 return rc;
1586}

/*
 * Allocates a fresh surplus page from the page allocator.
 */
1591static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
1592 int nid, nodemask_t *nmask)
1593{
1594 struct page *page = NULL;
1595
1596 if (hstate_is_gigantic(h))
1597 return NULL;
1598
1599 spin_lock(&hugetlb_lock);
1600 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
1601 goto out_unlock;
1602 spin_unlock(&hugetlb_lock);
1603
1604 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
1605 if (!page)
1606 return NULL;
1607
1608 spin_lock(&hugetlb_lock);
1609
1610
1611
1612
1613
1614
1615
1616 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1617 SetPageHugeTemporary(page);
1618 spin_unlock(&hugetlb_lock);
1619 put_page(page);
1620 return NULL;
1621 } else {
1622 h->surplus_huge_pages++;
1623 h->surplus_huge_pages_node[page_to_nid(page)]++;
1624 }
1625
1626out_unlock:
1627 spin_unlock(&hugetlb_lock);
1628
1629 return page;
1630}
1631
1632struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
1633 int nid, nodemask_t *nmask)
1634{
1635 struct page *page;
1636
1637 if (hstate_is_gigantic(h))
1638 return NULL;
1639
1640 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
1641 if (!page)
1642 return NULL;
1643
	/*
	 * We do not account these pages as surplus because they are only
	 * temporary and will be released properly on the last reference.
	 */
1648 SetPageHugeTemporary(page);
1649
1650 return page;
1651}

/*
 * Use the VMA's mpolicy to allocate a huge page from the buddy.
 */
1656static
1657struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
1658 struct vm_area_struct *vma, unsigned long addr)
1659{
1660 struct page *page;
1661 struct mempolicy *mpol;
1662 gfp_t gfp_mask = htlb_alloc_mask(h);
1663 int nid;
1664 nodemask_t *nodemask;
1665
1666 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
1667 page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
1668 mpol_cond_put(mpol);
1669
1670 return page;
1671}
1672
/* page migration callback function */
1674struct page *alloc_huge_page_node(struct hstate *h, int nid)
1675{
1676 gfp_t gfp_mask = htlb_alloc_mask(h);
1677 struct page *page = NULL;
1678
1679 if (nid != NUMA_NO_NODE)
1680 gfp_mask |= __GFP_THISNODE;
1681
1682 spin_lock(&hugetlb_lock);
1683 if (h->free_huge_pages - h->resv_huge_pages > 0)
1684 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
1685 spin_unlock(&hugetlb_lock);
1686
1687 if (!page)
1688 page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
1689
1690 return page;
1691}
1692
/* page migration callback function */
1694struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
1695 nodemask_t *nmask)
1696{
1697 gfp_t gfp_mask = htlb_alloc_mask(h);
1698
1699 spin_lock(&hugetlb_lock);
1700 if (h->free_huge_pages - h->resv_huge_pages > 0) {
1701 struct page *page;
1702
1703 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
1704 if (page) {
1705 spin_unlock(&hugetlb_lock);
1706 return page;
1707 }
1708 }
1709 spin_unlock(&hugetlb_lock);
1710
1711 return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
1712}
1713
/* mempolicy aware migration callback */
1715struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
1716 unsigned long address)
1717{
1718 struct mempolicy *mpol;
1719 nodemask_t *nodemask;
1720 struct page *page;
1721 gfp_t gfp_mask;
1722 int node;
1723
1724 gfp_mask = htlb_alloc_mask(h);
1725 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1726 page = alloc_huge_page_nodemask(h, node, nodemask);
1727 mpol_cond_put(mpol);
1728
1729 return page;
1730}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * with size 'delta'.
 */
1736static int gather_surplus_pages(struct hstate *h, int delta)
1737{
1738 struct list_head surplus_list;
1739 struct page *page, *tmp;
1740 int ret, i;
1741 int needed, allocated;
1742 bool alloc_ok = true;
1743
1744 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1745 if (needed <= 0) {
1746 h->resv_huge_pages += delta;
1747 return 0;
1748 }
1749
1750 allocated = 0;
1751 INIT_LIST_HEAD(&surplus_list);
1752
1753 ret = -ENOMEM;
1754retry:
1755 spin_unlock(&hugetlb_lock);
1756 for (i = 0; i < needed; i++) {
1757 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
1758 NUMA_NO_NODE, NULL);
1759 if (!page) {
1760 alloc_ok = false;
1761 break;
1762 }
1763 list_add(&page->lru, &surplus_list);
1764 cond_resched();
1765 }
1766 allocated += i;
1767
1768
1769
1770
1771
1772 spin_lock(&hugetlb_lock);
1773 needed = (h->resv_huge_pages + delta) -
1774 (h->free_huge_pages + allocated);
1775 if (needed > 0) {
1776 if (alloc_ok)
1777 goto retry;
1778
1779
1780
1781
1782
1783 goto free;
1784 }
1785
1786
1787
1788
1789
1790
1791
1792
1793 needed += allocated;
1794 h->resv_huge_pages += delta;
1795 ret = 0;
1796
1797
1798 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1799 if ((--needed) < 0)
1800 break;
1801
1802
1803
1804
1805 put_page_testzero(page);
1806 VM_BUG_ON_PAGE(page_count(page), page);
1807 enqueue_huge_page(h, page);
1808 }
1809free:
1810 spin_unlock(&hugetlb_lock);
1811
1812
1813 list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1814 put_page(page);
1815 spin_lock(&hugetlb_lock);
1816
1817 return ret;
1818}

/*
 * This routine has two main purposes:
 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
 *    in unused_resv_pages.  This corresponds to the prior adjustments made
 *    by the caller.
 *
 * 2) Free any unused surplus pages that may have been allocated to satisfy
 *    the reservation.  As many as unused_resv_pages may be freed.
 *
 * Called with hugetlb_lock held.  However, the lock could be dropped (and
 * reacquired) during calls to cond_resched_lock.  Whenever dropping the
 * lock, make sure nobody else can claim pages we are in the process of
 * freeing.
 */
1834static void return_unused_surplus_pages(struct hstate *h,
1835 unsigned long unused_resv_pages)
1836{
1837 unsigned long nr_pages;
1838
1839
1840 if (hstate_is_gigantic(h))
1841 goto out;
1842
1843
1844
1845
1846
1847 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860
1861 while (nr_pages--) {
1862 h->resv_huge_pages--;
1863 unused_resv_pages--;
1864 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1865 goto out;
1866 cond_resched_lock(&hugetlb_lock);
1867 }
1868
1869out:
1870
1871 h->resv_huge_pages -= unused_resv_pages;
1872}

/*
 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
 * are used by the huge page allocation routines to manage reservations.
 *
 * vma_needs_reservation is called to determine if the huge page at addr
 * within the vma has an associated reservation.  If a reservation is
 * needed, the value 1 is returned.  The caller is then responsible for
 * managing the global reservation and subpool usage counts.  After
 * the huge page has been allocated, vma_commit_reservation is called
 * to add the page to the reservation map.  If the page allocation fails,
 * the reservation must be ended instead of committed; vma_end_reservation
 * is called in such cases.
 *
 * In the normal case, vma_commit_reservation returns the same value
 * as the preceding vma_needs_reservation call.  The only time this
 * is not the case is if a reserve map was changed between calls.  It
 * is the responsibility of the caller to notice the difference and
 * take appropriate action.
 *
 * vma_add_reservation is used in error paths where a reservation must
 * be restored when a newly allocated huge page must be freed.  It is
 * to be called after calling vma_needs_reservation to determine if a
 * reservation exists.
 */
1899enum vma_resv_mode {
1900 VMA_NEEDS_RESV,
1901 VMA_COMMIT_RESV,
1902 VMA_END_RESV,
1903 VMA_ADD_RESV,
1904};
1905static long __vma_reservation_common(struct hstate *h,
1906 struct vm_area_struct *vma, unsigned long addr,
1907 enum vma_resv_mode mode)
1908{
1909 struct resv_map *resv;
1910 pgoff_t idx;
1911 long ret;
1912
1913 resv = vma_resv_map(vma);
1914 if (!resv)
1915 return 1;
1916
1917 idx = vma_hugecache_offset(h, vma, addr);
1918 switch (mode) {
1919 case VMA_NEEDS_RESV:
1920 ret = region_chg(resv, idx, idx + 1);
1921 break;
1922 case VMA_COMMIT_RESV:
1923 ret = region_add(resv, idx, idx + 1);
1924 break;
1925 case VMA_END_RESV:
1926 region_abort(resv, idx, idx + 1);
1927 ret = 0;
1928 break;
1929 case VMA_ADD_RESV:
1930 if (vma->vm_flags & VM_MAYSHARE)
1931 ret = region_add(resv, idx, idx + 1);
1932 else {
1933 region_abort(resv, idx, idx + 1);
1934 ret = region_del(resv, idx, idx + 1);
1935 }
1936 break;
1937 default:
1938 BUG();
1939 }
1940
1941 if (vma->vm_flags & VM_MAYSHARE)
1942 return ret;
1943 else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
1944
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956
1957 if (ret)
1958 return 0;
1959 else
1960 return 1;
1961 }
1962 else
1963 return ret < 0 ? ret : 0;
1964}
1965
1966static long vma_needs_reservation(struct hstate *h,
1967 struct vm_area_struct *vma, unsigned long addr)
1968{
1969 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1970}
1971
1972static long vma_commit_reservation(struct hstate *h,
1973 struct vm_area_struct *vma, unsigned long addr)
1974{
1975 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1976}
1977
1978static void vma_end_reservation(struct hstate *h,
1979 struct vm_area_struct *vma, unsigned long addr)
1980{
1981 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1982}
1983
1984static long vma_add_reservation(struct hstate *h,
1985 struct vm_area_struct *vma, unsigned long addr)
1986{
1987 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
1988}

/*
 * This routine is called to restore a reservation on error paths.  In the
 * specific error paths, a huge page was allocated (via alloc_huge_page)
 * and is about to be freed.  If a reservation for the page existed,
 * alloc_huge_page would have consumed the reservation and set PagePrivate
 * in the newly allocated page.  When the page is freed via free_huge_page,
 * the global reservation count will be incremented if PagePrivate is set.
 * However, free_huge_page can not adjust the reserve map.  Adjust the
 * reserve map here to be consistent with global reserve count adjustments
 * to be made by free_huge_page.
 */
2001static void restore_reserve_on_error(struct hstate *h,
2002 struct vm_area_struct *vma, unsigned long address,
2003 struct page *page)
2004{
2005 if (unlikely(PagePrivate(page))) {
2006 long rc = vma_needs_reservation(h, vma, address);
2007
2008 if (unlikely(rc < 0)) {
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020 ClearPagePrivate(page);
2021 } else if (rc) {
2022 rc = vma_add_reservation(h, vma, address);
2023 if (unlikely(rc < 0))
2024
2025
2026
2027
2028 ClearPagePrivate(page);
2029 } else
2030 vma_end_reservation(h, vma, address);
2031 }
2032}
2033
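/*
 * Allocate a huge page for the given vma/address, consuming a reservation
 * and charging the hugetlb cgroup as needed.
 */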
2034struct page *alloc_huge_page(struct vm_area_struct *vma,
2035 unsigned long addr, int avoid_reserve)
2036{
2037 struct hugepage_subpool *spool = subpool_vma(vma);
2038 struct hstate *h = hstate_vma(vma);
2039 struct page *page;
2040 long map_chg, map_commit;
2041 long gbl_chg;
2042 int ret, idx;
2043 struct hugetlb_cgroup *h_cg;
2044
2045 idx = hstate_index(h);
	/*
	 * Examine the region/reserve map to determine if the process
	 * has a reservation for the page to be allocated.  A return
	 * code of zero indicates a reservation exists (no change).
	 */
2051 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2052 if (map_chg < 0)
2053 return ERR_PTR(-ENOMEM);
2054
	/*
	 * Processes that did not create the mapping will have no
	 * reserves as indicated by the region/reserve map.  Check
	 * that the allocation will not exceed the subpool limit.
	 * Allocations for MAP_NORESERVE mappings also need to be
	 * checked against any subpool limit.
	 */
2062 if (map_chg || avoid_reserve) {
2063 gbl_chg = hugepage_subpool_get_pages(spool, 1);
2064 if (gbl_chg < 0) {
2065 vma_end_reservation(h, vma, addr);
2066 return ERR_PTR(-ENOSPC);
2067 }
2068
		/*
		 * Even though there was no reservation in the region/reserve
		 * map, there could be reservations associated with the
		 * subpool that can be used.  This would be indicated if the
		 * return value of hugepage_subpool_get_pages() is zero.
		 * However, if avoid_reserve is specified we still avoid even
		 * the subpool reservations.
		 */
2077 if (avoid_reserve)
2078 gbl_chg = 1;
2079 }
2080
2081 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2082 if (ret)
2083 goto out_subpool_put;
2084
2085 spin_lock(&hugetlb_lock);
	/*
	 * gbl_chg indicates whether or not a page must be taken from the
	 * global free pool (global change).  gbl_chg == 0 indicates a
	 * reservation exists for the allocation.
	 */
2091 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2092 if (!page) {
2093 spin_unlock(&hugetlb_lock);
2094 page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
2095 if (!page)
2096 goto out_uncharge_cgroup;
2097 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2098 SetPagePrivate(page);
2099 h->resv_huge_pages--;
2100 }
2101 spin_lock(&hugetlb_lock);
2102 list_move(&page->lru, &h->hugepage_activelist);
2103
2104 }
2105 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2106 spin_unlock(&hugetlb_lock);
2107
2108 set_page_private(page, (unsigned long)spool);
2109
2110 map_commit = vma_commit_reservation(h, vma, addr);
2111 if (unlikely(map_chg > map_commit)) {
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121 long rsv_adjust;
2122
2123 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2124 hugetlb_acct_memory(h, -rsv_adjust);
2125 }
2126 return page;
2127
2128out_uncharge_cgroup:
2129 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2130out_subpool_put:
2131 if (map_chg || avoid_reserve)
2132 hugepage_subpool_put_pages(spool, 1);
2133 vma_end_reservation(h, vma, addr);
2134 return ERR_PTR(-ENOSPC);
2135}
2136
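/*
 * Allocate a huge page from memblock at boot time; architectures may
 * override this generic version via the weak alias below.
 */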
2137int alloc_bootmem_huge_page(struct hstate *h)
2138 __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2139int __alloc_bootmem_huge_page(struct hstate *h)
2140{
2141 struct huge_bootmem_page *m;
2142 int nr_nodes, node;
2143
2144 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2145 void *addr;
2146
2147 addr = memblock_alloc_try_nid_raw(
2148 huge_page_size(h), huge_page_size(h),
2149 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
2150 if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
			 */
2156 m = addr;
2157 goto found;
2158 }
2159 }
2160 return 0;
2161
2162found:
2163 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2164
2165 INIT_LIST_HEAD(&m->list);
2166 list_add(&m->list, &huge_boot_pages);
2167 m->hstate = h;
2168 return 1;
2169}
2170
2171static void __init prep_compound_huge_page(struct page *page,
2172 unsigned int order)
2173{
2174 if (unlikely(order > (MAX_ORDER - 1)))
2175 prep_compound_gigantic_page(page, order);
2176 else
2177 prep_compound_page(page, order);
2178}
2179
/*
 * Put bootmem huge pages into the standard lists after mem_map is up.
 */
2181static void __init gather_bootmem_prealloc(void)
2182{
2183 struct huge_bootmem_page *m;
2184
2185 list_for_each_entry(m, &huge_boot_pages, list) {
2186 struct page *page = virt_to_page(m);
2187 struct hstate *h = m->hstate;
2188
2189 WARN_ON(page_count(page) != 1);
2190 prep_compound_huge_page(page, h->order);
2191 WARN_ON(PageReserved(page));
2192 prep_new_huge_page(h, page, page_to_nid(page));
2193 put_page(page);

		/*
		 * If we had gigantic hugepages allocated at boot time, we need
		 * to restore the 'stolen' pages to totalram_pages in order to
		 * fix confusing memory reports from free(1) and other
		 * side-effects, like CommitLimit going negative.
		 */
2201 if (hstate_is_gigantic(h))
2202 adjust_managed_page_count(page, 1 << h->order);
2203 cond_resched();
2204 }
2205}
2206
2207static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2208{
2209 unsigned long i;
2210
2211 for (i = 0; i < h->max_huge_pages; ++i) {
2212 if (hstate_is_gigantic(h)) {
2213 if (!alloc_bootmem_huge_page(h))
2214 break;
2215 } else if (!alloc_pool_huge_page(h,
2216 &node_states[N_MEMORY]))
2217 break;
2218 cond_resched();
2219 }
2220 if (i < h->max_huge_pages) {
2221 char buf[32];
2222
2223 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2224 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
2225 h->max_huge_pages, buf, i);
2226 h->max_huge_pages = i;
2227 }
2228}
2229
2230static void __init hugetlb_init_hstates(void)
2231{
2232 struct hstate *h;
2233
2234 for_each_hstate(h) {
2235 if (minimum_order > huge_page_order(h))
2236 minimum_order = huge_page_order(h);
2237
		/* oversize hugepages were init'ed in early boot */
2239 if (!hstate_is_gigantic(h))
2240 hugetlb_hstate_alloc_pages(h);
2241 }
2242 VM_BUG_ON(minimum_order == UINT_MAX);
2243}
2244
2245static void __init report_hugepages(void)
2246{
2247 struct hstate *h;
2248
2249 for_each_hstate(h) {
2250 char buf[32];
2251
2252 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2253 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2254 buf, h->free_huge_pages);
2255 }
2256}
2257
2258#ifdef CONFIG_HIGHMEM
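/*
 * When shrinking the pool, free huge pages residing in lowmem first and
 * skip highmem pages, to relieve pressure on the more constrained zone.
 */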
2259static void try_to_free_low(struct hstate *h, unsigned long count,
2260 nodemask_t *nodes_allowed)
2261{
2262 int i;
2263
2264 if (hstate_is_gigantic(h))
2265 return;
2266
2267 for_each_node_mask(i, *nodes_allowed) {
2268 struct page *page, *next;
2269 struct list_head *freel = &h->hugepage_freelists[i];
2270 list_for_each_entry_safe(page, next, freel, lru) {
2271 if (count >= h->nr_huge_pages)
2272 return;
2273 if (PageHighMem(page))
2274 continue;
2275 list_del(&page->lru);
2276 update_and_free_page(h, page);
2277 h->free_huge_pages--;
2278 h->free_huge_pages_node[page_to_nid(page)]--;
2279 }
2280 }
2281}
2282#else
2283static inline void try_to_free_low(struct hstate *h, unsigned long count,
2284 nodemask_t *nodes_allowed)
2285{
2286}
2287#endif

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
2294static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2295 int delta)
2296{
2297 int nr_nodes, node;
2298
2299 VM_BUG_ON(delta != -1 && delta != 1);
2300
2301 if (delta < 0) {
2302 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2303 if (h->surplus_huge_pages_node[node])
2304 goto found;
2305 }
2306 } else {
2307 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2308 if (h->surplus_huge_pages_node[node] <
2309 h->nr_huge_pages_node[node])
2310 goto found;
2311 }
2312 }
2313 return 0;
2314
2315found:
2316 h->surplus_huge_pages += delta;
2317 h->surplus_huge_pages_node[node] += delta;
2318 return 1;
2319}
2320
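/* Huge pages in the pool that are not surplus pages */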
2321#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2322static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
2323 nodemask_t *nodes_allowed)
2324{
2325 unsigned long min_count, ret;
2326
2327 spin_lock(&hugetlb_lock);

	/*
	 * Check for a node specific request.
	 * Changing node specific huge page count may require a corresponding
	 * change to the global count.  In any case, the passed node mask
	 * (nodes_allowed) will restrict alloc/free to the specified node.
	 */
2335 if (nid != NUMA_NO_NODE) {
2336 unsigned long old_count = count;
2337
2338 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
		/*
		 * User may have specified a large count value which caused
		 * the above calculation to overflow.  In this case, they
		 * wanted to allocate as many huge pages as possible.  Set
		 * count to largest possible value to align with their
		 * intention.
		 */
2345 if (count < old_count)
2346 count = ULONG_MAX;
2347 }
2348
	/*
	 * Gigantic pages runtime allocation depends on the capability for
	 * large page range allocation.
	 * If the system does not provide this feature, return an error when
	 * the user tries to allocate gigantic pages but let the user free
	 * the boottime allocated gigantic pages.
	 */
2356 if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
2357 if (count > persistent_huge_pages(h)) {
2358 spin_unlock(&hugetlb_lock);
2359 return -EINVAL;
2360 }
2361
2362 }

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_surplus_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page.  That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
2375 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2376 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2377 break;
2378 }
2379
2380 while (count > persistent_huge_pages(h)) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
2386 spin_unlock(&hugetlb_lock);
2387
		/* yield cpu to avoid soft lockup */
2389 cond_resched();
2390
2391 ret = alloc_pool_huge_page(h, nodes_allowed);
2392 spin_lock(&hugetlb_lock);
2393 if (!ret)
2394 goto out;
2395
		/* Bail for signals.  Probably ctrl-c from user */
2397 if (signal_pending(current))
2398 goto out;
2399 }

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit.
	 */
2416 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2417 min_count = max(count, min_count);
2418 try_to_free_low(h, min_count, nodes_allowed);
2419 while (min_count < persistent_huge_pages(h)) {
2420 if (!free_pool_huge_page(h, nodes_allowed, 0))
2421 break;
2422 cond_resched_lock(&hugetlb_lock);
2423 }
2424 while (count < persistent_huge_pages(h)) {
2425 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2426 break;
2427 }
2428out:
2429 h->max_huge_pages = persistent_huge_pages(h);
2430 spin_unlock(&hugetlb_lock);
2431
2432 return 0;
2433}
2434
2435#define HSTATE_ATTR_RO(_name) \
2436 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2437
2438#define HSTATE_ATTR(_name) \
2439 static struct kobj_attribute _name##_attr = \
2440 __ATTR(_name, 0644, _name##_show, _name##_store)
2441
2442static struct kobject *hugepages_kobj;
2443static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2444
2445static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2446
2447static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2448{
2449 int i;
2450
2451 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2452 if (hstate_kobjs[i] == kobj) {
2453 if (nidp)
2454 *nidp = NUMA_NO_NODE;
2455 return &hstates[i];
2456 }
2457
2458 return kobj_to_node_hstate(kobj, nidp);
2459}
2460
2461static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2462 struct kobj_attribute *attr, char *buf)
2463{
2464 struct hstate *h;
2465 unsigned long nr_huge_pages;
2466 int nid;
2467
2468 h = kobj_to_hstate(kobj, &nid);
2469 if (nid == NUMA_NO_NODE)
2470 nr_huge_pages = h->nr_huge_pages;
2471 else
2472 nr_huge_pages = h->nr_huge_pages_node[nid];
2473
2474 return sprintf(buf, "%lu\n", nr_huge_pages);
2475}
2476
2477static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2478 struct hstate *h, int nid,
2479 unsigned long count, size_t len)
2480{
2481 int err;
2482 nodemask_t nodes_allowed, *n_mask;
2483
2484 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
2485 return -EINVAL;
2486
2487 if (nid == NUMA_NO_NODE) {
		/*
		 * global hstate attribute
		 */
2491 if (!(obey_mempolicy &&
2492 init_nodemask_of_mempolicy(&nodes_allowed)))
2493 n_mask = &node_states[N_MEMORY];
2494 else
2495 n_mask = &nodes_allowed;
2496 } else {
		/*
		 * Node specific request.  count adjustment happens in
		 * set_max_huge_pages() after acquiring hugetlb_lock.
		 */
2501 init_nodemask_of_node(&nodes_allowed, nid);
2502 n_mask = &nodes_allowed;
2503 }
2504
2505 err = set_max_huge_pages(h, count, nid, n_mask);
2506
2507 return err ? err : len;
2508}
2509
2510static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2511 struct kobject *kobj, const char *buf,
2512 size_t len)
2513{
2514 struct hstate *h;
2515 unsigned long count;
2516 int nid;
2517 int err;
2518
2519 err = kstrtoul(buf, 10, &count);
2520 if (err)
2521 return err;
2522
2523 h = kobj_to_hstate(kobj, &nid);
2524 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2525}
2526
2527static ssize_t nr_hugepages_show(struct kobject *kobj,
2528 struct kobj_attribute *attr, char *buf)
2529{
2530 return nr_hugepages_show_common(kobj, attr, buf);
2531}
2532
2533static ssize_t nr_hugepages_store(struct kobject *kobj,
2534 struct kobj_attribute *attr, const char *buf, size_t len)
2535{
2536 return nr_hugepages_store_common(false, kobj, buf, len);
2537}
2538HSTATE_ATTR(nr_hugepages);
2539
2540#ifdef CONFIG_NUMA

/*
 * hstate attribute for optionally mempolicy-based constraint on persistent
 * huge page alloc/free.
 */
2546static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2547 struct kobj_attribute *attr, char *buf)
2548{
2549 return nr_hugepages_show_common(kobj, attr, buf);
2550}
2551
2552static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2553 struct kobj_attribute *attr, const char *buf, size_t len)
2554{
2555 return nr_hugepages_store_common(true, kobj, buf, len);
2556}
2557HSTATE_ATTR(nr_hugepages_mempolicy);
2558#endif
2559
2560
2561static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2562 struct kobj_attribute *attr, char *buf)
2563{
2564 struct hstate *h = kobj_to_hstate(kobj, NULL);
2565 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2566}
2567
2568static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2569 struct kobj_attribute *attr, const char *buf, size_t count)
2570{
2571 int err;
2572 unsigned long input;
2573 struct hstate *h = kobj_to_hstate(kobj, NULL);
2574
2575 if (hstate_is_gigantic(h))
2576 return -EINVAL;
2577
2578 err = kstrtoul(buf, 10, &input);
2579 if (err)
2580 return err;
2581
2582 spin_lock(&hugetlb_lock);
2583 h->nr_overcommit_huge_pages = input;
2584 spin_unlock(&hugetlb_lock);
2585
2586 return count;
2587}
2588HSTATE_ATTR(nr_overcommit_hugepages);
2589
2590static ssize_t free_hugepages_show(struct kobject *kobj,
2591 struct kobj_attribute *attr, char *buf)
2592{
2593 struct hstate *h;
2594 unsigned long free_huge_pages;
2595 int nid;
2596
2597 h = kobj_to_hstate(kobj, &nid);
2598 if (nid == NUMA_NO_NODE)
2599 free_huge_pages = h->free_huge_pages;
2600 else
2601 free_huge_pages = h->free_huge_pages_node[nid];
2602
2603 return sprintf(buf, "%lu\n", free_huge_pages);
2604}
2605HSTATE_ATTR_RO(free_hugepages);
2606
2607static ssize_t resv_hugepages_show(struct kobject *kobj,
2608 struct kobj_attribute *attr, char *buf)
2609{
2610 struct hstate *h = kobj_to_hstate(kobj, NULL);
2611 return sprintf(buf, "%lu\n", h->resv_huge_pages);
2612}
2613HSTATE_ATTR_RO(resv_hugepages);
2614
2615static ssize_t surplus_hugepages_show(struct kobject *kobj,
2616 struct kobj_attribute *attr, char *buf)
2617{
2618 struct hstate *h;
2619 unsigned long surplus_huge_pages;
2620 int nid;
2621
2622 h = kobj_to_hstate(kobj, &nid);
2623 if (nid == NUMA_NO_NODE)
2624 surplus_huge_pages = h->surplus_huge_pages;
2625 else
2626 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2627
2628 return sprintf(buf, "%lu\n", surplus_huge_pages);
2629}
2630HSTATE_ATTR_RO(surplus_hugepages);
2631
2632static struct attribute *hstate_attrs[] = {
2633 &nr_hugepages_attr.attr,
2634 &nr_overcommit_hugepages_attr.attr,
2635 &free_hugepages_attr.attr,
2636 &resv_hugepages_attr.attr,
2637 &surplus_hugepages_attr.attr,
2638#ifdef CONFIG_NUMA
2639 &nr_hugepages_mempolicy_attr.attr,
2640#endif
2641 NULL,
2642};
2643
2644static const struct attribute_group hstate_attr_group = {
2645 .attrs = hstate_attrs,
2646};
2647
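/* Create the sysfs directory and attribute group for one hstate under parent. */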
2648static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2649 struct kobject **hstate_kobjs,
2650 const struct attribute_group *hstate_attr_group)
2651{
2652 int retval;
2653 int hi = hstate_index(h);
2654
2655 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2656 if (!hstate_kobjs[hi])
2657 return -ENOMEM;
2658
2659 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2660 if (retval)
2661 kobject_put(hstate_kobjs[hi]);
2662
2663 return retval;
2664}
2665
2666static void __init hugetlb_sysfs_init(void)
2667{
2668 struct hstate *h;
2669 int err;
2670
2671 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2672 if (!hugepages_kobj)
2673 return;
2674
2675 for_each_hstate(h) {
2676 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2677 hstate_kobjs, &hstate_attr_group);
2678 if (err)
2679 pr_err("Hugetlb: Unable to add hstate %s", h->name);
2680 }
2681}
2682
2683#ifdef CONFIG_NUMA
2684
2685
2686
2687
2688
2689
2690
2691
2692struct node_hstate {
2693 struct kobject *hugepages_kobj;
2694 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2695};
2696static struct node_hstate node_hstates[MAX_NUMNODES];
2697
/*
 * A subset of global hstate attributes for node devices
 */
2701static struct attribute *per_node_hstate_attrs[] = {
2702 &nr_hugepages_attr.attr,
2703 &free_hugepages_attr.attr,
2704 &surplus_hugepages_attr.attr,
2705 NULL,
2706};
2707
2708static const struct attribute_group per_node_hstate_attr_group = {
2709 .attrs = per_node_hstate_attrs,
2710};
2711
2712
2713
2714
2715
2716static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2717{
2718 int nid;
2719
2720 for (nid = 0; nid < nr_node_ids; nid++) {
2721 struct node_hstate *nhs = &node_hstates[nid];
2722 int i;
2723 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2724 if (nhs->hstate_kobjs[i] == kobj) {
2725 if (nidp)
2726 *nidp = nid;
2727 return &hstates[i];
2728 }
2729 }
2730
2731 BUG();
2732 return NULL;
2733}
2734
2735
2736
2737
2738
2739static void hugetlb_unregister_node(struct node *node)
2740{
2741 struct hstate *h;
2742 struct node_hstate *nhs = &node_hstates[node->dev.id];
2743
2744 if (!nhs->hugepages_kobj)
2745 return;
2746
2747 for_each_hstate(h) {
2748 int idx = hstate_index(h);
2749 if (nhs->hstate_kobjs[idx]) {
2750 kobject_put(nhs->hstate_kobjs[idx]);
2751 nhs->hstate_kobjs[idx] = NULL;
2752 }
2753 }
2754
2755 kobject_put(nhs->hugepages_kobj);
2756 nhs->hugepages_kobj = NULL;
2757}
2758
2759
2760
2761
2762
2763
2764static void hugetlb_register_node(struct node *node)
2765{
2766 struct hstate *h;
2767 struct node_hstate *nhs = &node_hstates[node->dev.id];
2768 int err;
2769
2770 if (nhs->hugepages_kobj)
2771 return;
2772
2773 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2774 &node->dev.kobj);
2775 if (!nhs->hugepages_kobj)
2776 return;
2777
2778 for_each_hstate(h) {
2779 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2780 nhs->hstate_kobjs,
2781 &per_node_hstate_attr_group);
2782 if (err) {
2783 pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2784 h->name, node->dev.id);
2785 hugetlb_unregister_node(node);
2786 break;
2787 }
2788 }
2789}
2790
/*
 * hugetlb init time:  register hstate attributes for all registered node
 * devices of nodes that have memory.  All on-line nodes should have
 * registered their associated device by this time.
 */
2796static void __init hugetlb_register_all_nodes(void)
2797{
2798 int nid;
2799
2800 for_each_node_state(nid, N_MEMORY) {
2801 struct node *node = node_devices[nid];
2802 if (node->dev.id == nid)
2803 hugetlb_register_node(node);
2804 }
2805
	/*
	 * Let the node device driver know we're here so it can
	 * [un]register hstate attributes on node hotplug.
	 */
2810 register_hugetlbfs_with_node(hugetlb_register_node,
2811 hugetlb_unregister_node);
2812}
2813#else
2814
2815static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2816{
2817 BUG();
2818 if (nidp)
2819 *nidp = -1;
2820 return NULL;
2821}
2822
2823static void hugetlb_register_all_nodes(void) { }
2824
2825#endif
2826
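/*
 * Subsystem init: choose the default hstate (falling back to HPAGE_SIZE if
 * the command line requested an unsupported default size), allocate any
 * boot-requested huge pages, create the sysfs, per-node and cgroup
 * interfaces, and size the fault mutex table to a power of two derived from
 * the possible CPU count.
 */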
2827static int __init hugetlb_init(void)
2828{
2829 int i;
2830
2831 if (!hugepages_supported())
2832 return 0;
2833
2834 if (!size_to_hstate(default_hstate_size)) {
2835 if (default_hstate_size != 0) {
2836 pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n",
2837 default_hstate_size, HPAGE_SIZE);
2838 }
2839
2840 default_hstate_size = HPAGE_SIZE;
2841 if (!size_to_hstate(default_hstate_size))
2842 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2843 }
2844 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2845 if (default_hstate_max_huge_pages) {
2846 if (!default_hstate.max_huge_pages)
2847 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2848 }
2849
2850 hugetlb_init_hstates();
2851 gather_bootmem_prealloc();
2852 report_hugepages();
2853
2854 hugetlb_sysfs_init();
2855 hugetlb_register_all_nodes();
2856 hugetlb_cgroup_file_init();
2857
2858#ifdef CONFIG_SMP
2859 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2860#else
2861 num_fault_mutexes = 1;
2862#endif
2863 hugetlb_fault_mutex_table =
2864 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
2865 GFP_KERNEL);
2866 BUG_ON(!hugetlb_fault_mutex_table);
2867
2868 for (i = 0; i < num_fault_mutexes; i++)
2869 mutex_init(&hugetlb_fault_mutex_table[i]);
2870 return 0;
2871}
2872subsys_initcall(hugetlb_init);
2873
/* Should be called on processing a hugepagesz=... option */
2875void __init hugetlb_bad_size(void)
2876{
2877 parsed_valid_hugepagesz = false;
2878}
2879
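/*
 * Register a new hstate for huge pages of the given order.  Typically called
 * from architecture code while parsing "hugepagesz=" and during arch hugetlb
 * setup; duplicate sizes are ignored with a warning.
 */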
2880void __init hugetlb_add_hstate(unsigned int order)
2881{
2882 struct hstate *h;
2883 unsigned long i;
2884
2885 if (size_to_hstate(PAGE_SIZE << order)) {
2886 pr_warn("hugepagesz= specified twice, ignoring\n");
2887 return;
2888 }
2889 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2890 BUG_ON(order == 0);
2891 h = &hstates[hugetlb_max_hstate++];
2892 h->order = order;
2893 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2894 h->nr_huge_pages = 0;
2895 h->free_huge_pages = 0;
2896 for (i = 0; i < MAX_NUMNODES; ++i)
2897 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2898 INIT_LIST_HEAD(&h->hugepage_activelist);
2899 h->next_nid_to_alloc = first_memory_node;
2900 h->next_nid_to_free = first_memory_node;
2901 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2902 huge_page_size(h)/1024);
2903
2904 parsed_hstate = h;
2905}
2906
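/*
 * Handle the "hugepages=" boot parameter.  It applies to the most recently
 * parsed "hugepagesz=" hstate, or to the default hstate if no size has been
 * given yet, e.g. (illustrative command line, not taken from this file):
 *
 *	hugepagesz=1G hugepages=16 hugepagesz=2M hugepages=512
 *
 * Gigantic (order >= MAX_ORDER) pools are allocated immediately, below,
 * because only the boot-time allocator can satisfy them.
 */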
2907static int __init hugetlb_nrpages_setup(char *s)
2908{
2909 unsigned long *mhp;
2910 static unsigned long *last_mhp;
2911
2912 if (!parsed_valid_hugepagesz) {
2913 pr_warn("hugepages = %s preceded by "
2914 "an unsupported hugepagesz, ignoring\n", s);
2915 parsed_valid_hugepagesz = true;
2916 return 1;
2917 }
	/*
	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
	 * yet, so this hugepages= parameter goes to the "default hstate".
	 */
2922 else if (!hugetlb_max_hstate)
2923 mhp = &default_hstate_max_huge_pages;
2924 else
2925 mhp = &parsed_hstate->max_huge_pages;
2926
2927 if (mhp == last_mhp) {
2928 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2929 return 1;
2930 }
2931
2932 if (sscanf(s, "%lu", mhp) <= 0)
2933 *mhp = 0;
2934
	/*
	 * Global state is always initialized later in hugetlb_init.
	 * But we need to allocate >= MAX_ORDER hstates here early to still
	 * use the boot-time allocator.
	 */
2940 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2941 hugetlb_hstate_alloc_pages(parsed_hstate);
2942
2943 last_mhp = mhp;
2944
2945 return 1;
2946}
2947__setup("hugepages=", hugetlb_nrpages_setup);
2948
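/*
 * Handle "default_hugepagesz=".  The value is only parsed here; it is
 * validated later in hugetlb_init(), where an unsupported size falls back
 * to HPAGE_SIZE.  Example (illustrative): default_hugepagesz=1G
 */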
2949static int __init hugetlb_default_setup(char *s)
2950{
2951 default_hstate_size = memparse(s, &s);
2952 return 1;
2953}
2954__setup("default_hugepagesz=", hugetlb_default_setup);
2955
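/* Sum a per-node counter array over the nodes allowed by the current cpuset. */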
2956static unsigned int cpuset_mems_nr(unsigned int *array)
2957{
2958 int node;
2959 unsigned int nr = 0;
2960
2961 for_each_node_mask(node, cpuset_current_mems_allowed)
2962 nr += array[node];
2963
2964 return nr;
2965}
2966
2967#ifdef CONFIG_SYSCTL
2968static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2969 struct ctl_table *table, int write,
2970 void __user *buffer, size_t *length, loff_t *ppos)
2971{
2972 struct hstate *h = &default_hstate;
2973 unsigned long tmp = h->max_huge_pages;
2974 int ret;
2975
2976 if (!hugepages_supported())
2977 return -EOPNOTSUPP;
2978
2979 table->data = &tmp;
2980 table->maxlen = sizeof(unsigned long);
2981 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2982 if (ret)
2983 goto out;
2984
2985 if (write)
2986 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2987 NUMA_NO_NODE, tmp, *length);
2988out:
2989 return ret;
2990}
2991
2992int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2993 void __user *buffer, size_t *length, loff_t *ppos)
2994{
2995
2996 return hugetlb_sysctl_handler_common(false, table, write,
2997 buffer, length, ppos);
2998}
2999
3000#ifdef CONFIG_NUMA
3001int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
3002 void __user *buffer, size_t *length, loff_t *ppos)
3003{
3004 return hugetlb_sysctl_handler_common(true, table, write,
3005 buffer, length, ppos);
3006}
3007#endif
3008
3009int hugetlb_overcommit_handler(struct ctl_table *table, int write,
3010 void __user *buffer,
3011 size_t *length, loff_t *ppos)
3012{
3013 struct hstate *h = &default_hstate;
3014 unsigned long tmp;
3015 int ret;
3016
3017 if (!hugepages_supported())
3018 return -EOPNOTSUPP;
3019
3020 tmp = h->nr_overcommit_huge_pages;
3021
3022 if (write && hstate_is_gigantic(h))
3023 return -EINVAL;
3024
3025 table->data = &tmp;
3026 table->maxlen = sizeof(unsigned long);
3027 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
3028 if (ret)
3029 goto out;
3030
3031 if (write) {
3032 spin_lock(&hugetlb_lock);
3033 h->nr_overcommit_huge_pages = tmp;
3034 spin_unlock(&hugetlb_lock);
3035 }
3036out:
3037 return ret;
3038}
3039
3040#endif
3041
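/*
 * Emit the hugetlb lines of /proc/meminfo.  Per-hstate detail is printed
 * only for the default hstate; the final "Hugetlb:" line accounts for all
 * hstates.  Illustrative output (values are examples only):
 *
 *	HugePages_Total:     512
 *	HugePages_Free:      512
 *	HugePages_Rsvd:        0
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 *	Hugetlb:         1048576 kB
 */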
3042void hugetlb_report_meminfo(struct seq_file *m)
3043{
3044 struct hstate *h;
3045 unsigned long total = 0;
3046
3047 if (!hugepages_supported())
3048 return;
3049
3050 for_each_hstate(h) {
3051 unsigned long count = h->nr_huge_pages;
3052
3053 total += (PAGE_SIZE << huge_page_order(h)) * count;
3054
3055 if (h == &default_hstate)
3056 seq_printf(m,
3057 "HugePages_Total: %5lu\n"
3058 "HugePages_Free: %5lu\n"
3059 "HugePages_Rsvd: %5lu\n"
3060 "HugePages_Surp: %5lu\n"
3061 "Hugepagesize: %8lu kB\n",
3062 count,
3063 h->free_huge_pages,
3064 h->resv_huge_pages,
3065 h->surplus_huge_pages,
3066 (PAGE_SIZE << huge_page_order(h)) / 1024);
3067 }
3068
3069 seq_printf(m, "Hugetlb: %8lu kB\n", total / 1024);
3070}
3071
3072int hugetlb_report_node_meminfo(int nid, char *buf)
3073{
3074 struct hstate *h = &default_hstate;
3075 if (!hugepages_supported())
3076 return 0;
3077 return sprintf(buf,
3078 "Node %d HugePages_Total: %5u\n"
3079 "Node %d HugePages_Free: %5u\n"
3080 "Node %d HugePages_Surp: %5u\n",
3081 nid, h->nr_huge_pages_node[nid],
3082 nid, h->free_huge_pages_node[nid],
3083 nid, h->surplus_huge_pages_node[nid]);
3084}
3085
3086void hugetlb_show_meminfo(void)
3087{
3088 struct hstate *h;
3089 int nid;
3090
3091 if (!hugepages_supported())
3092 return;
3093
3094 for_each_node_state(nid, N_MEMORY)
3095 for_each_hstate(h)
3096 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3097 nid,
3098 h->nr_huge_pages_node[nid],
3099 h->free_huge_pages_node[nid],
3100 h->surplus_huge_pages_node[nid],
3101 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3102}
3103
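/* Emit the per-mm "HugetlbPages:" line, as seen in /proc/<pid>/status. */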
3104void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3105{
3106 seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3107 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3108}
3109
3110
3111unsigned long hugetlb_total_pages(void)
3112{
3113 struct hstate *h;
3114 unsigned long nr_total_pages = 0;
3115
3116 for_each_hstate(h)
3117 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3118 return nr_total_pages;
3119}
3120
3121static int hugetlb_acct_memory(struct hstate *h, long delta)
3122{
3123 int ret = -ENOMEM;
3124
3125 spin_lock(&hugetlb_lock);
3126
	/*
	 * When cpuset is configured, it breaks strict hugetlb page
	 * reservation: accounting is done against a global counter, so a
	 * reservation is never checked against page availability in the
	 * current cpuset, and a task can still be OOM-killed for lack of
	 * free huge pages in its cpuset.  Enforcing strict accounting per
	 * cpuset is impractical because tasks and memory nodes can move
	 * between cpusets at any time.
	 *
	 * Changing the semantics of shared hugetlb mappings under cpuset is
	 * undesirable, however, so for MAP_SHARED mappings we still maintain
	 * reservation and commitment accounting just as for private mappings.
	 */
3143 if (delta > 0) {
3144 if (gather_surplus_pages(h, delta) < 0)
3145 goto out;
3146
3147 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
3148 return_unused_surplus_pages(h, delta);
3149 goto out;
3150 }
3151 }
3152
3153 ret = 0;
3154 if (delta < 0)
3155 return_unused_surplus_pages(h, (unsigned long) -delta);
3156
3157out:
3158 spin_unlock(&hugetlb_lock);
3159 return ret;
3160}
3161
3162static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3163{
3164 struct resv_map *resv = vma_resv_map(vma);
3165
	/*
	 * This new VMA should share its siblings reservation map if present.
	 * The VMA will only ever have a valid reservation map pointer where
	 * it is being copied for another still existing VMA.  As that VMA
	 * has a reference to the reservation map it cannot disappear until
	 * after this open call completes.  It is therefore safe to take a
	 * new reference here without additional locking.
	 */
3174 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3175 kref_get(&resv->refs);
3176}
3177
3178static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3179{
3180 struct hstate *h = hstate_vma(vma);
3181 struct resv_map *resv = vma_resv_map(vma);
3182 struct hugepage_subpool *spool = subpool_vma(vma);
3183 unsigned long reserve, start, end;
3184 long gbl_reserve;
3185
3186 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3187 return;
3188
3189 start = vma_hugecache_offset(h, vma, vma->vm_start);
3190 end = vma_hugecache_offset(h, vma, vma->vm_end);
3191
3192 reserve = (end - start) - region_count(resv, start, end);
3193
3194 kref_put(&resv->refs, resv_map_release);
3195
3196 if (reserve) {
		/*
		 * Decrement reserve counts.  The global reserve count may be
		 * adjusted if the subpool has a minimum size.
		 */
3201 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3202 hugetlb_acct_memory(h, -gbl_reserve);
3203 }
3204}
3205
3206static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3207{
3208 if (addr & ~(huge_page_mask(hstate_vma(vma))))
3209 return -EINVAL;
3210 return 0;
3211}
3212
3213static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
3214{
3215 struct hstate *hstate = hstate_vma(vma);
3216
3217 return 1UL << huge_page_shift(hstate);
3218}
3219
/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
3226static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
3227{
3228 BUG();
3229 return 0;
3230}
3231
/*
 * When a new function gets added to hugetlb_vm_ops, please consider adding
 * handling of HWPOISON hugepages to your new function.
 */
3239const struct vm_operations_struct hugetlb_vm_ops = {
3240 .fault = hugetlb_vm_op_fault,
3241 .open = hugetlb_vm_op_open,
3242 .close = hugetlb_vm_op_close,
3243 .split = hugetlb_vm_op_split,
3244 .pagesize = hugetlb_vm_op_pagesize,
3245};
3246
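/*
 * Build a huge PTE for @page with protections taken from the VMA.  Writable
 * mappings get a dirty, writable entry up front; read-only mappings are
 * write-protected so that a later write faults into hugetlb_cow().
 */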
3247static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3248 int writable)
3249{
3250 pte_t entry;
3251
3252 if (writable) {
3253 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3254 vma->vm_page_prot)));
3255 } else {
3256 entry = huge_pte_wrprotect(mk_huge_pte(page,
3257 vma->vm_page_prot));
3258 }
3259 entry = pte_mkyoung(entry);
3260 entry = pte_mkhuge(entry);
3261 entry = arch_make_huge_pte(entry, vma, page, writable);
3262
3263 return entry;
3264}
3265
3266static void set_huge_ptep_writable(struct vm_area_struct *vma,
3267 unsigned long address, pte_t *ptep)
3268{
3269 pte_t entry;
3270
3271 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3272 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3273 update_mmu_cache(vma, address, ptep);
3274}
3275
3276bool is_hugetlb_entry_migration(pte_t pte)
3277{
3278 swp_entry_t swp;
3279
3280 if (huge_pte_none(pte) || pte_present(pte))
3281 return false;
3282 swp = pte_to_swp_entry(pte);
3283 if (non_swap_entry(swp) && is_migration_entry(swp))
3284 return true;
3285 else
3286 return false;
3287}
3288
3289static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3290{
3291 swp_entry_t swp;
3292
3293 if (huge_pte_none(pte) || pte_present(pte))
3294 return 0;
3295 swp = pte_to_swp_entry(pte);
3296 if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3297 return 1;
3298 else
3299 return 0;
3300}
3301
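/*
 * Copy the hugetlb part of a mm at fork time.  Shared mappings simply take a
 * page reference and duplicate the rmap; private (MAYWRITE-only) mappings
 * are write-protected in both parent and child so the next write triggers
 * copy-on-write.
 */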
3302int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3303 struct vm_area_struct *vma)
3304{
3305 pte_t *src_pte, *dst_pte, entry, dst_entry;
3306 struct page *ptepage;
3307 unsigned long addr;
3308 int cow;
3309 struct hstate *h = hstate_vma(vma);
3310 unsigned long sz = huge_page_size(h);
3311 struct mmu_notifier_range range;
3312 int ret = 0;
3313
3314 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3315
3316 if (cow) {
3317 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
3318 vma->vm_start,
3319 vma->vm_end);
3320 mmu_notifier_invalidate_range_start(&range);
3321 }
3322
3323 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3324 spinlock_t *src_ptl, *dst_ptl;
3325 src_pte = huge_pte_offset(src, addr, sz);
3326 if (!src_pte)
3327 continue;
3328 dst_pte = huge_pte_alloc(dst, addr, sz);
3329 if (!dst_pte) {
3330 ret = -ENOMEM;
3331 break;
3332 }
3333
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343 dst_entry = huge_ptep_get(dst_pte);
3344 if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
3345 continue;
3346
3347 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3348 src_ptl = huge_pte_lockptr(h, src, src_pte);
3349 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3350 entry = huge_ptep_get(src_pte);
3351 dst_entry = huge_ptep_get(dst_pte);
3352 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
3353
3354
3355
3356
3357
3358 ;
3359 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3360 is_hugetlb_entry_hwpoisoned(entry))) {
3361 swp_entry_t swp_entry = pte_to_swp_entry(entry);
3362
3363 if (is_write_migration_entry(swp_entry) && cow) {
3364
3365
3366
3367
3368 make_migration_entry_read(&swp_entry);
3369 entry = swp_entry_to_pte(swp_entry);
3370 set_huge_swap_pte_at(src, addr, src_pte,
3371 entry, sz);
3372 }
3373 set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
3374 } else {
3375 if (cow) {
3376
3377
3378
3379
3380
3381
3382
3383 huge_ptep_set_wrprotect(src, addr, src_pte);
3384 }
3385 entry = huge_ptep_get(src_pte);
3386 ptepage = pte_page(entry);
3387 get_page(ptepage);
3388 page_dup_rmap(ptepage, true);
3389 set_huge_pte_at(dst, addr, dst_pte, entry);
3390 hugetlb_count_add(pages_per_huge_page(h), dst);
3391 }
3392 spin_unlock(src_ptl);
3393 spin_unlock(dst_ptl);
3394 }
3395
3396 if (cow)
3397 mmu_notifier_invalidate_range_end(&range);
3398
3399 return ret;
3400}
3401
3402void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3403 unsigned long start, unsigned long end,
3404 struct page *ref_page)
3405{
3406 struct mm_struct *mm = vma->vm_mm;
3407 unsigned long address;
3408 pte_t *ptep;
3409 pte_t pte;
3410 spinlock_t *ptl;
3411 struct page *page;
3412 struct hstate *h = hstate_vma(vma);
3413 unsigned long sz = huge_page_size(h);
3414 struct mmu_notifier_range range;
3415
3416 WARN_ON(!is_vm_hugetlb_page(vma));
3417 BUG_ON(start & ~huge_page_mask(h));
3418 BUG_ON(end & ~huge_page_mask(h));
3419
3420
3421
3422
3423
3424 tlb_change_page_size(tlb, sz);
3425 tlb_start_vma(tlb, vma);
3426
3427
3428
3429
3430 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
3431 end);
3432 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
3433 mmu_notifier_invalidate_range_start(&range);
3434 address = start;
3435 for (; address < end; address += sz) {
3436 ptep = huge_pte_offset(mm, address, sz);
3437 if (!ptep)
3438 continue;
3439
3440 ptl = huge_pte_lock(h, mm, ptep);
3441 if (huge_pmd_unshare(mm, &address, ptep)) {
3442 spin_unlock(ptl);
3443
3444
3445
3446
3447 continue;
3448 }
3449
3450 pte = huge_ptep_get(ptep);
3451 if (huge_pte_none(pte)) {
3452 spin_unlock(ptl);
3453 continue;
3454 }
3455
3456
3457
3458
3459
3460 if (unlikely(!pte_present(pte))) {
3461 huge_pte_clear(mm, address, ptep, sz);
3462 spin_unlock(ptl);
3463 continue;
3464 }
3465
3466 page = pte_page(pte);
3467
3468
3469
3470
3471
3472 if (ref_page) {
3473 if (page != ref_page) {
3474 spin_unlock(ptl);
3475 continue;
3476 }
3477
3478
3479
3480
3481
3482 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3483 }
3484
3485 pte = huge_ptep_get_and_clear(mm, address, ptep);
3486 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
3487 if (huge_pte_dirty(pte))
3488 set_page_dirty(page);
3489
3490 hugetlb_count_sub(pages_per_huge_page(h), mm);
3491 page_remove_rmap(page, true);
3492
3493 spin_unlock(ptl);
3494 tlb_remove_page_size(tlb, page, huge_page_size(h));
3495
3496
3497
3498 if (ref_page)
3499 break;
3500 }
3501 mmu_notifier_invalidate_range_end(&range);
3502 tlb_end_vma(tlb, vma);
3503}
3504
3505void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3506 struct vm_area_struct *vma, unsigned long start,
3507 unsigned long end, struct page *ref_page)
3508{
3509 __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3510
	/*
	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
	 * test will fail on a vma being torn down, and not grab a page table
	 * on its way out.  We're lucky that the flag has such an appropriate
	 * name, and can in fact be safely cleared here.  We could clear it
	 * before the __unmap_hugepage_range above, but all that's necessary
	 * is to clear it before releasing the i_mmap_rwsem: in this context
	 * the VMA is about to be destroyed and the i_mmap_rwsem is held.
	 */
3521 vma->vm_flags &= ~VM_MAYSHARE;
3522}
3523
3524void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3525 unsigned long end, struct page *ref_page)
3526{
3527 struct mm_struct *mm;
3528 struct mmu_gather tlb;
3529 unsigned long tlb_start = start;
3530 unsigned long tlb_end = end;
3531
3532
3533
3534
3535
3536
3537
3538
3539 adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
3540
3541 mm = vma->vm_mm;
3542
3543 tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
3544 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3545 tlb_finish_mmu(&tlb, tlb_start, tlb_end);
3546}
3547
3548
3549
3550
3551
3552
3553
3554static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3555 struct page *page, unsigned long address)
3556{
3557 struct hstate *h = hstate_vma(vma);
3558 struct vm_area_struct *iter_vma;
3559 struct address_space *mapping;
3560 pgoff_t pgoff;
3561
3562
3563
3564
3565
3566 address = address & huge_page_mask(h);
3567 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3568 vma->vm_pgoff;
3569 mapping = vma->vm_file->f_mapping;
3570
3571
3572
3573
3574
3575
3576 i_mmap_lock_write(mapping);
3577 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3578
3579 if (iter_vma == vma)
3580 continue;
3581
3582
3583
3584
3585
3586
3587 if (iter_vma->vm_flags & VM_MAYSHARE)
3588 continue;
3589
3590
3591
3592
3593
3594
3595
3596
3597 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3598 unmap_hugepage_range(iter_vma, address,
3599 address + huge_page_size(h), page);
3600 }
3601 i_mmap_unlock_write(mapping);
3602}
3603
3604
3605
3606
3607
3608
3609
3610static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3611 unsigned long address, pte_t *ptep,
3612 struct page *pagecache_page, spinlock_t *ptl)
3613{
3614 pte_t pte;
3615 struct hstate *h = hstate_vma(vma);
3616 struct page *old_page, *new_page;
3617 int outside_reserve = 0;
3618 vm_fault_t ret = 0;
3619 unsigned long haddr = address & huge_page_mask(h);
3620 struct mmu_notifier_range range;
3621
3622 pte = huge_ptep_get(ptep);
3623 old_page = pte_page(pte);
3624
3625retry_avoidcopy:
3626
3627
3628 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3629 page_move_anon_rmap(old_page, vma);
3630 set_huge_ptep_writable(vma, haddr, ptep);
3631 return 0;
3632 }
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3644 old_page != pagecache_page)
3645 outside_reserve = 1;
3646
3647 get_page(old_page);
3648
3649
3650
3651
3652
3653 spin_unlock(ptl);
3654 new_page = alloc_huge_page(vma, haddr, outside_reserve);
3655
3656 if (IS_ERR(new_page)) {
3657
3658
3659
3660
3661
3662
3663
3664 if (outside_reserve) {
3665 put_page(old_page);
3666 BUG_ON(huge_pte_none(pte));
3667 unmap_ref_private(mm, vma, old_page, haddr);
3668 BUG_ON(huge_pte_none(pte));
3669 spin_lock(ptl);
3670 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3671 if (likely(ptep &&
3672 pte_same(huge_ptep_get(ptep), pte)))
3673 goto retry_avoidcopy;
3674
3675
3676
3677
3678 return 0;
3679 }
3680
3681 ret = vmf_error(PTR_ERR(new_page));
3682 goto out_release_old;
3683 }
3684
3685
3686
3687
3688
3689 if (unlikely(anon_vma_prepare(vma))) {
3690 ret = VM_FAULT_OOM;
3691 goto out_release_all;
3692 }
3693
3694 copy_user_huge_page(new_page, old_page, address, vma,
3695 pages_per_huge_page(h));
3696 __SetPageUptodate(new_page);
3697
3698 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
3699 haddr + huge_page_size(h));
3700 mmu_notifier_invalidate_range_start(&range);
3701
3702
3703
3704
3705
3706 spin_lock(ptl);
3707 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3708 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3709 ClearPagePrivate(new_page);
3710
3711
3712 huge_ptep_clear_flush(vma, haddr, ptep);
3713 mmu_notifier_invalidate_range(mm, range.start, range.end);
3714 set_huge_pte_at(mm, haddr, ptep,
3715 make_huge_pte(vma, new_page, 1));
3716 page_remove_rmap(old_page, true);
3717 hugepage_add_new_anon_rmap(new_page, vma, haddr);
3718 set_page_huge_active(new_page);
3719
3720 new_page = old_page;
3721 }
3722 spin_unlock(ptl);
3723 mmu_notifier_invalidate_range_end(&range);
3724out_release_all:
3725 restore_reserve_on_error(h, vma, haddr, new_page);
3726 put_page(new_page);
3727out_release_old:
3728 put_page(old_page);
3729
3730 spin_lock(ptl);
3731 return ret;
3732}
3733
/* Return the locked pagecache page backing the given address in the VMA. */
3735static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3736 struct vm_area_struct *vma, unsigned long address)
3737{
3738 struct address_space *mapping;
3739 pgoff_t idx;
3740
3741 mapping = vma->vm_file->f_mapping;
3742 idx = vma_hugecache_offset(h, vma, address);
3743
3744 return find_lock_page(mapping, idx);
3745}
3746
/*
 * Return whether there is a page cache page backing the given address in the
 * VMA.  The page is not locked here, so the result is only a racy hint; the
 * caller (follow_hugetlb_page(), for the FOLL_DUMP case) tolerates that.
 */
3751static bool hugetlbfs_pagecache_present(struct hstate *h,
3752 struct vm_area_struct *vma, unsigned long address)
3753{
3754 struct address_space *mapping;
3755 pgoff_t idx;
3756 struct page *page;
3757
3758 mapping = vma->vm_file->f_mapping;
3759 idx = vma_hugecache_offset(h, vma, address);
3760
3761 page = find_get_page(mapping, idx);
3762 if (page)
3763 put_page(page);
3764 return page != NULL;
3765}
3766
3767int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3768 pgoff_t idx)
3769{
3770 struct inode *inode = mapping->host;
3771 struct hstate *h = hstate_inode(inode);
3772 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3773
3774 if (err)
3775 return err;
3776 ClearPagePrivate(page);
3777
3778
3779
3780
3781
3782 set_page_dirty(page);
3783
3784 spin_lock(&inode->i_lock);
3785 inode->i_blocks += blocks_per_huge_page(h);
3786 spin_unlock(&inode->i_lock);
3787 return 0;
3788}
3789
3790static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
3791 struct vm_area_struct *vma,
3792 struct address_space *mapping, pgoff_t idx,
3793 unsigned long address, pte_t *ptep, unsigned int flags)
3794{
3795 struct hstate *h = hstate_vma(vma);
3796 vm_fault_t ret = VM_FAULT_SIGBUS;
3797 int anon_rmap = 0;
3798 unsigned long size;
3799 struct page *page;
3800 pte_t new_pte;
3801 spinlock_t *ptl;
3802 unsigned long haddr = address & huge_page_mask(h);
3803 bool new_page = false;
3804
3805
3806
3807
3808
3809
3810 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3811 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3812 current->pid);
3813 return ret;
3814 }
3815
3816
3817
3818
3819
3820retry:
3821 page = find_lock_page(mapping, idx);
3822 if (!page) {
3823 size = i_size_read(mapping->host) >> huge_page_shift(h);
3824 if (idx >= size)
3825 goto out;
3826
3827
3828
3829
3830 if (userfaultfd_missing(vma)) {
3831 u32 hash;
3832 struct vm_fault vmf = {
3833 .vma = vma,
3834 .address = haddr,
3835 .flags = flags,
3836
3837
3838
3839
3840
3841
3842
3843 };
3844
3845
3846
3847
3848
3849
3850 hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
3851 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3852 ret = handle_userfault(&vmf, VM_UFFD_MISSING);
3853 mutex_lock(&hugetlb_fault_mutex_table[hash]);
3854 goto out;
3855 }
3856
3857 page = alloc_huge_page(vma, haddr, 0);
3858 if (IS_ERR(page)) {
3859 ret = vmf_error(PTR_ERR(page));
3860 goto out;
3861 }
3862 clear_huge_page(page, address, pages_per_huge_page(h));
3863 __SetPageUptodate(page);
3864 new_page = true;
3865
3866 if (vma->vm_flags & VM_MAYSHARE) {
3867 int err = huge_add_to_page_cache(page, mapping, idx);
3868 if (err) {
3869 put_page(page);
3870 if (err == -EEXIST)
3871 goto retry;
3872 goto out;
3873 }
3874 } else {
3875 lock_page(page);
3876 if (unlikely(anon_vma_prepare(vma))) {
3877 ret = VM_FAULT_OOM;
3878 goto backout_unlocked;
3879 }
3880 anon_rmap = 1;
3881 }
3882 } else {
3883
3884
3885
3886
3887
3888 if (unlikely(PageHWPoison(page))) {
3889 ret = VM_FAULT_HWPOISON |
3890 VM_FAULT_SET_HINDEX(hstate_index(h));
3891 goto backout_unlocked;
3892 }
3893 }
3894
3895
3896
3897
3898
3899
3900
3901 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3902 if (vma_needs_reservation(h, vma, haddr) < 0) {
3903 ret = VM_FAULT_OOM;
3904 goto backout_unlocked;
3905 }
3906
3907 vma_end_reservation(h, vma, haddr);
3908 }
3909
3910 ptl = huge_pte_lock(h, mm, ptep);
3911 size = i_size_read(mapping->host) >> huge_page_shift(h);
3912 if (idx >= size)
3913 goto backout;
3914
3915 ret = 0;
3916 if (!huge_pte_none(huge_ptep_get(ptep)))
3917 goto backout;
3918
3919 if (anon_rmap) {
3920 ClearPagePrivate(page);
3921 hugepage_add_new_anon_rmap(page, vma, haddr);
3922 } else
3923 page_dup_rmap(page, true);
3924 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3925 && (vma->vm_flags & VM_SHARED)));
3926 set_huge_pte_at(mm, haddr, ptep, new_pte);
3927
3928 hugetlb_count_add(pages_per_huge_page(h), mm);
3929 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3930
3931 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
3932 }
3933
3934 spin_unlock(ptl);
3935
3936
3937
3938
3939
3940
3941 if (new_page)
3942 set_page_huge_active(page);
3943
3944 unlock_page(page);
3945out:
3946 return ret;
3947
3948backout:
3949 spin_unlock(ptl);
3950backout_unlocked:
3951 unlock_page(page);
3952 restore_reserve_on_error(h, vma, haddr, page);
3953 put_page(page);
3954 goto out;
3955}
3956
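/*
 * Hash a (mapping, index) pair to one of the page fault mutexes.  The table
 * size is a power of two (see hugetlb_init()), so masking with
 * num_fault_mutexes - 1 picks a slot; concurrent faults on the same page of
 * the same file always serialize on the same mutex.  The hstate and address
 * arguments are currently unused here.
 */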
3957#ifdef CONFIG_SMP
3958u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
3959 pgoff_t idx, unsigned long address)
3960{
3961 unsigned long key[2];
3962 u32 hash;
3963
3964 key[0] = (unsigned long) mapping;
3965 key[1] = idx;
3966
3967 hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3968
3969 return hash & (num_fault_mutexes - 1);
3970}
3971#else
3972
3973
3974
3975
3976u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
3977 pgoff_t idx, unsigned long address)
3978{
3979 return 0;
3980}
3981#endif
3982
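/*
 * Top-level hugetlb fault handler.  It serializes against other faults on
 * the same page via the fault mutex table, handles not-present pages through
 * hugetlb_no_page(), and performs copy-on-write for write faults on
 * read-only entries via hugetlb_cow().
 */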
3983vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3984 unsigned long address, unsigned int flags)
3985{
3986 pte_t *ptep, entry;
3987 spinlock_t *ptl;
3988 vm_fault_t ret;
3989 u32 hash;
3990 pgoff_t idx;
3991 struct page *page = NULL;
3992 struct page *pagecache_page = NULL;
3993 struct hstate *h = hstate_vma(vma);
3994 struct address_space *mapping;
3995 int need_wait_lock = 0;
3996 unsigned long haddr = address & huge_page_mask(h);
3997
3998 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3999 if (ptep) {
4000 entry = huge_ptep_get(ptep);
4001 if (unlikely(is_hugetlb_entry_migration(entry))) {
4002 migration_entry_wait_huge(vma, mm, ptep);
4003 return 0;
4004 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
4005 return VM_FAULT_HWPOISON_LARGE |
4006 VM_FAULT_SET_HINDEX(hstate_index(h));
4007 } else {
4008 ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
4009 if (!ptep)
4010 return VM_FAULT_OOM;
4011 }
4012
4013 mapping = vma->vm_file->f_mapping;
4014 idx = vma_hugecache_offset(h, vma, haddr);
4015
4016
4017
4018
4019
4020
4021 hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
4022 mutex_lock(&hugetlb_fault_mutex_table[hash]);
4023
4024 entry = huge_ptep_get(ptep);
4025 if (huge_pte_none(entry)) {
4026 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
4027 goto out_mutex;
4028 }
4029
4030 ret = 0;
4031
4032
4033
4034
4035
4036
4037
4038
4039 if (!pte_present(entry))
4040 goto out_mutex;
4041
4042
4043
4044
4045
4046
4047
4048
4049
4050 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
4051 if (vma_needs_reservation(h, vma, haddr) < 0) {
4052 ret = VM_FAULT_OOM;
4053 goto out_mutex;
4054 }
4055
4056 vma_end_reservation(h, vma, haddr);
4057
4058 if (!(vma->vm_flags & VM_MAYSHARE))
4059 pagecache_page = hugetlbfs_pagecache_page(h,
4060 vma, haddr);
4061 }
4062
4063 ptl = huge_pte_lock(h, mm, ptep);
4064
4065
4066 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
4067 goto out_ptl;
4068
4069
4070
4071
4072
4073
4074 page = pte_page(entry);
4075 if (page != pagecache_page)
4076 if (!trylock_page(page)) {
4077 need_wait_lock = 1;
4078 goto out_ptl;
4079 }
4080
4081 get_page(page);
4082
4083 if (flags & FAULT_FLAG_WRITE) {
4084 if (!huge_pte_write(entry)) {
4085 ret = hugetlb_cow(mm, vma, address, ptep,
4086 pagecache_page, ptl);
4087 goto out_put_page;
4088 }
4089 entry = huge_pte_mkdirty(entry);
4090 }
4091 entry = pte_mkyoung(entry);
4092 if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
4093 flags & FAULT_FLAG_WRITE))
4094 update_mmu_cache(vma, haddr, ptep);
4095out_put_page:
4096 if (page != pagecache_page)
4097 unlock_page(page);
4098 put_page(page);
4099out_ptl:
4100 spin_unlock(ptl);
4101
4102 if (pagecache_page) {
4103 unlock_page(pagecache_page);
4104 put_page(pagecache_page);
4105 }
4106out_mutex:
4107 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4108
4109
4110
4111
4112
4113
4114
4115 if (need_wait_lock)
4116 wait_on_page_locked(page);
4117 return ret;
4118}
4119
4120
4121
4122
4123
4124int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4125 pte_t *dst_pte,
4126 struct vm_area_struct *dst_vma,
4127 unsigned long dst_addr,
4128 unsigned long src_addr,
4129 struct page **pagep)
4130{
4131 struct address_space *mapping;
4132 pgoff_t idx;
4133 unsigned long size;
4134 int vm_shared = dst_vma->vm_flags & VM_SHARED;
4135 struct hstate *h = hstate_vma(dst_vma);
4136 pte_t _dst_pte;
4137 spinlock_t *ptl;
4138 int ret;
4139 struct page *page;
4140
4141 if (!*pagep) {
4142 ret = -ENOMEM;
4143 page = alloc_huge_page(dst_vma, dst_addr, 0);
4144 if (IS_ERR(page))
4145 goto out;
4146
4147 ret = copy_huge_page_from_user(page,
4148 (const void __user *) src_addr,
4149 pages_per_huge_page(h), false);
4150
4151
4152 if (unlikely(ret)) {
4153 ret = -ENOENT;
4154 *pagep = page;
4155
4156 goto out;
4157 }
4158 } else {
4159 page = *pagep;
4160 *pagep = NULL;
4161 }
4162
4163
4164
4165
4166
4167
4168 __SetPageUptodate(page);
4169
4170 mapping = dst_vma->vm_file->f_mapping;
4171 idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4172
4173
4174
4175
4176 if (vm_shared) {
4177 size = i_size_read(mapping->host) >> huge_page_shift(h);
4178 ret = -EFAULT;
4179 if (idx >= size)
4180 goto out_release_nounlock;
4181
4182
4183
4184
4185
4186
4187
4188 ret = huge_add_to_page_cache(page, mapping, idx);
4189 if (ret)
4190 goto out_release_nounlock;
4191 }
4192
4193 ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
4194 spin_lock(ptl);
4195
4196
4197
4198
4199
4200
4201
4202
4203
4204
4205 size = i_size_read(mapping->host) >> huge_page_shift(h);
4206 ret = -EFAULT;
4207 if (idx >= size)
4208 goto out_release_unlock;
4209
4210 ret = -EEXIST;
4211 if (!huge_pte_none(huge_ptep_get(dst_pte)))
4212 goto out_release_unlock;
4213
4214 if (vm_shared) {
4215 page_dup_rmap(page, true);
4216 } else {
4217 ClearPagePrivate(page);
4218 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
4219 }
4220
4221 _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
4222 if (dst_vma->vm_flags & VM_WRITE)
4223 _dst_pte = huge_pte_mkdirty(_dst_pte);
4224 _dst_pte = pte_mkyoung(_dst_pte);
4225
4226 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
4227
4228 (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
4229 dst_vma->vm_flags & VM_WRITE);
4230 hugetlb_count_add(pages_per_huge_page(h), dst_mm);
4231
4232
4233 update_mmu_cache(dst_vma, dst_addr, dst_pte);
4234
4235 spin_unlock(ptl);
4236 set_page_huge_active(page);
4237 if (vm_shared)
4238 unlock_page(page);
4239 ret = 0;
4240out:
4241 return ret;
4242out_release_unlock:
4243 spin_unlock(ptl);
4244 if (vm_shared)
4245 unlock_page(page);
4246out_release_nounlock:
4247 put_page(page);
4248 goto out;
4249}
4250
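/*
 * get_user_pages() helper for hugetlb VMAs: walk the requested range,
 * faulting pages in as needed, and fill @pages/@vmas one base-page entry at
 * a time.  Returns the number of entries processed, or an error if nothing
 * could be processed.
 */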
4251long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4252 struct page **pages, struct vm_area_struct **vmas,
4253 unsigned long *position, unsigned long *nr_pages,
4254 long i, unsigned int flags, int *nonblocking)
4255{
4256 unsigned long pfn_offset;
4257 unsigned long vaddr = *position;
4258 unsigned long remainder = *nr_pages;
4259 struct hstate *h = hstate_vma(vma);
4260 int err = -EFAULT;
4261
4262 while (vaddr < vma->vm_end && remainder) {
4263 pte_t *pte;
4264 spinlock_t *ptl = NULL;
4265 int absent;
4266 struct page *page;
4267
4268
4269
4270
4271
4272 if (fatal_signal_pending(current)) {
4273 remainder = 0;
4274 break;
4275 }
4276
4277
4278
4279
4280
4281
4282
4283
4284 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
4285 huge_page_size(h));
4286 if (pte)
4287 ptl = huge_pte_lock(h, mm, pte);
4288 absent = !pte || huge_pte_none(huge_ptep_get(pte));
4289
4290
4291
4292
4293
4294
4295
4296
4297 if (absent && (flags & FOLL_DUMP) &&
4298 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
4299 if (pte)
4300 spin_unlock(ptl);
4301 remainder = 0;
4302 break;
4303 }
4304
4305
4306
4307
4308
4309
4310
4311
4312
4313
4314
4315 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4316 ((flags & FOLL_WRITE) &&
4317 !huge_pte_write(huge_ptep_get(pte)))) {
4318 vm_fault_t ret;
4319 unsigned int fault_flags = 0;
4320
4321 if (pte)
4322 spin_unlock(ptl);
4323 if (flags & FOLL_WRITE)
4324 fault_flags |= FAULT_FLAG_WRITE;
4325 if (nonblocking)
4326 fault_flags |= FAULT_FLAG_ALLOW_RETRY;
4327 if (flags & FOLL_NOWAIT)
4328 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4329 FAULT_FLAG_RETRY_NOWAIT;
4330 if (flags & FOLL_TRIED) {
4331 VM_WARN_ON_ONCE(fault_flags &
4332 FAULT_FLAG_ALLOW_RETRY);
4333 fault_flags |= FAULT_FLAG_TRIED;
4334 }
4335 ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
4336 if (ret & VM_FAULT_ERROR) {
4337 err = vm_fault_to_errno(ret, flags);
4338 remainder = 0;
4339 break;
4340 }
4341 if (ret & VM_FAULT_RETRY) {
4342 if (nonblocking &&
4343 !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
4344 *nonblocking = 0;
4345 *nr_pages = 0;
4346
4347
4348
4349
4350
4351
4352
4353
4354
4355 return i;
4356 }
4357 continue;
4358 }
4359
4360 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4361 page = pte_page(huge_ptep_get(pte));
4362
4363
4364
4365
4366
4367 if (unlikely(page_count(page) <= 0)) {
4368 if (pages) {
4369 spin_unlock(ptl);
4370 remainder = 0;
4371 err = -ENOMEM;
4372 break;
4373 }
4374 }
4375same_page:
4376 if (pages) {
4377 pages[i] = mem_map_offset(page, pfn_offset);
4378 get_page(pages[i]);
4379 }
4380
4381 if (vmas)
4382 vmas[i] = vma;
4383
4384 vaddr += PAGE_SIZE;
4385 ++pfn_offset;
4386 --remainder;
4387 ++i;
4388 if (vaddr < vma->vm_end && remainder &&
4389 pfn_offset < pages_per_huge_page(h)) {
4390
4391
4392
4393
4394 goto same_page;
4395 }
4396 spin_unlock(ptl);
4397 }
4398 *nr_pages = remainder;
4399
4400
4401
4402
4403
4404 *position = vaddr;
4405
4406 return i ? i : err;
4407}
4408
4409#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
4410
4411
4412
4413
4414#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
4415#endif
4416
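/*
 * mprotect() for a hugetlb range.  Returns the number of base pages whose
 * protection changed (huge entries changed << huge page order) so the caller
 * can account in small-page units.
 */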
4417unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4418 unsigned long address, unsigned long end, pgprot_t newprot)
4419{
4420 struct mm_struct *mm = vma->vm_mm;
4421 unsigned long start = address;
4422 pte_t *ptep;
4423 pte_t pte;
4424 struct hstate *h = hstate_vma(vma);
4425 unsigned long pages = 0;
4426 bool shared_pmd = false;
4427 struct mmu_notifier_range range;
4428
4429
4430
4431
4432
4433
4434 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
4435 0, vma, mm, start, end);
4436 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
4437
4438 BUG_ON(address >= end);
4439 flush_cache_range(vma, range.start, range.end);
4440
4441 mmu_notifier_invalidate_range_start(&range);
4442 i_mmap_lock_write(vma->vm_file->f_mapping);
4443 for (; address < end; address += huge_page_size(h)) {
4444 spinlock_t *ptl;
4445 ptep = huge_pte_offset(mm, address, huge_page_size(h));
4446 if (!ptep)
4447 continue;
4448 ptl = huge_pte_lock(h, mm, ptep);
4449 if (huge_pmd_unshare(mm, &address, ptep)) {
4450 pages++;
4451 spin_unlock(ptl);
4452 shared_pmd = true;
4453 continue;
4454 }
4455 pte = huge_ptep_get(ptep);
4456 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
4457 spin_unlock(ptl);
4458 continue;
4459 }
4460 if (unlikely(is_hugetlb_entry_migration(pte))) {
4461 swp_entry_t entry = pte_to_swp_entry(pte);
4462
4463 if (is_write_migration_entry(entry)) {
4464 pte_t newpte;
4465
4466 make_migration_entry_read(&entry);
4467 newpte = swp_entry_to_pte(entry);
4468 set_huge_swap_pte_at(mm, address, ptep,
4469 newpte, huge_page_size(h));
4470 pages++;
4471 }
4472 spin_unlock(ptl);
4473 continue;
4474 }
4475 if (!huge_pte_none(pte)) {
4476 pte_t old_pte;
4477
4478 old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
4479 pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
4480 pte = arch_make_huge_pte(pte, vma, NULL, 0);
4481 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
4482 pages++;
4483 }
4484 spin_unlock(ptl);
4485 }
4486
4487
4488
4489
4490
4491
4492
4493 if (shared_pmd)
4494 flush_hugetlb_tlb_range(vma, range.start, range.end);
4495 else
4496 flush_hugetlb_tlb_range(vma, start, end);
4497
4498
4499
4500
4501
4502
4503 i_mmap_unlock_write(vma->vm_file->f_mapping);
4504 mmu_notifier_invalidate_range_end(&range);
4505
4506 return pages << h->order;
4507}
4508
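/*
 * Reserve huge pages for the file range [from, to), called when a hugetlbfs
 * file is mmap()ed or set up.  Shared mappings track reservations in the
 * inode's resv_map; private mappings allocate their own map and become the
 * HPAGE_RESV_OWNER.  The charge goes through the subpool first and is then
 * accounted globally.
 */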
4509int hugetlb_reserve_pages(struct inode *inode,
4510 long from, long to,
4511 struct vm_area_struct *vma,
4512 vm_flags_t vm_flags)
4513{
4514 long ret, chg;
4515 struct hstate *h = hstate_inode(inode);
4516 struct hugepage_subpool *spool = subpool_inode(inode);
4517 struct resv_map *resv_map;
4518 long gbl_reserve;
4519
4520
4521 if (from > to) {
4522 VM_WARN(1, "%s called with a negative range\n", __func__);
4523 return -EINVAL;
4524 }
4525
4526
4527
4528
4529
4530
4531 if (vm_flags & VM_NORESERVE)
4532 return 0;
4533
4534
4535
4536
4537
4538
4539
4540 if (!vma || vma->vm_flags & VM_MAYSHARE) {
4541
4542
4543
4544
4545
4546 resv_map = inode_resv_map(inode);
4547
4548 chg = region_chg(resv_map, from, to);
4549
4550 } else {
4551 resv_map = resv_map_alloc();
4552 if (!resv_map)
4553 return -ENOMEM;
4554
4555 chg = to - from;
4556
4557 set_vma_resv_map(vma, resv_map);
4558 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4559 }
4560
4561 if (chg < 0) {
4562 ret = chg;
4563 goto out_err;
4564 }
4565
4566
4567
4568
4569
4570
4571 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4572 if (gbl_reserve < 0) {
4573 ret = -ENOSPC;
4574 goto out_err;
4575 }
4576
4577
4578
4579
4580
4581 ret = hugetlb_acct_memory(h, gbl_reserve);
4582 if (ret < 0) {
4583
4584 (void)hugepage_subpool_put_pages(spool, chg);
4585 goto out_err;
4586 }
4587
4588
4589
4590
4591
4592
4593
4594
4595
4596
4597
4598
4599 if (!vma || vma->vm_flags & VM_MAYSHARE) {
4600 long add = region_add(resv_map, from, to);
4601
4602 if (unlikely(chg > add)) {
4603
4604
4605
4606
4607
4608
4609
4610 long rsv_adjust;
4611
4612 rsv_adjust = hugepage_subpool_put_pages(spool,
4613 chg - add);
4614 hugetlb_acct_memory(h, -rsv_adjust);
4615 }
4616 }
4617 return 0;
4618out_err:
4619 if (!vma || vma->vm_flags & VM_MAYSHARE)
4620
4621 if (chg >= 0)
4622 region_abort(resv_map, from, to);
4623 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4624 kref_put(&resv_map->refs, resv_map_release);
4625 return ret;
4626}
4627
4628long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4629 long freed)
4630{
4631 struct hstate *h = hstate_inode(inode);
4632 struct resv_map *resv_map = inode_resv_map(inode);
4633 long chg = 0;
4634 struct hugepage_subpool *spool = subpool_inode(inode);
4635 long gbl_reserve;
4636
4637
4638
4639
4640
4641 if (resv_map) {
4642 chg = region_del(resv_map, start, end);
4643
4644
4645
4646
4647
4648 if (chg < 0)
4649 return chg;
4650 }
4651
4652 spin_lock(&inode->i_lock);
4653 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4654 spin_unlock(&inode->i_lock);
4655
4656
4657
4658
4659
4660 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4661 hugetlb_acct_memory(h, -gbl_reserve);
4662
4663 return 0;
4664}
4665
4666#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
4667static unsigned long page_table_shareable(struct vm_area_struct *svma,
4668 struct vm_area_struct *vma,
4669 unsigned long addr, pgoff_t idx)
4670{
4671 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4672 svma->vm_start;
4673 unsigned long sbase = saddr & PUD_MASK;
4674 unsigned long s_end = sbase + PUD_SIZE;
4675
4676
4677 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4678 unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4679
4680
4681
4682
4683
4684 if (pmd_index(addr) != pmd_index(saddr) ||
4685 vm_flags != svm_flags ||
4686 sbase < svma->vm_start || svma->vm_end < s_end)
4687 return 0;
4688
4689 return saddr;
4690}
4691
4692static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4693{
4694 unsigned long base = addr & PUD_MASK;
4695 unsigned long end = base + PUD_SIZE;
4696
4697
4698
4699
4700 if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
4701 return true;
4702 return false;
4703}
4704
4705
4706
4707
4708
4709
4710void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4711 unsigned long *start, unsigned long *end)
4712{
4713 unsigned long check_addr = *start;
4714
4715 if (!(vma->vm_flags & VM_MAYSHARE))
4716 return;
4717
4718 for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
4719 unsigned long a_start = check_addr & PUD_MASK;
4720 unsigned long a_end = a_start + PUD_SIZE;
4721
4722
4723
4724
4725 if (range_in_vma(vma, a_start, a_end)) {
4726 if (a_start < *start)
4727 *start = a_start;
4728 if (a_end > *end)
4729 *end = a_end;
4730 }
4731 }
4732}
4733
4734
4735
4736
4737
4738
4739
4740
4741
4742
4743pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4744{
4745 struct vm_area_struct *vma = find_vma(mm, addr);
4746 struct address_space *mapping = vma->vm_file->f_mapping;
4747 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4748 vma->vm_pgoff;
4749 struct vm_area_struct *svma;
4750 unsigned long saddr;
4751 pte_t *spte = NULL;
4752 pte_t *pte;
4753 spinlock_t *ptl;
4754
4755 if (!vma_shareable(vma, addr))
4756 return (pte_t *)pmd_alloc(mm, pud, addr);
4757
4758 i_mmap_lock_write(mapping);
4759 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4760 if (svma == vma)
4761 continue;
4762
4763 saddr = page_table_shareable(svma, vma, addr, idx);
4764 if (saddr) {
4765 spte = huge_pte_offset(svma->vm_mm, saddr,
4766 vma_mmu_pagesize(svma));
4767 if (spte) {
4768 get_page(virt_to_page(spte));
4769 break;
4770 }
4771 }
4772 }
4773
4774 if (!spte)
4775 goto out;
4776
4777 ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
4778 if (pud_none(*pud)) {
4779 pud_populate(mm, pud,
4780 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4781 mm_inc_nr_pmds(mm);
4782 } else {
4783 put_page(virt_to_page(spte));
4784 }
4785 spin_unlock(ptl);
4786out:
4787 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4788 i_mmap_unlock_write(mapping);
4789 return pte;
4790}
4791
4792
4793
4794
4795
4796
4797
4798
4799
4800
4801
4802
4803
4804int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4805{
4806 pgd_t *pgd = pgd_offset(mm, *addr);
4807 p4d_t *p4d = p4d_offset(pgd, *addr);
4808 pud_t *pud = pud_offset(p4d, *addr);
4809
4810 BUG_ON(page_count(virt_to_page(ptep)) == 0);
4811 if (page_count(virt_to_page(ptep)) == 1)
4812 return 0;
4813
4814 pud_clear(pud);
4815 put_page(virt_to_page(ptep));
4816 mm_dec_nr_pmds(mm);
4817 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4818 return 1;
4819}
4820#define want_pmd_share() (1)
4821#else
4822pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4823{
4824 return NULL;
4825}
4826
4827int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4828{
4829 return 0;
4830}
4831
4832void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4833 unsigned long *start, unsigned long *end)
4834{
4835}
4836#define want_pmd_share() (0)
4837#endif
4838
4839#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
4840pte_t *huge_pte_alloc(struct mm_struct *mm,
4841 unsigned long addr, unsigned long sz)
4842{
4843 pgd_t *pgd;
4844 p4d_t *p4d;
4845 pud_t *pud;
4846 pte_t *pte = NULL;
4847
4848 pgd = pgd_offset(mm, addr);
4849 p4d = p4d_alloc(mm, pgd, addr);
4850 if (!p4d)
4851 return NULL;
4852 pud = pud_alloc(mm, p4d, addr);
4853 if (pud) {
4854 if (sz == PUD_SIZE) {
4855 pte = (pte_t *)pud;
4856 } else {
4857 BUG_ON(sz != PMD_SIZE);
4858 if (want_pmd_share() && pud_none(*pud))
4859 pte = huge_pmd_share(mm, addr, pud);
4860 else
4861 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4862 }
4863 }
4864 BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
4865
4866 return pte;
4867}
4868
4869
4870
4871
4872
4873
4874
4875
4876
4877
4878pte_t *huge_pte_offset(struct mm_struct *mm,
4879 unsigned long addr, unsigned long sz)
4880{
4881 pgd_t *pgd;
4882 p4d_t *p4d;
4883 pud_t *pud;
4884 pmd_t *pmd;
4885
4886 pgd = pgd_offset(mm, addr);
4887 if (!pgd_present(*pgd))
4888 return NULL;
4889 p4d = p4d_offset(pgd, addr);
4890 if (!p4d_present(*p4d))
4891 return NULL;
4892
4893 pud = pud_offset(p4d, addr);
4894 if (sz != PUD_SIZE && pud_none(*pud))
4895 return NULL;
4896
4897 if (pud_huge(*pud) || !pud_present(*pud))
4898 return (pte_t *)pud;
4899
4900 pmd = pmd_offset(pud, addr);
4901 if (sz != PMD_SIZE && pmd_none(*pmd))
4902 return NULL;
4903
4904 if (pmd_huge(*pmd) || !pmd_present(*pmd))
4905 return (pte_t *)pmd;
4906
4907 return NULL;
4908}
4909
4910#endif
4911
4912
4913
4914
4915
4916struct page * __weak
4917follow_huge_addr(struct mm_struct *mm, unsigned long address,
4918 int write)
4919{
4920 return ERR_PTR(-EINVAL);
4921}
4922
4923struct page * __weak
4924follow_huge_pd(struct vm_area_struct *vma,
4925 unsigned long address, hugepd_t hpd, int flags, int pdshift)
4926{
4927 WARN(1, "hugepd follow called with no support for hugepage directory format\n");
4928 return NULL;
4929}
4930
4931struct page * __weak
4932follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4933 pmd_t *pmd, int flags)
4934{
4935 struct page *page = NULL;
4936 spinlock_t *ptl;
4937 pte_t pte;
4938retry:
4939 ptl = pmd_lockptr(mm, pmd);
4940 spin_lock(ptl);
4941
4942
4943
4944
4945 if (!pmd_huge(*pmd))
4946 goto out;
4947 pte = huge_ptep_get((pte_t *)pmd);
4948 if (pte_present(pte)) {
4949 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4950 if (flags & FOLL_GET)
4951 get_page(page);
4952 } else {
4953 if (is_hugetlb_entry_migration(pte)) {
4954 spin_unlock(ptl);
4955 __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4956 goto retry;
4957 }
4958
4959
4960
4961
4962 }
4963out:
4964 spin_unlock(ptl);
4965 return page;
4966}
4967
4968struct page * __weak
4969follow_huge_pud(struct mm_struct *mm, unsigned long address,
4970 pud_t *pud, int flags)
4971{
4972 if (flags & FOLL_GET)
4973 return NULL;
4974
4975 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4976}
4977
4978struct page * __weak
4979follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
4980{
4981 if (flags & FOLL_GET)
4982 return NULL;
4983
4984 return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
4985}
4986
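/*
 * Isolate an active huge page onto @list for migration.  Fails if the page
 * is not marked active (still being set up, or already on its way to being
 * freed) or if its refcount has already dropped to zero.
 */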
4987bool isolate_huge_page(struct page *page, struct list_head *list)
4988{
4989 bool ret = true;
4990
4991 VM_BUG_ON_PAGE(!PageHead(page), page);
4992 spin_lock(&hugetlb_lock);
4993 if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4994 ret = false;
4995 goto unlock;
4996 }
4997 clear_page_huge_active(page);
4998 list_move_tail(&page->lru, list);
4999unlock:
5000 spin_unlock(&hugetlb_lock);
5001 return ret;
5002}
5003
5004void putback_active_hugepage(struct page *page)
5005{
5006 VM_BUG_ON_PAGE(!PageHead(page), page);
5007 spin_lock(&hugetlb_lock);
5008 set_page_huge_active(page);
5009 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
5010 spin_unlock(&hugetlb_lock);
5011 put_page(page);
5012}
5013
5014void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
5015{
5016 struct hstate *h = page_hstate(oldpage);
5017
5018 hugetlb_cgroup_migrate(oldpage, newpage);
5019 set_page_owner_migrate_reason(newpage, reason);
5020
5021
5022
5023
5024
5025
5026
5027
5028
5029
5030
5031 if (PageHugeTemporary(newpage)) {
5032 int old_nid = page_to_nid(oldpage);
5033 int new_nid = page_to_nid(newpage);
5034
5035 SetPageHugeTemporary(oldpage);
5036 ClearPageHugeTemporary(newpage);
5037
5038 spin_lock(&hugetlb_lock);
5039 if (h->surplus_huge_pages_node[old_nid]) {
5040 h->surplus_huge_pages_node[old_nid]--;
5041 h->surplus_huge_pages_node[new_nid]++;
5042 }
5043 spin_unlock(&hugetlb_lock);
5044 }
5045}
5046