/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/mmdebug.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_owner.h>
#include "internal.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */
static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;
static bool __initdata parsed_valid_hugepagesz = true;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when two people try to instantiate the
 * same page in the page cache.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
	bool free = (spool->count == 0) && (spool->used_hpages == 0);

	spin_unlock(&spool->lock);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (free) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	spin_lock(&spool->lock);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool);
}
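
/*
 * Illustrative sketch (not part of this file): the typical subpool life
 * cycle as driven by a hugetlbfs mount.  The sizes below are hypothetical.
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(h, 1024, 16); // max 1024, min 16 pages
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool); // last ref dropped, spool is freed
 */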

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock(&spool->lock);
	return ret;
}

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return delta;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool);

	return ret;
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region.  This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used throughout for
 * these regions.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  In the normal case, existing regions will be expanded
 * to accommodate the specified range.  Sufficient regions should
 * exist for expansion due to the previous call to region_chg
 * with the same range.  However, it is possible that region_chg
 * could not have created enough regions to expand this range.
 * In this case, adds_in_progress was incremented and a spare
 * cache entry was added so that this add can not fail.
 *
 * Return the number of new huge pages added to the map.  This
 * number is greater than or equal to zero.
 */
static long region_add(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *nrg, *trg;
	long add = 0;

	spin_lock(&resv->lock);
	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/*
	 * If no region exists which can be expanded to include the
	 * specified range, pull a region descriptor from the cache
	 * and use it for this range.
	 */
	if (&rg->link == head || t < rg->from) {
		VM_BUG_ON(resv->region_cache_count <= 0);

		resv->region_cache_count--;
		nrg = list_first_entry(&resv->region_cache, struct file_region,
					link);
		list_del(&nrg->link);

		nrg->from = f;
		nrg->to = t;
		list_add(&nrg->link, rg->link.prev);

		add += t - f;
		goto out_locked;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			/* Decrement return value by the deleted range.
			 * Another range will span this area so that by
			 * end of routine add will be >= zero
			 */
			add -= (rg->to - rg->from);
			list_del(&rg->link);
			kfree(rg);
		}
	}

	add += (nrg->from - f);		/* Added to beginning of region */
	nrg->from = f;
	add += t - nrg->to;		/* Added to end of region */
	nrg->to = t;

out_locked:
	resv->adds_in_progress--;
	spin_unlock(&resv->lock);
	VM_BUG_ON(add < 0);
	return add;
}

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  However, if the existing regions in the map can not
 * be expanded to represent the new range, a new file_region
 * structure is added to the map as a placeholder.  This is
 * needed so that the subsequent region_add call will have a
 * region entry to expand or use.
 *
 * In addition, adds_in_progress is incremented and a spare cache
 * entry is ensured so that a subsequent region_add call can not fail.
 *
 * Returns the number of huge pages that need to be added to the
 * existing reservation map for the range [f, t).  This number is
 * greater or equal to zero.  -ENOMEM is returned if a new file_region
 * structure is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *nrg = NULL;
	long chg = 0;

retry:
	spin_lock(&resv->lock);
retry_locked:
	resv->adds_in_progress++;

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations.
	 */
	if (resv->adds_in_progress > resv->region_cache_count) {
		struct file_region *trg;

		VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
		/* Must drop lock to allocate a new descriptor. */
		resv->adds_in_progress--;
		spin_unlock(&resv->lock);

		trg = kmalloc(sizeof(*trg), GFP_KERNEL);
		if (!trg) {
			kfree(nrg);
			return -ENOMEM;
		}

		spin_lock(&resv->lock);
		list_add(&trg->link, &resv->region_cache);
		resv->region_cache_count++;
		goto retry_locked;
	}

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		if (!nrg) {
			resv->adds_in_progress--;
			spin_unlock(&resv->lock);
			nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
			if (!nrg)
				return -ENOMEM;

			nrg->from = f;
			nrg->to = f;
			INIT_LIST_HEAD(&nrg->link);
			goto retry;
		}

		list_add(&nrg->link, rg->link.prev);
		chg = t - f;
		goto out_nrg;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			goto out;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}

out:
	spin_unlock(&resv->lock);
	/* We already know we raced and no longer need the new region */
	kfree(nrg);
	return chg;
out_nrg:
	spin_unlock(&resv->lock);
	return chg;
}
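
/*
 * Illustrative sketch (not from this file): the usual two-phase protocol
 * for a single page at index 'idx' in a reserve map 'resv'.  The failure
 * condition is hypothetical.
 *
 *	chg = region_chg(resv, idx, idx + 1);	// phase 1: query + prepare
 *	if (chg < 0)
 *		return chg;
 *	if (allocation_failed)			// hypothetical condition
 *		region_abort(resv, idx, idx + 1);
 *	else
 *		region_add(resv, idx, idx + 1);	// phase 2: commit, cannot fail
 */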

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * it mirrors code in region_chg and region_add.
 */
static void region_abort(struct resv_map *resv, long f, long t)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress--;
	spin_unlock(&resv->lock);
}

/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
							struct file_region,
							link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;
			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was free'd
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (rsv_adjust) {
		struct hstate *h = hstate_inode(inode);

		hugetlb_acct_memory(h, 1);
	}
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);

/*
 * Return the size of the pages allocated when backing a VMA.  In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->pagesize)
		return vma->vm_ops->pagesize(vma);
	return PAGE_SIZE;
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA.  In the majority
 * of cases, the page size used by the kernel matches the MMU size.  On
 * architectures where it differs, an architecture-specific 'strong'
 * version of this symbol is required.
 */
__weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
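
/*
 * Illustrative sketch (not from this file): because a resv_map is at least
 * word aligned, its pointer and the two flag bits above can share
 * vma->vm_private_data.  Decoding is a simple mask:
 *
 *	unsigned long v = (unsigned long)vma->vm_private_data;
 *	struct resv_map *map = (struct resv_map *)(v & ~HPAGE_RESV_MASK);
 *	bool owner = v & HPAGE_RESV_OWNER;
 */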

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping.  Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held.  It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and it persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, ie. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);

	resv_map->adds_in_progress = 0;

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	return inode->i_mapping->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by other process(chg == 0),
		 * so, we should decrement reserved count.  Without decrementing,
		 * reserve count remains after releasing inode, because this
		 * allocated page will go into page cache and is regarded as
		 * coming from reserved pool in releasing step.  Currently, we
		 * don't have any other solution to deal with this situation
		 * properly, so add work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
		 * be a region map for all pages.  The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate.  In this case, there really are no reserves to
		 * use.  This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/*
		 * Like the shared case above, a hole punch or truncate
		 * could have been performed on the private mapping.
		 * Examine the value of chg to determine if reserves
		 * actually exist or were previously consumed.
		 * Very Subtle - The value of chg comes from a previous
		 * call to vma_needs_reserves().  The reserve map for
		 * private mappings has different (opposite) semantics
		 * than that of shared mappings.  However, the value
		 * returned in chg will behave the same way in both cases.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	return false;
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
{
	struct page *page;

	list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
		if (!PageHWPoison(page))
			break;
	/*
	 * if 'non-isolated free hugepage' not found on the list,
	 * the allocation fails.
	 */
	if (&h->hugepage_freelists[nid] == &page->lru)
		return NULL;
	list_move(&page->lru, &h->hugepage_activelist);
	set_page_refcounted(page);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	return page;
}

static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
		nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = -1;

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct page *page;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * no need to ask again on the same node.  Pool is node
		 * rather than zone aware.
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		page = dequeue_huge_page_node_exact(h, node);
		if (page)
			return page;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_migration_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct page *page;
	struct mempolicy *mpol;
	gfp_t gfp_mask;
	nodemask_t *nodemask;
	int nid;

	/*
	 * A child process with MAP_PRIVATE mappings created by their parent
	 * has no page reserves.  This check ensures that reservations are
	 * not "stolen".  The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma, chg) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	gfp_mask = htlb_alloc_mask(h);
	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
		SetPagePrivate(page);
		h->resv_huge_pages--;
	}

	mpol_cond_put(mpol);
	return page;

err:
	return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advances the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)
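
/*
 * Illustrative sketch (not from this file): round-robin iteration over
 * all memory nodes using the helper macro above.  try_alloc_on_node()
 * is a hypothetical helper, not a real kernel function.
 *
 *	int nr_nodes, node;
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
 *		if (try_alloc_on_node(node))	// hypothetical helper
 *			break;
 *	}
 */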

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_page(struct page *page,
					unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	atomic_set(compound_mapcount_ptr(page), 0);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		clear_compound_head(p);
		set_page_refcounted(p);
	}

	set_compound_order(page, 0);
	__ClearPageHead(page);
}

static void free_gigantic_page(struct page *page, unsigned int order)
{
	free_contig_range(page_to_pfn(page), 1 << order);
}

static int __alloc_gigantic_page(unsigned long start_pfn,
				unsigned long nr_pages, gfp_t gfp_mask)
{
	unsigned long end_pfn = start_pfn + nr_pages;
	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				  gfp_mask);
}

static bool pfn_range_valid_gigantic(struct zone *z,
			unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long i, end_pfn = start_pfn + nr_pages;
	struct page *page;

	for (i = start_pfn; i < end_pfn; i++) {
		if (!pfn_valid(i))
			return false;

		page = pfn_to_page(i);

		if (page_zone(page) != z)
			return false;

		if (PageReserved(page))
			return false;

		if (page_count(page) > 0)
			return false;

		if (PageHuge(page))
			return false;
	}

	return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
			unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long last_pfn = start_pfn + nr_pages - 1;
	return zone_spans_pfn(zone, last_pfn);
}

static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	unsigned int order = huge_page_order(h);
	unsigned long nr_pages = 1 << order;
	unsigned long ret, pfn, flags;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;

	zonelist = node_zonelist(nid, gfp_mask);
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
		spin_lock_irqsave(&zone->lock, flags);

		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
			if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
				/*
				 * We release the zone lock here because
				 * alloc_contig_range() will also lock the zone
				 * at some point.  If there's an allocation
				 * spinning on this lock, it may win the race
				 * and cause alloc_contig_range() to fail.
				 */
				spin_unlock_irqrestore(&zone->lock, flags);
				ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
				if (!ret)
					return pfn_to_page(pfn);
				spin_lock_irqsave(&zone->lock, flags);
			}
			pfn += nr_pages;
		}

		spin_unlock_irqrestore(&zone->lock, flags);
	}

	return NULL;
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
static void prep_compound_gigantic_page(struct page *page, unsigned int order);

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static inline bool gigantic_page_supported(void) { return false; }
static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask) { return NULL; }
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
						unsigned int order) { }
#endif

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	if (hstate_is_gigantic(h) && !gigantic_page_supported())
		return;

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_private |
				1 << PG_writeback);
	}
	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
	set_page_refcounted(page);
	if (hstate_is_gigantic(h)) {
		destroy_compound_gigantic_page(page, huge_page_order(h));
		free_gigantic_page(page, huge_page_order(h));
	} else {
		__free_pages(page, huge_page_order(h));
	}
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

/*
 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
 * to hstate->hugepage_activelist.)
 *
 * This function can be called for tail pages, but never returns true for them.
 */
bool page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return PageHead(page) && PagePrivate(&page[1]);
}

/* never called for tail page */
static void set_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	SetPagePrivate(&page[1]);
}

static void clear_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	ClearPagePrivate(&page[1]);
}

/*
 * Internal hugetlb specific page flag.  Do not use outside of the hugetlb
 * code.
 */
static inline bool PageHugeTemporary(struct page *page)
{
	if (!PageHuge(page))
		return false;

	return (unsigned long)page[2].mapping == -1U;
}

static inline void SetPageHugeTemporary(struct page *page)
{
	page[2].mapping = (void *)-1U;
}

static inline void ClearPageHugeTemporary(struct page *page)
{
	page[2].mapping = NULL;
}

void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct hugepage_subpool *spool =
		(struct hugepage_subpool *)page_private(page);
	bool restore_reserve;

	set_page_private(page, 0);
	page->mapping = NULL;
	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(page_mapcount(page), page);
	restore_reserve = PagePrivate(page);
	ClearPagePrivate(page);

	/*
	 * A return code of zero implies that the subpool will be under its
	 * minimum size if the reservation is not restored after page is free.
	 * Therefore, force restore_reserve operation.
	 */
	if (hugepage_subpool_put_pages(spool, 1) == 0)
		restore_reserve = true;

	spin_lock(&hugetlb_lock);
	clear_page_huge_active(page);
	hugetlb_cgroup_uncharge_page(hstate_index(h),
				     pages_per_huge_page(h), page);
	if (restore_reserve)
		h->resv_huge_pages++;

	if (PageHugeTemporary(page)) {
		list_del(&page->lru);
		ClearPageHugeTemporary(page);
		update_and_free_page(h, page);
	} else if (h->surplus_huge_pages_node[nid]) {
		/* remove the page from active list */
		list_del(&page->lru);
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		arch_clear_hugepage_flags(page);
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	INIT_LIST_HEAD(&page->lru);
	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
	spin_lock(&hugetlb_lock);
	set_hugetlb_cgroup(page, NULL);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
}

static void prep_compound_gigantic_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__ClearPageReserved(page);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		/*
		 * For gigantic hugepages allocated through bootmem at
		 * boot, it's safer to be consistent with the not-gigantic
		 * hugepages and clear the PG_reserved bit from all tail pages
		 * too.  Otherwise drivers using get_user_pages() to access tail
		 * pages may get the reference counting wrong if they see
		 * PG_reserved set on a tail page (despite the head page not
		 * having PG_reserved set).  Enforcing this consistency between
		 * head and tail pages allows drivers to optimize away a check
		 * on the head page when they need know if put_page() is needed
		 * after get_user_pages().
		 */
		__ClearPageReserved(p);
		set_page_count(p, 0);
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
}

/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
 * normal or transparent huge pages.
 */
int PageHeadHuge(struct page *page_head)
{
	if (!PageHead(page_head))
		return 0;

	return get_compound_page_dtor(page_head) == free_huge_page;
}

pgoff_t __basepage_index(struct page *page)
{
	struct page *page_head = compound_head(page);
	pgoff_t index = page_index(page_head);
	unsigned long compound_idx;

	if (!PageHuge(page_head))
		return page_index(page);

	if (compound_order(page_head) >= MAX_ORDER)
		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
	else
		compound_idx = page - page_head;

	return (index << compound_order(page_head)) + compound_idx;
}
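
/*
 * Illustrative arithmetic (not from this file): for a 2 MB huge page
 * (order 9 in 4 KB base pages) at pagecache index 3, the tail page at
 * offset 5 within the compound page has base-page index
 * (3 << 9) + 5 == 1541.
 */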

static struct page *alloc_buddy_huge_page(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
	int order = huge_page_order(h);
	struct page *page;

	gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();
	page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
	if (page)
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return page;
}

/*
 * Common helper to allocate a fresh hugetlb page.  All specific allocators
 * should use this function to get new hugetlb pages.
 */
static struct page *alloc_fresh_huge_page(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
	struct page *page;

	if (hstate_is_gigantic(h))
		page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
	else
		page = alloc_buddy_huge_page(h, gfp_mask,
				nid, nmask);
	if (!page)
		return NULL;

	if (hstate_is_gigantic(h))
		prep_compound_gigantic_page(page, huge_page_order(h));
	prep_new_huge_page(h, page, page_to_nid(page));

	return page;
}

/*
 * Allocates a fresh page to the hugetlb allocator pool in the node
 * interleaved manner.
 */
static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
	struct page *page;
	int nr_nodes, node;
	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed);
		if (page)
			break;
	}

	if (!page)
		return 0;

	put_page(page); /* free it into the hugepage allocator */

	return 1;
}

/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
							 bool acct_surplus)
{
	int nr_nodes, node;
	int ret = 0;

	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
		    !list_empty(&h->hugepage_freelists[node])) {
			struct page *page =
				list_entry(h->hugepage_freelists[node].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[node]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[node]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
	}

	return ret;
}

/*
 * Dissolve a given free hugepage into free buddy pages.  This function
 * does nothing for in-use (including surplus) hugepages.  Returns -EBUSY
 * if the number of free hugepages would be reduced below the number of
 * reserved hugepages.
 */
int dissolve_free_huge_page(struct page *page)
{
	int rc = -EBUSY;

	spin_lock(&hugetlb_lock);
	if (PageHuge(page) && !page_count(page)) {
		struct page *head = compound_head(page);
		struct hstate *h = page_hstate(head);
		int nid = page_to_nid(head);
		if (h->free_huge_pages - h->resv_huge_pages == 0)
			goto out;
		/*
		 * Move PageHWPoison flag from head page to the raw error page,
		 * which makes any subpages rather than the error page reusable.
		 */
		if (PageHWPoison(head) && page != head) {
			SetPageHWPoison(page);
			ClearPageHWPoison(head);
		}
		list_del(&head->lru);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		h->max_huge_pages--;
		update_and_free_page(h, head);
		rc = 0;
	}
out:
	spin_unlock(&hugetlb_lock);
	return rc;
}

/*
 * Dissolve free hugepages in a given pfn range.  Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that this will dissolve a free gigantic hugepage completely, if any
 * part of it lies within the given range.
 * Also note that if dissolve_free_huge_page() returns with an error, all
 * free hugepages that are dissolved before that error are lost.
 */
int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int rc = 0;

	if (!hugepages_supported())
		return rc;

	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
		page = pfn_to_page(pfn);
		if (PageHuge(page) && !page_count(page)) {
			rc = dissolve_free_huge_page(page);
			if (rc)
				break;
		}
	}

	return rc;
}

/*
 * Allocates a fresh surplus page from the page allocator.
 */
static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nmask)
{
	struct page *page = NULL;

	if (hstate_is_gigantic(h))
		return NULL;

	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
		goto out_unlock;
	spin_unlock(&hugetlb_lock);

	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
	if (!page)
		return NULL;

	spin_lock(&hugetlb_lock);
	/*
	 * We could have raced with the pool size change.
	 * Double check that and simply deallocate the new page
	 * if we would end up overcommiting the surpluses.  Abuse
	 * temporary page to workaround the nasty free_huge_page
	 * codeflow.
	 */
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		SetPageHugeTemporary(page);
		put_page(page);
		page = NULL;
	} else {
		h->surplus_huge_pages++;
		h->surplus_huge_pages_node[page_to_nid(page)]++;
	}

out_unlock:
	spin_unlock(&hugetlb_lock);

	return page;
}

static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nmask)
{
	struct page *page;

	if (hstate_is_gigantic(h))
		return NULL;

	page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
	if (!page)
		return NULL;

	/*
	 * We do not account these pages as surplus because they are only
	 * temporary and will be released properly on the last reference.
	 */
	SetPageHugeTemporary(page);

	return page;
}

/*
 * Use the VMA's mpolicy to allocate a huge page from the buddy.
 */
static
struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	struct mempolicy *mpol;
	gfp_t gfp_mask = htlb_alloc_mask(h);
	int nid;
	nodemask_t *nodemask;

	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
	page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
	mpol_cond_put(mpol);

	return page;
}

/* page migration callback function */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	gfp_t gfp_mask = htlb_alloc_mask(h);
	struct page *page = NULL;

	if (nid != NUMA_NO_NODE)
		gfp_mask |= __GFP_THISNODE;

	spin_lock(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0)
		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
	spin_unlock(&hugetlb_lock);

	if (!page)
		page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);

	return page;
}

/* page migration callback function */
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
		nodemask_t *nmask)
{
	gfp_t gfp_mask = htlb_alloc_mask(h);

	spin_lock(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0) {
		struct page *page;

		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
		if (page) {
			spin_unlock(&hugetlb_lock);
			return page;
		}
	}
	spin_unlock(&hugetlb_lock);

	return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
}

/* mempolicy aware migration callback */
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
		unsigned long address)
{
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct page *page;
	gfp_t gfp_mask;
	int node;

	gfp_mask = htlb_alloc_mask(h);
	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	page = alloc_huge_page_nodemask(h, node, nodemask);
	mpol_cond_put(mpol);

	return page;
}

/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;
	bool alloc_ok = true;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
				NUMA_NO_NODE, NULL);
		if (!page) {
			alloc_ok = false;
			break;
		}
		list_add(&page->lru, &surplus_list);
		cond_resched();
	}
	allocated += i;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;
		/*
		 * We were not able to allocate enough pages to
		 * satisfy the entire reservation so we free what
		 * we've allocated so far.
		 */
		goto free;
	}

	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the extra needed pages
	 * to the global free hugepage pool and adjust the reservation
	 * counters.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;

	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON_PAGE(page_count(page), page);
		enqueue_huge_page(h, page);
	}
free:
	spin_unlock(&hugetlb_lock);

	/* Free unnecessary surplus pages to the buddy allocator */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
		put_page(page);
	spin_lock(&hugetlb_lock);

	return ret;
}

/*
 * This routine has two main purposes:
 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
 *    in unused_resv_pages.  This corresponds to the prior adjustments made
 *    to the associated reservation map.
 * 2) Free any unused surplus pages that may have been allocated to satisfy
 *    the reservation.  As many as unused_resv_pages may be freed.
 *
 * Called with hugetlb_lock held.  However, the lock could be dropped (and
 * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
 * we must make sure nobody else can claim pages we are in the process of
 * freeing.  Do this by ensuring resv_huge_page always is greater than the
 * number of huge pages we plan to free when dropping the lock.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;

	/* Cannot return gigantic pages currently */
	if (hstate_is_gigantic(h))
		goto out;

	/*
	 * Part (or even all) of the reservation could have been backed
	 * by pre-allocated pages.  Only free surplus pages.
	 */
	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory.  Iterate across these nodes
	 * until we can no longer free unreserved surplus pages.  This occurs
	 * when the nodes with surplus pages have no free pages.
	 * free_pool_huge_page() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 *
	 * Note that we decrement resv_huge_pages as we free the pages.  If
	 * we drop the lock, resv_huge_pages will still be sufficiently large
	 * to cover subsequent pages we may free.
	 */
	while (nr_pages--) {
		h->resv_huge_pages--;
		unused_resv_pages--;
		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
			goto out;
		cond_resched_lock(&hugetlb_lock);
	}

out:
	/* Fully uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;
}


/*
 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
 * are used by the huge page allocation routines to manage reservations.
 *
 * vma_needs_reservation is called to determine if the huge page at addr
 * within the vma has an associated reservation.  If a reservation is
 * needed, the value 1 is returned.  The caller is then responsible for
 * managing the global reservation and subpool usage counts.  After
 * the huge page has been allocated, vma_commit_reservation is called
 * to add the page to the reservation map.  If the page allocation fails,
 * the reservation must be ended instead of committed.  vma_end_reservation
 * is called in such cases.
 *
 * In the normal case, vma_commit_reservation returns the same value
 * as the preceding vma_needs_reservation call.  The only time this
 * is not the case is if a reserve map was changed between calls.  It
 * is the responsibility of the caller to notice the difference and
 * take appropriate action.
 *
 * vma_add_reservation is used in error paths where a reservation must
 * be restored when a newly allocated huge page must be freed.  It is
 * to be called after calling vma_needs_reservation to determine if a
 * reservation exists.
 */
enum vma_resv_mode {
	VMA_NEEDS_RESV,
	VMA_COMMIT_RESV,
	VMA_END_RESV,
	VMA_ADD_RESV,
};
static long __vma_reservation_common(struct hstate *h,
				struct vm_area_struct *vma, unsigned long addr,
				enum vma_resv_mode mode)
{
	struct resv_map *resv;
	pgoff_t idx;
	long ret;

	resv = vma_resv_map(vma);
	if (!resv)
		return 1;

	idx = vma_hugecache_offset(h, vma, addr);
	switch (mode) {
	case VMA_NEEDS_RESV:
		ret = region_chg(resv, idx, idx + 1);
		break;
	case VMA_COMMIT_RESV:
		ret = region_add(resv, idx, idx + 1);
		break;
	case VMA_END_RESV:
		region_abort(resv, idx, idx + 1);
		ret = 0;
		break;
	case VMA_ADD_RESV:
		if (vma->vm_flags & VM_MAYSHARE)
			ret = region_add(resv, idx, idx + 1);
		else {
			region_abort(resv, idx, idx + 1);
			ret = region_del(resv, idx, idx + 1);
		}
		break;
	default:
		BUG();
	}

	if (vma->vm_flags & VM_MAYSHARE)
		return ret;
	else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
		/*
		 * In most cases, reserves always exist for private mappings.
		 * However, a file associated with mapping could have been
		 * hole punched or truncated after reserves were consumed.
		 * As subsequent fault on such a range will not use reserves.
		 * Subtle - The reserve map for private mappings has the
		 * opposite meaning than that of shared mappings.  If NO
		 * entry is in the reserve map, it means a reservation exists.
		 * If an entry exists in the reserve map, it means the
		 * reservation has already been consumed.  As a result, the
		 * return value of this routine is the opposite of the
		 * value returned from reserve map manipulation routines above.
		 */
		if (ret)
			return 0;
		else
			return 1;
	}
	else
		return ret < 0 ? ret : 0;
}

static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
}

static long vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
}

static void vma_end_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
}

static long vma_add_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
}

/*
 * This routine is called to restore a reservation on error paths.  In the
 * specific error paths, a huge page was allocated (via alloc_huge_page)
 * and is about to be freed.  If a reservation for the page existed,
 * alloc_huge_page would have consumed the reservation and set PagePrivate
 * in the newly allocated page.  When the page is freed via free_huge_page,
 * the global reservation count will be incremented if PagePrivate is set.
 * However, free_huge_page can not adjust the reserve map.  Adjust the
 * reserve map here to be consistent with global reserve count adjustments
 * to be made by free_huge_page.
 */
static void restore_reserve_on_error(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address,
			struct page *page)
{
	if (unlikely(PagePrivate(page))) {
		long rc = vma_needs_reservation(h, vma, address);

		if (unlikely(rc < 0)) {
			/*
			 * Rare out of memory condition in reserve map
			 * manipulation.  Clear PagePrivate so that
			 * global reserve count will not be incremented
			 * by free_huge_page.  This will make it appear
			 * as though the reservation for this page was
			 * consumed.  This may prevent the task from
			 * faulting in the page at a later time.  This
			 * is better than inconsistent global huge page
			 * accounting of reserve counts.
			 */
			ClearPagePrivate(page);
		} else if (rc) {
			rc = vma_add_reservation(h, vma, address);
			if (unlikely(rc < 0))
				/*
				 * See above comment about rare out of
				 * memory condition.
				 */
				ClearPagePrivate(page);
		} else
			vma_end_reservation(h, vma, address);
	}
}

struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hugepage_subpool *spool = subpool_vma(vma);
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	long map_chg, map_commit;
	long gbl_chg;
	int ret, idx;
	struct hugetlb_cgroup *h_cg;

	idx = hstate_index(h);
	/*
	 * Examine the region/reserve map to determine if the process
	 * has a reservation for the page to be allocated.  A return
	 * code of zero indicates a reservation exists (no change).
	 */
	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
	if (map_chg < 0)
		return ERR_PTR(-ENOMEM);

	/*
	 * Processes that did not create the mapping will have no
	 * reserves as indicated by the region/reserve map.  Check
	 * that the allocation will not exceed the subpool limit.
	 * Allocations for MAP_NORESERVE mappings also need to be
	 * checked against any subpool limit.
	 */
	if (map_chg || avoid_reserve) {
		gbl_chg = hugepage_subpool_get_pages(spool, 1);
		if (gbl_chg < 0) {
			vma_end_reservation(h, vma, addr);
			return ERR_PTR(-ENOSPC);
		}

		/*
		 * Even though there was no reservation in the region/reserve
		 * map, there could be reservations associated with the
		 * subpool that can be used.  This would be indicated if the
		 * return value of hugepage_subpool_get_pages() is zero.
		 * However, if avoid_reserve is specified we still avoid even
		 * the subpool reservations.
		 */
		if (avoid_reserve)
			gbl_chg = 1;
	}

	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
	if (ret)
		goto out_subpool_put;

	spin_lock(&hugetlb_lock);
	/*
	 * gbl_chg is passed to indicate whether or not a page must be taken
	 * from the global free pool (global change).  gbl_chg == 0 indicates
	 * a reservation exists for the allocation.
	 */
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
	if (!page) {
		spin_unlock(&hugetlb_lock);
		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
		if (!page)
			goto out_uncharge_cgroup;
		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
			SetPagePrivate(page);
			h->resv_huge_pages--;
		}
		spin_lock(&hugetlb_lock);
		list_move(&page->lru, &h->hugepage_activelist);
		/* Fall through */
	}
	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
	spin_unlock(&hugetlb_lock);

	set_page_private(page, (unsigned long)spool);

	map_commit = vma_commit_reservation(h, vma, addr);
	if (unlikely(map_chg > map_commit)) {
		/*
		 * The page was added to the reservation map between
		 * vma_needs_reservation and vma_commit_reservation.
		 * This indicates a race with hugetlb_reserve_pages.
		 * Adjust for the subpool count incremented above AND
		 * in hugetlb_reserve_pages for the same page.  Also,
		 * the reservation count added in hugetlb_reserve_pages
		 * no longer applies.
		 */
		long rsv_adjust;

		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
		hugetlb_acct_memory(h, -rsv_adjust);
	}
	return page;

out_uncharge_cgroup:
	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
out_subpool_put:
	if (map_chg || avoid_reserve)
		hugepage_subpool_put_pages(spool, 1);
	vma_end_reservation(h, vma, addr);
	return ERR_PTR(-ENOSPC);
}
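
/*
 * Illustrative sketch (not from this file): how a fault path would
 * typically use alloc_huge_page() together with the error helper above.
 * The failure condition and return policy are hypothetical.
 *
 *	page = alloc_huge_page(vma, address, 0);
 *	if (IS_ERR(page))
 *		return VM_FAULT_OOM;		// hypothetical caller policy
 *	if (mapping_the_page_failed) {		// hypothetical condition
 *		restore_reserve_on_error(h, vma, address, page);
 *		put_page(page);
 *	}
 */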

int alloc_bootmem_huge_page(struct hstate *h)
	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
int __alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;
	int nr_nodes, node;

	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
		void *addr;

		addr = memblock_virt_alloc_try_nid_raw(
				huge_page_size(h), huge_page_size(h),
				0, BOOTMEM_ALLOC_ACCESSIBLE, node);
		if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
			 */
			m = addr;
			goto found;
		}
	}
	return 0;

found:
	BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
	/* Put them into a private list first because mem_map is not up yet */
	INIT_LIST_HEAD(&m->list);
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}

static void __init prep_compound_huge_page(struct page *page,
		unsigned int order)
{
	if (unlikely(order > (MAX_ORDER - 1)))
		prep_compound_gigantic_page(page, order);
	else
		prep_compound_page(page, order);
}

/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct page *page = virt_to_page(m);
		struct hstate *h = m->hstate;

		WARN_ON(page_count(page) != 1);
		prep_compound_huge_page(page, h->order);
		WARN_ON(PageReserved(page));
		prep_new_huge_page(h, page, page_to_nid(page));
		put_page(page); /* free it into the hugepage allocator */

		/*
		 * If we had gigantic hugepages allocated at boot time, we need
		 * to restore the 'stolen' pages to totalram_pages in order to
		 * fix confusing memory reports from free(1) and other
		 * side-effects, like CommitLimit going negative.
		 */
		if (hstate_is_gigantic(h))
			adjust_managed_page_count(page, 1 << h->order);
		cond_resched();
	}
}

static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (hstate_is_gigantic(h)) {
			if (!alloc_bootmem_huge_page(h))
				break;
		} else if (!alloc_pool_huge_page(h,
					 &node_states[N_MEMORY]))
			break;
		cond_resched();
	}
	if (i < h->max_huge_pages) {
		char buf[32];

		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
		pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
			h->max_huge_pages, buf, i);
		h->max_huge_pages = i;
	}
}

static void __init hugetlb_init_hstates(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (minimum_order > huge_page_order(h))
			minimum_order = huge_page_order(h);

		/* oversize hugepages were init'ed in early boot */
		if (!hstate_is_gigantic(h))
			hugetlb_hstate_alloc_pages(h);
	}
	VM_BUG_ON(minimum_order == UINT_MAX);
}

static void __init report_hugepages(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		char buf[32];

		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
		pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
			buf, h->free_huge_pages);
	}
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	int i;

	if (hstate_is_gigantic(h))
		return;

	for_each_node_mask(i, *nodes_allowed) {
		struct page *page, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(page, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(h, page);
			h->free_huge_pages--;
			h->free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
}
#endif

/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
				int delta)
{
	int nr_nodes, node;

	VM_BUG_ON(delta != -1 && delta != 1);

	if (delta < 0) {
		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
			if (h->surplus_huge_pages_node[node])
				goto found;
		}
	} else {
		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
			if (h->surplus_huge_pages_node[node] <
					h->nr_huge_pages_node[node])
				goto found;
		}
	}
	return 0;

found:
	h->surplus_huge_pages += delta;
	h->surplus_huge_pages_node[node] += delta;
	return 1;
}

#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	unsigned long min_count, ret;

	if (hstate_is_gigantic(h) && !gigantic_page_supported())
		return h->max_huge_pages;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_surplus_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page.  That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, -1))
			break;
	}

	while (count > persistent_huge_pages(h)) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);

		/* yield cpu to avoid soft lockup */
		cond_resched();

		ret = alloc_pool_huge_page(h, nodes_allowed);
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

		/* Bail for signals. Probably ctrl-c from user */
		if (signal_pending(current))
			goto out;
	}

	/*
	 * Decrease the pool size.
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 */
	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(h, min_count, nodes_allowed);
	while (min_count < persistent_huge_pages(h)) {
		if (!free_pool_huge_page(h, nodes_allowed, 0))
			break;
		cond_resched_lock(&hugetlb_lock);
	}
	while (count < persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, 1))
			break;
	}
out:
	ret = persistent_huge_pages(h);
	spin_unlock(&hugetlb_lock);
	return ret;
}

#define HSTATE_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define HSTATE_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)
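
/*
 * Illustrative expansion (not additional code): HSTATE_ATTR(nr_hugepages)
 * below becomes
 *
 *	static struct kobj_attribute nr_hugepages_attr =
 *		__ATTR(nr_hugepages, 0644, nr_hugepages_show,
 *		       nr_hugepages_store);
 *
 * i.e. a 0644 sysfs attribute wired to the _show/_store pair.
 */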
2363
2364static struct kobject *hugepages_kobj;
2365static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2366
2367static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2368
2369static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2370{
2371 int i;
2372
2373 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2374 if (hstate_kobjs[i] == kobj) {
2375 if (nidp)
2376 *nidp = NUMA_NO_NODE;
2377 return &hstates[i];
2378 }
2379
2380 return kobj_to_node_hstate(kobj, nidp);
2381}
2382
2383static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2384 struct kobj_attribute *attr, char *buf)
2385{
2386 struct hstate *h;
2387 unsigned long nr_huge_pages;
2388 int nid;
2389
2390 h = kobj_to_hstate(kobj, &nid);
2391 if (nid == NUMA_NO_NODE)
2392 nr_huge_pages = h->nr_huge_pages;
2393 else
2394 nr_huge_pages = h->nr_huge_pages_node[nid];
2395
2396 return sprintf(buf, "%lu\n", nr_huge_pages);
2397}
2398
2399static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2400 struct hstate *h, int nid,
2401 unsigned long count, size_t len)
2402{
2403 int err;
2404 NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2405
2406 if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2407 err = -EINVAL;
2408 goto out;
2409 }
2410
2411 if (nid == NUMA_NO_NODE) {
2412
2413
2414
2415 if (!(obey_mempolicy &&
2416 init_nodemask_of_mempolicy(nodes_allowed))) {
2417 NODEMASK_FREE(nodes_allowed);
2418 nodes_allowed = &node_states[N_MEMORY];
2419 }
2420 } else if (nodes_allowed) {
2421
2422
2423
2424
2425 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2426 init_nodemask_of_node(nodes_allowed, nid);
2427 } else
2428 nodes_allowed = &node_states[N_MEMORY];
2429
2430 h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2431
2432 if (nodes_allowed != &node_states[N_MEMORY])
2433 NODEMASK_FREE(nodes_allowed);
2434
2435 return len;
2436out:
2437 NODEMASK_FREE(nodes_allowed);
2438 return err;
2439}
2440
2441static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2442 struct kobject *kobj, const char *buf,
2443 size_t len)
2444{
2445 struct hstate *h;
2446 unsigned long count;
2447 int nid;
2448 int err;
2449
2450 err = kstrtoul(buf, 10, &count);
2451 if (err)
2452 return err;
2453
2454 h = kobj_to_hstate(kobj, &nid);
2455 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2456}
2457
2458static ssize_t nr_hugepages_show(struct kobject *kobj,
2459 struct kobj_attribute *attr, char *buf)
2460{
2461 return nr_hugepages_show_common(kobj, attr, buf);
2462}
2463
2464static ssize_t nr_hugepages_store(struct kobject *kobj,
2465 struct kobj_attribute *attr, const char *buf, size_t len)
2466{
2467 return nr_hugepages_store_common(false, kobj, buf, len);
2468}
2469HSTATE_ATTR(nr_hugepages);
2470
2471#ifdef CONFIG_NUMA
2472
2473
2474
2475
2476
2477static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2478 struct kobj_attribute *attr, char *buf)
2479{
2480 return nr_hugepages_show_common(kobj, attr, buf);
2481}
2482
2483static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2484 struct kobj_attribute *attr, const char *buf, size_t len)
2485{
2486 return nr_hugepages_store_common(true, kobj, buf, len);
2487}
2488HSTATE_ATTR(nr_hugepages_mempolicy);
2489#endif
2490
2491
2492static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2493 struct kobj_attribute *attr, char *buf)
2494{
2495 struct hstate *h = kobj_to_hstate(kobj, NULL);
2496 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2497}
2498
2499static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2500 struct kobj_attribute *attr, const char *buf, size_t count)
2501{
2502 int err;
2503 unsigned long input;
2504 struct hstate *h = kobj_to_hstate(kobj, NULL);
2505
2506 if (hstate_is_gigantic(h))
2507 return -EINVAL;
2508
2509 err = kstrtoul(buf, 10, &input);
2510 if (err)
2511 return err;
2512
2513 spin_lock(&hugetlb_lock);
2514 h->nr_overcommit_huge_pages = input;
2515 spin_unlock(&hugetlb_lock);
2516
2517 return count;
2518}
2519HSTATE_ATTR(nr_overcommit_hugepages);
2520
2521static ssize_t free_hugepages_show(struct kobject *kobj,
2522 struct kobj_attribute *attr, char *buf)
2523{
2524 struct hstate *h;
2525 unsigned long free_huge_pages;
2526 int nid;
2527
2528 h = kobj_to_hstate(kobj, &nid);
2529 if (nid == NUMA_NO_NODE)
2530 free_huge_pages = h->free_huge_pages;
2531 else
2532 free_huge_pages = h->free_huge_pages_node[nid];
2533
2534 return sprintf(buf, "%lu\n", free_huge_pages);
2535}
2536HSTATE_ATTR_RO(free_hugepages);
2537
2538static ssize_t resv_hugepages_show(struct kobject *kobj,
2539 struct kobj_attribute *attr, char *buf)
2540{
2541 struct hstate *h = kobj_to_hstate(kobj, NULL);
2542 return sprintf(buf, "%lu\n", h->resv_huge_pages);
2543}
2544HSTATE_ATTR_RO(resv_hugepages);
2545
2546static ssize_t surplus_hugepages_show(struct kobject *kobj,
2547 struct kobj_attribute *attr, char *buf)
2548{
2549 struct hstate *h;
2550 unsigned long surplus_huge_pages;
2551 int nid;
2552
2553 h = kobj_to_hstate(kobj, &nid);
2554 if (nid == NUMA_NO_NODE)
2555 surplus_huge_pages = h->surplus_huge_pages;
2556 else
2557 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2558
2559 return sprintf(buf, "%lu\n", surplus_huge_pages);
2560}
2561HSTATE_ATTR_RO(surplus_hugepages);
2562
2563static struct attribute *hstate_attrs[] = {
2564 &nr_hugepages_attr.attr,
2565 &nr_overcommit_hugepages_attr.attr,
2566 &free_hugepages_attr.attr,
2567 &resv_hugepages_attr.attr,
2568 &surplus_hugepages_attr.attr,
2569#ifdef CONFIG_NUMA
2570 &nr_hugepages_mempolicy_attr.attr,
2571#endif
2572 NULL,
2573};
2574
2575static const struct attribute_group hstate_attr_group = {
2576 .attrs = hstate_attrs,
2577};
2578
2579static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2580 struct kobject **hstate_kobjs,
2581 const struct attribute_group *hstate_attr_group)
2582{
2583 int retval;
2584 int hi = hstate_index(h);
2585
2586 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2587 if (!hstate_kobjs[hi])
2588 return -ENOMEM;
2589
2590 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2591 if (retval)
2592 kobject_put(hstate_kobjs[hi]);
2593
2594 return retval;
2595}
2596
2597static void __init hugetlb_sysfs_init(void)
2598{
2599 struct hstate *h;
2600 int err;
2601
2602 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2603 if (!hugepages_kobj)
2604 return;
2605
2606 for_each_hstate(h) {
2607 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2608 hstate_kobjs, &hstate_attr_group);
2609 if (err)
2610 pr_err("Hugetlb: Unable to add hstate %s", h->name);
2611 }
2612}
2613
2614#ifdef CONFIG_NUMA
2615
2616
2617
2618
2619
2620
2621
2622
2623struct node_hstate {
2624 struct kobject *hugepages_kobj;
2625 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2626};
2627static struct node_hstate node_hstates[MAX_NUMNODES];
2628
2629
2630
2631
2632static struct attribute *per_node_hstate_attrs[] = {
2633 &nr_hugepages_attr.attr,
2634 &free_hugepages_attr.attr,
2635 &surplus_hugepages_attr.attr,
2636 NULL,
2637};
2638
2639static const struct attribute_group per_node_hstate_attr_group = {
2640 .attrs = per_node_hstate_attrs,
2641};
2642
2643
2644
2645
2646
2647static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2648{
2649 int nid;
2650
2651 for (nid = 0; nid < nr_node_ids; nid++) {
2652 struct node_hstate *nhs = &node_hstates[nid];
2653 int i;
2654 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2655 if (nhs->hstate_kobjs[i] == kobj) {
2656 if (nidp)
2657 *nidp = nid;
2658 return &hstates[i];
2659 }
2660 }
2661
2662 BUG();
2663 return NULL;
2664}
2665
2666
2667
2668
2669
2670static void hugetlb_unregister_node(struct node *node)
2671{
2672 struct hstate *h;
2673 struct node_hstate *nhs = &node_hstates[node->dev.id];
2674
2675 if (!nhs->hugepages_kobj)
2676 return;
2677
2678 for_each_hstate(h) {
2679 int idx = hstate_index(h);
2680 if (nhs->hstate_kobjs[idx]) {
2681 kobject_put(nhs->hstate_kobjs[idx]);
2682 nhs->hstate_kobjs[idx] = NULL;
2683 }
2684 }
2685
2686 kobject_put(nhs->hugepages_kobj);
2687 nhs->hugepages_kobj = NULL;
2688}
2689
2690
2691
2692
2693
2694
2695static void hugetlb_register_node(struct node *node)
2696{
2697 struct hstate *h;
2698 struct node_hstate *nhs = &node_hstates[node->dev.id];
2699 int err;
2700
2701 if (nhs->hugepages_kobj)
2702 return;
2703
2704 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2705 &node->dev.kobj);
2706 if (!nhs->hugepages_kobj)
2707 return;
2708
2709 for_each_hstate(h) {
2710 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2711 nhs->hstate_kobjs,
2712 &per_node_hstate_attr_group);
2713 if (err) {
2714 pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2715 h->name, node->dev.id);
2716 hugetlb_unregister_node(node);
2717 break;
2718 }
2719 }
2720}
2721
2722
2723
2724
2725
2726
2727static void __init hugetlb_register_all_nodes(void)
2728{
2729 int nid;
2730
2731 for_each_node_state(nid, N_MEMORY) {
2732 struct node *node = node_devices[nid];
2733 if (node->dev.id == nid)
2734 hugetlb_register_node(node);
2735 }
2736
	/*
	 * Let the node device driver know we're here so it can
	 * [un]register hstate attributes on node hotplug.
	 */
	register_hugetlbfs_with_node(hugetlb_register_node,
				     hugetlb_unregister_node);
2743}
2744#else
2745
2746static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2747{
2748 BUG();
2749 if (nidp)
2750 *nidp = -1;
2751 return NULL;
2752}
2753
2754static void hugetlb_register_all_nodes(void) { }
2755
2756#endif
2757
2758static int __init hugetlb_init(void)
2759{
2760 int i;
2761
2762 if (!hugepages_supported())
2763 return 0;
2764
2765 if (!size_to_hstate(default_hstate_size)) {
2766 if (default_hstate_size != 0) {
2767 pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n",
2768 default_hstate_size, HPAGE_SIZE);
2769 }
2770
2771 default_hstate_size = HPAGE_SIZE;
2772 if (!size_to_hstate(default_hstate_size))
2773 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2774 }
2775 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2776 if (default_hstate_max_huge_pages) {
2777 if (!default_hstate.max_huge_pages)
2778 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2779 }
2780
2781 hugetlb_init_hstates();
2782 gather_bootmem_prealloc();
2783 report_hugepages();
2784
2785 hugetlb_sysfs_init();
2786 hugetlb_register_all_nodes();
2787 hugetlb_cgroup_file_init();
2788
2789#ifdef CONFIG_SMP
2790 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2791#else
2792 num_fault_mutexes = 1;
2793#endif
2794 hugetlb_fault_mutex_table =
2795 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
2796 GFP_KERNEL);
2797 BUG_ON(!hugetlb_fault_mutex_table);
2798
2799 for (i = 0; i < num_fault_mutexes; i++)
2800 mutex_init(&hugetlb_fault_mutex_table[i]);
2801 return 0;
2802}
2803subsys_initcall(hugetlb_init);
2804
/* Should be called on processing a hugepagesz=... option */
void __init hugetlb_bad_size(void)
2807{
2808 parsed_valid_hugepagesz = false;
2809}
2810
2811void __init hugetlb_add_hstate(unsigned int order)
2812{
2813 struct hstate *h;
2814 unsigned long i;
2815
2816 if (size_to_hstate(PAGE_SIZE << order)) {
2817 pr_warn("hugepagesz= specified twice, ignoring\n");
2818 return;
2819 }
2820 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2821 BUG_ON(order == 0);
2822 h = &hstates[hugetlb_max_hstate++];
2823 h->order = order;
2824 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2825 h->nr_huge_pages = 0;
2826 h->free_huge_pages = 0;
2827 for (i = 0; i < MAX_NUMNODES; ++i)
2828 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2829 INIT_LIST_HEAD(&h->hugepage_activelist);
2830 h->next_nid_to_alloc = first_memory_node;
2831 h->next_nid_to_free = first_memory_node;
2832 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2833 huge_page_size(h)/1024);
2834
2835 parsed_hstate = h;
2836}
2837
2838static int __init hugetlb_nrpages_setup(char *s)
2839{
2840 unsigned long *mhp;
2841 static unsigned long *last_mhp;
2842
2843 if (!parsed_valid_hugepagesz) {
2844 pr_warn("hugepages = %s preceded by "
2845 "an unsupported hugepagesz, ignoring\n", s);
2846 parsed_valid_hugepagesz = true;
2847 return 1;
2848 }
	/*
	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
	 * yet, so this hugepages= parameter goes to the "default hstate".
	 */
	else if (!hugetlb_max_hstate)
2854 mhp = &default_hstate_max_huge_pages;
2855 else
2856 mhp = &parsed_hstate->max_huge_pages;
2857
2858 if (mhp == last_mhp) {
2859 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2860 return 1;
2861 }
2862
2863 if (sscanf(s, "%lu", mhp) <= 0)
2864 *mhp = 0;
	/*
	 * Global state is always initialized later in hugetlb_init.
	 * But we need to allocate >= MAX_ORDER hstates here early to still
	 * use the bootmem allocator.
	 */
	if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2872 hugetlb_hstate_alloc_pages(parsed_hstate);
2873
2874 last_mhp = mhp;
2875
2876 return 1;
2877}
2878__setup("hugepages=", hugetlb_nrpages_setup);
2879
2880static int __init hugetlb_default_setup(char *s)
2881{
2882 default_hstate_size = memparse(s, &s);
2883 return 1;
2884}
2885__setup("default_hugepagesz=", hugetlb_default_setup);
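
/*
 * Illustrative example (assumed usage, not taken from this file): booting
 * with
 *
 *	default_hugepagesz=1G hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512
 *
 * records 1G as the default huge page size and assigns each hugepages=
 * count to the most recently parsed hstate.  Pools of order >= MAX_ORDER
 * pages must be allocated immediately from bootmem; smaller pools are
 * populated later in hugetlb_init().
 */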
2886
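/*
 * Sum an hstate's per-node counter (e.g. free_huge_pages_node[]) over the
 * nodes allowed by the current task's cpuset.
 */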
2887static unsigned int cpuset_mems_nr(unsigned int *array)
2888{
2889 int node;
2890 unsigned int nr = 0;
2891
2892 for_each_node_mask(node, cpuset_current_mems_allowed)
2893 nr += array[node];
2894
2895 return nr;
2896}
2897
2898#ifdef CONFIG_SYSCTL
2899static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2900 struct ctl_table *table, int write,
2901 void __user *buffer, size_t *length, loff_t *ppos)
2902{
2903 struct hstate *h = &default_hstate;
2904 unsigned long tmp = h->max_huge_pages;
2905 int ret;
2906
2907 if (!hugepages_supported())
2908 return -EOPNOTSUPP;
2909
2910 table->data = &tmp;
2911 table->maxlen = sizeof(unsigned long);
2912 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2913 if (ret)
2914 goto out;
2915
2916 if (write)
2917 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2918 NUMA_NO_NODE, tmp, *length);
2919out:
2920 return ret;
2921}
2922
2923int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2924 void __user *buffer, size_t *length, loff_t *ppos)
2925{
2926
2927 return hugetlb_sysctl_handler_common(false, table, write,
2928 buffer, length, ppos);
2929}
2930
2931#ifdef CONFIG_NUMA
2932int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2933 void __user *buffer, size_t *length, loff_t *ppos)
2934{
2935 return hugetlb_sysctl_handler_common(true, table, write,
2936 buffer, length, ppos);
2937}
2938#endif
2939
2940int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2941 void __user *buffer,
2942 size_t *length, loff_t *ppos)
2943{
2944 struct hstate *h = &default_hstate;
2945 unsigned long tmp;
2946 int ret;
2947
2948 if (!hugepages_supported())
2949 return -EOPNOTSUPP;
2950
2951 tmp = h->nr_overcommit_huge_pages;
2952
2953 if (write && hstate_is_gigantic(h))
2954 return -EINVAL;
2955
2956 table->data = &tmp;
2957 table->maxlen = sizeof(unsigned long);
2958 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2959 if (ret)
2960 goto out;
2961
2962 if (write) {
2963 spin_lock(&hugetlb_lock);
2964 h->nr_overcommit_huge_pages = tmp;
2965 spin_unlock(&hugetlb_lock);
2966 }
2967out:
2968 return ret;
2969}
2970
2971#endif
2972
2973void hugetlb_report_meminfo(struct seq_file *m)
2974{
2975 struct hstate *h;
2976 unsigned long total = 0;
2977
2978 if (!hugepages_supported())
2979 return;
2980
2981 for_each_hstate(h) {
2982 unsigned long count = h->nr_huge_pages;
2983
2984 total += (PAGE_SIZE << huge_page_order(h)) * count;
2985
2986 if (h == &default_hstate)
2987 seq_printf(m,
2988 "HugePages_Total: %5lu\n"
2989 "HugePages_Free: %5lu\n"
2990 "HugePages_Rsvd: %5lu\n"
2991 "HugePages_Surp: %5lu\n"
2992 "Hugepagesize: %8lu kB\n",
2993 count,
2994 h->free_huge_pages,
2995 h->resv_huge_pages,
2996 h->surplus_huge_pages,
2997 (PAGE_SIZE << huge_page_order(h)) / 1024);
2998 }
2999
3000 seq_printf(m, "Hugetlb: %8lu kB\n", total / 1024);
3001}
3002
3003int hugetlb_report_node_meminfo(int nid, char *buf)
3004{
3005 struct hstate *h = &default_hstate;
3006 if (!hugepages_supported())
3007 return 0;
3008 return sprintf(buf,
3009 "Node %d HugePages_Total: %5u\n"
3010 "Node %d HugePages_Free: %5u\n"
3011 "Node %d HugePages_Surp: %5u\n",
3012 nid, h->nr_huge_pages_node[nid],
3013 nid, h->free_huge_pages_node[nid],
3014 nid, h->surplus_huge_pages_node[nid]);
3015}
3016
3017void hugetlb_show_meminfo(void)
3018{
3019 struct hstate *h;
3020 int nid;
3021
3022 if (!hugepages_supported())
3023 return;
3024
3025 for_each_node_state(nid, N_MEMORY)
3026 for_each_hstate(h)
3027 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3028 nid,
3029 h->nr_huge_pages_node[nid],
3030 h->free_huge_pages_node[nid],
3031 h->surplus_huge_pages_node[nid],
3032 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3033}
3034
3035void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3036{
3037 seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3038 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3039}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units */
unsigned long hugetlb_total_pages(void)
3043{
3044 struct hstate *h;
3045 unsigned long nr_total_pages = 0;
3046
3047 for_each_hstate(h)
3048 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3049 return nr_total_pages;
3050}
3051
3052static int hugetlb_acct_memory(struct hstate *h, long delta)
3053{
3054 int ret = -ENOMEM;
3055
3056 spin_lock(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable.  Such
	 * a reservation is meaningless in the presence of cpuset because it
	 * is not checked against page availability for the current cpuset;
	 * an application can still be OOM killed by the kernel for lack of
	 * free huge pages in the cpuset that the task is in.  Enforcing
	 * strict accounting with cpuset is nearly impossible (or too ugly)
	 * because cpusets are too fluid: tasks or memory nodes can be moved
	 * between cpusets at any time.
	 *
	 * Changing the semantics of shared hugetlb mappings with cpuset is
	 * undesirable.  However, to preserve some of the semantics, we fall
	 * back to checking against current free page availability as a best
	 * attempt, hopefully minimizing the impact of the semantics cpuset
	 * changes.
	 */
	if (delta > 0) {
3075 if (gather_surplus_pages(h, delta) < 0)
3076 goto out;
3077
3078 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
3079 return_unused_surplus_pages(h, delta);
3080 goto out;
3081 }
3082 }
3083
3084 ret = 0;
3085 if (delta < 0)
3086 return_unused_surplus_pages(h, (unsigned long) -delta);
3087
3088out:
3089 spin_unlock(&hugetlb_lock);
3090 return ret;
3091}
3092
3093static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3094{
3095 struct resv_map *resv = vma_resv_map(vma);

	/*
	 * This new VMA should share its siblings reservation map if present.
	 * The VMA will only ever have a valid reservation map pointer where
	 * it is being copied for another still existing VMA.  As that VMA
	 * has a reference to the reservation map it cannot disappear until
	 * after this open call completes.  It is therefore safe to take a
	 * new reference here without additional locking.
	 */
	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3106 kref_get(&resv->refs);
3107}
3108
3109static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3110{
3111 struct hstate *h = hstate_vma(vma);
3112 struct resv_map *resv = vma_resv_map(vma);
3113 struct hugepage_subpool *spool = subpool_vma(vma);
3114 unsigned long reserve, start, end;
3115 long gbl_reserve;
3116
3117 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3118 return;
3119
3120 start = vma_hugecache_offset(h, vma, vma->vm_start);
3121 end = vma_hugecache_offset(h, vma, vma->vm_end);
3122
3123 reserve = (end - start) - region_count(resv, start, end);
3124
3125 kref_put(&resv->refs, resv_map_release);
3126
	if (reserve) {
		/*
		 * Decrement reserve counts.  The global reserve count may be
		 * adjusted if the subpool has a minimum size.
		 */
		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3133 hugetlb_acct_memory(h, -gbl_reserve);
3134 }
3135}
3136
3137static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3138{
3139 if (addr & ~(huge_page_mask(hstate_vma(vma))))
3140 return -EINVAL;
3141 return 0;
3142}
3143
3144static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
3145{
3146 struct hstate *hstate = hstate_vma(vma);
3147
3148 return 1UL << huge_page_shift(hstate);
3149}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
{
	BUG();
	return 0;
}

/*
 * When a new function is introduced to vm_operations_struct and added
 * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
 * This is because under the System V memory model, mappings created via
 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
 * and their original vm_ops are overwritten with shm_vm_ops.
 */
const struct vm_operations_struct hugetlb_vm_ops = {
3171 .fault = hugetlb_vm_op_fault,
3172 .open = hugetlb_vm_op_open,
3173 .close = hugetlb_vm_op_close,
3174 .split = hugetlb_vm_op_split,
3175 .pagesize = hugetlb_vm_op_pagesize,
3176};
3177
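/*
 * Build the huge PTE for a page: writable mappings get a dirty, writable
 * entry, read-only mappings a write-protected one.  The entry is marked
 * young and huge, with a final arch-specific fixup.
 */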
3178static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3179 int writable)
3180{
3181 pte_t entry;
3182
3183 if (writable) {
3184 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3185 vma->vm_page_prot)));
3186 } else {
3187 entry = huge_pte_wrprotect(mk_huge_pte(page,
3188 vma->vm_page_prot));
3189 }
3190 entry = pte_mkyoung(entry);
3191 entry = pte_mkhuge(entry);
3192 entry = arch_make_huge_pte(entry, vma, page, writable);
3193
3194 return entry;
3195}
3196
3197static void set_huge_ptep_writable(struct vm_area_struct *vma,
3198 unsigned long address, pte_t *ptep)
3199{
3200 pte_t entry;
3201
3202 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3203 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3204 update_mmu_cache(vma, address, ptep);
3205}
3206
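/*
 * A huge PTE that is neither none nor present holds a swap-style entry;
 * the two helpers below distinguish migration entries from hwpoison
 * entries without touching the page.
 */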
3207bool is_hugetlb_entry_migration(pte_t pte)
3208{
3209 swp_entry_t swp;
3210
3211 if (huge_pte_none(pte) || pte_present(pte))
3212 return false;
3213 swp = pte_to_swp_entry(pte);
3214 if (non_swap_entry(swp) && is_migration_entry(swp))
3215 return true;
3216 else
3217 return false;
3218}
3219
3220static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3221{
3222 swp_entry_t swp;
3223
3224 if (huge_pte_none(pte) || pte_present(pte))
3225 return 0;
3226 swp = pte_to_swp_entry(pte);
3227 if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3228 return 1;
3229 else
3230 return 0;
3231}
3232
3233int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3234 struct vm_area_struct *vma)
3235{
3236 pte_t *src_pte, *dst_pte, entry;
3237 struct page *ptepage;
3238 unsigned long addr;
3239 int cow;
3240 struct hstate *h = hstate_vma(vma);
3241 unsigned long sz = huge_page_size(h);
3242 unsigned long mmun_start;
3243 unsigned long mmun_end;
3244 int ret = 0;
3245
3246 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3247
3248 mmun_start = vma->vm_start;
3249 mmun_end = vma->vm_end;
3250 if (cow)
3251 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
3252
3253 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3254 spinlock_t *src_ptl, *dst_ptl;
3255 src_pte = huge_pte_offset(src, addr, sz);
3256 if (!src_pte)
3257 continue;
3258 dst_pte = huge_pte_alloc(dst, addr, sz);
3259 if (!dst_pte) {
3260 ret = -ENOMEM;
3261 break;
3262 }

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
3266 continue;
3267
3268 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3269 src_ptl = huge_pte_lockptr(h, src, src_pte);
3270 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3271 entry = huge_ptep_get(src_pte);
3272 if (huge_pte_none(entry)) {
3273 ;
3274 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3275 is_hugetlb_entry_hwpoisoned(entry))) {
3276 swp_entry_t swp_entry = pte_to_swp_entry(entry);
3277
3278 if (is_write_migration_entry(swp_entry) && cow) {
				/*
				 * COW mappings require pages in both
				 * parent and child to be set to read.
				 */
				make_migration_entry_read(&swp_entry);
3284 entry = swp_entry_to_pte(swp_entry);
3285 set_huge_swap_pte_at(src, addr, src_pte,
3286 entry, sz);
3287 }
3288 set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
3289 } else {
3290 if (cow) {
				/*
				 * No need to notify as we are downgrading page
				 * table protection, not changing it to point
				 * to a new page.
				 *
				 * See Documentation/vm/mmu_notifier.rst
				 */
				huge_ptep_set_wrprotect(src, addr, src_pte);
3299 }
3300 entry = huge_ptep_get(src_pte);
3301 ptepage = pte_page(entry);
3302 get_page(ptepage);
3303 page_dup_rmap(ptepage, true);
3304 set_huge_pte_at(dst, addr, dst_pte, entry);
3305 hugetlb_count_add(pages_per_huge_page(h), dst);
3306 }
3307 spin_unlock(src_ptl);
3308 spin_unlock(dst_ptl);
3309 }
3310
3311 if (cow)
3312 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
3313
3314 return ret;
3315}
3316
3317void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3318 unsigned long start, unsigned long end,
3319 struct page *ref_page)
3320{
3321 struct mm_struct *mm = vma->vm_mm;
3322 unsigned long address;
3323 pte_t *ptep;
3324 pte_t pte;
3325 spinlock_t *ptl;
3326 struct page *page;
3327 struct hstate *h = hstate_vma(vma);
3328 unsigned long sz = huge_page_size(h);
3329 unsigned long mmun_start = start;
3330 unsigned long mmun_end = end;
3331
3332 WARN_ON(!is_vm_hugetlb_page(vma));
3333 BUG_ON(start & ~huge_page_mask(h));
3334 BUG_ON(end & ~huge_page_mask(h));
3335
	/*
	 * This is a hugetlb vma: all the pte entries should point
	 * to huge pages.
	 */
	tlb_remove_check_page_size_change(tlb, sz);
	tlb_start_vma(tlb, vma);

	/*
	 * If PMD sharing is possible, alert mmu notifiers of the worst case.
	 */
3346 adjust_range_if_pmd_sharing_possible(vma, &mmun_start, &mmun_end);
3347 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3348 address = start;
3349 for (; address < end; address += sz) {
3350 ptep = huge_pte_offset(mm, address, sz);
3351 if (!ptep)
3352 continue;
3353
3354 ptl = huge_pte_lock(h, mm, ptep);
3355 if (huge_pmd_unshare(mm, &address, ptep)) {
3356 spin_unlock(ptl);
			/*
			 * We just unmapped a page of PMDs by clearing a PUD.
			 * The caller's TLB flush range should cover this area.
			 */
			continue;
3362 }
3363
3364 pte = huge_ptep_get(ptep);
3365 if (huge_pte_none(pte)) {
3366 spin_unlock(ptl);
3367 continue;
3368 }
3369
		/*
		 * A migrating or HWPoisoned hugepage is already unmapped and
		 * its refcount is dropped, so just clear the pte here.
		 */
		if (unlikely(!pte_present(pte))) {
3375 huge_pte_clear(mm, address, ptep, sz);
3376 spin_unlock(ptl);
3377 continue;
3378 }
3379
3380 page = pte_page(pte);

		/*
		 * If a reference page is supplied, it is because a specific
		 * page is being unmapped, not a range.  Ensure the page we
		 * are about to unmap is the actual page of interest.
		 */
		if (ref_page) {
3387 if (page != ref_page) {
3388 spin_unlock(ptl);
3389 continue;
3390 }
			/*
			 * Mark the VMA as having unmapped its page so that
			 * future faults in this VMA will fail rather than
			 * looking like data was lost.
			 */
			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3397 }
3398
3399 pte = huge_ptep_get_and_clear(mm, address, ptep);
3400 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
3401 if (huge_pte_dirty(pte))
3402 set_page_dirty(page);
3403
3404 hugetlb_count_sub(pages_per_huge_page(h), mm);
3405 page_remove_rmap(page, true);
3406
3407 spin_unlock(ptl);
3408 tlb_remove_page_size(tlb, page, huge_page_size(h));

		/*
		 * Bail out after unmapping the reference page, if supplied.
		 */
		if (ref_page)
			break;
3414 }
3415 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3416 tlb_end_vma(tlb, vma);
3417}
3418
3419void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3420 struct vm_area_struct *vma, unsigned long start,
3421 unsigned long end, struct page *ref_page)
3422{
3423 __unmap_hugepage_range(tlb, vma, start, end, ref_page);

	/*
	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
	 * test will fail on a vma being torn down, and not grab a page table
	 * on its way out.  We're lucky that the flag has such an appropriate
	 * name, and can in fact be safely cleared here.  We could clear it
	 * before the __unmap_hugepage_range above, but all that's necessary
	 * is to clear it before releasing the i_mmap_rwsem.  This works
	 * because in the context this is called, the VMA is about to be
	 * destroyed and the i_mmap_rwsem is held.
	 */
	vma->vm_flags &= ~VM_MAYSHARE;
3436}
3437
3438void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3439 unsigned long end, struct page *ref_page)
3440{
3441 struct mm_struct *mm;
3442 struct mmu_gather tlb;
3443 unsigned long tlb_start = start;
3444 unsigned long tlb_end = end;
3445
	/*
	 * If shared PMDs were possibly used within this vma range, adjust
	 * start/end for the worst case tlb flushing.
	 * Note that we can not be sure if PMDs are shared until we try to
	 * unmap pages.  However, we want to make sure TLB flushing covers
	 * the largest possible range.
	 */
	adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
3454
3455 mm = vma->vm_mm;
3456
3457 tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
3458 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3459 tlb_finish_mmu(&tlb, tlb_start, tlb_end);
3460}
3461
/*
 * This is called when the original mapper is failing to COW a MAP_PRIVATE
 * mapping it owns the reserve page for.  The intention is to unmap the page
 * from other VMAs and let the children be SIGKILLed if they are faulting the
 * same region.
 */
static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3469 struct page *page, unsigned long address)
3470{
3471 struct hstate *h = hstate_vma(vma);
3472 struct vm_area_struct *iter_vma;
3473 struct address_space *mapping;
3474 pgoff_t pgoff;
3475
	/*
	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
	 * from the page cache lookup which is in HPAGE_SIZE units.
	 */
	address = address & huge_page_mask(h);
3481 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3482 vma->vm_pgoff;
3483 mapping = vma->vm_file->f_mapping;
3484
	/*
	 * Take the mapping lock for the duration of the table walk.  As
	 * this mapping should be shared between all the VMAs,
	 * __unmap_hugepage_range() is called as the lock is already held.
	 */
	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
		/* Do not unmap the current VMA */
		if (iter_vma == vma)
3494 continue;
3495
		/*
		 * Shared VMAs have their own reserves and do not affect
		 * MAP_PRIVATE accounting, but it is possible that a shared
		 * VMA is using the same page, so check and skip such VMAs.
		 */
		if (iter_vma->vm_flags & VM_MAYSHARE)
3502 continue;
3503
		/*
		 * Unmap the page from other VMAs without their own reserves.
		 * They get marked to be SIGKILLed if they fault in these
		 * areas.  This is because a future no-page fault on this VMA
		 * could insert a zeroed page instead of the data existing
		 * from the time of fork.  This would look like data corruption.
		 */
		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3512 unmap_hugepage_range(iter_vma, address,
3513 address + huge_page_size(h), page);
3514 }
3515 i_mmap_unlock_write(mapping);
3516}
3517
/*
 * hugetlb_cow() handles a copy-on-write fault on a huge page.  Called with
 * the page table lock (ptl) held; the lock may be dropped while a new page
 * is allocated and copied, but it is re-acquired before returning, as the
 * caller expects.
 */
static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3525 unsigned long address, pte_t *ptep,
3526 struct page *pagecache_page, spinlock_t *ptl)
3527{
3528 pte_t pte;
3529 struct hstate *h = hstate_vma(vma);
3530 struct page *old_page, *new_page;
3531 int outside_reserve = 0;
3532 vm_fault_t ret = 0;
3533 unsigned long mmun_start;
3534 unsigned long mmun_end;
3535 unsigned long haddr = address & huge_page_mask(h);
3536
3537 pte = huge_ptep_get(ptep);
3538 old_page = pte_page(pte);
3539
3540retry_avoidcopy:
3541
3542
3543 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3544 page_move_anon_rmap(old_page, vma);
3545 set_huge_ptep_writable(vma, haddr, ptep);
3546 return 0;
3547 }
3548
	/*
	 * If the process that created a MAP_PRIVATE mapping is about to
	 * perform a COW due to a shared page count, attempt to satisfy
	 * the allocation without using the existing reserves.  The pagecache
	 * page is used to determine if the reserve at this address was
	 * consumed or not.  If reserves were used, a partially faulted mapping
	 * at the time of fork() could consume its reserves on COW instead
	 * of the full address range.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
			old_page != pagecache_page)
3560 outside_reserve = 1;
3561
3562 get_page(old_page);
3563
	/*
	 * Drop the page table lock as the buddy allocator may be called.
	 * It will be re-acquired before returning to the caller, as expected.
	 */
	spin_unlock(ptl);
3569 new_page = alloc_huge_page(vma, haddr, outside_reserve);
3570
3571 if (IS_ERR(new_page)) {
		/*
		 * If a process owning a MAP_PRIVATE mapping fails to COW,
		 * it is due to references held by a child and an insufficient
		 * huge page pool.  To guarantee the original mapper's
		 * reliability, unmap the page from child processes.  The child
		 * may get SIGKILLed if it later faults.
		 */
		if (outside_reserve) {
3580 put_page(old_page);
3581 BUG_ON(huge_pte_none(pte));
3582 unmap_ref_private(mm, vma, old_page, haddr);
3583 BUG_ON(huge_pte_none(pte));
3584 spin_lock(ptl);
3585 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3586 if (likely(ptep &&
3587 pte_same(huge_ptep_get(ptep), pte)))
3588 goto retry_avoidcopy;
3589
3590
3591
3592
3593 return 0;
3594 }
3595
3596 ret = vmf_error(PTR_ERR(new_page));
3597 goto out_release_old;
3598 }
3599
	/*
	 * When the original hugepage is shared, it does not have
	 * an anon_vma prepared.
	 */
	if (unlikely(anon_vma_prepare(vma))) {
3605 ret = VM_FAULT_OOM;
3606 goto out_release_all;
3607 }
3608
3609 copy_user_huge_page(new_page, old_page, address, vma,
3610 pages_per_huge_page(h));
3611 __SetPageUptodate(new_page);
3612 set_page_huge_active(new_page);
3613
3614 mmun_start = haddr;
3615 mmun_end = mmun_start + huge_page_size(h);
3616 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3617
	/*
	 * Retake the page table lock to check for racing updates
	 * before the page tables are altered.
	 */
	spin_lock(ptl);
3623 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3624 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3625 ClearPagePrivate(new_page);
3626
3627
3628 huge_ptep_clear_flush(vma, haddr, ptep);
3629 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3630 set_huge_pte_at(mm, haddr, ptep,
3631 make_huge_pte(vma, new_page, 1));
3632 page_remove_rmap(old_page, true);
3633 hugepage_add_new_anon_rmap(new_page, vma, haddr);
3634
3635 new_page = old_page;
3636 }
3637 spin_unlock(ptl);
3638 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3639out_release_all:
3640 restore_reserve_on_error(h, vma, haddr, new_page);
3641 put_page(new_page);
3642out_release_old:
3643 put_page(old_page);

	spin_lock(ptl); /* Caller expects the lock to be held */
3646 return ret;
3647}
3648
/* Return the pagecache page at a given address within a VMA */
static struct page *hugetlbfs_pagecache_page(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
3652{
3653 struct address_space *mapping;
3654 pgoff_t idx;
3655
3656 mapping = vma->vm_file->f_mapping;
3657 idx = vma_hugecache_offset(h, vma, address);
3658
3659 return find_lock_page(mapping, idx);
3660}
3661
/*
 * Return whether there is a pagecache page to back the given address within
 * the VMA.  Caller follow_hugetlb_page() holds page_table_lock, so we
 * cannot lock_page.
 */
static bool hugetlbfs_pagecache_present(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
3668{
3669 struct address_space *mapping;
3670 pgoff_t idx;
3671 struct page *page;
3672
3673 mapping = vma->vm_file->f_mapping;
3674 idx = vma_hugecache_offset(h, vma, address);
3675
3676 page = find_get_page(mapping, idx);
3677 if (page)
3678 put_page(page);
3679 return page != NULL;
3680}
3681
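/*
 * Insert a freshly allocated huge page into the file's page cache and
 * charge it to the inode.  PagePrivate tracks whether freeing the page
 * must restore a consumed reservation; once the page is accounted in the
 * page cache that no longer applies, so the flag is cleared.  i_blocks
 * is kept in 512-byte units, hence blocks_per_huge_page().
 */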
3682int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3683 pgoff_t idx)
3684{
3685 struct inode *inode = mapping->host;
3686 struct hstate *h = hstate_inode(inode);
3687 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3688
3689 if (err)
3690 return err;
3691 ClearPagePrivate(page);
3692
3693 spin_lock(&inode->i_lock);
3694 inode->i_blocks += blocks_per_huge_page(h);
3695 spin_unlock(&inode->i_lock);
3696 return 0;
3697}
3698
3699static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
3700 struct vm_area_struct *vma,
3701 struct address_space *mapping, pgoff_t idx,
3702 unsigned long address, pte_t *ptep, unsigned int flags)
3703{
3704 struct hstate *h = hstate_vma(vma);
3705 vm_fault_t ret = VM_FAULT_SIGBUS;
3706 int anon_rmap = 0;
3707 unsigned long size;
3708 struct page *page;
3709 pte_t new_pte;
3710 spinlock_t *ptl;
3711 unsigned long haddr = address & huge_page_mask(h);
3712
	/*
	 * Currently, we are forced to kill the process in the event the
	 * original mapper has unmapped pages from the child due to a failed
	 * COW.  Warn that such a situation has occurred, as it may not be
	 * obvious.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3719 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3720 current->pid);
3721 return ret;
3722 }
3723
	/*
	 * Use the page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
3729 page = find_lock_page(mapping, idx);
3730 if (!page) {
3731 size = i_size_read(mapping->host) >> huge_page_shift(h);
3732 if (idx >= size)
3733 goto out;
3734

		/* Check for a page in the userfault range */
		if (userfaultfd_missing(vma)) {
3739 u32 hash;
3740 struct vm_fault vmf = {
3741 .vma = vma,
3742 .address = haddr,
3743 .flags = flags,
				/*
				 * Hard to debug if it ends up being
				 * used by a callee that assumes
				 * something about the other
				 * uninitialized fields... same as in
				 * memory.c
				 */
			};
3752
			/*
			 * hugetlb_fault_mutex must be dropped before
			 * handling userfault.  Reacquire after handling
			 * the fault to make the calling code simpler.
			 */
			hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
3759 idx, haddr);
3760 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3761 ret = handle_userfault(&vmf, VM_UFFD_MISSING);
3762 mutex_lock(&hugetlb_fault_mutex_table[hash]);
3763 goto out;
3764 }
3765
3766 page = alloc_huge_page(vma, haddr, 0);
3767 if (IS_ERR(page)) {
3768 ret = vmf_error(PTR_ERR(page));
3769 goto out;
3770 }
3771 clear_huge_page(page, address, pages_per_huge_page(h));
3772 __SetPageUptodate(page);
3773 set_page_huge_active(page);
3774
3775 if (vma->vm_flags & VM_MAYSHARE) {
3776 int err = huge_add_to_page_cache(page, mapping, idx);
3777 if (err) {
3778 put_page(page);
3779 if (err == -EEXIST)
3780 goto retry;
3781 goto out;
3782 }
3783 } else {
3784 lock_page(page);
3785 if (unlikely(anon_vma_prepare(vma))) {
3786 ret = VM_FAULT_OOM;
3787 goto backout_unlocked;
3788 }
3789 anon_rmap = 1;
3790 }
3791 } else {
		/*
		 * If a memory error occurs between mmap() and fault, the
		 * process may not have a hwpoisoned swap entry for the
		 * errored virtual address, so block the hugepage fault
		 * with a PG_hwpoison check.
		 */
		if (unlikely(PageHWPoison(page))) {
3798 ret = VM_FAULT_HWPOISON |
3799 VM_FAULT_SET_HINDEX(hstate_index(h));
3800 goto backout_unlocked;
3801 }
3802 }
3803
	/*
	 * If we are going to COW a private mapping later, we examine the
	 * pending reservations for this page now.  This ensures that any
	 * allocations necessary to record that reservation occur outside
	 * the spinlock.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3811 if (vma_needs_reservation(h, vma, haddr) < 0) {
3812 ret = VM_FAULT_OOM;
3813 goto backout_unlocked;
3814 }
3815
3816 vma_end_reservation(h, vma, haddr);
3817 }
3818
3819 ptl = huge_pte_lock(h, mm, ptep);
3820 size = i_size_read(mapping->host) >> huge_page_shift(h);
3821 if (idx >= size)
3822 goto backout;
3823
3824 ret = 0;
3825 if (!huge_pte_none(huge_ptep_get(ptep)))
3826 goto backout;
3827
3828 if (anon_rmap) {
3829 ClearPagePrivate(page);
3830 hugepage_add_new_anon_rmap(page, vma, haddr);
3831 } else
3832 page_dup_rmap(page, true);
3833 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3834 && (vma->vm_flags & VM_SHARED)));
3835 set_huge_pte_at(mm, haddr, ptep, new_pte);
3836
3837 hugetlb_count_add(pages_per_huge_page(h), mm);
3838 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3839
3840 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
3841 }
3842
3843 spin_unlock(ptl);
3844 unlock_page(page);
3845out:
3846 return ret;
3847
3848backout:
3849 spin_unlock(ptl);
3850backout_unlocked:
3851 unlock_page(page);
3852 restore_reserve_on_error(h, vma, haddr, page);
3853 put_page(page);
3854 goto out;
3855}
3856
3857#ifdef CONFIG_SMP
3858u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3859 struct vm_area_struct *vma,
3860 struct address_space *mapping,
3861 pgoff_t idx, unsigned long address)
3862{
3863 unsigned long key[2];
3864 u32 hash;
3865
3866 if (vma->vm_flags & VM_SHARED) {
3867 key[0] = (unsigned long) mapping;
3868 key[1] = idx;
3869 } else {
3870 key[0] = (unsigned long) mm;
3871 key[1] = address >> huge_page_shift(h);
3872 }
3873
3874 hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3875
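	/*
	 * num_fault_mutexes is a power of two (see hugetlb_init()), so
	 * masking with (num_fault_mutexes - 1) reduces the hash to a valid
	 * table index without a modulo.
	 */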
3876 return hash & (num_fault_mutexes - 1);
3877}
3878#else
/*
 * For uniprocessor systems we always use a single mutex, so just
 * return 0 and avoid the hashing overhead.
 */
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3884 struct vm_area_struct *vma,
3885 struct address_space *mapping,
3886 pgoff_t idx, unsigned long address)
3887{
3888 return 0;
3889}
3890#endif
3891
3892vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3893 unsigned long address, unsigned int flags)
3894{
3895 pte_t *ptep, entry;
3896 spinlock_t *ptl;
3897 vm_fault_t ret;
3898 u32 hash;
3899 pgoff_t idx;
3900 struct page *page = NULL;
3901 struct page *pagecache_page = NULL;
3902 struct hstate *h = hstate_vma(vma);
3903 struct address_space *mapping;
3904 int need_wait_lock = 0;
3905 unsigned long haddr = address & huge_page_mask(h);
3906
3907 ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3908 if (ptep) {
3909 entry = huge_ptep_get(ptep);
3910 if (unlikely(is_hugetlb_entry_migration(entry))) {
3911 migration_entry_wait_huge(vma, mm, ptep);
3912 return 0;
3913 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3914 return VM_FAULT_HWPOISON_LARGE |
3915 VM_FAULT_SET_HINDEX(hstate_index(h));
3916 } else {
3917 ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
3918 if (!ptep)
3919 return VM_FAULT_OOM;
3920 }
3921
3922 mapping = vma->vm_file->f_mapping;
3923 idx = vma_hugecache_offset(h, vma, haddr);
3924
	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, haddr);
3931 mutex_lock(&hugetlb_fault_mutex_table[hash]);
3932
3933 entry = huge_ptep_get(ptep);
3934 if (huge_pte_none(entry)) {
3935 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3936 goto out_mutex;
3937 }
3938
3939 ret = 0;

	/*
	 * entry could be a migration/hwpoison entry at this point, so this
	 * check prevents the kernel from going below assuming that we have
	 * an active hugepage in the pagecache.  This goto expects the 2nd
	 * page fault, and the is_hugetlb_entry_(migration|hwpoisoned) check
	 * there will handle it properly.
	 */
	if (!pte_present(entry))
3949 goto out_mutex;
3950
	/*
	 * If we are going to COW the mapping later, we examine the pending
	 * reservations for this page now.  This ensures that any allocations
	 * necessary to record that reservation occur outside the spinlock.
	 * For private mappings, we also look up the pagecache page now, as
	 * it is used to determine if a reservation has been consumed.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3960 if (vma_needs_reservation(h, vma, haddr) < 0) {
3961 ret = VM_FAULT_OOM;
3962 goto out_mutex;
3963 }
3964
3965 vma_end_reservation(h, vma, haddr);
3966
3967 if (!(vma->vm_flags & VM_MAYSHARE))
3968 pagecache_page = hugetlbfs_pagecache_page(h,
3969 vma, haddr);
3970 }
3971
3972 ptl = huge_pte_lock(h, mm, ptep);
3973
3974
3975 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3976 goto out_ptl;
3977
	/*
	 * hugetlb_cow() requires the page locks of pte_page(entry) and
	 * pagecache_page, so here we need to take the former one
	 * when page != pagecache_page or !pagecache_page.
	 */
	page = pte_page(entry);
3984 if (page != pagecache_page)
3985 if (!trylock_page(page)) {
3986 need_wait_lock = 1;
3987 goto out_ptl;
3988 }
3989
3990 get_page(page);
3991
3992 if (flags & FAULT_FLAG_WRITE) {
3993 if (!huge_pte_write(entry)) {
3994 ret = hugetlb_cow(mm, vma, address, ptep,
3995 pagecache_page, ptl);
3996 goto out_put_page;
3997 }
3998 entry = huge_pte_mkdirty(entry);
3999 }
4000 entry = pte_mkyoung(entry);
4001 if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
4002 flags & FAULT_FLAG_WRITE))
4003 update_mmu_cache(vma, haddr, ptep);
4004out_put_page:
4005 if (page != pagecache_page)
4006 unlock_page(page);
4007 put_page(page);
4008out_ptl:
4009 spin_unlock(ptl);
4010
4011 if (pagecache_page) {
4012 unlock_page(pagecache_page);
4013 put_page(pagecache_page);
4014 }
4015out_mutex:
4016 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4017
	/*
	 * Generally it's safe to hold a refcount while waiting for a page
	 * lock.  Here we just wait to defer the next page fault and avoid a
	 * busy loop; the page is not used after being unlocked before
	 * returning from the current page fault, so we are safe from
	 * accessing a freed page even though we hold no reference.
	 */
	if (need_wait_lock)
		wait_on_page_locked(page);
4026 return ret;
4027}
4028
/*
 * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
 * modifications for huge pages.
 */
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pte_t *dst_pte,
4035 struct vm_area_struct *dst_vma,
4036 unsigned long dst_addr,
4037 unsigned long src_addr,
4038 struct page **pagep)
4039{
4040 struct address_space *mapping;
4041 pgoff_t idx;
4042 unsigned long size;
4043 int vm_shared = dst_vma->vm_flags & VM_SHARED;
4044 struct hstate *h = hstate_vma(dst_vma);
4045 pte_t _dst_pte;
4046 spinlock_t *ptl;
4047 int ret;
4048 struct page *page;
4049
4050 if (!*pagep) {
4051 ret = -ENOMEM;
4052 page = alloc_huge_page(dst_vma, dst_addr, 0);
4053 if (IS_ERR(page))
4054 goto out;
4055
4056 ret = copy_huge_page_from_user(page,
4057 (const void __user *) src_addr,
4058 pages_per_huge_page(h), false);

		/* fallback to copy_from_user outside mmap_sem */
		if (unlikely(ret)) {
			ret = -EFAULT;
			*pagep = page;
			/* don't free the page */
			goto out;
4066 }
4067 } else {
4068 page = *pagep;
4069 *pagep = NULL;
4070 }
4071
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);
4078 set_page_huge_active(page);
4079
4080 mapping = dst_vma->vm_file->f_mapping;
4081 idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4082
	/*
	 * If shared, add to the page cache.
	 */
	if (vm_shared) {
4087 size = i_size_read(mapping->host) >> huge_page_shift(h);
4088 ret = -EFAULT;
4089 if (idx >= size)
4090 goto out_release_nounlock;
4091

		/*
		 * Serialization between remove_inode_hugepages() and
		 * huge_add_to_page_cache() below happens through the
		 * hugetlb_fault_mutex_table that must be held by the
		 * caller.
		 */
		ret = huge_add_to_page_cache(page, mapping, idx);
4099 if (ret)
4100 goto out_release_nounlock;
4101 }
4102
4103 ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
4104 spin_lock(ptl);
4105
	/*
	 * Recheck the i_size after holding the PT lock to make sure not to
	 * leave any page mapped (as page_mapped()) beyond the end of the
	 * i_size (remove_inode_hugepages() is strict about enforcing that).
	 * If we bail out here, we'll also leave a page in the radix tree in
	 * the vm_shared case beyond the end of the i_size, but
	 * remove_inode_hugepages() will take care of it as soon as we drop
	 * the hugetlb_fault_mutex_table.
	 */
	size = i_size_read(mapping->host) >> huge_page_shift(h);
4116 ret = -EFAULT;
4117 if (idx >= size)
4118 goto out_release_unlock;
4119
4120 ret = -EEXIST;
4121 if (!huge_pte_none(huge_ptep_get(dst_pte)))
4122 goto out_release_unlock;
4123
4124 if (vm_shared) {
4125 page_dup_rmap(page, true);
4126 } else {
4127 ClearPagePrivate(page);
4128 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
4129 }
4130
4131 _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
4132 if (dst_vma->vm_flags & VM_WRITE)
4133 _dst_pte = huge_pte_mkdirty(_dst_pte);
4134 _dst_pte = pte_mkyoung(_dst_pte);
4135
4136 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
4137
4138 (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
4139 dst_vma->vm_flags & VM_WRITE);
4140 hugetlb_count_add(pages_per_huge_page(h), dst_mm);
4141
4142
4143 update_mmu_cache(dst_vma, dst_addr, dst_pte);
4144
4145 spin_unlock(ptl);
4146 if (vm_shared)
4147 unlock_page(page);
4148 ret = 0;
4149out:
4150 return ret;
4151out_release_unlock:
4152 spin_unlock(ptl);
4153 if (vm_shared)
4154 unlock_page(page);
4155out_release_nounlock:
4156 put_page(page);
4157 goto out;
4158}
4159
4160long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4161 struct page **pages, struct vm_area_struct **vmas,
4162 unsigned long *position, unsigned long *nr_pages,
4163 long i, unsigned int flags, int *nonblocking)
4164{
4165 unsigned long pfn_offset;
4166 unsigned long vaddr = *position;
4167 unsigned long remainder = *nr_pages;
4168 struct hstate *h = hstate_vma(vma);
4169 int err = -EFAULT;
4170
4171 while (vaddr < vma->vm_end && remainder) {
4172 pte_t *pte;
4173 spinlock_t *ptl = NULL;
4174 int absent;
4175 struct page *page;
4176
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (unlikely(fatal_signal_pending(current))) {
4182 remainder = 0;
4183 break;
4184 }
4185
		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 *
		 * Note that the page table lock is not held when pte is null.
		 */
		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
				      huge_page_size(h));
4195 if (pte)
4196 ptl = huge_pte_lock(h, mm, pte);
4197 absent = !pte || huge_pte_none(huge_ptep_get(pte));
4198
		/*
		 * With FOLL_DUMP (coredump), fail on a hole that has no huge
		 * page in the page cache; get_dump_page() then skips the
		 * slot instead of dumping garbage.
		 */
		if (absent && (flags & FOLL_DUMP) &&
4207 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
4208 if (pte)
4209 spin_unlock(ptl);
4210 remainder = 0;
4211 break;
4212 }
4213
		/*
		 * hugetlb_fault() must handle: entries that are absent,
		 * entries under migration or hwpoisoned (swap entries, for
		 * which hugetlb_fault() waits or returns an error), and
		 * write access to write-protected entries (COW).
		 */
		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4225 ((flags & FOLL_WRITE) &&
4226 !huge_pte_write(huge_ptep_get(pte)))) {
4227 vm_fault_t ret;
4228 unsigned int fault_flags = 0;
4229
4230 if (pte)
4231 spin_unlock(ptl);
4232 if (flags & FOLL_WRITE)
4233 fault_flags |= FAULT_FLAG_WRITE;
4234 if (nonblocking)
4235 fault_flags |= FAULT_FLAG_ALLOW_RETRY;
4236 if (flags & FOLL_NOWAIT)
4237 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4238 FAULT_FLAG_RETRY_NOWAIT;
4239 if (flags & FOLL_TRIED) {
4240 VM_WARN_ON_ONCE(fault_flags &
4241 FAULT_FLAG_ALLOW_RETRY);
4242 fault_flags |= FAULT_FLAG_TRIED;
4243 }
4244 ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
4245 if (ret & VM_FAULT_ERROR) {
4246 err = vm_fault_to_errno(ret, flags);
4247 remainder = 0;
4248 break;
4249 }
4250 if (ret & VM_FAULT_RETRY) {
4251 if (nonblocking)
4252 *nonblocking = 0;
4253 *nr_pages = 0;
				/*
				 * VM_FAULT_RETRY must not return an
				 * error, it will return zero
				 * instead.
				 *
				 * No need to update "position" as the
				 * caller will not check it after
				 * *nr_pages is set to 0.
				 */
				return i;
4264 }
4265 continue;
4266 }
4267
4268 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4269 page = pte_page(huge_ptep_get(pte));
4270same_page:
4271 if (pages) {
4272 pages[i] = mem_map_offset(page, pfn_offset);
4273 get_page(pages[i]);
4274 }
4275
4276 if (vmas)
4277 vmas[i] = vma;
4278
4279 vaddr += PAGE_SIZE;
4280 ++pfn_offset;
4281 --remainder;
4282 ++i;
4283 if (vaddr < vma->vm_end && remainder &&
4284 pfn_offset < pages_per_huge_page(h)) {
4285
4286
4287
4288
4289 goto same_page;
4290 }
4291 spin_unlock(ptl);
4292 }
4293 *nr_pages = remainder;
	/*
	 * Setting position is actually required only if remainder is
	 * not zero, but it's faster not to add an "if (remainder)"
	 * branch here.
	 */
	*position = vaddr;
4300
4301 return i ? i : err;
4302}
4303
4304#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries
 * can implement this.  Otherwise fall back to the generic range flush.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
4310#endif
4311
4312unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4313 unsigned long address, unsigned long end, pgprot_t newprot)
4314{
4315 struct mm_struct *mm = vma->vm_mm;
4316 unsigned long start = address;
4317 pte_t *ptep;
4318 pte_t pte;
4319 struct hstate *h = hstate_vma(vma);
4320 unsigned long pages = 0;
4321 unsigned long f_start = start;
4322 unsigned long f_end = end;
4323 bool shared_pmd = false;
4324
	/*
	 * In the case of shared PMDs, the area to flush could be beyond
	 * start/end.  Set f_start/f_end to cover the maximum possible
	 * range if PMD sharing is possible.
	 */
	adjust_range_if_pmd_sharing_possible(vma, &f_start, &f_end);
4331
4332 BUG_ON(address >= end);
4333 flush_cache_range(vma, f_start, f_end);
4334
4335 mmu_notifier_invalidate_range_start(mm, f_start, f_end);
4336 i_mmap_lock_write(vma->vm_file->f_mapping);
4337 for (; address < end; address += huge_page_size(h)) {
4338 spinlock_t *ptl;
4339 ptep = huge_pte_offset(mm, address, huge_page_size(h));
4340 if (!ptep)
4341 continue;
4342 ptl = huge_pte_lock(h, mm, ptep);
4343 if (huge_pmd_unshare(mm, &address, ptep)) {
4344 pages++;
4345 spin_unlock(ptl);
4346 shared_pmd = true;
4347 continue;
4348 }
4349 pte = huge_ptep_get(ptep);
4350 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
4351 spin_unlock(ptl);
4352 continue;
4353 }
4354 if (unlikely(is_hugetlb_entry_migration(pte))) {
4355 swp_entry_t entry = pte_to_swp_entry(pte);
4356
4357 if (is_write_migration_entry(entry)) {
4358 pte_t newpte;
4359
4360 make_migration_entry_read(&entry);
4361 newpte = swp_entry_to_pte(entry);
4362 set_huge_swap_pte_at(mm, address, ptep,
4363 newpte, huge_page_size(h));
4364 pages++;
4365 }
4366 spin_unlock(ptl);
4367 continue;
4368 }
4369 if (!huge_pte_none(pte)) {
4370 pte = huge_ptep_get_and_clear(mm, address, ptep);
4371 pte = pte_mkhuge(huge_pte_modify(pte, newprot));
4372 pte = arch_make_huge_pte(pte, vma, NULL, 0);
4373 set_huge_pte_at(mm, address, ptep, pte);
4374 pages++;
4375 }
4376 spin_unlock(ptl);
4377 }
4378
	/*
	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
	 * may have cleared our pud entry and done put_page on the page table:
	 * once we release i_mmap_rwsem, another task can do the final put_page
	 * and that page table be reused and filled with junk.  If we actually
	 * did unshare a page of pmds, flush the range corresponding to the pud.
	 */
	if (shared_pmd)
		flush_hugetlb_tlb_range(vma, f_start, f_end);
	else
		flush_hugetlb_tlb_range(vma, start, end);
	/*
	 * No need to call mmu_notifier_invalidate_range(): we are downgrading
	 * page table protection, not changing it to point to a new page.
	 *
	 * See Documentation/vm/mmu_notifier.rst
	 */
4395 i_mmap_unlock_write(vma->vm_file->f_mapping);
4396 mmu_notifier_invalidate_range_end(mm, f_start, f_end);
4397
4398 return pages << h->order;
4399}
4400
4401int hugetlb_reserve_pages(struct inode *inode,
4402 long from, long to,
4403 struct vm_area_struct *vma,
4404 vm_flags_t vm_flags)
4405{
4406 long ret, chg;
4407 struct hstate *h = hstate_inode(inode);
4408 struct hugepage_subpool *spool = subpool_inode(inode);
4409 struct resv_map *resv_map;
4410 long gbl_reserve;
4411
4412
4413 if (from > to) {
4414 VM_WARN(1, "%s called with a negative range\n", __func__);
4415 return -EINVAL;
4416 }
4417
	/*
	 * Only apply the hugepage reservation if asked.  At fault time, an
	 * attempt will be made for VM_NORESERVE to allocate a page
	 * without using reserves.
	 */
	if (vm_flags & VM_NORESERVE)
4424 return 0;
4425
	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file.  Private mappings need
	 * to reserve the full area even if read-only, as mprotect() may be
	 * called to make the mapping read-write.  Assume !vma is a shm mapping.
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
4433 resv_map = inode_resv_map(inode);
4434
4435 chg = region_chg(resv_map, from, to);
4436
4437 } else {
4438 resv_map = resv_map_alloc();
4439 if (!resv_map)
4440 return -ENOMEM;
4441
4442 chg = to - from;
4443
4444 set_vma_resv_map(vma, resv_map);
4445 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4446 }
4447
4448 if (chg < 0) {
4449 ret = chg;
4450 goto out_err;
4451 }
4452
	/*
	 * There must be enough pages in the subpool for the mapping.  If
	 * the subpool has a minimum size, there may be some global
	 * reservations already in place (gbl_reserve).
	 */
	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4459 if (gbl_reserve < 0) {
4460 ret = -ENOSPC;
4461 goto out_err;
4462 }
4463
	/*
	 * Check that enough hugepages are available for the reservation.
	 * Hand the pages back to the subpool if there are not.
	 */
	ret = hugetlb_acct_memory(h, gbl_reserve);
4469 if (ret < 0) {
4470
4471 (void)hugepage_subpool_put_pages(spool, chg);
4472 goto out_err;
4473 }
4474
	/*
	 * Account for the reservations made.  Shared mappings record regions
	 * that have reservations as they are shared by multiple VMAs.
	 * When the last VMA disappears, the region map says how much
	 * the reservation was and the page cache tells how much of
	 * the reservation was consumed.  Private mappings are per-VMA and
	 * only the consumed reservations are tracked.  When the VMA
	 * disappears, the original reservation is the VMA size and the
	 * consumed reservations are stored in the map.  Hence, nothing
	 * else has to be done for private mappings here.
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
4487 long add = region_add(resv_map, from, to);
4488
4489 if (unlikely(chg > add)) {
			/*
			 * Pages in this range were added to the reserve
			 * map between region_chg and region_add.  This
			 * indicates a race with alloc_huge_page.  Adjust
			 * for the difference.
			 */
			long rsv_adjust;
4498
4499 rsv_adjust = hugepage_subpool_put_pages(spool,
4500 chg - add);
4501 hugetlb_acct_memory(h, -rsv_adjust);
4502 }
4503 }
4504 return 0;
4505out_err:
4506 if (!vma || vma->vm_flags & VM_MAYSHARE)
4507
4508 if (chg >= 0)
4509 region_abort(resv_map, from, to);
4510 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4511 kref_put(&resv_map->refs, resv_map_release);
4512 return ret;
4513}
4514
4515long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4516 long freed)
4517{
4518 struct hstate *h = hstate_inode(inode);
4519 struct resv_map *resv_map = inode_resv_map(inode);
4520 long chg = 0;
4521 struct hugepage_subpool *spool = subpool_inode(inode);
4522 long gbl_reserve;
4523
4524 if (resv_map) {
4525 chg = region_del(resv_map, start, end);
		/*
		 * region_del() can fail in the rare case where a region
		 * must be split and another region descriptor can not be
		 * allocated.  If end == LONG_MAX, it will not fail.
		 */
		if (chg < 0)
4532 return chg;
4533 }
4534
4535 spin_lock(&inode->i_lock);
4536 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4537 spin_unlock(&inode->i_lock);
4538
	/*
	 * If the subpool has a minimum size, the number of global
	 * reservations to be released may be adjusted.
	 */
	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4544 hugetlb_acct_memory(h, -gbl_reserve);
4545
4546 return 0;
4547}
4548
4549#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
4550static unsigned long page_table_shareable(struct vm_area_struct *svma,
4551 struct vm_area_struct *vma,
4552 unsigned long addr, pgoff_t idx)
4553{
4554 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4555 svma->vm_start;
4556 unsigned long sbase = saddr & PUD_MASK;
4557 unsigned long s_end = sbase + PUD_SIZE;
4558
4559
4560 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4561 unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4562

	/*
	 * Match the virtual addresses, permissions and the alignment of the
	 * page table page.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
4568 vm_flags != svm_flags ||
4569 sbase < svma->vm_start || svma->vm_end < s_end)
4570 return 0;
4571
4572 return saddr;
4573}
4574
4575static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4576{
4577 unsigned long base = addr & PUD_MASK;
4578 unsigned long end = base + PUD_SIZE;
4579
4580
4581
4582
4583 if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
4584 return true;
4585 return false;
4586}
4587
/*
 * Determine if the start,end range within vma could be mapped by a shared
 * pmd.  If yes, adjust start and end to cover the range associated with
 * possible shared pmd mappings.
 */
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4594 unsigned long *start, unsigned long *end)
4595{
4596 unsigned long check_addr = *start;
4597
4598 if (!(vma->vm_flags & VM_MAYSHARE))
4599 return;
4600
4601 for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
4602 unsigned long a_start = check_addr & PUD_MASK;
4603 unsigned long a_end = a_start + PUD_SIZE;
4604
4605
4606
4607
4608 if (range_in_vma(vma, a_start, a_end)) {
4609 if (a_start < *start)
4610 *start = a_start;
4611 if (a_end > *end)
4612 *end = a_end;
4613 }
4614 }
4615}
4616
/*
 * Search for a shareable pmd page for hugetlb.  In any case it calls
 * pmd_alloc() and returns the corresponding pte.  While this is not
 * necessary for the !shared pmd case because we can allocate the pmd later
 * as well, it makes the code much cleaner.  pmd allocation is essential for
 * the shared case because the pud has to be populated inside the same
 * i_mmap_rwsem section - otherwise racing tasks could either miss the
 * sharing (see huge_pte_offset) or select a pmd that is already populated.
 */
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4627{
4628 struct vm_area_struct *vma = find_vma(mm, addr);
4629 struct address_space *mapping = vma->vm_file->f_mapping;
4630 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4631 vma->vm_pgoff;
4632 struct vm_area_struct *svma;
4633 unsigned long saddr;
4634 pte_t *spte = NULL;
4635 pte_t *pte;
4636 spinlock_t *ptl;
4637
4638 if (!vma_shareable(vma, addr))
4639 return (pte_t *)pmd_alloc(mm, pud, addr);
4640
4641 i_mmap_lock_write(mapping);
4642 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4643 if (svma == vma)
4644 continue;
4645
4646 saddr = page_table_shareable(svma, vma, addr, idx);
4647 if (saddr) {
4648 spte = huge_pte_offset(svma->vm_mm, saddr,
4649 vma_mmu_pagesize(svma));
4650 if (spte) {
4651 get_page(virt_to_page(spte));
4652 break;
4653 }
4654 }
4655 }
4656
4657 if (!spte)
4658 goto out;
4659
4660 ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
4661 if (pud_none(*pud)) {
4662 pud_populate(mm, pud,
4663 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4664 mm_inc_nr_pmds(mm);
4665 } else {
4666 put_page(virt_to_page(spte));
4667 }
4668 spin_unlock(ptl);
4669out:
4670 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4671 i_mmap_unlock_write(mapping);
4672 return pte;
4673}
4674
/*
 * Unmap a huge page backed by a shared pte.
 *
 * The hugetlb pte page is ref counted at the time of mapping.  If the pte
 * is shared (indicated by page_count > 1), unmap is achieved by clearing
 * the pud and decrementing the ref count.  If count == 1, the pte page is
 * not shared.
 *
 * Called with the page table lock held.
 *
 * returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4688{
4689 pgd_t *pgd = pgd_offset(mm, *addr);
4690 p4d_t *p4d = p4d_offset(pgd, *addr);
4691 pud_t *pud = pud_offset(p4d, *addr);
4692
4693 BUG_ON(page_count(virt_to_page(ptep)) == 0);
4694 if (page_count(virt_to_page(ptep)) == 1)
4695 return 0;
4696
4697 pud_clear(pud);
4698 put_page(virt_to_page(ptep));
4699 mm_dec_nr_pmds(mm);
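	/*
	 * Clearing the PUD unmapped a whole PUD-sized range at once, so
	 * rewind *addr to its last huge page; the caller's loop increment
	 * then continues at the next PUD boundary.
	 */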
4700 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4701 return 1;
4702}
4703#define want_pmd_share() (1)
4704#else
4705pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4706{
4707 return NULL;
4708}
4709
4710int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4711{
4712 return 0;
4713}
4714
4715void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4716 unsigned long *start, unsigned long *end)
4717{
4718}
4719#define want_pmd_share() (0)
4720#endif
4721
4722#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
4723pte_t *huge_pte_alloc(struct mm_struct *mm,
4724 unsigned long addr, unsigned long sz)
4725{
4726 pgd_t *pgd;
4727 p4d_t *p4d;
4728 pud_t *pud;
4729 pte_t *pte = NULL;
4730
4731 pgd = pgd_offset(mm, addr);
4732 p4d = p4d_alloc(mm, pgd, addr);
4733 if (!p4d)
4734 return NULL;
4735 pud = pud_alloc(mm, p4d, addr);
4736 if (pud) {
4737 if (sz == PUD_SIZE) {
4738 pte = (pte_t *)pud;
4739 } else {
4740 BUG_ON(sz != PMD_SIZE);
4741 if (want_pmd_share() && pud_none(*pud))
4742 pte = huge_pmd_share(mm, addr, pud);
4743 else
4744 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4745 }
4746 }
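	/* A present leaf entry at this point must map a huge page. */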
4747 BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
4748
4749 return pte;
4750}
4751
/*
 * huge_pte_offset() - Walk the page table to resolve the hugepage
 * entry at address @addr
 *
 * Return: Pointer to page table or swap entry (PUD or PMD) for
 * address @addr, or NULL if a p*d_none() entry is encountered and the
 * size @sz doesn't match the hugepage size at this level of the page
 * table.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
4762 unsigned long addr, unsigned long sz)
4763{
4764 pgd_t *pgd;
4765 p4d_t *p4d;
4766 pud_t *pud;
4767 pmd_t *pmd;
4768
4769 pgd = pgd_offset(mm, addr);
4770 if (!pgd_present(*pgd))
4771 return NULL;
4772 p4d = p4d_offset(pgd, addr);
4773 if (!p4d_present(*p4d))
4774 return NULL;
4775
4776 pud = pud_offset(p4d, addr);
4777 if (sz != PUD_SIZE && pud_none(*pud))
4778 return NULL;
4779
4780 if (pud_huge(*pud) || !pud_present(*pud))
4781 return (pte_t *)pud;
4782
4783 pmd = pmd_offset(pud, addr);
4784 if (sz != PMD_SIZE && pmd_none(*pmd))
4785 return NULL;
4786
4787 if (pmd_huge(*pmd) || !pmd_present(*pmd))
4788 return (pte_t *)pmd;
4789
4790 return NULL;
4791}
4792
4793#endif
4794
/*
 * These functions are overwritable if your architecture needs its own
 * behavior.
 */
struct page * __weak
4800follow_huge_addr(struct mm_struct *mm, unsigned long address,
4801 int write)
4802{
4803 return ERR_PTR(-EINVAL);
4804}
4805
4806struct page * __weak
4807follow_huge_pd(struct vm_area_struct *vma,
4808 unsigned long address, hugepd_t hpd, int flags, int pdshift)
4809{
4810 WARN(1, "hugepd follow called with no support for hugepage directory format\n");
4811 return NULL;
4812}
4813
4814struct page * __weak
4815follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4816 pmd_t *pmd, int flags)
4817{
4818 struct page *page = NULL;
4819 spinlock_t *ptl;
4820 pte_t pte;
4821retry:
4822 ptl = pmd_lockptr(mm, pmd);
4823 spin_lock(ptl);
	/*
	 * Make sure that the address range covered by this pmd is not
	 * unmapped from other threads.
	 */
	if (!pmd_huge(*pmd))
4829 goto out;
4830 pte = huge_ptep_get((pte_t *)pmd);
4831 if (pte_present(pte)) {
4832 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4833 if (flags & FOLL_GET)
4834 get_page(page);
4835 } else {
4836 if (is_hugetlb_entry_migration(pte)) {
4837 spin_unlock(ptl);
4838 __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4839 goto retry;
4840 }
		/*
		 * A hwpoisoned entry is treated as no_page_table in
		 * follow_page_mask().
		 */
	}
4846out:
4847 spin_unlock(ptl);
4848 return page;
4849}
4850
4851struct page * __weak
4852follow_huge_pud(struct mm_struct *mm, unsigned long address,
4853 pud_t *pud, int flags)
4854{
4855 if (flags & FOLL_GET)
4856 return NULL;
4857
4858 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4859}
4860
4861struct page * __weak
4862follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
4863{
4864 if (flags & FOLL_GET)
4865 return NULL;
4866
4867 return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
4868}
4869
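/*
 * Isolate an in-use huge page for migration: take a reference, clear its
 * "active" flag so it cannot be isolated twice, and move it from the
 * active list to the caller's list.  Fails if the page is not active or
 * its refcount has already dropped to zero.
 */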
4870bool isolate_huge_page(struct page *page, struct list_head *list)
4871{
4872 bool ret = true;
4873
4874 VM_BUG_ON_PAGE(!PageHead(page), page);
4875 spin_lock(&hugetlb_lock);
4876 if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4877 ret = false;
4878 goto unlock;
4879 }
4880 clear_page_huge_active(page);
4881 list_move_tail(&page->lru, list);
4882unlock:
4883 spin_unlock(&hugetlb_lock);
4884 return ret;
4885}
4886
4887void putback_active_hugepage(struct page *page)
4888{
4889 VM_BUG_ON_PAGE(!PageHead(page), page);
4890 spin_lock(&hugetlb_lock);
4891 set_page_huge_active(page);
4892 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4893 spin_unlock(&hugetlb_lock);
4894 put_page(page);
4895}
4896
4897void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
4898{
4899 struct hstate *h = page_hstate(oldpage);
4900
4901 hugetlb_cgroup_migrate(oldpage, newpage);
4902 set_page_owner_migrate_reason(newpage, reason);
4903
	/*
	 * Transfer the temporary state of the new huge page.  This is
	 * reversed compared to other transitions because the new page is
	 * going to be final while the old one will be freed, so it takes
	 * over the temporary status.
	 *
	 * Also note that we have to transfer the per-node surplus state
	 * here as well, otherwise the global surplus count will not match
	 * the per-node counts.
	 */
	if (PageHugeTemporary(newpage)) {
4915 int old_nid = page_to_nid(oldpage);
4916 int new_nid = page_to_nid(newpage);
4917
4918 SetPageHugeTemporary(oldpage);
4919 ClearPageHugeTemporary(newpage);
4920
4921 spin_lock(&hugetlb_lock);
4922 if (h->surplus_huge_pages_node[old_nid]) {
4923 h->surplus_huge_pages_node[old_nid]--;
4924 h->surplus_huge_pages_node[new_nid]++;
4925 }
4926 spin_unlock(&hugetlb_lock);
4927 }
4928}
4929