/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/page-isolation.h>
#include <linux/jhash.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include "internal.h"

int hugepages_treat_as_movable;

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */
static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
	bool free = (spool->count == 0) && (spool->used_hpages == 0);

	spin_unlock(&spool->lock);

	/*
	 * If no pages are used and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool.
	 */
	if (free) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	spin_lock(&spool->lock);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	if (spool->min_hpages != -1) {		/* minimum size accounting */
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock(&spool->lock);
	return ret;
}

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return delta;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	if (spool->min_hpages != -1) {		/* minimum size accounting */
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool);

	return ret;
}

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by the resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region, and to is the first index past the end of the region.
 *
 * Interval notation of the form [from, to) is used to indicate that the
 * endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

/*
 * Add the huge page range represented by [f, t) to the reserve map.  In
 * the normal case, existing regions are expanded to cover the specified
 * range.  region_chg must have been called previously with the same range,
 * which guarantees that a region descriptor is available in the cache, so
 * this routine can never fail.
 *
 * Return the number of new huge pages added to the map; this is always
 * greater than or equal to zero.
 */
static long region_add(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *nrg, *trg;
	long add = 0;

	spin_lock(&resv->lock);
	/* Locate the region or after where our new region fits. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/*
	 * If no region exists which can be expanded to include the
	 * specified range, pull a region descriptor from the cache
	 * and use it for this range.
	 */
	if (&rg->link == head || t < rg->from) {
		VM_BUG_ON(resv->region_cache_count <= 0);

		resv->region_cache_count--;
		nrg = list_first_entry(&resv->region_cache, struct file_region,
					link);
		list_del(&nrg->link);

		nrg->from = f;
		nrg->to = t;
		list_add(&nrg->link, rg->link.prev);

		add += t - f;
		goto out_locked;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/*
		 * If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it.
		 */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			/*
			 * Decrement return value by the deleted range.
			 * Another range will span this area so that by
			 * end of routine add will be >= zero.
			 */
			add -= (rg->to - rg->from);
			list_del(&rg->link);
			kfree(rg);
		}
	}

	add += (nrg->from - f);		/* Added to beginning of region */
	nrg->from = f;
	add += t - nrg->to;		/* Added to end of region */
	nrg->to = t;

out_locked:
	resv->adds_in_progress--;
	spin_unlock(&resv->lock);
	VM_BUG_ON(add < 0);
	return add;
}
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
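/*
 * Examine the existing reserve map and determine how many huge pages in the
 * range [f, t) are NOT currently represented.  In addition, make sure a
 * region descriptor is available in the cache for a subsequent region_add
 * call covering the same range; the lock may be dropped and the allocation
 * retried to achieve this.  Returns the number of huge pages that need to
 * be added to the reserve map, or -ENOMEM if a needed file_region structure
 * or cache entry can not be allocated.
 */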
350static long region_chg(struct resv_map *resv, long f, long t)
351{
352 struct list_head *head = &resv->regions;
353 struct file_region *rg, *nrg = NULL;
354 long chg = 0;
355
356retry:
357 spin_lock(&resv->lock);
358retry_locked:
359 resv->adds_in_progress++;
360
361
362
363
364
365 if (resv->adds_in_progress > resv->region_cache_count) {
366 struct file_region *trg;
367
368 VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
369
370 resv->adds_in_progress--;
371 spin_unlock(&resv->lock);
372
373 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
374 if (!trg) {
375 kfree(nrg);
376 return -ENOMEM;
377 }
378
379 spin_lock(&resv->lock);
380 list_add(&trg->link, &resv->region_cache);
381 resv->region_cache_count++;
382 goto retry_locked;
383 }
384
385
386 list_for_each_entry(rg, head, link)
387 if (f <= rg->to)
388 break;
389
390
391
392
393 if (&rg->link == head || t < rg->from) {
394 if (!nrg) {
395 resv->adds_in_progress--;
396 spin_unlock(&resv->lock);
397 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
398 if (!nrg)
399 return -ENOMEM;
400
401 nrg->from = f;
402 nrg->to = f;
403 INIT_LIST_HEAD(&nrg->link);
404 goto retry;
405 }
406
407 list_add(&nrg->link, rg->link.prev);
408 chg = t - f;
409 goto out_nrg;
410 }
411
412
413 if (f > rg->from)
414 f = rg->from;
415 chg = t - f;
416
417
418 list_for_each_entry(rg, rg->link.prev, link) {
419 if (&rg->link == head)
420 break;
421 if (rg->from > t)
422 goto out;
423
424
425
426
427 if (rg->to > t) {
428 chg += rg->to - t;
429 t = rg->to;
430 }
431 chg -= rg->to - rg->from;
432 }
433
434out:
435 spin_unlock(&resv->lock);
436
437 kfree(nrg);
438 return chg;
439out_nrg:
440 spin_unlock(&resv->lock);
441 return chg;
442}

/*
 * Abort the in progress add operation.  The adds_in_progress field of the
 * resv_map keeps track of the operations in progress between calls to
 * region_chg and region_add.  Operations are sometimes aborted after the
 * call to region_chg; in such cases, region_abort is called to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this routine.
 * They are kept to make reading the calling code easier, as it indicates
 * what range was/is to be modified.
 */
static void region_abort(struct resv_map *resv, long f, long t)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress--;
	spin_unlock(&resv->lock);
}
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
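/*
 * Delete the specified range [f, t) from the reserve map.  If the t
 * parameter is LONG_MAX, this indicates that ALL regions after f should
 * be deleted.  Locate the regions which intersect [f, t) and either trim,
 * delete or split the existing regions.  Returns the number of huge pages
 * deleted from the reserve map, or -ENOMEM if a region descriptor needed
 * to split an existing region could not be allocated.
 */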
477static long region_del(struct resv_map *resv, long f, long t)
478{
479 struct list_head *head = &resv->regions;
480 struct file_region *rg, *trg;
481 struct file_region *nrg = NULL;
482 long del = 0;
483
484retry:
485 spin_lock(&resv->lock);
486 list_for_each_entry_safe(rg, trg, head, link) {
487
488
489
490
491
492
493
494 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
495 continue;
496
497 if (rg->from >= t)
498 break;
499
500 if (f > rg->from && t < rg->to) {
501
502
503
504
505 if (!nrg &&
506 resv->region_cache_count > resv->adds_in_progress) {
507 nrg = list_first_entry(&resv->region_cache,
508 struct file_region,
509 link);
510 list_del(&nrg->link);
511 resv->region_cache_count--;
512 }
513
514 if (!nrg) {
515 spin_unlock(&resv->lock);
516 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
517 if (!nrg)
518 return -ENOMEM;
519 goto retry;
520 }
521
522 del += t - f;
523
524
525 nrg->from = t;
526 nrg->to = rg->to;
527 INIT_LIST_HEAD(&nrg->link);
528
529
530 rg->to = f;
531
532 list_add(&nrg->link, &rg->link);
533 nrg = NULL;
534 break;
535 }
536
537 if (f <= rg->from && t >= rg->to) {
538 del += rg->to - rg->from;
539 list_del(&rg->link);
540 kfree(rg);
541 continue;
542 }
543
544 if (f <= rg->from) {
545 del += t - rg->from;
546 rg->from = t;
547 } else {
548 del += rg->to - f;
549 rg->to = f;
550 }
551 }
552
553 spin_unlock(&resv->lock);
554 kfree(nrg);
555 return del;
556}

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed and
 * removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (restore_reserve && rsv_adjust) {
		struct hstate *h = hstate_inode(inode);

		hugetlb_acct_memory(h, 1);
	}
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}
610
611
612
613
614
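/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */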
615static pgoff_t vma_hugecache_offset(struct hstate *h,
616 struct vm_area_struct *vma, unsigned long address)
617{
618 return ((address - vma->vm_start) >> huge_page_shift(h)) +
619 (vma->vm_pgoff >> huge_page_order(h));
620}
621
622pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
623 unsigned long address)
624{
625 return vma_hugecache_offset(hstate_vma(vma), vma, address);
626}
627
628
629
630
631
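/*
 * Return the size of the pages allocated when backing a VMA. In the majority
 * of cases this will be the same size as used by the page table entries.
 */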
632unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
633{
634 struct hstate *hstate;
635
636 if (!is_vm_hugetlb_page(vma))
637 return PAGE_SIZE;
638
639 hstate = hstate_vma(vma);
640
641 return 1UL << huge_page_shift(hstate);
642}
643EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
644
645
646
647
648
649
650
651#ifndef vma_mmu_pagesize
652unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
653{
654 return vma_kernel_pagesize(vma);
655}
656#endif
657
658
659
660
661
662
663#define HPAGE_RESV_OWNER (1UL << 0)
664#define HPAGE_RESV_UNMAPPED (1UL << 1)
665#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686static unsigned long get_vma_private_data(struct vm_area_struct *vma)
687{
688 return (unsigned long)vma->vm_private_data;
689}
690
691static void set_vma_private_data(struct vm_area_struct *vma,
692 unsigned long value)
693{
694 vma->vm_private_data = (void *)value;
695}
696
697struct resv_map *resv_map_alloc(void)
698{
699 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
700 struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
701
702 if (!resv_map || !rg) {
703 kfree(resv_map);
704 kfree(rg);
705 return NULL;
706 }
707
708 kref_init(&resv_map->refs);
709 spin_lock_init(&resv_map->lock);
710 INIT_LIST_HEAD(&resv_map->regions);
711
712 resv_map->adds_in_progress = 0;
713
714 INIT_LIST_HEAD(&resv_map->region_cache);
715 list_add(&rg->link, &resv_map->region_cache);
716 resv_map->region_cache_count = 1;
717
718 return resv_map;
719}
720
721void resv_map_release(struct kref *ref)
722{
723 struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
724 struct list_head *head = &resv_map->region_cache;
725 struct file_region *rg, *trg;
726
727
728 region_del(resv_map, 0, LONG_MAX);
729
730
731 list_for_each_entry_safe(rg, trg, head, link) {
732 list_del(&rg->link);
733 kfree(rg);
734 }
735
736 VM_BUG_ON(resv_map->adds_in_progress);
737
738 kfree(resv_map);
739}
740
741static inline struct resv_map *inode_resv_map(struct inode *inode)
742{
743 return inode->i_mapping->private_data;
744}
745
746static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
747{
748 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
749 if (vma->vm_flags & VM_MAYSHARE) {
750 struct address_space *mapping = vma->vm_file->f_mapping;
751 struct inode *inode = mapping->host;
752
753 return inode_resv_map(inode);
754
755 } else {
756 return (struct resv_map *)(get_vma_private_data(vma) &
757 ~HPAGE_RESV_MASK);
758 }
759}
760
761static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
762{
763 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
764 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
765
766 set_vma_private_data(vma, (get_vma_private_data(vma) &
767 HPAGE_RESV_MASK) | (unsigned long)map);
768}
769
770static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
771{
772 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
773 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
774
775 set_vma_private_data(vma, get_vma_private_data(vma) | flags);
776}
777
778static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
779{
780 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
781
782 return (get_vma_private_data(vma) & flag) != 0;
783}
784
785
786void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
787{
788 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
789 if (!(vma->vm_flags & VM_MAYSHARE))
790 vma->vm_private_data = (void *)0;
791}
792
793
794static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
795{
796 if (vma->vm_flags & VM_NORESERVE) {
797
798
799
800
801
802
803
804
805
806 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
807 return true;
808 else
809 return false;
810 }
811
812
813 if (vma->vm_flags & VM_MAYSHARE) {
814
815
816
817
818
819
820
821 if (chg)
822 return false;
823 else
824 return true;
825 }
826
827
828
829
830
831 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
832 return true;
833
834 return false;
835}
836
837static void enqueue_huge_page(struct hstate *h, struct page *page)
838{
839 int nid = page_to_nid(page);
840 list_move(&page->lru, &h->hugepage_freelists[nid]);
841 h->free_huge_pages++;
842 h->free_huge_pages_node[nid]++;
843}
844
845static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
846{
847 struct page *page;
848
849 list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
850 if (!is_migrate_isolate_page(page))
851 break;
852
853
854
855
856 if (&h->hugepage_freelists[nid] == &page->lru)
857 return NULL;
858 list_move(&page->lru, &h->hugepage_activelist);
859 set_page_refcounted(page);
860 h->free_huge_pages--;
861 h->free_huge_pages_node[nid]--;
862 return page;
863}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepages_treat_as_movable || hugepage_migration_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}
873
874static struct page *dequeue_huge_page_vma(struct hstate *h,
875 struct vm_area_struct *vma,
876 unsigned long address, int avoid_reserve,
877 long chg)
878{
879 struct page *page = NULL;
880 struct mempolicy *mpol;
881 nodemask_t *nodemask;
882 struct zonelist *zonelist;
883 struct zone *zone;
884 struct zoneref *z;
885 unsigned int cpuset_mems_cookie;
886
887
888
889
890
891
892 if (!vma_has_reserves(vma, chg) &&
893 h->free_huge_pages - h->resv_huge_pages == 0)
894 goto err;
895
896
897 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
898 goto err;
899
900retry_cpuset:
901 cpuset_mems_cookie = read_mems_allowed_begin();
902 zonelist = huge_zonelist(vma, address,
903 htlb_alloc_mask(h), &mpol, &nodemask);
904
905 for_each_zone_zonelist_nodemask(zone, z, zonelist,
906 MAX_NR_ZONES - 1, nodemask) {
907 if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
908 page = dequeue_huge_page_node(h, zone_to_nid(zone));
909 if (page) {
910 if (avoid_reserve)
911 break;
912 if (!vma_has_reserves(vma, chg))
913 break;
914
915 SetPagePrivate(page);
916 h->resv_huge_pages--;
917 break;
918 }
919 }
920 }
921
922 mpol_cond_put(mpol);
923 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
924 goto retry_cpuset;
925 return page;
926
927err:
928 return NULL;
929}
930
931
932
933
934
935
936
937
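/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */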
938static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
939{
940 nid = next_node(nid, *nodes_allowed);
941 if (nid == MAX_NUMNODES)
942 nid = first_node(*nodes_allowed);
943 VM_BUG_ON(nid >= MAX_NUMNODES);
944
945 return nid;
946}
947
948static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
949{
950 if (!node_isset(nid, *nodes_allowed))
951 nid = next_node_allowed(nid, nodes_allowed);
952 return nid;
953}
954
955
956
957
958
959
960
961static int hstate_next_node_to_alloc(struct hstate *h,
962 nodemask_t *nodes_allowed)
963{
964 int nid;
965
966 VM_BUG_ON(!nodes_allowed);
967
968 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
969 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
970
971 return nid;
972}
973
974
975
976
977
978
979
980static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
981{
982 int nid;
983
984 VM_BUG_ON(!nodes_allowed);
985
986 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
987 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
988
989 return nid;
990}
991
992#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
993 for (nr_nodes = nodes_weight(*mask); \
994 nr_nodes > 0 && \
995 ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
996 nr_nodes--)
997
998#define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
999 for (nr_nodes = nodes_weight(*mask); \
1000 nr_nodes > 0 && \
1001 ((node = hstate_next_node_to_free(hs, mask)) || 1); \
1002 nr_nodes--)
1003
1004#if defined(CONFIG_X86_64) && ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA))
1005static void destroy_compound_gigantic_page(struct page *page,
1006 unsigned int order)
1007{
1008 int i;
1009 int nr_pages = 1 << order;
1010 struct page *p = page + 1;
1011
1012 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1013 clear_compound_head(p);
1014 set_page_refcounted(p);
1015 }
1016
1017 set_compound_order(page, 0);
1018 __ClearPageHead(page);
1019}
1020
1021static void free_gigantic_page(struct page *page, unsigned int order)
1022{
1023 free_contig_range(page_to_pfn(page), 1 << order);
1024}
1025
1026static int __alloc_gigantic_page(unsigned long start_pfn,
1027 unsigned long nr_pages)
1028{
1029 unsigned long end_pfn = start_pfn + nr_pages;
1030 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1031}
1032
1033static bool pfn_range_valid_gigantic(unsigned long start_pfn,
1034 unsigned long nr_pages)
1035{
1036 unsigned long i, end_pfn = start_pfn + nr_pages;
1037 struct page *page;
1038
1039 for (i = start_pfn; i < end_pfn; i++) {
1040 if (!pfn_valid(i))
1041 return false;
1042
1043 page = pfn_to_page(i);
1044
1045 if (PageReserved(page))
1046 return false;
1047
1048 if (page_count(page) > 0)
1049 return false;
1050
1051 if (PageHuge(page))
1052 return false;
1053 }
1054
1055 return true;
1056}
1057
1058static bool zone_spans_last_pfn(const struct zone *zone,
1059 unsigned long start_pfn, unsigned long nr_pages)
1060{
1061 unsigned long last_pfn = start_pfn + nr_pages - 1;
1062 return zone_spans_pfn(zone, last_pfn);
1063}
1064
1065static struct page *alloc_gigantic_page(int nid, unsigned int order)
1066{
1067 unsigned long nr_pages = 1 << order;
1068 unsigned long ret, pfn, flags;
1069 struct zone *z;
1070
1071 z = NODE_DATA(nid)->node_zones;
1072 for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
1073 spin_lock_irqsave(&z->lock, flags);
1074
1075 pfn = ALIGN(z->zone_start_pfn, nr_pages);
1076 while (zone_spans_last_pfn(z, pfn, nr_pages)) {
1077 if (pfn_range_valid_gigantic(pfn, nr_pages)) {
1078
1079
1080
1081
1082
1083
1084
1085 spin_unlock_irqrestore(&z->lock, flags);
1086 ret = __alloc_gigantic_page(pfn, nr_pages);
1087 if (!ret)
1088 return pfn_to_page(pfn);
1089 spin_lock_irqsave(&z->lock, flags);
1090 }
1091 pfn += nr_pages;
1092 }
1093
1094 spin_unlock_irqrestore(&z->lock, flags);
1095 }
1096
1097 return NULL;
1098}
1099
1100static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1101static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1102
1103static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
1104{
1105 struct page *page;
1106
1107 page = alloc_gigantic_page(nid, huge_page_order(h));
1108 if (page) {
1109 prep_compound_gigantic_page(page, huge_page_order(h));
1110 prep_new_huge_page(h, page, nid);
1111 }
1112
1113 return page;
1114}
1115
1116static int alloc_fresh_gigantic_page(struct hstate *h,
1117 nodemask_t *nodes_allowed)
1118{
1119 struct page *page = NULL;
1120 int nr_nodes, node;
1121
1122 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1123 page = alloc_fresh_gigantic_page_node(h, node);
1124 if (page)
1125 return 1;
1126 }
1127
1128 return 0;
1129}
1130
1131static inline bool gigantic_page_supported(void) { return true; }
1132#else
1133static inline bool gigantic_page_supported(void) { return false; }
1134static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1135static inline void destroy_compound_gigantic_page(struct page *page,
1136 unsigned int order) { }
1137static inline int alloc_fresh_gigantic_page(struct hstate *h,
1138 nodemask_t *nodes_allowed) { return 0; }
1139#endif
1140
1141static void update_and_free_page(struct hstate *h, struct page *page)
1142{
1143 int i;
1144
1145 if (hstate_is_gigantic(h) && !gigantic_page_supported())
1146 return;
1147
1148 h->nr_huge_pages--;
1149 h->nr_huge_pages_node[page_to_nid(page)]--;
1150 for (i = 0; i < pages_per_huge_page(h); i++) {
1151 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1152 1 << PG_referenced | 1 << PG_dirty |
1153 1 << PG_active | 1 << PG_private |
1154 1 << PG_writeback);
1155 }
1156 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1157 set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1158 set_page_refcounted(page);
1159 if (hstate_is_gigantic(h)) {
1160 destroy_compound_gigantic_page(page, huge_page_order(h));
1161 free_gigantic_page(page, huge_page_order(h));
1162 } else {
1163 __free_pages(page, huge_page_order(h));
1164 }
1165}
1166
1167struct hstate *size_to_hstate(unsigned long size)
1168{
1169 struct hstate *h;
1170
1171 for_each_hstate(h) {
1172 if (huge_page_size(h) == size)
1173 return h;
1174 }
1175 return NULL;
1176}

/*
 * Test to determine whether the hugepage is "active/in-use" (i.e. being
 * linked to hstate->hugepage_activelist).
 *
 * This function can be called for tail pages, but never returns true for
 * them.
 */
bool page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return PageHead(page) && PagePrivate(&page[1]);
}

/* never called for tail page */
static void set_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	SetPagePrivate(&page[1]);
}

static void clear_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	ClearPagePrivate(&page[1]);
}
1202
1203void free_huge_page(struct page *page)
1204{
1205
1206
1207
1208
1209 struct hstate *h = page_hstate(page);
1210 int nid = page_to_nid(page);
1211 struct hugepage_subpool *spool =
1212 (struct hugepage_subpool *)page_private(page);
1213 bool restore_reserve;
1214
1215 set_page_private(page, 0);
1216 page->mapping = NULL;
1217 VM_BUG_ON_PAGE(page_count(page), page);
1218 VM_BUG_ON_PAGE(page_mapcount(page), page);
1219 restore_reserve = PagePrivate(page);
1220 ClearPagePrivate(page);
1221
1222
1223
1224
1225
1226
1227 if (hugepage_subpool_put_pages(spool, 1) == 0)
1228 restore_reserve = true;
1229
1230 spin_lock(&hugetlb_lock);
1231 clear_page_huge_active(page);
1232 hugetlb_cgroup_uncharge_page(hstate_index(h),
1233 pages_per_huge_page(h), page);
1234 if (restore_reserve)
1235 h->resv_huge_pages++;
1236
1237 if (h->surplus_huge_pages_node[nid]) {
1238
1239 list_del(&page->lru);
1240 update_and_free_page(h, page);
1241 h->surplus_huge_pages--;
1242 h->surplus_huge_pages_node[nid]--;
1243 } else {
1244 arch_clear_hugepage_flags(page);
1245 enqueue_huge_page(h, page);
1246 }
1247 spin_unlock(&hugetlb_lock);
1248}
1249
1250static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1251{
1252 INIT_LIST_HEAD(&page->lru);
1253 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1254 spin_lock(&hugetlb_lock);
1255 set_hugetlb_cgroup(page, NULL);
1256 h->nr_huge_pages++;
1257 h->nr_huge_pages_node[nid]++;
1258 spin_unlock(&hugetlb_lock);
1259 put_page(page);
1260}
1261
1262static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1263{
1264 int i;
1265 int nr_pages = 1 << order;
1266 struct page *p = page + 1;
1267
1268
1269 set_compound_order(page, order);
1270 __ClearPageReserved(page);
1271 __SetPageHead(page);
1272 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285 __ClearPageReserved(p);
1286 set_page_count(p, 0);
1287 set_compound_head(p, page);
1288 }
1289 atomic_set(compound_mapcount_ptr(page), -1);
1290}
1291
1292
1293
1294
1295
1296
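/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages.  See the PageTransHuge() documentation for more
 * details.
 */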
1297int PageHuge(struct page *page)
1298{
1299 if (!PageCompound(page))
1300 return 0;
1301
1302 page = compound_head(page);
1303 return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1304}
1305EXPORT_SYMBOL_GPL(PageHuge);
1306
1307
1308
1309
1310
1311int PageHeadHuge(struct page *page_head)
1312{
1313 if (!PageHead(page_head))
1314 return 0;
1315
1316 return get_compound_page_dtor(page_head) == free_huge_page;
1317}
1318
1319pgoff_t __basepage_index(struct page *page)
1320{
1321 struct page *page_head = compound_head(page);
1322 pgoff_t index = page_index(page_head);
1323 unsigned long compound_idx;
1324
1325 if (!PageHuge(page_head))
1326 return page_index(page);
1327
1328 if (compound_order(page_head) >= MAX_ORDER)
1329 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1330 else
1331 compound_idx = page - page_head;
1332
1333 return (index << compound_order(page_head)) + compound_idx;
1334}
1335
1336static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1337{
1338 struct page *page;
1339
1340 page = __alloc_pages_node(nid,
1341 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1342 __GFP_REPEAT|__GFP_NOWARN,
1343 huge_page_order(h));
1344 if (page) {
1345 prep_new_huge_page(h, page, nid);
1346 }
1347
1348 return page;
1349}
1350
1351static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1352{
1353 struct page *page;
1354 int nr_nodes, node;
1355 int ret = 0;
1356
1357 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1358 page = alloc_fresh_huge_page_node(h, node);
1359 if (page) {
1360 ret = 1;
1361 break;
1362 }
1363 }
1364
1365 if (ret)
1366 count_vm_event(HTLB_BUDDY_PGALLOC);
1367 else
1368 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1369
1370 return ret;
1371}
1372
1373
1374
1375
1376
1377
1378
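/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */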
1379static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1380 bool acct_surplus)
1381{
1382 int nr_nodes, node;
1383 int ret = 0;
1384
1385 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1386
1387
1388
1389
1390 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1391 !list_empty(&h->hugepage_freelists[node])) {
1392 struct page *page =
1393 list_entry(h->hugepage_freelists[node].next,
1394 struct page, lru);
1395 list_del(&page->lru);
1396 h->free_huge_pages--;
1397 h->free_huge_pages_node[node]--;
1398 if (acct_surplus) {
1399 h->surplus_huge_pages--;
1400 h->surplus_huge_pages_node[node]--;
1401 }
1402 update_and_free_page(h, page);
1403 ret = 1;
1404 break;
1405 }
1406 }
1407
1408 return ret;
1409}

/*
 * Dissolve a given free hugepage into free buddy pages. This function does
 * nothing for in-use (including surplus) hugepages.
 */
static void dissolve_free_huge_page(struct page *page)
{
	spin_lock(&hugetlb_lock);
	if (PageHuge(page) && !page_count(page)) {
		struct hstate *h = page_hstate(page);
		int nid = page_to_nid(page);
		list_del(&page->lru);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		update_and_free_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
}

/*
 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that start_pfn should be aligned with (minimum) hugepage size.
 */
void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	if (!hugepages_supported())
		return;

	VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
		dissolve_free_huge_page(pfn_to_page(pfn));
}
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
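/*
 * There are 3 ways this can get called:
 * 1. With vma+addr: we use the VMA's memory policy.
 * 2. With !vma, but nid=NUMA_NO_NODE:  We try to allocate a huge
 *    page from any node, and let the buddy allocator itself figure
 *    it out.
 * 3. With !vma, but nid!=NUMA_NO_NODE:  We allocate a huge page
 *    strictly from 'nid'.
 */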
1455static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
1456 struct vm_area_struct *vma, unsigned long addr, int nid)
1457{
1458 int order = huge_page_order(h);
1459 gfp_t gfp = htlb_alloc_mask(h)|__GFP_COMP|__GFP_REPEAT|__GFP_NOWARN;
1460 unsigned int cpuset_mems_cookie;
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471 if (!IS_ENABLED(CONFIG_NUMA) || !vma) {
1472
1473
1474
1475
1476
1477 if (nid != NUMA_NO_NODE)
1478 gfp |= __GFP_THISNODE;
1479
1480
1481
1482
1483 return alloc_pages_node(nid, gfp, order);
1484 }
1485
1486
1487
1488
1489
1490
1491 do {
1492 struct page *page;
1493 struct mempolicy *mpol;
1494 struct zonelist *zl;
1495 nodemask_t *nodemask;
1496
1497 cpuset_mems_cookie = read_mems_allowed_begin();
1498 zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);
1499 mpol_cond_put(mpol);
1500 page = __alloc_pages_nodemask(gfp, order, zl, nodemask);
1501 if (page)
1502 return page;
1503 } while (read_mems_allowed_retry(cpuset_mems_cookie));
1504
1505 return NULL;
1506}
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
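/*
 * There are two ways to allocate a huge page:
 * 1. When you have a VMA and an address (like a fault)
 * 2. When you have no VMA (like when setting /proc/.../nr_hugepages)
 *
 * 'vma' and 'addr' are only for (1).  'nid' is always NUMA_NO_NODE in
 * this case which signifies that the allocation should be done with
 * respect for the VMA's memory policy.
 *
 * For (2), we ignore 'vma' and 'addr' and use 'nid' instead.
 */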
1520static struct page *__alloc_buddy_huge_page(struct hstate *h,
1521 struct vm_area_struct *vma, unsigned long addr, int nid)
1522{
1523 struct page *page;
1524 unsigned int r_nid;
1525
1526 if (hstate_is_gigantic(h))
1527 return NULL;
1528
1529
1530
1531
1532
1533
1534 if (vma || (addr != -1)) {
1535 VM_WARN_ON_ONCE(addr == -1);
1536 VM_WARN_ON_ONCE(nid != NUMA_NO_NODE);
1537 }
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561 spin_lock(&hugetlb_lock);
1562 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1563 spin_unlock(&hugetlb_lock);
1564 return NULL;
1565 } else {
1566 h->nr_huge_pages++;
1567 h->surplus_huge_pages++;
1568 }
1569 spin_unlock(&hugetlb_lock);
1570
1571 page = __hugetlb_alloc_buddy_huge_page(h, vma, addr, nid);
1572
1573 spin_lock(&hugetlb_lock);
1574 if (page) {
1575 INIT_LIST_HEAD(&page->lru);
1576 r_nid = page_to_nid(page);
1577 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1578 set_hugetlb_cgroup(page, NULL);
1579
1580
1581
1582 h->nr_huge_pages_node[r_nid]++;
1583 h->surplus_huge_pages_node[r_nid]++;
1584 __count_vm_event(HTLB_BUDDY_PGALLOC);
1585 } else {
1586 h->nr_huge_pages--;
1587 h->surplus_huge_pages--;
1588 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1589 }
1590 spin_unlock(&hugetlb_lock);
1591
1592 return page;
1593}
1594
1595
1596
1597
1598
1599
1600static
1601struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid)
1602{
1603 unsigned long addr = -1;
1604
1605 return __alloc_buddy_huge_page(h, NULL, addr, nid);
1606}
1607
1608
1609
1610
1611static
1612struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
1613 struct vm_area_struct *vma, unsigned long addr)
1614{
1615 return __alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE);
1616}
1617
1618
1619
1620
1621
1622
1623struct page *alloc_huge_page_node(struct hstate *h, int nid)
1624{
1625 struct page *page = NULL;
1626
1627 spin_lock(&hugetlb_lock);
1628 if (h->free_huge_pages - h->resv_huge_pages > 0)
1629 page = dequeue_huge_page_node(h, nid);
1630 spin_unlock(&hugetlb_lock);
1631
1632 if (!page)
1633 page = __alloc_buddy_huge_page_no_mpol(h, nid);
1634
1635 return page;
1636}
1637
1638
1639
1640
1641
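/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */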
1642static int gather_surplus_pages(struct hstate *h, int delta)
1643{
1644 struct list_head surplus_list;
1645 struct page *page, *tmp;
1646 int ret, i;
1647 int needed, allocated;
1648 bool alloc_ok = true;
1649
1650 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1651 if (needed <= 0) {
1652 h->resv_huge_pages += delta;
1653 return 0;
1654 }
1655
1656 allocated = 0;
1657 INIT_LIST_HEAD(&surplus_list);
1658
1659 ret = -ENOMEM;
1660retry:
1661 spin_unlock(&hugetlb_lock);
1662 for (i = 0; i < needed; i++) {
1663 page = __alloc_buddy_huge_page_no_mpol(h, NUMA_NO_NODE);
1664 if (!page) {
1665 alloc_ok = false;
1666 break;
1667 }
1668 list_add(&page->lru, &surplus_list);
1669 }
1670 allocated += i;
1671
1672
1673
1674
1675
1676 spin_lock(&hugetlb_lock);
1677 needed = (h->resv_huge_pages + delta) -
1678 (h->free_huge_pages + allocated);
1679 if (needed > 0) {
1680 if (alloc_ok)
1681 goto retry;
1682
1683
1684
1685
1686
1687 goto free;
1688 }
1689
1690
1691
1692
1693
1694
1695
1696
1697 needed += allocated;
1698 h->resv_huge_pages += delta;
1699 ret = 0;
1700
1701
1702 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1703 if ((--needed) < 0)
1704 break;
1705
1706
1707
1708
1709 put_page_testzero(page);
1710 VM_BUG_ON_PAGE(page_count(page), page);
1711 enqueue_huge_page(h, page);
1712 }
1713free:
1714 spin_unlock(&hugetlb_lock);
1715
1716
1717 list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1718 put_page(page);
1719 spin_lock(&hugetlb_lock);
1720
1721 return ret;
1722}
1723
1724
1725
1726
1727
1728
1729
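/*
 * When releasing a hugetlb pool reservation, any surplus pages that were
 * allocated to satisfy the reservation must be explicitly freed if they
 * were never used.
 * Called with hugetlb_lock held.
 */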
1730static void return_unused_surplus_pages(struct hstate *h,
1731 unsigned long unused_resv_pages)
1732{
1733 unsigned long nr_pages;
1734
1735
1736 h->resv_huge_pages -= unused_resv_pages;
1737
1738
1739 if (hstate_is_gigantic(h))
1740 return;
1741
1742 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752 while (nr_pages--) {
1753 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1754 break;
1755 cond_resched_lock(&hugetlb_lock);
1756 }
1757}
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
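/*
 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
 * are used by the huge page allocation routines to manage reservations.
 *
 * vma_needs_reservation is called to determine if the huge page at addr
 * within the vma has an associated reservation.  If a reservation is
 * needed, the value 1 is returned.  The caller is then responsible for
 * managing the global reservation and subpool usage counts.  After the
 * huge page has been allocated, vma_commit_reservation is called to add
 * the page to the reservation map.  If the page allocation fails, the
 * reservation must be ended instead of committed; vma_end_reservation
 * is called in such cases.
 */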
1779enum vma_resv_mode {
1780 VMA_NEEDS_RESV,
1781 VMA_COMMIT_RESV,
1782 VMA_END_RESV,
1783};
1784static long __vma_reservation_common(struct hstate *h,
1785 struct vm_area_struct *vma, unsigned long addr,
1786 enum vma_resv_mode mode)
1787{
1788 struct resv_map *resv;
1789 pgoff_t idx;
1790 long ret;
1791
1792 resv = vma_resv_map(vma);
1793 if (!resv)
1794 return 1;
1795
1796 idx = vma_hugecache_offset(h, vma, addr);
1797 switch (mode) {
1798 case VMA_NEEDS_RESV:
1799 ret = region_chg(resv, idx, idx + 1);
1800 break;
1801 case VMA_COMMIT_RESV:
1802 ret = region_add(resv, idx, idx + 1);
1803 break;
1804 case VMA_END_RESV:
1805 region_abort(resv, idx, idx + 1);
1806 ret = 0;
1807 break;
1808 default:
1809 BUG();
1810 }
1811
1812 if (vma->vm_flags & VM_MAYSHARE)
1813 return ret;
1814 else
1815 return ret < 0 ? ret : 0;
1816}
1817
1818static long vma_needs_reservation(struct hstate *h,
1819 struct vm_area_struct *vma, unsigned long addr)
1820{
1821 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1822}
1823
1824static long vma_commit_reservation(struct hstate *h,
1825 struct vm_area_struct *vma, unsigned long addr)
1826{
1827 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1828}
1829
1830static void vma_end_reservation(struct hstate *h,
1831 struct vm_area_struct *vma, unsigned long addr)
1832{
1833 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1834}
1835
1836struct page *alloc_huge_page(struct vm_area_struct *vma,
1837 unsigned long addr, int avoid_reserve)
1838{
1839 struct hugepage_subpool *spool = subpool_vma(vma);
1840 struct hstate *h = hstate_vma(vma);
1841 struct page *page;
1842 long map_chg, map_commit;
1843 long gbl_chg;
1844 int ret, idx;
1845 struct hugetlb_cgroup *h_cg;
1846
1847 idx = hstate_index(h);
1848
1849
1850
1851
1852
1853 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
1854 if (map_chg < 0)
1855 return ERR_PTR(-ENOMEM);
1856
1857
1858
1859
1860
1861
1862
1863
1864 if (map_chg || avoid_reserve) {
1865 gbl_chg = hugepage_subpool_get_pages(spool, 1);
1866 if (gbl_chg < 0) {
1867 vma_end_reservation(h, vma, addr);
1868 return ERR_PTR(-ENOSPC);
1869 }
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879 if (avoid_reserve)
1880 gbl_chg = 1;
1881 }
1882
1883 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1884 if (ret)
1885 goto out_subpool_put;
1886
1887 spin_lock(&hugetlb_lock);
1888
1889
1890
1891
1892
1893 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
1894 if (!page) {
1895 spin_unlock(&hugetlb_lock);
1896 page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
1897 if (!page)
1898 goto out_uncharge_cgroup;
1899 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
1900 SetPagePrivate(page);
1901 h->resv_huge_pages--;
1902 }
1903 spin_lock(&hugetlb_lock);
1904 list_move(&page->lru, &h->hugepage_activelist);
1905
1906 }
1907 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1908 spin_unlock(&hugetlb_lock);
1909
1910 set_page_private(page, (unsigned long)spool);
1911
1912 map_commit = vma_commit_reservation(h, vma, addr);
1913 if (unlikely(map_chg > map_commit)) {
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923 long rsv_adjust;
1924
1925 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
1926 hugetlb_acct_memory(h, -rsv_adjust);
1927 }
1928 return page;
1929
1930out_uncharge_cgroup:
1931 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
1932out_subpool_put:
1933 if (map_chg || avoid_reserve)
1934 hugepage_subpool_put_pages(spool, 1);
1935 vma_end_reservation(h, vma, addr);
1936 return ERR_PTR(-ENOSPC);
1937}
1938
1939
1940
1941
1942
1943
1944struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
1945 unsigned long addr, int avoid_reserve)
1946{
1947 struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
1948 if (IS_ERR(page))
1949 page = NULL;
1950 return page;
1951}
1952
1953int __weak alloc_bootmem_huge_page(struct hstate *h)
1954{
1955 struct huge_bootmem_page *m;
1956 int nr_nodes, node;
1957
1958 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
1959 void *addr;
1960
1961 addr = memblock_virt_alloc_try_nid_nopanic(
1962 huge_page_size(h), huge_page_size(h),
1963 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
1964 if (addr) {
1965
1966
1967
1968
1969
1970 m = addr;
1971 goto found;
1972 }
1973 }
1974 return 0;
1975
1976found:
1977 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
1978
1979 list_add(&m->list, &huge_boot_pages);
1980 m->hstate = h;
1981 return 1;
1982}
1983
1984static void __init prep_compound_huge_page(struct page *page,
1985 unsigned int order)
1986{
1987 if (unlikely(order > (MAX_ORDER - 1)))
1988 prep_compound_gigantic_page(page, order);
1989 else
1990 prep_compound_page(page, order);
1991}
1992
1993
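/* Put bootmem huge pages into the standard lists after mem_map is up */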
1994static void __init gather_bootmem_prealloc(void)
1995{
1996 struct huge_bootmem_page *m;
1997
1998 list_for_each_entry(m, &huge_boot_pages, list) {
1999 struct hstate *h = m->hstate;
2000 struct page *page;
2001
2002#ifdef CONFIG_HIGHMEM
2003 page = pfn_to_page(m->phys >> PAGE_SHIFT);
2004 memblock_free_late(__pa(m),
2005 sizeof(struct huge_bootmem_page));
2006#else
2007 page = virt_to_page(m);
2008#endif
2009 WARN_ON(page_count(page) != 1);
2010 prep_compound_huge_page(page, h->order);
2011 WARN_ON(PageReserved(page));
2012 prep_new_huge_page(h, page, page_to_nid(page));
2013
2014
2015
2016
2017
2018
2019 if (hstate_is_gigantic(h))
2020 adjust_managed_page_count(page, 1 << h->order);
2021 }
2022}

static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (hstate_is_gigantic(h)) {
			if (!alloc_bootmem_huge_page(h))
				break;
		} else if (!alloc_fresh_huge_page(h,
					 &node_states[N_MEMORY]))
			break;
	}
	h->max_huge_pages = i;
}

static void __init hugetlb_init_hstates(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (minimum_order > huge_page_order(h))
			minimum_order = huge_page_order(h);

		/* oversize hugepages were init'ed in early boot */
		if (!hstate_is_gigantic(h))
			hugetlb_hstate_alloc_pages(h);
	}
	VM_BUG_ON(minimum_order == UINT_MAX);
}
2053
2054static char * __init memfmt(char *buf, unsigned long n)
2055{
2056 if (n >= (1UL << 30))
2057 sprintf(buf, "%lu GB", n >> 30);
2058 else if (n >= (1UL << 20))
2059 sprintf(buf, "%lu MB", n >> 20);
2060 else
2061 sprintf(buf, "%lu KB", n >> 10);
2062 return buf;
2063}
2064
2065static void __init report_hugepages(void)
2066{
2067 struct hstate *h;
2068
2069 for_each_hstate(h) {
2070 char buf[32];
2071 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2072 memfmt(buf, huge_page_size(h)),
2073 h->free_huge_pages);
2074 }
2075}
2076
2077#ifdef CONFIG_HIGHMEM
2078static void try_to_free_low(struct hstate *h, unsigned long count,
2079 nodemask_t *nodes_allowed)
2080{
2081 int i;
2082
2083 if (hstate_is_gigantic(h))
2084 return;
2085
2086 for_each_node_mask(i, *nodes_allowed) {
2087 struct page *page, *next;
2088 struct list_head *freel = &h->hugepage_freelists[i];
2089 list_for_each_entry_safe(page, next, freel, lru) {
2090 if (count >= h->nr_huge_pages)
2091 return;
2092 if (PageHighMem(page))
2093 continue;
2094 list_del(&page->lru);
2095 update_and_free_page(h, page);
2096 h->free_huge_pages--;
2097 h->free_huge_pages_node[page_to_nid(page)]--;
2098 }
2099 }
2100}
2101#else
2102static inline void try_to_free_low(struct hstate *h, unsigned long count,
2103 nodemask_t *nodes_allowed)
2104{
2105}
2106#endif
2107
2108
2109
2110
2111
2112
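/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */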
2113static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2114 int delta)
2115{
2116 int nr_nodes, node;
2117
2118 VM_BUG_ON(delta != -1 && delta != 1);
2119
2120 if (delta < 0) {
2121 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2122 if (h->surplus_huge_pages_node[node])
2123 goto found;
2124 }
2125 } else {
2126 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2127 if (h->surplus_huge_pages_node[node] <
2128 h->nr_huge_pages_node[node])
2129 goto found;
2130 }
2131 }
2132 return 0;
2133
2134found:
2135 h->surplus_huge_pages += delta;
2136 h->surplus_huge_pages_node[node] += delta;
2137 return 1;
2138}
2139
2140#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2141static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
2142 nodemask_t *nodes_allowed)
2143{
2144 unsigned long min_count, ret;
2145
2146 if (hstate_is_gigantic(h) && !gigantic_page_supported())
2147 return h->max_huge_pages;
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160 spin_lock(&hugetlb_lock);
2161 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2162 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2163 break;
2164 }
2165
2166 while (count > persistent_huge_pages(h)) {
2167
2168
2169
2170
2171
2172 spin_unlock(&hugetlb_lock);
2173 if (hstate_is_gigantic(h))
2174 ret = alloc_fresh_gigantic_page(h, nodes_allowed);
2175 else
2176 ret = alloc_fresh_huge_page(h, nodes_allowed);
2177 spin_lock(&hugetlb_lock);
2178 if (!ret)
2179 goto out;
2180
2181
2182 if (signal_pending(current))
2183 goto out;
2184 }
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2202 min_count = max(count, min_count);
2203 try_to_free_low(h, min_count, nodes_allowed);
2204 while (min_count < persistent_huge_pages(h)) {
2205 if (!free_pool_huge_page(h, nodes_allowed, 0))
2206 break;
2207 cond_resched_lock(&hugetlb_lock);
2208 }
2209 while (count < persistent_huge_pages(h)) {
2210 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2211 break;
2212 }
2213out:
2214 ret = persistent_huge_pages(h);
2215 spin_unlock(&hugetlb_lock);
2216 return ret;
2217}
2218
2219#define HSTATE_ATTR_RO(_name) \
2220 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2221
2222#define HSTATE_ATTR(_name) \
2223 static struct kobj_attribute _name##_attr = \
2224 __ATTR(_name, 0644, _name##_show, _name##_store)
2225
2226static struct kobject *hugepages_kobj;
2227static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2228
2229static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2230
2231static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2232{
2233 int i;
2234
2235 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2236 if (hstate_kobjs[i] == kobj) {
2237 if (nidp)
2238 *nidp = NUMA_NO_NODE;
2239 return &hstates[i];
2240 }
2241
2242 return kobj_to_node_hstate(kobj, nidp);
2243}
2244
2245static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2246 struct kobj_attribute *attr, char *buf)
2247{
2248 struct hstate *h;
2249 unsigned long nr_huge_pages;
2250 int nid;
2251
2252 h = kobj_to_hstate(kobj, &nid);
2253 if (nid == NUMA_NO_NODE)
2254 nr_huge_pages = h->nr_huge_pages;
2255 else
2256 nr_huge_pages = h->nr_huge_pages_node[nid];
2257
2258 return sprintf(buf, "%lu\n", nr_huge_pages);
2259}
2260
2261static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2262 struct hstate *h, int nid,
2263 unsigned long count, size_t len)
2264{
2265 int err;
2266 NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2267
2268 if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2269 err = -EINVAL;
2270 goto out;
2271 }
2272
2273 if (nid == NUMA_NO_NODE) {
2274
2275
2276
2277 if (!(obey_mempolicy &&
2278 init_nodemask_of_mempolicy(nodes_allowed))) {
2279 NODEMASK_FREE(nodes_allowed);
2280 nodes_allowed = &node_states[N_MEMORY];
2281 }
2282 } else if (nodes_allowed) {
2283
2284
2285
2286
2287 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2288 init_nodemask_of_node(nodes_allowed, nid);
2289 } else
2290 nodes_allowed = &node_states[N_MEMORY];
2291
2292 h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2293
2294 if (nodes_allowed != &node_states[N_MEMORY])
2295 NODEMASK_FREE(nodes_allowed);
2296
2297 return len;
2298out:
2299 NODEMASK_FREE(nodes_allowed);
2300 return err;
2301}
2302
2303static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2304 struct kobject *kobj, const char *buf,
2305 size_t len)
2306{
2307 struct hstate *h;
2308 unsigned long count;
2309 int nid;
2310 int err;
2311
2312 err = kstrtoul(buf, 10, &count);
2313 if (err)
2314 return err;
2315
2316 h = kobj_to_hstate(kobj, &nid);
2317 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2318}
2319
2320static ssize_t nr_hugepages_show(struct kobject *kobj,
2321 struct kobj_attribute *attr, char *buf)
2322{
2323 return nr_hugepages_show_common(kobj, attr, buf);
2324}
2325
2326static ssize_t nr_hugepages_store(struct kobject *kobj,
2327 struct kobj_attribute *attr, const char *buf, size_t len)
2328{
2329 return nr_hugepages_store_common(false, kobj, buf, len);
2330}
2331HSTATE_ATTR(nr_hugepages);
2332
2333#ifdef CONFIG_NUMA
2334
2335
2336
2337
2338
2339static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2340 struct kobj_attribute *attr, char *buf)
2341{
2342 return nr_hugepages_show_common(kobj, attr, buf);
2343}
2344
2345static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2346 struct kobj_attribute *attr, const char *buf, size_t len)
2347{
2348 return nr_hugepages_store_common(true, kobj, buf, len);
2349}
2350HSTATE_ATTR(nr_hugepages_mempolicy);
2351#endif
2352
2353
2354static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2355 struct kobj_attribute *attr, char *buf)
2356{
2357 struct hstate *h = kobj_to_hstate(kobj, NULL);
2358 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2359}
2360
2361static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2362 struct kobj_attribute *attr, const char *buf, size_t count)
2363{
2364 int err;
2365 unsigned long input;
2366 struct hstate *h = kobj_to_hstate(kobj, NULL);
2367
2368 if (hstate_is_gigantic(h))
2369 return -EINVAL;
2370
2371 err = kstrtoul(buf, 10, &input);
2372 if (err)
2373 return err;
2374
2375 spin_lock(&hugetlb_lock);
2376 h->nr_overcommit_huge_pages = input;
2377 spin_unlock(&hugetlb_lock);
2378
2379 return count;
2380}
2381HSTATE_ATTR(nr_overcommit_hugepages);
2382
2383static ssize_t free_hugepages_show(struct kobject *kobj,
2384 struct kobj_attribute *attr, char *buf)
2385{
2386 struct hstate *h;
2387 unsigned long free_huge_pages;
2388 int nid;
2389
2390 h = kobj_to_hstate(kobj, &nid);
2391 if (nid == NUMA_NO_NODE)
2392 free_huge_pages = h->free_huge_pages;
2393 else
2394 free_huge_pages = h->free_huge_pages_node[nid];
2395
2396 return sprintf(buf, "%lu\n", free_huge_pages);
2397}
2398HSTATE_ATTR_RO(free_hugepages);
2399
2400static ssize_t resv_hugepages_show(struct kobject *kobj,
2401 struct kobj_attribute *attr, char *buf)
2402{
2403 struct hstate *h = kobj_to_hstate(kobj, NULL);
2404 return sprintf(buf, "%lu\n", h->resv_huge_pages);
2405}
2406HSTATE_ATTR_RO(resv_hugepages);
2407
2408static ssize_t surplus_hugepages_show(struct kobject *kobj,
2409 struct kobj_attribute *attr, char *buf)
2410{
2411 struct hstate *h;
2412 unsigned long surplus_huge_pages;
2413 int nid;
2414
2415 h = kobj_to_hstate(kobj, &nid);
2416 if (nid == NUMA_NO_NODE)
2417 surplus_huge_pages = h->surplus_huge_pages;
2418 else
2419 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2420
2421 return sprintf(buf, "%lu\n", surplus_huge_pages);
2422}
2423HSTATE_ATTR_RO(surplus_hugepages);
2424
2425static struct attribute *hstate_attrs[] = {
2426 &nr_hugepages_attr.attr,
2427 &nr_overcommit_hugepages_attr.attr,
2428 &free_hugepages_attr.attr,
2429 &resv_hugepages_attr.attr,
2430 &surplus_hugepages_attr.attr,
2431#ifdef CONFIG_NUMA
2432 &nr_hugepages_mempolicy_attr.attr,
2433#endif
2434 NULL,
2435};
2436
2437static struct attribute_group hstate_attr_group = {
2438 .attrs = hstate_attrs,
2439};
2440
2441static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2442 struct kobject **hstate_kobjs,
2443 struct attribute_group *hstate_attr_group)
2444{
2445 int retval;
2446 int hi = hstate_index(h);
2447
2448 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2449 if (!hstate_kobjs[hi])
2450 return -ENOMEM;
2451
2452 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2453 if (retval)
2454 kobject_put(hstate_kobjs[hi]);
2455
2456 return retval;
2457}
2458
2459static void __init hugetlb_sysfs_init(void)
2460{
2461 struct hstate *h;
2462 int err;
2463
2464 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2465 if (!hugepages_kobj)
2466 return;
2467
2468 for_each_hstate(h) {
2469 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2470 hstate_kobjs, &hstate_attr_group);
2471 if (err)
2472 pr_err("Hugetlb: Unable to add hstate %s", h->name);
2473 }
2474}
2475
2476#ifdef CONFIG_NUMA
2477
2478
2479
2480
2481
2482
2483
2484
2485struct node_hstate {
2486 struct kobject *hugepages_kobj;
2487 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2488};
2489static struct node_hstate node_hstates[MAX_NUMNODES];
2490
2491
2492
2493
2494static struct attribute *per_node_hstate_attrs[] = {
2495 &nr_hugepages_attr.attr,
2496 &free_hugepages_attr.attr,
2497 &surplus_hugepages_attr.attr,
2498 NULL,
2499};
2500
2501static struct attribute_group per_node_hstate_attr_group = {
2502 .attrs = per_node_hstate_attrs,
2503};
2504
2505
2506
2507
2508
2509static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2510{
2511 int nid;
2512
2513 for (nid = 0; nid < nr_node_ids; nid++) {
2514 struct node_hstate *nhs = &node_hstates[nid];
2515 int i;
2516 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2517 if (nhs->hstate_kobjs[i] == kobj) {
2518 if (nidp)
2519 *nidp = nid;
2520 return &hstates[i];
2521 }
2522 }
2523
2524 BUG();
2525 return NULL;
2526}
2527
2528
2529
2530
2531
2532static void hugetlb_unregister_node(struct node *node)
2533{
2534 struct hstate *h;
2535 struct node_hstate *nhs = &node_hstates[node->dev.id];
2536
2537 if (!nhs->hugepages_kobj)
2538 return;
2539
2540 for_each_hstate(h) {
2541 int idx = hstate_index(h);
2542 if (nhs->hstate_kobjs[idx]) {
2543 kobject_put(nhs->hstate_kobjs[idx]);
2544 nhs->hstate_kobjs[idx] = NULL;
2545 }
2546 }
2547
2548 kobject_put(nhs->hugepages_kobj);
2549 nhs->hugepages_kobj = NULL;
2550}
2551
2552
2553
2554
2555
2556
2557static void hugetlb_register_node(struct node *node)
2558{
2559 struct hstate *h;
2560 struct node_hstate *nhs = &node_hstates[node->dev.id];
2561 int err;
2562
2563 if (nhs->hugepages_kobj)
2564 return;
2565
2566 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2567 &node->dev.kobj);
2568 if (!nhs->hugepages_kobj)
2569 return;
2570
2571 for_each_hstate(h) {
2572 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2573 nhs->hstate_kobjs,
2574 &per_node_hstate_attr_group);
2575 if (err) {
2576 pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2577 h->name, node->dev.id);
2578 hugetlb_unregister_node(node);
2579 break;
2580 }
2581 }
2582}
2583
2584
2585
2586
2587
2588
2589static void __init hugetlb_register_all_nodes(void)
2590{
2591 int nid;
2592
2593 for_each_node_state(nid, N_MEMORY) {
2594 struct node *node = node_devices[nid];
2595 if (node->dev.id == nid)
2596 hugetlb_register_node(node);
2597 }
2598
2599
2600
2601
2602
2603 register_hugetlbfs_with_node(hugetlb_register_node,
2604 hugetlb_unregister_node);
2605}
2606#else
2607
2608static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2609{
2610 BUG();
2611 if (nidp)
2612 *nidp = -1;
2613 return NULL;
2614}
2615
2616static void hugetlb_register_all_nodes(void) { }
2617
2618#endif
2619
2620static int __init hugetlb_init(void)
2621{
2622 int i;
2623
2624 if (!hugepages_supported())
2625 return 0;
2626
2627 if (!size_to_hstate(default_hstate_size)) {
2628 default_hstate_size = HPAGE_SIZE;
2629 if (!size_to_hstate(default_hstate_size))
2630 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2631 }
2632 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2633 if (default_hstate_max_huge_pages) {
2634 if (!default_hstate.max_huge_pages)
2635 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2636 }
2637
2638 hugetlb_init_hstates();
2639 gather_bootmem_prealloc();
2640 report_hugepages();
2641
2642 hugetlb_sysfs_init();
2643 hugetlb_register_all_nodes();
2644 hugetlb_cgroup_file_init();
2645
2646#ifdef CONFIG_SMP
2647 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2648#else
2649 num_fault_mutexes = 1;
2650#endif
2651 hugetlb_fault_mutex_table =
2652 kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2653 BUG_ON(!hugetlb_fault_mutex_table);
2654
2655 for (i = 0; i < num_fault_mutexes; i++)
2656 mutex_init(&hugetlb_fault_mutex_table[i]);
2657 return 0;
2658}
2659subsys_initcall(hugetlb_init);

/* Should be called on processing a hugepagesz=... option */
void __init hugetlb_add_hstate(unsigned int order)
{
	struct hstate *h;
	unsigned long i;

	if (size_to_hstate(PAGE_SIZE << order)) {
		pr_warn("hugepagesz= specified twice, ignoring\n");
		return;
	}
	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
	BUG_ON(order == 0);
	h = &hstates[hugetlb_max_hstate++];
	h->order = order;
	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
	h->nr_huge_pages = 0;
	h->free_huge_pages = 0;
	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
	INIT_LIST_HEAD(&h->hugepage_activelist);
	h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
	h->next_nid_to_free = first_node(node_states[N_MEMORY]);
	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
					huge_page_size(h)/1024);

	parsed_hstate = h;
}
2688
2689static int __init hugetlb_nrpages_setup(char *s)
2690{
2691 unsigned long *mhp;
2692 static unsigned long *last_mhp;
2693
2694
2695
2696
2697
2698 if (!hugetlb_max_hstate)
2699 mhp = &default_hstate_max_huge_pages;
2700 else
2701 mhp = &parsed_hstate->max_huge_pages;
2702
2703 if (mhp == last_mhp) {
2704 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2705 return 1;
2706 }
2707
2708 if (sscanf(s, "%lu", mhp) <= 0)
2709 *mhp = 0;
2710
2711
2712
2713
2714
2715
2716 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2717 hugetlb_hstate_alloc_pages(parsed_hstate);
2718
2719 last_mhp = mhp;
2720
2721 return 1;
2722}
2723__setup("hugepages=", hugetlb_nrpages_setup);
2724
2725static int __init hugetlb_default_setup(char *s)
2726{
2727 default_hstate_size = memparse(s, &s);
2728 return 1;
2729}
2730__setup("default_hugepagesz=", hugetlb_default_setup);
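/*
 * Illustrative kernel command line using the options registered above,
 * e.g. "default_hugepagesz=2M hugepagesz=2M hugepages=512".  A hugepages=
 * value that appears before any hugepagesz= option applies to the default
 * huge page size.
 */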
2731
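/* Sum a per-node hstate counter over the nodes allowed by the current cpuset. */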
2732static unsigned int cpuset_mems_nr(unsigned int *array)
2733{
2734 int node;
2735 unsigned int nr = 0;
2736
2737 for_each_node_mask(node, cpuset_current_mems_allowed)
2738 nr += array[node];
2739
2740 return nr;
2741}
2742
2743#ifdef CONFIG_SYSCTL
2744static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2745 struct ctl_table *table, int write,
2746 void __user *buffer, size_t *length, loff_t *ppos)
2747{
2748 struct hstate *h = &default_hstate;
2749 unsigned long tmp = h->max_huge_pages;
2750 int ret;
2751
2752 if (!hugepages_supported())
2753 return -EOPNOTSUPP;
2754
2755 table->data = &tmp;
2756 table->maxlen = sizeof(unsigned long);
2757 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2758 if (ret)
2759 goto out;
2760
2761 if (write)
2762 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2763 NUMA_NO_NODE, tmp, *length);
2764out:
2765 return ret;
2766}
2767
2768int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2769 void __user *buffer, size_t *length, loff_t *ppos)
2770{
2771
2772 return hugetlb_sysctl_handler_common(false, table, write,
2773 buffer, length, ppos);
2774}
2775
2776#ifdef CONFIG_NUMA
2777int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2778 void __user *buffer, size_t *length, loff_t *ppos)
2779{
2780 return hugetlb_sysctl_handler_common(true, table, write,
2781 buffer, length, ppos);
2782}
2783#endif
2784
2785int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2786 void __user *buffer,
2787 size_t *length, loff_t *ppos)
2788{
2789 struct hstate *h = &default_hstate;
2790 unsigned long tmp;
2791 int ret;
2792
2793 if (!hugepages_supported())
2794 return -EOPNOTSUPP;
2795
2796 tmp = h->nr_overcommit_huge_pages;
2797
2798 if (write && hstate_is_gigantic(h))
2799 return -EINVAL;
2800
2801 table->data = &tmp;
2802 table->maxlen = sizeof(unsigned long);
2803 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2804 if (ret)
2805 goto out;
2806
2807 if (write) {
2808 spin_lock(&hugetlb_lock);
2809 h->nr_overcommit_huge_pages = tmp;
2810 spin_unlock(&hugetlb_lock);
2811 }
2812out:
2813 return ret;
2814}
2815
2816#endif
2817
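/* Report the default hstate's pool counters for /proc/meminfo. */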
2818void hugetlb_report_meminfo(struct seq_file *m)
2819{
2820 struct hstate *h = &default_hstate;
2821 if (!hugepages_supported())
2822 return;
2823 seq_printf(m,
2824 "HugePages_Total: %5lu\n"
2825 "HugePages_Free: %5lu\n"
2826 "HugePages_Rsvd: %5lu\n"
2827 "HugePages_Surp: %5lu\n"
2828 "Hugepagesize: %8lu kB\n",
2829 h->nr_huge_pages,
2830 h->free_huge_pages,
2831 h->resv_huge_pages,
2832 h->surplus_huge_pages,
2833 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2834}
2835
2836int hugetlb_report_node_meminfo(int nid, char *buf)
2837{
2838 struct hstate *h = &default_hstate;
2839 if (!hugepages_supported())
2840 return 0;
2841 return sprintf(buf,
2842 "Node %d HugePages_Total: %5u\n"
2843 "Node %d HugePages_Free: %5u\n"
2844 "Node %d HugePages_Surp: %5u\n",
2845 nid, h->nr_huge_pages_node[nid],
2846 nid, h->free_huge_pages_node[nid],
2847 nid, h->surplus_huge_pages_node[nid]);
2848}
2849
2850void hugetlb_show_meminfo(void)
2851{
2852 struct hstate *h;
2853 int nid;
2854
2855 if (!hugepages_supported())
2856 return;
2857
2858 for_each_node_state(nid, N_MEMORY)
2859 for_each_hstate(h)
2860 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2861 nid,
2862 h->nr_huge_pages_node[nid],
2863 h->free_huge_pages_node[nid],
2864 h->surplus_huge_pages_node[nid],
2865 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2866}
2867
2868void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
2869{
2870 seq_printf(m, "HugetlbPages:\t%8lu kB\n",
2871 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
2872}
2873
2874
2875unsigned long hugetlb_total_pages(void)
2876{
2877 struct hstate *h;
2878 unsigned long nr_total_pages = 0;
2879
2880 for_each_hstate(h)
2881 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
2882 return nr_total_pages;
2883}
2884
2885static int hugetlb_acct_memory(struct hstate *h, long delta)
2886{
2887 int ret = -ENOMEM;
2888
2889 spin_lock(&hugetlb_lock);
2890
        /*
         * When cpusets are in use the global reservation counters cannot be
         * enforced strictly: they are not checked against the current
         * cpuset's memory, and tasks can move between cpusets at any time.
         * As a best-effort fallback, after gathering surplus pages we compare
         * the request against the free huge pages available in the current
         * cpuset and back out if it cannot be satisfied.
         */
2907 if (delta > 0) {
2908 if (gather_surplus_pages(h, delta) < 0)
2909 goto out;
2910
2911 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
2912 return_unused_surplus_pages(h, delta);
2913 goto out;
2914 }
2915 }
2916
2917 ret = 0;
2918 if (delta < 0)
2919 return_unused_surplus_pages(h, (unsigned long) -delta);
2920
2921out:
2922 spin_unlock(&hugetlb_lock);
2923 return ret;
2924}
2925
2926static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2927{
2928 struct resv_map *resv = vma_resv_map(vma);
2929
2930
        /*
         * A VMA only carries a valid reservation map here when it is being
         * duplicated from an existing VMA that owns the reserves.  That
         * original VMA still holds a reference on the map, so it cannot go
         * away and taking another reference is safe without extra locking.
         */
2938 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2939 kref_get(&resv->refs);
2940}
2941
2942static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2943{
2944 struct hstate *h = hstate_vma(vma);
2945 struct resv_map *resv = vma_resv_map(vma);
2946 struct hugepage_subpool *spool = subpool_vma(vma);
2947 unsigned long reserve, start, end;
2948 long gbl_reserve;
2949
2950 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
2951 return;
2952
2953 start = vma_hugecache_offset(h, vma, vma->vm_start);
2954 end = vma_hugecache_offset(h, vma, vma->vm_end);
2955
2956 reserve = (end - start) - region_count(resv, start, end);
2957
2958 kref_put(&resv->refs, resv_map_release);
2959
2960 if (reserve) {
2961
2962
2963
2964
2965 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
2966 hugetlb_acct_memory(h, -gbl_reserve);
2967 }
2968}
2969
2970
/*
 * Faults on hugetlb VMAs are never handled through this path; they go
 * through hugetlb_fault() instead.  Reaching this handler is a bug.
 */
2976static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2977{
2978 BUG();
2979 return 0;
2980}
2981
2982const struct vm_operations_struct hugetlb_vm_ops = {
2983 .fault = hugetlb_vm_op_fault,
2984 .open = hugetlb_vm_op_open,
2985 .close = hugetlb_vm_op_close,
2986};
2987
2988static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2989 int writable)
2990{
2991 pte_t entry;
2992
2993 if (writable) {
2994 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
2995 vma->vm_page_prot)));
2996 } else {
2997 entry = huge_pte_wrprotect(mk_huge_pte(page,
2998 vma->vm_page_prot));
2999 }
3000 entry = pte_mkyoung(entry);
3001 entry = pte_mkhuge(entry);
3002 entry = arch_make_huge_pte(entry, vma, page, writable);
3003
3004 return entry;
3005}
3006
3007static void set_huge_ptep_writable(struct vm_area_struct *vma,
3008 unsigned long address, pte_t *ptep)
3009{
3010 pte_t entry;
3011
3012 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3013 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3014 update_mmu_cache(vma, address, ptep);
3015}
3016
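/*
 * Helpers for huge ptes that are not present: such a pte may encode a
 * migration entry or a hwpoison entry as a swap-style entry.
 */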
3017static int is_hugetlb_entry_migration(pte_t pte)
3018{
3019 swp_entry_t swp;
3020
3021 if (huge_pte_none(pte) || pte_present(pte))
3022 return 0;
3023 swp = pte_to_swp_entry(pte);
3024 if (non_swap_entry(swp) && is_migration_entry(swp))
3025 return 1;
3026 else
3027 return 0;
3028}
3029
3030static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3031{
3032 swp_entry_t swp;
3033
3034 if (huge_pte_none(pte) || pte_present(pte))
3035 return 0;
3036 swp = pte_to_swp_entry(pte);
3037 if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3038 return 1;
3039 else
3040 return 0;
3041}
3042
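/*
 * Copy the hugetlb page table entries from the parent to the child at fork
 * time.  Private (COW) mappings are write-protected in both so the first
 * write triggers hugetlb_cow().
 */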
3043int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3044 struct vm_area_struct *vma)
3045{
3046 pte_t *src_pte, *dst_pte, entry;
3047 struct page *ptepage;
3048 unsigned long addr;
3049 int cow;
3050 struct hstate *h = hstate_vma(vma);
3051 unsigned long sz = huge_page_size(h);
3052 unsigned long mmun_start;
3053 unsigned long mmun_end;
3054 int ret = 0;
3055
3056 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3057
3058 mmun_start = vma->vm_start;
3059 mmun_end = vma->vm_end;
3060 if (cow)
3061 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
3062
3063 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3064 spinlock_t *src_ptl, *dst_ptl;
3065 src_pte = huge_pte_offset(src, addr);
3066 if (!src_pte)
3067 continue;
3068 dst_pte = huge_pte_alloc(dst, addr, sz);
3069 if (!dst_pte) {
3070 ret = -ENOMEM;
3071 break;
3072 }
3073
                /* If the page tables are shared, there is nothing to copy. */
3075 if (dst_pte == src_pte)
3076 continue;
3077
3078 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3079 src_ptl = huge_pte_lockptr(h, src, src_pte);
3080 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3081 entry = huge_ptep_get(src_pte);
3082 if (huge_pte_none(entry)) {
3083 ;
3084 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3085 is_hugetlb_entry_hwpoisoned(entry))) {
3086 swp_entry_t swp_entry = pte_to_swp_entry(entry);
3087
3088 if (is_write_migration_entry(swp_entry) && cow) {
3089
3090
3091
3092
3093 make_migration_entry_read(&swp_entry);
3094 entry = swp_entry_to_pte(swp_entry);
3095 set_huge_pte_at(src, addr, src_pte, entry);
3096 }
3097 set_huge_pte_at(dst, addr, dst_pte, entry);
3098 } else {
3099 if (cow) {
3100 huge_ptep_set_wrprotect(src, addr, src_pte);
3101 mmu_notifier_invalidate_range(src, mmun_start,
3102 mmun_end);
3103 }
3104 entry = huge_ptep_get(src_pte);
3105 ptepage = pte_page(entry);
3106 get_page(ptepage);
3107 page_dup_rmap(ptepage, true);
3108 set_huge_pte_at(dst, addr, dst_pte, entry);
3109 hugetlb_count_add(pages_per_huge_page(h), dst);
3110 }
3111 spin_unlock(src_ptl);
3112 spin_unlock(dst_ptl);
3113 }
3114
3115 if (cow)
3116 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
3117
3118 return ret;
3119}
3120
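/*
 * Unmap and free the huge pages in [start, end) for one VMA.  If ref_page
 * is non-NULL only that page is unmapped, and the walk stops once it has
 * been found.
 */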
3121void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3122 unsigned long start, unsigned long end,
3123 struct page *ref_page)
3124{
3125 int force_flush = 0;
3126 struct mm_struct *mm = vma->vm_mm;
3127 unsigned long address;
3128 pte_t *ptep;
3129 pte_t pte;
3130 spinlock_t *ptl;
3131 struct page *page;
3132 struct hstate *h = hstate_vma(vma);
3133 unsigned long sz = huge_page_size(h);
3134 const unsigned long mmun_start = start;
3135 const unsigned long mmun_end = end;
3136
3137 WARN_ON(!is_vm_hugetlb_page(vma));
3138 BUG_ON(start & ~huge_page_mask(h));
3139 BUG_ON(end & ~huge_page_mask(h));
3140
3141 tlb_start_vma(tlb, vma);
3142 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3143 address = start;
3144again:
3145 for (; address < end; address += sz) {
3146 ptep = huge_pte_offset(mm, address);
3147 if (!ptep)
3148 continue;
3149
3150 ptl = huge_pte_lock(h, mm, ptep);
3151 if (huge_pmd_unshare(mm, &address, ptep))
3152 goto unlock;
3153
3154 pte = huge_ptep_get(ptep);
3155 if (huge_pte_none(pte))
3156 goto unlock;
3157
                /*
                 * A non-present entry here is a migration or hwpoison swap
                 * entry; the page is already unmapped, so just clear the pte.
                 */
3162 if (unlikely(!pte_present(pte))) {
3163 huge_pte_clear(mm, address, ptep);
3164 goto unlock;
3165 }
3166
3167 page = pte_page(pte);
3168
                /*
                 * A ref_page means a specific page is being unmapped rather
                 * than a range; make sure the pte actually maps that page.
                 */
3173 if (ref_page) {
3174 if (page != ref_page)
3175 goto unlock;
3176
                        /*
                         * Mark the VMA as having had a page unmapped from
                         * under it, so later faults raise SIGBUS instead of
                         * silently supplying a fresh zeroed page.
                         */
3182 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3183 }
3184
3185 pte = huge_ptep_get_and_clear(mm, address, ptep);
3186 tlb_remove_tlb_entry(tlb, ptep, address);
3187 if (huge_pte_dirty(pte))
3188 set_page_dirty(page);
3189
3190 hugetlb_count_sub(pages_per_huge_page(h), mm);
3191 page_remove_rmap(page, true);
3192 force_flush = !__tlb_remove_page(tlb, page);
3193 if (force_flush) {
3194 address += sz;
3195 spin_unlock(ptl);
3196 break;
3197 }
3198
3199 if (ref_page) {
3200 spin_unlock(ptl);
3201 break;
3202 }
3203unlock:
3204 spin_unlock(ptl);
3205 }
3206
        /*
         * The mmu_gather ran out of room and the pte lock was dropped above;
         * flush now and restart the walk unless a single ref_page was the
         * target.
         */
3211 if (force_flush) {
3212 force_flush = 0;
3213 tlb_flush_mmu(tlb);
3214 if (address < end && !ref_page)
3215 goto again;
3216 }
3217 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3218 tlb_end_vma(tlb, vma);
3219}
3220
3221void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3222 struct vm_area_struct *vma, unsigned long start,
3223 unsigned long end, struct page *ref_page)
3224{
3225 __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3226
3227
        /*
         * Clear VM_MAYSHARE so that huge_pmd_share()'s shareability test
         * fails for this VMA while it is being torn down and no new page
         * table sharing can be set up against it.  The VMA is about to be
         * destroyed and i_mmap_rwsem is held, so clearing the flag here is
         * safe.
         */
3237 vma->vm_flags &= ~VM_MAYSHARE;
3238}
3239
3240void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3241 unsigned long end, struct page *ref_page)
3242{
3243 struct mm_struct *mm;
3244 struct mmu_gather tlb;
3245
3246 mm = vma->vm_mm;
3247
3248 tlb_gather_mmu(&tlb, mm, start, end);
3249 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3250 tlb_finish_mmu(&tlb, start, end);
3251}
3252
3253
/*
 * Called when the owner of a MAP_PRIVATE reservation fails to COW its page.
 * Unmap the page from every other VMA mapping it so the owner can retry the
 * allocation; the other mappings get SIGBUS if they fault the page again.
 */
3259static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3260 struct page *page, unsigned long address)
3261{
3262 struct hstate *h = hstate_vma(vma);
3263 struct vm_area_struct *iter_vma;
3264 struct address_space *mapping;
3265 pgoff_t pgoff;
3266
3267
3268
3269
3270
3271 address = address & huge_page_mask(h);
3272 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3273 vma->vm_pgoff;
3274 mapping = file_inode(vma->vm_file)->i_mapping;
3275
3276
3277
3278
3279
3280
3281 i_mmap_lock_write(mapping);
3282 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3283
3284 if (iter_vma == vma)
3285 continue;
3286
3287
3288
3289
3290
3291
3292 if (iter_vma->vm_flags & VM_MAYSHARE)
3293 continue;
3294
3295
                /*
                 * Unmap the page from VMAs that do not hold their own
                 * reserves; they are marked HPAGE_RESV_UNMAPPED so a later
                 * fault fails instead of quietly mapping a zeroed page.
                 */
3302 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3303 unmap_hugepage_range(iter_vma, address,
3304 address + huge_page_size(h), page);
3305 }
3306 i_mmap_unlock_write(mapping);
3307}
3308
3309
/*
 * Handle a hugetlb copy-on-write fault.  Called with the page lock of the
 * original page held and with ptl locked; ptl is dropped and re-taken
 * around the allocation and is held again on return.
 */
3315static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3316 unsigned long address, pte_t *ptep, pte_t pte,
3317 struct page *pagecache_page, spinlock_t *ptl)
3318{
3319 struct hstate *h = hstate_vma(vma);
3320 struct page *old_page, *new_page;
3321 int ret = 0, outside_reserve = 0;
3322 unsigned long mmun_start;
3323 unsigned long mmun_end;
3324
3325 old_page = pte_page(pte);
3326
3327retry_avoidcopy:
3328
3329
3330 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3331 page_move_anon_rmap(old_page, vma, address);
3332 set_huge_ptep_writable(vma, address, ptep);
3333 return 0;
3334 }
3335
3336
        /*
         * If the owner of a MAP_PRIVATE mapping is COWing because of a
         * shared page count, try to satisfy the allocation without touching
         * its remaining reserves.  The pagecache page tells us whether the
         * reserve for this address has already been consumed.
         */
3345 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3346 old_page != pagecache_page)
3347 outside_reserve = 1;
3348
3349 get_page(old_page);
3350
3351
3352
3353
3354
3355 spin_unlock(ptl);
3356 new_page = alloc_huge_page(vma, address, outside_reserve);
3357
3358 if (IS_ERR(new_page)) {
3359
                /*
                 * The reservation owner could not get a page because other
                 * mappings still reference the old one.  Unmap the page from
                 * those mappings and retry so the owner's reserve can be
                 * honoured; the other mappings may later fault and get
                 * SIGBUS.
                 */
3366 if (outside_reserve) {
3367 put_page(old_page);
3368 BUG_ON(huge_pte_none(pte));
3369 unmap_ref_private(mm, vma, old_page, address);
3370 BUG_ON(huge_pte_none(pte));
3371 spin_lock(ptl);
3372 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3373 if (likely(ptep &&
3374 pte_same(huge_ptep_get(ptep), pte)))
3375 goto retry_avoidcopy;
3376
3377
3378
3379
3380 return 0;
3381 }
3382
3383 ret = (PTR_ERR(new_page) == -ENOMEM) ?
3384 VM_FAULT_OOM : VM_FAULT_SIGBUS;
3385 goto out_release_old;
3386 }
3387
3388
3389
3390
3391
3392 if (unlikely(anon_vma_prepare(vma))) {
3393 ret = VM_FAULT_OOM;
3394 goto out_release_all;
3395 }
3396
3397 copy_user_huge_page(new_page, old_page, address, vma,
3398 pages_per_huge_page(h));
3399 __SetPageUptodate(new_page);
3400 set_page_huge_active(new_page);
3401
3402 mmun_start = address & huge_page_mask(h);
3403 mmun_end = mmun_start + huge_page_size(h);
3404 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3405
3406
3407
3408
3409
3410 spin_lock(ptl);
3411 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3412 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3413 ClearPagePrivate(new_page);
3414
3415
3416 huge_ptep_clear_flush(vma, address, ptep);
3417 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3418 set_huge_pte_at(mm, address, ptep,
3419 make_huge_pte(vma, new_page, 1));
3420 page_remove_rmap(old_page, true);
3421 hugepage_add_new_anon_rmap(new_page, vma, address);
3422
3423 new_page = old_page;
3424 }
3425 spin_unlock(ptl);
3426 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3427out_release_all:
3428 put_page(new_page);
3429out_release_old:
3430 put_page(old_page);
3431
3432 spin_lock(ptl);
3433 return ret;
3434}
3435
3436
3437static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3438 struct vm_area_struct *vma, unsigned long address)
3439{
3440 struct address_space *mapping;
3441 pgoff_t idx;
3442
3443 mapping = vma->vm_file->f_mapping;
3444 idx = vma_hugecache_offset(h, vma, address);
3445
3446 return find_lock_page(mapping, idx);
3447}
3448
3449
3450
3451
3452
3453static bool hugetlbfs_pagecache_present(struct hstate *h,
3454 struct vm_area_struct *vma, unsigned long address)
3455{
3456 struct address_space *mapping;
3457 pgoff_t idx;
3458 struct page *page;
3459
3460 mapping = vma->vm_file->f_mapping;
3461 idx = vma_hugecache_offset(h, vma, address);
3462
3463 page = find_get_page(mapping, idx);
3464 if (page)
3465 put_page(page);
3466 return page != NULL;
3467}
3468
3469int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3470 pgoff_t idx)
3471{
3472 struct inode *inode = mapping->host;
3473 struct hstate *h = hstate_inode(inode);
3474 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3475
3476 if (err)
3477 return err;
3478 ClearPagePrivate(page);
3479
3480 spin_lock(&inode->i_lock);
3481 inode->i_blocks += blocks_per_huge_page(h);
3482 spin_unlock(&inode->i_lock);
3483 return 0;
3484}
3485
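/*
 * Handle a fault on a huge pte that is still pte_none: find the page in
 * the page cache or allocate a new one, add the rmap and install the pte.
 */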
3486static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
3487 struct address_space *mapping, pgoff_t idx,
3488 unsigned long address, pte_t *ptep, unsigned int flags)
3489{
3490 struct hstate *h = hstate_vma(vma);
3491 int ret = VM_FAULT_SIGBUS;
3492 int anon_rmap = 0;
3493 unsigned long size;
3494 struct page *page;
3495 pte_t new_pte;
3496 spinlock_t *ptl;
3497
3498
        /*
         * The page this VMA relied on was unmapped by unmap_ref_private();
         * kill the faulting task with SIGBUS rather than handing back a
         * zeroed page.
         */
3503 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3504 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3505 current->pid);
3506 return ret;
3507 }
3508
3509
3510
3511
3512
3513retry:
3514 page = find_lock_page(mapping, idx);
3515 if (!page) {
3516 size = i_size_read(mapping->host) >> huge_page_shift(h);
3517 if (idx >= size)
3518 goto out;
3519 page = alloc_huge_page(vma, address, 0);
3520 if (IS_ERR(page)) {
3521 ret = PTR_ERR(page);
3522 if (ret == -ENOMEM)
3523 ret = VM_FAULT_OOM;
3524 else
3525 ret = VM_FAULT_SIGBUS;
3526 goto out;
3527 }
3528 clear_huge_page(page, address, pages_per_huge_page(h));
3529 __SetPageUptodate(page);
3530 set_page_huge_active(page);
3531
3532 if (vma->vm_flags & VM_MAYSHARE) {
3533 int err = huge_add_to_page_cache(page, mapping, idx);
3534 if (err) {
3535 put_page(page);
3536 if (err == -EEXIST)
3537 goto retry;
3538 goto out;
3539 }
3540 } else {
3541 lock_page(page);
3542 if (unlikely(anon_vma_prepare(vma))) {
3543 ret = VM_FAULT_OOM;
3544 goto backout_unlocked;
3545 }
3546 anon_rmap = 1;
3547 }
3548 } else {
3549
3550
3551
3552
3553
3554 if (unlikely(PageHWPoison(page))) {
3555 ret = VM_FAULT_HWPOISON |
3556 VM_FAULT_SET_HINDEX(hstate_index(h));
3557 goto backout_unlocked;
3558 }
3559 }
3560
3561
        /*
         * If a private COW may happen later, settle the reservation for this
         * page now, while no spinlocks are held, so the later COW path does
         * not need to allocate reservation entries under the pte lock.
         */
3567 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3568 if (vma_needs_reservation(h, vma, address) < 0) {
3569 ret = VM_FAULT_OOM;
3570 goto backout_unlocked;
3571 }
3572
3573 vma_end_reservation(h, vma, address);
3574 }
3575
3576 ptl = huge_pte_lockptr(h, mm, ptep);
3577 spin_lock(ptl);
3578 size = i_size_read(mapping->host) >> huge_page_shift(h);
3579 if (idx >= size)
3580 goto backout;
3581
3582 ret = 0;
3583 if (!huge_pte_none(huge_ptep_get(ptep)))
3584 goto backout;
3585
3586 if (anon_rmap) {
3587 ClearPagePrivate(page);
3588 hugepage_add_new_anon_rmap(page, vma, address);
3589 } else
3590 page_dup_rmap(page, true);
3591 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3592 && (vma->vm_flags & VM_SHARED)));
3593 set_huge_pte_at(mm, address, ptep, new_pte);
3594
3595 hugetlb_count_add(pages_per_huge_page(h), mm);
3596 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3597
3598 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3599 }
3600
3601 spin_unlock(ptl);
3602 unlock_page(page);
3603out:
3604 return ret;
3605
3606backout:
3607 spin_unlock(ptl);
3608backout_unlocked:
3609 unlock_page(page);
3610 put_page(page);
3611 goto out;
3612}
3613
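/*
 * Pick a fault mutex for a faulting address.  Shared mappings hash on
 * (mapping, page index) so all processes faulting the same file page
 * serialize; private mappings hash on (mm, address).
 */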
3614#ifdef CONFIG_SMP
3615u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3616 struct vm_area_struct *vma,
3617 struct address_space *mapping,
3618 pgoff_t idx, unsigned long address)
3619{
3620 unsigned long key[2];
3621 u32 hash;
3622
3623 if (vma->vm_flags & VM_SHARED) {
3624 key[0] = (unsigned long) mapping;
3625 key[1] = idx;
3626 } else {
3627 key[0] = (unsigned long) mm;
3628 key[1] = address >> huge_page_shift(h);
3629 }
3630
3631 hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3632
3633 return hash & (num_fault_mutexes - 1);
3634}
3635#else
/*
 * On UP there is only one fault mutex, so skip the hashing and always
 * return slot 0.
 */
3640u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3641 struct vm_area_struct *vma,
3642 struct address_space *mapping,
3643 pgoff_t idx, unsigned long address)
3644{
3645 return 0;
3646}
3647#endif
3648
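/*
 * Top-level hugetlb fault handler: resolves missing pages through
 * hugetlb_no_page() and write faults on read-only ptes through
 * hugetlb_cow(), serialized by the fault mutex table.
 */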
3649int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3650 unsigned long address, unsigned int flags)
3651{
3652 pte_t *ptep, entry;
3653 spinlock_t *ptl;
3654 int ret;
3655 u32 hash;
3656 pgoff_t idx;
3657 struct page *page = NULL;
3658 struct page *pagecache_page = NULL;
3659 struct hstate *h = hstate_vma(vma);
3660 struct address_space *mapping;
3661 int need_wait_lock = 0;
3662
3663 address &= huge_page_mask(h);
3664
3665 ptep = huge_pte_offset(mm, address);
3666 if (ptep) {
3667 entry = huge_ptep_get(ptep);
3668 if (unlikely(is_hugetlb_entry_migration(entry))) {
3669 migration_entry_wait_huge(vma, mm, ptep);
3670 return 0;
3671 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3672 return VM_FAULT_HWPOISON_LARGE |
3673 VM_FAULT_SET_HINDEX(hstate_index(h));
3674 } else {
3675 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3676 if (!ptep)
3677 return VM_FAULT_OOM;
3678 }
3679
3680 mapping = vma->vm_file->f_mapping;
3681 idx = vma_hugecache_offset(h, vma, address);
3682
3683
        /*
         * Serialize against other faults on the same page index so that two
         * CPUs do not race to instantiate the same page and spuriously fail
         * the allocation.
         */
3688 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
3689 mutex_lock(&hugetlb_fault_mutex_table[hash]);
3690
3691 entry = huge_ptep_get(ptep);
3692 if (huge_pte_none(entry)) {
3693 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3694 goto out_mutex;
3695 }
3696
3697 ret = 0;
3698
3699
3700
3701
3702
3703
3704
3705
3706 if (!pte_present(entry))
3707 goto out_mutex;
3708
3709
        /*
         * If a write fault may lead to COW, settle the reservation for this
         * address now, outside the pte lock.  For private mappings also look
         * up the pagecache page, which hugetlb_cow() uses to decide whether
         * the reserve has already been consumed.
         */
3717 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3718 if (vma_needs_reservation(h, vma, address) < 0) {
3719 ret = VM_FAULT_OOM;
3720 goto out_mutex;
3721 }
3722
3723 vma_end_reservation(h, vma, address);
3724
3725 if (!(vma->vm_flags & VM_MAYSHARE))
3726 pagecache_page = hugetlbfs_pagecache_page(h,
3727 vma, address);
3728 }
3729
3730 ptl = huge_pte_lock(h, mm, ptep);
3731
3732
3733 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3734 goto out_ptl;
3735
3736
3737
3738
3739
3740
3741 page = pte_page(entry);
3742 if (page != pagecache_page)
3743 if (!trylock_page(page)) {
3744 need_wait_lock = 1;
3745 goto out_ptl;
3746 }
3747
3748 get_page(page);
3749
3750 if (flags & FAULT_FLAG_WRITE) {
3751 if (!huge_pte_write(entry)) {
3752 ret = hugetlb_cow(mm, vma, address, ptep, entry,
3753 pagecache_page, ptl);
3754 goto out_put_page;
3755 }
3756 entry = huge_pte_mkdirty(entry);
3757 }
3758 entry = pte_mkyoung(entry);
3759 if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3760 flags & FAULT_FLAG_WRITE))
3761 update_mmu_cache(vma, address, ptep);
3762out_put_page:
3763 if (page != pagecache_page)
3764 unlock_page(page);
3765 put_page(page);
3766out_ptl:
3767 spin_unlock(ptl);
3768
3769 if (pagecache_page) {
3770 unlock_page(pagecache_page);
3771 put_page(pagecache_page);
3772 }
3773out_mutex:
3774 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3775
3776
        /*
         * We still hold a reference on the page, so waiting for its lock
         * here is safe; this just defers the retry of the fault instead of
         * busy-looping on trylock_page() failures.
         */
3782 if (need_wait_lock)
3783 wait_on_page_locked(page);
3784 return ret;
3785}
3786
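/*
 * get_user_pages() worker for hugetlb VMAs: walk the range, faulting in
 * huge pages as needed, and fill in *pages/*vmas one base page at a time.
 */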
3787long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3788 struct page **pages, struct vm_area_struct **vmas,
3789 unsigned long *position, unsigned long *nr_pages,
3790 long i, unsigned int flags)
3791{
3792 unsigned long pfn_offset;
3793 unsigned long vaddr = *position;
3794 unsigned long remainder = *nr_pages;
3795 struct hstate *h = hstate_vma(vma);
3796
3797 while (vaddr < vma->vm_end && remainder) {
3798 pte_t *pte;
3799 spinlock_t *ptl = NULL;
3800 int absent;
3801 struct page *page;
3802
3803
3804
3805
3806
3807 if (unlikely(fatal_signal_pending(current))) {
3808 remainder = 0;
3809 break;
3810 }
3811
3812
3813
3814
3815
3816
3817
3818
3819 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3820 if (pte)
3821 ptl = huge_pte_lock(h, mm, pte);
3822 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3823
3824
3825
3826
3827
3828
3829
3830
3831 if (absent && (flags & FOLL_DUMP) &&
3832 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3833 if (pte)
3834 spin_unlock(ptl);
3835 remainder = 0;
3836 break;
3837 }
3838
3839
                /*
                 * Fault in pages that are absent, under migration or
                 * hwpoisoned (is_swap_pte covers the latter two), and pages
                 * that lack write permission when FOLL_WRITE is requested.
                 * hugetlb_fault() waits for migration and rejects poisoned
                 * pages for us.
                 */
3849 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3850 ((flags & FOLL_WRITE) &&
3851 !huge_pte_write(huge_ptep_get(pte)))) {
3852 int ret;
3853
3854 if (pte)
3855 spin_unlock(ptl);
3856 ret = hugetlb_fault(mm, vma, vaddr,
3857 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3858 if (!(ret & VM_FAULT_ERROR))
3859 continue;
3860
3861 remainder = 0;
3862 break;
3863 }
3864
3865 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
3866 page = pte_page(huge_ptep_get(pte));
3867same_page:
3868 if (pages) {
3869 pages[i] = mem_map_offset(page, pfn_offset);
3870 get_page(pages[i]);
3871 }
3872
3873 if (vmas)
3874 vmas[i] = vma;
3875
3876 vaddr += PAGE_SIZE;
3877 ++pfn_offset;
3878 --remainder;
3879 ++i;
3880 if (vaddr < vma->vm_end && remainder &&
3881 pfn_offset < pages_per_huge_page(h)) {
3882
3883
3884
3885
3886 goto same_page;
3887 }
3888 spin_unlock(ptl);
3889 }
3890 *nr_pages = remainder;
3891 *position = vaddr;
3892
3893 return i ? i : -EFAULT;
3894}
3895
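/*
 * Apply a new protection to every huge pte in [address, end).  Returns the
 * number of base-size pages whose protection changed.
 */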
3896unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3897 unsigned long address, unsigned long end, pgprot_t newprot)
3898{
3899 struct mm_struct *mm = vma->vm_mm;
3900 unsigned long start = address;
3901 pte_t *ptep;
3902 pte_t pte;
3903 struct hstate *h = hstate_vma(vma);
3904 unsigned long pages = 0;
3905
3906 BUG_ON(address >= end);
3907 flush_cache_range(vma, address, end);
3908
3909 mmu_notifier_invalidate_range_start(mm, start, end);
3910 i_mmap_lock_write(vma->vm_file->f_mapping);
3911 for (; address < end; address += huge_page_size(h)) {
3912 spinlock_t *ptl;
3913 ptep = huge_pte_offset(mm, address);
3914 if (!ptep)
3915 continue;
3916 ptl = huge_pte_lock(h, mm, ptep);
3917 if (huge_pmd_unshare(mm, &address, ptep)) {
3918 pages++;
3919 spin_unlock(ptl);
3920 continue;
3921 }
3922 pte = huge_ptep_get(ptep);
3923 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
3924 spin_unlock(ptl);
3925 continue;
3926 }
3927 if (unlikely(is_hugetlb_entry_migration(pte))) {
3928 swp_entry_t entry = pte_to_swp_entry(pte);
3929
3930 if (is_write_migration_entry(entry)) {
3931 pte_t newpte;
3932
3933 make_migration_entry_read(&entry);
3934 newpte = swp_entry_to_pte(entry);
3935 set_huge_pte_at(mm, address, ptep, newpte);
3936 pages++;
3937 }
3938 spin_unlock(ptl);
3939 continue;
3940 }
3941 if (!huge_pte_none(pte)) {
3942 pte = huge_ptep_get_and_clear(mm, address, ptep);
3943 pte = pte_mkhuge(huge_pte_modify(pte, newprot));
3944 pte = arch_make_huge_pte(pte, vma, NULL, 0);
3945 set_huge_pte_at(mm, address, ptep, pte);
3946 pages++;
3947 }
3948 spin_unlock(ptl);
3949 }
3950
        /*
         * Flush the TLB before dropping i_mmap_rwsem: huge_pmd_unshare() may
         * have cleared a shared pud entry, and once the lock is released the
         * freed page table page could be reused while stale TLB entries
         * still reference it.
         */
3956 flush_tlb_range(vma, start, end);
3957 mmu_notifier_invalidate_range(mm, start, end);
3958 i_mmap_unlock_write(vma->vm_file->f_mapping);
3959 mmu_notifier_invalidate_range_end(mm, start, end);
3960
3961 return pages << h->order;
3962}
3963
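/*
 * Reserve huge pages for a mapping of [from, to) (in huge pages) so that
 * later faults are guaranteed to find a page, charging both the subpool
 * and the global pool.
 */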
3964int hugetlb_reserve_pages(struct inode *inode,
3965 long from, long to,
3966 struct vm_area_struct *vma,
3967 vm_flags_t vm_flags)
3968{
3969 long ret, chg;
3970 struct hstate *h = hstate_inode(inode);
3971 struct hugepage_subpool *spool = subpool_inode(inode);
3972 struct resv_map *resv_map;
3973 long gbl_reserve;
3974
3975
3976
3977
3978
3979
3980 if (vm_flags & VM_NORESERVE)
3981 return 0;
3982
3983
        /*
         * Shared mappings base the reservation on what is already recorded
         * for the file.  Private mappings must reserve the whole range, even
         * if currently read-only, since mprotect() could later make them
         * writable.  A NULL vma is treated as a shared (shm) mapping.
         */
3989 if (!vma || vma->vm_flags & VM_MAYSHARE) {
3990 resv_map = inode_resv_map(inode);
3991
3992 chg = region_chg(resv_map, from, to);
3993
3994 } else {
3995 resv_map = resv_map_alloc();
3996 if (!resv_map)
3997 return -ENOMEM;
3998
3999 chg = to - from;
4000
4001 set_vma_resv_map(vma, resv_map);
4002 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4003 }
4004
4005 if (chg < 0) {
4006 ret = chg;
4007 goto out_err;
4008 }
4009
4010
4011
4012
4013
4014
4015 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4016 if (gbl_reserve < 0) {
4017 ret = -ENOSPC;
4018 goto out_err;
4019 }
4020
4021
4022
4023
4024
4025 ret = hugetlb_acct_memory(h, gbl_reserve);
4026 if (ret < 0) {
4027
4028 (void)hugepage_subpool_put_pages(spool, chg);
4029 goto out_err;
4030 }
4031
4032
        /*
         * Record the reservation in the region map.  Shared mappings track
         * reserved regions in the inode's map, shared by all VMAs; private
         * mappings are per-VMA, and their map records only the reservations
         * that have been consumed, so nothing more needs to be done for them
         * here.
         */
4043 if (!vma || vma->vm_flags & VM_MAYSHARE) {
4044 long add = region_add(resv_map, from, to);
4045
4046 if (unlikely(chg > add)) {
4047
4048
4049
4050
4051
4052
4053
4054 long rsv_adjust;
4055
4056 rsv_adjust = hugepage_subpool_put_pages(spool,
4057 chg - add);
4058 hugetlb_acct_memory(h, -rsv_adjust);
4059 }
4060 }
4061 return 0;
4062out_err:
4063 if (!vma || vma->vm_flags & VM_MAYSHARE)
4064 region_abort(resv_map, from, to);
4065 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4066 kref_put(&resv_map->refs, resv_map_release);
4067 return ret;
4068}
4069
4070long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4071 long freed)
4072{
4073 struct hstate *h = hstate_inode(inode);
4074 struct resv_map *resv_map = inode_resv_map(inode);
4075 long chg = 0;
4076 struct hugepage_subpool *spool = subpool_inode(inode);
4077 long gbl_reserve;
4078
4079 if (resv_map) {
4080 chg = region_del(resv_map, start, end);
4081
4082
4083
4084
4085
4086 if (chg < 0)
4087 return chg;
4088 }
4089
4090 spin_lock(&inode->i_lock);
4091 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4092 spin_unlock(&inode->i_lock);
4093
4094
4095
4096
4097
4098 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4099 hugetlb_acct_memory(h, -gbl_reserve);
4100
4101 return 0;
4102}
4103
4104#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
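/*
 * Huge pmd sharing: processes mapping the same PUD_SIZE-aligned range of a
 * file with compatible flags can share the pmd page holding its huge ptes,
 * saving page table memory.
 */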
4105static unsigned long page_table_shareable(struct vm_area_struct *svma,
4106 struct vm_area_struct *vma,
4107 unsigned long addr, pgoff_t idx)
4108{
4109 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4110 svma->vm_start;
4111 unsigned long sbase = saddr & PUD_MASK;
4112 unsigned long s_end = sbase + PUD_SIZE;
4113
4114
4115 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4116 unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4117
4118
4119
4120
4121
4122 if (pmd_index(addr) != pmd_index(saddr) ||
4123 vm_flags != svm_flags ||
4124 sbase < svma->vm_start || svma->vm_end < s_end)
4125 return 0;
4126
4127 return saddr;
4128}
4129
4130static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4131{
4132 unsigned long base = addr & PUD_MASK;
4133 unsigned long end = base + PUD_SIZE;
4134
4135
4136
4137
4138 if (vma->vm_flags & VM_MAYSHARE &&
4139 vma->vm_start <= base && end <= vma->vm_end)
4140 return true;
4141 return false;
4142}
4143
4144
/*
 * Look for a pmd page already mapping this file range in another VMA and
 * share it by populating our pud with it, instead of allocating a new pmd
 * page.  Falls back to pmd_alloc() when sharing is not possible.  The search
 * and the pud_populate() are done under i_mmap_rwsem so we cannot race with
 * another task setting up or tearing down the shared page table.
 */
4153pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4154{
4155 struct vm_area_struct *vma = find_vma(mm, addr);
4156 struct address_space *mapping = vma->vm_file->f_mapping;
4157 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4158 vma->vm_pgoff;
4159 struct vm_area_struct *svma;
4160 unsigned long saddr;
4161 pte_t *spte = NULL;
4162 pte_t *pte;
4163 spinlock_t *ptl;
4164
4165 if (!vma_shareable(vma, addr))
4166 return (pte_t *)pmd_alloc(mm, pud, addr);
4167
4168 i_mmap_lock_write(mapping);
4169 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4170 if (svma == vma)
4171 continue;
4172
4173 saddr = page_table_shareable(svma, vma, addr, idx);
4174 if (saddr) {
4175 spte = huge_pte_offset(svma->vm_mm, saddr);
4176 if (spte) {
4177 mm_inc_nr_pmds(mm);
4178 get_page(virt_to_page(spte));
4179 break;
4180 }
4181 }
4182 }
4183
4184 if (!spte)
4185 goto out;
4186
4187 ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
4188 spin_lock(ptl);
4189 if (pud_none(*pud)) {
4190 pud_populate(mm, pud,
4191 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4192 } else {
4193 put_page(virt_to_page(spte));
4194 mm_inc_nr_pmds(mm);
4195 }
4196 spin_unlock(ptl);
4197out:
4198 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4199 i_mmap_unlock_write(mapping);
4200 return pte;
4201}
4202
4203
/*
 * Unmap a huge page that is backed by a shared pte page.  The pte page is
 * refcounted when mapped; if it is shared (page_count > 1), clear the pud
 * and drop the reference, otherwise leave it to the normal unmap path.
 *
 * Called with the page table lock held.  Returns 1 if a shared pte page was
 * unmapped (and *addr is advanced so the caller skips the rest of the shared
 * range), 0 otherwise.
 */
4215int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4216{
4217 pgd_t *pgd = pgd_offset(mm, *addr);
4218 pud_t *pud = pud_offset(pgd, *addr);
4219
4220 BUG_ON(page_count(virt_to_page(ptep)) == 0);
4221 if (page_count(virt_to_page(ptep)) == 1)
4222 return 0;
4223
4224 pud_clear(pud);
4225 put_page(virt_to_page(ptep));
4226 mm_dec_nr_pmds(mm);
4227 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4228 return 1;
4229}
4230#define want_pmd_share() (1)
4231#else
4232pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4233{
4234 return NULL;
4235}
4236
4237int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4238{
4239 return 0;
4240}
4241#define want_pmd_share() (0)
4242#endif
4243
4244#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
4245pte_t *huge_pte_alloc(struct mm_struct *mm,
4246 unsigned long addr, unsigned long sz)
4247{
4248 pgd_t *pgd;
4249 pud_t *pud;
4250 pte_t *pte = NULL;
4251
4252 pgd = pgd_offset(mm, addr);
4253 pud = pud_alloc(mm, pgd, addr);
4254 if (pud) {
4255 if (sz == PUD_SIZE) {
4256 pte = (pte_t *)pud;
4257 } else {
4258 BUG_ON(sz != PMD_SIZE);
4259 if (want_pmd_share() && pud_none(*pud))
4260 pte = huge_pmd_share(mm, addr, pud);
4261 else
4262 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4263 }
4264 }
4265 BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
4266
4267 return pte;
4268}
4269
4270pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
4271{
4272 pgd_t *pgd;
4273 pud_t *pud;
4274 pmd_t *pmd = NULL;
4275
4276 pgd = pgd_offset(mm, addr);
4277 if (pgd_present(*pgd)) {
4278 pud = pud_offset(pgd, addr);
4279 if (pud_present(*pud)) {
4280 if (pud_huge(*pud))
4281 return (pte_t *)pud;
4282 pmd = pmd_offset(pud, addr);
4283 }
4284 }
4285 return (pte_t *) pmd;
4286}
4287
4288#endif
4289
/*
 * Default (weak) implementations of the huge page follow_page helpers;
 * architectures with special requirements can override them.
 */
4294struct page * __weak
4295follow_huge_addr(struct mm_struct *mm, unsigned long address,
4296 int write)
4297{
4298 return ERR_PTR(-EINVAL);
4299}
4300
4301struct page * __weak
4302follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4303 pmd_t *pmd, int flags)
4304{
4305 struct page *page = NULL;
4306 spinlock_t *ptl;
4307retry:
4308 ptl = pmd_lockptr(mm, pmd);
4309 spin_lock(ptl);
4310
4311
4312
4313
4314 if (!pmd_huge(*pmd))
4315 goto out;
4316 if (pmd_present(*pmd)) {
4317 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4318 if (flags & FOLL_GET)
4319 get_page(page);
4320 } else {
4321 if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
4322 spin_unlock(ptl);
4323 __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4324 goto retry;
4325 }
4326
4327
4328
4329
4330 }
4331out:
4332 spin_unlock(ptl);
4333 return page;
4334}
4335
4336struct page * __weak
4337follow_huge_pud(struct mm_struct *mm, unsigned long address,
4338 pud_t *pud, int flags)
4339{
4340 if (flags & FOLL_GET)
4341 return NULL;
4342
4343 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4344}
4345
4346#ifdef CONFIG_MEMORY_FAILURE
/*
 * Called from the memory-failure code to remove a free hwpoisoned huge page
 * from the free lists so it can no longer be allocated.
 */
4352int dequeue_hwpoisoned_huge_page(struct page *hpage)
4353{
4354 struct hstate *h = page_hstate(hpage);
4355 int nid = page_to_nid(hpage);
4356 int ret = -EBUSY;
4357
4358 spin_lock(&hugetlb_lock);
4359
4360
4361
4362
4363 if (!page_huge_active(hpage) && !page_count(hpage)) {
4364
4365
4366
4367
4368
4369
4370 list_del_init(&hpage->lru);
4371 set_page_refcounted(hpage);
4372 h->free_huge_pages--;
4373 h->free_huge_pages_node[nid]--;
4374 ret = 0;
4375 }
4376 spin_unlock(&hugetlb_lock);
4377 return ret;
4378}
4379#endif
4380
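/*
 * Take an active huge page off its hstate list for migration, pinning it
 * and moving it to the caller's list; putback_active_hugepage() undoes
 * this.
 */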
4381bool isolate_huge_page(struct page *page, struct list_head *list)
4382{
4383 bool ret = true;
4384
4385 VM_BUG_ON_PAGE(!PageHead(page), page);
4386 spin_lock(&hugetlb_lock);
4387 if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4388 ret = false;
4389 goto unlock;
4390 }
4391 clear_page_huge_active(page);
4392 list_move_tail(&page->lru, list);
4393unlock:
4394 spin_unlock(&hugetlb_lock);
4395 return ret;
4396}
4397
4398void putback_active_hugepage(struct page *page)
4399{
4400 VM_BUG_ON_PAGE(!PageHead(page), page);
4401 spin_lock(&hugetlb_lock);
4402 set_page_huge_active(page);
4403 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4404 spin_unlock(&hugetlb_lock);
4405 put_page(page);
4406}
4407