/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
5#include <linux/list.h>
6#include <linux/init.h>
7#include <linux/mm.h>
8#include <linux/seq_file.h>
9#include <linux/sysctl.h>
10#include <linux/highmem.h>
11#include <linux/mmu_notifier.h>
12#include <linux/nodemask.h>
13#include <linux/pagemap.h>
14#include <linux/mempolicy.h>
15#include <linux/compiler.h>
16#include <linux/cpuset.h>
17#include <linux/mutex.h>
18#include <linux/bootmem.h>
19#include <linux/sysfs.h>
20#include <linux/slab.h>
21#include <linux/rmap.h>
22#include <linux/swap.h>
23#include <linux/swapops.h>
24#include <linux/page-isolation.h>
25#include <linux/jhash.h>
26
27#include <asm/page.h>
28#include <asm/pgtable.h>
29#include <asm/tlb.h>
30
31#include <linux/io.h>
32#include <linux/hugetlb.h>
33#include <linux/hugetlb_cgroup.h>
34#include <linux/node.h>
35#include "internal.h"
36
37int hugepages_treat_as_movable;
38
39int hugetlb_max_hstate __read_mostly;
40unsigned int default_hstate_idx;
41struct hstate hstates[HUGE_MAX_HSTATE];
/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */
46static unsigned int minimum_order __read_mostly = UINT_MAX;
47
48__initdata LIST_HEAD(huge_boot_pages);
49
/* for command line parsing */
51static struct hstate * __initdata parsed_hstate;
52static unsigned long __initdata default_hstate_max_huge_pages;
53static unsigned long __initdata default_hstate_size;
54static bool __initdata parsed_valid_hugepagesz = true;
55
/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
60DEFINE_SPINLOCK(hugetlb_lock);
61
/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when two people reading the same page at the
 * same time are racing to instantiate it.
 */
66static int num_fault_mutexes;
67struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
68
/* Forward declaration */
70static int hugetlb_acct_memory(struct hstate *h, long delta);
71
72static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
73{
74 bool free = (spool->count == 0) && (spool->used_hpages == 0);
75
76 spin_unlock(&spool->lock);
77
	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
81 if (free) {
82 if (spool->min_hpages != -1)
83 hugetlb_acct_memory(spool->hstate,
84 -spool->min_hpages);
85 kfree(spool);
86 }
87}
88
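/*
 * Create the subpool for a hugetlbfs mount.  If a minimum size is
 * requested (min_hpages != -1), that many huge pages are reserved up
 * front; if the reservation fails, no subpool is created.
 */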
89struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
90 long min_hpages)
91{
92 struct hugepage_subpool *spool;
93
94 spool = kzalloc(sizeof(*spool), GFP_KERNEL);
95 if (!spool)
96 return NULL;
97
98 spin_lock_init(&spool->lock);
99 spool->count = 1;
100 spool->max_hpages = max_hpages;
101 spool->hstate = h;
102 spool->min_hpages = min_hpages;
103
104 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
105 kfree(spool);
106 return NULL;
107 }
108 spool->rsv_hpages = min_hpages;
109
110 return spool;
111}
112
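/*
 * Drop a reference on the subpool.  When the last reference is dropped
 * and no pages remain in use, any minimum-size reservation is released
 * and the subpool is freed (see unlock_or_release_subpool()).
 */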
113void hugepage_put_subpool(struct hugepage_subpool *spool)
114{
115 spin_lock(&spool->lock);
116 BUG_ON(!spool->count);
117 spool->count--;
118 unlock_or_release_subpool(spool);
119}
120
/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
129static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
130 long delta)
131{
132 long ret = delta;
133
134 if (!spool)
135 return ret;
136
137 spin_lock(&spool->lock);
138
139 if (spool->max_hpages != -1) {
140 if ((spool->used_hpages + delta) <= spool->max_hpages)
141 spool->used_hpages += delta;
142 else {
143 ret = -ENOMEM;
144 goto unlock_ret;
145 }
146 }
147
	/* minimum size accounting */
149 if (spool->min_hpages != -1 && spool->rsv_hpages) {
150 if (delta > spool->rsv_hpages) {
151
152
153
154
155 ret = delta - spool->rsv_hpages;
156 spool->rsv_hpages = 0;
157 } else {
158 ret = 0;
159 spool->rsv_hpages -= delta;
160 }
161 }
162
163unlock_ret:
164 spin_unlock(&spool->lock);
165 return ret;
166}
167
/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
174static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
175 long delta)
176{
177 long ret = delta;
178
179 if (!spool)
180 return delta;
181
182 spin_lock(&spool->lock);
183
184 if (spool->max_hpages != -1)
185 spool->used_hpages -= delta;
186
	/* minimum size accounting */
188 if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
189 if (spool->rsv_hpages + delta <= spool->min_hpages)
190 ret = 0;
191 else
192 ret = spool->rsv_hpages + delta - spool->min_hpages;
193
194 spool->rsv_hpages += delta;
195 if (spool->rsv_hpages > spool->min_hpages)
196 spool->rsv_hpages = spool->min_hpages;
197 }
198
	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
203 unlock_or_release_subpool(spool);
204
205 return ret;
206}
207
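/* Look up the subpool of the hugetlbfs mount backing an inode or VMA. */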
208static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
209{
210 return HUGETLBFS_SB(inode->i_sb)->spool;
211}
212
213static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
214{
215 return subpool_inode(file_inode(vma->vm_file));
216}
217
/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
237struct file_region {
238 struct list_head link;
239 long from;
240 long to;
241};
242
/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  In the normal case, existing regions will be expanded
 * to accommodate the specified range.  Sufficient regions should
 * exist for expansion due to the previous call to region_chg
 * with the same range.  However, it is possible that region_del
 * could have been called after region_chg and modified the map
 * in such a way that no region exists to be expanded.  In this
 * case, pull a region descriptor from the cache associated with
 * the map and use that for the new range.
 *
 * Return the number of new huge pages added to the map.  This
 * number is greater than or equal to zero.
 */
257static long region_add(struct resv_map *resv, long f, long t)
258{
259 struct list_head *head = &resv->regions;
260 struct file_region *rg, *nrg, *trg;
261 long add = 0;
262
263 spin_lock(&resv->lock);
264
265 list_for_each_entry(rg, head, link)
266 if (f <= rg->to)
267 break;
268
269
270
271
272
273
274
275 if (&rg->link == head || t < rg->from) {
276 VM_BUG_ON(resv->region_cache_count <= 0);
277
278 resv->region_cache_count--;
279 nrg = list_first_entry(&resv->region_cache, struct file_region,
280 link);
281 list_del(&nrg->link);
282
283 nrg->from = f;
284 nrg->to = t;
285 list_add(&nrg->link, rg->link.prev);
286
287 add += t - f;
288 goto out_locked;
289 }
290
291
292 if (f > rg->from)
293 f = rg->from;
294
295
296 nrg = rg;
297 list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
298 if (&rg->link == head)
299 break;
300 if (rg->from > t)
301 break;
302
303
304
305
306 if (rg->to > t)
307 t = rg->to;
308 if (rg != nrg) {
309
310
311
312
313 add -= (rg->to - rg->from);
314 list_del(&rg->link);
315 kfree(rg);
316 }
317 }
318
319 add += (nrg->from - f);
320 nrg->from = f;
321 add += t - nrg->to;
322 nrg->to = t;
323
324out_locked:
325 resv->adds_in_progress--;
326 spin_unlock(&resv->lock);
327 VM_BUG_ON(add < 0);
328 return add;
329}
330
/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  However, if the existing regions in the map can not
 * be expanded to represent the new range, a new file_region
 * structure is added to the map as a placeholder.  This is
 * needed for subsequent calls to region_add and will be used
 * by region_abort to determine if the map actually changed.
 * While the map is modified to expect the addition of the
 * specified range, it is not actually added.
 *
 * Returns the number of huge pages that need to be added to
 * the existing reservation map for the range [f, t).  -ENOMEM
 * is returned if a new file_region structure or cache entry
 * is needed and can not be allocated.
 */
353static long region_chg(struct resv_map *resv, long f, long t)
354{
355 struct list_head *head = &resv->regions;
356 struct file_region *rg, *nrg = NULL;
357 long chg = 0;
358
359retry:
360 spin_lock(&resv->lock);
361retry_locked:
362 resv->adds_in_progress++;
363
364
365
366
367
368 if (resv->adds_in_progress > resv->region_cache_count) {
369 struct file_region *trg;
370
371 VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
372
373 resv->adds_in_progress--;
374 spin_unlock(&resv->lock);
375
376 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
377 if (!trg) {
378 kfree(nrg);
379 return -ENOMEM;
380 }
381
382 spin_lock(&resv->lock);
383 list_add(&trg->link, &resv->region_cache);
384 resv->region_cache_count++;
385 goto retry_locked;
386 }
387
388
389 list_for_each_entry(rg, head, link)
390 if (f <= rg->to)
391 break;
392
393
394
395
396 if (&rg->link == head || t < rg->from) {
397 if (!nrg) {
398 resv->adds_in_progress--;
399 spin_unlock(&resv->lock);
400 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
401 if (!nrg)
402 return -ENOMEM;
403
404 nrg->from = f;
405 nrg->to = f;
406 INIT_LIST_HEAD(&nrg->link);
407 goto retry;
408 }
409
410 list_add(&nrg->link, rg->link.prev);
411 chg = t - f;
412 goto out_nrg;
413 }
414
415
416 if (f > rg->from)
417 f = rg->from;
418 chg = t - f;
419
420
421 list_for_each_entry(rg, rg->link.prev, link) {
422 if (&rg->link == head)
423 break;
424 if (rg->from > t)
425 goto out;
426
427
428
429
430 if (rg->to > t) {
431 chg += rg->to - t;
432 t = rg->to;
433 }
434 chg -= rg->to - rg->from;
435 }
436
437out:
438 spin_unlock(&resv->lock);
439
440 kfree(nrg);
441 return chg;
442out_nrg:
443 spin_unlock(&resv->lock);
444 return chg;
445}
446
/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
458static void region_abort(struct resv_map *resv, long f, long t)
459{
460 spin_lock(&resv->lock);
461 VM_BUG_ON(!resv->region_cache_count);
462 resv->adds_in_progress--;
463 spin_unlock(&resv->lock);
464}
465
/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split and a new region descriptor
 * must be allocated, it is possible that it can not be allocated.
 * In this case, -ENOMEM will be returned and the caller should
 * adjust the reservation counts as appropriate.
 */
480static long region_del(struct resv_map *resv, long f, long t)
481{
482 struct list_head *head = &resv->regions;
483 struct file_region *rg, *trg;
484 struct file_region *nrg = NULL;
485 long del = 0;
486
487retry:
488 spin_lock(&resv->lock);
489 list_for_each_entry_safe(rg, trg, head, link) {
490
491
492
493
494
495
496
497 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
498 continue;
499
500 if (rg->from >= t)
501 break;
502
503 if (f > rg->from && t < rg->to) {
504
505
506
507
508 if (!nrg &&
509 resv->region_cache_count > resv->adds_in_progress) {
510 nrg = list_first_entry(&resv->region_cache,
511 struct file_region,
512 link);
513 list_del(&nrg->link);
514 resv->region_cache_count--;
515 }
516
517 if (!nrg) {
518 spin_unlock(&resv->lock);
519 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
520 if (!nrg)
521 return -ENOMEM;
522 goto retry;
523 }
524
525 del += t - f;
526
527
528 nrg->from = t;
529 nrg->to = rg->to;
530 INIT_LIST_HEAD(&nrg->link);
531
532
533 rg->to = f;
534
535 list_add(&nrg->link, &rg->link);
536 nrg = NULL;
537 break;
538 }
539
540 if (f <= rg->from && t >= rg->to) {
541 del += rg->to - rg->from;
542 list_del(&rg->link);
543 kfree(rg);
544 continue;
545 }
546
547 if (f <= rg->from) {
548 del += t - rg->from;
549 rg->from = t;
550 } else {
551 del += rg->to - f;
552 rg->to = f;
553 }
554 }
555
556 spin_unlock(&resv->lock);
557 kfree(nrg);
558 return del;
559}
560
/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was free'd
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
570void hugetlb_fix_reserve_counts(struct inode *inode)
571{
572 struct hugepage_subpool *spool = subpool_inode(inode);
573 long rsv_adjust;
574
575 rsv_adjust = hugepage_subpool_get_pages(spool, 1);
576 if (rsv_adjust) {
577 struct hstate *h = hstate_inode(inode);
578
579 hugetlb_acct_memory(h, 1);
580 }
581}
582
/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
587static long region_count(struct resv_map *resv, long f, long t)
588{
589 struct list_head *head = &resv->regions;
590 struct file_region *rg;
591 long chg = 0;
592
593 spin_lock(&resv->lock);
594
595 list_for_each_entry(rg, head, link) {
596 long seg_from;
597 long seg_to;
598
599 if (rg->to <= f)
600 continue;
601 if (rg->from >= t)
602 break;
603
604 seg_from = max(rg->from, f);
605 seg_to = min(rg->to, t);
606
607 chg += seg_to - seg_from;
608 }
609 spin_unlock(&resv->lock);
610
611 return chg;
612}
613
/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
618static pgoff_t vma_hugecache_offset(struct hstate *h,
619 struct vm_area_struct *vma, unsigned long address)
620{
621 return ((address - vma->vm_start) >> huge_page_shift(h)) +
622 (vma->vm_pgoff >> huge_page_order(h));
623}
624
625pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
626 unsigned long address)
627{
628 return vma_hugecache_offset(hstate_vma(vma), vma, address);
629}
630EXPORT_SYMBOL_GPL(linear_hugepage_index);
631
632
633
634
635
636unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
637{
638 struct hstate *hstate;
639
640 if (!is_vm_hugetlb_page(vma))
641 return PAGE_SIZE;
642
643 hstate = hstate_vma(vma);
644
645 return 1UL << huge_page_shift(hstate);
646}
647EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
648
649
650
651
652
653
654
655#ifndef vma_mmu_pagesize
656unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
657{
658 return vma_kernel_pagesize(vma);
659}
660#endif
661
662
663
664
665
666
667#define HPAGE_RESV_OWNER (1UL << 0)
668#define HPAGE_RESV_UNMAPPED (1UL << 1)
669#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690static unsigned long get_vma_private_data(struct vm_area_struct *vma)
691{
692 return (unsigned long)vma->vm_private_data;
693}
694
695static void set_vma_private_data(struct vm_area_struct *vma,
696 unsigned long value)
697{
698 vma->vm_private_data = (void *)value;
699}
700
701struct resv_map *resv_map_alloc(void)
702{
703 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
704 struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
705
706 if (!resv_map || !rg) {
707 kfree(resv_map);
708 kfree(rg);
709 return NULL;
710 }
711
712 kref_init(&resv_map->refs);
713 spin_lock_init(&resv_map->lock);
714 INIT_LIST_HEAD(&resv_map->regions);
715
716 resv_map->adds_in_progress = 0;
717
718 INIT_LIST_HEAD(&resv_map->region_cache);
719 list_add(&rg->link, &resv_map->region_cache);
720 resv_map->region_cache_count = 1;
721
722 return resv_map;
723}
724
725void resv_map_release(struct kref *ref)
726{
727 struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
728 struct list_head *head = &resv_map->region_cache;
729 struct file_region *rg, *trg;
730
731
732 region_del(resv_map, 0, LONG_MAX);
733
734
735 list_for_each_entry_safe(rg, trg, head, link) {
736 list_del(&rg->link);
737 kfree(rg);
738 }
739
740 VM_BUG_ON(resv_map->adds_in_progress);
741
742 kfree(resv_map);
743}
744
745static inline struct resv_map *inode_resv_map(struct inode *inode)
746{
747 return inode->i_mapping->private_data;
748}
749
750static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
751{
752 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
753 if (vma->vm_flags & VM_MAYSHARE) {
754 struct address_space *mapping = vma->vm_file->f_mapping;
755 struct inode *inode = mapping->host;
756
757 return inode_resv_map(inode);
758
759 } else {
760 return (struct resv_map *)(get_vma_private_data(vma) &
761 ~HPAGE_RESV_MASK);
762 }
763}
764
765static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
766{
767 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
768 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
769
770 set_vma_private_data(vma, (get_vma_private_data(vma) &
771 HPAGE_RESV_MASK) | (unsigned long)map);
772}
773
774static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
775{
776 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
777 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
778
779 set_vma_private_data(vma, get_vma_private_data(vma) | flags);
780}
781
782static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
783{
784 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
785
786 return (get_vma_private_data(vma) & flag) != 0;
787}
788
789
790void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
791{
792 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
793 if (!(vma->vm_flags & VM_MAYSHARE))
794 vma->vm_private_data = (void *)0;
795}
796
797
798static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
799{
800 if (vma->vm_flags & VM_NORESERVE) {
801
802
803
804
805
806
807
808
809
810 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
811 return true;
812 else
813 return false;
814 }
815
816
817 if (vma->vm_flags & VM_MAYSHARE) {
818
819
820
821
822
823
824
825 if (chg)
826 return false;
827 else
828 return true;
829 }
830
831
832
833
834
835 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851 if (chg)
852 return false;
853 else
854 return true;
855 }
856
857 return false;
858}
859
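/* Return a free huge page to its node's free list; caller holds hugetlb_lock. */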
860static void enqueue_huge_page(struct hstate *h, struct page *page)
861{
862 int nid = page_to_nid(page);
863 list_move(&page->lru, &h->hugepage_freelists[nid]);
864 h->free_huge_pages++;
865 h->free_huge_pages_node[nid]++;
866}
867
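/*
 * Take a free huge page from the given node, skipping pages on isolated
 * pageblocks, and move it to the active list.  Returns NULL if the node
 * has no usable free huge pages.  Caller holds hugetlb_lock.
 */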
868static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
869{
870 struct page *page;
871
872 list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
873 if (!is_migrate_isolate_page(page))
874 break;
875
876
877
878
879 if (&h->hugepage_freelists[nid] == &page->lru)
880 return NULL;
881 list_move(&page->lru, &h->hugepage_activelist);
882 set_page_refcounted(page);
883 h->free_huge_pages--;
884 h->free_huge_pages_node[nid]--;
885 return page;
886}
887
888
889static inline gfp_t htlb_alloc_mask(struct hstate *h)
890{
891 if (hugepages_treat_as_movable || hugepage_migration_supported(h))
892 return GFP_HIGHUSER_MOVABLE;
893 else
894 return GFP_HIGHUSER;
895}
896
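/*
 * Dequeue a free huge page for a fault in @vma at @address, walking the
 * zonelist chosen by the VMA's mempolicy and cpuset.  @chg tells whether
 * the caller still needs a reservation for this page; the global
 * resv_huge_pages count is only consumed when the VMA actually has a
 * reservation available.
 */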
897static struct page *dequeue_huge_page_vma(struct hstate *h,
898 struct vm_area_struct *vma,
899 unsigned long address, int avoid_reserve,
900 long chg)
901{
902 struct page *page = NULL;
903 struct mempolicy *mpol;
904 nodemask_t *nodemask;
905 struct zonelist *zonelist;
906 struct zone *zone;
907 struct zoneref *z;
908 unsigned int cpuset_mems_cookie;
909
910
911
912
913
914
915 if (!vma_has_reserves(vma, chg) &&
916 h->free_huge_pages - h->resv_huge_pages == 0)
917 goto err;
918
919
920 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
921 goto err;
922
923retry_cpuset:
924 cpuset_mems_cookie = read_mems_allowed_begin();
925 zonelist = huge_zonelist(vma, address,
926 htlb_alloc_mask(h), &mpol, &nodemask);
927
928 for_each_zone_zonelist_nodemask(zone, z, zonelist,
929 MAX_NR_ZONES - 1, nodemask) {
930 if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
931 page = dequeue_huge_page_node(h, zone_to_nid(zone));
932 if (page) {
933 if (avoid_reserve)
934 break;
935 if (!vma_has_reserves(vma, chg))
936 break;
937
938 SetPagePrivate(page);
939 h->resv_huge_pages--;
940 break;
941 }
942 }
943 }
944
945 mpol_cond_put(mpol);
946 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
947 goto retry_cpuset;
948 return page;
949
950err:
951 return NULL;
952}
953
954
955
956
957
958
959
960
961static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
962{
963 nid = next_node_in(nid, *nodes_allowed);
964 VM_BUG_ON(nid >= MAX_NUMNODES);
965
966 return nid;
967}
968
969static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
970{
971 if (!node_isset(nid, *nodes_allowed))
972 nid = next_node_allowed(nid, nodes_allowed);
973 return nid;
974}
975
976
977
978
979
980
981
982static int hstate_next_node_to_alloc(struct hstate *h,
983 nodemask_t *nodes_allowed)
984{
985 int nid;
986
987 VM_BUG_ON(!nodes_allowed);
988
989 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
990 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
991
992 return nid;
993}
994
995
996
997
998
999
1000
1001static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1002{
1003 int nid;
1004
1005 VM_BUG_ON(!nodes_allowed);
1006
1007 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1008 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1009
1010 return nid;
1011}
1012
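/*
 * Iterators over the nodes a huge page may be allocated from (or freed
 * to), round-robin starting at the hstate's next_nid_to_alloc or
 * next_nid_to_free so that persistent pages stay spread across the
 * allowed nodes.  Each node in the mask is visited at most once per walk.
 */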
1013#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
1014 for (nr_nodes = nodes_weight(*mask); \
1015 nr_nodes > 0 && \
1016 ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
1017 nr_nodes--)
1018
1019#define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
1020 for (nr_nodes = nodes_weight(*mask); \
1021 nr_nodes > 0 && \
1022 ((node = hstate_next_node_to_free(hs, mask)) || 1); \
1023 nr_nodes--)
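/*
 * Typical use of the iterators above (illustrative sketch; it mirrors the
 * loop in alloc_fresh_huge_page() below): try each allowed node once and
 * stop at the first success.
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *		page = alloc_fresh_huge_page_node(h, node);
 *		if (page)
 *			break;
 *	}
 */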
1024
1025#if defined(CONFIG_ARCH_HAS_GIGANTIC_PAGE) && \
1026 ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || \
1027 defined(CONFIG_CMA))
1028static void destroy_compound_gigantic_page(struct page *page,
1029 unsigned int order)
1030{
1031 int i;
1032 int nr_pages = 1 << order;
1033 struct page *p = page + 1;
1034
1035 atomic_set(compound_mapcount_ptr(page), 0);
1036 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1037 clear_compound_head(p);
1038 set_page_refcounted(p);
1039 }
1040
1041 set_compound_order(page, 0);
1042 __ClearPageHead(page);
1043}
1044
1045static void free_gigantic_page(struct page *page, unsigned int order)
1046{
1047 free_contig_range(page_to_pfn(page), 1 << order);
1048}
1049
1050static int __alloc_gigantic_page(unsigned long start_pfn,
1051 unsigned long nr_pages)
1052{
1053 unsigned long end_pfn = start_pfn + nr_pages;
1054 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1055}
1056
1057static bool pfn_range_valid_gigantic(struct zone *z,
1058 unsigned long start_pfn, unsigned long nr_pages)
1059{
1060 unsigned long i, end_pfn = start_pfn + nr_pages;
1061 struct page *page;
1062
1063 for (i = start_pfn; i < end_pfn; i++) {
1064 if (!pfn_valid(i))
1065 return false;
1066
1067 page = pfn_to_page(i);
1068
1069 if (page_zone(page) != z)
1070 return false;
1071
1072 if (PageReserved(page))
1073 return false;
1074
1075 if (page_count(page) > 0)
1076 return false;
1077
1078 if (PageHuge(page))
1079 return false;
1080 }
1081
1082 return true;
1083}
1084
1085static bool zone_spans_last_pfn(const struct zone *zone,
1086 unsigned long start_pfn, unsigned long nr_pages)
1087{
1088 unsigned long last_pfn = start_pfn + nr_pages - 1;
1089 return zone_spans_pfn(zone, last_pfn);
1090}
1091
1092static struct page *alloc_gigantic_page(int nid, unsigned int order)
1093{
1094 unsigned long nr_pages = 1 << order;
1095 unsigned long ret, pfn, flags;
1096 struct zone *z;
1097
1098 z = NODE_DATA(nid)->node_zones;
1099 for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
1100 spin_lock_irqsave(&z->lock, flags);
1101
1102 pfn = ALIGN(z->zone_start_pfn, nr_pages);
1103 while (zone_spans_last_pfn(z, pfn, nr_pages)) {
1104 if (pfn_range_valid_gigantic(z, pfn, nr_pages)) {
1105
1106
1107
1108
1109
1110
1111
1112 spin_unlock_irqrestore(&z->lock, flags);
1113 ret = __alloc_gigantic_page(pfn, nr_pages);
1114 if (!ret)
1115 return pfn_to_page(pfn);
1116 spin_lock_irqsave(&z->lock, flags);
1117 }
1118 pfn += nr_pages;
1119 }
1120
1121 spin_unlock_irqrestore(&z->lock, flags);
1122 }
1123
1124 return NULL;
1125}
1126
1127static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1128static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1129
1130static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
1131{
1132 struct page *page;
1133
1134 page = alloc_gigantic_page(nid, huge_page_order(h));
1135 if (page) {
1136 prep_compound_gigantic_page(page, huge_page_order(h));
1137 prep_new_huge_page(h, page, nid);
1138 }
1139
1140 return page;
1141}
1142
1143static int alloc_fresh_gigantic_page(struct hstate *h,
1144 nodemask_t *nodes_allowed)
1145{
1146 struct page *page = NULL;
1147 int nr_nodes, node;
1148
1149 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1150 page = alloc_fresh_gigantic_page_node(h, node);
1151 if (page)
1152 return 1;
1153 }
1154
1155 return 0;
1156}
1157
1158static inline bool gigantic_page_supported(void) { return true; }
1159#else
1160static inline bool gigantic_page_supported(void) { return false; }
1161static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1162static inline void destroy_compound_gigantic_page(struct page *page,
1163 unsigned int order) { }
1164static inline int alloc_fresh_gigantic_page(struct hstate *h,
1165 nodemask_t *nodes_allowed) { return 0; }
1166#endif
1167
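/*
 * Drop the hstate accounting for @page, clear its hugetlb destructor and
 * page flags, then hand it back to the page allocator (or free the
 * contiguous range directly for gigantic pages).
 */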
1168static void update_and_free_page(struct hstate *h, struct page *page)
1169{
1170 int i;
1171
1172 if (hstate_is_gigantic(h) && !gigantic_page_supported())
1173 return;
1174
1175 h->nr_huge_pages--;
1176 h->nr_huge_pages_node[page_to_nid(page)]--;
1177 for (i = 0; i < pages_per_huge_page(h); i++) {
1178 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1179 1 << PG_referenced | 1 << PG_dirty |
1180 1 << PG_active | 1 << PG_private |
1181 1 << PG_writeback);
1182 }
1183 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1184 set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1185 set_page_refcounted(page);
1186 if (hstate_is_gigantic(h)) {
1187 destroy_compound_gigantic_page(page, huge_page_order(h));
1188 free_gigantic_page(page, huge_page_order(h));
1189 } else {
1190 __free_pages(page, huge_page_order(h));
1191 }
1192}
1193
1194struct hstate *size_to_hstate(unsigned long size)
1195{
1196 struct hstate *h;
1197
1198 for_each_hstate(h) {
1199 if (huge_page_size(h) == size)
1200 return h;
1201 }
1202 return NULL;
1203}
1204
1205
1206
1207
1208
1209
1210
1211bool page_huge_active(struct page *page)
1212{
1213 VM_BUG_ON_PAGE(!PageHuge(page), page);
1214 return PageHead(page) && PagePrivate(&page[1]);
1215}
1216
1217
1218static void set_page_huge_active(struct page *page)
1219{
1220 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1221 SetPagePrivate(&page[1]);
1222}
1223
1224static void clear_page_huge_active(struct page *page)
1225{
1226 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1227 ClearPagePrivate(&page[1]);
1228}
1229
1230void free_huge_page(struct page *page)
1231{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
1236 struct hstate *h = page_hstate(page);
1237 int nid = page_to_nid(page);
1238 struct hugepage_subpool *spool =
1239 (struct hugepage_subpool *)page_private(page);
1240 bool restore_reserve;
1241
1242 set_page_private(page, 0);
1243 page->mapping = NULL;
1244 VM_BUG_ON_PAGE(page_count(page), page);
1245 VM_BUG_ON_PAGE(page_mapcount(page), page);
1246 restore_reserve = PagePrivate(page);
1247 ClearPagePrivate(page);
1248
1249
1250
1251
1252
1253
1254 if (hugepage_subpool_put_pages(spool, 1) == 0)
1255 restore_reserve = true;
1256
1257 spin_lock(&hugetlb_lock);
1258 clear_page_huge_active(page);
1259 hugetlb_cgroup_uncharge_page(hstate_index(h),
1260 pages_per_huge_page(h), page);
1261 if (restore_reserve)
1262 h->resv_huge_pages++;
1263
1264 if (h->surplus_huge_pages_node[nid]) {
1265
1266 list_del(&page->lru);
1267 update_and_free_page(h, page);
1268 h->surplus_huge_pages--;
1269 h->surplus_huge_pages_node[nid]--;
1270 } else {
1271 arch_clear_hugepage_flags(page);
1272 enqueue_huge_page(h, page);
1273 }
1274 spin_unlock(&hugetlb_lock);
1275}
1276
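/* Initialize a freshly allocated compound page as a huge page of @h. */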
1277static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1278{
1279 INIT_LIST_HEAD(&page->lru);
1280 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1281 spin_lock(&hugetlb_lock);
1282 set_hugetlb_cgroup(page, NULL);
1283 h->nr_huge_pages++;
1284 h->nr_huge_pages_node[nid]++;
1285 spin_unlock(&hugetlb_lock);
1286 put_page(page);
1287}
1288
1289static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1290{
1291 int i;
1292 int nr_pages = 1 << order;
1293 struct page *p = page + 1;
1294
1295
1296 set_compound_order(page, order);
1297 __ClearPageReserved(page);
1298 __SetPageHead(page);
1299 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312 __ClearPageReserved(p);
1313 set_page_count(p, 0);
1314 set_compound_head(p, page);
1315 }
1316 atomic_set(compound_mapcount_ptr(page), -1);
1317}
1318
1319
1320
1321
1322
1323
1324int PageHuge(struct page *page)
1325{
1326 if (!PageCompound(page))
1327 return 0;
1328
1329 page = compound_head(page);
1330 return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1331}
1332EXPORT_SYMBOL_GPL(PageHuge);
1333
1334
1335
1336
1337
1338int PageHeadHuge(struct page *page_head)
1339{
1340 if (!PageHead(page_head))
1341 return 0;
1342
1343 return get_compound_page_dtor(page_head) == free_huge_page;
1344}
1345
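/*
 * Return the page cache index of @page in base-page (PAGE_SIZE) units,
 * even if it is a tail page of a huge page.
 */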
1346pgoff_t __basepage_index(struct page *page)
1347{
1348 struct page *page_head = compound_head(page);
1349 pgoff_t index = page_index(page_head);
1350 unsigned long compound_idx;
1351
1352 if (!PageHuge(page_head))
1353 return page_index(page);
1354
1355 if (compound_order(page_head) >= MAX_ORDER)
1356 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1357 else
1358 compound_idx = page - page_head;
1359
1360 return (index << compound_order(page_head)) + compound_idx;
1361}
1362
1363static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1364{
1365 struct page *page;
1366
1367 page = __alloc_pages_node(nid,
1368 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1369 __GFP_REPEAT|__GFP_NOWARN,
1370 huge_page_order(h));
1371 if (page) {
1372 prep_new_huge_page(h, page, nid);
1373 }
1374
1375 return page;
1376}
1377
1378static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1379{
1380 struct page *page;
1381 int nr_nodes, node;
1382 int ret = 0;
1383
1384 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1385 page = alloc_fresh_huge_page_node(h, node);
1386 if (page) {
1387 ret = 1;
1388 break;
1389 }
1390 }
1391
1392 if (ret)
1393 count_vm_event(HTLB_BUDDY_PGALLOC);
1394 else
1395 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1396
1397 return ret;
1398}
1399
/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
1406static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1407 bool acct_surplus)
1408{
1409 int nr_nodes, node;
1410 int ret = 0;
1411
1412 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1413
1414
1415
1416
1417 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1418 !list_empty(&h->hugepage_freelists[node])) {
1419 struct page *page =
1420 list_entry(h->hugepage_freelists[node].next,
1421 struct page, lru);
1422 list_del(&page->lru);
1423 h->free_huge_pages--;
1424 h->free_huge_pages_node[node]--;
1425 if (acct_surplus) {
1426 h->surplus_huge_pages--;
1427 h->surplus_huge_pages_node[node]--;
1428 }
1429 update_and_free_page(h, page);
1430 ret = 1;
1431 break;
1432 }
1433 }
1434
1435 return ret;
1436}
1437
/*
 * Dissolve a given free hugepage into free buddy pages.  This function
 * does nothing for in-use (including surplus) hugepages.  Returns -EBUSY
 * if the number of free hugepages would fall below the number of
 * reserved hugepages.
 */
1444static int dissolve_free_huge_page(struct page *page)
1445{
1446 int rc = 0;
1447
1448 spin_lock(&hugetlb_lock);
1449 if (PageHuge(page) && !page_count(page)) {
1450 struct page *head = compound_head(page);
1451 struct hstate *h = page_hstate(head);
1452 int nid = page_to_nid(head);
1453 if (h->free_huge_pages - h->resv_huge_pages == 0) {
1454 rc = -EBUSY;
1455 goto out;
1456 }
1457 list_del(&head->lru);
1458 h->free_huge_pages--;
1459 h->free_huge_pages_node[nid]--;
1460 h->max_huge_pages--;
1461 update_and_free_page(h, head);
1462 }
1463out:
1464 spin_unlock(&hugetlb_lock);
1465 return rc;
1466}
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1477{
1478 unsigned long pfn;
1479 struct page *page;
1480 int rc = 0;
1481
1482 if (!hugepages_supported())
1483 return rc;
1484
1485 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1486 page = pfn_to_page(pfn);
1487 if (PageHuge(page) && !page_count(page)) {
1488 rc = dissolve_free_huge_page(page);
1489 if (rc)
1490 break;
1491 }
1492 }
1493
1494 return rc;
1495}
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
1507 struct vm_area_struct *vma, unsigned long addr, int nid)
1508{
1509 int order = huge_page_order(h);
1510 gfp_t gfp = htlb_alloc_mask(h)|__GFP_COMP|__GFP_REPEAT|__GFP_NOWARN;
1511 unsigned int cpuset_mems_cookie;
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522 if (!IS_ENABLED(CONFIG_NUMA) || !vma) {
1523
1524
1525
1526
1527
1528 if (nid != NUMA_NO_NODE)
1529 gfp |= __GFP_THISNODE;
1530
1531
1532
1533
1534 return alloc_pages_node(nid, gfp, order);
1535 }
1536
1537
1538
1539
1540
1541
1542 do {
1543 struct page *page;
1544 struct mempolicy *mpol;
1545 struct zonelist *zl;
1546 nodemask_t *nodemask;
1547
1548 cpuset_mems_cookie = read_mems_allowed_begin();
1549 zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);
1550 mpol_cond_put(mpol);
1551 page = __alloc_pages_nodemask(gfp, order, zl, nodemask);
1552 if (page)
1553 return page;
1554 } while (read_mems_allowed_retry(cpuset_mems_cookie));
1555
1556 return NULL;
1557}
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571static struct page *__alloc_buddy_huge_page(struct hstate *h,
1572 struct vm_area_struct *vma, unsigned long addr, int nid)
1573{
1574 struct page *page;
1575 unsigned int r_nid;
1576
1577 if (hstate_is_gigantic(h))
1578 return NULL;
1579
1580
1581
1582
1583
1584
1585 if (vma || (addr != -1)) {
1586 VM_WARN_ON_ONCE(addr == -1);
1587 VM_WARN_ON_ONCE(nid != NUMA_NO_NODE);
1588 }
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612 spin_lock(&hugetlb_lock);
1613 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1614 spin_unlock(&hugetlb_lock);
1615 return NULL;
1616 } else {
1617 h->nr_huge_pages++;
1618 h->surplus_huge_pages++;
1619 }
1620 spin_unlock(&hugetlb_lock);
1621
1622 page = __hugetlb_alloc_buddy_huge_page(h, vma, addr, nid);
1623
1624 spin_lock(&hugetlb_lock);
1625 if (page) {
1626 INIT_LIST_HEAD(&page->lru);
1627 r_nid = page_to_nid(page);
1628 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1629 set_hugetlb_cgroup(page, NULL);
1630
1631
1632
1633 h->nr_huge_pages_node[r_nid]++;
1634 h->surplus_huge_pages_node[r_nid]++;
1635 __count_vm_event(HTLB_BUDDY_PGALLOC);
1636 } else {
1637 h->nr_huge_pages--;
1638 h->surplus_huge_pages--;
1639 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1640 }
1641 spin_unlock(&hugetlb_lock);
1642
1643 return page;
1644}
1645
1646
1647
1648
1649
1650
1651static
1652struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid)
1653{
1654 unsigned long addr = -1;
1655
1656 return __alloc_buddy_huge_page(h, NULL, addr, nid);
1657}
1658
1659
1660
1661
1662static
1663struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
1664 struct vm_area_struct *vma, unsigned long addr)
1665{
1666 return __alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE);
1667}
1668
1669
1670
1671
1672
1673
1674struct page *alloc_huge_page_node(struct hstate *h, int nid)
1675{
1676 struct page *page = NULL;
1677
1678 spin_lock(&hugetlb_lock);
1679 if (h->free_huge_pages - h->resv_huge_pages > 0)
1680 page = dequeue_huge_page_node(h, nid);
1681 spin_unlock(&hugetlb_lock);
1682
1683 if (!page)
1684 page = __alloc_buddy_huge_page_no_mpol(h, nid);
1685
1686 return page;
1687}
1688
1689
1690
1691
1692
1693static int gather_surplus_pages(struct hstate *h, int delta)
1694{
1695 struct list_head surplus_list;
1696 struct page *page, *tmp;
1697 int ret, i;
1698 int needed, allocated;
1699 bool alloc_ok = true;
1700
1701 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1702 if (needed <= 0) {
1703 h->resv_huge_pages += delta;
1704 return 0;
1705 }
1706
1707 allocated = 0;
1708 INIT_LIST_HEAD(&surplus_list);
1709
1710 ret = -ENOMEM;
1711retry:
1712 spin_unlock(&hugetlb_lock);
1713 for (i = 0; i < needed; i++) {
1714 page = __alloc_buddy_huge_page_no_mpol(h, NUMA_NO_NODE);
1715 if (!page) {
1716 alloc_ok = false;
1717 break;
1718 }
1719 list_add(&page->lru, &surplus_list);
1720 }
1721 allocated += i;
1722
1723
1724
1725
1726
1727 spin_lock(&hugetlb_lock);
1728 needed = (h->resv_huge_pages + delta) -
1729 (h->free_huge_pages + allocated);
1730 if (needed > 0) {
1731 if (alloc_ok)
1732 goto retry;
1733
1734
1735
1736
1737
1738 goto free;
1739 }
1740
1741
1742
1743
1744
1745
1746
1747
1748 needed += allocated;
1749 h->resv_huge_pages += delta;
1750 ret = 0;
1751
1752
1753 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1754 if ((--needed) < 0)
1755 break;
1756
1757
1758
1759
1760 put_page_testzero(page);
1761 VM_BUG_ON_PAGE(page_count(page), page);
1762 enqueue_huge_page(h, page);
1763 }
1764free:
1765 spin_unlock(&hugetlb_lock);
1766
1767
1768 list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1769 put_page(page);
1770 spin_lock(&hugetlb_lock);
1771
1772 return ret;
1773}
1774
1775
1776
1777
1778
1779
1780
1781static void return_unused_surplus_pages(struct hstate *h,
1782 unsigned long unused_resv_pages)
1783{
1784 unsigned long nr_pages;
1785
1786
1787 h->resv_huge_pages -= unused_resv_pages;
1788
1789
1790 if (hstate_is_gigantic(h))
1791 return;
1792
1793 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803 while (nr_pages--) {
1804 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1805 break;
1806 cond_resched_lock(&hugetlb_lock);
1807 }
1808}
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832
1833
1834
1835enum vma_resv_mode {
1836 VMA_NEEDS_RESV,
1837 VMA_COMMIT_RESV,
1838 VMA_END_RESV,
1839 VMA_ADD_RESV,
1840};
1841static long __vma_reservation_common(struct hstate *h,
1842 struct vm_area_struct *vma, unsigned long addr,
1843 enum vma_resv_mode mode)
1844{
1845 struct resv_map *resv;
1846 pgoff_t idx;
1847 long ret;
1848
1849 resv = vma_resv_map(vma);
1850 if (!resv)
1851 return 1;
1852
1853 idx = vma_hugecache_offset(h, vma, addr);
1854 switch (mode) {
1855 case VMA_NEEDS_RESV:
1856 ret = region_chg(resv, idx, idx + 1);
1857 break;
1858 case VMA_COMMIT_RESV:
1859 ret = region_add(resv, idx, idx + 1);
1860 break;
1861 case VMA_END_RESV:
1862 region_abort(resv, idx, idx + 1);
1863 ret = 0;
1864 break;
1865 case VMA_ADD_RESV:
1866 if (vma->vm_flags & VM_MAYSHARE)
1867 ret = region_add(resv, idx, idx + 1);
1868 else {
1869 region_abort(resv, idx, idx + 1);
1870 ret = region_del(resv, idx, idx + 1);
1871 }
1872 break;
1873 default:
1874 BUG();
1875 }
1876
1877 if (vma->vm_flags & VM_MAYSHARE)
1878 return ret;
1879 else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893 if (ret)
1894 return 0;
1895 else
1896 return 1;
1897 }
1898 else
1899 return ret < 0 ? ret : 0;
1900}
1901
1902static long vma_needs_reservation(struct hstate *h,
1903 struct vm_area_struct *vma, unsigned long addr)
1904{
1905 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1906}
1907
1908static long vma_commit_reservation(struct hstate *h,
1909 struct vm_area_struct *vma, unsigned long addr)
1910{
1911 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1912}
1913
1914static void vma_end_reservation(struct hstate *h,
1915 struct vm_area_struct *vma, unsigned long addr)
1916{
1917 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1918}
1919
1920static long vma_add_reservation(struct hstate *h,
1921 struct vm_area_struct *vma, unsigned long addr)
1922{
1923 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
1924}
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936
1937static void restore_reserve_on_error(struct hstate *h,
1938 struct vm_area_struct *vma, unsigned long address,
1939 struct page *page)
1940{
1941 if (unlikely(PagePrivate(page))) {
1942 long rc = vma_needs_reservation(h, vma, address);
1943
1944 if (unlikely(rc < 0)) {
1945
1946
1947
1948
1949
1950
1951
1952
1953
1954
1955
1956 ClearPagePrivate(page);
1957 } else if (rc) {
1958 rc = vma_add_reservation(h, vma, address);
1959 if (unlikely(rc < 0))
1960
1961
1962
1963
1964 ClearPagePrivate(page);
1965 } else
1966 vma_end_reservation(h, vma, address);
1967 }
1968}
1969
1970struct page *alloc_huge_page(struct vm_area_struct *vma,
1971 unsigned long addr, int avoid_reserve)
1972{
1973 struct hugepage_subpool *spool = subpool_vma(vma);
1974 struct hstate *h = hstate_vma(vma);
1975 struct page *page;
1976 long map_chg, map_commit;
1977 long gbl_chg;
1978 int ret, idx;
1979 struct hugetlb_cgroup *h_cg;
1980
1981 idx = hstate_index(h);
1982
1983
1984
1985
1986
1987 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
1988 if (map_chg < 0)
1989 return ERR_PTR(-ENOMEM);
1990
1991
1992
1993
1994
1995
1996
1997
1998 if (map_chg || avoid_reserve) {
1999 gbl_chg = hugepage_subpool_get_pages(spool, 1);
2000 if (gbl_chg < 0) {
2001 vma_end_reservation(h, vma, addr);
2002 return ERR_PTR(-ENOSPC);
2003 }
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013 if (avoid_reserve)
2014 gbl_chg = 1;
2015 }
2016
2017 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2018 if (ret)
2019 goto out_subpool_put;
2020
2021 spin_lock(&hugetlb_lock);
2022
2023
2024
2025
2026
2027 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2028 if (!page) {
2029 spin_unlock(&hugetlb_lock);
2030 page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
2031 if (!page)
2032 goto out_uncharge_cgroup;
2033 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2034 SetPagePrivate(page);
2035 h->resv_huge_pages--;
2036 }
2037 spin_lock(&hugetlb_lock);
2038 list_move(&page->lru, &h->hugepage_activelist);
2039
2040 }
2041 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2042 spin_unlock(&hugetlb_lock);
2043
2044 set_page_private(page, (unsigned long)spool);
2045
2046 map_commit = vma_commit_reservation(h, vma, addr);
2047 if (unlikely(map_chg > map_commit)) {
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057 long rsv_adjust;
2058
2059 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2060 hugetlb_acct_memory(h, -rsv_adjust);
2061 }
2062 return page;
2063
2064out_uncharge_cgroup:
2065 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2066out_subpool_put:
2067 if (map_chg || avoid_reserve)
2068 hugepage_subpool_put_pages(spool, 1);
2069 vma_end_reservation(h, vma, addr);
2070 return ERR_PTR(-ENOSPC);
2071}
2072
2073
2074
2075
2076
2077
2078struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
2079 unsigned long addr, int avoid_reserve)
2080{
2081 struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
2082 if (IS_ERR(page))
2083 page = NULL;
2084 return page;
2085}
2086
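/*
 * Allocate one boot-time huge page straight from memblock (used for
 * gigantic hstates that cannot come from the buddy allocator) and queue
 * it on huge_boot_pages; gather_bootmem_prealloc() finishes the setup
 * once the page allocator is up.
 */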
2087int __weak alloc_bootmem_huge_page(struct hstate *h)
2088{
2089 struct huge_bootmem_page *m;
2090 int nr_nodes, node;
2091
2092 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2093 void *addr;
2094
2095 addr = memblock_virt_alloc_try_nid_nopanic(
2096 huge_page_size(h), huge_page_size(h),
2097 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
2098 if (addr) {
2099
2100
2101
2102
2103
2104 m = addr;
2105 goto found;
2106 }
2107 }
2108 return 0;
2109
2110found:
2111 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2112
2113 list_add(&m->list, &huge_boot_pages);
2114 m->hstate = h;
2115 return 1;
2116}
2117
2118static void __init prep_compound_huge_page(struct page *page,
2119 unsigned int order)
2120{
2121 if (unlikely(order > (MAX_ORDER - 1)))
2122 prep_compound_gigantic_page(page, order);
2123 else
2124 prep_compound_page(page, order);
2125}
2126
2127
2128static void __init gather_bootmem_prealloc(void)
2129{
2130 struct huge_bootmem_page *m;
2131
2132 list_for_each_entry(m, &huge_boot_pages, list) {
2133 struct hstate *h = m->hstate;
2134 struct page *page;
2135
2136#ifdef CONFIG_HIGHMEM
2137 page = pfn_to_page(m->phys >> PAGE_SHIFT);
2138 memblock_free_late(__pa(m),
2139 sizeof(struct huge_bootmem_page));
2140#else
2141 page = virt_to_page(m);
2142#endif
2143 WARN_ON(page_count(page) != 1);
2144 prep_compound_huge_page(page, h->order);
2145 WARN_ON(PageReserved(page));
2146 prep_new_huge_page(h, page, page_to_nid(page));
2147
2148
2149
2150
2151
2152
2153 if (hstate_is_gigantic(h))
2154 adjust_managed_page_count(page, 1 << h->order);
2155 }
2156}
2157
2158static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2159{
2160 unsigned long i;
2161
2162 for (i = 0; i < h->max_huge_pages; ++i) {
2163 if (hstate_is_gigantic(h)) {
2164 if (!alloc_bootmem_huge_page(h))
2165 break;
2166 } else if (!alloc_fresh_huge_page(h,
2167 &node_states[N_MEMORY]))
2168 break;
2169 }
2170 h->max_huge_pages = i;
2171}
2172
2173static void __init hugetlb_init_hstates(void)
2174{
2175 struct hstate *h;
2176
2177 for_each_hstate(h) {
2178 if (minimum_order > huge_page_order(h))
2179 minimum_order = huge_page_order(h);
2180
2181
2182 if (!hstate_is_gigantic(h))
2183 hugetlb_hstate_alloc_pages(h);
2184 }
2185 VM_BUG_ON(minimum_order == UINT_MAX);
2186}
2187
2188static char * __init memfmt(char *buf, unsigned long n)
2189{
2190 if (n >= (1UL << 30))
2191 sprintf(buf, "%lu GB", n >> 30);
2192 else if (n >= (1UL << 20))
2193 sprintf(buf, "%lu MB", n >> 20);
2194 else
2195 sprintf(buf, "%lu KB", n >> 10);
2196 return buf;
2197}
2198
2199static void __init report_hugepages(void)
2200{
2201 struct hstate *h;
2202
2203 for_each_hstate(h) {
2204 char buf[32];
2205 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2206 memfmt(buf, huge_page_size(h)),
2207 h->free_huge_pages);
2208 }
2209}
2210
2211#ifdef CONFIG_HIGHMEM
2212static void try_to_free_low(struct hstate *h, unsigned long count,
2213 nodemask_t *nodes_allowed)
2214{
2215 int i;
2216
2217 if (hstate_is_gigantic(h))
2218 return;
2219
2220 for_each_node_mask(i, *nodes_allowed) {
2221 struct page *page, *next;
2222 struct list_head *freel = &h->hugepage_freelists[i];
2223 list_for_each_entry_safe(page, next, freel, lru) {
2224 if (count >= h->nr_huge_pages)
2225 return;
2226 if (PageHighMem(page))
2227 continue;
2228 list_del(&page->lru);
2229 update_and_free_page(h, page);
2230 h->free_huge_pages--;
2231 h->free_huge_pages_node[page_to_nid(page)]--;
2232 }
2233 }
2234}
2235#else
2236static inline void try_to_free_low(struct hstate *h, unsigned long count,
2237 nodemask_t *nodes_allowed)
2238{
2239}
2240#endif
2241
2242
2243
2244
2245
2246
2247static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2248 int delta)
2249{
2250 int nr_nodes, node;
2251
2252 VM_BUG_ON(delta != -1 && delta != 1);
2253
2254 if (delta < 0) {
2255 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2256 if (h->surplus_huge_pages_node[node])
2257 goto found;
2258 }
2259 } else {
2260 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2261 if (h->surplus_huge_pages_node[node] <
2262 h->nr_huge_pages_node[node])
2263 goto found;
2264 }
2265 }
2266 return 0;
2267
2268found:
2269 h->surplus_huge_pages += delta;
2270 h->surplus_huge_pages_node[node] += delta;
2271 return 1;
2272}
2273
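/* Pages in the pool that are not surplus, i.e. not temporary overcommit. */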
2274#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2275static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
2276 nodemask_t *nodes_allowed)
2277{
2278 unsigned long min_count, ret;
2279
2280 if (hstate_is_gigantic(h) && !gigantic_page_supported())
2281 return h->max_huge_pages;
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294 spin_lock(&hugetlb_lock);
2295 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2296 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2297 break;
2298 }
2299
2300 while (count > persistent_huge_pages(h)) {
2301
2302
2303
2304
2305
2306 spin_unlock(&hugetlb_lock);
2307
2308
2309 cond_resched();
2310
2311 if (hstate_is_gigantic(h))
2312 ret = alloc_fresh_gigantic_page(h, nodes_allowed);
2313 else
2314 ret = alloc_fresh_huge_page(h, nodes_allowed);
2315 spin_lock(&hugetlb_lock);
2316 if (!ret)
2317 goto out;
2318
2319
2320 if (signal_pending(current))
2321 goto out;
2322 }
2323
2324
2325
2326
2327
2328
2329
2330
2331
2332
2333
2334
2335
2336
2337
2338
2339 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2340 min_count = max(count, min_count);
2341 try_to_free_low(h, min_count, nodes_allowed);
2342 while (min_count < persistent_huge_pages(h)) {
2343 if (!free_pool_huge_page(h, nodes_allowed, 0))
2344 break;
2345 cond_resched_lock(&hugetlb_lock);
2346 }
2347 while (count < persistent_huge_pages(h)) {
2348 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2349 break;
2350 }
2351out:
2352 ret = persistent_huge_pages(h);
2353 spin_unlock(&hugetlb_lock);
2354 return ret;
2355}
2356
2357#define HSTATE_ATTR_RO(_name) \
2358 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2359
2360#define HSTATE_ATTR(_name) \
2361 static struct kobj_attribute _name##_attr = \
2362 __ATTR(_name, 0644, _name##_show, _name##_store)
2363
2364static struct kobject *hugepages_kobj;
2365static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2366
2367static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2368
2369static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2370{
2371 int i;
2372
2373 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2374 if (hstate_kobjs[i] == kobj) {
2375 if (nidp)
2376 *nidp = NUMA_NO_NODE;
2377 return &hstates[i];
2378 }
2379
2380 return kobj_to_node_hstate(kobj, nidp);
2381}
2382
2383static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2384 struct kobj_attribute *attr, char *buf)
2385{
2386 struct hstate *h;
2387 unsigned long nr_huge_pages;
2388 int nid;
2389
2390 h = kobj_to_hstate(kobj, &nid);
2391 if (nid == NUMA_NO_NODE)
2392 nr_huge_pages = h->nr_huge_pages;
2393 else
2394 nr_huge_pages = h->nr_huge_pages_node[nid];
2395
2396 return sprintf(buf, "%lu\n", nr_huge_pages);
2397}
2398
2399static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2400 struct hstate *h, int nid,
2401 unsigned long count, size_t len)
2402{
2403 int err;
2404 NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2405
2406 if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2407 err = -EINVAL;
2408 goto out;
2409 }
2410
2411 if (nid == NUMA_NO_NODE) {
2412
2413
2414
2415 if (!(obey_mempolicy &&
2416 init_nodemask_of_mempolicy(nodes_allowed))) {
2417 NODEMASK_FREE(nodes_allowed);
2418 nodes_allowed = &node_states[N_MEMORY];
2419 }
2420 } else if (nodes_allowed) {
2421
2422
2423
2424
2425 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2426 init_nodemask_of_node(nodes_allowed, nid);
2427 } else
2428 nodes_allowed = &node_states[N_MEMORY];
2429
2430 h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2431
2432 if (nodes_allowed != &node_states[N_MEMORY])
2433 NODEMASK_FREE(nodes_allowed);
2434
2435 return len;
2436out:
2437 NODEMASK_FREE(nodes_allowed);
2438 return err;
2439}
2440
2441static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2442 struct kobject *kobj, const char *buf,
2443 size_t len)
2444{
2445 struct hstate *h;
2446 unsigned long count;
2447 int nid;
2448 int err;
2449
2450 err = kstrtoul(buf, 10, &count);
2451 if (err)
2452 return err;
2453
2454 h = kobj_to_hstate(kobj, &nid);
2455 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2456}
2457
2458static ssize_t nr_hugepages_show(struct kobject *kobj,
2459 struct kobj_attribute *attr, char *buf)
2460{
2461 return nr_hugepages_show_common(kobj, attr, buf);
2462}
2463
2464static ssize_t nr_hugepages_store(struct kobject *kobj,
2465 struct kobj_attribute *attr, const char *buf, size_t len)
2466{
2467 return nr_hugepages_store_common(false, kobj, buf, len);
2468}
2469HSTATE_ATTR(nr_hugepages);
2470
2471#ifdef CONFIG_NUMA
2472
2473
2474
2475
2476
2477static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2478 struct kobj_attribute *attr, char *buf)
2479{
2480 return nr_hugepages_show_common(kobj, attr, buf);
2481}
2482
2483static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2484 struct kobj_attribute *attr, const char *buf, size_t len)
2485{
2486 return nr_hugepages_store_common(true, kobj, buf, len);
2487}
2488HSTATE_ATTR(nr_hugepages_mempolicy);
2489#endif
2490
2491
2492static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2493 struct kobj_attribute *attr, char *buf)
2494{
2495 struct hstate *h = kobj_to_hstate(kobj, NULL);
2496 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2497}
2498
2499static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2500 struct kobj_attribute *attr, const char *buf, size_t count)
2501{
2502 int err;
2503 unsigned long input;
2504 struct hstate *h = kobj_to_hstate(kobj, NULL);
2505
2506 if (hstate_is_gigantic(h))
2507 return -EINVAL;
2508
2509 err = kstrtoul(buf, 10, &input);
2510 if (err)
2511 return err;
2512
2513 spin_lock(&hugetlb_lock);
2514 h->nr_overcommit_huge_pages = input;
2515 spin_unlock(&hugetlb_lock);
2516
2517 return count;
2518}
2519HSTATE_ATTR(nr_overcommit_hugepages);
2520
2521static ssize_t free_hugepages_show(struct kobject *kobj,
2522 struct kobj_attribute *attr, char *buf)
2523{
2524 struct hstate *h;
2525 unsigned long free_huge_pages;
2526 int nid;
2527
2528 h = kobj_to_hstate(kobj, &nid);
2529 if (nid == NUMA_NO_NODE)
2530 free_huge_pages = h->free_huge_pages;
2531 else
2532 free_huge_pages = h->free_huge_pages_node[nid];
2533
2534 return sprintf(buf, "%lu\n", free_huge_pages);
2535}
2536HSTATE_ATTR_RO(free_hugepages);
2537
2538static ssize_t resv_hugepages_show(struct kobject *kobj,
2539 struct kobj_attribute *attr, char *buf)
2540{
2541 struct hstate *h = kobj_to_hstate(kobj, NULL);
2542 return sprintf(buf, "%lu\n", h->resv_huge_pages);
2543}
2544HSTATE_ATTR_RO(resv_hugepages);
2545
2546static ssize_t surplus_hugepages_show(struct kobject *kobj,
2547 struct kobj_attribute *attr, char *buf)
2548{
2549 struct hstate *h;
2550 unsigned long surplus_huge_pages;
2551 int nid;
2552
2553 h = kobj_to_hstate(kobj, &nid);
2554 if (nid == NUMA_NO_NODE)
2555 surplus_huge_pages = h->surplus_huge_pages;
2556 else
2557 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2558
2559 return sprintf(buf, "%lu\n", surplus_huge_pages);
2560}
2561HSTATE_ATTR_RO(surplus_hugepages);
2562
2563static struct attribute *hstate_attrs[] = {
2564 &nr_hugepages_attr.attr,
2565 &nr_overcommit_hugepages_attr.attr,
2566 &free_hugepages_attr.attr,
2567 &resv_hugepages_attr.attr,
2568 &surplus_hugepages_attr.attr,
2569#ifdef CONFIG_NUMA
2570 &nr_hugepages_mempolicy_attr.attr,
2571#endif
2572 NULL,
2573};
2574
2575static struct attribute_group hstate_attr_group = {
2576 .attrs = hstate_attrs,
2577};
2578
2579static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2580 struct kobject **hstate_kobjs,
2581 struct attribute_group *hstate_attr_group)
2582{
2583 int retval;
2584 int hi = hstate_index(h);
2585
2586 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2587 if (!hstate_kobjs[hi])
2588 return -ENOMEM;
2589
2590 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2591 if (retval)
2592 kobject_put(hstate_kobjs[hi]);
2593
2594 return retval;
2595}
2596
2597static void __init hugetlb_sysfs_init(void)
2598{
2599 struct hstate *h;
2600 int err;
2601
2602 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2603 if (!hugepages_kobj)
2604 return;
2605
2606 for_each_hstate(h) {
2607 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2608 hstate_kobjs, &hstate_attr_group);
2609 if (err)
2610 pr_err("Hugetlb: Unable to add hstate %s", h->name);
2611 }
2612}
2613
2614#ifdef CONFIG_NUMA
2615
2616
2617
2618
2619
2620
2621
2622
2623struct node_hstate {
2624 struct kobject *hugepages_kobj;
2625 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2626};
2627static struct node_hstate node_hstates[MAX_NUMNODES];
2628
2629
2630
2631
2632static struct attribute *per_node_hstate_attrs[] = {
2633 &nr_hugepages_attr.attr,
2634 &free_hugepages_attr.attr,
2635 &surplus_hugepages_attr.attr,
2636 NULL,
2637};
2638
2639static struct attribute_group per_node_hstate_attr_group = {
2640 .attrs = per_node_hstate_attrs,
2641};
2642
2643
2644
2645
2646
2647static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2648{
2649 int nid;
2650
2651 for (nid = 0; nid < nr_node_ids; nid++) {
2652 struct node_hstate *nhs = &node_hstates[nid];
2653 int i;
2654 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2655 if (nhs->hstate_kobjs[i] == kobj) {
2656 if (nidp)
2657 *nidp = nid;
2658 return &hstates[i];
2659 }
2660 }
2661
2662 BUG();
2663 return NULL;
2664}
2665
2666
2667
2668
2669
2670static void hugetlb_unregister_node(struct node *node)
2671{
2672 struct hstate *h;
2673 struct node_hstate *nhs = &node_hstates[node->dev.id];
2674
2675 if (!nhs->hugepages_kobj)
2676 return;
2677
2678 for_each_hstate(h) {
2679 int idx = hstate_index(h);
2680 if (nhs->hstate_kobjs[idx]) {
2681 kobject_put(nhs->hstate_kobjs[idx]);
2682 nhs->hstate_kobjs[idx] = NULL;
2683 }
2684 }
2685
2686 kobject_put(nhs->hugepages_kobj);
2687 nhs->hugepages_kobj = NULL;
2688}
2689
2690
2691
2692
2693
2694
2695static void hugetlb_register_node(struct node *node)
2696{
2697 struct hstate *h;
2698 struct node_hstate *nhs = &node_hstates[node->dev.id];
2699 int err;
2700
2701 if (nhs->hugepages_kobj)
2702 return;
2703
2704 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2705 &node->dev.kobj);
2706 if (!nhs->hugepages_kobj)
2707 return;
2708
2709 for_each_hstate(h) {
2710 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2711 nhs->hstate_kobjs,
2712 &per_node_hstate_attr_group);
2713 if (err) {
2714 pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2715 h->name, node->dev.id);
2716 hugetlb_unregister_node(node);
2717 break;
2718 }
2719 }
2720}
2721
2722
2723
2724
2725
2726
2727static void __init hugetlb_register_all_nodes(void)
2728{
2729 int nid;
2730
2731 for_each_node_state(nid, N_MEMORY) {
2732 struct node *node = node_devices[nid];
2733 if (node->dev.id == nid)
2734 hugetlb_register_node(node);
2735 }
2736
2737
2738
2739
2740
2741 register_hugetlbfs_with_node(hugetlb_register_node,
2742 hugetlb_unregister_node);
2743}
2744#else
2745
2746static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2747{
2748 BUG();
2749 if (nidp)
2750 *nidp = -1;
2751 return NULL;
2752}
2753
2754static void hugetlb_register_all_nodes(void) { }
2755
2756#endif
2757
2758static int __init hugetlb_init(void)
2759{
2760 int i;
2761
2762 if (!hugepages_supported())
2763 return 0;
2764
2765 if (!size_to_hstate(default_hstate_size)) {
2766 default_hstate_size = HPAGE_SIZE;
2767 if (!size_to_hstate(default_hstate_size))
2768 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2769 }
2770 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2771 if (default_hstate_max_huge_pages) {
2772 if (!default_hstate.max_huge_pages)
2773 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2774 }
2775
2776 hugetlb_init_hstates();
2777 gather_bootmem_prealloc();
2778 report_hugepages();
2779
2780 hugetlb_sysfs_init();
2781 hugetlb_register_all_nodes();
2782 hugetlb_cgroup_file_init();
2783
2784#ifdef CONFIG_SMP
2785 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2786#else
2787 num_fault_mutexes = 1;
2788#endif
2789 hugetlb_fault_mutex_table =
2790 kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2791 BUG_ON(!hugetlb_fault_mutex_table);
2792
2793 for (i = 0; i < num_fault_mutexes; i++)
2794 mutex_init(&hugetlb_fault_mutex_table[i]);
2795 return 0;
2796}
2797subsys_initcall(hugetlb_init);
2798
2799
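/* Should be called on processing a hugepagesz=... option */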
2800void __init hugetlb_bad_size(void)
2801{
2802 parsed_valid_hugepagesz = false;
2803}
2804
2805void __init hugetlb_add_hstate(unsigned int order)
2806{
2807 struct hstate *h;
2808 unsigned long i;
2809
2810 if (size_to_hstate(PAGE_SIZE << order)) {
2811 pr_warn("hugepagesz= specified twice, ignoring\n");
2812 return;
2813 }
2814 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2815 BUG_ON(order == 0);
2816 h = &hstates[hugetlb_max_hstate++];
2817 h->order = order;
2818 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2819 h->nr_huge_pages = 0;
2820 h->free_huge_pages = 0;
2821 for (i = 0; i < MAX_NUMNODES; ++i)
2822 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2823 INIT_LIST_HEAD(&h->hugepage_activelist);
2824 h->next_nid_to_alloc = first_memory_node;
2825 h->next_nid_to_free = first_memory_node;
2826 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2827 huge_page_size(h)/1024);
2828
2829 parsed_hstate = h;
2830}
2831
2832static int __init hugetlb_nrpages_setup(char *s)
2833{
2834 unsigned long *mhp;
2835 static unsigned long *last_mhp;
2836
2837 if (!parsed_valid_hugepagesz) {
2838 pr_warn("hugepages = %s preceded by "
2839 "an unsupported hugepagesz, ignoring\n", s);
2840 parsed_valid_hugepagesz = true;
2841 return 1;
2842 }
2843
2844
2845
2846
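	/*
	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
	 * yet, so this hugepages= parameter goes to the "default hstate".
	 */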
2847 else if (!hugetlb_max_hstate)
2848 mhp = &default_hstate_max_huge_pages;
2849 else
2850 mhp = &parsed_hstate->max_huge_pages;
2851
2852 if (mhp == last_mhp) {
2853 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2854 return 1;
2855 }
2856
2857 if (sscanf(s, "%lu", mhp) <= 0)
2858 *mhp = 0;
2859
2860
2861
2862
2863
2864
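	/*
	 * Global state is always initialized later in hugetlb_init.
	 * But we need to allocate >= MAX_ORDER hstates here early to still
	 * use the bootmem allocator.
	 */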
2865 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2866 hugetlb_hstate_alloc_pages(parsed_hstate);
2867
2868 last_mhp = mhp;
2869
2870 return 1;
2871}
2872__setup("hugepages=", hugetlb_nrpages_setup);
2873
2874static int __init hugetlb_default_setup(char *s)
2875{
2876 default_hstate_size = memparse(s, &s);
2877 return 1;
2878}
2879__setup("default_hugepagesz=", hugetlb_default_setup);
2880
2881static unsigned int cpuset_mems_nr(unsigned int *array)
2882{
2883 int node;
2884 unsigned int nr = 0;
2885
2886 for_each_node_mask(node, cpuset_current_mems_allowed)
2887 nr += array[node];
2888
2889 return nr;
2890}
2891
2892#ifdef CONFIG_SYSCTL
2893static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2894 struct ctl_table *table, int write,
2895 void __user *buffer, size_t *length, loff_t *ppos)
2896{
2897 struct hstate *h = &default_hstate;
2898 unsigned long tmp = h->max_huge_pages;
2899 int ret;
2900
2901 if (!hugepages_supported())
2902 return -EOPNOTSUPP;
2903
2904 table->data = &tmp;
2905 table->maxlen = sizeof(unsigned long);
2906 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2907 if (ret)
2908 goto out;
2909
2910 if (write)
2911 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2912 NUMA_NO_NODE, tmp, *length);
2913out:
2914 return ret;
2915}
2916
2917int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2918 void __user *buffer, size_t *length, loff_t *ppos)
2919{
2920
2921 return hugetlb_sysctl_handler_common(false, table, write,
2922 buffer, length, ppos);
2923}
2924
2925#ifdef CONFIG_NUMA
2926int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2927 void __user *buffer, size_t *length, loff_t *ppos)
2928{
2929 return hugetlb_sysctl_handler_common(true, table, write,
2930 buffer, length, ppos);
2931}
2932#endif
2933
2934int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2935 void __user *buffer,
2936 size_t *length, loff_t *ppos)
2937{
2938 struct hstate *h = &default_hstate;
2939 unsigned long tmp;
2940 int ret;
2941
2942 if (!hugepages_supported())
2943 return -EOPNOTSUPP;
2944
2945 tmp = h->nr_overcommit_huge_pages;
2946
2947 if (write && hstate_is_gigantic(h))
2948 return -EINVAL;
2949
2950 table->data = &tmp;
2951 table->maxlen = sizeof(unsigned long);
2952 ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2953 if (ret)
2954 goto out;
2955
2956 if (write) {
2957 spin_lock(&hugetlb_lock);
2958 h->nr_overcommit_huge_pages = tmp;
2959 spin_unlock(&hugetlb_lock);
2960 }
2961out:
2962 return ret;
2963}
2964
2965#endif
2966
2967void hugetlb_report_meminfo(struct seq_file *m)
2968{
2969 struct hstate *h = &default_hstate;
2970 if (!hugepages_supported())
2971 return;
2972 seq_printf(m,
2973 "HugePages_Total: %5lu\n"
2974 "HugePages_Free: %5lu\n"
2975 "HugePages_Rsvd: %5lu\n"
2976 "HugePages_Surp: %5lu\n"
2977 "Hugepagesize: %8lu kB\n",
2978 h->nr_huge_pages,
2979 h->free_huge_pages,
2980 h->resv_huge_pages,
2981 h->surplus_huge_pages,
2982 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2983}
2984
2985int hugetlb_report_node_meminfo(int nid, char *buf)
2986{
2987 struct hstate *h = &default_hstate;
2988 if (!hugepages_supported())
2989 return 0;
2990 return sprintf(buf,
2991 "Node %d HugePages_Total: %5u\n"
2992 "Node %d HugePages_Free: %5u\n"
2993 "Node %d HugePages_Surp: %5u\n",
2994 nid, h->nr_huge_pages_node[nid],
2995 nid, h->free_huge_pages_node[nid],
2996 nid, h->surplus_huge_pages_node[nid]);
2997}
2998
2999void hugetlb_show_meminfo(void)
3000{
3001 struct hstate *h;
3002 int nid;
3003
3004 if (!hugepages_supported())
3005 return;
3006
3007 for_each_node_state(nid, N_MEMORY)
3008 for_each_hstate(h)
3009 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3010 nid,
3011 h->nr_huge_pages_node[nid],
3012 h->free_huge_pages_node[nid],
3013 h->surplus_huge_pages_node[nid],
3014 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3015}
3016
3017void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3018{
3019 seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3020 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3021}
3022
3023
3024unsigned long hugetlb_total_pages(void)
3025{
3026 struct hstate *h;
3027 unsigned long nr_total_pages = 0;
3028
3029 for_each_hstate(h)
3030 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3031 return nr_total_pages;
3032}
3033
3034static int hugetlb_acct_memory(struct hstate *h, long delta)
3035{
3036 int ret = -ENOMEM;
3037
3038 spin_lock(&hugetlb_lock);
3039
3040
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
3052
3053
3054
3055
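	/*
	 * When cpusets are configured, a strict hugetlb reservation cannot be
	 * guaranteed: accounting is done against global counters and is not
	 * checked against the current cpuset's page availability.  As a
	 * best-effort fallback, check the request against the pages currently
	 * free in the allowed nodes and undo the surplus if it cannot be
	 * satisfied there.
	 */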
3056 if (delta > 0) {
3057 if (gather_surplus_pages(h, delta) < 0)
3058 goto out;
3059
3060 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
3061 return_unused_surplus_pages(h, delta);
3062 goto out;
3063 }
3064 }
3065
3066 ret = 0;
3067 if (delta < 0)
3068 return_unused_surplus_pages(h, (unsigned long) -delta);
3069
3070out:
3071 spin_unlock(&hugetlb_lock);
3072 return ret;
3073}
3074
3075static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3076{
3077 struct resv_map *resv = vma_resv_map(vma);
3078
3079
3080
3081
3082
3083
3084
3085
3086
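	/*
	 * This new VMA should share its siblings reservation map if present.
	 * The VMA will only ever have a valid reservation map pointer where
	 * it is being copied for another still existing VMA.  As that VMA
	 * has a reference to the reservation map it cannot disappear until
	 * after this open call completes.  It is therefore safe to take a
	 * new reference here without additional locking.
	 */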
3087 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3088 kref_get(&resv->refs);
3089}
3090
3091static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3092{
3093 struct hstate *h = hstate_vma(vma);
3094 struct resv_map *resv = vma_resv_map(vma);
3095 struct hugepage_subpool *spool = subpool_vma(vma);
3096 unsigned long reserve, start, end;
3097 long gbl_reserve;
3098
3099 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3100 return;
3101
3102 start = vma_hugecache_offset(h, vma, vma->vm_start);
3103 end = vma_hugecache_offset(h, vma, vma->vm_end);
3104
3105 reserve = (end - start) - region_count(resv, start, end);
3106
3107 kref_put(&resv->refs, resv_map_release);
3108
3109 if (reserve) {
3110
3111
3112
3113
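	/*
	 * Decrement reserve counts.  The global reserve count may be
	 * adjusted if the subpool has a minimum size.
	 */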
3114 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3115 hugetlb_acct_memory(h, -gbl_reserve);
3116 }
3117}
3118
3119
3120
3121
3122
3123
3124
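/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */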
3125static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3126{
3127 BUG();
3128 return 0;
3129}
3130
3131const struct vm_operations_struct hugetlb_vm_ops = {
3132 .fault = hugetlb_vm_op_fault,
3133 .open = hugetlb_vm_op_open,
3134 .close = hugetlb_vm_op_close,
3135};
3136
3137static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3138 int writable)
3139{
3140 pte_t entry;
3141
3142 if (writable) {
3143 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3144 vma->vm_page_prot)));
3145 } else {
3146 entry = huge_pte_wrprotect(mk_huge_pte(page,
3147 vma->vm_page_prot));
3148 }
3149 entry = pte_mkyoung(entry);
3150 entry = pte_mkhuge(entry);
3151 entry = arch_make_huge_pte(entry, vma, page, writable);
3152
3153 return entry;
3154}
3155
3156static void set_huge_ptep_writable(struct vm_area_struct *vma,
3157 unsigned long address, pte_t *ptep)
3158{
3159 pte_t entry;
3160
3161 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3162 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3163 update_mmu_cache(vma, address, ptep);
3164}
3165
3166static int is_hugetlb_entry_migration(pte_t pte)
3167{
3168 swp_entry_t swp;
3169
3170 if (huge_pte_none(pte) || pte_present(pte))
3171 return 0;
3172 swp = pte_to_swp_entry(pte);
3173 if (non_swap_entry(swp) && is_migration_entry(swp))
3174 return 1;
3175 else
3176 return 0;
3177}
3178
3179static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3180{
3181 swp_entry_t swp;
3182
3183 if (huge_pte_none(pte) || pte_present(pte))
3184 return 0;
3185 swp = pte_to_swp_entry(pte);
3186 if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3187 return 1;
3188 else
3189 return 0;
3190}
3191
3192int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3193 struct vm_area_struct *vma)
3194{
3195 pte_t *src_pte, *dst_pte, entry;
3196 struct page *ptepage;
3197 unsigned long addr;
3198 int cow;
3199 struct hstate *h = hstate_vma(vma);
3200 unsigned long sz = huge_page_size(h);
3201 unsigned long mmun_start;
3202 unsigned long mmun_end;
3203 int ret = 0;
3204
3205 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3206
3207 mmun_start = vma->vm_start;
3208 mmun_end = vma->vm_end;
3209 if (cow)
3210 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
3211
3212 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3213 spinlock_t *src_ptl, *dst_ptl;
3214 src_pte = huge_pte_offset(src, addr);
3215 if (!src_pte)
3216 continue;
3217 dst_pte = huge_pte_alloc(dst, addr, sz);
3218 if (!dst_pte) {
3219 ret = -ENOMEM;
3220 break;
3221 }
3222
3223
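	/* If the pagetables are shared don't copy or take references */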
3224 if (dst_pte == src_pte)
3225 continue;
3226
3227 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3228 src_ptl = huge_pte_lockptr(h, src, src_pte);
3229 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3230 entry = huge_ptep_get(src_pte);
3231 if (huge_pte_none(entry)) {
3232 ;
3233 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3234 is_hugetlb_entry_hwpoisoned(entry))) {
3235 swp_entry_t swp_entry = pte_to_swp_entry(entry);
3236
3237 if (is_write_migration_entry(swp_entry) && cow) {
3238
3239
3240
3241
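	/*
	 * COW mappings require pages in both
	 * parent and child to be set to read.
	 */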
3242 make_migration_entry_read(&swp_entry);
3243 entry = swp_entry_to_pte(swp_entry);
3244 set_huge_pte_at(src, addr, src_pte, entry);
3245 }
3246 set_huge_pte_at(dst, addr, dst_pte, entry);
3247 } else {
3248 if (cow) {
3249 huge_ptep_set_wrprotect(src, addr, src_pte);
3250 mmu_notifier_invalidate_range(src, mmun_start,
3251 mmun_end);
3252 }
3253 entry = huge_ptep_get(src_pte);
3254 ptepage = pte_page(entry);
3255 get_page(ptepage);
3256 page_dup_rmap(ptepage, true);
3257 set_huge_pte_at(dst, addr, dst_pte, entry);
3258 hugetlb_count_add(pages_per_huge_page(h), dst);
3259 }
3260 spin_unlock(src_ptl);
3261 spin_unlock(dst_ptl);
3262 }
3263
3264 if (cow)
3265 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
3266
3267 return ret;
3268}
3269
3270void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3271 unsigned long start, unsigned long end,
3272 struct page *ref_page)
3273{
3274 struct mm_struct *mm = vma->vm_mm;
3275 unsigned long address;
3276 pte_t *ptep;
3277 pte_t pte;
3278 spinlock_t *ptl;
3279 struct page *page;
3280 struct hstate *h = hstate_vma(vma);
3281 unsigned long sz = huge_page_size(h);
3282 const unsigned long mmun_start = start;
3283 const unsigned long mmun_end = end;
3284
3285 WARN_ON(!is_vm_hugetlb_page(vma));
3286 BUG_ON(start & ~huge_page_mask(h));
3287 BUG_ON(end & ~huge_page_mask(h));
3288
3289 tlb_start_vma(tlb, vma);
3290 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3291 address = start;
3292 for (; address < end; address += sz) {
3293 ptep = huge_pte_offset(mm, address);
3294 if (!ptep)
3295 continue;
3296
3297 ptl = huge_pte_lock(h, mm, ptep);
3298 if (huge_pmd_unshare(mm, &address, ptep)) {
3299 spin_unlock(ptl);
3300 continue;
3301 }
3302
3303 pte = huge_ptep_get(ptep);
3304 if (huge_pte_none(pte)) {
3305 spin_unlock(ptl);
3306 continue;
3307 }
3308
3309
3310
3311
3312
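	/*
	 * Migrating hugepage or HWPoisoned hugepage is already
	 * unmapped and its refcount is dropped, so just clear pte here.
	 */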
3313 if (unlikely(!pte_present(pte))) {
3314 huge_pte_clear(mm, address, ptep);
3315 spin_unlock(ptl);
3316 continue;
3317 }
3318
3319 page = pte_page(pte);
3320
3321
3322
3323
3324
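	/*
	 * If a reference page is supplied, it is because a specific
	 * page is being unmapped, not a range. Ensure the page we
	 * are about to unmap is the actual page of interest.
	 */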
3325 if (ref_page) {
3326 if (page != ref_page) {
3327 spin_unlock(ptl);
3328 continue;
3329 }
3330
3331
3332
3333
3334
3335 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3336 }
3337
3338 pte = huge_ptep_get_and_clear(mm, address, ptep);
3339 tlb_remove_tlb_entry(tlb, ptep, address);
3340 if (huge_pte_dirty(pte))
3341 set_page_dirty(page);
3342
3343 hugetlb_count_sub(pages_per_huge_page(h), mm);
3344 page_remove_rmap(page, true);
3345
3346 spin_unlock(ptl);
3347 tlb_remove_page_size(tlb, page, huge_page_size(h));
3348
3349
3350
3351 if (ref_page)
3352 break;
3353 }
3354 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3355 tlb_end_vma(tlb, vma);
3356}
3357
3358void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3359 struct vm_area_struct *vma, unsigned long start,
3360 unsigned long end, struct page *ref_page)
3361{
3362 __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3363
3364
3365
3366
3367
3368
3369
3370
3371
3372
3373
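	/*
	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
	 * test will fail on a vma being torn down, and not grab a page table
	 * on its way out.  We're lucky that the flag has such an appropriate
	 * name, and can in fact be safely cleared here.
	 */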
3374 vma->vm_flags &= ~VM_MAYSHARE;
3375}
3376
3377void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3378 unsigned long end, struct page *ref_page)
3379{
3380 struct mm_struct *mm;
3381 struct mmu_gather tlb;
3382
3383 mm = vma->vm_mm;
3384
3385 tlb_gather_mmu(&tlb, mm, start, end);
3386 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3387 tlb_finish_mmu(&tlb, start, end);
3388}
3389
3390
3391
3392
3393
3394
3395
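/*
 * This is called when the original mapper is failing to COW a MAP_PRIVATE
 * mapping it owns the reserve page for. The intention is to unmap the page
 * from other VMAs and let the children be SIGKILLed if they are faulting the
 * same region.
 */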
3396static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3397 struct page *page, unsigned long address)
3398{
3399 struct hstate *h = hstate_vma(vma);
3400 struct vm_area_struct *iter_vma;
3401 struct address_space *mapping;
3402 pgoff_t pgoff;
3403
3404
3405
3406
3407
3408 address = address & huge_page_mask(h);
3409 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3410 vma->vm_pgoff;
3411 mapping = vma->vm_file->f_mapping;
3412
3413
3414
3415
3416
3417
3418 i_mmap_lock_write(mapping);
3419 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3420
3421 if (iter_vma == vma)
3422 continue;
3423
3424
3425
3426
3427
3428
3429 if (iter_vma->vm_flags & VM_MAYSHARE)
3430 continue;
3431
3432
3433
3434
3435
3436
3437
3438
3439 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3440 unmap_hugepage_range(iter_vma, address,
3441 address + huge_page_size(h), page);
3442 }
3443 i_mmap_unlock_write(mapping);
3444}
3445
3446
3447
3448
3449
3450
3451
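/*
 * hugetlb_cow() should be called with the page lock of the original hugepage
 * held.  Called with the per-hstate fault mutex held and pte_page locked so
 * we cannot race with other handlers or page migration.
 * Keep the pte_same checks anyway to make the transition from the mutex
 * easier.
 */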
3452static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3453 unsigned long address, pte_t *ptep, pte_t pte,
3454 struct page *pagecache_page, spinlock_t *ptl)
3455{
3456 struct hstate *h = hstate_vma(vma);
3457 struct page *old_page, *new_page;
3458 int ret = 0, outside_reserve = 0;
3459 unsigned long mmun_start;
3460 unsigned long mmun_end;
3461
3462 old_page = pte_page(pte);
3463
3464retry_avoidcopy:
3465
3466
3467 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3468 page_move_anon_rmap(old_page, vma);
3469 set_huge_ptep_writable(vma, address, ptep);
3470 return 0;
3471 }
3472
3473
3474
3475
3476
3477
3478
3479
3480
3481
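	/*
	 * If the process that created a MAP_PRIVATE mapping is about to
	 * perform a COW due to a shared page count, attempt to satisfy
	 * the allocation without using the existing reserves. The pagecache
	 * page is used to determine if the reserve at this address was
	 * consumed or not. If reserves were used, a partial faulted mapping
	 * at the time of fork() could consume its reserves on COW instead
	 * of the full address range.
	 */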
3482 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3483 old_page != pagecache_page)
3484 outside_reserve = 1;
3485
3486 get_page(old_page);
3487
3488
3489
3490
3491
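	/*
	 * Drop page table lock as buddy allocator may be called. It will
	 * be acquired again before returning to the caller, as expected.
	 */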
3492 spin_unlock(ptl);
3493 new_page = alloc_huge_page(vma, address, outside_reserve);
3494
3495 if (IS_ERR(new_page)) {
3496
3497
3498
3499
3500
3501
3502
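	/*
	 * If a process owning a MAP_PRIVATE mapping fails to COW,
	 * it is due to references held by a child and an insufficient
	 * huge page pool. To guarantee the original mapper's
	 * reliability, unmap the page from child processes. The child
	 * may get SIGKILLed if it later faults.
	 */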
3503 if (outside_reserve) {
3504 put_page(old_page);
3505 BUG_ON(huge_pte_none(pte));
3506 unmap_ref_private(mm, vma, old_page, address);
3507 BUG_ON(huge_pte_none(pte));
3508 spin_lock(ptl);
3509 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3510 if (likely(ptep &&
3511 pte_same(huge_ptep_get(ptep), pte)))
3512 goto retry_avoidcopy;
3513
3514
3515
3516
3517 return 0;
3518 }
3519
3520 ret = (PTR_ERR(new_page) == -ENOMEM) ?
3521 VM_FAULT_OOM : VM_FAULT_SIGBUS;
3522 goto out_release_old;
3523 }
3524
3525
3526
3527
3528
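	/*
	 * When the original hugepage is shared, it does not have
	 * anon_vma prepared.
	 */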
3529 if (unlikely(anon_vma_prepare(vma))) {
3530 ret = VM_FAULT_OOM;
3531 goto out_release_all;
3532 }
3533
3534 copy_user_huge_page(new_page, old_page, address, vma,
3535 pages_per_huge_page(h));
3536 __SetPageUptodate(new_page);
3537 set_page_huge_active(new_page);
3538
3539 mmun_start = address & huge_page_mask(h);
3540 mmun_end = mmun_start + huge_page_size(h);
3541 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3542
3543
3544
3545
3546
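	/*
	 * Retake the page table lock to check for racing updates
	 * before the page tables are altered
	 */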
3547 spin_lock(ptl);
3548 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3549 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3550 ClearPagePrivate(new_page);
3551
3552
3553 huge_ptep_clear_flush(vma, address, ptep);
3554 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3555 set_huge_pte_at(mm, address, ptep,
3556 make_huge_pte(vma, new_page, 1));
3557 page_remove_rmap(old_page, true);
3558 hugepage_add_new_anon_rmap(new_page, vma, address);
3559
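	/* Make the old page be freed below */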
3560 new_page = old_page;
3561 }
3562 spin_unlock(ptl);
3563 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3564out_release_all:
3565 restore_reserve_on_error(h, vma, address, new_page);
3566 put_page(new_page);
3567out_release_old:
3568 put_page(old_page);
3569
3570 spin_lock(ptl);
3571 return ret;
3572}
3573
3574
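/* Return the pagecache page at a given address within a VMA */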
3575static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3576 struct vm_area_struct *vma, unsigned long address)
3577{
3578 struct address_space *mapping;
3579 pgoff_t idx;
3580
3581 mapping = vma->vm_file->f_mapping;
3582 idx = vma_hugecache_offset(h, vma, address);
3583
3584 return find_lock_page(mapping, idx);
3585}
3586
3587
3588
3589
3590
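/*
 * Return whether there is a pagecache page to back given address within VMA.
 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
 */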
3591static bool hugetlbfs_pagecache_present(struct hstate *h,
3592 struct vm_area_struct *vma, unsigned long address)
3593{
3594 struct address_space *mapping;
3595 pgoff_t idx;
3596 struct page *page;
3597
3598 mapping = vma->vm_file->f_mapping;
3599 idx = vma_hugecache_offset(h, vma, address);
3600
3601 page = find_get_page(mapping, idx);
3602 if (page)
3603 put_page(page);
3604 return page != NULL;
3605}
3606
3607int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3608 pgoff_t idx)
3609{
3610 struct inode *inode = mapping->host;
3611 struct hstate *h = hstate_inode(inode);
3612 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3613
3614 if (err)
3615 return err;
3616 ClearPagePrivate(page);
3617
3618 spin_lock(&inode->i_lock);
3619 inode->i_blocks += blocks_per_huge_page(h);
3620 spin_unlock(&inode->i_lock);
3621 return 0;
3622}
3623
3624static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
3625 struct address_space *mapping, pgoff_t idx,
3626 unsigned long address, pte_t *ptep, unsigned int flags)
3627{
3628 struct hstate *h = hstate_vma(vma);
3629 int ret = VM_FAULT_SIGBUS;
3630 int anon_rmap = 0;
3631 unsigned long size;
3632 struct page *page;
3633 pte_t new_pte;
3634 spinlock_t *ptl;
3635
3636
3637
3638
3639
3640
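	/*
	 * Currently, we are forced to kill the process in the event the
	 * original mapper has unmapped pages from the child due to a failed
	 * COW. Warn that such a situation has occurred as it may not be
	 * obvious.
	 */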
3641 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3642 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3643 current->pid);
3644 return ret;
3645 }
3646
3647
3648
3649
3650
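	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */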
3651retry:
3652 page = find_lock_page(mapping, idx);
3653 if (!page) {
3654 size = i_size_read(mapping->host) >> huge_page_shift(h);
3655 if (idx >= size)
3656 goto out;
3657 page = alloc_huge_page(vma, address, 0);
3658 if (IS_ERR(page)) {
3659 ret = PTR_ERR(page);
3660 if (ret == -ENOMEM)
3661 ret = VM_FAULT_OOM;
3662 else
3663 ret = VM_FAULT_SIGBUS;
3664 goto out;
3665 }
3666 clear_huge_page(page, address, pages_per_huge_page(h));
3667 __SetPageUptodate(page);
3668 set_page_huge_active(page);
3669
3670 if (vma->vm_flags & VM_MAYSHARE) {
3671 int err = huge_add_to_page_cache(page, mapping, idx);
3672 if (err) {
3673 put_page(page);
3674 if (err == -EEXIST)
3675 goto retry;
3676 goto out;
3677 }
3678 } else {
3679 lock_page(page);
3680 if (unlikely(anon_vma_prepare(vma))) {
3681 ret = VM_FAULT_OOM;
3682 goto backout_unlocked;
3683 }
3684 anon_rmap = 1;
3685 }
3686 } else {
3687
3688
3689
3690
3691
3692 if (unlikely(PageHWPoison(page))) {
3693 ret = VM_FAULT_HWPOISON |
3694 VM_FAULT_SET_HINDEX(hstate_index(h));
3695 goto backout_unlocked;
3696 }
3697 }
3698
3699
3700
3701
3702
3703
3704
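	/*
	 * If we are going to COW a private mapping later, we examine the
	 * pending reservations for this page now. This will ensure that
	 * any allocations necessary to record that reservation occur outside
	 * the spinlock.
	 */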
3705 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3706 if (vma_needs_reservation(h, vma, address) < 0) {
3707 ret = VM_FAULT_OOM;
3708 goto backout_unlocked;
3709 }
3710
3711 vma_end_reservation(h, vma, address);
3712 }
3713
3714 ptl = huge_pte_lockptr(h, mm, ptep);
3715 spin_lock(ptl);
3716 size = i_size_read(mapping->host) >> huge_page_shift(h);
3717 if (idx >= size)
3718 goto backout;
3719
3720 ret = 0;
3721 if (!huge_pte_none(huge_ptep_get(ptep)))
3722 goto backout;
3723
3724 if (anon_rmap) {
3725 ClearPagePrivate(page);
3726 hugepage_add_new_anon_rmap(page, vma, address);
3727 } else
3728 page_dup_rmap(page, true);
3729 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3730 && (vma->vm_flags & VM_SHARED)));
3731 set_huge_pte_at(mm, address, ptep, new_pte);
3732
3733 hugetlb_count_add(pages_per_huge_page(h), mm);
3734 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3735
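	/* Optimization, do the COW without a second fault */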
3736 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3737 }
3738
3739 spin_unlock(ptl);
3740 unlock_page(page);
3741out:
3742 return ret;
3743
3744backout:
3745 spin_unlock(ptl);
3746backout_unlocked:
3747 unlock_page(page);
3748 restore_reserve_on_error(h, vma, address, page);
3749 put_page(page);
3750 goto out;
3751}
3752
3753#ifdef CONFIG_SMP
3754u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3755 struct vm_area_struct *vma,
3756 struct address_space *mapping,
3757 pgoff_t idx, unsigned long address)
3758{
3759 unsigned long key[2];
3760 u32 hash;
3761
3762 if (vma->vm_flags & VM_SHARED) {
3763 key[0] = (unsigned long) mapping;
3764 key[1] = idx;
3765 } else {
3766 key[0] = (unsigned long) mm;
3767 key[1] = address >> huge_page_shift(h);
3768 }
3769
3770 hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3771
3772 return hash & (num_fault_mutexes - 1);
3773}
3774#else
3775
3776
3777
3778
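/*
 * For uniprocessor systems we always use a single mutex, so just
 * return 0 and avoid the hashing overhead.
 */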
3779u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3780 struct vm_area_struct *vma,
3781 struct address_space *mapping,
3782 pgoff_t idx, unsigned long address)
3783{
3784 return 0;
3785}
3786#endif
3787
3788int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3789 unsigned long address, unsigned int flags)
3790{
3791 pte_t *ptep, entry;
3792 spinlock_t *ptl;
3793 int ret;
3794 u32 hash;
3795 pgoff_t idx;
3796 struct page *page = NULL;
3797 struct page *pagecache_page = NULL;
3798 struct hstate *h = hstate_vma(vma);
3799 struct address_space *mapping;
3800 int need_wait_lock = 0;
3801
3802 address &= huge_page_mask(h);
3803
3804 ptep = huge_pte_offset(mm, address);
3805 if (ptep) {
3806 entry = huge_ptep_get(ptep);
3807 if (unlikely(is_hugetlb_entry_migration(entry))) {
3808 migration_entry_wait_huge(vma, mm, ptep);
3809 return 0;
3810 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3811 return VM_FAULT_HWPOISON_LARGE |
3812 VM_FAULT_SET_HINDEX(hstate_index(h));
3813 } else {
3814 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3815 if (!ptep)
3816 return VM_FAULT_OOM;
3817 }
3818
3819 mapping = vma->vm_file->f_mapping;
3820 idx = vma_hugecache_offset(h, vma, address);
3821
3822
3823
3824
3825
3826
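	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */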
3827 hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
3828 mutex_lock(&hugetlb_fault_mutex_table[hash]);
3829
3830 entry = huge_ptep_get(ptep);
3831 if (huge_pte_none(entry)) {
3832 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3833 goto out_mutex;
3834 }
3835
3836 ret = 0;
3837
3838
3839
3840
3841
3842
3843
3844
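	/*
	 * entry could be a migration/hwpoison entry at this point, so this
	 * check prevents the kernel from going below assuming that we have
	 * an active hugepage in pagecache. This goto expects the 2nd page
	 * fault, and is_hugetlb_entry_(migration|hwpoisoned) check will
	 * properly handle it.
	 */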
3845 if (!pte_present(entry))
3846 goto out_mutex;
3847
3848
3849
3850
3851
3852
3853
3854
3855
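	/*
	 * If we are going to COW the mapping later, we examine the pending
	 * reservations for this page now. This will ensure that any
	 * allocations necessary to record that reservation occur outside the
	 * spinlock. For private mappings, we also lookup the pagecache
	 * page now as it is used to determine if a reservation has been
	 * consumed.
	 */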
3856 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3857 if (vma_needs_reservation(h, vma, address) < 0) {
3858 ret = VM_FAULT_OOM;
3859 goto out_mutex;
3860 }
3861
3862 vma_end_reservation(h, vma, address);
3863
3864 if (!(vma->vm_flags & VM_MAYSHARE))
3865 pagecache_page = hugetlbfs_pagecache_page(h,
3866 vma, address);
3867 }
3868
3869 ptl = huge_pte_lock(h, mm, ptep);
3870
3871
3872 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3873 goto out_ptl;
3874
3875
3876
3877
3878
3879
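	/*
	 * hugetlb_cow() requires page locks of pte_page(entry) and
	 * pagecache_page, so here we need take the former one
	 * when page != pagecache_page or !pagecache_page.
	 */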
3880 page = pte_page(entry);
3881 if (page != pagecache_page)
3882 if (!trylock_page(page)) {
3883 need_wait_lock = 1;
3884 goto out_ptl;
3885 }
3886
3887 get_page(page);
3888
3889 if (flags & FAULT_FLAG_WRITE) {
3890 if (!huge_pte_write(entry)) {
3891 ret = hugetlb_cow(mm, vma, address, ptep, entry,
3892 pagecache_page, ptl);
3893 goto out_put_page;
3894 }
3895 entry = huge_pte_mkdirty(entry);
3896 }
3897 entry = pte_mkyoung(entry);
3898 if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3899 flags & FAULT_FLAG_WRITE))
3900 update_mmu_cache(vma, address, ptep);
3901out_put_page:
3902 if (page != pagecache_page)
3903 unlock_page(page);
3904 put_page(page);
3905out_ptl:
3906 spin_unlock(ptl);
3907
3908 if (pagecache_page) {
3909 unlock_page(pagecache_page);
3910 put_page(pagecache_page);
3911 }
3912out_mutex:
3913 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3914
3915
3916
3917
3918
3919
3920
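	/*
	 * Generally it's safe to hold refcount during waiting page lock. But
	 * here we just wait to defer the next page fault to avoid busy loop
	 * and the page is not used after unlocked before returning from the
	 * current page fault. So we are safe from accessing freed page, even
	 * if we have acquired the lock.
	 */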
3921 if (need_wait_lock)
3922 wait_on_page_locked(page);
3923 return ret;
3924}
3925
3926long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3927 struct page **pages, struct vm_area_struct **vmas,
3928 unsigned long *position, unsigned long *nr_pages,
3929 long i, unsigned int flags)
3930{
3931 unsigned long pfn_offset;
3932 unsigned long vaddr = *position;
3933 unsigned long remainder = *nr_pages;
3934 struct hstate *h = hstate_vma(vma);
3935
3936 while (vaddr < vma->vm_end && remainder) {
3937 pte_t *pte;
3938 spinlock_t *ptl = NULL;
3939 int absent;
3940 struct page *page;
3941
3942
3943
3944
3945
3946 if (unlikely(fatal_signal_pending(current))) {
3947 remainder = 0;
3948 break;
3949 }
3950
3951
3952
3953
3954
3955
3956
3957
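	/*
	 * Some archs (sparc64, sh*) have multiple pte_ts to
	 * each hugepage.  We have to make sure we get the
	 * first, for the page indexing below to work.
	 *
	 * Note that page table lock is not held when pte is null.
	 */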
3958 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3959 if (pte)
3960 ptl = huge_pte_lock(h, mm, pte);
3961 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3962
3963
3964
3965
3966
3967
3968
3969
3970 if (absent && (flags & FOLL_DUMP) &&
3971 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3972 if (pte)
3973 spin_unlock(ptl);
3974 remainder = 0;
3975 break;
3976 }
3977
3978
3979
3980
3981
3982
3983
3984
3985
3986
3987
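	/*
	 * Call hugetlb_fault() for huge pages under migration (it waits for
	 * the migration to finish), for hwpoisoned huge pages (access must
	 * be blocked), and for write access to read-only PTEs.
	 * is_swap_pte() covers both the migration and hwpoison entry cases.
	 */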
3988 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
3989 ((flags & FOLL_WRITE) &&
3990 !huge_pte_write(huge_ptep_get(pte)))) {
3991 int ret;
3992
3993 if (pte)
3994 spin_unlock(ptl);
3995 ret = hugetlb_fault(mm, vma, vaddr,
3996 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
3997 if (!(ret & VM_FAULT_ERROR))
3998 continue;
3999
4000 remainder = 0;
4001 break;
4002 }
4003
4004 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4005 page = pte_page(huge_ptep_get(pte));
4006same_page:
4007 if (pages) {
4008 pages[i] = mem_map_offset(page, pfn_offset);
4009 get_page(pages[i]);
4010 }
4011
4012 if (vmas)
4013 vmas[i] = vma;
4014
4015 vaddr += PAGE_SIZE;
4016 ++pfn_offset;
4017 --remainder;
4018 ++i;
4019 if (vaddr < vma->vm_end && remainder &&
4020 pfn_offset < pages_per_huge_page(h)) {
4021
4022
4023
4024
4025 goto same_page;
4026 }
4027 spin_unlock(ptl);
4028 }
4029 *nr_pages = remainder;
4030 *position = vaddr;
4031
4032 return i ? i : -EFAULT;
4033}
4034
4035#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
4036
4037
4038
4039
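/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */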
4040#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
4041#endif
4042
4043unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4044 unsigned long address, unsigned long end, pgprot_t newprot)
4045{
4046 struct mm_struct *mm = vma->vm_mm;
4047 unsigned long start = address;
4048 pte_t *ptep;
4049 pte_t pte;
4050 struct hstate *h = hstate_vma(vma);
4051 unsigned long pages = 0;
4052
4053 BUG_ON(address >= end);
4054 flush_cache_range(vma, address, end);
4055
4056 mmu_notifier_invalidate_range_start(mm, start, end);
4057 i_mmap_lock_write(vma->vm_file->f_mapping);
4058 for (; address < end; address += huge_page_size(h)) {
4059 spinlock_t *ptl;
4060 ptep = huge_pte_offset(mm, address);
4061 if (!ptep)
4062 continue;
4063 ptl = huge_pte_lock(h, mm, ptep);
4064 if (huge_pmd_unshare(mm, &address, ptep)) {
4065 pages++;
4066 spin_unlock(ptl);
4067 continue;
4068 }
4069 pte = huge_ptep_get(ptep);
4070 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
4071 spin_unlock(ptl);
4072 continue;
4073 }
4074 if (unlikely(is_hugetlb_entry_migration(pte))) {
4075 swp_entry_t entry = pte_to_swp_entry(pte);
4076
4077 if (is_write_migration_entry(entry)) {
4078 pte_t newpte;
4079
4080 make_migration_entry_read(&entry);
4081 newpte = swp_entry_to_pte(entry);
4082 set_huge_pte_at(mm, address, ptep, newpte);
4083 pages++;
4084 }
4085 spin_unlock(ptl);
4086 continue;
4087 }
4088 if (!huge_pte_none(pte)) {
4089 pte = huge_ptep_get_and_clear(mm, address, ptep);
4090 pte = pte_mkhuge(huge_pte_modify(pte, newprot));
4091 pte = arch_make_huge_pte(pte, vma, NULL, 0);
4092 set_huge_pte_at(mm, address, ptep, pte);
4093 pages++;
4094 }
4095 spin_unlock(ptl);
4096 }
4097
4098
4099
4100
4101
4102
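	/*
	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
	 * may have cleared our pud entry and done put_page on the page table:
	 * once we release i_mmap_rwsem, another task can do the final put_page
	 * and that page table is no longer valid to free.
	 */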
4103 flush_hugetlb_tlb_range(vma, start, end);
4104 mmu_notifier_invalidate_range(mm, start, end);
4105 i_mmap_unlock_write(vma->vm_file->f_mapping);
4106 mmu_notifier_invalidate_range_end(mm, start, end);
4107
4108 return pages << h->order;
4109}
4110
4111int hugetlb_reserve_pages(struct inode *inode,
4112 long from, long to,
4113 struct vm_area_struct *vma,
4114 vm_flags_t vm_flags)
4115{
4116 long ret, chg;
4117 struct hstate *h = hstate_inode(inode);
4118 struct hugepage_subpool *spool = subpool_inode(inode);
4119 struct resv_map *resv_map;
4120 long gbl_reserve;
4121
4122
4123
4124
4125
4126
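	/*
	 * Only apply hugepage reservation if asked. At fault time, an
	 * attempt will be made for VM_NORESERVE to allocate a page
	 * without using reserves
	 */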
4127 if (vm_flags & VM_NORESERVE)
4128 return 0;
4129
4130
4131
4132
4133
4134
4135
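	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file. Private mappings need
	 * to reserve the full area even if read-only as mprotect() may be
	 * called to make the mapping read-write. Assume !vma is a shm mapping
	 */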
4136 if (!vma || vma->vm_flags & VM_MAYSHARE) {
4137 resv_map = inode_resv_map(inode);
4138
4139 chg = region_chg(resv_map, from, to);
4140
4141 } else {
4142 resv_map = resv_map_alloc();
4143 if (!resv_map)
4144 return -ENOMEM;
4145
4146 chg = to - from;
4147
4148 set_vma_resv_map(vma, resv_map);
4149 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4150 }
4151
4152 if (chg < 0) {
4153 ret = chg;
4154 goto out_err;
4155 }
4156
4157
4158
4159
4160
4161
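	/*
	 * There must be enough pages in the subpool for the mapping. If
	 * the subpool has a minimum size, there may be some global
	 * reservations already in place (gbl_reserve).
	 */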
4162 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4163 if (gbl_reserve < 0) {
4164 ret = -ENOSPC;
4165 goto out_err;
4166 }
4167
4168
4169
4170
4171
4172 ret = hugetlb_acct_memory(h, gbl_reserve);
4173 if (ret < 0) {
4174
4175 (void)hugepage_subpool_put_pages(spool, chg);
4176 goto out_err;
4177 }
4178
4179
4180
4181
4182
4183
4184
4185
4186
4187
4188
4189
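	/*
	 * Account for the reservations made. Shared mappings record regions
	 * that have reservations as they are shared by multiple VMAs;
	 * private mappings are per-VMA and only the owner accounts for them.
	 * If region_add() ends up adding fewer pages than region_chg()
	 * estimated, entries were added to the reserve map in the meantime
	 * (a race with alloc_huge_page), so the excess taken from the
	 * subpool above is handed back.
	 */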
4190 if (!vma || vma->vm_flags & VM_MAYSHARE) {
4191 long add = region_add(resv_map, from, to);
4192
4193 if (unlikely(chg > add)) {
4194
4195
4196
4197
4198
4199
4200
4201 long rsv_adjust;
4202
4203 rsv_adjust = hugepage_subpool_put_pages(spool,
4204 chg - add);
4205 hugetlb_acct_memory(h, -rsv_adjust);
4206 }
4207 }
4208 return 0;
4209out_err:
4210 if (!vma || vma->vm_flags & VM_MAYSHARE)
4211 region_abort(resv_map, from, to);
4212 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4213 kref_put(&resv_map->refs, resv_map_release);
4214 return ret;
4215}
4216
4217long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4218 long freed)
4219{
4220 struct hstate *h = hstate_inode(inode);
4221 struct resv_map *resv_map = inode_resv_map(inode);
4222 long chg = 0;
4223 struct hugepage_subpool *spool = subpool_inode(inode);
4224 long gbl_reserve;
4225
4226 if (resv_map) {
4227 chg = region_del(resv_map, start, end);
4228
4229
4230
4231
4232
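	/*
	 * region_del() can fail in the rare case where a region
	 * must be split and another region descriptor can not be
	 * allocated.  If end == LONG_MAX, it will not fail.
	 */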
4233 if (chg < 0)
4234 return chg;
4235 }
4236
4237 spin_lock(&inode->i_lock);
4238 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4239 spin_unlock(&inode->i_lock);
4240
4241
4242
4243
4244
4245 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4246 hugetlb_acct_memory(h, -gbl_reserve);
4247
4248 return 0;
4249}
4250
4251#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
4252static unsigned long page_table_shareable(struct vm_area_struct *svma,
4253 struct vm_area_struct *vma,
4254 unsigned long addr, pgoff_t idx)
4255{
4256 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4257 svma->vm_start;
4258 unsigned long sbase = saddr & PUD_MASK;
4259 unsigned long s_end = sbase + PUD_SIZE;
4260
4261
4262 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4263 unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4264
4265
4266
4267
4268
4269 if (pmd_index(addr) != pmd_index(saddr) ||
4270 vm_flags != svm_flags ||
4271 sbase < svma->vm_start || svma->vm_end < s_end)
4272 return 0;
4273
4274 return saddr;
4275}
4276
4277static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4278{
4279 unsigned long base = addr & PUD_MASK;
4280 unsigned long end = base + PUD_SIZE;
4281
4282
4283
4284
4285 if (vma->vm_flags & VM_MAYSHARE &&
4286 vma->vm_start <= base && end <= vma->vm_end)
4287 return true;
4288 return false;
4289}
4290
4291
4292
4293
4294
4295
4296
4297
4298
4299
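/*
 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
 * and returns the corresponding pte. While this is not necessary for the
 * !shared pmd case because we can allocate the pmd later as well, it makes the
 * code much cleaner. pmd allocation is essential for the shared case because
 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
 * bad pmd for sharing.
 */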
4300pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4301{
4302 struct vm_area_struct *vma = find_vma(mm, addr);
4303 struct address_space *mapping = vma->vm_file->f_mapping;
4304 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4305 vma->vm_pgoff;
4306 struct vm_area_struct *svma;
4307 unsigned long saddr;
4308 pte_t *spte = NULL;
4309 pte_t *pte;
4310 spinlock_t *ptl;
4311
4312 if (!vma_shareable(vma, addr))
4313 return (pte_t *)pmd_alloc(mm, pud, addr);
4314
4315 i_mmap_lock_write(mapping);
4316 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4317 if (svma == vma)
4318 continue;
4319
4320 saddr = page_table_shareable(svma, vma, addr, idx);
4321 if (saddr) {
4322 spte = huge_pte_offset(svma->vm_mm, saddr);
4323 if (spte) {
4324 get_page(virt_to_page(spte));
4325 break;
4326 }
4327 }
4328 }
4329
4330 if (!spte)
4331 goto out;
4332
4333 ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
4334 spin_lock(ptl);
4335 if (pud_none(*pud)) {
4336 pud_populate(mm, pud,
4337 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4338 mm_inc_nr_pmds(mm);
4339 } else {
4340 put_page(virt_to_page(spte));
4341 }
4342 spin_unlock(ptl);
4343out:
4344 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4345 i_mmap_unlock_write(mapping);
4346 return pte;
4347}
4348
4349
4350
4351
4352
4353
4354
4355
4356
4357
4358
4359
4360
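/*
 * unmap huge page backed by shared pte.
 *
 * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
 * indicated by page_count > 1, unmap is achieved by clearing pud and
 * decrementing the ref count. If count == 1, the pte page is not shared.
 *
 * called with page table lock held.
 *
 * returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */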
4361int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4362{
4363 pgd_t *pgd = pgd_offset(mm, *addr);
4364 pud_t *pud = pud_offset(pgd, *addr);
4365
4366 BUG_ON(page_count(virt_to_page(ptep)) == 0);
4367 if (page_count(virt_to_page(ptep)) == 1)
4368 return 0;
4369
4370 pud_clear(pud);
4371 put_page(virt_to_page(ptep));
4372 mm_dec_nr_pmds(mm);
4373 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4374 return 1;
4375}
4376#define want_pmd_share() (1)
4377#else
4378pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4379{
4380 return NULL;
4381}
4382
4383int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4384{
4385 return 0;
4386}
4387#define want_pmd_share() (0)
4388#endif
4389
4390#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
4391pte_t *huge_pte_alloc(struct mm_struct *mm,
4392 unsigned long addr, unsigned long sz)
4393{
4394 pgd_t *pgd;
4395 pud_t *pud;
4396 pte_t *pte = NULL;
4397
4398 pgd = pgd_offset(mm, addr);
4399 pud = pud_alloc(mm, pgd, addr);
4400 if (pud) {
4401 if (sz == PUD_SIZE) {
4402 pte = (pte_t *)pud;
4403 } else {
4404 BUG_ON(sz != PMD_SIZE);
4405 if (want_pmd_share() && pud_none(*pud))
4406 pte = huge_pmd_share(mm, addr, pud);
4407 else
4408 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4409 }
4410 }
4411 BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
4412
4413 return pte;
4414}
4415
4416pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
4417{
4418 pgd_t *pgd;
4419 pud_t *pud;
4420 pmd_t *pmd = NULL;
4421
4422 pgd = pgd_offset(mm, addr);
4423 if (pgd_present(*pgd)) {
4424 pud = pud_offset(pgd, addr);
4425 if (pud_present(*pud)) {
4426 if (pud_huge(*pud))
4427 return (pte_t *)pud;
4428 pmd = pmd_offset(pud, addr);
4429 }
4430 }
4431 return (pte_t *) pmd;
4432}
4433
4434#endif
4435
4436
4437
4438
4439
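/*
 * These functions are overwritable if your architecture needs its own
 * behavior.
 */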
4440struct page * __weak
4441follow_huge_addr(struct mm_struct *mm, unsigned long address,
4442 int write)
4443{
4444 return ERR_PTR(-EINVAL);
4445}
4446
4447struct page * __weak
4448follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4449 pmd_t *pmd, int flags)
4450{
4451 struct page *page = NULL;
4452 spinlock_t *ptl;
4453retry:
4454 ptl = pmd_lockptr(mm, pmd);
4455 spin_lock(ptl);
4456
4457
4458
4459
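	/*
	 * make sure that the address range covered by this pmd is not
	 * unmapped from other threads.
	 */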
4460 if (!pmd_huge(*pmd))
4461 goto out;
4462 if (pmd_present(*pmd)) {
4463 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4464 if (flags & FOLL_GET)
4465 get_page(page);
4466 } else {
4467 if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
4468 spin_unlock(ptl);
4469 __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4470 goto retry;
4471 }
4472
4473
4474
4475
4476 }
4477out:
4478 spin_unlock(ptl);
4479 return page;
4480}
4481
4482struct page * __weak
4483follow_huge_pud(struct mm_struct *mm, unsigned long address,
4484 pud_t *pud, int flags)
4485{
4486 if (flags & FOLL_GET)
4487 return NULL;
4488
4489 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4490}
4491
4492#ifdef CONFIG_MEMORY_FAILURE
4493
4494
4495
4496
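/*
 * This function is called from memory failure code.
 */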
4497int dequeue_hwpoisoned_huge_page(struct page *hpage)
4498{
4499 struct hstate *h = page_hstate(hpage);
4500 int nid = page_to_nid(hpage);
4501 int ret = -EBUSY;
4502
4503 spin_lock(&hugetlb_lock);
4504
4505
4506
4507
4508 if (!page_huge_active(hpage) && !page_count(hpage)) {
4509
4510
4511
4512
4513
4514
4515 list_del_init(&hpage->lru);
4516 set_page_refcounted(hpage);
4517 h->free_huge_pages--;
4518 h->free_huge_pages_node[nid]--;
4519 ret = 0;
4520 }
4521 spin_unlock(&hugetlb_lock);
4522 return ret;
4523}
4524#endif
4525
4526bool isolate_huge_page(struct page *page, struct list_head *list)
4527{
4528 bool ret = true;
4529
4530 VM_BUG_ON_PAGE(!PageHead(page), page);
4531 spin_lock(&hugetlb_lock);
4532 if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4533 ret = false;
4534 goto unlock;
4535 }
4536 clear_page_huge_active(page);
4537 list_move_tail(&page->lru, list);
4538unlock:
4539 spin_unlock(&hugetlb_lock);
4540 return ret;
4541}
4542
4543void putback_active_hugepage(struct page *page)
4544{
4545 VM_BUG_ON_PAGE(!PageHead(page), page);
4546 spin_lock(&hugetlb_lock);
4547 set_page_huge_active(page);
4548 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4549 spin_unlock(&hugetlb_lock);
4550 put_page(page);
4551}
4552