/*
 * mm/rmap.c - physical to virtual reverse mappings
 */
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>

#include <asm/tlbflush.h>

#include <trace/events/tlb.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->degree = 1;	/* reference for the first vma */
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's root.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}
96
static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma_read(): a reader may have
	 * taken (or be taking) the root rwsem before our refcount dropped
	 * to zero.  If the rwsem is still held, take and release it so any
	 * such reader is finished with this anon_vma before it is freed.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}
126
127static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
128{
129 return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
130}
131
132static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
133{
134 kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
135}
136
137static void anon_vma_chain_link(struct vm_area_struct *vma,
138 struct anon_vma_chain *avc,
139 struct anon_vma *anon_vma)
140{
141 avc->vma = vma;
142 avc->anon_vma = anon_vma;
143 list_add(&avc->same_vma, &vma->anon_vma_chain);
144 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
145}
146
/*
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has an 'anon_vma'
 * attached to it, so that anonymous pages faulted into it can be rmap'd.
 * The common case (vma->anon_vma already set) is handled inline by the
 * anon_vma_prepare() wrapper; here we either reuse a mergeable
 * neighbour's anon_vma or allocate a fresh one, using the page_table_lock
 * to serialize against a racing fault doing the same.
 */
175int __anon_vma_prepare(struct vm_area_struct *vma)
176{
177 struct mm_struct *mm = vma->vm_mm;
178 struct anon_vma *anon_vma, *allocated;
179 struct anon_vma_chain *avc;
180
181 might_sleep();
182
183 avc = anon_vma_chain_alloc(GFP_KERNEL);
184 if (!avc)
185 goto out_enomem;
186
187 anon_vma = find_mergeable_anon_vma(vma);
188 allocated = NULL;
189 if (!anon_vma) {
190 anon_vma = anon_vma_alloc();
191 if (unlikely(!anon_vma))
192 goto out_enomem_free_avc;
193 allocated = anon_vma;
194 }
195
196 anon_vma_lock_write(anon_vma);
197
198 spin_lock(&mm->page_table_lock);
199 if (likely(!vma->anon_vma)) {
200 vma->anon_vma = anon_vma;
201 anon_vma_chain_link(vma, avc, anon_vma);
202
203 anon_vma->degree++;
204 allocated = NULL;
205 avc = NULL;
206 }
207 spin_unlock(&mm->page_table_lock);
208 anon_vma_unlock_write(anon_vma);
209
210 if (unlikely(allocated))
211 put_anon_vma(allocated);
212 if (unlikely(avc))
213 anon_vma_chain_free(avc);
214
215 return 0;
216
217 out_enomem_free_avc:
218 anon_vma_chain_free(avc);
219 out_enomem:
220 return -ENOMEM;
221}
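
/*
 * Illustrative sketch (not part of this file): how a fault path typically
 * pairs anon_vma preparation with installing a new anonymous page.  The
 * anon_vma_prepare() wrapper and the exact call sequence below are
 * simplified assumptions for illustration only:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	...
 *	page_add_new_anon_rmap(page, vma, address, false);
 *	set_pte_at(mm, address, pte, entry);
 */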
222
/*
 * Helper for taking the root anon_vma rwsem while walking a vma's
 * anon_vma_chain.  All anon_vmas on one chain are expected to share the
 * same root, so the write lock is normally taken just once and handed
 * along between iterations.
 */
231static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
232{
233 struct anon_vma *new_root = anon_vma->root;
234 if (new_root != root) {
235 if (WARN_ON_ONCE(root))
236 up_write(&root->rwsem);
237 root = new_root;
238 down_write(&root->rwsem);
239 }
240 return root;
241}
242
243static inline void unlock_anon_vma_root(struct anon_vma *root)
244{
245 if (root)
246 up_write(&root->rwsem);
247}
248
/*
 * anon_vma_clone - link @dst to all the anon_vmas that @src is linked to.
 * Used when a vma is split or otherwise duplicated.
 *
 * Returns 0 on success, or -ENOMEM if an anon_vma_chain could not be
 * allocated, in which case any links already made are torn down again.
 */
261int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
262{
263 struct anon_vma_chain *avc, *pavc;
264 struct anon_vma *root = NULL;
265
266 list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
267 struct anon_vma *anon_vma;
268
269 avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
270 if (unlikely(!avc)) {
271 unlock_anon_vma_root(root);
272 root = NULL;
273 avc = anon_vma_chain_alloc(GFP_KERNEL);
274 if (!avc)
275 goto enomem_failure;
276 }
277 anon_vma = pavc->anon_vma;
278 root = lock_anon_vma_root(root, anon_vma);
279 anon_vma_chain_link(dst, avc, anon_vma);
		/*
		 * Remember a reusable anon_vma for @dst: one that is not
		 * @src's own anon_vma and has degree < 2 (i.e. no vma of its
		 * own and at most one child), so anon_vma_fork() can adopt
		 * it instead of allocating a new anon_vma for every fork.
		 */
289 if (!dst->anon_vma && anon_vma != src->anon_vma &&
290 anon_vma->degree < 2)
291 dst->anon_vma = anon_vma;
292 }
293 if (dst->anon_vma)
294 dst->anon_vma->degree++;
295 unlock_anon_vma_root(root);
296 return 0;
297
298 enomem_failure:
	/*
	 * dst->anon_vma never got its degree reference above, so clear it
	 * before unlink_anon_vmas(), which would otherwise decrement the
	 * degree of an anon_vma it never incremented.
	 */
305 dst->anon_vma = NULL;
306 unlink_anon_vmas(dst);
307 return -ENOMEM;
308}
309
/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that the
 * corresponding vma in the parent process is attached to, so pages COWed
 * in the child can still be found by rmap.
 * Returns 0 on success, non-zero on failure.
 */
315int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
316{
317 struct anon_vma_chain *avc;
318 struct anon_vma *anon_vma;
319 int error;
320
321
322 if (!pvma->anon_vma)
323 return 0;
324
325
326 vma->anon_vma = NULL;
	/*
	 * First, attach the new vma to the parent vma's anon_vmas, so rmap
	 * can still find pages in the child that have not been COWed yet.
	 */
332 error = anon_vma_clone(vma, pvma);
333 if (error)
334 return error;
335
336
337 if (vma->anon_vma)
338 return 0;
339
340
341 anon_vma = anon_vma_alloc();
342 if (!anon_vma)
343 goto out_error;
344 avc = anon_vma_chain_alloc(GFP_KERNEL);
345 if (!avc)
346 goto out_error_free_anon_vma;
347
348
349
350
351
352 anon_vma->root = pvma->anon_vma->root;
353 anon_vma->parent = pvma->anon_vma;
354
355
356
357
358
359 get_anon_vma(anon_vma->root);
360
361 vma->anon_vma = anon_vma;
362 anon_vma_lock_write(anon_vma);
363 anon_vma_chain_link(vma, avc, anon_vma);
364 anon_vma->parent->degree++;
365 anon_vma_unlock_write(anon_vma);
366
367 return 0;
368
369 out_error_free_anon_vma:
370 put_anon_vma(anon_vma);
371 out_error:
372 unlink_anon_vmas(vma);
373 return -ENOMEM;
374}
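
/*
 * Illustrative sketch (not part of this file): anon_vma_fork() is meant to
 * run once per copied vma while an address space is being duplicated; a
 * hypothetical caller would look roughly like:
 *
 *	for (each new_vma copied from old_vma) {
 *		if (anon_vma_fork(new_vma, old_vma))
 *			goto fail_nomem;
 *		...
 *	}
 */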
375
376void unlink_anon_vmas(struct vm_area_struct *vma)
377{
378 struct anon_vma_chain *avc, *next;
379 struct anon_vma *root = NULL;
	/*
	 * Unlink each anon_vma chained to the VMA.  The list is ordered
	 * from newest to oldest, so the root anon_vma is handled last.
	 */
385 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
386 struct anon_vma *anon_vma = avc->anon_vma;
387
388 root = lock_anon_vma_root(root, anon_vma);
389 anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);
		/*
		 * Leave empty anon_vmas on the list; they are freed outside
		 * the root lock by the second loop below.
		 */
395 if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
396 anon_vma->parent->degree--;
397 continue;
398 }
399
400 list_del(&avc->same_vma);
401 anon_vma_chain_free(avc);
402 }
403 if (vma->anon_vma)
404 vma->anon_vma->degree--;
405 unlock_anon_vma_root(root);
	/*
	 * Second pass: the chain now contains only empty anon_vmas.  Drop
	 * their references (which may free them and may sleep) now that the
	 * root lock has been released.
	 */
412 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
413 struct anon_vma *anon_vma = avc->anon_vma;
414
415 VM_WARN_ON(anon_vma->degree);
416 put_anon_vma(anon_vma);
417
418 list_del(&avc->same_vma);
419 anon_vma_chain_free(avc);
420 }
421}
422
423static void anon_vma_ctor(void *data)
424{
425 struct anon_vma *anon_vma = data;
426
427 init_rwsem(&anon_vma->rwsem);
428 atomic_set(&anon_vma->refcount, 0);
429 anon_vma->rb_root = RB_ROOT_CACHED;
430}
431
432void __init anon_vma_init(void)
433{
434 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
435 0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
436 anon_vma_ctor);
437 anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
438 SLAB_PANIC|SLAB_ACCOUNT);
439}
440
/*
 * page_get_anon_vma - pin the anon_vma of an anonymous page
 *
 * Getting a stable anon_vma for a page off the LRU is tricky: by the time
 * we take a reference the anon_vma may already have been freed, or reused
 * for a different anon_vma (the slab is SLAB_TYPESAFE_BY_RCU).  So we take
 * the RCU read lock, grab the refcount with atomic_inc_not_zero(), and
 * re-check that the page is still mapped, bailing out otherwise.  The
 * caller must drop the reference with put_anon_vma().
 */
464struct anon_vma *page_get_anon_vma(struct page *page)
465{
466 struct anon_vma *anon_vma = NULL;
467 unsigned long anon_mapping;
468
469 rcu_read_lock();
470 anon_mapping = (unsigned long)READ_ONCE(page->mapping);
471 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
472 goto out;
473 if (!page_mapped(page))
474 goto out;
475
476 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
477 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
478 anon_vma = NULL;
479 goto out;
480 }
481
	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed.  But if it has been unmapped, we have no guarantee that the
	 * anon_vma we pinned above still belongs to this page, so drop it.
	 */
489 if (!page_mapped(page)) {
490 rcu_read_unlock();
491 put_anon_vma(anon_vma);
492 return NULL;
493 }
494out:
495 rcu_read_unlock();
496
497 return anon_vma;
498}
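
/*
 * Illustrative sketch (not part of this file): a caller that only needs to
 * pin the anon_vma, e.g. before sleeping, rather than lock it, could do,
 * under the stated assumptions:
 *
 *	struct anon_vma *av = page_get_anon_vma(page);
 *	if (av) {
 *		... use av without holding its lock ...
 *		put_anon_vma(av);
 *	}
 */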
/*
 * Similar to page_get_anon_vma() except the caller also ends up with the
 * anon_vma read lock held.  The fast path is a trylock on the root rwsem
 * plus a page_mapped() recheck; if the trylock fails we pin the anon_vma
 * with a refcount, sleep on the lock, and drop the extra reference again.
 * Returns NULL if the page turns out to no longer be mapped.
 */
507struct anon_vma *page_lock_anon_vma_read(struct page *page)
508{
509 struct anon_vma *anon_vma = NULL;
510 struct anon_vma *root_anon_vma;
511 unsigned long anon_mapping;
512
513 rcu_read_lock();
514 anon_mapping = (unsigned long)READ_ONCE(page->mapping);
515 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
516 goto out;
517 if (!page_mapped(page))
518 goto out;
519
520 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
521 root_anon_vma = READ_ONCE(anon_vma->root);
522 if (down_read_trylock(&root_anon_vma->rwsem)) {
523
524
525
526
527
528 if (!page_mapped(page)) {
529 up_read(&root_anon_vma->rwsem);
530 anon_vma = NULL;
531 }
532 goto out;
533 }
534
535
536 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
537 anon_vma = NULL;
538 goto out;
539 }
540
541 if (!page_mapped(page)) {
542 rcu_read_unlock();
543 put_anon_vma(anon_vma);
544 return NULL;
545 }
546
547
548 rcu_read_unlock();
549 anon_vma_lock_read(anon_vma);
550
551 if (atomic_dec_and_test(&anon_vma->refcount)) {
552
553
554
555
556
557 anon_vma_unlock_read(anon_vma);
558 __put_anon_vma(anon_vma);
559 anon_vma = NULL;
560 }
561
562 return anon_vma;
563
564out:
565 rcu_read_unlock();
566 return anon_vma;
567}
568
569void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
570{
571 anon_vma_unlock_read(anon_vma);
572}
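
/*
 * Illustrative sketch (not part of this file): the lock/unlock pair above
 * brackets a walk of the page's anon_vma, for example:
 *
 *	struct anon_vma *av = page_lock_anon_vma_read(page);
 *	if (av) {
 *		... walk av->rb_root for this page's index range ...
 *		page_unlock_anon_vma_read(av);
 *	}
 */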
573
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs.  This
 * matters when writable TLB entries were batched by try_to_unmap_one():
 * a stale writable entry could otherwise allow writes to a page after it
 * has been unmapped.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure the compiler does not re-order the setting of
	 * tlb_flush_batched before the PTE is cleared.
	 */
	barrier();
	mm->tlb_flush_batched = true;

	/*
	 * If the PTE was dirty the TLB entry may be writable; the caller
	 * must use try_to_unmap_flush_dirty() before the page is queued
	 * for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}
/*
 * Returns true if the TLB flush should be deferred to the end of a batch
 * of unmap operations to save IPIs.
 */
629static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
630{
631 bool should_defer = false;
632
633 if (!(flags & TTU_BATCH_FLUSH))
634 return false;
635
636
637 if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
638 should_defer = true;
639 put_cpu();
640
641 return should_defer;
642}
643
/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB before
 * dropping the PTL.  A parallel operation on the same range (e.g. mprotect
 * or munmap) may then see an already-cleared PTE and skip its own flush
 * while stale TLB entries still exist on other CPUs.  Such callers check
 * mm->tlb_flush_batched and flush here before relying on the unmap being
 * complete.
 */
659void flush_tlb_batched_pending(struct mm_struct *mm)
660{
661 if (mm->tlb_flush_batched) {
662 flush_tlb_mm(mm);
663
664
665
666
667
668 barrier();
669 mm->tlb_flush_batched = false;
670 }
671}
672#else
673static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
674{
675}
676
677static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
678{
679 return false;
680}
681#endif
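
/*
 * Illustrative sketch (not part of this file): how the batching helpers
 * above combine in an unmap loop (try_to_unmap_one() below follows this
 * pattern); the surrounding loop and the final flush site are hypothetical:
 *
 *	if (should_defer_flush(mm, flags)) {
 *		pteval = ptep_get_and_clear(mm, address, pte);
 *		set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
 *	} else {
 *		pteval = ptep_clear_flush(vma, address, pte);
 *	}
 *	...
 *	try_to_unmap_flush();	// once, after the whole batch
 */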
682
/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
687unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
688{
689 unsigned long address;
690 if (PageAnon(page)) {
691 struct anon_vma *page__anon_vma = page_anon_vma(page);
692
693
694
695
696 if (!vma->anon_vma || !page__anon_vma ||
697 vma->anon_vma->root != page__anon_vma->root)
698 return -EFAULT;
699 } else if (page->mapping) {
700 if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
701 return -EFAULT;
702 } else
703 return -EFAULT;
704 address = __vma_address(page, vma);
705 if (unlikely(address < vma->vm_start || address >= vma->vm_end))
706 return -EFAULT;
707 return address;
708}
709
710pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
711{
712 pgd_t *pgd;
713 p4d_t *p4d;
714 pud_t *pud;
715 pmd_t *pmd = NULL;
716 pmd_t pmde;
717
718 pgd = pgd_offset(mm, address);
719 if (!pgd_present(*pgd))
720 goto out;
721
722 p4d = p4d_offset(pgd, address);
723 if (!p4d_present(*p4d))
724 goto out;
725
726 pud = pud_offset(p4d, address);
727 if (!pud_present(*pud))
728 goto out;
729
730 pmd = pmd_offset(pud, address);
	/*
	 * A racing THP split or collapse can change *pmd under us: read it
	 * once (the barrier stops the compiler from re-reading it) and only
	 * return the pmd if it is present and not huge, i.e. safe to walk
	 * with pte_offset_map() and friends.
	 */
736 pmde = *pmd;
737 barrier();
738 if (!pmd_present(pmde) || pmd_trans_huge(pmde))
739 pmd = NULL;
740out:
741 return pmd;
742}
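
/*
 * Illustrative sketch (not part of this file): mm_find_pmd() is a helper
 * for walkers that want to continue to the pte level themselves; a
 * hypothetical user, assuming the usual pte_offset_map_lock() API:
 *
 *	pmd_t *pmd = mm_find_pmd(mm, address);
 *	if (pmd) {
 *		spinlock_t *ptl;
 *		pte_t *pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 *		... examine *pte ...
 *		pte_unmap_unlock(pte, ptl);
 *	}
 */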
743
744struct page_referenced_arg {
745 int mapcount;
746 int referenced;
747 unsigned long vm_flags;
748 struct mem_cgroup *memcg;
749};
750
751
752
753static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
754 unsigned long address, void *arg)
755{
756 struct page_referenced_arg *pra = arg;
757 struct page_vma_mapped_walk pvmw = {
758 .page = page,
759 .vma = vma,
760 .address = address,
761 };
762 int referenced = 0;
763
764 while (page_vma_mapped_walk(&pvmw)) {
765 address = pvmw.address;
766
767 if (vma->vm_flags & VM_LOCKED) {
768 page_vma_mapped_walk_done(&pvmw);
769 pra->vm_flags |= VM_LOCKED;
770 return false;
771 }
772
773 if (pvmw.pte) {
774 if (ptep_clear_flush_young_notify(vma, address,
775 pvmw.pte)) {
				/*
				 * Don't treat a reference through a
				 * sequentially read mapping as a real
				 * reference: such pages are streamed once
				 * and are better reclaimed.  A use through
				 * any other mapping is still counted.
				 */
784 if (likely(!(vma->vm_flags & VM_SEQ_READ)))
785 referenced++;
786 }
787 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
788 if (pmdp_clear_flush_young_notify(vma, address,
789 pvmw.pmd))
790 referenced++;
791 } else {
792
793 WARN_ON_ONCE(1);
794 }
795
796 pra->mapcount--;
797 }
798
799 if (referenced)
800 clear_page_idle(page);
801 if (test_and_clear_page_young(page))
802 referenced++;
803
804 if (referenced) {
805 pra->referenced++;
806 pra->vm_flags |= vma->vm_flags;
807 }
808
809 if (!pra->mapcount)
810 return false;
811
812 return true;
813}
814
815static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
816{
817 struct page_referenced_arg *pra = arg;
818 struct mem_cgroup *memcg = pra->memcg;
819
820 if (!mm_match_cgroup(vma->vm_mm, memcg))
821 return true;
822
823 return false;
824}
825
/*
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds the page lock
 * @memcg: target memory cgroup
 * @vm_flags: collects the vm_flags of all the VMAs that referenced the page
 *
 * Quick test_and_clear_referenced for all mappings of a page; returns the
 * number of ptes which referenced the page.
 */
836int page_referenced(struct page *page,
837 int is_locked,
838 struct mem_cgroup *memcg,
839 unsigned long *vm_flags)
840{
841 int we_locked = 0;
842 struct page_referenced_arg pra = {
843 .mapcount = total_mapcount(page),
844 .memcg = memcg,
845 };
846 struct rmap_walk_control rwc = {
847 .rmap_one = page_referenced_one,
848 .arg = (void *)&pra,
849 .anon_lock = page_lock_anon_vma_read,
850 };
851
852 *vm_flags = 0;
853 if (!page_mapped(page))
854 return 0;
855
856 if (!page_rmapping(page))
857 return 0;
858
859 if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
860 we_locked = trylock_page(page);
861 if (!we_locked)
862 return 1;
863 }
864
	/*
	 * If we are reclaiming on behalf of a cgroup, skip counting
	 * references from vmas belonging to other cgroups.
	 */
870 if (memcg) {
871 rwc.invalid_vma = invalid_page_referenced_vma;
872 }
873
874 rmap_walk(page, &rwc);
875 *vm_flags = pra.vm_flags;
876
877 if (we_locked)
878 unlock_page(page);
879
880 return pra.referenced;
881}
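
/*
 * Illustrative sketch (not part of this file): reclaim-style usage of
 * page_referenced(); the decision logic is a simplified assumption and
 * "sc_memcg" stands in for the reclaim target cgroup:
 *
 *	unsigned long vm_flags;
 *	int refs = page_referenced(page, 1, sc_memcg, &vm_flags);
 *
 *	if (vm_flags & VM_LOCKED)
 *		... keep the page, it is mlocked somewhere ...
 *	else if (refs)
 *		... recently used, prefer to keep it ...
 */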
882
883static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
884 unsigned long address, void *arg)
885{
886 struct page_vma_mapped_walk pvmw = {
887 .page = page,
888 .vma = vma,
889 .address = address,
890 .flags = PVMW_SYNC,
891 };
892 unsigned long start = address, end;
893 int *cleaned = arg;
894
	/*
	 * We have to assume the worst case (a pmd mapping) for the
	 * invalidation range; the page cannot be freed from under us here.
	 */
899 end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
900 mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
901
902 while (page_vma_mapped_walk(&pvmw)) {
903 unsigned long cstart;
904 int ret = 0;
905
906 cstart = address = pvmw.address;
907 if (pvmw.pte) {
908 pte_t entry;
909 pte_t *pte = pvmw.pte;
910
911 if (!pte_dirty(*pte) && !pte_write(*pte))
912 continue;
913
914 flush_cache_page(vma, address, pte_pfn(*pte));
915 entry = ptep_clear_flush(vma, address, pte);
916 entry = pte_wrprotect(entry);
917 entry = pte_mkclean(entry);
918 set_pte_at(vma->vm_mm, address, pte, entry);
919 ret = 1;
920 } else {
921#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
922 pmd_t *pmd = pvmw.pmd;
923 pmd_t entry;
924
925 if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
926 continue;
927
928 flush_cache_page(vma, address, page_to_pfn(page));
929 entry = pmdp_huge_clear_flush(vma, address, pmd);
930 entry = pmd_wrprotect(entry);
931 entry = pmd_mkclean(entry);
932 set_pmd_at(vma->vm_mm, address, pmd, entry);
933 cstart &= PMD_MASK;
934 ret = 1;
935#else
936
937 WARN_ON_ONCE(1);
938#endif
939 }
940
		/*
		 * No need to call mmu_notifier_invalidate_range() here: we
		 * are only downgrading page table protection, not pointing
		 * the entry at a different page.
		 * (See Documentation/vm/mmu_notifier.rst.)
		 */
948 if (ret)
949 (*cleaned)++;
950 }
951
952 mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
953
954 return true;
955}
956
957static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
958{
959 if (vma->vm_flags & VM_SHARED)
960 return false;
961
962 return true;
963}
964
965int page_mkclean(struct page *page)
966{
967 int cleaned = 0;
968 struct address_space *mapping;
969 struct rmap_walk_control rwc = {
970 .arg = (void *)&cleaned,
971 .rmap_one = page_mkclean_one,
972 .invalid_vma = invalid_mkclean_vma,
973 };
974
975 BUG_ON(!PageLocked(page));
976
977 if (!page_mapped(page))
978 return 0;
979
980 mapping = page_mapping(page);
981 if (!mapping)
982 return 0;
983
984 rmap_walk(page, &rwc);
985
986 return cleaned;
987}
988EXPORT_SYMBOL_GPL(page_mkclean);
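
/*
 * Illustrative sketch (not part of this file): page_mkclean() is the
 * write-protect step a writeback path can take before starting IO, so a
 * later re-dirtying faults and is noticed again; the caller shown is
 * hypothetical:
 *
 *	lock_page(page);
 *	if (page_mkclean(page))
 *		set_page_dirty(page);	// transfer pte dirty bits to the page
 *	unlock_page(page);
 */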
989
/*
 * page_move_anon_rmap - move a page to our anon_vma
 * @page: the page to move to our anon_vma
 * @vma: the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event, that
 * page can be moved into the anon_vma that belongs to just that process,
 * so the rmap code no longer needs to search parent or sibling processes.
 */
1000void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
1001{
1002 struct anon_vma *anon_vma = vma->anon_vma;
1003
1004 page = compound_head(page);
1005
1006 VM_BUG_ON_PAGE(!PageLocked(page), page);
1007 VM_BUG_ON_VMA(!anon_vma, vma);
1008
1009 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1010
1011
1012
1013
1014
1015 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
1016}
1017
/*
 * __page_set_anon_rmap - set up a new anonymous rmap
 * @page: the page to set up
 * @vma: the vm area into which the page is mapped
 * @address: the user virtual address mapped
 * @exclusive: the page is exclusively owned by the current process
 */
1025static void __page_set_anon_rmap(struct page *page,
1026 struct vm_area_struct *vma, unsigned long address, int exclusive)
1027{
1028 struct anon_vma *anon_vma = vma->anon_vma;
1029
1030 BUG_ON(!anon_vma);
1031
1032 if (PageAnon(page))
1033 return;
1034
	/*
	 * If the page is not exclusively mapped into this vma, we must use
	 * the _oldest_ possible anon_vma, i.e. the root, so that every
	 * process that might map the page can find it via rmap.
	 */
1040 if (!exclusive)
1041 anon_vma = anon_vma->root;
1042
1043 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1044 page->mapping = (struct address_space *) anon_vma;
1045 page->index = linear_page_index(vma, address);
1046}
1047
/*
 * __page_check_anon_rmap - sanity check an anonymous rmap addition
 * @page: the page to check
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
1054static void __page_check_anon_rmap(struct page *page,
1055 struct vm_area_struct *vma, unsigned long address)
1056{
1057#ifdef CONFIG_DEBUG_VM
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070 BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
1071 BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address));
1072#endif
1073}
1074
/*
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 * @compound: charge the page as compound or small page
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily from PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
1087void page_add_anon_rmap(struct page *page,
1088 struct vm_area_struct *vma, unsigned long address, bool compound)
1089{
1090 do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
1091}
/*
 * Special version of the above for do_swap_page(), which may come with an
 * exclusive swapin and therefore needs the extra flags argument
 * (RMAP_EXCLUSIVE, RMAP_COMPOUND); everybody else should use
 * page_add_anon_rmap().
 */
1098void do_page_add_anon_rmap(struct page *page,
1099 struct vm_area_struct *vma, unsigned long address, int flags)
1100{
1101 bool compound = flags & RMAP_COMPOUND;
1102 bool first;
1103
1104 if (compound) {
1105 atomic_t *mapcount;
1106 VM_BUG_ON_PAGE(!PageLocked(page), page);
1107 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
1108 mapcount = compound_mapcount_ptr(page);
1109 first = atomic_inc_and_test(mapcount);
1110 } else {
1111 first = atomic_inc_and_test(&page->_mapcount);
1112 }
1113
1114 if (first) {
1115 int nr = compound ? hpage_nr_pages(page) : 1;
1116
1117
1118
1119
1120
1121
1122 if (compound)
1123 __inc_node_page_state(page, NR_ANON_THPS);
1124 __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
1125 }
1126 if (unlikely(PageKsm(page)))
1127 return;
1128
1129 VM_BUG_ON_PAGE(!PageLocked(page), page);
1130
1131
1132 if (first)
1133 __page_set_anon_rmap(page, vma, address,
1134 flags & RMAP_EXCLUSIVE);
1135 else
1136 __page_check_anon_rmap(page, vma, address);
1137}
1138
/*
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 * @compound: charge the page as compound or small page
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * The page does not need to be locked; it is always exclusive.
 */
1150void page_add_new_anon_rmap(struct page *page,
1151 struct vm_area_struct *vma, unsigned long address, bool compound)
1152{
1153 int nr = compound ? hpage_nr_pages(page) : 1;
1154
1155 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
1156 __SetPageSwapBacked(page);
1157 if (compound) {
1158 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
1159
1160 atomic_set(compound_mapcount_ptr(page), 0);
1161 __inc_node_page_state(page, NR_ANON_THPS);
1162 } else {
1163
1164 VM_BUG_ON_PAGE(PageTransCompound(page), page);
1165
1166 atomic_set(&page->_mapcount, 0);
1167 }
1168 __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
1169 __page_set_anon_rmap(page, vma, address, 1);
1170}
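
/*
 * Illustrative sketch (not part of this file): the usual fault-path order
 * around page_add_new_anon_rmap(); names and the exact sequence are an
 * assumption for illustration:
 *
 *	entry = mk_pte(page, vma->vm_page_prot);
 *	page_add_new_anon_rmap(page, vma, address, false);
 *	lru_cache_add_active_or_unevictable(page, vma);
 *	set_pte_at(mm, address, pte, entry);
 */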
1171
/*
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 * @compound: charge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
1179void page_add_file_rmap(struct page *page, bool compound)
1180{
1181 int i, nr = 1;
1182
1183 VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
1184 lock_page_memcg(page);
1185 if (compound && PageTransHuge(page)) {
1186 for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
1187 if (atomic_inc_and_test(&page[i]._mapcount))
1188 nr++;
1189 }
1190 if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
1191 goto out;
1192 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
1193 __inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
1194 } else {
1195 if (PageTransCompound(page) && page_mapping(page)) {
1196 VM_WARN_ON_ONCE(!PageLocked(page));
1197
1198 SetPageDoubleMap(compound_head(page));
1199 if (PageMlocked(page))
1200 clear_page_mlock(compound_head(page));
1201 }
1202 if (!atomic_inc_and_test(&page->_mapcount))
1203 goto out;
1204 }
1205 __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
1206out:
1207 unlock_page_memcg(page);
1208}
1209
1210static void page_remove_file_rmap(struct page *page, bool compound)
1211{
1212 int i, nr = 1;
1213
1214 VM_BUG_ON_PAGE(compound && !PageHead(page), page);
1215 lock_page_memcg(page);
1216
1217
1218 if (unlikely(PageHuge(page))) {
1219
1220 atomic_dec(compound_mapcount_ptr(page));
1221 goto out;
1222 }
1223
1224
1225 if (compound && PageTransHuge(page)) {
1226 for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
1227 if (atomic_add_negative(-1, &page[i]._mapcount))
1228 nr++;
1229 }
1230 if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
1231 goto out;
1232 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
1233 __dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
1234 } else {
1235 if (!atomic_add_negative(-1, &page->_mapcount))
1236 goto out;
1237 }
1238
1239
1240
1241
1242
1243
1244 __mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);
1245
1246 if (unlikely(PageMlocked(page)))
1247 clear_page_mlock(page);
1248out:
1249 unlock_page_memcg(page);
1250}
1251
1252static void page_remove_anon_compound_rmap(struct page *page)
1253{
1254 int i, nr;
1255
1256 if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
1257 return;
1258
1259
1260 if (unlikely(PageHuge(page)))
1261 return;
1262
1263 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1264 return;
1265
1266 __dec_node_page_state(page, NR_ANON_THPS);
1267
1268 if (TestClearPageDoubleMap(page)) {
1269
1270
1271
1272
1273 for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
1274 if (atomic_add_negative(-1, &page[i]._mapcount))
1275 nr++;
1276 }
1277 } else {
1278 nr = HPAGE_PMD_NR;
1279 }
1280
1281 if (unlikely(PageMlocked(page)))
1282 clear_page_mlock(page);
1283
1284 if (nr) {
1285 __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
1286 deferred_split_huge_page(page);
1287 }
1288}
1289
/*
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 * @compound: uncharge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
1297void page_remove_rmap(struct page *page, bool compound)
1298{
1299 if (!PageAnon(page))
1300 return page_remove_file_rmap(page, compound);
1301
1302 if (compound)
1303 return page_remove_anon_compound_rmap(page);
1304
1305
1306 if (!atomic_add_negative(-1, &page->_mapcount))
1307 return;
1308
1309
1310
1311
1312
1313
1314 __dec_node_page_state(page, NR_ANON_MAPPED);
1315
1316 if (unlikely(PageMlocked(page)))
1317 clear_page_mlock(page);
1318
1319 if (PageTransCompound(page))
1320 deferred_split_huge_page(compound_head(page));
1321
	/*
	 * It would be tidy to reset the PageAnon mapping here, but that
	 * might overwrite a racing page_add_anon_rmap which increments
	 * mapcount after us but sets mapping before us: so leave things as
	 * they are.  The stale mapping is cleaned up when the page is
	 * finally freed, and leaving it set helps swapoff reinstate ptes
	 * faster for pages still in swapcache.
	 */
1331}
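
/*
 * Illustrative sketch (not part of this file): page_remove_rmap() pairs
 * with the page_add_*_rmap() calls and is made while the pte lock is held,
 * typically right after the pte has been cleared in a zap path
 * (simplified):
 *
 *	pteval = ptep_get_and_clear_full(mm, address, pte, full);
 *	...
 *	page_remove_rmap(page, false);
 *	... queue the page for freeing once the TLB has been flushed ...
 */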
1332
/*
 * @arg: enum ttu_flags will be passed to this argument
 */
1336static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1337 unsigned long address, void *arg)
1338{
1339 struct mm_struct *mm = vma->vm_mm;
1340 struct page_vma_mapped_walk pvmw = {
1341 .page = page,
1342 .vma = vma,
1343 .address = address,
1344 };
1345 pte_t pteval;
1346 struct page *subpage;
1347 bool ret = true;
1348 unsigned long start = address, end;
1349 enum ttu_flags flags = (enum ttu_flags)arg;
1350
1351
1352 if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
1353 return true;
1354
1355 if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
1356 is_zone_device_page(page) && !is_device_private_page(page))
1357 return true;
1358
1359 if (flags & TTU_SPLIT_HUGE_PMD) {
1360 split_huge_pmd_address(vma, address,
1361 flags & TTU_SPLIT_FREEZE, page);
1362 }
	/*
	 * For THP we have to assume the worst case (a pmd mapping) for the
	 * invalidation range.  The page itself cannot be freed here, as the
	 * caller of try_to_unmap() must hold a reference on it.
	 */
1372 end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
1373 if (PageHuge(page)) {
1374
1375
1376
1377
1378 adjust_range_if_pmd_sharing_possible(vma, &start, &end);
1379 }
1380 mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
1381
1382 while (page_vma_mapped_walk(&pvmw)) {
1383#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1384
1385 if (!pvmw.pte && (flags & TTU_MIGRATION)) {
1386 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
1387
1388 set_pmd_migration_entry(&pvmw, page);
1389 continue;
1390 }
1391#endif
		/*
		 * If the page is mlock()ed in this vma we must not unmap it;
		 * move it back to the unevictable state instead and abort
		 * the walk.
		 */
1398 if (!(flags & TTU_IGNORE_MLOCK)) {
1399 if (vma->vm_flags & VM_LOCKED) {
1400
1401 if (!PageTransCompound(page)) {
1402
1403
1404
1405
1406 mlock_vma_page(page);
1407 }
1408 ret = false;
1409 page_vma_mapped_walk_done(&pvmw);
1410 break;
1411 }
1412 if (flags & TTU_MUNLOCK)
1413 continue;
1414 }
1415
1416
1417 VM_BUG_ON_PAGE(!pvmw.pte, page);
1418
1419 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
1420 address = pvmw.address;
1421
1422 if (PageHuge(page)) {
1423 if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
1424
1425
1426
1427
1428
1429
1430
1431 flush_cache_range(vma, start, end);
1432 flush_tlb_range(vma, start, end);
1433 mmu_notifier_invalidate_range(mm, start, end);
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444 page_vma_mapped_walk_done(&pvmw);
1445 break;
1446 }
1447 }
1448
1449 if (IS_ENABLED(CONFIG_MIGRATION) &&
1450 (flags & TTU_MIGRATION) &&
1451 is_zone_device_page(page)) {
1452 swp_entry_t entry;
1453 pte_t swp_pte;
1454
1455 pteval = ptep_get_and_clear(mm, pvmw.address, pvmw.pte);
1456
1457
1458
1459
1460
1461
1462 entry = make_migration_entry(page, 0);
1463 swp_pte = swp_entry_to_pte(entry);
1464 if (pte_soft_dirty(pteval))
1465 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1466 set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
1467
1468
1469
1470
1471 goto discard;
1472 }
1473
1474 if (!(flags & TTU_IGNORE_ACCESS)) {
1475 if (ptep_clear_flush_young_notify(vma, address,
1476 pvmw.pte)) {
1477 ret = false;
1478 page_vma_mapped_walk_done(&pvmw);
1479 break;
1480 }
1481 }
1482
1483
1484 flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
1485 if (should_defer_flush(mm, flags)) {
			/*
			 * We clear the PTE but do not flush, so a remote CPU
			 * could still be writing to the page.  If the entry
			 * was previously clean then the architecture must
			 * guarantee that a clear->dirty transition on a
			 * cached TLB entry is written through and traps if
			 * the PTE is writable.
			 */
1494 pteval = ptep_get_and_clear(mm, address, pvmw.pte);
1495
1496 set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
1497 } else {
1498 pteval = ptep_clear_flush(vma, address, pvmw.pte);
1499 }
1500
1501
1502 if (pte_dirty(pteval))
1503 set_page_dirty(page);
1504
1505
1506 update_hiwater_rss(mm);
1507
1508 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
1509 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
1510 if (PageHuge(page)) {
1511 int nr = 1 << compound_order(page);
1512 hugetlb_count_sub(nr, mm);
1513 set_huge_swap_pte_at(mm, address,
1514 pvmw.pte, pteval,
1515 vma_mmu_pagesize(vma));
1516 } else {
1517 dec_mm_counter(mm, mm_counter(page));
1518 set_pte_at(mm, address, pvmw.pte, pteval);
1519 }
1520
1521 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
			/*
			 * The guest indicated that the page content is of no
			 * interest anymore (pte_unused).  Simply discard the
			 * pte; a later access faults in a fresh zero page.
			 * With userfaultfd armed we must not do this, as the
			 * userfault consumer does not expect faults on pages
			 * it has already copied.
			 */
1532 dec_mm_counter(mm, mm_counter(page));
1533
1534 mmu_notifier_invalidate_range(mm, address,
1535 address + PAGE_SIZE);
1536 } else if (IS_ENABLED(CONFIG_MIGRATION) &&
1537 (flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) {
1538 swp_entry_t entry;
1539 pte_t swp_pte;
1540
1541 if (arch_unmap_one(mm, vma, address, pteval) < 0) {
1542 set_pte_at(mm, address, pvmw.pte, pteval);
1543 ret = false;
1544 page_vma_mapped_walk_done(&pvmw);
1545 break;
1546 }
			/*
			 * Replace the PTE with a special migration entry that
			 * records the pfn and write permission; faults wait
			 * on it until migration completes.
			 */
1553 entry = make_migration_entry(subpage,
1554 pte_write(pteval));
1555 swp_pte = swp_entry_to_pte(entry);
1556 if (pte_soft_dirty(pteval))
1557 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1558 set_pte_at(mm, address, pvmw.pte, swp_pte);
1559
1560
1561
1562
1563 } else if (PageAnon(page)) {
1564 swp_entry_t entry = { .val = page_private(subpage) };
1565 pte_t swp_pte;
1566
1567
1568
1569
1570 if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
1571 WARN_ON_ONCE(1);
1572 ret = false;
1573
1574 mmu_notifier_invalidate_range(mm, address,
1575 address + PAGE_SIZE);
1576 page_vma_mapped_walk_done(&pvmw);
1577 break;
1578 }
1579
1580
1581 if (!PageSwapBacked(page)) {
1582 if (!PageDirty(page)) {
1583
1584 mmu_notifier_invalidate_range(mm,
1585 address, address + PAGE_SIZE);
1586 dec_mm_counter(mm, MM_ANONPAGES);
1587 goto discard;
1588 }
				/*
				 * The lazily-freed (MADV_FREE) page was
				 * redirtied, so it cannot be discarded: put
				 * the pte back, mark the page swapbacked
				 * again and abort the unmap.
				 */
1594 set_pte_at(mm, address, pvmw.pte, pteval);
1595 SetPageSwapBacked(page);
1596 ret = false;
1597 page_vma_mapped_walk_done(&pvmw);
1598 break;
1599 }
1600
1601 if (swap_duplicate(entry) < 0) {
1602 set_pte_at(mm, address, pvmw.pte, pteval);
1603 ret = false;
1604 page_vma_mapped_walk_done(&pvmw);
1605 break;
1606 }
1607 if (arch_unmap_one(mm, vma, address, pteval) < 0) {
1608 set_pte_at(mm, address, pvmw.pte, pteval);
1609 ret = false;
1610 page_vma_mapped_walk_done(&pvmw);
1611 break;
1612 }
1613 if (list_empty(&mm->mmlist)) {
1614 spin_lock(&mmlist_lock);
1615 if (list_empty(&mm->mmlist))
1616 list_add(&mm->mmlist, &init_mm.mmlist);
1617 spin_unlock(&mmlist_lock);
1618 }
1619 dec_mm_counter(mm, MM_ANONPAGES);
1620 inc_mm_counter(mm, MM_SWAPENTS);
1621 swp_pte = swp_entry_to_pte(entry);
1622 if (pte_soft_dirty(pteval))
1623 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1624 set_pte_at(mm, address, pvmw.pte, swp_pte);
1625
1626 mmu_notifier_invalidate_range(mm, address,
1627 address + PAGE_SIZE);
1628 } else {
			/*
			 * This is a locked file-backed page: it cannot be
			 * removed from the page cache and replaced by a new
			 * page before mmu_notifier_invalidate_range_end(),
			 * so no concurrent thread can repoint its page table
			 * entry while a device is still using this page.
			 * Only the rss accounting needs updating here.
			 * (See Documentation/vm/mmu_notifier.rst.)
			 */
1646 dec_mm_counter(mm, mm_counter_file(page));
1647 }
1648discard:
1649
1650
1651
1652
1653
1654
1655
1656 page_remove_rmap(subpage, PageHuge(page));
1657 put_page(page);
1658 }
1659
1660 mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
1661
1662 return ret;
1663}
1664
1665bool is_vma_temporary_stack(struct vm_area_struct *vma)
1666{
1667 int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
1668
1669 if (!maybe_stack)
1670 return false;
1671
1672 if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
1673 VM_STACK_INCOMPLETE_SETUP)
1674 return true;
1675
1676 return false;
1677}
1678
1679static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
1680{
1681 return is_vma_temporary_stack(vma);
1682}
1683
1684static int page_mapcount_is_zero(struct page *page)
1685{
1686 return !total_mapcount(page);
1687}
1688
/*
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this page,
 * used in the pageout path.  Caller must hold the page lock.
 *
 * If unmap is successful, return true.  Otherwise, false.
 */
1699bool try_to_unmap(struct page *page, enum ttu_flags flags)
1700{
1701 struct rmap_walk_control rwc = {
1702 .rmap_one = try_to_unmap_one,
1703 .arg = (void *)flags,
1704 .done = page_mapcount_is_zero,
1705 .anon_lock = page_lock_anon_vma_read,
1706 };
	/*
	 * During exec, a temporary VMA is set up and later moved.  The VMA
	 * is moved under the anon_vma lock but not the page tables, leaving
	 * a window in which migration cannot find its migration ptes.
	 * Rather than taking more locks in exec, just skip such temporary
	 * stack VMAs during migration.
	 */
1716 if ((flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))
1717 && !PageKsm(page) && PageAnon(page))
1718 rwc.invalid_vma = invalid_migration_vma;
1719
1720 if (flags & TTU_RMAP_LOCKED)
1721 rmap_walk_locked(page, &rwc);
1722 else
1723 rmap_walk(page, &rwc);
1724
1725 return !page_mapcount(page) ? true : false;
1726}
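
/*
 * Illustrative sketch (not part of this file): a migration-style caller of
 * try_to_unmap(); the flag combination is an example, not a statement
 * about any particular caller:
 *
 *	lock_page(page);
 *	if (try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK)) {
 *		... all ptes now hold migration entries; copy the page
 *		    and call remove_migration_ptes() on the new one ...
 *	}
 *	unlock_page(page);
 */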
1727
1728static int page_not_mapped(struct page *page)
1729{
1730 return !page_mapped(page);
1731};
1732
/*
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page to
 * make sure nobody else has this page mlocked.  The page will be returned
 * with PG_mlocked cleared if no other vmas have it mlocked.
 */
1742void try_to_munlock(struct page *page)
1743{
1744 struct rmap_walk_control rwc = {
1745 .rmap_one = try_to_unmap_one,
1746 .arg = (void *)TTU_MUNLOCK,
1747 .done = page_not_mapped,
1748 .anon_lock = page_lock_anon_vma_read,
1749
1750 };
1751
1752 VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
1753 VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
1754
1755 rmap_walk(page, &rwc);
1756}
1757
1758void __put_anon_vma(struct anon_vma *anon_vma)
1759{
1760 struct anon_vma *root = anon_vma->root;
1761
1762 anon_vma_free(anon_vma);
1763 if (root != anon_vma && atomic_dec_and_test(&root->refcount))
1764 anon_vma_free(root);
1765}
1766
1767static struct anon_vma *rmap_walk_anon_lock(struct page *page,
1768 struct rmap_walk_control *rwc)
1769{
1770 struct anon_vma *anon_vma;
1771
1772 if (rwc->anon_lock)
1773 return rwc->anon_lock(page);
1774
1775
1776
1777
1778
1779
1780
1781 anon_vma = page_anon_vma(page);
1782 if (!anon_vma)
1783 return NULL;
1784
1785 anon_vma_lock_read(anon_vma);
1786 return anon_vma;
1787}
1788
/*
 * rmap_walk_anon - do something to an anonymous page using the anon_vma
 * and its interval tree
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 * @locked: caller already holds the relevant anon_vma lock
 *
 * Find all the mappings of a page using the vma chains contained in the
 * page's anon_vma, and call rwc->rmap_one() for each vma that maps it.
 */
1803static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
1804 bool locked)
1805{
1806 struct anon_vma *anon_vma;
1807 pgoff_t pgoff_start, pgoff_end;
1808 struct anon_vma_chain *avc;
1809
1810 if (locked) {
1811 anon_vma = page_anon_vma(page);
1812
1813 VM_BUG_ON_PAGE(!anon_vma, page);
1814 } else {
1815 anon_vma = rmap_walk_anon_lock(page, rwc);
1816 }
1817 if (!anon_vma)
1818 return;
1819
1820 pgoff_start = page_to_pgoff(page);
1821 pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
1822 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
1823 pgoff_start, pgoff_end) {
1824 struct vm_area_struct *vma = avc->vma;
1825 unsigned long address = vma_address(page, vma);
1826
1827 cond_resched();
1828
1829 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
1830 continue;
1831
1832 if (!rwc->rmap_one(page, vma, address, rwc->arg))
1833 break;
1834 if (rwc->done && rwc->done(page))
1835 break;
1836 }
1837
1838 if (!locked)
1839 anon_vma_unlock_read(anon_vma);
1840}
1841
/*
 * rmap_walk_file - do something to a file page using the object-based
 * rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 * @locked: caller already holds the relevant i_mmap lock
 *
 * Find all the mappings of a page using the address_space i_mmap interval
 * tree and call rwc->rmap_one() for each vma that maps it.
 */
1855static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
1856 bool locked)
1857{
1858 struct address_space *mapping = page_mapping(page);
1859 pgoff_t pgoff_start, pgoff_end;
1860 struct vm_area_struct *vma;
	/*
	 * The page lock not only makes sure that page->mapping cannot be
	 * suddenly truncated to NULL, it also ensures the structure at
	 * @mapping cannot be freed and reused yet, so we can safely take
	 * mapping->i_mmap_rwsem.
	 */
1868 VM_BUG_ON_PAGE(!PageLocked(page), page);
1869
1870 if (!mapping)
1871 return;
1872
1873 pgoff_start = page_to_pgoff(page);
1874 pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
1875 if (!locked)
1876 i_mmap_lock_read(mapping);
1877 vma_interval_tree_foreach(vma, &mapping->i_mmap,
1878 pgoff_start, pgoff_end) {
1879 unsigned long address = vma_address(page, vma);
1880
1881 cond_resched();
1882
1883 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
1884 continue;
1885
1886 if (!rwc->rmap_one(page, vma, address, rwc->arg))
1887 goto done;
1888 if (rwc->done && rwc->done(page))
1889 goto done;
1890 }
1891
1892done:
1893 if (!locked)
1894 i_mmap_unlock_read(mapping);
1895}
1896
1897void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
1898{
1899 if (unlikely(PageKsm(page)))
1900 rmap_walk_ksm(page, rwc);
1901 else if (PageAnon(page))
1902 rmap_walk_anon(page, rwc, false);
1903 else
1904 rmap_walk_file(page, rwc, false);
1905}
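
/*
 * Illustrative sketch (not part of this file): rmap_walk() is driven by a
 * struct rmap_walk_control, exactly as page_referenced() and page_mkclean()
 * above do; a minimal hypothetical walker might look like:
 *
 *	static bool my_rmap_one(struct page *page, struct vm_area_struct *vma,
 *				unsigned long address, void *arg)
 *	{
 *		... inspect or modify the mapping at @address ...
 *		return true;	// keep walking; false stops the walk
 *	}
 *
 *	struct rmap_walk_control rwc = { .rmap_one = my_rmap_one };
 *	rmap_walk(page, &rwc);
 */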
1906
1907
1908void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
1909{
1910
1911 VM_BUG_ON_PAGE(PageKsm(page), page);
1912 if (PageAnon(page))
1913 rmap_walk_anon(page, rwc, true);
1914 else
1915 rmap_walk_file(page, rwc, true);
1916}
1917
1918#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting
 * code and no lru code, because hugepages are handled differently from
 * common pages.
 */
1924static void __hugepage_set_anon_rmap(struct page *page,
1925 struct vm_area_struct *vma, unsigned long address, int exclusive)
1926{
1927 struct anon_vma *anon_vma = vma->anon_vma;
1928
1929 BUG_ON(!anon_vma);
1930
1931 if (PageAnon(page))
1932 return;
1933 if (!exclusive)
1934 anon_vma = anon_vma->root;
1935
1936 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1937 page->mapping = (struct address_space *) anon_vma;
1938 page->index = linear_page_index(vma, address);
1939}
1940
1941void hugepage_add_anon_rmap(struct page *page,
1942 struct vm_area_struct *vma, unsigned long address)
1943{
1944 struct anon_vma *anon_vma = vma->anon_vma;
1945 int first;
1946
1947 BUG_ON(!PageLocked(page));
1948 BUG_ON(!anon_vma);
1949
1950 first = atomic_inc_and_test(compound_mapcount_ptr(page));
1951 if (first)
1952 __hugepage_set_anon_rmap(page, vma, address, 0);
1953}
1954
1955void hugepage_add_new_anon_rmap(struct page *page,
1956 struct vm_area_struct *vma, unsigned long address)
1957{
1958 BUG_ON(address < vma->vm_start || address >= vma->vm_end);
1959 atomic_set(compound_mapcount_ptr(page), 0);
1960 __hugepage_set_anon_rmap(page, vma, address, 1);
1961}
1962#endif
1963