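/*
 * mm/rmap.c - physical to virtual reverse mappings
 */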
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>

#include <asm/tlbflush.h>

#include <trace/events/tlb.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->degree = 1;
		anon_vma->parent = anon_vma;

		anon_vma->root = anon_vma;
	}

	return anon_vma;
}
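/*
 * Called when the last reference to an anon_vma is dropped.  Synchronize
 * against page_lock_anon_vma_read(): if a reader still holds the root
 * rwsem, briefly take and release it so the anon_vma cannot be freed
 * while that lock is still held.
 */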
static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}
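/*
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * Make sure the mapping described by @vma has an anon_vma attached to it,
 * so anonymous pages mapped into it can be reverse-mapped.  An existing
 * mergeable anon_vma is reused when possible; otherwise a new one is
 * allocated.  Returns 0 on success, -ENOMEM on allocation failure.
 */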
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);

	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);

		anon_vma->degree++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}
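/*
 * Helpers for batching the anon_vma root lock across loop iterations:
 * keep the currently held root locked and only switch locks when the
 * next anon_vma in the list has a different root.
 */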
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}
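/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * anon_vma_chain entries are first allocated with GFP_NOWAIT so the root
 * lock can stay held across iterations; if that fails, the lock is
 * dropped and the allocation retried with GFP_KERNEL.
 */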
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		if (!dst->anon_vma && src->anon_vma &&
		    anon_vma != src->anon_vma && anon_vma->degree < 2)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->degree++;
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}
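/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that the
 * parent process's vma was attached to.  Returns 0 on success, non-zero
 * on failure.
 */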
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	if (!pvma->anon_vma)
		return 0;

	vma->anon_vma = NULL;

	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	if (vma->anon_vma)
		return 0;

	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;

	get_anon_vma(anon_vma->root);

	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->degree++;
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
			anon_vma->parent->degree--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma) {
		vma->anon_vma->degree--;

		vma->anon_vma = NULL;
	}
	unlock_anon_vma_root(root);

	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->degree);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}
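/*
 * Getting a stable anon_vma reference for a page off the LRU:
 * the page's mapping is read under RCU; SLAB_TYPESAFE_BY_RCU keeps the
 * anon_vma memory valid, and atomic_inc_not_zero() takes a reference
 * only while the anon_vma is still in use.  The page_mapped() checks
 * guard against the page having been fully unmapped (and the anon_vma
 * possibly reused) in the meantime.
 */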
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}
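/*
 * Similar to page_get_anon_vma() except the caller also gets the
 * anon_vma read lock.  The fast path trylocks the root rwsem under RCU;
 * if that fails, a reference is taken and the lock is acquired the slow,
 * sleeping way.
 */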
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		if (!page_mapped(page)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	anon_vma_unlock_read(anon_vma);
}

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
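/*
 * Flush TLB entries for recently unmapped pages from remote CPUs, as
 * batched up by set_tlb_ubc_flush_pending().
 */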
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
	tlb_ubc->flush_required = true;

	barrier();
	mm->tlb_flush_batched = true;

	if (writable)
		tlb_ubc->writable = true;
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}
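/*
 * Flush any pending batched TLB flush for this mm (see
 * set_tlb_ubc_flush_pending()) before its page table entries are
 * modified, so callers do not race with a deferred flush from reclaim.
 */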
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	if (data_race(mm->tlb_flush_batched)) {
		flush_tlb_mm(mm);

		barrier();
		mm->tlb_flush_batched = false;
	}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif
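/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */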
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);

		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping) {
		if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return -EFAULT;
	return address;
}

pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pmd_t pmde;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);

	pmde = *pmd;
	barrier();
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		pmd = NULL;
out:
	return pmd;
}
struct page_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};

static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
				unsigned long address, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	int referenced = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;

		if (vma->vm_flags & VM_LOCKED) {
			page_vma_mapped_walk_done(&pvmw);
			pra->vm_flags |= VM_LOCKED;
			return false;
		}

		if (pvmw.pte) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
					referenced++;
			}
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
						pvmw.pmd))
				referenced++;
		} else {
			WARN_ON_ONCE(1);
		}

		pra->mapcount--;
	}

	if (referenced)
		clear_page_idle(page);
	if (test_and_clear_page_young(page))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags;
	}

	if (!pra->mapcount)
		return false;

	return true;
}

static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}
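/*
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: collect encountered vma->vm_flags which actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */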
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *memcg,
		    unsigned long *vm_flags)
{
	int we_locked = 0;
	struct page_referenced_arg pra = {
		.mapcount = total_mapcount(page),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = page_lock_anon_vma_read,
	};

	*vm_flags = 0;
	if (!pra.mapcount)
		return 0;

	if (!page_rmapping(page))
		return 0;

	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
		we_locked = trylock_page(page);
		if (!we_locked)
			return 1;
	}

	if (memcg) {
		rwc.invalid_vma = invalid_page_referenced_vma;
	}

	rmap_walk(page, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		unlock_page(page);

	return pra.referenced;
}
static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			     unsigned long address, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = PVMW_SYNC,
	};
	struct mmu_notifier_range range;
	int *cleaned = arg;

	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
				0, vma, vma->vm_mm, address,
				min(vma->vm_end, address + page_size(page)));
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
		int ret = 0;

		address = pvmw.address;
		if (pvmw.pte) {
			pte_t entry;
			pte_t *pte = pvmw.pte;

			if (!pte_dirty(*pte) && !pte_write(*pte))
				continue;

			flush_cache_page(vma, address, pte_pfn(*pte));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			pmd_t *pmd = pvmw.pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_page(vma, address, page_to_pfn(page));
			entry = pmdp_invalidate(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			ret = 1;
#else
			WARN_ON_ONCE(1);
#endif
		}

		if (ret)
			(*cleaned)++;
	}

	mmu_notifier_invalidate_range_end(&range);

	return true;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int page_mkclean(struct page *page)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!PageLocked(page));

	if (!page_mapped(page))
		return 0;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	rmap_walk(page, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(page_mkclean);
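/*
 * page_move_anon_rmap - move a page to our anon_vma
 * @page: the page to move to our anon_vma
 * @vma: the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code need not search the parent or sibling
 * processes.
 */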
void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	page = compound_head(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_VMA(!anon_vma, vma);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;

	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
}
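/*
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page: Page or Hugepage to add to rmap
 * @vma: VM area to add page to.
 * @address: User virtual address of the mapping
 * @exclusive: the page is exclusively owned by the current process
 */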
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;

	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
	page->index = linear_page_index(vma, address);
}

static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON_PAGE(page_anon_vma(page)->root != vma->anon_vma->root, page);
	VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
		       page);
}
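/*
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 * @compound: charge the page as compound or small page
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm.
 */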
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
}
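/*
 * Same as page_add_anon_rmap() but the caller passes the RMAP flags
 * (RMAP_COMPOUND, RMAP_EXCLUSIVE) directly.
 */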
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int flags)
{
	bool compound = flags & RMAP_COMPOUND;
	bool first;

	if (unlikely(PageKsm(page)))
		lock_page_memcg(page);
	else
		VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (compound) {
		atomic_t *mapcount;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		mapcount = compound_mapcount_ptr(page);
		first = atomic_inc_and_test(mapcount);
	} else {
		first = atomic_inc_and_test(&page->_mapcount);
	}

	if (first) {
		int nr = compound ? thp_nr_pages(page) : 1;

		if (compound)
			__mod_lruvec_page_state(page, NR_ANON_THPS, nr);
		__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
	}

	if (unlikely(PageKsm(page))) {
		unlock_page_memcg(page);
		return;
	}

	if (first)
		__page_set_anon_rmap(page, vma, address,
				     flags & RMAP_EXCLUSIVE);
	else
		__page_check_anon_rmap(page, vma, address);
}
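/*
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 * @compound: charge the page as compound or small page
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */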
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	int nr = compound ? thp_nr_pages(page) : 1;

	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	__SetPageSwapBacked(page);
	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);

		atomic_set(compound_mapcount_ptr(page), 0);
		if (hpage_pincount_available(page))
			atomic_set(compound_pincount_ptr(page), 0);

		__mod_lruvec_page_state(page, NR_ANON_THPS, nr);
	} else {
		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		atomic_set(&page->_mapcount, 0);
	}
	__mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
	__page_set_anon_rmap(page, vma, address, 1);
}
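/*
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 * @compound: charge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */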
void page_add_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
	lock_page_memcg(page);
	if (compound && PageTransHuge(page)) {
		int nr_pages = thp_nr_pages(page);

		for (i = 0, nr = 0; i < nr_pages; i++) {
			if (atomic_inc_and_test(&page[i]._mapcount))
				nr++;
		}
		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
			goto out;
		if (PageSwapBacked(page))
			__mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED,
						nr_pages);
		else
			__mod_lruvec_page_state(page, NR_FILE_PMDMAPPED,
						nr_pages);
	} else {
		if (PageTransCompound(page) && page_mapping(page)) {
			VM_WARN_ON_ONCE(!PageLocked(page));

			SetPageDoubleMap(compound_head(page));
			if (PageMlocked(page))
				clear_page_mlock(compound_head(page));
		}
		if (!atomic_inc_and_test(&page->_mapcount))
			goto out;
	}
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
out:
	unlock_page_memcg(page);
}

static void page_remove_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageHead(page), page);

	if (unlikely(PageHuge(page))) {
		atomic_dec(compound_mapcount_ptr(page));
		return;
	}

	if (compound && PageTransHuge(page)) {
		int nr_pages = thp_nr_pages(page);

		for (i = 0, nr = 0; i < nr_pages; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
			return;
		if (PageSwapBacked(page))
			__mod_lruvec_page_state(page, NR_SHMEM_PMDMAPPED,
						-nr_pages);
		else
			__mod_lruvec_page_state(page, NR_FILE_PMDMAPPED,
						-nr_pages);
	} else {
		if (!atomic_add_negative(-1, &page->_mapcount))
			return;
	}

	__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);
}
static void page_remove_anon_compound_rmap(struct page *page)
{
	int i, nr;

	if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
		return;

	if (unlikely(PageHuge(page)))
		return;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return;

	__mod_lruvec_page_state(page, NR_ANON_THPS, -thp_nr_pages(page));

	if (TestClearPageDoubleMap(page)) {
		for (i = 0, nr = 0; i < thp_nr_pages(page); i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}

		if (nr && nr < thp_nr_pages(page))
			deferred_split_huge_page(page);
	} else {
		nr = thp_nr_pages(page);
	}

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (nr)
		__mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr);
}
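/*
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 * @compound: uncharge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */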
void page_remove_rmap(struct page *page, bool compound)
{
	lock_page_memcg(page);

	if (!PageAnon(page)) {
		page_remove_file_rmap(page, compound);
		goto out;
	}

	if (compound) {
		page_remove_anon_compound_rmap(page);
		goto out;
	}

	if (!atomic_add_negative(-1, &page->_mapcount))
		goto out;

	__dec_lruvec_page_state(page, NR_ANON_MAPPED);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (PageTransCompound(page))
		deferred_split_huge_page(compound_head(page));

out:
	unlock_page_memcg(page);
}
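/*
 * @arg: enum ttu_flags will be passed to this argument.
 *
 * Unmap a single page from one vma, replacing its ptes with swap,
 * migration or poison entries as dictated by the TTU flags.
 */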
static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
			     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	pte_t pteval;
	struct page *subpage;
	bool ret = true;
	struct mmu_notifier_range range;
	enum ttu_flags flags = (enum ttu_flags)(long)arg;

	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
		return true;

	if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
	    is_zone_device_page(page) && !is_device_private_page(page))
		return true;

	if (flags & TTU_SPLIT_HUGE_PMD) {
		split_huge_pmd_address(vma, address,
				       flags & TTU_SPLIT_FREEZE, page);
	}

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				address,
				min(vma->vm_end, address + page_size(page)));
	if (PageHuge(page)) {
		adjust_range_if_pmd_sharing_possible(vma, &range.start,
						     &range.end);
	}
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		if (!pvmw.pte && (flags & TTU_MIGRATION)) {
			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);

			set_pmd_migration_entry(&pvmw, page);
			continue;
		}
#endif

		if (!(flags & TTU_IGNORE_MLOCK)) {
			if (vma->vm_flags & VM_LOCKED) {
				if (!PageTransCompound(page)) {
					mlock_vma_page(page);
				}
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (flags & TTU_MUNLOCK)
				continue;
		}

		VM_BUG_ON_PAGE(!pvmw.pte, page);

		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
		address = pvmw.address;

		if (PageHuge(page) && !PageAnon(page)) {
			VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
			if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
				flush_cache_range(vma, range.start, range.end);
				flush_tlb_range(vma, range.start, range.end);
				mmu_notifier_invalidate_range(mm, range.start,
							      range.end);

				page_vma_mapped_walk_done(&pvmw);
				break;
			}
		}

		if (IS_ENABLED(CONFIG_MIGRATION) &&
		    (flags & TTU_MIGRATION) &&
		    is_zone_device_page(page)) {
			swp_entry_t entry;
			pte_t swp_pte;

			pteval = ptep_get_and_clear(mm, pvmw.address, pvmw.pte);

			entry = make_migration_entry(page, 0);
			swp_pte = swp_entry_to_pte(entry);

			if (pte_swp_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			if (pte_swp_uffd_wp(pteval))
				swp_pte = pte_swp_mkuffd_wp(swp_pte);
			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);

			subpage = page;
			goto discard;
		}

		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
		if (should_defer_flush(mm, flags)) {
			pteval = ptep_get_and_clear(mm, address, pvmw.pte);

			set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
		} else {
			pteval = ptep_clear_flush(vma, address, pvmw.pte);
		}

		if (pte_dirty(pteval))
			set_page_dirty(page);

		update_hiwater_rss(mm);

		if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
			if (PageHuge(page)) {
				hugetlb_count_sub(compound_nr(page), mm);
				set_huge_swap_pte_at(mm, address,
						     pvmw.pte, pteval,
						     vma_mmu_pagesize(vma));
			} else {
				dec_mm_counter(mm, mm_counter(page));
				set_pte_at(mm, address, pvmw.pte, pteval);
			}

		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
			dec_mm_counter(mm, mm_counter(page));
			mmu_notifier_invalidate_range(mm, address,
						      address + PAGE_SIZE);
		} else if (IS_ENABLED(CONFIG_MIGRATION) &&
				(flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) {
			swp_entry_t entry;
			pte_t swp_pte;

			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			entry = make_migration_entry(subpage,
						     pte_write(pteval));
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			if (pte_uffd_wp(pteval))
				swp_pte = pte_swp_mkuffd_wp(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);

		} else if (PageAnon(page)) {
			swp_entry_t entry = { .val = page_private(subpage) };
			pte_t swp_pte;

			if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
				WARN_ON_ONCE(1);
				ret = false;

				mmu_notifier_invalidate_range(mm, address,
							      address + PAGE_SIZE);
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			if (!PageSwapBacked(page)) {
				if (!PageDirty(page)) {
					mmu_notifier_invalidate_range(mm,
						address, address + PAGE_SIZE);
					dec_mm_counter(mm, MM_ANONPAGES);
					goto discard;
				}

				set_pte_at(mm, address, pvmw.pte, pteval);
				SetPageSwapBacked(page);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			if (pte_uffd_wp(pteval))
				swp_pte = pte_swp_mkuffd_wp(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);

			mmu_notifier_invalidate_range(mm, address,
						      address + PAGE_SIZE);
		} else {
			dec_mm_counter(mm, mm_counter_file(page));
		}
discard:
		page_remove_rmap(subpage, PageHuge(page));
		put_page(page);
	}

	mmu_notifier_invalidate_range_end(&range);

	return ret;
}
static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return vma_is_temporary_stack(vma);
}

static int page_not_mapped(struct page *page)
{
	return !page_mapped(page);
}
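/*
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 *
 * If unmap is successful, return true. Otherwise, false.
 */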
bool try_to_unmap(struct page *page, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = page_not_mapped,
		.anon_lock = page_lock_anon_vma_read,
	};

	if ((flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))
	    && !PageKsm(page) && PageAnon(page))
		rwc.invalid_vma = invalid_migration_vma;

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(page, &rwc);
	else
		rmap_walk(page, &rwc);

	return !page_mapcount(page) ? true : false;
}
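/*
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 */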
void try_to_munlock(struct page *page)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)TTU_MUNLOCK,
		.done = page_not_mapped,
		.anon_lock = page_lock_anon_vma_read,
	};

	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);

	rmap_walk(page, &rwc);
}
void __put_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma *root = anon_vma->root;

	anon_vma_free(anon_vma);
	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
		anon_vma_free(root);
}

static struct anon_vma *rmap_walk_anon_lock(struct page *page,
					struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma;

	if (rwc->anon_lock)
		return rwc->anon_lock(page);

	anon_vma = page_anon_vma(page);
	if (!anon_vma)
		return NULL;

	anon_vma_lock_read(anon_vma);
	return anon_vma;
}
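/*
 * rmap_walk_anon - do something to anonymous page using the object-based
 * rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma
 * chains contained in the anon_vma struct it points to.
 */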
static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
		bool locked)
{
	struct anon_vma *anon_vma;
	pgoff_t pgoff_start, pgoff_end;
	struct anon_vma_chain *avc;

	if (locked) {
		anon_vma = page_anon_vma(page);
		VM_BUG_ON_PAGE(!anon_vma, page);
	} else {
		anon_vma = rmap_walk_anon_lock(page, rwc);
	}
	if (!anon_vma)
		return;

	pgoff_start = page_to_pgoff(page);
	pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
			pgoff_start, pgoff_end) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);

		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		if (!rwc->rmap_one(page, vma, address, rwc->arg))
			break;
		if (rwc->done && rwc->done(page))
			break;
	}

	if (!locked)
		anon_vma_unlock_read(anon_vma);
}
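/*
 * rmap_walk_file - do something to file page using the object-based rmap
 * method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma
 * chains contained in the address_space struct it points to.
 */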
static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
		bool locked)
{
	struct address_space *mapping = page_mapping(page);
	pgoff_t pgoff_start, pgoff_end;
	struct vm_area_struct *vma;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (!mapping)
		return;

	pgoff_start = page_to_pgoff(page);
	pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
	if (!locked)
		i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap,
			pgoff_start, pgoff_end) {
		unsigned long address = vma_address(page, vma);

		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		if (!rwc->rmap_one(page, vma, address, rwc->arg))
			goto done;
		if (rwc->done && rwc->done(page))
			goto done;
	}

done:
	if (!locked)
		i_mmap_unlock_read(mapping);
}
void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
	if (unlikely(PageKsm(page)))
		rmap_walk_ksm(page, rwc);
	else if (PageAnon(page))
		rmap_walk_anon(page, rwc, false);
	else
		rmap_walk_file(page, rwc, false);
}

void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
{
	VM_BUG_ON_PAGE(PageKsm(page), page);
	if (PageAnon(page))
		rmap_walk_anon(page, rwc, true);
	else
		rmap_walk_file(page, rwc, true);
}

#ifdef CONFIG_HUGETLB_PAGE
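/*
 * The following two functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */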
void hugepage_add_anon_rmap(struct page *page,
			    struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int first;

	BUG_ON(!PageLocked(page));
	BUG_ON(!anon_vma);

	first = atomic_inc_and_test(compound_mapcount_ptr(page));
	if (first)
		__page_set_anon_rmap(page, vma, address, 0);
}

void hugepage_add_new_anon_rmap(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(compound_mapcount_ptr(page), 0);
	if (hpage_pincount_available(page))
		atomic_set(compound_pincount_ptr(page), 0);

	__page_set_anon_rmap(page, vma, address, 1);
}
#endif