54#include <linux/mm.h>
55#include <linux/sched/mm.h>
56#include <linux/sched/task.h>
57#include <linux/pagemap.h>
58#include <linux/swap.h>
59#include <linux/swapops.h>
60#include <linux/slab.h>
61#include <linux/init.h>
62#include <linux/ksm.h>
63#include <linux/rmap.h>
64#include <linux/rcupdate.h>
65#include <linux/export.h>
66#include <linux/memcontrol.h>
67#include <linux/mmu_notifier.h>
68#include <linux/migrate.h>
69#include <linux/hugetlb.h>
70#include <linux/huge_mm.h>
71#include <linux/backing-dev.h>
72#include <linux/page_idle.h>
73#include <linux/memremap.h>
74#include <linux/userfaultfd_k.h>
75
76#include <asm/tlbflush.h>
77
78#include <trace/events/tlb.h>
79
80#include "internal.h"
81
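/* Slab caches for struct anon_vma and struct anon_vma_chain objects. */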
82static struct kmem_cache *anon_vma_cachep;
83static struct kmem_cache *anon_vma_chain_cachep;
84
85static inline struct anon_vma *anon_vma_alloc(void)
86{
87 struct anon_vma *anon_vma;
88
89 anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
90 if (anon_vma) {
91 atomic_set(&anon_vma->refcount, 1);
92 anon_vma->degree = 1;
93 anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
98 anon_vma->root = anon_vma;
99 }
100
101 return anon_vma;
102}
103
104static inline void anon_vma_free(struct anon_vma *anon_vma)
105{
106 VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() in
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
	 *
	 * page_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
125 might_sleep();
126 if (rwsem_is_locked(&anon_vma->root->rwsem)) {
127 anon_vma_lock_write(anon_vma);
128 anon_vma_unlock_write(anon_vma);
129 }
130
131 kmem_cache_free(anon_vma_cachep, anon_vma);
132}
133
134static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
135{
136 return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
137}
138
139static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
140{
141 kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
142}
143
144static void anon_vma_chain_link(struct vm_area_struct *vma,
145 struct anon_vma_chain *avc,
146 struct anon_vma *anon_vma)
147{
148 avc->vma = vma;
149 avc->anon_vma = anon_vma;
150 list_add(&avc->same_vma, &vma->anon_vma_chain);
151 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
152}

/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 * and that may actually touch the rwsem even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_lock held for reading.
 */
182int __anon_vma_prepare(struct vm_area_struct *vma)
183{
184 struct mm_struct *mm = vma->vm_mm;
185 struct anon_vma *anon_vma, *allocated;
186 struct anon_vma_chain *avc;
187
188 might_sleep();
189
190 avc = anon_vma_chain_alloc(GFP_KERNEL);
191 if (!avc)
192 goto out_enomem;
193
194 anon_vma = find_mergeable_anon_vma(vma);
195 allocated = NULL;
196 if (!anon_vma) {
197 anon_vma = anon_vma_alloc();
198 if (unlikely(!anon_vma))
199 goto out_enomem_free_avc;
200 allocated = anon_vma;
201 }
202
203 anon_vma_lock_write(anon_vma);
204
205 spin_lock(&mm->page_table_lock);
206 if (likely(!vma->anon_vma)) {
207 vma->anon_vma = anon_vma;
208 anon_vma_chain_link(vma, avc, anon_vma);
		/* vma reference or self-parent link for new root */
210 anon_vma->degree++;
211 allocated = NULL;
212 avc = NULL;
213 }
214 spin_unlock(&mm->page_table_lock);
215 anon_vma_unlock_write(anon_vma);
216
217 if (unlikely(allocated))
218 put_anon_vma(allocated);
219 if (unlikely(avc))
220 anon_vma_chain_free(avc);
221
222 return 0;
223
224 out_enomem_free_avc:
225 anon_vma_chain_free(avc);
226 out_enomem:
227 return -ENOMEM;
228}

/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single lock acquisition for the whole traversal.
 */
238static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
239{
240 struct anon_vma *new_root = anon_vma->root;
241 if (new_root != root) {
242 if (WARN_ON_ONCE(root))
243 up_write(&root->rwsem);
244 root = new_root;
245 down_write(&root->rwsem);
246 }
247 return root;
248}
249
250static inline void unlock_anon_vma_root(struct anon_vma *root)
251{
252 if (root)
253 up_write(&root->rwsem);
254}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * anon_vma_clone() is called by vma splitting/copying paths and by
 * anon_vma_fork(). The former want an exact copy of src, while the latter
 * may try to reuse an existing anon_vma to prevent endless growth of the
 * anon_vma hierarchy. Since dst->anon_vma is set to NULL before the call,
 * we can identify this case by checking (!dst->anon_vma && src->anon_vma).
 *
 * If (!dst->anon_vma && src->anon_vma) is true, this function tries to find
 * and reuse an existing anon_vma which has no vmas and only one child
 * anon_vma. This prevents degradation of the anon_vma hierarchy to an
 * endless linear chain in case of a constantly forking task. On the other
 * hand, an anon_vma with more than one child isn't reused even if there was
 * no alive vma, thus the rmap walker has a good chance of avoiding scanning
 * the whole hierarchy when it searches where the page is mapped.
 */
274int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
275{
276 struct anon_vma_chain *avc, *pavc;
277 struct anon_vma *root = NULL;
278
279 list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
280 struct anon_vma *anon_vma;
281
282 avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
283 if (unlikely(!avc)) {
284 unlock_anon_vma_root(root);
285 root = NULL;
286 avc = anon_vma_chain_alloc(GFP_KERNEL);
287 if (!avc)
288 goto enomem_failure;
289 }
290 anon_vma = pavc->anon_vma;
291 root = lock_anon_vma_root(root, anon_vma);
292 anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse existing anon_vma if its degree is lower than two,
		 * that means it has no vma and only one anon_vma child.
		 *
		 * Do not choose the parent anon_vma, otherwise the first child
		 * will always reuse it. The root anon_vma is never reused:
		 * it has a self-parent reference and at least one child.
		 */
302 if (!dst->anon_vma && src->anon_vma &&
303 anon_vma != src->anon_vma && anon_vma->degree < 2)
304 dst->anon_vma = anon_vma;
305 }
306 if (dst->anon_vma)
307 dst->anon_vma->degree++;
308 unlock_anon_vma_root(root);
309 return 0;
310
311 enomem_failure:
	/*
	 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
	 * decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
318 dst->anon_vma = NULL;
319 unlink_anon_vmas(dst);
320 return -ENOMEM;
321}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
328int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
329{
330 struct anon_vma_chain *avc;
331 struct anon_vma *anon_vma;
332 int error;

	/* Don't bother if the parent process has no anon_vma here. */
335 if (!pvma->anon_vma)
336 return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
339 vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
345 error = anon_vma_clone(vma, pvma);
346 if (error)
347 return error;

	/* An existing anon_vma has been reused, all done then. */
350 if (vma->anon_vma)
351 return 0;

	/* Then add our own anon_vma. */
354 anon_vma = anon_vma_alloc();
355 if (!anon_vma)
356 goto out_error;
357 avc = anon_vma_chain_alloc(GFP_KERNEL);
358 if (!avc)
359 goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's rwsem is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
365 anon_vma->root = pvma->anon_vma->root;
366 anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
372 get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
374 vma->anon_vma = anon_vma;
375 anon_vma_lock_write(anon_vma);
376 anon_vma_chain_link(vma, avc, anon_vma);
377 anon_vma->parent->degree++;
378 anon_vma_unlock_write(anon_vma);
379
380 return 0;
381
382 out_error_free_anon_vma:
383 put_anon_vma(anon_vma);
384 out_error:
385 unlink_anon_vmas(vma);
386 return -ENOMEM;
387}
388
389void unlink_anon_vmas(struct vm_area_struct *vma)
390{
391 struct anon_vma_chain *avc, *next;
392 struct anon_vma *root = NULL;
393
	/*
	 * Unlink each anon_vma chained to the VMA.  This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
398 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
399 struct anon_vma *anon_vma = avc->anon_vma;
400
401 root = lock_anon_vma_root(root, anon_vma);
402 anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
408 if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
409 anon_vma->parent->degree--;
410 continue;
411 }
412
413 list_del(&avc->same_vma);
414 anon_vma_chain_free(avc);
415 }
416 if (vma->anon_vma)
417 vma->anon_vma->degree--;
418 unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
425 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
426 struct anon_vma *anon_vma = avc->anon_vma;
427
428 VM_WARN_ON(anon_vma->degree);
429 put_anon_vma(anon_vma);
430
431 list_del(&avc->same_vma);
432 anon_vma_chain_free(avc);
433 }
434}
435
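/*
 * Slab constructor: runs when a slab page is allocated for the anon_vma
 * cache.  Because the cache is SLAB_TYPESAFE_BY_RCU, an object that is
 * freed and immediately reallocated is not re-constructed, so these fields
 * stay in a usable state for lockless readers such as
 * page_lock_anon_vma_read().
 */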
436static void anon_vma_ctor(void *data)
437{
438 struct anon_vma *anon_vma = data;
439
440 init_rwsem(&anon_vma->rwsem);
441 atomic_set(&anon_vma->refcount, 0);
442 anon_vma->rb_root = RB_ROOT_CACHED;
443}
444
445void __init anon_vma_init(void)
446{
447 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
448 0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
449 anon_vma_ctor);
450 anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
451 SLAB_PANIC|SLAB_ACCOUNT);
452}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization what so ever against page_remove_rmap()
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 */
478struct anon_vma *page_get_anon_vma(struct page *page)
479{
480 struct anon_vma *anon_vma = NULL;
481 unsigned long anon_mapping;
482
483 rcu_read_lock();
484 anon_mapping = (unsigned long)READ_ONCE(page->mapping);
485 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
486 goto out;
487 if (!page_mapped(page))
488 goto out;
489
490 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
491 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
492 anon_vma = NULL;
493 goto out;
494 }
	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed.  But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
503 if (!page_mapped(page)) {
504 rcu_read_unlock();
505 put_anon_vma(anon_vma);
506 return NULL;
507 }
508out:
509 rcu_read_unlock();
510
511 return anon_vma;
512}

/*
 * Similar to page_get_anon_vma() except it also locks the anon_vma for read.
 *
 * It is a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting
 * a reference like with page_get_anon_vma() and then block on the rwsem.
 */
521struct anon_vma *page_lock_anon_vma_read(struct page *page)
522{
523 struct anon_vma *anon_vma = NULL;
524 struct anon_vma *root_anon_vma;
525 unsigned long anon_mapping;
526
527 rcu_read_lock();
528 anon_mapping = (unsigned long)READ_ONCE(page->mapping);
529 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
530 goto out;
531 if (!page_mapped(page))
532 goto out;
533
534 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
535 root_anon_vma = READ_ONCE(anon_vma->root);
536 if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the rwsem ensures that it will
		 * not go away, see anon_vma_free().
		 */
542 if (!page_mapped(page)) {
543 up_read(&root_anon_vma->rwsem);
544 anon_vma = NULL;
545 }
546 goto out;
547 }

	/* trylock failed, we got to sleep */
550 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
551 anon_vma = NULL;
552 goto out;
553 }
554
555 if (!page_mapped(page)) {
556 rcu_read_unlock();
557 put_anon_vma(anon_vma);
558 return NULL;
559 }

	/* we pinned the anon_vma, it's safe to sleep */
562 rcu_read_unlock();
563 anon_vma_lock_read(anon_vma);
564
565 if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
571 anon_vma_unlock_read(anon_vma);
572 __put_anon_vma(anon_vma);
573 anon_vma = NULL;
574 }
575
576 return anon_vma;
577
578out:
579 rcu_read_unlock();
580 return anon_vma;
581}
582
583void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
584{
585 anon_vma_unlock_read(anon_vma);
586}
587
588#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly
 * it must be flushed before freeing to prevent data leakage.
 */
595void try_to_unmap_flush(void)
596{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
598
599 if (!tlb_ubc->flush_required)
600 return;
601
602 arch_tlbbatch_flush(&tlb_ubc->arch);
603 tlb_ubc->flush_required = false;
604 tlb_ubc->writable = false;
605}
606
/* Flush iff there are potentially writable TLB entries that can race with IO */
608void try_to_unmap_flush_dirty(void)
609{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
611
612 if (tlb_ubc->writable)
613 try_to_unmap_flush();
614}
615
616static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
617{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
619
620 arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
621 tlb_ubc->flush_required = true;

	/*
	 * Ensure the compiler does not re-order the setting of
	 * tlb_flush_batched before the PTE is cleared.
	 */
627 barrier();
628 mm->tlb_flush_batched = true;

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
635 if (writable)
636 tlb_ubc->writable = true;
637}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
643static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
644{
645 bool should_defer = false;
646
647 if (!(flags & TTU_BATCH_FLUSH))
648 return false;
649
	/* If remote CPUs need to be flushed then defer batch the flush */
651 if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
652 should_defer = true;
653 put_cpu();
654
655 return should_defer;
656}

/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching in flight would be expensive during reclaim, so instead track
 * whether TLB batching occurred in the past and if so do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will
 * synchronise via the PTL.
 */
673void flush_tlb_batched_pending(struct mm_struct *mm)
674{
675 if (data_race(mm->tlb_flush_batched)) {
676 flush_tlb_mm(mm);

		/*
		 * Do not allow the compiler to re-order the clearing of
		 * tlb_flush_batched before the TLB is flushed.
		 */
682 barrier();
683 mm->tlb_flush_batched = false;
684 }
685}
686#else
687static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
688{
689}
690
691static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
692{
693 return false;
694}
695#endif

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
701unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
702{
703 unsigned long address;
704 if (PageAnon(page)) {
705 struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
710 if (!vma->anon_vma || !page__anon_vma ||
711 vma->anon_vma->root != page__anon_vma->root)
712 return -EFAULT;
713 } else if (page->mapping) {
714 if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
715 return -EFAULT;
716 } else
717 return -EFAULT;
718 address = __vma_address(page, vma);
719 if (unlikely(address < vma->vm_start || address >= vma->vm_end))
720 return -EFAULT;
721 return address;
722}
723
724pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
725{
726 pgd_t *pgd;
727 p4d_t *p4d;
728 pud_t *pud;
729 pmd_t *pmd = NULL;
730 pmd_t pmde;
731
732 pgd = pgd_offset(mm, address);
733 if (!pgd_present(*pgd))
734 goto out;
735
736 p4d = p4d_offset(pgd, address);
737 if (!p4d_present(*p4d))
738 goto out;
739
740 pud = pud_offset(p4d, address);
741 if (!pud_present(*pud))
742 goto out;
743
744 pmd = pmd_offset(pud, address);
	/*
	 * Some THP functions use the sequence pmdp_huge_clear_flush(),
	 * set_pmd_at() without holding the anon_vma lock for write.  So when
	 * looking for a genuine pmde (in which to find the pte), test present
	 * and !THP together.
	 */
750 pmde = *pmd;
751 barrier();
752 if (!pmd_present(pmde) || pmd_trans_huge(pmde))
753 pmd = NULL;
754out:
755 return pmd;
756}
757
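/* Argument package passed to page_referenced_one() via rmap_walk(). */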
758struct page_referenced_arg {
759 int mapcount;
760 int referenced;
761 unsigned long vm_flags;
762 struct mem_cgroup *memcg;
763};

/*
 * arg: page_referenced_arg will be passed
 */
767static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
768 unsigned long address, void *arg)
769{
770 struct page_referenced_arg *pra = arg;
771 struct page_vma_mapped_walk pvmw = {
772 .page = page,
773 .vma = vma,
774 .address = address,
775 };
776 int referenced = 0;
777
778 while (page_vma_mapped_walk(&pvmw)) {
779 address = pvmw.address;
780
781 if (vma->vm_flags & VM_LOCKED) {
782 page_vma_mapped_walk_done(&pvmw);
783 pra->vm_flags |= VM_LOCKED;
784 return false;
785 }
786
787 if (pvmw.pte) {
788 if (ptep_clear_flush_young_notify(vma, address,
789 pvmw.pte)) {
				/*
				 * Don't treat a reference through
				 * a sequentially read mapping as such.
				 * If the page has been used in another mapping,
				 * we will catch it; if this other mapping is
				 * already gone, the unmap path will have set
				 * PG_referenced or activated the page.
				 */
798 if (likely(!(vma->vm_flags & VM_SEQ_READ)))
799 referenced++;
800 }
801 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
802 if (pmdp_clear_flush_young_notify(vma, address,
803 pvmw.pmd))
804 referenced++;
805 } else {
			/* unexpected pmd-mapped page? */
807 WARN_ON_ONCE(1);
808 }
809
810 pra->mapcount--;
811 }
812
813 if (referenced)
814 clear_page_idle(page);
815 if (test_and_clear_page_young(page))
816 referenced++;
817
818 if (referenced) {
819 pra->referenced++;
820 pra->vm_flags |= vma->vm_flags;
821 }
822
823 if (!pra->mapcount)
824 return false;
825
826 return true;
827}
828
829static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
830{
831 struct page_referenced_arg *pra = arg;
832 struct mem_cgroup *memcg = pra->memcg;
833
834 if (!mm_match_cgroup(vma->vm_mm, memcg))
835 return true;
836
837 return false;
838}
839

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
850int page_referenced(struct page *page,
851 int is_locked,
852 struct mem_cgroup *memcg,
853 unsigned long *vm_flags)
854{
855 int we_locked = 0;
856 struct page_referenced_arg pra = {
857 .mapcount = total_mapcount(page),
858 .memcg = memcg,
859 };
860 struct rmap_walk_control rwc = {
861 .rmap_one = page_referenced_one,
862 .arg = (void *)&pra,
863 .anon_lock = page_lock_anon_vma_read,
864 };
865
866 *vm_flags = 0;
867 if (!pra.mapcount)
868 return 0;
869
870 if (!page_rmapping(page))
871 return 0;
872
873 if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
874 we_locked = trylock_page(page);
875 if (!we_locked)
876 return 1;
877 }

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups
	 */
884 if (memcg) {
885 rwc.invalid_vma = invalid_page_referenced_vma;
886 }
887
888 rmap_walk(page, &rwc);
889 *vm_flags = pra.vm_flags;
890
891 if (we_locked)
892 unlock_page(page);
893
894 return pra.referenced;
895}
896
897static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
898 unsigned long address, void *arg)
899{
900 struct page_vma_mapped_walk pvmw = {
901 .page = page,
902 .vma = vma,
903 .address = address,
904 .flags = PVMW_SYNC,
905 };
906 struct mmu_notifier_range range;
907 int *cleaned = arg;

	/*
	 * We have to assume the worst case ie pmd for invalidation. Note that
	 * the page can not be freed from this function.
	 */
913 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
914 0, vma, vma->vm_mm, address,
915 min(vma->vm_end, address + page_size(page)));
916 mmu_notifier_invalidate_range_start(&range);
917
918 while (page_vma_mapped_walk(&pvmw)) {
919 int ret = 0;
920
921 address = pvmw.address;
922 if (pvmw.pte) {
923 pte_t entry;
924 pte_t *pte = pvmw.pte;
925
926 if (!pte_dirty(*pte) && !pte_write(*pte))
927 continue;
928
929 flush_cache_page(vma, address, pte_pfn(*pte));
930 entry = ptep_clear_flush(vma, address, pte);
931 entry = pte_wrprotect(entry);
932 entry = pte_mkclean(entry);
933 set_pte_at(vma->vm_mm, address, pte, entry);
934 ret = 1;
935 } else {
936#ifdef CONFIG_TRANSPARENT_HUGEPAGE
937 pmd_t *pmd = pvmw.pmd;
938 pmd_t entry;
939
940 if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
941 continue;
942
943 flush_cache_page(vma, address, page_to_pfn(page));
944 entry = pmdp_invalidate(vma, address, pmd);
945 entry = pmd_wrprotect(entry);
946 entry = pmd_mkclean(entry);
947 set_pmd_at(vma->vm_mm, address, pmd, entry);
948 ret = 1;
949#else
			/* unexpected pmd-mapped page? */
951 WARN_ON_ONCE(1);
952#endif
953 }

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
962 if (ret)
963 (*cleaned)++;
964 }
965
966 mmu_notifier_invalidate_range_end(&range);
967
968 return true;
969}
970
971static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
972{
973 if (vma->vm_flags & VM_SHARED)
974 return false;
975
976 return true;
977}
978
979int page_mkclean(struct page *page)
980{
981 int cleaned = 0;
982 struct address_space *mapping;
983 struct rmap_walk_control rwc = {
984 .arg = (void *)&cleaned,
985 .rmap_one = page_mkclean_one,
986 .invalid_vma = invalid_mkclean_vma,
987 };
988
989 BUG_ON(!PageLocked(page));
990
991 if (!page_mapped(page))
992 return 0;
993
994 mapping = page_mapping(page);
995 if (!mapping)
996 return 0;
997
998 rmap_walk(page, &rwc);
999
1000 return cleaned;
1001}
1002EXPORT_SYMBOL_GPL(page_mkclean);
1003
/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
1014void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
1015{
1016 struct anon_vma *anon_vma = vma->anon_vma;
1017
1018 page = compound_head(page);
1019
1020 VM_BUG_ON_PAGE(!PageLocked(page), page);
1021 VM_BUG_ON_VMA(!anon_vma, vma);
1022
1023 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	/*
	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
	 * simultaneously, so that a concurrent reader of page->mapping
	 * cannot see the anon_vma without the PAGE_MAPPING_ANON bit.
	 */
1029 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
1030}
1031
/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page:	Page or Hugepage to add to rmap
 * @vma:	VM area to add page to.
 * @address:	User virtual address of the mapping
 * @exclusive:	the page is exclusively owned by the current process
 */
1039static void __page_set_anon_rmap(struct page *page,
1040 struct vm_area_struct *vma, unsigned long address, int exclusive)
1041{
1042 struct anon_vma *anon_vma = vma->anon_vma;
1043
1044 BUG_ON(!anon_vma);
1045
1046 if (PageAnon(page))
1047 return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
1054 if (!exclusive)
1055 anon_vma = anon_vma->root;

	/*
	 * page_idle does a lockless/optimistic rmap scan on page->mapping.
	 * Make sure the compiler doesn't split the stores of anon_vma and
	 * the PAGE_MAPPING_ANON type identifier, otherwise the rmap code
	 * could mistake the mapping for a struct address_space and crash.
	 */
1063 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1064 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
1065 page->index = linear_page_index(vma, address);
1066}
1067
/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
1074static void __page_check_anon_rmap(struct page *page,
1075 struct vm_area_struct *vma, unsigned long address)
1076{
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
1089 VM_BUG_ON_PAGE(page_anon_vma(page)->root != vma->anon_vma->root, page);
1090 VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address),
1091 page);
1092}
1093

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @compound:	charge the page as compound or small page
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
1106void page_add_anon_rmap(struct page *page,
1107 struct vm_area_struct *vma, unsigned long address, bool compound)
1108{
1109 do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
1110}
1111
/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to prefer page_add_anon_rmap above.
 */
1117void do_page_add_anon_rmap(struct page *page,
1118 struct vm_area_struct *vma, unsigned long address, int flags)
1119{
1120 bool compound = flags & RMAP_COMPOUND;
1121 bool first;
1122
1123 if (unlikely(PageKsm(page)))
1124 lock_page_memcg(page);
1125 else
1126 VM_BUG_ON_PAGE(!PageLocked(page), page);
1127
1128 if (compound) {
1129 atomic_t *mapcount;
1130 VM_BUG_ON_PAGE(!PageLocked(page), page);
1131 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
1132 mapcount = compound_mapcount_ptr(page);
1133 first = atomic_inc_and_test(mapcount);
1134 } else {
1135 first = atomic_inc_and_test(&page->_mapcount);
1136 }
1137
1138 if (first) {
1139 int nr = compound ? thp_nr_pages(page) : 1;

		/*
		 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
		 * these counters are not modified in interrupt context, and
		 * the pte lock (a spinlock) is held, which implies preemption
		 * disabled.
		 */
1146 if (compound)
1147 __inc_lruvec_page_state(page, NR_ANON_THPS);
1148 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
1149 }
1150
1151 if (unlikely(PageKsm(page))) {
1152 unlock_page_memcg(page);
1153 return;
1154 }

	/* address might be in next vma when migration races vma_adjust */
1157 if (first)
1158 __page_set_anon_rmap(page, vma, address,
1159 flags & RMAP_EXCLUSIVE);
1160 else
1161 __page_check_anon_rmap(page, vma, address);
1162}
1163
/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @compound:	charge the page as compound or small page
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
1175void page_add_new_anon_rmap(struct page *page,
1176 struct vm_area_struct *vma, unsigned long address, bool compound)
1177{
1178 int nr = compound ? thp_nr_pages(page) : 1;
1179
1180 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
1181 __SetPageSwapBacked(page);
1182 if (compound) {
1183 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		/* increment count (starts at -1) */
1185 atomic_set(compound_mapcount_ptr(page), 0);
1186 if (hpage_pincount_available(page))
1187 atomic_set(compound_pincount_ptr(page), 0);
1188
1189 __inc_lruvec_page_state(page, NR_ANON_THPS);
1190 } else {
		/* Anon THP always mapped first with PMD */
1192 VM_BUG_ON_PAGE(PageTransCompound(page), page);
		/* increment count (starts at -1) */
1194 atomic_set(&page->_mapcount, 0);
1195 }
1196 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
1197 __page_set_anon_rmap(page, vma, address, 1);
1198}
1199
/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page:	the page to add the mapping to
 * @compound:	charge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
1207void page_add_file_rmap(struct page *page, bool compound)
1208{
1209 int i, nr = 1;
1210
1211 VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
1212 lock_page_memcg(page);
1213 if (compound && PageTransHuge(page)) {
1214 for (i = 0, nr = 0; i < thp_nr_pages(page); i++) {
1215 if (atomic_inc_and_test(&page[i]._mapcount))
1216 nr++;
1217 }
1218 if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
1219 goto out;
1220 if (PageSwapBacked(page))
1221 __inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
1222 else
1223 __inc_node_page_state(page, NR_FILE_PMDMAPPED);
1224 } else {
1225 if (PageTransCompound(page) && page_mapping(page)) {
1226 VM_WARN_ON_ONCE(!PageLocked(page));
1227
1228 SetPageDoubleMap(compound_head(page));
1229 if (PageMlocked(page))
1230 clear_page_mlock(compound_head(page));
1231 }
1232 if (!atomic_inc_and_test(&page->_mapcount))
1233 goto out;
1234 }
1235 __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
1236out:
1237 unlock_page_memcg(page);
1238}
1239
1240static void page_remove_file_rmap(struct page *page, bool compound)
1241{
1242 int i, nr = 1;
1243
1244 VM_BUG_ON_PAGE(compound && !PageHead(page), page);

	/* Hugetlb pages are not counted in NR_FILE_MAPPED */
1247 if (unlikely(PageHuge(page))) {
		/* hugetlb pages are always mapped with pmds */
1249 atomic_dec(compound_mapcount_ptr(page));
1250 return;
1251 }

	/* page still mapped by someone else? */
1254 if (compound && PageTransHuge(page)) {
1255 for (i = 0, nr = 0; i < thp_nr_pages(page); i++) {
1256 if (atomic_add_negative(-1, &page[i]._mapcount))
1257 nr++;
1258 }
1259 if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
1260 return;
1261 if (PageSwapBacked(page))
1262 __dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
1263 else
1264 __dec_node_page_state(page, NR_FILE_PMDMAPPED);
1265 } else {
1266 if (!atomic_add_negative(-1, &page->_mapcount))
1267 return;
1268 }
1269
	/*
	 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
	 * these counters are not modified in interrupt context, and
	 * the pte lock (a spinlock) is held, which implies preemption
	 * disabled.
	 */
1275 __mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);
1276
1277 if (unlikely(PageMlocked(page)))
1278 clear_page_mlock(page);
1279}
1280
1281static void page_remove_anon_compound_rmap(struct page *page)
1282{
1283 int i, nr;
1284
1285 if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
1286 return;

	/* Hugetlb pages are not counted in NR_ANON_THPS nor NR_ANON_MAPPED */
1289 if (unlikely(PageHuge(page)))
1290 return;
1291
1292 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
1293 return;
1294
1295 __dec_lruvec_page_state(page, NR_ANON_THPS);
1296
1297 if (TestClearPageDoubleMap(page)) {
		/*
		 * Subpages can be mapped with PTEs too. Check how many of
		 * them are still mapped.
		 */
1302 for (i = 0, nr = 0; i < thp_nr_pages(page); i++) {
1303 if (atomic_add_negative(-1, &page[i]._mapcount))
1304 nr++;
1305 }

		/*
		 * Queue the page for deferred split if at least one small
		 * page of the compound page is unmapped, but at least one
		 * small page is still mapped.
		 */
1312 if (nr && nr < thp_nr_pages(page))
1313 deferred_split_huge_page(page);
1314 } else {
1315 nr = thp_nr_pages(page);
1316 }
1317
1318 if (unlikely(PageMlocked(page)))
1319 clear_page_mlock(page);
1320
1321 if (nr)
1322 __mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr);
1323}
1324
/**
 * page_remove_rmap - take down pte mapping from a page
 * @page:	page to remove mapping from
 * @compound:	uncharge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
1332void page_remove_rmap(struct page *page, bool compound)
1333{
1334 lock_page_memcg(page);
1335
1336 if (!PageAnon(page)) {
1337 page_remove_file_rmap(page, compound);
1338 goto out;
1339 }
1340
1341 if (compound) {
1342 page_remove_anon_compound_rmap(page);
1343 goto out;
1344 }

	/* page still mapped by someone else? */
1347 if (!atomic_add_negative(-1, &page->_mapcount))
1348 goto out;

	/*
	 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
	 * these counters are not modified in interrupt context, and
	 * the pte lock (a spinlock) is held, which implies preemption
	 * disabled.
	 */
1355 __dec_lruvec_page_state(page, NR_ANON_MAPPED);
1356
1357 if (unlikely(PageMlocked(page)))
1358 clear_page_mlock(page);
1359
1360 if (PageTransCompound(page))
1361 deferred_split_huge_page(compound_head(page));

	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_unref_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
1372out:
1373 unlock_page_memcg(page);
1374}
1375
/*
 * @arg: enum ttu_flags will be passed to this argument
 */
1379static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1380 unsigned long address, void *arg)
1381{
1382 struct mm_struct *mm = vma->vm_mm;
1383 struct page_vma_mapped_walk pvmw = {
1384 .page = page,
1385 .vma = vma,
1386 .address = address,
1387 };
1388 pte_t pteval;
1389 struct page *subpage;
1390 bool ret = true;
1391 struct mmu_notifier_range range;
1392 enum ttu_flags flags = (enum ttu_flags)(long)arg;
1393
	/* munlock has nothing to gain from examining un-locked vmas */
1395 if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
1396 return true;
1397
1398 if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
1399 is_zone_device_page(page) && !is_device_private_page(page))
1400 return true;
1401
1402 if (flags & TTU_SPLIT_HUGE_PMD) {
1403 split_huge_pmd_address(vma, address,
1404 flags & TTU_SPLIT_FREEZE, page);
1405 }

	/*
	 * For THP, we have to assume the worst case ie pmd for invalidation.
	 * For hugetlb, it could be much worse if we need to do pud
	 * invalidation in the case of pmd sharing.
	 *
	 * Note that the page can not be freed in this function as call of
	 * try_to_unmap() must hold a reference on the page.
	 */
1415 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1416 address,
1417 min(vma->vm_end, address + page_size(page)));
1418 if (PageHuge(page)) {
		/*
		 * If sharing is possible, start and end will be adjusted
		 * accordingly.
		 */
1423 adjust_range_if_pmd_sharing_possible(vma, &range.start,
1424 &range.end);
1425 }
1426 mmu_notifier_invalidate_range_start(&range);
1427
1428 while (page_vma_mapped_walk(&pvmw)) {
1429#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
1431 if (!pvmw.pte && (flags & TTU_MIGRATION)) {
1432 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
1433
1434 set_pmd_migration_entry(&pvmw, page);
1435 continue;
1436 }
1437#endif

		/*
		 * If the page is mlock()d, we cannot swap it out.
		 * If it's recently referenced (perhaps page_referenced
		 * skipped over this mm) then we should reactivate it.
		 */
1444 if (!(flags & TTU_IGNORE_MLOCK)) {
1445 if (vma->vm_flags & VM_LOCKED) {
				/* PTE-mapped THP are never mlocked */
1447 if (!PageTransCompound(page)) {
					/*
					 * Holding pte lock, we do *not* need
					 * mmap_lock here
					 */
1452 mlock_vma_page(page);
1453 }
1454 ret = false;
1455 page_vma_mapped_walk_done(&pvmw);
1456 break;
1457 }
1458 if (flags & TTU_MUNLOCK)
1459 continue;
1460 }

		/* Unexpected PMD-mapped THP? */
1463 VM_BUG_ON_PAGE(!pvmw.pte, page);
1464
1465 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
1466 address = pvmw.address;
1467
1468 if (PageHuge(page) && !PageAnon(page)) {
			/*
			 * To call huge_pmd_unshare, i_mmap_rwsem must be
			 * held in write mode.  Caller needs to explicitly
			 * do this outside rmap routines.
			 */
1474 VM_BUG_ON(!(flags & TTU_RMAP_LOCKED));
1475 if (huge_pmd_unshare(mm, vma, &address, pvmw.pte)) {
				/*
				 * huge_pmd_unshare unmapped an entire PMD
				 * page.  There is no way of knowing exactly
				 * which PMDs may be cached for this mm, so
				 * we must flush them all.  start/end were
				 * already adjusted above to cover this range.
				 */
1483 flush_cache_range(vma, range.start, range.end);
1484 flush_tlb_range(vma, range.start, range.end);
1485 mmu_notifier_invalidate_range(mm, range.start,
1486 range.end);

				/*
				 * The ref count of the PMD page was dropped
				 * which is part of the way map counting
				 * is done for shared PMDs.  Return 'true'
				 * here.  When there is no other sharing,
				 * huge_pmd_unshare returns false and we will
				 * unmap the actual page and drop map count
				 * to zero.
				 */
1497 page_vma_mapped_walk_done(&pvmw);
1498 break;
1499 }
1500 }
1501
1502 if (IS_ENABLED(CONFIG_MIGRATION) &&
1503 (flags & TTU_MIGRATION) &&
1504 is_zone_device_page(page)) {
1505 swp_entry_t entry;
1506 pte_t swp_pte;
1507
1508 pteval = ptep_get_and_clear(mm, pvmw.address, pvmw.pte);

			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
1515 entry = make_migration_entry(page, 0);
1516 swp_pte = swp_entry_to_pte(entry);

			/*
			 * pteval maps a zone device page and is therefore
			 * a swap pte.
			 */
1522 if (pte_swp_soft_dirty(pteval))
1523 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1524 if (pte_swp_uffd_wp(pteval))
1525 swp_pte = pte_swp_mkuffd_wp(swp_pte);
1526 set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);

			/*
			 * No need to invalidate here, it will synchronize
			 * against the special swap migration pte.
			 *
			 * The assignment to subpage above was computed from a
			 * swap PTE which results in an invalid pointer.
			 * Since only PAGE_SIZE pages can currently be
			 * migrated, just set it to page. This will need to be
			 * changed when hugepage migrations to device private
			 * memory are supported.
			 */
1538 subpage = page;
1539 goto discard;
1540 }

		/* Nuke the page table entry. */
1543 flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
1544 if (should_defer_flush(mm, flags)) {
			/*
			 * We clear the PTE but do not flush so potentially
			 * a remote CPU could still be writing to the page.
			 * If the entry was previously clean then the
			 * architecture must guarantee that a clear->dirty
			 * transition on a cached TLB entry is written through
			 * and traps if the PTE is unmapped.
			 */
1553 pteval = ptep_get_and_clear(mm, address, pvmw.pte);
1554
1555 set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
1556 } else {
1557 pteval = ptep_clear_flush(vma, address, pvmw.pte);
1558 }

		/* Move the dirty bit to the page. Now the pte is gone. */
1561 if (pte_dirty(pteval))
1562 set_page_dirty(page);

		/* Update high watermark before we lower rss */
1565 update_hiwater_rss(mm);
1566
1567 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
1568 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
1569 if (PageHuge(page)) {
1570 hugetlb_count_sub(compound_nr(page), mm);
1571 set_huge_swap_pte_at(mm, address,
1572 pvmw.pte, pteval,
1573 vma_mmu_pagesize(vma));
1574 } else {
1575 dec_mm_counter(mm, mm_counter(page));
1576 set_pte_at(mm, address, pvmw.pte, pteval);
1577 }
1578
1579 } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
			/*
			 * The guest indicated that the page content is of no
			 * interest anymore. Simply discard the pte, vmscan
			 * will take care of the rest.
			 * A future reference will then fault in a new zero
			 * page. When userfaultfd is active, we must not drop
			 * the page though, as its main user (postcopy
			 * migration) will not expect userfaults on already
			 * copied pages.
			 */
1590 dec_mm_counter(mm, mm_counter(page));
			/* We have to invalidate as we cleared the pte */
1592 mmu_notifier_invalidate_range(mm, address,
1593 address + PAGE_SIZE);
1594 } else if (IS_ENABLED(CONFIG_MIGRATION) &&
1595 (flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) {
1596 swp_entry_t entry;
1597 pte_t swp_pte;
1598
1599 if (arch_unmap_one(mm, vma, address, pteval) < 0) {
1600 set_pte_at(mm, address, pvmw.pte, pteval);
1601 ret = false;
1602 page_vma_mapped_walk_done(&pvmw);
1603 break;
1604 }

			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
1611 entry = make_migration_entry(subpage,
1612 pte_write(pteval));
1613 swp_pte = swp_entry_to_pte(entry);
1614 if (pte_soft_dirty(pteval))
1615 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1616 if (pte_uffd_wp(pteval))
1617 swp_pte = pte_swp_mkuffd_wp(swp_pte);
1618 set_pte_at(mm, address, pvmw.pte, swp_pte);
			/*
			 * No need to invalidate here, it will synchronize
			 * against the special swap migration pte.
			 */
1623 } else if (PageAnon(page)) {
1624 swp_entry_t entry = { .val = page_private(subpage) };
1625 pte_t swp_pte;
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
1630 if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
1631 WARN_ON_ONCE(1);
1632 ret = false;
				/* We have to invalidate as we cleared the pte */
1634 mmu_notifier_invalidate_range(mm, address,
1635 address + PAGE_SIZE);
1636 page_vma_mapped_walk_done(&pvmw);
1637 break;
1638 }

			/* MADV_FREE page check */
1641 if (!PageSwapBacked(page)) {
1642 if (!PageDirty(page)) {
					/* Invalidate as we cleared the pte */
1644 mmu_notifier_invalidate_range(mm,
1645 address, address + PAGE_SIZE);
1646 dec_mm_counter(mm, MM_ANONPAGES);
1647 goto discard;
1648 }

				/*
				 * If the page was redirtied, it cannot be
				 * discarded. Remap the page to page table.
				 */
1654 set_pte_at(mm, address, pvmw.pte, pteval);
1655 SetPageSwapBacked(page);
1656 ret = false;
1657 page_vma_mapped_walk_done(&pvmw);
1658 break;
1659 }
1660
1661 if (swap_duplicate(entry) < 0) {
1662 set_pte_at(mm, address, pvmw.pte, pteval);
1663 ret = false;
1664 page_vma_mapped_walk_done(&pvmw);
1665 break;
1666 }
1667 if (arch_unmap_one(mm, vma, address, pteval) < 0) {
1668 set_pte_at(mm, address, pvmw.pte, pteval);
1669 ret = false;
1670 page_vma_mapped_walk_done(&pvmw);
1671 break;
1672 }
1673 if (list_empty(&mm->mmlist)) {
1674 spin_lock(&mmlist_lock);
1675 if (list_empty(&mm->mmlist))
1676 list_add(&mm->mmlist, &init_mm.mmlist);
1677 spin_unlock(&mmlist_lock);
1678 }
1679 dec_mm_counter(mm, MM_ANONPAGES);
1680 inc_mm_counter(mm, MM_SWAPENTS);
1681 swp_pte = swp_entry_to_pte(entry);
1682 if (pte_soft_dirty(pteval))
1683 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1684 if (pte_uffd_wp(pteval))
1685 swp_pte = pte_swp_mkuffd_wp(swp_pte);
1686 set_pte_at(mm, address, pvmw.pte, swp_pte);
			/* Invalidate as we cleared the pte */
1688 mmu_notifier_invalidate_range(mm, address,
1689 address + PAGE_SIZE);
1690 } else {
			/*
			 * This is a locked file-backed page, thus it cannot
			 * be removed from the page cache and replaced by a new
			 * page before mmu_notifier_invalidate_range_end, so no
			 * concurrent thread might update its page table to
			 * point at a new page while a device is still using
			 * this page.
			 *
			 * See Documentation/vm/mmu_notifier.rst
			 */
1701 dec_mm_counter(mm, mm_counter_file(page));
1702 }
1703discard:
		/*
		 * No need to call mmu_notifier_invalidate_range() as it has
		 * been done above for all cases requiring it to happen under
		 * the page table lock before
		 * mmu_notifier_invalidate_range_end().
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
1711 page_remove_rmap(subpage, PageHuge(page));
1712 put_page(page);
1713 }
1714
1715 mmu_notifier_invalidate_range_end(&range);
1716
1717 return ret;
1718}
1719
1720static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
1721{
1722 return vma_is_temporary_stack(vma);
1723}
1724
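/*
 * rmap_walk() completion check used by try_to_unmap(): stop the walk early
 * once no mapping of the page remains.
 */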
1725static int page_mapcount_is_zero(struct page *page)
1726{
1727 return !total_mapcount(page);
1728}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 *
 * If unmap is successful, return true. Otherwise, false.
 */
1740bool try_to_unmap(struct page *page, enum ttu_flags flags)
1741{
1742 struct rmap_walk_control rwc = {
1743 .rmap_one = try_to_unmap_one,
1744 .arg = (void *)flags,
1745 .done = page_mapcount_is_zero,
1746 .anon_lock = page_lock_anon_vma_read,
1747 };

	/*
	 * During exec, a temporary VMA is setup and later moved.
	 * The VMA is moved under the anon_vma lock but not the
	 * page tables leading to a race where migration cannot
	 * find the migration ptes. Rather than increasing the
	 * locking requirements of exec(), migration skips
	 * temporary VMAs until after exec() completes.
	 */
1757 if ((flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))
1758 && !PageKsm(page) && PageAnon(page))
1759 rwc.invalid_vma = invalid_migration_vma;
1760
1761 if (flags & TTU_RMAP_LOCKED)
1762 rmap_walk_locked(page, &rwc);
1763 else
1764 rmap_walk(page, &rwc);
1765
1766 return !page_mapcount(page) ? true : false;
1767}
1768
1769static int page_not_mapped(struct page *page)
1770{
1771 return !page_mapped(page);
1772};

/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else holds the page mlocked.
 */
1783void try_to_munlock(struct page *page)
1784{
1785 struct rmap_walk_control rwc = {
1786 .rmap_one = try_to_unmap_one,
1787 .arg = (void *)TTU_MUNLOCK,
1788 .done = page_not_mapped,
1789 .anon_lock = page_lock_anon_vma_read,
1790
1791 };
1792
1793 VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
1794 VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
1795
1796 rmap_walk(page, &rwc);
1797}
1798
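/*
 * Called by put_anon_vma() when the last reference is dropped: free this
 * anon_vma and, if it held the last reference, the root anon_vma it pinned.
 */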
1799void __put_anon_vma(struct anon_vma *anon_vma)
1800{
1801 struct anon_vma *root = anon_vma->root;
1802
1803 anon_vma_free(anon_vma);
1804 if (root != anon_vma && atomic_dec_and_test(&root->refcount))
1805 anon_vma_free(root);
1806}
1807
1808static struct anon_vma *rmap_walk_anon_lock(struct page *page,
1809 struct rmap_walk_control *rwc)
1810{
1811 struct anon_vma *anon_vma;
1812
1813 if (rwc->anon_lock)
1814 return rwc->anon_lock(page);
1815

	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_lock. Users without mmap_lock are required to
	 * take a reference count to prevent the anon_vma disappearing
	 */
1822 anon_vma = page_anon_vma(page);
1823 if (!anon_vma)
1824 return NULL;
1825
1826 anon_vma_lock_read(anon_vma);
1827 return anon_vma;
1828}
1829

/*
 * rmap_walk_anon - do something to anonymous page using the object-based
 * rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma
 * chains contained in the anon_vma struct it points to.
 *
 * When called from try_to_munlock(), the mmap_lock of the mm containing the
 * vma where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
1844static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
1845 bool locked)
1846{
1847 struct anon_vma *anon_vma;
1848 pgoff_t pgoff_start, pgoff_end;
1849 struct anon_vma_chain *avc;
1850
1851 if (locked) {
1852 anon_vma = page_anon_vma(page);
		/* anon_vma disappear under us? */
1854 VM_BUG_ON_PAGE(!anon_vma, page);
1855 } else {
1856 anon_vma = rmap_walk_anon_lock(page, rwc);
1857 }
1858 if (!anon_vma)
1859 return;
1860
1861 pgoff_start = page_to_pgoff(page);
1862 pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
1863 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
1864 pgoff_start, pgoff_end) {
1865 struct vm_area_struct *vma = avc->vma;
1866 unsigned long address = vma_address(page, vma);
1867
1868 cond_resched();
1869
1870 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
1871 continue;
1872
1873 if (!rwc->rmap_one(page, vma, address, rwc->arg))
1874 break;
1875 if (rwc->done && rwc->done(page))
1876 break;
1877 }
1878
1879 if (!locked)
1880 anon_vma_unlock_read(anon_vma);
1881}
1882

/*
 * rmap_walk_file - do something to file page using the object-based rmap
 * method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma
 * chains contained in the address_space struct it points to.
 *
 * When called from try_to_munlock(), the mmap_lock of the mm containing the
 * vma where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
1896static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
1897 bool locked)
1898{
1899 struct address_space *mapping = page_mapping(page);
1900 pgoff_t pgoff_start, pgoff_end;
1901 struct vm_area_struct *vma;
1902
	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_rwsem.
	 */
1909 VM_BUG_ON_PAGE(!PageLocked(page), page);
1910
1911 if (!mapping)
1912 return;
1913
1914 pgoff_start = page_to_pgoff(page);
1915 pgoff_end = pgoff_start + thp_nr_pages(page) - 1;
1916 if (!locked)
1917 i_mmap_lock_read(mapping);
1918 vma_interval_tree_foreach(vma, &mapping->i_mmap,
1919 pgoff_start, pgoff_end) {
1920 unsigned long address = vma_address(page, vma);
1921
1922 cond_resched();
1923
1924 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
1925 continue;
1926
1927 if (!rwc->rmap_one(page, vma, address, rwc->arg))
1928 goto done;
1929 if (rwc->done && rwc->done(page))
1930 goto done;
1931 }
1932
1933done:
1934 if (!locked)
1935 i_mmap_unlock_read(mapping);
1936}
1937
1938void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
1939{
1940 if (unlikely(PageKsm(page)))
1941 rmap_walk_ksm(page, rwc);
1942 else if (PageAnon(page))
1943 rmap_walk_anon(page, rwc, false);
1944 else
1945 rmap_walk_file(page, rwc, false);
1946}

/* Like rmap_walk, but caller holds relevant rmap lock */
1949void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
1950{
	/* no ksm support for now */
1952 VM_BUG_ON_PAGE(PageKsm(page), page);
1953 if (PageAnon(page))
1954 rmap_walk_anon(page, rwc, true);
1955 else
1956 rmap_walk_file(page, rwc, true);
1957}
1958
1959#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following two functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
1965void hugepage_add_anon_rmap(struct page *page,
1966 struct vm_area_struct *vma, unsigned long address)
1967{
1968 struct anon_vma *anon_vma = vma->anon_vma;
1969 int first;
1970
1971 BUG_ON(!PageLocked(page));
1972 BUG_ON(!anon_vma);
	/* address might be in next vma when migration races vma_adjust */
1974 first = atomic_inc_and_test(compound_mapcount_ptr(page));
1975 if (first)
1976 __page_set_anon_rmap(page, vma, address, 0);
1977}
1978
1979void hugepage_add_new_anon_rmap(struct page *page,
1980 struct vm_area_struct *vma, unsigned long address)
1981{
1982 BUG_ON(address < vma->vm_start || address >= vma->vm_end);
1983 atomic_set(compound_mapcount_ptr(page), 0);
1984 if (hpage_pincount_available(page))
1985 atomic_set(compound_pincount_ptr(page), 0);
1986
1987 __page_set_anon_rmap(page, vma, address, 1);
1988}
1989#endif
1990