/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Provides the object-based reverse mapping used to find every page
 * table entry that maps a given physical page: anonymous pages are
 * tracked through anon_vma interval trees, file pages through the
 * address_space i_mmap interval tree.
 *
 * Lock ordering: the anon_vma rwsem and mapping->i_mmap_rwsem nest
 * under mmap_sem and the page lock, with the pte lock innermost.
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>

#include <asm/tlbflush.h>

#include <trace/events/tlb.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;
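
/* Allocate an anon_vma that is initially its own root and parent. */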
static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->degree = 1;	/* Reference for first vma */
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() in
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
	 *
	 * page_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}
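
/* Slab helpers for the anon_vma_chain links that tie VMAs to anon_vmas. */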
static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}
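
/*
 * Link an avc onto the VMA's same_vma list and into the anon_vma's
 * interval tree, so pages can be found from either direction.
 */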
static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}

/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * In many cases an adjacent mapping's anon_vma can be reused (very
 * common when the only reason for splitting a vma has been mprotect()),
 * otherwise a new one is allocated.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 * and that may actually touch the rwsem even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		/* vma reference or self-parent link for new root */
		anon_vma->degree++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}

/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single lock taken for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * anon_vma_clone() is called by __split_vma(), copy_vma() and
 * anon_vma_fork(). The first two want an exact copy of src, while the last
 * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent
 * endless growth of anon_vma. Since dst->anon_vma is set to NULL before the
 * call, we should decide whether to reuse the parent anon_vma.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse an existing anon_vma if its degree is lower than two,
		 * which means it has no vma and only one anon_vma child.
		 *
		 * Do not choose the parent anon_vma, otherwise the first child
		 * will always reuse it. The root anon_vma is never reused:
		 * it has a self-parent reference and at least one child.
		 */
		if (!dst->anon_vma && anon_vma != src->anon_vma &&
				anon_vma->degree < 2)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->degree++;
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	/*
	 * dst->anon_vma is dropped here otherwise its degree can be incorrectly
	 * decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't care
	 * about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's rwsem is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * Take a reference on the root so it cannot be freed while this
	 * child anon_vma is still linked to it.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->degree++;
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}
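
/* Tear down all anon_vma links of a VMA, freeing any anon_vma left empty. */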
void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA.  This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
			anon_vma->parent->degree--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma)
		vma->anon_vma->degree--;
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->degree);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}
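
/* Slab constructor; the anon_vma cache is SLAB_TYPESAFE_BY_RCU. */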
static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization whatsoever against page_remove_rmap()
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed.  But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_TYPESAFE_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}

/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the rwsem.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the rwsem ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!page_mapped(page)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, so to synchronize
		 * with anon_vma_free() we must release the rwsem
		 * before freeing the anon_vma.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}
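
/* Drop the anon_vma read lock taken by page_lock_anon_vma_read(). */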
void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	anon_vma_unlock_read(anon_vma);
}

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly,
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}
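
/* Record a deferred TLB flush for @mm after a PTE was cleared under the PTL. */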
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure the compiler does not re-order the setting of
	 * tlb_flush_batched before the PTE is cleared.
	 */
	barrier();
	mm->tlb_flush_batched = true;

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}

/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to reduce IPIs.
 */
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	/* If remote CPUs need to be flushed then defer batch the flush */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}

/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB before
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching pending would be expensive during reclaim, so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
 * via the PTL.
 */
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	if (mm->tlb_flush_batched) {
		flush_tlb_mm(mm);

		/*
		 * Do not allow the compiler to re-order the clearing of
		 * tlb_flush_batched before the tlb is flushed.
		 */
		barrier();
		mm->tlb_flush_batched = false;
	}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping) {
		if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return -EFAULT;
	return address;
}
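
/*
 * Walk the page tables of @mm down to the pmd covering @address; returns
 * NULL if any level is not present or the pmd maps a transparent huge page.
 */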
pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pmd_t pmde;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/*
	 * Some THP functions use the sequence pmdp_huge_clear_flush(),
	 * set_pmd_at() without holding anon_vma lock for write.  So when
	 * looking for a genuine pmde (in which to find pte), test present
	 * and !THP together.
	 */
	pmde = *pmd;
	barrier();
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		pmd = NULL;
out:
	return pmd;
}

struct page_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: page_referenced_arg will be passed
 */
static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	int referenced = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;

		if (vma->vm_flags & VM_LOCKED) {
			page_vma_mapped_walk_done(&pvmw);
			pra->vm_flags |= VM_LOCKED;
			return false; /* To break the loop */
		}

		if (pvmw.pte) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				/*
				 * Don't treat a reference through
				 * a sequentially read mapping as such.
				 * If the page has been used in another mapping,
				 * we will catch it; if this other mapping is
				 * already gone, the unmap path will have set
				 * PG_referenced or activated the page.
				 */
				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
					referenced++;
			}
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
						pvmw.pmd))
				referenced++;
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}

		pra->mapcount--;
	}

	if (referenced)
		clear_page_idle(page);
	if (test_and_clear_page_young(page))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags;
	}

	if (!pra->mapcount)
		return false; /* To break the loop */

	return true;
}

static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *memcg,
		    unsigned long *vm_flags)
{
	int we_locked = 0;
	struct page_referenced_arg pra = {
		.mapcount = total_mapcount(page),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = page_lock_anon_vma_read,
	};

	*vm_flags = 0;
	if (!pra.mapcount)
		return 0;

	if (!page_rmapping(page))
		return 0;

	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
		we_locked = trylock_page(page);
		if (!we_locked)
			return 1;
	}

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups
	 */
	if (memcg) {
		rwc.invalid_vma = invalid_page_referenced_vma;
	}

	rmap_walk(page, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		unlock_page(page);

	return pra.referenced;
}
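
/* Clean and write-protect all PTEs/PMDs of one mapping of @page. */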
static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = PVMW_SYNC,
	};
	struct mmu_notifier_range range;
	int *cleaned = arg;

	/*
	 * We have to assume the worse case ie pmd for invalidation. Note that
	 * the page can not be freed from this function.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
				0, vma, vma->vm_mm, address,
				min(vma->vm_end, address +
				    (PAGE_SIZE << compound_order(page))));
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
		unsigned long cstart;
		int ret = 0;

		cstart = address = pvmw.address;
		if (pvmw.pte) {
			pte_t entry;
			pte_t *pte = pvmw.pte;

			if (!pte_dirty(*pte) && !pte_write(*pte))
				continue;

			flush_cache_page(vma, address, pte_pfn(*pte));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
			pmd_t *pmd = pvmw.pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_page(vma, address, page_to_pfn(page));
			entry = pmdp_invalidate(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			cstart &= PMD_MASK;
			ret = 1;
#else
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
#endif
		}

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (ret)
			(*cleaned)++;
	}

	mmu_notifier_invalidate_range_end(&range);

	return true;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}
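
/*
 * Clean and write-protect every shared mapping of @page, returning the
 * number of entries that were actually cleaned.
 */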
int page_mkclean(struct page *page)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!PageLocked(page));

	if (!page_mapped(page))
		return 0;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	rmap_walk(page, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(page_mkclean);

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	page = compound_head(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_VMA(!anon_vma, vma);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	/*
	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
	 * simultaneously, so that a concurrent reader (eg page_referenced()'s
	 * PageAnon()) will not see one without the other.
	 */
	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page:	Page or Hugepage to add to rmap
 * @vma:	VM area to add page to.
 * @address:	User virtual address of the mapping
 * @exclusive:	the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be set up.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
	BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @compound:	charge the page as compound or small page
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int flags)
{
	bool compound = flags & RMAP_COMPOUND;
	bool first;

	if (compound) {
		atomic_t *mapcount;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		mapcount = compound_mapcount_ptr(page);
		first = atomic_inc_and_test(mapcount);
	} else {
		first = atomic_inc_and_test(&page->_mapcount);
	}

	if (first) {
		int nr = compound ? hpage_nr_pages(page) : 1;
		/*
		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
		 * these counters are not modified in interrupt context, and
		 * pte lock(a spinlock) is held, which implies preemption
		 * disabled.
		 */
		if (compound)
			__inc_node_page_state(page, NR_ANON_THPS);
		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
	}
	if (unlikely(PageKsm(page)))
		return;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	/* address might be in next vma when migration races vma_adjust */
	if (first)
		__page_set_anon_rmap(page, vma, address,
				flags & RMAP_EXCLUSIVE);
	else
		__page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @compound:	charge the page as compound or small page
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	int nr = compound ? hpage_nr_pages(page) : 1;

	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	__SetPageSwapBacked(page);
	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		/* increment count (starts at -1) */
		atomic_set(compound_mapcount_ptr(page), 0);
		__inc_node_page_state(page, NR_ANON_THPS);
	} else {
		/* Anon THP always mapped first with PMD */
		VM_BUG_ON_PAGE(PageTransCompound(page), page);
		/* increment count (starts at -1) */
		atomic_set(&page->_mapcount, 0);
	}
	__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
	__page_set_anon_rmap(page, vma, address, 1);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 * @compound: charge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
	lock_page_memcg(page);
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_inc_and_test(&page[i]._mapcount))
				nr++;
		}
		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
			goto out;
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
		__inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
	} else {
		if (PageTransCompound(page) && page_mapping(page)) {
			VM_WARN_ON_ONCE(!PageLocked(page));

			SetPageDoubleMap(compound_head(page));
			if (PageMlocked(page))
				clear_page_mlock(compound_head(page));
		}
		if (!atomic_inc_and_test(&page->_mapcount))
			goto out;
	}
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
out:
	unlock_page_memcg(page);
}
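
/* Undo page_add_file_rmap(): drop the mapcounts and NR_FILE_MAPPED. */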
static void page_remove_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
	lock_page_memcg(page);

	/* Hugetlb pages are not counted in NR_FILE_MAPPED */
	if (unlikely(PageHuge(page))) {
		/* hugetlb pages are always mapped with pmds */
		atomic_dec(compound_mapcount_ptr(page));
		goto out;
	}

	/* page still mapped by someone else? */
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
			goto out;
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
		__dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
	} else {
		if (!atomic_add_negative(-1, &page->_mapcount))
			goto out;
	}

	/*
	 * We use the irq-unsafe __{inc|mod}_lruvec_page_state because
	 * these counters are not modified in interrupt context, and
	 * pte lock(a spinlock) is held, which implies preemption disabled.
	 */
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);
out:
	unlock_page_memcg(page);
}
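
/* Drop the compound (PMD-level) mapping of an anonymous THP. */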
static void page_remove_anon_compound_rmap(struct page *page)
{
	int i, nr;

	if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
		return;

	/* Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED */
	if (unlikely(PageHuge(page)))
		return;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return;

	__dec_node_page_state(page, NR_ANON_THPS);

	if (TestClearPageDoubleMap(page)) {
		/*
		 * Subpages can be mapped with PTEs too. Check how many of
		 * them are still mapped.
		 */
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
	} else {
		nr = HPAGE_PMD_NR;
	}

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (nr) {
		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
		deferred_split_huge_page(page);
	}
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page:	page to remove mapping from
 * @compound:	uncharge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, bool compound)
{
	if (!PageAnon(page))
		return page_remove_file_rmap(page, compound);

	if (compound)
		return page_remove_anon_compound_rmap(page);

	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		return;

	/*
	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
	 * these counters are not modified in interrupt context, and
	 * pte lock(a spinlock) is held, which implies preemption disabled.
	 */
	__dec_node_page_state(page, NR_ANON_MAPPED);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (PageTransCompound(page))
		deferred_split_huge_page(compound_head(page));

	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_unref_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
}

/*
 * @arg: enum ttu_flags will be passed to this argument
 */
static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	pte_t pteval;
	struct page *subpage;
	bool ret = true;
	struct mmu_notifier_range range;
	enum ttu_flags flags = (enum ttu_flags)arg;

	/* munlock has nothing to gain from examining un-locked vmas */
	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
		return true;

	if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
	    is_zone_device_page(page) && !is_device_private_page(page))
		return true;

	if (flags & TTU_SPLIT_HUGE_PMD) {
		split_huge_pmd_address(vma, address,
				flags & TTU_SPLIT_FREEZE, page);
	}

	/*
	 * For THP, we have to assume the worse case ie pmd for invalidation.
	 * For hugetlb, it could be much worse if we need to do pud
	 * invalidation in the case of pmd sharing.
	 *
	 * Note that the page can not be freed in this function as call of
	 * try_to_unmap() must hold a reference on the page.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
				address,
				min(vma->vm_end, address +
				    (PAGE_SIZE << compound_order(page))));
	if (PageHuge(page)) {
		/*
		 * If sharing is possible, start and end will be adjusted
		 * accordingly.
		 */
		adjust_range_if_pmd_sharing_possible(vma, &range.start,
						     &range.end);
	}
	mmu_notifier_invalidate_range_start(&range);

	while (page_vma_mapped_walk(&pvmw)) {
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte && (flags & TTU_MIGRATION)) {
			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);

			set_pmd_migration_entry(&pvmw, page);
			continue;
		}
#endif

		/*
		 * If the page is in an mlock()d vma, we cannot swap it out.
		 * Instead, leave it mapped, move it onto the unevictable
		 * list via mlock_vma_page() below, and abort the walk.
		 */
		if (!(flags & TTU_IGNORE_MLOCK)) {
			if (vma->vm_flags & VM_LOCKED) {
				/* PTE-mapped THP are never mlocked */
				if (!PageTransCompound(page)) {
					/*
					 * Holding pte lock, we do *not* need
					 * mmap_sem here
					 */
					mlock_vma_page(page);
				}
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (flags & TTU_MUNLOCK)
				continue;
		}

		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_PAGE(!pvmw.pte, page);

		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
		address = pvmw.address;

		if (PageHuge(page)) {
			if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
				/*
				 * huge_pmd_unshare unmapped an entire PMD
				 * page.  There is no way of knowing exactly
				 * which PMDs may be cached for this mm, so
				 * we must flush them all.  start/end were
				 * already adjusted above to cover this range.
				 */
				flush_cache_range(vma, range.start, range.end);
				flush_tlb_range(vma, range.start, range.end);
				mmu_notifier_invalidate_range(mm, range.start,
							      range.end);

				/*
				 * The ref count of the PMD page was dropped
				 * which is part of the way map counting
				 * is done for shared PMDs.  Return 'true'
				 * here.  When there is no other sharing,
				 * huge_pmd_unshare returns false and we will
				 * unmap the actual page and drop map count
				 * to zero.
				 */
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
		}

		if (IS_ENABLED(CONFIG_MIGRATION) &&
		    (flags & TTU_MIGRATION) &&
		    is_zone_device_page(page)) {
			swp_entry_t entry;
			pte_t swp_pte;

			pteval = ptep_get_and_clear(mm, pvmw.address, pvmw.pte);

			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			entry = make_migration_entry(page, 0);
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
			/*
			 * No need to invalidate here it will synchronize on
			 * against the special swap migration pte.
			 */
			goto discard;
		}

		if (!(flags & TTU_IGNORE_ACCESS)) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
		}

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
		if (should_defer_flush(mm, flags)) {
			/*
			 * We clear the PTE but do not flush so potentially
			 * a remote CPU could still be writing to the page.
			 * If the entry was previously clean then the
			 * architecture must guarantee that a clear->dirty
			 * transition on a cached TLB entry is written through
			 * and traps if the PTE is unmapped.
			 */
			pteval = ptep_get_and_clear(mm, address, pvmw.pte);

			set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
		} else {
			pteval = ptep_clear_flush(vma, address, pvmw.pte);
		}

		/* Move the dirty bit to the page. Now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);

		if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
			if (PageHuge(page)) {
				int nr = 1 << compound_order(page);
				hugetlb_count_sub(nr, mm);
				set_huge_swap_pte_at(mm, address,
						     pvmw.pte, pteval,
						     vma_mmu_pagesize(vma));
			} else {
				dec_mm_counter(mm, mm_counter(page));
				set_pte_at(mm, address, pvmw.pte, pteval);
			}

		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
			/*
			 * The guest indicated that the page content is of no
			 * interest anymore. Simply discard the pte, vmscan
			 * will take care of the rest.
			 * A future reference will then fault in a new zero
			 * page. When userfaultfd is active, we must not drop
			 * the page content: userspace needs to see the fault
			 * instead of silently getting a zero page.
			 */
			dec_mm_counter(mm, mm_counter(page));
			/* We have to invalidate as we cleared the pte */
			mmu_notifier_invalidate_range(mm, address,
						      address + PAGE_SIZE);
		} else if (IS_ENABLED(CONFIG_MIGRATION) &&
				(flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) {
			swp_entry_t entry;
			pte_t swp_pte;

			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			entry = make_migration_entry(subpage,
					pte_write(pteval));
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
			/*
			 * No need to invalidate here it will synchronize on
			 * against the special swap migration pte.
			 */
		} else if (PageAnon(page)) {
			swp_entry_t entry = { .val = page_private(subpage) };
			pte_t swp_pte;
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
				WARN_ON_ONCE(1);
				ret = false;
				/* We have to invalidate as we cleared the pte */
				mmu_notifier_invalidate_range(mm, address,
							address + PAGE_SIZE);
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			/* MADV_FREE page check */
			if (!PageSwapBacked(page)) {
				if (!PageDirty(page)) {
					/* Invalidate as we cleared the pte */
					mmu_notifier_invalidate_range(mm,
						address, address + PAGE_SIZE);
					dec_mm_counter(mm, MM_ANONPAGES);
					goto discard;
				}

				/*
				 * If the page was redirtied, it cannot be
				 * discarded. Remap the page to page table.
				 */
				set_pte_at(mm, address, pvmw.pte, pteval);
				SetPageSwapBacked(page);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
			/* Invalidate as we cleared the pte */
			mmu_notifier_invalidate_range(mm, address,
						      address + PAGE_SIZE);
		} else {
			/*
			 * This is a locked file-backed page, thus it cannot
			 * be removed from the page cache and replaced by a new
			 * page before mmu_notifier_invalidate_range_end, so no
			 * concurrent thread might update its page table to
			 * point at a new page while a device still is using
			 * this page.
			 *
			 * See Documentation/vm/mmu_notifier.rst
			 */
			dec_mm_counter(mm, mm_counter_file(page));
		}
discard:
		/*
		 * No need to call mmu_notifier_invalidate_range() as it has
		 * been done above for all cases requiring it to happen under
		 * page table lock before mmu_notifier_invalidate_range_end()
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		page_remove_rmap(subpage, PageHuge(page));
		put_page(page);
	}

	mmu_notifier_invalidate_range_end(&range);

	return ret;
}
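
/*
 * During exec the stack is moved under a temporary VMA; report whether
 * @vma looks like such an incompletely set up stack.
 */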
bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return is_vma_temporary_stack(vma);
}
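
/* rmap_walk_control::done callback: stop once no mapping of the page remains. */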
static int page_mapcount_is_zero(struct page *page)
{
	return !total_mapcount(page);
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 *
 * If unmap is successful, return true. Otherwise, false.
 */
bool try_to_unmap(struct page *page, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = page_mapcount_is_zero,
		.anon_lock = page_lock_anon_vma_read,
	};

	/*
	 * During exec, a temporary VMA is setup and later moved.
	 * The VMA is moved under the anon_vma lock but not the
	 * page tables leading to a race where migration cannot
	 * find the migration ptes. Rather than increasing the
	 * locking requirements of exec(), migration skips
	 * temporary VMAs until after exec() completes.
	 */
	if ((flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))
	    && !PageKsm(page) && PageAnon(page))
		rwc.invalid_vma = invalid_migration_vma;

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(page, &rwc);
	else
		rmap_walk(page, &rwc);

	return !page_mapcount(page) ? true : false;
}
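
/* rmap_walk_control::done callback used by try_to_munlock(). */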
static int page_not_mapped(struct page *page)
{
	return !page_mapped(page);
}

/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 */
void try_to_munlock(struct page *page)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)TTU_MUNLOCK,
		.done = page_not_mapped,
		.anon_lock = page_lock_anon_vma_read,
	};

	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);

	rmap_walk(page, &rwc);
}
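
/* Final refcount drop: free the anon_vma, and its root if that was the last reference. */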
void __put_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma *root = anon_vma->root;

	anon_vma_free(anon_vma);
	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
		anon_vma_free(root);
}

static struct anon_vma *rmap_walk_anon_lock(struct page *page,
					struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma;

	if (rwc->anon_lock)
		return rwc->anon_lock(page);

	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_sem. Users without mmap_sem are required to
	 * take a reference count to prevent the anon_vma disappearing.
	 */
	anon_vma = page_anon_vma(page);
	if (!anon_vma)
		return NULL;

	anon_vma_lock_read(anon_vma);
	return anon_vma;
}

/*
 * rmap_walk_anon - do something to anonymous page using the object-based
 * rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
		bool locked)
{
	struct anon_vma *anon_vma;
	pgoff_t pgoff_start, pgoff_end;
	struct anon_vma_chain *avc;

	if (locked) {
		anon_vma = page_anon_vma(page);
		/* anon_vma disappear under us? */
		VM_BUG_ON_PAGE(!anon_vma, page);
	} else {
		anon_vma = rmap_walk_anon_lock(page, rwc);
	}
	if (!anon_vma)
		return;

	pgoff_start = page_to_pgoff(page);
	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
			pgoff_start, pgoff_end) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);

		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		if (!rwc->rmap_one(page, vma, address, rwc->arg))
			break;
		if (rwc->done && rwc->done(page))
			break;
	}

	if (!locked)
		anon_vma_unlock_read(anon_vma);
}

/*
 * rmap_walk_file - do something to file page using the object-based rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
		bool locked)
{
	struct address_space *mapping = page_mapping(page);
	pgoff_t pgoff_start, pgoff_end;
	struct vm_area_struct *vma;

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_rwsem.
	 */
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (!mapping)
		return;

	pgoff_start = page_to_pgoff(page);
	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
	if (!locked)
		i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap,
			pgoff_start, pgoff_end) {
		unsigned long address = vma_address(page, vma);

		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		if (!rwc->rmap_one(page, vma, address, rwc->arg))
			goto done;
		if (rwc->done && rwc->done(page))
			goto done;
	}

done:
	if (!locked)
		i_mmap_unlock_read(mapping);
}
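
/* Dispatch an rmap walk to the KSM, anonymous or file variant. */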
void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
	if (unlikely(PageKsm(page)))
		rmap_walk_ksm(page, rwc);
	else if (PageAnon(page))
		rmap_walk_anon(page, rwc, false);
	else
		rmap_walk_file(page, rwc, false);
}

/* Like rmap_walk, but caller holds relevant rmap lock */
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
{
	/* no ksm support for now */
	VM_BUG_ON_PAGE(PageKsm(page), page);
	if (PageAnon(page))
		rmap_walk_anon(page, rwc, true);
	else
		rmap_walk_file(page, rwc, true);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following two functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
void hugepage_add_anon_rmap(struct page *page,
			    struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int first;

	BUG_ON(!PageLocked(page));
	BUG_ON(!anon_vma);
	/* address might be in next vma when migration races vma_adjust */
	first = atomic_inc_and_test(compound_mapcount_ptr(page));
	if (first)
		__page_set_anon_rmap(page, vma, address, 0);
}

void hugepage_add_new_anon_rmap(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(compound_mapcount_ptr(page), 0);
	__page_set_anon_rmap(page, vma, address, 1);
}
#endif /* CONFIG_HUGETLB_PAGE */