/*
 * mm/rmap.c - physical to virtual reverse mappings
 */
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>

#include <asm/tlbflush.h>

#include <trace/events/tlb.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		anon_vma->degree = 1;	/* Reference for first vma */
		anon_vma->parent = anon_vma;
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
	 *
	 * page_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}
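
/**
 * __anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, which
 * is handled inline by anon_vma_prepare(). But if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 * and that may actually touch the rwsem even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */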
int __anon_vma_prepare(struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct anon_vma *anon_vma, *allocated;
	struct anon_vma_chain *avc;

	might_sleep();

	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_enomem;

	anon_vma = find_mergeable_anon_vma(vma);
	allocated = NULL;
	if (!anon_vma) {
		anon_vma = anon_vma_alloc();
		if (unlikely(!anon_vma))
			goto out_enomem_free_avc;
		allocated = anon_vma;
	}

	anon_vma_lock_write(anon_vma);
	/* page_table_lock to protect against threads */
	spin_lock(&mm->page_table_lock);
	if (likely(!vma->anon_vma)) {
		vma->anon_vma = anon_vma;
		anon_vma_chain_link(vma, avc, anon_vma);
		/* vma reference or self-parent link for new root */
		anon_vma->degree++;
		allocated = NULL;
		avc = NULL;
	}
	spin_unlock(&mm->page_table_lock);
	anon_vma_unlock_write(anon_vma);

	if (unlikely(allocated))
		put_anon_vma(allocated);
	if (unlikely(avc))
		anon_vma_chain_free(avc);

	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}
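
/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single lock acquisition for the whole traversal.
 */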
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}
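
/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 *
 * If dst->anon_vma is NULL this function tries to find and reuse existing
 * anon_vma which has no vmas and only one child anon_vma. This prevents
 * degradation of anon_vma hierarchy to endless linear chain in case of
 * constantly forking task. On the other hand, an anon_vma with more than one
 * child isn't reused even if there was no alive vma, thus rmap walker has a
 * good chance of avoiding scanning whole hierarchy when it searches where
 * page is mapped.
 */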
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);

		/*
		 * Reuse existing anon_vma if its degree is lower than two,
		 * which means it has no vma and only one anon_vma child.
		 *
		 * Do not choose the parent anon_vma, otherwise the first
		 * child will always reuse it. The root anon_vma is never
		 * reused: it has a self-parent reference and at least one
		 * child.
		 */
		if (!dst->anon_vma && anon_vma != src->anon_vma &&
				anon_vma->degree < 2)
			dst->anon_vma = anon_vma;
	}
	if (dst->anon_vma)
		dst->anon_vma->degree++;
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	/*
	 * dst->anon_vma is dropped here otherwise its degree can be
	 * incorrectly decremented in unlink_anon_vmas().
	 * We can safely do this because callers of anon_vma_clone() don't
	 * care about dst->anon_vma if anon_vma_clone() failed.
	 */
	dst->anon_vma = NULL;
	unlink_anon_vmas(dst);
	return -ENOMEM;
}
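
/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */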
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;
	int error;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/* Drop inherited anon_vma, we'll reuse existing or allocate new. */
	vma->anon_vma = NULL;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	error = anon_vma_clone(vma, pvma);
	if (error)
		return error;

	/* An existing anon_vma has been reused, all done then. */
	if (vma->anon_vma)
		return 0;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's rwsem is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	anon_vma->parent = pvma->anon_vma;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to. The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma->parent->degree++;
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA.  This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
			anon_vma->parent->degree--;
			continue;
		}

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	if (vma->anon_vma)
		vma->anon_vma->degree--;
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		VM_WARN_ON(anon_vma->degree);
		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT_CACHED;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_TYPESAFE_BY_RCU|SLAB_PANIC|SLAB_ACCOUNT,
			anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
			SLAB_PANIC|SLAB_ACCOUNT);
}
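
/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization what so ever against page_remove_rmap()
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * ensure that any anon_vma obtained from the page will still be valid for as
 * long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is SLAB_TYPESAFE_BY_RCU and we know from
 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
 * if there is a mapcount, we can dereference the anon_vma after observing
 * those.
 */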
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed.  But if it has been unmapped, we have no security against
	 * the anon_vma structure being freed and reused (for another
	 * anon_vma: SLAB_TYPESAFE_BY_RCU guarantees that - so the
	 * atomic_inc_not_zero() above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}
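
/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * It's a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting
 * a reference like with page_get_anon_vma() and then block on the rwsem.
 */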
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long)READ_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = READ_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the rwsem ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!page_mapped(page)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and so the anon_vma can be torn down.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	anon_vma_unlock_read(anon_vma);
}

#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
 * Flush TLB entries for recently unmapped pages from remote CPUs. It is
 * important if a PTE was dirty when it was unmapped that it's flushed
 * before any IO is initiated on the page to prevent lost writes. Similarly
 * it must be flushed before freeing to prevent data leakage.
 */
void try_to_unmap_flush(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (!tlb_ubc->flush_required)
		return;

	arch_tlbbatch_flush(&tlb_ubc->arch);
	tlb_ubc->flush_required = false;
	tlb_ubc->writable = false;
}

/* Flush iff there are potentially writable TLB entries that can race with IO */
void try_to_unmap_flush_dirty(void)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	if (tlb_ubc->writable)
		try_to_unmap_flush();
}

static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;

	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
	tlb_ubc->flush_required = true;

	/*
	 * Ensure the compiler does not re-order the setting of
	 * tlb_flush_batched before the PTE is cleared.
	 */
	barrier();
	mm->tlb_flush_batched = true;

	/*
	 * If the PTE was dirty then it's best to assume it's writable. The
	 * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
	 * before the page is queued for IO.
	 */
	if (writable)
		tlb_ubc->writable = true;
}
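
/*
 * Returns true if the TLB flush should be deferred to the end of a batch of
 * unmap operations to save a TLB flush per page or so.
 */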
static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	bool should_defer = false;

	if (!(flags & TTU_BATCH_FLUSH))
		return false;

	/* If remote CPUs need to be flushed then defer batch the flush */
	if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
		should_defer = true;
	put_cpu();

	return should_defer;
}
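
/*
 * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
 * releasing the PTL if TLB flushes are batched. It's possible for a parallel
 * operation such as mprotect or munmap to race between reclaim unmapping
 * the page and flushing the page. If this race occurs, it potentially allows
 * access to data via a stale TLB entry. Tracking all mm's that have TLB
 * batching pending would be expensive during reclaim, so instead track
 * whether TLB batching occurred in the past and if so then do a flush here
 * if required. This will cost one additional flush per reclaim cycle paid
 * by the first operation at risk such as mprotect and munmap.
 *
 * This must be called under the PTL so that an access to tlb_flush_batched
 * that is potentially a "reclaim vs mprotect/munmap/etc" race will
 * synchronise via the PTL.
 */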
void flush_tlb_batched_pending(struct mm_struct *mm)
{
	if (mm->tlb_flush_batched) {
		flush_tlb_mm(mm);

		/*
		 * Do not allow the compiler to re-order the clearing of
		 * tlb_flush_batched before the tlb is flushed.
		 */
		barrier();
		mm->tlb_flush_batched = false;
	}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm, bool writable)
{
}

static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
{
	return false;
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
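
/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */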
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping) {
		if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return -EFAULT;
	return address;
}

pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pmd_t pmde;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;

	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/*
	 * Some THP functions use the sequence pmdp_huge_clear_flush(),
	 * set_pmd_at() without holding the anon_vma lock for write.  So when
	 * looking for a genuine pmde (in which to find a pte), test present
	 * and !THP together.
	 */
	pmde = *pmd;
	barrier();
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		pmd = NULL;
out:
	return pmd;
}

struct page_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: page_referenced_arg will be passed
 */
static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	int referenced = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		address = pvmw.address;

		if (vma->vm_flags & VM_LOCKED) {
			page_vma_mapped_walk_done(&pvmw);
			pra->vm_flags |= VM_LOCKED;
			return false; /* To break the loop */
		}

		if (pvmw.pte) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				/*
				 * Don't treat a reference through
				 * a sequentially read mapping as such.
				 * If the page has been used in another
				 * mapping, we will catch it; if this other
				 * mapping is already gone, the unmap path
				 * will have set PG_referenced or activated
				 * the page.
				 */
				if (likely(!(vma->vm_flags & VM_SEQ_READ)))
					referenced++;
			}
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			if (pmdp_clear_flush_young_notify(vma, address,
						pvmw.pmd))
				referenced++;
		} else {
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
		}

		pra->mapcount--;
	}

	if (referenced)
		clear_page_idle(page);
	if (test_and_clear_page_young(page))
		referenced++;

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags;
	}

	if (!pra->mapcount)
		return false; /* To break the loop */

	return true;
}

static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}
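
/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */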
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *memcg,
		    unsigned long *vm_flags)
{
	int we_locked = 0;
	struct page_referenced_arg pra = {
		.mapcount = total_mapcount(page),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = page_lock_anon_vma_read,
	};

	*vm_flags = 0;
	if (!page_mapped(page))
		return 0;

	if (!page_rmapping(page))
		return 0;

	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
		we_locked = trylock_page(page);
		if (!we_locked)
			return 1;
	}

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups.
	 */
	if (memcg) {
		rwc.invalid_vma = invalid_page_referenced_vma;
	}

	rmap_walk(page, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		unlock_page(page);

	return pra.referenced;
}

static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		.flags = PVMW_SYNC,
	};
	unsigned long start = address, end;
	int *cleaned = arg;

	/*
	 * We have to assume the worst case, i.e. pmd, for invalidation.
	 * Note that the page cannot be freed from this function.
	 */
	end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
	mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);

	while (page_vma_mapped_walk(&pvmw)) {
		unsigned long cstart;
		int ret = 0;

		cstart = address = pvmw.address;
		if (pvmw.pte) {
			pte_t entry;
			pte_t *pte = pvmw.pte;

			if (!pte_dirty(*pte) && !pte_write(*pte))
				continue;

			flush_cache_page(vma, address, pte_pfn(*pte));
			entry = ptep_clear_flush(vma, address, pte);
			entry = pte_wrprotect(entry);
			entry = pte_mkclean(entry);
			set_pte_at(vma->vm_mm, address, pte, entry);
			ret = 1;
		} else {
#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
			pmd_t *pmd = pvmw.pmd;
			pmd_t entry;

			if (!pmd_dirty(*pmd) && !pmd_write(*pmd))
				continue;

			flush_cache_page(vma, address, page_to_pfn(page));
			entry = pmdp_huge_clear_flush(vma, address, pmd);
			entry = pmd_wrprotect(entry);
			entry = pmd_mkclean(entry);
			set_pmd_at(vma->vm_mm, address, pmd, entry);
			cstart &= PMD_MASK;
			ret = 1;
#else
			/* unexpected pmd-mapped page? */
			WARN_ON_ONCE(1);
#endif
		}

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection, not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.txt
		 */
		if (ret)
			(*cleaned)++;
	}

	mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);

	return true;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int page_mkclean(struct page *page)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!PageLocked(page));

	if (!page_mapped(page))
		return 0;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	rmap_walk(page, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(page_mkclean);
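
/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */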
void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	page = compound_head(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_VMA(!anon_vma, vma);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	/*
	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
	 * simultaneously, so a concurrent reader (eg page_referenced()'s
	 * PageAnon()) will not see one without the other.
	 */
	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
}
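
/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page:	Page to add to rmap
 * @vma:	VM area to add page to.
 * @address:	User virtual address of the mapping
 * @exclusive:	the page is exclusively owned by the current process
 */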
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}
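
/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */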
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be set up.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those
	 * pages are initially only visible via the pagetables, and the pte
	 * is locked over the call to page_add_new_anon_rmap.
	 */
	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
	BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address));
#endif
}
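
/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @compound:	charge the page as compound or small page
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being truncated while it's mapped.
 */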
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0);
}
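
/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to prefer page_add_anon_rmap.
 */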
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int flags)
{
	bool compound = flags & RMAP_COMPOUND;
	bool first;

	if (compound) {
		atomic_t *mapcount;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		mapcount = compound_mapcount_ptr(page);
		first = atomic_inc_and_test(mapcount);
	} else {
		first = atomic_inc_and_test(&page->_mapcount);
	}

	if (first) {
		int nr = compound ? hpage_nr_pages(page) : 1;
		/*
		 * We use the irq-unsafe __{inc|mod}_node_page_state because
		 * these counters are not modified in interrupt context, and
		 * the pte lock (a spinlock) is held, which implies preemption
		 * is disabled.
		 */
		if (compound)
			__inc_node_page_state(page, NR_ANON_THPS);
		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
	}
	if (unlikely(PageKsm(page)))
		return;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	/* address might be in next vma when migration races vma_adjust */
	if (first)
		__page_set_anon_rmap(page, vma, address,
				flags & RMAP_EXCLUSIVE);
	else
		__page_check_anon_rmap(page, vma, address);
}
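
/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @compound:	charge the page as compound or small page
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */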
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, bool compound)
{
	int nr = compound ? hpage_nr_pages(page) : 1;

	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	__SetPageSwapBacked(page);
	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		/* increment count (starts at -1) */
		atomic_set(compound_mapcount_ptr(page), 0);
		__inc_node_page_state(page, NR_ANON_THPS);
	} else {
		/* Anon THP always mapped first with PMD */
		VM_BUG_ON_PAGE(PageTransCompound(page), page);
		/* increment count (starts at -1) */
		atomic_set(&page->_mapcount, 0);
	}
	__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr);
	__page_set_anon_rmap(page, vma, address, 1);
}
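
/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 * @compound: charge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */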
void page_add_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
	lock_page_memcg(page);
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_inc_and_test(&page[i]._mapcount))
				nr++;
		}
		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
			goto out;
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
		__inc_node_page_state(page, NR_SHMEM_PMDMAPPED);
	} else {
		if (PageTransCompound(page) && page_mapping(page)) {
			VM_WARN_ON_ONCE(!PageLocked(page));

			SetPageDoubleMap(compound_head(page));
			if (PageMlocked(page))
				clear_page_mlock(compound_head(page));
		}
		if (!atomic_inc_and_test(&page->_mapcount))
			goto out;
	}
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, nr);
out:
	unlock_page_memcg(page);
}

static void page_remove_file_rmap(struct page *page, bool compound)
{
	int i, nr = 1;

	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
	lock_page_memcg(page);

	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
	if (unlikely(PageHuge(page))) {
		/* hugetlb pages are always mapped with pmds */
		atomic_dec(compound_mapcount_ptr(page));
		goto out;
	}

	/* page still mapped by someone else? */
	if (compound && PageTransHuge(page)) {
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
			goto out;
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
		__dec_node_page_state(page, NR_SHMEM_PMDMAPPED);
	} else {
		if (!atomic_add_negative(-1, &page->_mapcount))
			goto out;
	}

	/*
	 * We use the irq-unsafe __mod_lruvec_page_state because these
	 * counters are not modified in interrupt context, and the pte
	 * lock (a spinlock) is held, which implies preemption is disabled.
	 */
	__mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);
out:
	unlock_page_memcg(page);
}

static void page_remove_anon_compound_rmap(struct page *page)
{
	int i, nr;

	if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
		return;

	/* Hugepages are not counted in NR_ANON_MAPPED for now. */
	if (unlikely(PageHuge(page)))
		return;

	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
		return;

	__dec_node_page_state(page, NR_ANON_THPS);

	if (TestClearPageDoubleMap(page)) {
		/*
		 * Subpages can be mapped with PTEs too. Check how many of
		 * them are still mapped.
		 */
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
	} else {
		nr = HPAGE_PMD_NR;
	}

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (nr) {
		__mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr);
		deferred_split_huge_page(page);
	}
}
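
/**
 * page_remove_rmap - take down pte mapping from a page
 * @page:	page to remove mapping from
 * @compound:	uncharge the page as compound or small page
 *
 * The caller needs to hold the pte lock.
 */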
void page_remove_rmap(struct page *page, bool compound)
{
	if (!PageAnon(page))
		return page_remove_file_rmap(page, compound);

	if (compound)
		return page_remove_anon_compound_rmap(page);

	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		return;

	/*
	 * We use the irq-unsafe __dec_node_page_state because this counter
	 * is not modified in interrupt context, and the pte lock (a
	 * spinlock) is held, which implies preemption is disabled.
	 */
	__dec_node_page_state(page, NR_ANON_MAPPED);

	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	if (PageTransCompound(page))
		deferred_split_huge_page(compound_head(page));

	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to the page freeing path,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
}
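
/*
 * @arg: enum ttu_flags will be passed to this argument
 */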
static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	pte_t pteval;
	struct page *subpage;
	bool ret = true;
	unsigned long start = address, end;
	enum ttu_flags flags = (enum ttu_flags)arg;

	/* munlock has nothing to gain from examining un-locked vmas */
	if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
		return true;

	if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
	    is_zone_device_page(page) && !is_device_private_page(page))
		return true;

	if (flags & TTU_SPLIT_HUGE_PMD) {
		split_huge_pmd_address(vma, address,
				flags & TTU_SPLIT_FREEZE, page);
	}

	/*
	 * We have to assume the worst case, i.e. pmd, for invalidation.
	 * Note that the page cannot be freed in this function as a call of
	 * try_to_unmap() must hold a reference on the page.
	 */
	end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
	mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);

	while (page_vma_mapped_walk(&pvmw)) {
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte && (flags & TTU_MIGRATION)) {
			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);

			set_pmd_migration_entry(&pvmw, page);
			continue;
		}
#endif

		/*
		 * If the page is mlock()d, we cannot swap it out.
		 * If it's recently referenced (perhaps page_referenced
		 * caught it) we can drop it.
		 */
		if (!(flags & TTU_IGNORE_MLOCK)) {
			if (vma->vm_flags & VM_LOCKED) {
				/* PTE-mapped THP are never mlocked */
				if (!PageTransCompound(page)) {
					/*
					 * Holding pte lock, we do *not* need
					 * mmap_sem here
					 */
					mlock_vma_page(page);
				}
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (flags & TTU_MUNLOCK)
				continue;
		}

		/* Unexpected PMD-mapped THP? */
		VM_BUG_ON_PAGE(!pvmw.pte, page);

		subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
		address = pvmw.address;

		if (IS_ENABLED(CONFIG_MIGRATION) &&
		    (flags & TTU_MIGRATION) &&
		    is_zone_device_page(page)) {
			swp_entry_t entry;
			pte_t swp_pte;

			pteval = ptep_get_and_clear(mm, pvmw.address, pvmw.pte);

			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			entry = make_migration_entry(page, 0);
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);
			/*
			 * No need to invalidate here, as it will synchronize
			 * against the special swap migration pte.
			 */
			goto discard;
		}

		if (!(flags & TTU_IGNORE_ACCESS)) {
			if (ptep_clear_flush_young_notify(vma, address,
						pvmw.pte)) {
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
		}

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
		if (should_defer_flush(mm, flags)) {
			/*
			 * We clear the PTE but do not flush so potentially
			 * a remote CPU could still be writing to the page.
			 * If the entry was previously clean then the
			 * architecture must guarantee that a clear->dirty
			 * transition on a cached TLB entry is written through
			 * and traps if the PTE is unmapped.
			 */
			pteval = ptep_get_and_clear(mm, address, pvmw.pte);

			set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
		} else {
			pteval = ptep_clear_flush(vma, address, pvmw.pte);
		}

		/* Move the dirty bit to the page. Now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		/* Update high watermark before we lower rss */
		update_hiwater_rss(mm);

		if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
			if (PageHuge(page)) {
				int nr = 1 << compound_order(page);
				hugetlb_count_sub(nr, mm);
				set_huge_swap_pte_at(mm, address,
						     pvmw.pte, pteval,
						     vma_mmu_pagesize(vma));
			} else {
				dec_mm_counter(mm, mm_counter(page));
				set_pte_at(mm, address, pvmw.pte, pteval);
			}

		} else if (pte_unused(pteval)) {
			/*
			 * The guest indicated that the page content is of no
			 * interest anymore. Simply discard the pte, vmscan
			 * will take care of the rest.
			 */
			dec_mm_counter(mm, mm_counter(page));
			/* We have to invalidate as we cleared the pte */
			mmu_notifier_invalidate_range(mm, address,
						      address + PAGE_SIZE);
		} else if (IS_ENABLED(CONFIG_MIGRATION) &&
				(flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))) {
			swp_entry_t entry;
			pte_t swp_pte;

			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			entry = make_migration_entry(subpage,
					pte_write(pteval));
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
			/*
			 * No need to invalidate here, as it will synchronize
			 * against the special swap migration pte.
			 */
		} else if (PageAnon(page)) {
			swp_entry_t entry = { .val = page_private(subpage) };
			pte_t swp_pte;
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
				WARN_ON_ONCE(1);
				ret = false;
				/* We have to invalidate as we cleared the pte */
				mmu_notifier_invalidate_range(mm, address,
							address + PAGE_SIZE);
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			/* MADV_FREE page check */
			if (!PageSwapBacked(page)) {
				if (!PageDirty(page)) {
					/* Invalidate as we cleared the pte */
					mmu_notifier_invalidate_range(mm,
						address, address + PAGE_SIZE);
					dec_mm_counter(mm, MM_ANONPAGES);
					goto discard;
				}

				/*
				 * If the page was redirtied, it cannot be
				 * discarded. Remap the page to page table.
				 */
				set_pte_at(mm, address, pvmw.pte, pteval);
				SetPageSwapBacked(page);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}

			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (arch_unmap_one(mm, vma, address, pteval) < 0) {
				set_pte_at(mm, address, pvmw.pte, pteval);
				ret = false;
				page_vma_mapped_walk_done(&pvmw);
				break;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
			swp_pte = swp_entry_to_pte(entry);
			if (pte_soft_dirty(pteval))
				swp_pte = pte_swp_mksoft_dirty(swp_pte);
			set_pte_at(mm, address, pvmw.pte, swp_pte);
			/* Invalidate as we cleared the pte */
			mmu_notifier_invalidate_range(mm, address,
						      address + PAGE_SIZE);
		} else {
			/*
			 * This is a locked file-backed page, thus it cannot
			 * be removed from the page cache and replaced by a
			 * new page before mmu_notifier_invalidate_range_end,
			 * so no concurrent thread might update its page table
			 * to point at a new page while a device still is
			 * using this page.
			 *
			 * See Documentation/vm/mmu_notifier.txt
			 */
			dec_mm_counter(mm, mm_counter_file(page));
		}
discard:
		/*
		 * No need to call mmu_notifier_invalidate_range() as it has
		 * been done above for all cases requiring it to happen under
		 * the page table lock before
		 * mmu_notifier_invalidate_range_end().
		 *
		 * See Documentation/vm/mmu_notifier.txt
		 */
		page_remove_rmap(subpage, PageHuge(page));
		put_page(page);
	}

	mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);

	return ret;
}

bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return is_vma_temporary_stack(vma);
}

static int page_mapcount_is_zero(struct page *page)
{
	return !total_mapcount(page);
}
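
/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 *
 * If unmap is successful, return true. Otherwise, false.
 */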
bool try_to_unmap(struct page *page, enum ttu_flags flags)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = page_mapcount_is_zero,
		.anon_lock = page_lock_anon_vma_read,
	};

	/*
	 * During exec, a temporary VMA is setup and later moved.
	 * The VMA is moved under the anon_vma lock but not the
	 * page tables leading to a race where migration cannot
	 * find the migration ptes. Rather than increasing the
	 * locking requirements of exec(), migration skips
	 * temporary VMAs until after exec() completes.
	 */
	if ((flags & (TTU_MIGRATION|TTU_SPLIT_FREEZE))
	    && !PageKsm(page) && PageAnon(page))
		rwc.invalid_vma = invalid_migration_vma;

	if (flags & TTU_RMAP_LOCKED)
		rmap_walk_locked(page, &rwc);
	else
		rmap_walk(page, &rwc);

	return !page_mapcount(page) ? true : false;
}

static int page_not_mapped(struct page *page)
{
	return !page_mapped(page);
}
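
/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 */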
void try_to_munlock(struct page *page)
{
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)TTU_MUNLOCK,
		.done = page_not_mapped,
		.anon_lock = page_lock_anon_vma_read,
	};

	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);

	rmap_walk(page, &rwc);
}

void __put_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma *root = anon_vma->root;

	anon_vma_free(anon_vma);
	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
		anon_vma_free(root);
}

static struct anon_vma *rmap_walk_anon_lock(struct page *page,
					struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma;

	if (rwc->anon_lock)
		return rwc->anon_lock(page);

	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_sem. Users without mmap_sem are required to
	 * take a reference count to prevent the anon_vma disappearing.
	 */
	anon_vma = page_anon_vma(page);
	if (!anon_vma)
		return NULL;

	anon_vma_lock_read(anon_vma);
	return anon_vma;
}
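
/*
 * rmap_walk_anon - do something to anonymous page using the object-based
 * rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma
 * chains contained in the anon_vma struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the
 * vma where the page was found will be held for write.  So, we won't
 * recheck vm_flags for that VMA.  That should be OK, because that vma
 * shouldn't be VM_LOCKED.
 */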
static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
		bool locked)
{
	struct anon_vma *anon_vma;
	pgoff_t pgoff_start, pgoff_end;
	struct anon_vma_chain *avc;

	if (locked) {
		anon_vma = page_anon_vma(page);
		/* anon_vma disappear under us? */
		VM_BUG_ON_PAGE(!anon_vma, page);
	} else {
		anon_vma = rmap_walk_anon_lock(page, rwc);
	}
	if (!anon_vma)
		return;

	pgoff_start = page_to_pgoff(page);
	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
			pgoff_start, pgoff_end) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);

		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		if (!rwc->rmap_one(page, vma, address, rwc->arg))
			break;
		if (rwc->done && rwc->done(page))
			break;
	}

	if (!locked)
		anon_vma_unlock_read(anon_vma);
}
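
/*
 * rmap_walk_file - do something to file page using the object-based rmap
 * method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma
 * chains contained in the address_space struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the
 * vma where the page was found will be held for write.  So, we won't
 * recheck vm_flags for that VMA.  That should be OK, because that vma
 * shouldn't be VM_LOCKED.
 */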
static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
		bool locked)
{
	struct address_space *mapping = page_mapping(page);
	pgoff_t pgoff_start, pgoff_end;
	struct vm_area_struct *vma;

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_rwsem.
	 */
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (!mapping)
		return;

	pgoff_start = page_to_pgoff(page);
	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
	if (!locked)
		i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap,
			pgoff_start, pgoff_end) {
		unsigned long address = vma_address(page, vma);

		cond_resched();

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		if (!rwc->rmap_one(page, vma, address, rwc->arg))
			goto done;
		if (rwc->done && rwc->done(page))
			goto done;
	}

done:
	if (!locked)
		i_mmap_unlock_read(mapping);
}

void rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
	if (unlikely(PageKsm(page)))
		rmap_walk_ksm(page, rwc);
	else if (PageAnon(page))
		rmap_walk_anon(page, rwc, false);
	else
		rmap_walk_file(page, rwc, false);
}

/* Like rmap_walk, but caller holds the relevant rmap lock */
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
{
	/* no ksm support for now */
	VM_BUG_ON_PAGE(PageKsm(page), page);
	if (PageAnon(page))
		rmap_walk_anon(page, rwc, true);
	else
		rmap_walk_file(page, rwc, true);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following three functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
static void __hugepage_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

void hugepage_add_anon_rmap(struct page *page,
			    struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int first;

	BUG_ON(!PageLocked(page));
	BUG_ON(!anon_vma);
	/* address might be in next vma when migration races vma_adjust */
	first = atomic_inc_and_test(compound_mapcount_ptr(page));
	if (first)
		__hugepage_set_anon_rmap(page, vma, address, 0);
}

void hugepage_add_new_anon_rmap(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(compound_mapcount_ptr(page), 0);
	__hugepage_set_anon_rmap(page, vma, address, 1);
}
#endif /* CONFIG_HUGETLB_PAGE */