/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Provides the rmap layer: given a page, find every page table entry that
 * maps it, so callers can reference-check, clean, unmap or migrate it.
 * Anonymous pages are tracked through anon_vma and anon_vma_chain objects,
 * file pages through the address_space interval tree.
 *
 * Callers must respect the mm lock ordering: mmap_sem is taken outside the
 * page lock, which is taken outside i_mmap_mutex and the anon_vma rwsem,
 * which in turn are taken outside the page table locks.
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/backing-dev.h>

#include <asm/tlbflush.h>

#include "internal.h"
64
65static struct kmem_cache *anon_vma_cachep;
66static struct kmem_cache *anon_vma_chain_cachep;
67
68static inline struct anon_vma *anon_vma_alloc(void)
69{
70 struct anon_vma *anon_vma;
71
72 anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
73 if (anon_vma) {
74 atomic_set(&anon_vma->refcount, 1);
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's anon_vma.
		 */
79 anon_vma->root = anon_vma;
80 }
81
82 return anon_vma;
83}
84
85static inline void anon_vma_free(struct anon_vma *anon_vma)
86{
87 VM_BUG_ON(atomic_read(&anon_vma->refcount));
	/*
	 * Synchronize against page_lock_anon_vma_read(): if the root rwsem
	 * is found locked, a reader sneaked in after the refcount dropped to
	 * zero.  Taking and releasing the lock here guarantees we never free
	 * the anon_vma underneath such a reader.
	 */
106 might_sleep();
107 if (rwsem_is_locked(&anon_vma->root->rwsem)) {
108 anon_vma_lock_write(anon_vma);
109 anon_vma_unlock_write(anon_vma);
110 }
111
112 kmem_cache_free(anon_vma_cachep, anon_vma);
113}
114
115static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
116{
117 return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
118}
119
120static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
121{
122 kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
123}
124
125static void anon_vma_chain_link(struct vm_area_struct *vma,
126 struct anon_vma_chain *avc,
127 struct anon_vma *anon_vma)
128{
129 avc->vma = vma;
130 avc->anon_vma = anon_vma;
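	/* link the new avc into the vma's chain list and the anon_vma's tree */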
131 list_add(&avc->same_vma, &vma->anon_vma_chain);
132 anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
133}

/*
 * anon_vma_prepare - attach an anon_vma to a memory region
 *
 * This makes sure the memory mapping described by @vma has an anon_vma
 * attached to it, so that anonymous pages faulted into it can be tracked
 * by rmap.  The anon_vma is allocated lazily, on first fault, and an
 * attempt is made to reuse a mergeable anon_vma from a neighbouring VMA
 * to keep the number of anon_vma objects down for sequential mmaps.
 *
 * The page_table_lock serializes racing faults attaching an anon_vma to
 * the same VMA; the anon_vma root lock is held while linking the chain.
 */
162int anon_vma_prepare(struct vm_area_struct *vma)
163{
164 struct anon_vma *anon_vma = vma->anon_vma;
165 struct anon_vma_chain *avc;
166
167 might_sleep();
168 if (unlikely(!anon_vma)) {
169 struct mm_struct *mm = vma->vm_mm;
170 struct anon_vma *allocated;
171
172 avc = anon_vma_chain_alloc(GFP_KERNEL);
173 if (!avc)
174 goto out_enomem;
175
176 anon_vma = find_mergeable_anon_vma(vma);
177 allocated = NULL;
178 if (!anon_vma) {
179 anon_vma = anon_vma_alloc();
180 if (unlikely(!anon_vma))
181 goto out_enomem_free_avc;
182 allocated = anon_vma;
183 }
184
185 anon_vma_lock_write(anon_vma);
		/* page_table_lock to protect against threads */
187 spin_lock(&mm->page_table_lock);
188 if (likely(!vma->anon_vma)) {
189 vma->anon_vma = anon_vma;
190 anon_vma_chain_link(vma, avc, anon_vma);
191 allocated = NULL;
192 avc = NULL;
193 }
194 spin_unlock(&mm->page_table_lock);
195 anon_vma_unlock_write(anon_vma);
196
197 if (unlikely(allocated))
198 put_anon_vma(allocated);
199 if (unlikely(avc))
200 anon_vma_chain_free(avc);
201 }
202 return 0;
203
204 out_enomem_free_avc:
205 anon_vma_chain_free(avc);
206 out_enomem:
207 return -ENOMEM;
208}

/*
 * Helpers for anon_vma_clone() and unlink_anon_vmas(): batch the write
 * locking of the anon_vma root rwsem across consecutive chain entries that
 * share the same root, dropping and retaking it only when the root changes.
 */
218static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
219{
220 struct anon_vma *new_root = anon_vma->root;
221 if (new_root != root) {
222 if (WARN_ON_ONCE(root))
223 up_write(&root->rwsem);
224 root = new_root;
225 down_write(&root->rwsem);
226 }
227 return root;
228}
229
230static inline void unlock_anon_vma_root(struct anon_vma *root)
231{
232 if (root)
233 up_write(&root->rwsem);
234}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 */
240int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
241{
242 struct anon_vma_chain *avc, *pavc;
243 struct anon_vma *root = NULL;
244
245 list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
246 struct anon_vma *anon_vma;
247
248 avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
249 if (unlikely(!avc)) {
250 unlock_anon_vma_root(root);
251 root = NULL;
252 avc = anon_vma_chain_alloc(GFP_KERNEL);
253 if (!avc)
254 goto enomem_failure;
255 }
256 anon_vma = pavc->anon_vma;
257 root = lock_anon_vma_root(root, anon_vma);
258 anon_vma_chain_link(dst, avc, anon_vma);
259 }
260 unlock_anon_vma_root(root);
261 return 0;
262
263 enomem_failure:
264 unlink_anon_vmas(dst);
265 return -ENOMEM;
266}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
273int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
274{
275 struct anon_vma_chain *avc;
276 struct anon_vma *anon_vma;
277 int error;

	/* Don't bother if the parent process has no anon_vma. */
280 if (!pvma->anon_vma)
281 return 0;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find not-COWed pages in child processes.
	 */
287 error = anon_vma_clone(vma, pvma);
288 if (error)
289 return error;

	/* Then add our own anon_vma. */
292 anon_vma = anon_vma_alloc();
293 if (!anon_vma)
294 goto out_error;
295 avc = anon_vma_chain_alloc(GFP_KERNEL);
296 if (!avc)
297 goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's rwsem is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
303 anon_vma->root = pvma->anon_vma->root;

	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to.  The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
309 get_anon_vma(anon_vma->root);
310
311 vma->anon_vma = anon_vma;
312 anon_vma_lock_write(anon_vma);
313 anon_vma_chain_link(vma, avc, anon_vma);
314 anon_vma_unlock_write(anon_vma);
315
316 return 0;
317
318 out_error_free_anon_vma:
319 put_anon_vma(anon_vma);
320 out_error:
321 unlink_anon_vmas(vma);
322 return -ENOMEM;
323}
324
325void unlink_anon_vmas(struct vm_area_struct *vma)
326{
327 struct anon_vma_chain *avc, *next;
328 struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA.  This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
334 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
335 struct anon_vma *anon_vma = avc->anon_vma;
336
337 root = lock_anon_vma_root(root, anon_vma);
338 anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
344 if (RB_EMPTY_ROOT(&anon_vma->rb_root))
345 continue;
346
347 list_del(&avc->same_vma);
348 anon_vma_chain_free(avc);
349 }
350 unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more; it now only contains empty and
	 * unlinked anon_vmas.  Drop the final references outside the root
	 * lock, since freeing an anon_vma may need to take it.
	 */
357 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
358 struct anon_vma *anon_vma = avc->anon_vma;
359
360 put_anon_vma(anon_vma);
361
362 list_del(&avc->same_vma);
363 anon_vma_chain_free(avc);
364 }
365}
366
367static void anon_vma_ctor(void *data)
368{
369 struct anon_vma *anon_vma = data;
370
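	/*
	 * The anon_vma cache is SLAB_DESTROY_BY_RCU, so an RCU reader may
	 * still inspect a freed (and possibly reused) object; the fields
	 * initialised here must always remain in a state such readers can
	 * safely test.
	 */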
371 init_rwsem(&anon_vma->rwsem);
372 atomic_set(&anon_vma->refcount, 0);
373 anon_vma->rb_root = RB_ROOT;
374}
375
376void __init anon_vma_init(void)
377{
378 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
379 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
380 anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
381}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * The page's anon_vma can be freed or reused at any moment unless we hold
 * the page lock or a reference, so the pointer read from page->mapping is
 * only meaningful inside an RCU read-side section: the anon_vma cache is
 * SLAB_DESTROY_BY_RCU, which guarantees the memory is not returned to the
 * system while we hold rcu_read_lock(), although the object itself may be
 * reused for another anon_vma.  That is why the refcount and the
 * page_mapped() state are re-checked after taking the reference below.
 */
406struct anon_vma *page_get_anon_vma(struct page *page)
407{
408 struct anon_vma *anon_vma = NULL;
409 unsigned long anon_mapping;
410
411 rcu_read_lock();
412 anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
413 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
414 goto out;
415 if (!page_mapped(page))
416 goto out;
417
418 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
419 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
420 anon_vma = NULL;
421 goto out;
422 }

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed.  But if it has been unmapped, we have no guarantee against
	 * the anon_vma being freed and reused (SLAB_DESTROY_BY_RCU only
	 * protects the memory, not the object identity), so drop the
	 * reference and bail out.
	 */
431 if (!page_mapped(page)) {
432 rcu_read_unlock();
433 put_anon_vma(anon_vma);
434 return NULL;
435 }
436out:
437 rcu_read_unlock();
438
439 return anon_vma;
440}

/*
 * Similar to page_get_anon_vma() except it locks the anon_vma for read.
 *
 * Its a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock.  If we fail the trylock, we fall back to getting
 * a reference like with page_get_anon_vma() and then block on the rwsem.
 */
449struct anon_vma *page_lock_anon_vma_read(struct page *page)
450{
451 struct anon_vma *anon_vma = NULL;
452 struct anon_vma *root_anon_vma;
453 unsigned long anon_mapping;
454
455 rcu_read_lock();
456 anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
457 if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
458 goto out;
459 if (!page_mapped(page))
460 goto out;
461
462 anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
463 root_anon_vma = ACCESS_ONCE(anon_vma->root);
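	/* fast path: try to take the root's rwsem without sleeping */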
464 if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the rwsem ensures that it will
		 * not go away, see anon_vma_free().
		 */
470 if (!page_mapped(page)) {
471 up_read(&root_anon_vma->rwsem);
472 anon_vma = NULL;
473 }
474 goto out;
475 }

	/* trylock failed, we got to sleep */
478 if (!atomic_inc_not_zero(&anon_vma->refcount)) {
479 anon_vma = NULL;
480 goto out;
481 }
482
483 if (!page_mapped(page)) {
484 rcu_read_unlock();
485 put_anon_vma(anon_vma);
486 return NULL;
487 }

	/* we pinned the anon_vma, its safe to sleep */
490 rcu_read_unlock();
491 anon_vma_lock_read(anon_vma);
492
493 if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
499 anon_vma_unlock_read(anon_vma);
500 __put_anon_vma(anon_vma);
501 anon_vma = NULL;
502 }
503
504 return anon_vma;
505
506out:
507 rcu_read_unlock();
508 return anon_vma;
509}
510
511void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
512{
513 anon_vma_unlock_read(anon_vma);
514}

/*
 * At what user virtual address is page expected in vma?
 */
519static inline unsigned long
520__vma_address(struct page *page, struct vm_area_struct *vma)
521{
522 pgoff_t pgoff = page_to_pgoff(page);
523 return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
524}
525
526inline unsigned long
527vma_address(struct page *page, struct vm_area_struct *vma)
528{
529 unsigned long address = __vma_address(page, vma);
530
531
532 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
533
534 return address;
535}

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
541unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
542{
543 unsigned long address;
544 if (PageAnon(page)) {
545 struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * The page and the vma must share the same anon_vma root,
		 * otherwise the page cannot be mapped in this vma.
		 */
550 if (!vma->anon_vma || !page__anon_vma ||
551 vma->anon_vma->root != page__anon_vma->root)
552 return -EFAULT;
553 } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
554 if (!vma->vm_file ||
555 vma->vm_file->f_mapping != page->mapping)
556 return -EFAULT;
557 } else
558 return -EFAULT;
559 address = __vma_address(page, vma);
560 if (unlikely(address < vma->vm_start || address >= vma->vm_end))
561 return -EFAULT;
562 return address;
563}
564
565pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
566{
567 pgd_t *pgd;
568 pud_t *pud;
569 pmd_t *pmd = NULL;
570 pmd_t pmde;
571
572 pgd = pgd_offset(mm, address);
573 if (!pgd_present(*pgd))
574 goto out;
575
576 pud = pud_offset(pgd, address);
577 if (!pud_present(*pud))
578 goto out;
579
580 pmd = pmd_offset(pud, address);
	/*
	 * Some THP functions use the sequence pmdp_clear_flush(), set_pmd_at()
	 * without holding anon_vma lock for write.  So when looking for a
	 * genuine pmde (in which to find pte), test present and !THP together.
	 */
586 pmde = ACCESS_ONCE(*pmd);
587 if (!pmd_present(pmde) || pmd_trans_huge(pmde))
588 pmd = NULL;
589out:
590 return pmd;
591}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * If @sync is false, page_check_address may perform a racy check to avoid
 * the page table lock when the pte is not present (helpful when reclaiming
 * highly shared pages).
 *
 * On success returns with pte mapped and locked.
 */
602pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
603 unsigned long address, spinlock_t **ptlp, int sync)
604{
605 pmd_t *pmd;
606 pte_t *pte;
607 spinlock_t *ptl;
608
609 if (unlikely(PageHuge(page))) {
		/* when pud is not present, pte will be NULL */
611 pte = huge_pte_offset(mm, address);
612 if (!pte)
613 return NULL;
614
615 ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
616 goto check;
617 }
618
619 pmd = mm_find_pmd(mm, address);
620 if (!pmd)
621 return NULL;
622
623 pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
625 if (!sync && !pte_present(*pte)) {
626 pte_unmap(pte);
627 return NULL;
628 }
629
630 ptl = pte_lockptr(mm, pmd);
631check:
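	/* recheck, under the pte lock, that the pte still maps this page */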
632 spin_lock(ptl);
633 if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
634 *ptlp = ptl;
635 return pte;
636 }
637 pte_unmap_unlock(pte, ptl);
638 return NULL;
639}

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
650int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
651{
652 unsigned long address;
653 pte_t *pte;
654 spinlock_t *ptl;
655
656 address = __vma_address(page, vma);
657 if (unlikely(address < vma->vm_start || address >= vma->vm_end))
658 return 0;
659 pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
660 if (!pte)
661 return 0;
662 pte_unmap_unlock(pte, ptl);
663
664 return 1;
665}
666
667struct page_referenced_arg {
668 int mapcount;
669 int referenced;
670 unsigned long vm_flags;
671 struct mem_cgroup *memcg;
672};

/*
 * arg: page_referenced_arg will be passed
 */
676static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
677 unsigned long address, void *arg)
678{
679 struct mm_struct *mm = vma->vm_mm;
680 spinlock_t *ptl;
681 int referenced = 0;
682 struct page_referenced_arg *pra = arg;
683
684 if (unlikely(PageTransHuge(page))) {
685 pmd_t *pmd;
		/*
		 * rmap might return false positives; we must filter
		 * these out using page_check_address_pmd().
		 */
691 pmd = page_check_address_pmd(page, mm, address,
692 PAGE_CHECK_ADDRESS_PMD_FLAG, &ptl);
693 if (!pmd)
694 return SWAP_AGAIN;
695
696 if (vma->vm_flags & VM_LOCKED) {
697 spin_unlock(ptl);
698 pra->vm_flags |= VM_LOCKED;
699 return SWAP_FAIL;
700 }

		/* go ahead even if the pmd is pmd_trans_splitting() */
703 if (pmdp_clear_flush_young_notify(vma, address, pmd))
704 referenced++;
705 spin_unlock(ptl);
706 } else {
707 pte_t *pte;
		/*
		 * rmap might return false positives; we must filter
		 * these out using page_check_address().
		 */
713 pte = page_check_address(page, mm, address, &ptl, 0);
714 if (!pte)
715 return SWAP_AGAIN;
716
717 if (vma->vm_flags & VM_LOCKED) {
718 pte_unmap_unlock(pte, ptl);
719 pra->vm_flags |= VM_LOCKED;
720 return SWAP_FAIL;
721 }
722
723 if (ptep_clear_flush_young_notify(vma, address, pte)) {
			/*
			 * Don't treat a reference through a sequentially read
			 * mapping as such.  If the page has been used in
			 * another mapping, we will catch it; if this other
			 * mapping is already gone, the unmap path will have
			 * set PG_referenced or activated the page.
			 */
731 if (likely(!(vma->vm_flags & VM_SEQ_READ)))
732 referenced++;
733 }
734 pte_unmap_unlock(pte, ptl);
735 }
736
737 if (referenced) {
738 pra->referenced++;
739 pra->vm_flags |= vma->vm_flags;
740 }
741
742 pra->mapcount--;
743 if (!pra->mapcount)
744 return SWAP_SUCCESS;
745
746 return SWAP_AGAIN;
747}
748
749static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
750{
751 struct page_referenced_arg *pra = arg;
752 struct mem_cgroup *memcg = pra->memcg;
753
754 if (!mm_match_cgroup(vma->vm_mm, memcg))
755 return true;
756
757 return false;
758}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
770int page_referenced(struct page *page,
771 int is_locked,
772 struct mem_cgroup *memcg,
773 unsigned long *vm_flags)
774{
775 int ret;
776 int we_locked = 0;
777 struct page_referenced_arg pra = {
778 .mapcount = page_mapcount(page),
779 .memcg = memcg,
780 };
781 struct rmap_walk_control rwc = {
782 .rmap_one = page_referenced_one,
783 .arg = (void *)&pra,
784 .anon_lock = page_lock_anon_vma_read,
785 };
786
787 *vm_flags = 0;
788 if (!page_mapped(page))
789 return 0;
790
791 if (!page_rmapping(page))
792 return 0;

	/* rmap_walk on KSM and file pages requires the page to be locked */
794 if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
795 we_locked = trylock_page(page);
796 if (!we_locked)
797 return 1;
798 }

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups
	 */
805 if (memcg) {
806 rwc.invalid_vma = invalid_page_referenced_vma;
807 }
808
809 ret = rmap_walk(page, &rwc);
810 *vm_flags = pra.vm_flags;
811
812 if (we_locked)
813 unlock_page(page);
814
815 return pra.referenced;
816}
817
818static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
819 unsigned long address, void *arg)
820{
821 struct mm_struct *mm = vma->vm_mm;
822 pte_t *pte;
823 spinlock_t *ptl;
824 int ret = 0;
825 int *cleaned = arg;
826
827 pte = page_check_address(page, mm, address, &ptl, 1);
828 if (!pte)
829 goto out;
830
831 if (pte_dirty(*pte) || pte_write(*pte)) {
832 pte_t entry;
833
834 flush_cache_page(vma, address, pte_pfn(*pte));
835 entry = ptep_clear_flush(vma, address, pte);
836 entry = pte_wrprotect(entry);
837 entry = pte_mkclean(entry);
838 set_pte_at(mm, address, pte, entry);
839 ret = 1;
840 }
841
842 pte_unmap_unlock(pte, ptl);
843
844 if (ret) {
845 mmu_notifier_invalidate_page(mm, address);
846 (*cleaned)++;
847 }
848out:
849 return SWAP_AGAIN;
850}
851
852static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
853{
854 if (vma->vm_flags & VM_SHARED)
855 return false;
856
857 return true;
858}
859
860int page_mkclean(struct page *page)
861{
862 int cleaned = 0;
863 struct address_space *mapping;
864 struct rmap_walk_control rwc = {
865 .arg = (void *)&cleaned,
866 .rmap_one = page_mkclean_one,
867 .invalid_vma = invalid_mkclean_vma,
868 };
869
870 BUG_ON(!PageLocked(page));
871
872 if (!page_mapped(page))
873 return 0;
874
875 mapping = page_mapping(page);
876 if (!mapping)
877 return 0;
878
879 rmap_walk(page, &rwc);
880
881 return cleaned;
882}
883EXPORT_SYMBOL_GPL(page_mkclean);

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 * @address:	the user virtual address mapped
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
896void page_move_anon_rmap(struct page *page,
897 struct vm_area_struct *vma, unsigned long address)
898{
899 struct anon_vma *anon_vma = vma->anon_vma;
900
901 VM_BUG_ON_PAGE(!PageLocked(page), page);
902 VM_BUG_ON_VMA(!anon_vma, vma);
903 VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);

	/* page->mapping holds the anon_vma pointer with PAGE_MAPPING_ANON set */
905 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
906 page->mapping = (struct address_space *) anon_vma;
907}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @exclusive:	the page is exclusively owned by the current process
 */
916static void __page_set_anon_rmap(struct page *page,
917 struct vm_area_struct *vma, unsigned long address, int exclusive)
918{
919 struct anon_vma *anon_vma = vma->anon_vma;
920
921 BUG_ON(!anon_vma);
922
923 if (PageAnon(page))
924 return;
	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
931 if (!exclusive)
932 anon_vma = anon_vma->root;
933
934 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
935 page->mapping = (struct address_space *) anon_vma;
936 page->index = linear_page_index(vma, address);
937}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
945static void __page_check_anon_rmap(struct page *page,
946 struct vm_area_struct *vma, unsigned long address)
947{
948#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be set up.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
961 BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
962 BUG_ON(page->index != linear_page_index(vma, address));
963#endif
964}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
977void page_add_anon_rmap(struct page *page,
978 struct vm_area_struct *vma, unsigned long address)
979{
980 do_page_add_anon_rmap(page, vma, address, 0);
981}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
988void do_page_add_anon_rmap(struct page *page,
989 struct vm_area_struct *vma, unsigned long address, int exclusive)
990{
991 int first = atomic_inc_and_test(&page->_mapcount);
992 if (first) {
		/*
		 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
		 * these counters are not modified in interrupt context, and
		 * pte lock(a spinlock) is held, which implies preemption
		 * disabled.
		 */
999 if (PageTransHuge(page))
1000 __inc_zone_page_state(page,
1001 NR_ANON_TRANSPARENT_HUGEPAGES);
1002 __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
1003 hpage_nr_pages(page));
1004 }
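	/* KSM pages keep their own stable-tree rmap; skip the anon_vma setup */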
1005 if (unlikely(PageKsm(page)))
1006 return;
1007
1008 VM_BUG_ON_PAGE(!PageLocked(page), page);
1009
1010 if (first)
1011 __page_set_anon_rmap(page, vma, address, exclusive);
1012 else
1013 __page_check_anon_rmap(page, vma, address);
1014}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
1026void page_add_new_anon_rmap(struct page *page,
1027 struct vm_area_struct *vma, unsigned long address)
1028{
1029 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
1030 SetPageSwapBacked(page);
1031 atomic_set(&page->_mapcount, 0);
1032 if (PageTransHuge(page))
1033 __inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
1034 __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
1035 hpage_nr_pages(page));
1036 __page_set_anon_rmap(page, vma, address, 1);
1037}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
1045void page_add_file_rmap(struct page *page)
1046{
1047 struct mem_cgroup *memcg;
1048 unsigned long flags;
1049 bool locked;
1050
1051 memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
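	/* _mapcount starts at -1, so the first mapping brings it to zero */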
1052 if (atomic_inc_and_test(&page->_mapcount)) {
1053 __inc_zone_page_state(page, NR_FILE_MAPPED);
1054 mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
1055 }
1056 mem_cgroup_end_page_stat(memcg, locked, flags);
1057}
1058
1059static void page_remove_file_rmap(struct page *page)
1060{
1061 struct mem_cgroup *memcg;
1062 unsigned long flags;
1063 bool locked;
1064
1065 memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);

	/* page still mapped by someone else? */
1068 if (!atomic_add_negative(-1, &page->_mapcount))
1069 goto out;

	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
1072 if (unlikely(PageHuge(page)))
1073 goto out;

	/*
	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
	 * these counters are not modified in interrupt context, and
	 * pte lock(a spinlock) is held, which implies preemption disabled.
	 */
1080 __dec_zone_page_state(page, NR_FILE_MAPPED);
1081 mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
1082
1083 if (unlikely(PageMlocked(page)))
1084 clear_page_mlock(page);
1085out:
1086 mem_cgroup_end_page_stat(memcg, locked, flags);
1087}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
1095void page_remove_rmap(struct page *page)
1096{
1097 if (!PageAnon(page)) {
1098 page_remove_file_rmap(page);
1099 return;
1100 }

	/* page still mapped by someone else? */
1103 if (!atomic_add_negative(-1, &page->_mapcount))
1104 return;

	/* Hugepages are not counted in NR_ANON_PAGES for now. */
1107 if (unlikely(PageHuge(page)))
1108 return;

	/*
	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
	 * these counters are not modified in interrupt context, and
	 * pte lock(a spinlock) is held, which implies preemption disabled.
	 */
1115 if (PageTransHuge(page))
1116 __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
1117
1118 __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
1119 -hpage_nr_pages(page));
1120
1121 if (unlikely(PageMlocked(page)))
1122 clear_page_mlock(page);
1123

	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_hot_cold_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
1133}

/*
 * @arg: enum ttu_flags will be passed to this argument
 */
1138static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1139 unsigned long address, void *arg)
1140{
1141 struct mm_struct *mm = vma->vm_mm;
1142 pte_t *pte;
1143 pte_t pteval;
1144 spinlock_t *ptl;
1145 int ret = SWAP_AGAIN;
1146 enum ttu_flags flags = (enum ttu_flags)arg;
1147
1148 pte = page_check_address(page, mm, address, &ptl, 0);
1149 if (!pte)
1150 goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
1157 if (!(flags & TTU_IGNORE_MLOCK)) {
1158 if (vma->vm_flags & VM_LOCKED)
1159 goto out_mlock;
1160
1161 if (flags & TTU_MUNLOCK)
1162 goto out_unmap;
1163 }
1164 if (!(flags & TTU_IGNORE_ACCESS)) {
1165 if (ptep_clear_flush_young_notify(vma, address, pte)) {
1166 ret = SWAP_FAIL;
1167 goto out_unmap;
1168 }
1169 }

	/* Nuke the page table entry. */
1172 flush_cache_page(vma, address, page_to_pfn(page));
1173 pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
1176 if (pte_dirty(pteval))
1177 set_page_dirty(page);

	/* Update high watermark before we lower rss */
1180 update_hiwater_rss(mm);
1181
1182 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
1183 if (!PageHuge(page)) {
1184 if (PageAnon(page))
1185 dec_mm_counter(mm, MM_ANONPAGES);
1186 else
1187 dec_mm_counter(mm, MM_FILEPAGES);
1188 }
1189 set_pte_at(mm, address, pte,
1190 swp_entry_to_pte(make_hwpoison_entry(page)));
1191 } else if (pte_unused(pteval)) {
		/*
		 * The guest indicated that the page content is of no
		 * interest anymore.  Simply discard the pte, vmscan
		 * will take care of the rest.
		 */
1197 if (PageAnon(page))
1198 dec_mm_counter(mm, MM_ANONPAGES);
1199 else
1200 dec_mm_counter(mm, MM_FILEPAGES);
1201 } else if (PageAnon(page)) {
1202 swp_entry_t entry = { .val = page_private(page) };
1203 pte_t swp_pte;
1204
1205 if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
1210 if (swap_duplicate(entry) < 0) {
1211 set_pte_at(mm, address, pte, pteval);
1212 ret = SWAP_FAIL;
1213 goto out_unmap;
1214 }
1215 if (list_empty(&mm->mmlist)) {
1216 spin_lock(&mmlist_lock);
1217 if (list_empty(&mm->mmlist))
1218 list_add(&mm->mmlist, &init_mm.mmlist);
1219 spin_unlock(&mmlist_lock);
1220 }
1221 dec_mm_counter(mm, MM_ANONPAGES);
1222 inc_mm_counter(mm, MM_SWAPENTS);
1223 } else if (IS_ENABLED(CONFIG_MIGRATION)) {
			/*
			 * Store the pfn of the page in a special migration
			 * pte.  do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
1229 BUG_ON(!(flags & TTU_MIGRATION));
1230 entry = make_migration_entry(page, pte_write(pteval));
1231 }
1232 swp_pte = swp_entry_to_pte(entry);
1233 if (pte_soft_dirty(pteval))
1234 swp_pte = pte_swp_mksoft_dirty(swp_pte);
1235 set_pte_at(mm, address, pte, swp_pte);
1236 BUG_ON(pte_file(*pte));
1237 } else if (IS_ENABLED(CONFIG_MIGRATION) &&
1238 (flags & TTU_MIGRATION)) {
		/* Establish migration entry for a file page */
1240 swp_entry_t entry;
1241 entry = make_migration_entry(page, pte_write(pteval));
1242 set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
1243 } else
1244 dec_mm_counter(mm, MM_FILEPAGES);
1245
1246 page_remove_rmap(page);
1247 page_cache_release(page);
1248
1249out_unmap:
1250 pte_unmap_unlock(pte, ptl);
1251 if (ret != SWAP_FAIL && !(flags & TTU_MUNLOCK))
1252 mmu_notifier_invalidate_page(mm, address);
1253out:
1254 return ret;
1255
1256out_mlock:
1257 pte_unmap_unlock(pte, ptl);

	/*
	 * We need mmap_sem locking, Otherwise VM_LOCKED check makes
	 * unstable result and race.  Plus, We can't wait here because
	 * we now hold anon_vma->rwsem or mapping->i_mmap_mutex.
	 * if trylock failed, the page remain in evictable lru and later
	 * vmscan could retry to move the page to unevictable lru if the
	 * page is actually mlocked.
	 */
1268 if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
1269 if (vma->vm_flags & VM_LOCKED) {
1270 mlock_vma_page(page);
1271 ret = SWAP_MLOCK;
1272 }
1273 up_read(&vma->vm_mm->mmap_sem);
1274 }
1275 return ret;
1276}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not
 * hold.  However, the VMA can still reasonably be assumed to be occupied by
 * the file's pages, so instead of looking a page up individually we unmap
 * file pages one cluster at a time from each nonlinear VMA.
 *
 * The cluster size is the smaller of 32 pages and one PMD, which keeps each
 * pte walk within a single page table.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
1304
1305static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
1306 struct vm_area_struct *vma, struct page *check_page)
1307{
1308 struct mm_struct *mm = vma->vm_mm;
1309 pmd_t *pmd;
1310 pte_t *pte;
1311 pte_t pteval;
1312 spinlock_t *ptl;
1313 struct page *page;
1314 unsigned long address;
1315 unsigned long mmun_start;
1316 unsigned long mmun_end;
1317 unsigned long end;
1318 int ret = SWAP_AGAIN;
1319 int locked_vma = 0;
1320
1321 address = (vma->vm_start + cursor) & CLUSTER_MASK;
1322 end = address + CLUSTER_SIZE;
1323 if (address < vma->vm_start)
1324 address = vma->vm_start;
1325 if (end > vma->vm_end)
1326 end = vma->vm_end;
1327
1328 pmd = mm_find_pmd(mm, address);
1329 if (!pmd)
1330 return ret;
1331
1332 mmun_start = address;
1333 mmun_end = end;
1334 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	/*
	 * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
	 * keep the sem while scanning the cluster for mlocking pages.
	 */
1340 if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
1341 locked_vma = (vma->vm_flags & VM_LOCKED);
1342 if (!locked_vma)
1343 up_read(&vma->vm_mm->mmap_sem);
1344 }
1345
1346 pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
1349 update_hiwater_rss(mm);
1350
1351 for (; address < end; pte++, address += PAGE_SIZE) {
1352 if (!pte_present(*pte))
1353 continue;
1354 page = vm_normal_page(vma, address, *pte);
1355 BUG_ON(!page || PageAnon(page));
1356
1357 if (locked_vma) {
1358 if (page == check_page) {
				/* this is the page we were asked about: mlock it */
1360 mlock_vma_page(page);
1361 ret = SWAP_MLOCK;
1362 } else if (trylock_page(page)) {
				/*
				 * If we can lock the page, perform mlock.
				 * Otherwise leave the page alone, it will be
				 * eventually encountered again later.
				 */
1368 mlock_vma_page(page);
1369 unlock_page(page);
1370 }
1371 continue;
1372 }
		/*
		 * No need for _notify because we're within an
		 * mmu_notifier_invalidate_range_start/end scope.
		 */
1378 if (ptep_clear_flush_young(vma, address, pte))
1379 continue;

		/* Nuke the page table entry. */
1382 flush_cache_page(vma, address, pte_pfn(*pte));
1383 pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
1386 if (page->index != linear_page_index(vma, address)) {
1387 pte_t ptfile = pgoff_to_pte(page->index);
1388 if (pte_soft_dirty(pteval))
1389 ptfile = pte_file_mksoft_dirty(ptfile);
1390 set_pte_at(mm, address, pte, ptfile);
1391 }

		/* Move the dirty bit to the physical page now the pte is gone. */
1394 if (pte_dirty(pteval))
1395 set_page_dirty(page);
1396
1397 page_remove_rmap(page);
1398 page_cache_release(page);
1399 dec_mm_counter(mm, MM_FILEPAGES);
1400 (*mapcount)--;
1401 }
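	/* the loop advanced pte one past the last entry; unlock the previous slot */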
1402 pte_unmap_unlock(pte - 1, ptl);
1403 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1404 if (locked_vma)
1405 up_read(&vma->vm_mm->mmap_sem);
1406 return ret;
1407}
1408
1409static int try_to_unmap_nonlinear(struct page *page,
1410 struct address_space *mapping, void *arg)
1411{
1412 struct vm_area_struct *vma;
1413 int ret = SWAP_AGAIN;
1414 unsigned long cursor;
1415 unsigned long max_nl_cursor = 0;
1416 unsigned long max_nl_size = 0;
1417 unsigned int mapcount;
1418
1419 list_for_each_entry(vma,
1420 &mapping->i_mmap_nonlinear, shared.nonlinear) {
1421
1422 cursor = (unsigned long) vma->vm_private_data;
1423 if (cursor > max_nl_cursor)
1424 max_nl_cursor = cursor;
1425 cursor = vma->vm_end - vma->vm_start;
1426 if (cursor > max_nl_size)
1427 max_nl_size = cursor;
1428 }
1429
1430 if (max_nl_size == 0) {
1431 return SWAP_FAIL;
1432 }
	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try.
	 */
1441 mapcount = page_mapcount(page);
1442 if (!mapcount)
1443 return ret;
1444
1445 cond_resched();
1446
1447 max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
1448 if (max_nl_cursor == 0)
1449 max_nl_cursor = CLUSTER_SIZE;
1450
1451 do {
1452 list_for_each_entry(vma,
1453 &mapping->i_mmap_nonlinear, shared.nonlinear) {
1454
1455 cursor = (unsigned long) vma->vm_private_data;
1456 while (cursor < max_nl_cursor &&
1457 cursor < vma->vm_end - vma->vm_start) {
1458 if (try_to_unmap_cluster(cursor, &mapcount,
1459 vma, page) == SWAP_MLOCK)
1460 ret = SWAP_MLOCK;
1461 cursor += CLUSTER_SIZE;
1462 vma->vm_private_data = (void *) cursor;
1463 if ((int)mapcount <= 0)
1464 return ret;
1465 }
1466 vma->vm_private_data = (void *) max_nl_cursor;
1467 }
1468 cond_resched();
1469 max_nl_cursor += CLUSTER_SIZE;
1470 } while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
1477 list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.nonlinear)
1478 vma->vm_private_data = NULL;
1479
1480 return ret;
1481}
1482
1483bool is_vma_temporary_stack(struct vm_area_struct *vma)
1484{
1485 int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
1486
1487 if (!maybe_stack)
1488 return false;
1489
1490 if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
1491 VM_STACK_INCOMPLETE_SETUP)
1492 return true;
1493
1494 return false;
1495}
1496
1497static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
1498{
1499 return is_vma_temporary_stack(vma);
1500}
1501
1502static int page_not_mapped(struct page *page)
1503{
1504 return !page_mapped(page);
1505};

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 * SWAP_MLOCK	- page is mlocked.
 */
1521int try_to_unmap(struct page *page, enum ttu_flags flags)
1522{
1523 int ret;
1524 struct rmap_walk_control rwc = {
1525 .rmap_one = try_to_unmap_one,
1526 .arg = (void *)flags,
1527 .done = page_not_mapped,
1528 .file_nonlinear = try_to_unmap_nonlinear,
1529 .anon_lock = page_lock_anon_vma_read,
1530 };
1531
1532 VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);
	/*
	 * During exec, a temporary VMA is set up and later moved.
	 * The VMA is moved under the anon_vma lock but not the page tables,
	 * leading to a race where migration cannot find the migration ptes.
	 * Rather than increasing the locking requirements of exec(),
	 * migration skips temporary VMAs by checking whether the vma is a
	 * temporary stack.
	 */
1542 if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
1543 rwc.invalid_vma = invalid_migration_vma;
1544
1545 ret = rmap_walk(page, &rwc);

	/* A missed mapping is fine as long as nothing maps the page anymore. */
1547 if (ret != SWAP_MLOCK && !page_mapped(page))
1548 ret = SWAP_SUCCESS;
1549 return ret;
1550}

/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked.  The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 *
 * Return values are:
 *
 * SWAP_AGAIN	- no vma is holding page mlocked, or,
 * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
 * SWAP_FAIL	- page cannot be located at present
 * SWAP_MLOCK	- page is now mlocked.
 */
1567int try_to_munlock(struct page *page)
1568{
1569 int ret;
1570 struct rmap_walk_control rwc = {
1571 .rmap_one = try_to_unmap_one,
1572 .arg = (void *)TTU_MUNLOCK,
1573 .done = page_not_mapped,
		/*
		 * We don't bother to try to find the munlocked page in
		 * nonlinears.  It's costly.  Instead, later, page reclaim
		 * logic may call try_to_unmap() and recover PG_mlocked lazily.
		 */
1579 .file_nonlinear = NULL,
1580 .anon_lock = page_lock_anon_vma_read,
1581
1582 };
1583
1584 VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
1585
1586 ret = rmap_walk(page, &rwc);
1587 return ret;
1588}

/*
 * Drop the final reference: free this anon_vma and, if it carried the last
 * reference to its root, free the root as well.
 */
1590void __put_anon_vma(struct anon_vma *anon_vma)
1591{
1592 struct anon_vma *root = anon_vma->root;
1593
1594 anon_vma_free(anon_vma);
1595 if (root != anon_vma && atomic_dec_and_test(&root->refcount))
1596 anon_vma_free(root);
1597}
1598
1599static struct anon_vma *rmap_walk_anon_lock(struct page *page,
1600 struct rmap_walk_control *rwc)
1601{
1602 struct anon_vma *anon_vma;
1603
1604 if (rwc->anon_lock)
1605 return rwc->anon_lock(page);
	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_sem.  Users without mmap_sem are required to
	 * take a reference count to prevent the anon_vma disappearing.
	 */
1613 anon_vma = page_anon_vma(page);
1614 if (!anon_vma)
1615 return NULL;
1616
1617 anon_vma_lock_read(anon_vma);
1618 return anon_vma;
1619}

/*
 * rmap_walk_anon - do something to anonymous page using the object-based
 * rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma
 * chains contained in the anon_vma struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the
 * vma where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
1635static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
1636{
1637 struct anon_vma *anon_vma;
1638 pgoff_t pgoff = page_to_pgoff(page);
1639 struct anon_vma_chain *avc;
1640 int ret = SWAP_AGAIN;
1641
1642 anon_vma = rmap_walk_anon_lock(page, rwc);
1643 if (!anon_vma)
1644 return ret;
1645
1646 anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
1647 struct vm_area_struct *vma = avc->vma;
1648 unsigned long address = vma_address(page, vma);
1649
1650 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
1651 continue;
1652
1653 ret = rwc->rmap_one(page, vma, address, rwc->arg);
1654 if (ret != SWAP_AGAIN)
1655 break;
1656 if (rwc->done && rwc->done(page))
1657 break;
1658 }
1659 anon_vma_unlock_read(anon_vma);
1660 return ret;
1661}

/*
 * rmap_walk_file - do something to file page using the object-based rmap
 * method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma
 * chains contained in the address_space struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the
 * vma where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
1676static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
1677{
1678 struct address_space *mapping = page->mapping;
1679 pgoff_t pgoff = page_to_pgoff(page);
1680 struct vm_area_struct *vma;
1681 int ret = SWAP_AGAIN;
	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_mutex.
	 */
1689 VM_BUG_ON_PAGE(!PageLocked(page), page);
1690
1691 if (!mapping)
1692 return ret;
1693 mutex_lock(&mapping->i_mmap_mutex);
1694 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1695 unsigned long address = vma_address(page, vma);
1696
1697 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
1698 continue;
1699
1700 ret = rwc->rmap_one(page, vma, address, rwc->arg);
1701 if (ret != SWAP_AGAIN)
1702 goto done;
1703 if (rwc->done && rwc->done(page))
1704 goto done;
1705 }
1706
1707 if (!rwc->file_nonlinear)
1708 goto done;
1709
1710 if (list_empty(&mapping->i_mmap_nonlinear))
1711 goto done;
1712
1713 ret = rwc->file_nonlinear(page, mapping, rwc->arg);
1714
1715done:
1716 mutex_unlock(&mapping->i_mmap_mutex);
1717 return ret;
1718}

/* Dispatch to the KSM, anonymous or file rmap walker based on the page type. */
1720int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
1721{
1722 if (unlikely(PageKsm(page)))
1723 return rmap_walk_ksm(page, rwc);
1724 else if (PageAnon(page))
1725 return rmap_walk_anon(page, rwc);
1726 else
1727 return rmap_walk_file(page, rwc);
1728}
1729
#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following three functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
1736static void __hugepage_set_anon_rmap(struct page *page,
1737 struct vm_area_struct *vma, unsigned long address, int exclusive)
1738{
1739 struct anon_vma *anon_vma = vma->anon_vma;
1740
1741 BUG_ON(!anon_vma);
1742
1743 if (PageAnon(page))
1744 return;
1745 if (!exclusive)
1746 anon_vma = anon_vma->root;
1747
1748 anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
1749 page->mapping = (struct address_space *) anon_vma;
1750 page->index = linear_page_index(vma, address);
1751}
1752
1753void hugepage_add_anon_rmap(struct page *page,
1754 struct vm_area_struct *vma, unsigned long address)
1755{
1756 struct anon_vma *anon_vma = vma->anon_vma;
1757 int first;
1758
1759 BUG_ON(!PageLocked(page));
1760 BUG_ON(!anon_vma);
	/* address might be in next vma when migration races vma_adjust */
1762 first = atomic_inc_and_test(&page->_mapcount);
1763 if (first)
1764 __hugepage_set_anon_rmap(page, vma, address, 0);
1765}
1766
1767void hugepage_add_new_anon_rmap(struct page *page,
1768 struct vm_area_struct *vma, unsigned long address)
1769{
1770 BUG_ON(address < vma->vm_start || address >= vma->vm_end);
1771 atomic_set(&page->_mapcount, 0);
1772 __hugepage_set_anon_rmap(page, vma, address, 1);
1773}
1774#endif
1775