/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Provides the reverse-mapping (rmap) methods used to find every place a
 * page is mapped: the anon_vma code tracks anonymous pages, while the file
 * methods track pages belonging to an inode's address_space.
 *
 * Lock ordering in mm (outermost first), as relied upon in this file:
 *
 *   mm->mmap_sem
 *     page lock (PG_locked)
 *       mapping->i_mmap_mutex
 *         anon_vma->rwsem
 *           mm->page_table_lock or pte lock
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/backing-dev.h>

#include <asm/tlbflush.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		/*
		 * Initialise the anon_vma root to point to itself.  If called
		 * from fork, the root is reset to the parent's anon_vma root
		 * (see anon_vma_fork() below).
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma_read(): that path may see
	 * a non-zero refcount under RCU and then block on the root rwsem.
	 * If the root lock is still held, take and release it ourselves so
	 * that any such reader is finished with this anon_vma before we
	 * hand it back to the SLAB_DESTROY_BY_RCU cache.
	 */
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}

/*
 * anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by @vma has an anon_vma
 * attached to it, so that anonymous pages faulted into it can be reverse
 * mapped later.
 *
 * The common case is that the vma already has one.  Otherwise we either
 * reuse the anon_vma of a mergeable neighbouring vma (very common when a
 * vma has only been split, e.g. by mprotect()) or allocate a fresh one.
 *
 * Even a freshly allocated anon_vma must be locked here, because
 * page_lock_anon_vma_read() may already have looked it up under RCU and
 * may be about to take its lock.  The check and assignment of
 * vma->anon_vma are done under the anon_vma lock and the mm's
 * page_table_lock, so whichever racing thread gets there first wins and
 * the loser frees its unneeded allocations.
 *
 * This must be called with the mmap_sem held for reading.
 */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	struct anon_vma_chain *avc;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated;

		avc = anon_vma_chain_alloc(GFP_KERNEL);
		if (!avc)
			goto out_enomem;

		anon_vma = find_mergeable_anon_vma(vma);
		allocated = NULL;
		if (!anon_vma) {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				goto out_enomem_free_avc;
			allocated = anon_vma;
		}

		anon_vma_lock_write(anon_vma);
		/* page_table_lock to protect against threads racing to set vma->anon_vma */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			anon_vma_chain_link(vma, avc, anon_vma);
			allocated = NULL;
			avc = NULL;
		}
		spin_unlock(&mm->page_table_lock);
		anon_vma_unlock_write(anon_vma);

		if (unlikely(allocated))
			put_anon_vma(allocated);
		if (unlikely(avc))
			anon_vma_chain_free(avc);
	}
	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}
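
/*
 * Illustrative sketch only (not part of this file, and the function name
 * handle_anon_fault() below is hypothetical): the anonymous fault path is
 * expected to call anon_vma_prepare() before installing a brand-new
 * anonymous page, roughly in this order:
 *
 *	static int handle_anon_fault(struct vm_area_struct *vma,
 *				     unsigned long address)
 *	{
 *		struct page *page;
 *
 *		if (unlikely(anon_vma_prepare(vma)))
 *			return VM_FAULT_OOM;
 *		page = alloc_zeroed_user_highpage_movable(vma, address);
 *		if (!page)
 *			return VM_FAULT_OOM;
 *		...
 *		page_add_new_anon_rmap(page, vma, address);
 *		...		(set the pte under its lock, then return 0)
 *	}
 */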

/*
 * Helper for locking the root anon_vma while traversing a
 * vma->anon_vma_chain.  All anon_vmas on one chain normally share the
 * same root, so the root rwsem usually only needs to be taken once for
 * the whole traversal; it is re-taken only if the root changes.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);
	}
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find not-COWed pages in child processes.
	 */
	if (anon_vma_clone(vma, pvma))
		return -ENOMEM;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's rwsem is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	/*
	 * With refcounts, an anon_vma can stay around longer than the
	 * process it belongs to.  The root anon_vma needs to be pinned until
	 * this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA.  This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root))
			continue;

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more; it now only contains empty and
	 * unlinked anon_vmas, destroy them.  This could not be done in the
	 * first pass because anon_vma_free() may need to take the anon_vma
	 * lock, which was held above.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * There is no serialization at all against page_remove_rmap(), so the best
 * this function can do is return an anon_vma that is still plausibly
 * associated with the page.  The page may have been remapped to a different
 * anon_vma, or the anon_vma returned may be about to be freed and reused.
 *
 * Because the anon_vma cache is SLAB_DESTROY_BY_RCU, dereferencing the
 * pointer taken from page->mapping under rcu_read_lock() is safe; and as
 * long as the page is observed mapped (page_mapped()), the anon_vma cannot
 * have been freed.  Callers walking the anon_vma chain must therefore
 * re-verify that the page is actually mapped in each vma they visit.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed.  But if it has been unmapped, we have no guarantee against
	 * the anon_vma structure being freed and reused (it may already
	 * belong to someone else, thanks to SLAB_DESTROY_BY_RCU), so in
	 * that case we must not return it to the caller.
	 */
	if (!page_mapped(page)) {
		put_anon_vma(anon_vma);
		anon_vma = NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}

/*
 * Similar to page_get_anon_vma() except it locks the anon_vma for read.
 *
 * It is a little more complex as it tries to keep the fast path to a single
 * atomic op -- the rwsem trylock.  If the trylock fails, it falls back to
 * taking a reference on the anon_vma and re-taking the lock outside RCU.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = ACCESS_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the rwsem ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!page_mapped(page)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		put_anon_vma(anon_vma);
		anon_vma = NULL;
		goto out;
	}

	/* we pinned the anon_vma, it's safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount: release the lock and
		 * bail.  We can't simply use put_anon_vma() because that
		 * would recurse into the lock in anon_vma_free() while we
		 * still hold it.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	anon_vma_unlock_read(anon_vma);
}
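
/*
 * Illustrative sketch only (not part of the call graph here): rmap walkers
 * in this file pair page_lock_anon_vma_read()/page_unlock_anon_vma_read()
 * around an anon_vma interval tree walk, along these lines:
 *
 *	struct anon_vma *anon_vma = page_lock_anon_vma_read(page);
 *	struct anon_vma_chain *avc;
 *	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 *
 *	if (!anon_vma)
 *		return;
 *	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
 *		unsigned long address = vma_address(page, avc->vma);
 *		...	(act on this one mapping of the page)
 *	}
 *	page_unlock_anon_vma_read(anon_vma);
 */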

/*
 * At what user virtual address is page expected in @vma?
 */
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	if (unlikely(is_vm_hugetlb_page(vma)))
		pgoff = page->index << huge_page_order(page_hstate(page));

	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address = __vma_address(page, vma);

	/* page should be within @vma mapping range */
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);

	return address;
}

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return -EFAULT;
	return address;
}

pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		pmd = NULL;
out:
	return pmd;
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * If @sync is false, this may perform a racy check without the page table
 * lock to skip ptes that are not present (useful when reclaiming highly
 * shared pages).
 *
 * On success, returns with the pte mapped and locked.
 */
pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
			    unsigned long address, spinlock_t **ptlp, int sync)
{
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(page))) {
		pte = huge_pte_offset(mm, address);
		ptl = &mm->page_table_lock;
		goto check;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd)
		return NULL;

	if (pmd_trans_huge(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!sync && !pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
check:
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}
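
/*
 * Illustrative sketch only: callers normally go through the
 * page_check_address() wrapper and must drop the returned pte lock
 * themselves, e.g.:
 *
 *	pte_t *pte;
 *	spinlock_t *ptl;
 *
 *	pte = page_check_address(page, mm, address, &ptl, 0);
 *	if (!pte)
 *		return;			(page not stably mapped here)
 *	...				(inspect or modify *pte under ptl)
 *	pte_unmap_unlock(pte, ptl);
 */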

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;

	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return 0;
	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
	if (!pte)			/* the page is not in this mm */
		return 0;
	pte_unmap_unlock(pte, ptl);

	return 1;
}

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
int page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, unsigned int *mapcount,
			unsigned long *vm_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	int referenced = 0;

	if (unlikely(PageTransHuge(page))) {
		pmd_t *pmd;

		spin_lock(&mm->page_table_lock);
		/*
		 * rmap might return false positives; we must filter
		 * these out using page_check_address_pmd().
		 */
		pmd = page_check_address_pmd(page, mm, address,
					     PAGE_CHECK_ADDRESS_PMD_FLAG);
		if (!pmd) {
			spin_unlock(&mm->page_table_lock);
			goto out;
		}

		if (vma->vm_flags & VM_LOCKED) {
			spin_unlock(&mm->page_table_lock);
			*mapcount = 0;	/* break early from loop */
			*vm_flags |= VM_LOCKED;
			goto out;
		}

		/* go ahead even if the pmd is pmd_trans_splitting() */
		if (pmdp_clear_flush_young_notify(vma, address, pmd))
			referenced++;
		spin_unlock(&mm->page_table_lock);
	} else {
		pte_t *pte;
		spinlock_t *ptl;

		/*
		 * rmap might return false positives; we must filter
		 * these out using page_check_address().
		 */
		pte = page_check_address(page, mm, address, &ptl, 0);
		if (!pte)
			goto out;

		if (vma->vm_flags & VM_LOCKED) {
			pte_unmap_unlock(pte, ptl);
			*mapcount = 0;	/* break early from loop */
			*vm_flags |= VM_LOCKED;
			goto out;
		}

		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			/*
			 * Don't treat a reference through a sequentially read
			 * mapping as such.  If the page has been used in
			 * another mapping, we will catch it; if this other
			 * mapping is already gone, the unmap path will have
			 * set PG_referenced or activated the page.
			 */
			if (likely(!(vma->vm_flags & VM_SEQ_READ)))
				referenced++;
		}
		pte_unmap_unlock(pte, ptl);
	}

	(*mapcount)--;

	if (referenced)
		*vm_flags |= vma->vm_flags;
out:
	return referenced;
}

static int page_referenced_anon(struct page *page,
				struct mem_cgroup *memcg,
				unsigned long *vm_flags)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	pgoff_t pgoff;
	struct anon_vma_chain *avc;
	int referenced = 0;

	anon_vma = page_lock_anon_vma_read(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting references from vmas belonging to a
		 * different cgroup.
		 */
		if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
			continue;
		referenced += page_referenced_one(page, vma, address,
						  &mapcount, vm_flags);
		if (!mapcount)
			break;
	}

	page_unlock_anon_vma_read(anon_vma);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 * @memcg: target memory control group
 * @vm_flags: collect the vm_flags of the vmas which actually referenced the page
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page,
				struct mem_cgroup *memcg,
				unsigned long *vm_flags)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_mutex.
	 */
	BUG_ON(!PageLocked(page));

	mutex_lock(&mapping->i_mmap_mutex);

	/*
	 * i_mmap_mutex does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after taking the lock.
	 */
	mapcount = page_mapcount(page);

	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting references from vmas belonging to a
		 * different cgroup.
		 */
		if (memcg && !mm_match_cgroup(vma->vm_mm, memcg))
			continue;
		referenced += page_referenced_one(page, vma, address,
						  &mapcount, vm_flags);
		if (!mapcount)
			break;
	}

	mutex_unlock(&mapping->i_mmap_mutex);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: collect the vm_flags of the vmas which referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *memcg,
		    unsigned long *vm_flags)
{
	int referenced = 0;
	int we_locked = 0;

	*vm_flags = 0;
	if (page_mapped(page) && page_rmapping(page)) {
		if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
			we_locked = trylock_page(page);
			if (!we_locked) {
				referenced++;
				goto out;
			}
		}
		if (unlikely(PageKsm(page)))
			referenced += page_referenced_ksm(page, memcg,
								vm_flags);
		else if (PageAnon(page))
			referenced += page_referenced_anon(page, memcg,
								vm_flags);
		else if (page->mapping)
			referenced += page_referenced_file(page, memcg,
								vm_flags);
		if (we_locked)
			unlock_page(page);
	}
out:
	return referenced;
}
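
/*
 * Illustrative sketch only (simplified from how page reclaim is expected
 * to use page_referenced(); not code from this file):
 *
 *	unsigned long vm_flags;
 *	int referenced = page_referenced(page, 1, NULL, &vm_flags);
 *
 *	if (vm_flags & VM_LOCKED)
 *		;	(keep the page: it is mapped by an mlocked vma)
 *	else if (referenced)
 *		;	(young page: activate it or give it another pass)
 *	else
 *		;	(cold page: candidate for page_mkclean()/try_to_unmap())
 */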

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	pte = page_check_address(page, mm, address, &ptl, 1);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);

	if (ret)
		mmu_notifier_invalidate_page(mm, address);
out:
	return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	int ret = 0;

	BUG_ON(PageAnon(page));

	mutex_lock(&mapping->i_mmap_mutex);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED) {
			unsigned long address = vma_address(page, vma);
			ret += page_mkclean_one(page, vma, address);
		}
	}
	mutex_unlock(&mapping->i_mmap_mutex);
	return ret;
}

int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping)
			ret = page_mkclean_file(mapping, page);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);
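
/*
 * Illustrative sketch only (assumed caller pattern, not code from this
 * file): writeback uses page_mkclean() when transferring the dirty state
 * from the ptes to the page, roughly:
 *
 *	lock_page(page);
 *	if (page_mkclean(page))
 *		set_page_dirty(page);	(some pte was dirty or writable)
 *	unlock_page(page);
 */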

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 * @address:	the user virtual address mapped
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!anon_vma);
	VM_BUG_ON(page->index != linear_page_index(vma, address));

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page:	Page to add to rmap
 * @vma:	VM area to add page to.
 * @address:	User virtual address of the mapping
 * @exclusive:	the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be set up.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	do_page_add_anon_rmap(page, vma, address, 0);
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	int first = atomic_inc_and_test(&page->_mapcount);
	if (first) {
		if (PageTransHuge(page))
			__inc_zone_page_state(page,
					      NR_ANON_TRANSPARENT_HUGEPAGES);
		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
				hpage_nr_pages(page));
	}
	if (unlikely(PageKsm(page)))
		return;

	VM_BUG_ON(!PageLocked(page));
	/* address might be in next vma when migration races vma_adjust */
	if (first)
		__page_set_anon_rmap(page, vma, address, exclusive);
	else
		__page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	SetPageSwapBacked(page);
	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
	if (PageTransHuge(page))
		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
			hpage_nr_pages(page));
	__page_set_anon_rmap(page, vma, address, 1);
	if (!mlocked_vma_newpage(vma, page)) {
		SetPageActive(page);
		lru_cache_add(page);
	} else
		add_page_to_unevictable_list(page);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	bool locked;
	unsigned long flags;

	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
	if (atomic_inc_and_test(&page->_mapcount)) {
		__inc_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
	}
	mem_cgroup_end_update_page_stat(page, &locked, &flags);
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
	bool anon = PageAnon(page);
	bool locked;
	unsigned long flags;

	/*
	 * The anon case has no mem_cgroup page_stat to update; but may
	 * uncharge_page() below, where the lock ordering can deadlock if
	 * we hold the lock against page_stat move: so avoid it on anon.
	 */
	if (!anon)
		mem_cgroup_begin_update_page_stat(page, &locked, &flags);

	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		goto out;

	/*
	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
	 * and not charged by memcg for now.
	 */
	if (unlikely(PageHuge(page)))
		goto out;
	if (anon) {
		mem_cgroup_uncharge_page(page);
		if (PageTransHuge(page))
			__dec_zone_page_state(page,
					      NR_ANON_TRANSPARENT_HUGEPAGES);
		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
				-hpage_nr_pages(page));
	} else {
		__dec_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
		mem_cgroup_end_update_page_stat(page, &locked, &flags);
	}
	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);
	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_hot_cold_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
	return;
out:
	if (!anon)
		mem_cgroup_end_update_page_stat(page, &locked, &flags);
}

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from try_to_unmap_ksm, try_to_unmap_anon or try_to_unmap_file.
 */
int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, enum ttu_flags flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!(flags & TTU_IGNORE_MLOCK)) {
		if (vma->vm_flags & VM_LOCKED)
			goto out_mlock;

		if (TTU_ACTION(flags) == TTU_MUNLOCK)
			goto out_unmap;
	}
	if (!(flags & TTU_IGNORE_ACCESS)) {
		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			ret = SWAP_FAIL;
			goto out_unmap;
		}
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
		if (!PageHuge(page)) {
			if (PageAnon(page))
				dec_mm_counter(mm, MM_ANONPAGES);
			else
				dec_mm_counter(mm, MM_FILEPAGES);
		}
		set_pte_at(mm, address, pte,
			   swp_entry_to_pte(make_hwpoison_entry(page)));
	} else if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		pte_t swp_pte;

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pte, pteval);
				ret = SWAP_FAIL;
				goto out_unmap;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
			entry = make_migration_entry(page, pte_write(pteval));
		}
		swp_pte = swp_entry_to_pte(entry);
		if (pte_soft_dirty(pteval))
			swp_pte = pte_swp_mksoft_dirty(swp_pte);
		set_pte_at(mm, address, pte, swp_pte);
		BUG_ON(pte_file(*pte));
	} else if (IS_ENABLED(CONFIG_MIGRATION) &&
		   (TTU_ACTION(flags) == TTU_MIGRATION)) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
		dec_mm_counter(mm, MM_FILEPAGES);

	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret != SWAP_FAIL)
		mmu_notifier_invalidate_page(mm, address);
out:
	return ret;

out_mlock:
	pte_unmap_unlock(pte, ptl);

	/*
	 * We need mmap_sem locking here, otherwise the VM_LOCKED check can
	 * give an unstable, racy result.  But we cannot wait for the
	 * semaphore, because we already hold anon_vma->rwsem or
	 * mapping->i_mmap_mutex.  If the trylock fails, the page stays on
	 * the evictable lru, and vmscan can later retry moving it to the
	 * unevictable lru if it really is mlocked.
	 */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		if (vma->vm_flags & VM_LOCKED) {
			mlock_vma_page(page);
			ret = SWAP_MLOCK;
		}
		up_read(&vma->vm_mm->mmap_sem);
	}
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not
 * hold.  So, given a particular page and a nonlinear vma, we have no idea
 * which ptes in that vma might map it and cannot use the interval tree.
 *
 * Instead such vmas are scanned a cluster at a time: a cluster is
 * CLUSTER_SIZE bytes of virtual address space, aligned so that it never
 * crosses a pmd boundary, starting from the per-vma cursor kept in
 * vm_private_data.  try_to_unmap_cluster() below unmaps (or mlocks) every
 * normal page it finds within one cluster.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))

static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
		struct vm_area_struct *vma, struct page *check_page)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */
	unsigned long end;
	int ret = SWAP_AGAIN;
	int locked_vma = 0;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pmd = mm_find_pmd(mm, address);
	if (!pmd)
		return ret;

	mmun_start = address;
	mmun_end   = end;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	/*
	 * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
	 * keep the sem while scanning the cluster for mlocking pages.
	 */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		locked_vma = (vma->vm_flags & VM_LOCKED);
		if (!locked_vma)
			up_read(&vma->vm_mm->mmap_sem); /* don't need it */
	}

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (locked_vma) {
			mlock_vma_page(page);	/* no-op if already mlocked */
			if (page == check_page)
				ret = SWAP_MLOCK;
			continue;	/* don't unmap */
		}

		if (ptep_clear_flush_young_notify(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address)) {
			pte_t ptfile = pgoff_to_pte(page->index);
			if (pte_soft_dirty(pteval))
				ptfile = pte_file_mksoft_dirty(ptfile);
			set_pte_at(mm, address, pte, ptfile);
		}

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, MM_FILEPAGES);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	if (locked_vma)
		up_read(&vma->vm_mm->mmap_sem);
	return ret;
}

bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

/**
 * try_to_unmap_anon - unmap or unlock anonymous page using the anon_vma-based
 * rmap method
 * @page: the page to unmap/unlock
 * @flags: action and flags
 *
 * Find all the mappings of a page using the mapping pointer and the vma
 * chains contained in the anon_vma struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * anonymous pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
{
	struct anon_vma *anon_vma;
	pgoff_t pgoff;
	struct anon_vma_chain *avc;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma_read(page);
	if (!anon_vma)
		return ret;

	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address;

		/*
		 * During exec, a temporary VMA is setup and later moved.
		 * The VMA is moved under the anon_vma lock but not the
		 * page tables leading to a race where migration cannot
		 * find the migration ptes.  Rather than increasing the
		 * locking requirements of exec(), migration skips
		 * temporary VMAs until after exec() completes.
		 */
		if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
				is_vma_temporary_stack(vma))
			continue;

		address = vma_address(page, vma);
		ret = try_to_unmap_one(page, vma, address, flags);
		if (ret != SWAP_AGAIN || !page_mapped(page))
			break;
	}

	page_unlock_anon_vma_read(anon_vma);
	return ret;
}

/**
 * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
 * @page: the page to unmap/unlock
 * @flags: action and flags
 *
 * Find all the mappings of a page using the mapping pointer and the vma
 * chains contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * object-based pages.
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * VM_LOCKED.
 */
static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	if (PageHuge(page))
		pgoff = page->index << compound_order(page);

	mutex_lock(&mapping->i_mmap_mutex);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		ret = try_to_unmap_one(page, vma, address, flags);
		if (ret != SWAP_AGAIN || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	/*
	 * We don't bother to try to find the munlocked page in nonlinears.
	 * It's costly.  Instead, later, page reclaim logic may call
	 * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
	 */
	if (TTU_ACTION(flags) == TTU_MUNLOCK)
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
							shared.nonlinear) {
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* all nonlinears locked or reserved? */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched();

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
							shared.nonlinear) {
			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				if (try_to_unmap_cluster(cursor, &mapcount,
						vma, page) == SWAP_MLOCK)
					ret = SWAP_MLOCK;
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched();
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.nonlinear)
		vma->vm_private_data = NULL;
out:
	mutex_unlock(&mapping->i_mmap_mutex);
	return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 * SWAP_MLOCK	- page is mlocked.
 */
int try_to_unmap(struct page *page, enum ttu_flags flags)
{
	int ret;

	BUG_ON(!PageLocked(page));
	VM_BUG_ON(!PageHuge(page) && PageTransHuge(page));

	if (unlikely(PageKsm(page)))
		ret = try_to_unmap_ksm(page, flags);
	else if (PageAnon(page))
		ret = try_to_unmap_anon(page, flags);
	else
		ret = try_to_unmap_file(page, flags);
	if (ret != SWAP_MLOCK && !page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}
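
/*
 * Illustrative sketch only (simplified reclaim-style caller, not code from
 * this file): try_to_unmap() is called on a locked page and the caller
 * acts on the result, roughly:
 *
 *	switch (try_to_unmap(page, TTU_UNMAP)) {
 *	case SWAP_FAIL:
 *		...	(reactivate the page)
 *	case SWAP_AGAIN:
 *		...	(keep it on the list and retry later)
 *	case SWAP_MLOCK:
 *		...	(move it to the unevictable list)
 *	case SWAP_SUCCESS:
 *		...	(all ptes gone: proceed to pageout/free)
 *	}
 */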

/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked.  The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 *
 * Return values are:
 *
 * SWAP_AGAIN	- no vma is holding page mlocked, or,
 * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
 * SWAP_FAIL	- page cannot be located at present
 * SWAP_MLOCK	- page is now mlocked.
 */
int try_to_munlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page) || PageLRU(page));

	if (unlikely(PageKsm(page)))
		return try_to_unmap_ksm(page, TTU_MUNLOCK);
	else if (PageAnon(page))
		return try_to_unmap_anon(page, TTU_MUNLOCK);
	else
		return try_to_unmap_file(page, TTU_MUNLOCK);
}

void __put_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma *root = anon_vma->root;

	/*
	 * Free the child before the root: anon_vma_free() peeks at
	 * anon_vma->root->rwsem, so the root must still be valid here.
	 */
	anon_vma_free(anon_vma);
	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
		anon_vma_free(root);
}

#ifdef CONFIG_MIGRATION
/*
 * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
 * Called by migrate.c to remove migration ptes, but might be used more later.
 */
static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	struct anon_vma *anon_vma;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct anon_vma_chain *avc;
	int ret = SWAP_AGAIN;

	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_sem.  Users without mmap_sem are required to
	 * take a reference count to prevent the anon_vma disappearing.
	 */
	anon_vma = page_anon_vma(page);
	if (!anon_vma)
		return ret;
	anon_vma_lock_read(anon_vma);
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);
		ret = rmap_one(page, vma, address, arg);
		if (ret != SWAP_AGAIN)
			break;
	}
	anon_vma_unlock_read(anon_vma);
	return ret;
}

static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	if (!mapping)
		return ret;
	mutex_lock(&mapping->i_mmap_mutex);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		ret = rmap_one(page, vma, address, arg);
		if (ret != SWAP_AGAIN)
			break;
	}
	/*
	 * No nonlinear handling: being always shared, nonlinear vmas
	 * never contain migration ptes.  Decide what needs doing if
	 * they are to be used by other rmap_walk callers.
	 */
	mutex_unlock(&mapping->i_mmap_mutex);
	return ret;
}

int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	VM_BUG_ON(!PageLocked(page));

	if (unlikely(PageKsm(page)))
		return rmap_walk_ksm(page, rmap_one, arg);
	else if (PageAnon(page))
		return rmap_walk_anon(page, rmap_one, arg);
	else
		return rmap_walk_file(page, rmap_one, arg);
}
#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
static void __hugepage_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

void hugepage_add_anon_rmap(struct page *page,
			    struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int first;

	BUG_ON(!PageLocked(page));
	BUG_ON(!anon_vma);
	/* address might be in next vma when migration races vma_adjust */
	first = atomic_inc_and_test(&page->_mapcount);
	if (first)
		__hugepage_set_anon_rmap(page, vma, address, 0);
}

void hugepage_add_new_anon_rmap(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(&page->_mapcount, 0);
	__hugepage_set_anon_rmap(page, vma, address, 1);
}
#endif /* CONFIG_HUGETLB_PAGE */