/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Provides the reverse-mapping machinery used to find every place a
 * physical page is mapped: anon_vma chains for anonymous pages and
 * address_space interval trees for file-backed pages.
 *
 * Lock ordering in mm (outermost first): mmap_sem, page lock (PG_locked),
 * mapping->i_mmap_mutex, anon_vma->rwsem, page_table_lock / pte lock.
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/backing-dev.h>

#include <asm/tlbflush.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	struct anon_vma *anon_vma;

	anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
	if (anon_vma) {
		atomic_set(&anon_vma->refcount, 1);
		/*
		 * Initialise the anon_vma root to point to itself. If called
		 * from fork, the root will be reset to the parent's.
		 */
		anon_vma->root = anon_vma;
	}

	return anon_vma;
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	VM_BUG_ON(atomic_read(&anon_vma->refcount));

	/*
	 * Synchronize against page_lock_anon_vma_read() such that
	 * we can safely hold the lock without the anon_vma getting
	 * freed.
	 *
	 * Relies on the full mb implied by the atomic_dec_and_test() from
	 * put_anon_vma() against the acquire barrier implied by
	 * down_read_trylock() from page_lock_anon_vma_read(). This orders:
	 *
	 * page_lock_anon_vma_read()	VS	put_anon_vma()
	 *   down_read_trylock()		  atomic_dec_and_test()
	 *   LOCK				  MB
	 *   atomic_read()			  rwsem_is_locked()
	 *
	 * LOCK should suffice since the actual taking of the lock must
	 * happen _before_ what follows.
	 */
	might_sleep();
	if (rwsem_is_locked(&anon_vma->root->rwsem)) {
		anon_vma_lock_write(anon_vma);
		anon_vma_unlock_write(anon_vma);
	}

	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
}

static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);
	anon_vma_interval_tree_insert(avc, &anon_vma->rb_root);
}

/**
 * anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, but if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma_read()
 * and that may actually touch the rwsem even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	struct anon_vma_chain *avc;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated;

		avc = anon_vma_chain_alloc(GFP_KERNEL);
		if (!avc)
			goto out_enomem;

		anon_vma = find_mergeable_anon_vma(vma);
		allocated = NULL;
		if (!anon_vma) {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				goto out_enomem_free_avc;
			allocated = anon_vma;
		}

		anon_vma_lock_write(anon_vma);
		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			anon_vma_chain_link(vma, avc, anon_vma);
			allocated = NULL;
			avc = NULL;
		}
		spin_unlock(&mm->page_table_lock);
		anon_vma_unlock_write(anon_vma);

		if (unlikely(allocated))
			put_anon_vma(allocated);
		if (unlikely(avc))
			anon_vma_chain_free(avc);
	}
	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}

/*
 * This is a useful helper function for locking the anon_vma root as
 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
 * have the same vma.
 *
 * Such anon_vma's should have the same root, so you'd expect to see
 * just a single lock acquisition for the whole traversal.
 */
static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
{
	struct anon_vma *new_root = anon_vma->root;
	if (new_root != root) {
		if (WARN_ON_ONCE(root))
			up_write(&root->rwsem);
		root = new_root;
		down_write(&root->rwsem);
	}
	return root;
}

static inline void unlock_anon_vma_root(struct anon_vma *root)
{
	if (root)
		up_write(&root->rwsem);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;
	struct anon_vma *root = NULL;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma;

		avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
		if (unlikely(!avc)) {
			unlock_anon_vma_root(root);
			root = NULL;
			avc = anon_vma_chain_alloc(GFP_KERNEL);
			if (!avc)
				goto enomem_failure;
		}
		anon_vma = pavc->anon_vma;
		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_chain_link(dst, avc, anon_vma);
	}
	unlock_anon_vma_root(root);
	return 0;

 enomem_failure:
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	if (anon_vma_clone(vma, pvma))
		return -ENOMEM;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc(GFP_KERNEL);
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's rwsem is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	/*
	 * With refcounts taken on the root anon_vma, its lifetime is
	 * guaranteed until this anon_vma is freed, because the refcount
	 * is only dropped in __put_anon_vma().
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_lock_write(anon_vma);
	anon_vma_chain_link(vma, avc, anon_vma);
	anon_vma_unlock_write(anon_vma);

	return 0;

 out_error_free_anon_vma:
	put_anon_vma(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;
	struct anon_vma *root = NULL;

	/*
	 * Unlink each anon_vma chained to the VMA.  This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		root = lock_anon_vma_root(root, anon_vma);
		anon_vma_interval_tree_remove(avc, &anon_vma->rb_root);

		/*
		 * Leave empty anon_vmas on the list - we'll need
		 * to free them outside the lock.
		 */
		if (RB_EMPTY_ROOT(&anon_vma->rb_root))
			continue;

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
	unlock_anon_vma_root(root);

	/*
	 * Iterate the list once more, it now only contains empty and unlinked
	 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
	 * needing to write-acquire the anon_vma->root->rwsem.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		struct anon_vma *anon_vma = avc->anon_vma;

		put_anon_vma(anon_vma);

		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	init_rwsem(&anon_vma->rwsem);
	atomic_set(&anon_vma->refcount, 0);
	anon_vma->rb_root = RB_ROOT;
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
 *
 * Since there is no serialization what so ever against page_remove_rmap()
 * the best this function can do is return a locked anon_vma that might
 * have been relevant to this page.
 *
 * The page might have been remapped to a different anon_vma or the anon_vma
 * returned may already be freed (and even reused).
 *
 * In case it was remapped to a different anon_vma, the new anon_vma will be a
 * child of the old anon_vma, and the anon_vma lifetime rules will therefore
 * guarantee that any anon_vma obtained from the page will still be valid for
 * as long as we observe page_mapped() [ hence all those page_mapped() tests ].
 *
 * All users of this function must be very careful when walking the anon_vma
 * chain and verify that the page in question is indeed mapped in it
 * [ something equivalent to page_mapped_in_vma() ].
 *
 * Since anon_vma's slab is DESTROY_BY_RCU and we know from page_remove_rmap()
 * that the anon_vma pointer from page->mapping is valid if there is a
 * mapcount, we can dereference the anon_vma after observing those.
 */
struct anon_vma *page_get_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	/*
	 * If this page is still mapped, then its anon_vma cannot have been
	 * freed.  But if it has been unmapped, we have no security against the
	 * anon_vma structure being freed and reused (for another anon_vma:
	 * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
	 * above cannot corrupt).
	 */
	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}
out:
	rcu_read_unlock();

	return anon_vma;
}

/*
 * Similar to page_get_anon_vma() except it locks the anon_vma.
 *
 * Its a little more complex as it tries to keep the fast path to a single
 * atomic op -- the trylock. If we fail the trylock, we fall back to getting a
 * reference like with page_get_anon_vma() and then block on the rwsem.
 */
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	struct anon_vma *root_anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	root_anon_vma = ACCESS_ONCE(anon_vma->root);
	if (down_read_trylock(&root_anon_vma->rwsem)) {
		/*
		 * If the page is still mapped, then this anon_vma is still
		 * its anon_vma, and holding the rwsem ensures that it will
		 * not go away, see anon_vma_free().
		 */
		if (!page_mapped(page)) {
			up_read(&root_anon_vma->rwsem);
			anon_vma = NULL;
		}
		goto out;
	}

	/* trylock failed, we got to sleep */
	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
		anon_vma = NULL;
		goto out;
	}

	if (!page_mapped(page)) {
		rcu_read_unlock();
		put_anon_vma(anon_vma);
		return NULL;
	}

	/* we pinned the anon_vma, its safe to sleep */
	rcu_read_unlock();
	anon_vma_lock_read(anon_vma);

	if (atomic_dec_and_test(&anon_vma->refcount)) {
		/*
		 * Oops, we held the last refcount, release the lock
		 * and bail -- can't simply use put_anon_vma() because
		 * we'll deadlock on the anon_vma_lock_write() recursion.
		 */
		anon_vma_unlock_read(anon_vma);
		__put_anon_vma(anon_vma);
		anon_vma = NULL;
	}

	return anon_vma;

out:
	rcu_read_unlock();
	return anon_vma;
}

void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
{
	anon_vma_unlock_read(anon_vma);
}

/*
 * At what user virtual address is page expected in @vma?
 */
static inline unsigned long
__vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page_to_pgoff(page);
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}

inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address = __vma_address(page, vma);

	/* page should be within @vma mapping range */
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);

	return address;
}

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	if (PageAnon(page)) {
		struct anon_vma *page__anon_vma = page_anon_vma(page);
		/*
		 * Note: swapoff's unuse_vma() is more efficient with this
		 * check, and needs it to match anon_vma when KSM is active.
		 */
		if (!vma->anon_vma || !page__anon_vma ||
		    vma->anon_vma->root != page__anon_vma->root)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return -EFAULT;
	return address;
}

pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;
	pmd_t pmde;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	/*
	 * Some THP functions use the sequence pmdp_clear_flush(), set_pmd_at()
	 * without holding anon_vma lock for write.  So when looking for a
	 * genuine pmde (in which to find pte), test present and !THP together.
	 */
	pmde = ACCESS_ONCE(*pmd);
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		pmd = NULL;
out:
	return pmd;
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * If @sync is false, page_check_address may perform a racy check to avoid
 * the page table lock when the pte is not present (helpful when reclaiming
 * highly shared pages).
 *
 * On success returns with pte mapped and locked.
 */
pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp, int sync)
{
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(page))) {
		/* when pud is not present, pte will be NULL */
		pte = huge_pte_offset(mm, address);
		if (!pte)
			return NULL;

		ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
		goto check;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd)
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!sync && !pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
check:
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;

	address = __vma_address(page, vma);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end))
		return 0;
	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
	if (!pte)
		return 0;
	pte_unmap_unlock(pte, ptl);

	return 1;
}

struct page_referenced_arg {
	int mapcount;
	int referenced;
	unsigned long vm_flags;
	struct mem_cgroup *memcg;
};
/*
 * arg: page_referenced_arg will be passed to page_referenced_one()
 */
static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	int referenced = 0;
	struct page_referenced_arg *pra = arg;

	if (unlikely(PageTransHuge(page))) {
		pmd_t *pmd;

		/*
		 * rmap might return false positives; we must filter
		 * these out using page_check_address_pmd().
		 */
		pmd = page_check_address_pmd(page, mm, address,
					     PAGE_CHECK_ADDRESS_PMD_FLAG, &ptl);
		if (!pmd)
			return SWAP_AGAIN;

		if (vma->vm_flags & VM_LOCKED) {
			spin_unlock(ptl);
			pra->vm_flags |= VM_LOCKED;
			return SWAP_FAIL; /* To break the loop */
		}

		/* go ahead even if the pmd is pmd_trans_splitting() */
		if (pmdp_clear_flush_young_notify(vma, address, pmd))
			referenced++;
		spin_unlock(ptl);
	} else {
		pte_t *pte;

		/*
		 * rmap might return false positives; we must filter
		 * these out using page_check_address().
		 */
		pte = page_check_address(page, mm, address, &ptl, 0);
		if (!pte)
			return SWAP_AGAIN;

		if (vma->vm_flags & VM_LOCKED) {
			pte_unmap_unlock(pte, ptl);
			pra->vm_flags |= VM_LOCKED;
			return SWAP_FAIL; /* To break the loop */
		}

		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			/*
			 * Don't treat a reference through a sequentially read
			 * mapping as such.  If the page has been used in
			 * another mapping, we will catch it; if this other
			 * mapping is already gone, the unmap path will have
			 * set PG_referenced or activated the page.
			 */
			if (likely(!(vma->vm_flags & VM_SEQ_READ)))
				referenced++;
		}
		pte_unmap_unlock(pte, ptl);
	}

	if (referenced) {
		pra->referenced++;
		pra->vm_flags |= vma->vm_flags;
	}

	pra->mapcount--;
	if (!pra->mapcount)
		return SWAP_SUCCESS; /* To break the loop */

	return SWAP_AGAIN;
}

static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
{
	struct page_referenced_arg *pra = arg;
	struct mem_cgroup *memcg = pra->memcg;

	if (!mm_match_cgroup(vma->vm_mm, memcg))
		return true;

	return false;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @memcg: target memory cgroup
 * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *memcg,
		    unsigned long *vm_flags)
{
	int ret;
	int we_locked = 0;
	struct page_referenced_arg pra = {
		.mapcount = page_mapcount(page),
		.memcg = memcg,
	};
	struct rmap_walk_control rwc = {
		.rmap_one = page_referenced_one,
		.arg = (void *)&pra,
		.anon_lock = page_lock_anon_vma_read,
	};

	*vm_flags = 0;
	if (!page_mapped(page))
		return 0;

	if (!page_rmapping(page))
		return 0;

	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
		we_locked = trylock_page(page);
		if (!we_locked)
			return 1;
	}

	/*
	 * If we are reclaiming on behalf of a cgroup, skip
	 * counting on behalf of references from different
	 * cgroups
	 */
	if (memcg) {
		rwc.invalid_vma = invalid_page_referenced_vma;
	}

	ret = rmap_walk(page, &rwc);
	*vm_flags = pra.vm_flags;

	if (we_locked)
		unlock_page(page);

	return pra.referenced;
}

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;
	int *cleaned = arg;

	pte = page_check_address(page, mm, address, &ptl, 1);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);

	if (ret) {
		mmu_notifier_invalidate_page(mm, address);
		(*cleaned)++;
	}
out:
	return SWAP_AGAIN;
}

static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
{
	if (vma->vm_flags & VM_SHARED)
		return false;

	return true;
}

int page_mkclean(struct page *page)
{
	int cleaned = 0;
	struct address_space *mapping;
	struct rmap_walk_control rwc = {
		.arg = (void *)&cleaned,
		.rmap_one = page_mkclean_one,
		.invalid_vma = invalid_mkclean_vma,
	};

	BUG_ON(!PageLocked(page));

	if (!page_mapped(page))
		return 0;

	mapping = page_mapping(page);
	if (!mapping)
		return 0;

	rmap_walk(page, &rwc);

	return cleaned;
}
EXPORT_SYMBOL_GPL(page_mkclean);

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 * @address:	the user virtual address mapped
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON(!anon_vma);
	VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page:	Page to add to rmap
 * @vma:	VM area to add page to.
 * @address:	User virtual address of the mapping
 * @exclusive:	the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root);
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being truncated from its mapping
 * (but PageKsm is never truncated from its mapping).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	do_page_add_anon_rmap(page, vma, address, 0);
}

/*
 * Special version of the above for do_swap_page, which often runs
 * into pages that are exclusively owned by the current process.
 * Everybody else should continue to use page_add_anon_rmap above.
 */
void do_page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	int first = atomic_inc_and_test(&page->_mapcount);
	if (first) {
		/*
		 * First mapping of this page: bump the zone counters for
		 * anonymous pages (and transparent hugepages, which are
		 * accounted as hpage_nr_pages() base pages).
		 */
		if (PageTransHuge(page))
			__inc_zone_page_state(page,
					      NR_ANON_TRANSPARENT_HUGEPAGES);
		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
				hpage_nr_pages(page));
	}
	if (unlikely(PageKsm(page)))
		return;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	/* address might be in next vma when migration races vma_adjust */
	if (first)
		__page_set_anon_rmap(page, vma, address, exclusive);
	else
		__page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	SetPageSwapBacked(page);
	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
	if (PageTransHuge(page))
		__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
	__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
			hpage_nr_pages(page));
	__page_set_anon_rmap(page, vma, address, 1);

	VM_BUG_ON_PAGE(PageLRU(page), page);
	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
		SetPageActive(page);
		lru_cache_add(page);
		return;
	}

	if (!TestSetPageMlocked(page)) {
		/*
		 * We use the irq-unsafe __mod_zone_page_state because this
		 * counter is not modified from interrupt context, and the
		 * pte lock is held (spinlock), which implies preemption
		 * disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	add_page_to_unevictable_list(page);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	bool locked;
	unsigned long flags;

	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
	if (atomic_inc_and_test(&page->_mapcount)) {
		__inc_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
	}
	mem_cgroup_end_update_page_stat(page, &locked, &flags);
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
	bool anon = PageAnon(page);
	bool locked;
	unsigned long flags;

	/*
	 * The anon case has no mem_cgroup page_stat to update; but may
	 * uncharge_page() below, where the lock ordering can deadlock if
	 * we hold the lock against page_stat move: so avoid it on anon.
	 */
	if (!anon)
		mem_cgroup_begin_update_page_stat(page, &locked, &flags);

	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		goto out;

	/*
	 * Hugetlbfs pages are not counted in NR_ANON_PAGES nor
	 * NR_FILE_MAPPED and are not charged by memcg for now, so skip
	 * the zone/memcg accounting below for them.
	 */
	if (unlikely(PageHuge(page)))
		goto out;
	if (anon) {
		mem_cgroup_uncharge_page(page);
		if (PageTransHuge(page))
			__dec_zone_page_state(page,
					      NR_ANON_TRANSPARENT_HUGEPAGES);
		__mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
				      -hpage_nr_pages(page));
	} else {
		__dec_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
		mem_cgroup_end_update_page_stat(page, &locked, &flags);
	}
	if (unlikely(PageMlocked(page)))
		clear_page_mlock(page);

	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_hot_cold_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
	return;
out:
	if (!anon)
		mem_cgroup_end_update_page_stat(page, &locked, &flags);
}

/*
 * @arg: enum ttu_flags will be passed to this argument
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, void *arg)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;
	enum ttu_flags flags = (enum ttu_flags)arg;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!(flags & TTU_IGNORE_MLOCK)) {
		if (vma->vm_flags & VM_LOCKED)
			goto out_mlock;

		if (flags & TTU_MUNLOCK)
			goto out_unmap;
	}
	if (!(flags & TTU_IGNORE_ACCESS)) {
		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			ret = SWAP_FAIL;
			goto out_unmap;
		}
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
		if (!PageHuge(page)) {
			if (PageAnon(page))
				dec_mm_counter(mm, MM_ANONPAGES);
			else
				dec_mm_counter(mm, MM_FILEPAGES);
		}
		set_pte_at(mm, address, pte,
			   swp_entry_to_pte(make_hwpoison_entry(page)));
	} else if (pte_unused(pteval)) {
		/*
		 * The guest indicated that the page content is of no
		 * interest anymore. Simply discard the pte, vmscan
		 * will take care of the rest.
		 */
		if (PageAnon(page))
			dec_mm_counter(mm, MM_ANONPAGES);
		else
			dec_mm_counter(mm, MM_FILEPAGES);
	} else if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		pte_t swp_pte;

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pte, pteval);
				ret = SWAP_FAIL;
				goto out_unmap;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(!(flags & TTU_MIGRATION));
			entry = make_migration_entry(page, pte_write(pteval));
		}
		swp_pte = swp_entry_to_pte(entry);
		if (pte_soft_dirty(pteval))
			swp_pte = pte_swp_mksoft_dirty(swp_pte);
		set_pte_at(mm, address, pte, swp_pte);
		BUG_ON(pte_file(*pte));
	} else if (IS_ENABLED(CONFIG_MIGRATION) &&
		   (flags & TTU_MIGRATION)) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
		dec_mm_counter(mm, MM_FILEPAGES);

	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret != SWAP_FAIL && !(flags & TTU_MUNLOCK))
		mmu_notifier_invalidate_page(mm, address);
out:
	return ret;

out_mlock:
	pte_unmap_unlock(pte, ptl);

	/*
	 * We need mmap_sem locking, otherwise the VM_LOCKED check makes
	 * unlocking impossible.
	 *
	 * If the trylock failed, the page remains on the evictable lru
	 * and later vmscan could retry to move the page to the unevictable
	 * lru if the page is actually mlocked.
	 */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		if (vma->vm_flags & VM_LOCKED) {
			mlock_vma_page(page);
			ret = SWAP_MLOCK;
		}
		up_read(&vma->vm_mm->mmap_sem);
	}
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 *
 * Mlocked pages:  check VM_LOCKED under mmap_sem held for read, if we can
 * acquire it without blocking.  If vma locked, mlock the pages in the cluster,
 * rather than unmapping them.  If we encounter the "check_page" that vmscan is
 * trying to unmap, return SWAP_MLOCK, else default SWAP_AGAIN.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))

static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
		struct vm_area_struct *vma, struct page *check_page)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */
	unsigned long end;
	int ret = SWAP_AGAIN;
	int locked_vma = 0;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pmd = mm_find_pmd(mm, address);
	if (!pmd)
		return ret;

	mmun_start = address;
	mmun_end   = end;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	/*
	 * If we can acquire the mmap_sem for read, and vma is VM_LOCKED,
	 * keep the sem while scanning the cluster for mlocking pages.
	 */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		locked_vma = (vma->vm_flags & VM_LOCKED);
		if (!locked_vma)
			up_read(&vma->vm_mm->mmap_sem); /* don't need it */
	}

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (locked_vma) {
			if (page == check_page) {
				/* we know we have check_page locked */
				mlock_vma_page(page);
				ret = SWAP_MLOCK;
			} else if (trylock_page(page)) {
				/*
				 * If we can lock the page, perform mlock.
				 * Otherwise leave the page alone, it will be
				 * eventually encountered again later.
				 */
				mlock_vma_page(page);
				unlock_page(page);
			}
			continue;	/* don't unmap */
		}

		if (ptep_clear_flush_young_notify(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address)) {
			pte_t ptfile = pgoff_to_pte(page->index);
			if (pte_soft_dirty(pteval))
				ptfile = pte_file_mksoft_dirty(ptfile);
			set_pte_at(mm, address, pte, ptfile);
		}

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, MM_FILEPAGES);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	if (locked_vma)
		up_read(&vma->vm_mm->mmap_sem);
	return ret;
}

static int try_to_unmap_nonlinear(struct page *page,
		struct address_space *mapping, void *arg)
{
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	list_for_each_entry(vma,
		&mapping->i_mmap_nonlinear, shared.nonlinear) {

		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* all nonlinears locked or reserved? */
		return SWAP_FAIL;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try.
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		return ret;

	cond_resched();

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma,
			&mapping->i_mmap_nonlinear, shared.nonlinear) {

			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				if (try_to_unmap_cluster(cursor, &mapcount,
						vma, page) == SWAP_MLOCK)
					ret = SWAP_MLOCK;
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					return ret;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched();
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.nonlinear)
		vma->vm_private_data = NULL;

	return ret;
}

bool is_vma_temporary_stack(struct vm_area_struct *vma)
{
	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);

	if (!maybe_stack)
		return false;

	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
						VM_STACK_INCOMPLETE_SETUP)
		return true;

	return false;
}

static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
{
	return is_vma_temporary_stack(vma);
}

static int page_not_mapped(struct page *page)
{
	return !page_mapped(page);
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 * SWAP_MLOCK	- page is mlocked.
 */
int try_to_unmap(struct page *page, enum ttu_flags flags)
{
	int ret;
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)flags,
		.done = page_not_mapped,
		.file_nonlinear = try_to_unmap_nonlinear,
		.anon_lock = page_lock_anon_vma_read,
	};

	VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);

	/*
	 * During exec, a temporary VMA is setup and later moved.
	 * The VMA is moved under the anon_vma lock but not the
	 * page tables leading to a race where migration cannot
	 * find the migration ptes. Rather than increasing the
	 * locking requirements of exec(), migration skips
	 * temporary VMAs until after exec() completes.
	 */
	if ((flags & TTU_MIGRATION) && !PageKsm(page) && PageAnon(page))
		rwc.invalid_vma = invalid_migration_vma;

	ret = rmap_walk(page, &rwc);

	if (ret != SWAP_MLOCK && !page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}

/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked. The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 *
 * Return values are:
 *
 * SWAP_AGAIN	- no vma is holding page mlocked, or,
 * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
 * SWAP_FAIL	- page cannot be located at present
 * SWAP_MLOCK	- page is now mlocked.
 */
int try_to_munlock(struct page *page)
{
	int ret;
	struct rmap_walk_control rwc = {
		.rmap_one = try_to_unmap_one,
		.arg = (void *)TTU_MUNLOCK,
		.done = page_not_mapped,
		/*
		 * We don't bother to try to find the munlocked page in
		 * nonlinears. It's costly. Instead, later, page reclaim logic
		 * may call try_to_unmap() and recover PG_mlocked lazily.
		 */
		.file_nonlinear = NULL,
		.anon_lock = page_lock_anon_vma_read,

	};

	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);

	ret = rmap_walk(page, &rwc);
	return ret;
}

void __put_anon_vma(struct anon_vma *anon_vma)
{
	struct anon_vma *root = anon_vma->root;

	anon_vma_free(anon_vma);
	if (root != anon_vma && atomic_dec_and_test(&root->refcount))
		anon_vma_free(root);
}

static struct anon_vma *rmap_walk_anon_lock(struct page *page,
					struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma;

	if (rwc->anon_lock)
		return rwc->anon_lock(page);

	/*
	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma_read()
	 * because that depends on page_mapped(); but not all its usages
	 * are holding mmap_sem. Users without mmap_sem are required to
	 * take a reference count to prevent the anon_vma disappearing.
	 */
	anon_vma = page_anon_vma(page);
	if (!anon_vma)
		return NULL;

	anon_vma_lock_read(anon_vma);
	return anon_vma;
}

/*
 * rmap_walk_anon - do something to anonymous page using the object-based
 * rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the anon_vma struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * LOCKED.
 */
static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
{
	struct anon_vma *anon_vma;
	pgoff_t pgoff = page_to_pgoff(page);
	struct anon_vma_chain *avc;
	int ret = SWAP_AGAIN;

	anon_vma = rmap_walk_anon_lock(page, rwc);
	if (!anon_vma)
		return ret;

	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		ret = rwc->rmap_one(page, vma, address, rwc->arg);
		if (ret != SWAP_AGAIN)
			break;
		if (rwc->done && rwc->done(page))
			break;
	}
	anon_vma_unlock_read(anon_vma);
	return ret;
}

/*
 * rmap_walk_file - do something to file page using the object-based rmap method
 * @page: the page to be handled
 * @rwc: control variable according to each walk type
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
 * where the page was found will be held for write.  So, we won't recheck
 * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
 * LOCKED.
 */
static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page_to_pgoff(page);
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_mutex.
	 */
	VM_BUG_ON(!PageLocked(page));

	if (!mapping)
		return ret;
	mutex_lock(&mapping->i_mmap_mutex);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);

		if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
			continue;

		ret = rwc->rmap_one(page, vma, address, rwc->arg);
		if (ret != SWAP_AGAIN)
			goto done;
		if (rwc->done && rwc->done(page))
			goto done;
	}

	if (!rwc->file_nonlinear)
		goto done;

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto done;

	ret = rwc->file_nonlinear(page, mapping, rwc->arg);

done:
	mutex_unlock(&mapping->i_mmap_mutex);
	return ret;
}

int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
	if (unlikely(PageKsm(page)))
		return rmap_walk_ksm(page, rwc);
	else if (PageAnon(page))
		return rmap_walk_anon(page, rwc);
	else
		return rmap_walk_file(page, rwc);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The following three functions are for anonymous (private mapped) hugepages.
 * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
 */
static void __hugepage_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	if (PageAnon(page))
		return;
	if (!exclusive)
		anon_vma = anon_vma->root;

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

void hugepage_add_anon_rmap(struct page *page,
			    struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int first;

	BUG_ON(!PageLocked(page));
	BUG_ON(!anon_vma);
	/* address might be in next vma when migration races vma_adjust */
	first = atomic_inc_and_test(&page->_mapcount);
	if (first)
		__hugepage_set_anon_rmap(page, vma, address, 0);
}

void hugepage_add_new_anon_rmap(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	atomic_set(&page->_mapcount, 0);
	__hugepage_set_anon_rmap(page, vma, address, 1);
}
#endif	/* CONFIG_HUGETLB_PAGE */