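/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Reverse-mapping code used by page reclaim, migration and mlock:
 * anonymous pages are tracked through per-vma anon_vma lists, file
 * pages through the prio tree hanging off their address_space's i_mmap.
 */
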
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>

#include <asm/tlbflush.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	kmem_cache_free(anon_vma_cachep, anon_vma);
}
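
/*
 * anon_vma_prepare - make sure @vma has an anon_vma attached
 *
 * Attach an anon_vma before the first anonymous page can be mapped into
 * @vma: reuse a mergeable neighbour's anon_vma if possible, otherwise
 * allocate a new one.  mm->page_table_lock guards against another thread
 * installing an anon_vma at the same time.
 */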
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated;

		anon_vma = find_mergeable_anon_vma(vma);
		allocated = NULL;
		if (!anon_vma) {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
		}
		spin_lock(&anon_vma->lock);

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add_tail(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		spin_unlock(&anon_vma->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}

void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}

void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma)
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
}

void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		spin_unlock(&anon_vma->lock);
	}
}

void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	spin_lock_init(&anon_vma->lock);
	INIT_LIST_HEAD(&anon_vma->head);
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
}
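
/*
 * Getting a lock on a stable anon_vma from a page off the LRU is tricky:
 * the anon_vma may be freed and reused while we look at it.  Because the
 * anon_vma cache is SLAB_DESTROY_BY_RCU, the memory stays valid under
 * rcu_read_lock(), and the page_mapped() check tells us whether the
 * anon_vma is still in use before we take its lock.
 */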
struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
	return anon_vma;
out:
	rcu_read_unlock();
	return NULL;
}

void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
	spin_unlock(&anon_vma->lock);
	rcu_read_unlock();
}
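
/*
 * At what user virtual address is page expected in @vma?
 * Returns -EFAULT if the page's index falls outside the range mapped
 * by the vma.
 */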
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within @vma mapping range */
		return -EFAULT;
	}
	return address;
}
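
/*
 * At what user virtual address is page expected in vma?  Like
 * vma_address(), but also checks that the page really does belong to
 * this vma: same anon_vma for anon pages, same file mapping otherwise.
 */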
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}
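
/*
 * Check that @page is mapped at @address into @mm.
 *
 * If @sync is false, a pte that is not present makes the check bail out
 * before the pte lock is taken.  On success the pte is returned mapped
 * and locked; the caller must pte_unmap_unlock() it.
 */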
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp, int sync)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!sync && !pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}
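
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */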
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		return 0;
	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
	if (!pte)
		return 0;
	pte_unmap_unlock(pte, ptl);

	return 1;
}
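
/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */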
static int page_referenced_one(struct page *page,
			       struct vm_area_struct *vma,
			       unsigned int *mapcount,
			       unsigned long *vm_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * Don't count references to an mlocked page: it cannot be
	 * reclaimed anyway.  Pretend the page is only mapped once so the
	 * caller stops walking, and report VM_LOCKED so the page can be
	 * moved to the unevictable list.
	 */
	if (vma->vm_flags & VM_LOCKED) {
		*mapcount = 1;	/* break early from loop */
		*vm_flags |= VM_LOCKED;
		goto out_unmap;
	}

	if (ptep_clear_flush_young_notify(vma, address, pte)) {
		/*
		 * Don't treat a reference through a sequentially read
		 * mapping as such.  If the page has been used in
		 * another mapping, we will catch it; if this other
		 * mapping is already gone, the unmap path will have
		 * set PG_referenced or activated the page.
		 */
		if (likely(!VM_SequentialReadHint(vma)))
			referenced++;
	}

	/*
	 * Pretend the page is referenced if the task has the
	 * swap token and is in the middle of a page fault.
	 */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

out_unmap:
	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	if (referenced)
		*vm_flags |= vma->vm_flags;
	return referenced;
}

static int page_referenced_anon(struct page *page,
				struct mem_cgroup *mem_cont,
				unsigned long *vm_flags)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups.
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma,
						  &mapcount, vm_flags);
		if (!mapcount)
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return referenced;
}
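
/**
 * page_referenced_file - count references to a file page
 * @page: the page we're checking references on
 * @mem_cont: target memory controller
 * @vm_flags: collect the encountered vma->vm_flags that referenced the page
 *
 * Walk every vma in the i_mmap prio tree that maps the page's offset and
 * count the ptes which referenced the page.
 *
 * This function is only called from page_referenced for object-based pages.
 */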
static int page_referenced_file(struct page *page,
				struct mem_cgroup *mem_cont,
				unsigned long *vm_flags)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting on behalf of references from different
		 * cgroups.
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma,
						  &mapcount, vm_flags);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}
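
/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @mem_cont: target memory controller
 * @vm_flags: collect the encountered vma->vm_flags that referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */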
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *mem_cont,
		    unsigned long *vm_flags)
{
	int referenced = 0;

	if (TestClearPageReferenced(page))
		referenced++;

	*vm_flags = 0;
	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page, mem_cont,
							   vm_flags);
		else if (is_locked)
			referenced += page_referenced_file(page, mem_cont,
							   vm_flags);
		else if (!trylock_page(page))
			referenced++;
		else {
			if (page->mapping)
				referenced += page_referenced_file(page,
							mem_cont, vm_flags);
			unlock_page(page);
		}
	}

	if (page_test_and_clear_young(page))
		referenced++;

	return referenced;
}

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl, 1);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush_notify(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED)
			ret += page_mkclean_one(page, vma);
	}
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping) {
			ret = page_mkclean_file(mapping, page);
			if (page_test_dirty(page)) {
				page_clear_dirty(page);
				ret = 1;
			}
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);
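
/**
 * __page_set_anon_rmap - setup new anonymous rmap
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */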
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;

	page->index = linear_page_index(vma, address);

	/*
	 * nr_mapped state can be updated without turning off
	 * interrupts because it is not modified via interrupt.
	 */
	__inc_zone_page_state(page, NR_ANON_PAGES);
}
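
/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page: the page to check
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */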
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be set up.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	struct anon_vma *anon_vma = vma->anon_vma;
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	BUG_ON(page->mapping != (struct address_space *)anon_vma);
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}
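
/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock, and the page must be locked.
 */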
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	else
		__page_check_anon_rmap(page, vma, address);
}
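
/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages,
 * so the inc-and-test can be bypassed.  The page does not need to be
 * locked.
 */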
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	SetPageSwapBacked(page);
	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
	if (page_evictable(page, vma))
		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
	else
		add_page_to_unevictable_list(page);
}
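
/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */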
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount)) {
		__inc_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_update_mapped_file_stat(page, 1);
	}
}
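
/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */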
void page_remove_rmap(struct page *page)
{
	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		return;

	/*
	 * Now that the last pte has gone, s390 must transfer dirty
	 * flag from storage key to struct page.  We can usually skip
	 * this if the page is anon, so about to be freed; but perhaps
	 * not if it's in swapcache - there might be another pte slot
	 * containing the swap entry, but page not yet written to swap.
	 */
	if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) {
		page_clear_dirty(page);
		set_page_dirty(page);
	}
	if (PageAnon(page)) {
		mem_cgroup_uncharge_page(page);
		__dec_zone_page_state(page, NR_ANON_PAGES);
	} else {
		__dec_zone_page_state(page, NR_FILE_MAPPED);
	}
	mem_cgroup_update_mapped_file_stat(page, -1);
	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_hot_cold_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapcache to be recognized.
	 */
}
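
/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */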
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
				enum ttu_flags flags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!(flags & TTU_IGNORE_MLOCK)) {
		if (vma->vm_flags & VM_LOCKED) {
			ret = SWAP_MLOCK;
			goto out_unmap;
		}
	}
	if (!(flags & TTU_IGNORE_ACCESS)) {
		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			ret = SWAP_FAIL;
			goto out_unmap;
		}
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush_notify(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
		if (PageAnon(page))
			dec_mm_counter(mm, anon_rss);
		else
			dec_mm_counter(mm, file_rss);
		set_pte_at(mm, address, pte,
			   swp_entry_to_pte(make_hwpoison_entry(page)));
	} else if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			swap_duplicate(entry);
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, anon_rss);
		} else if (PAGE_MIGRATION) {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
			entry = make_migration_entry(page, pte_write(pteval));
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
		dec_mm_counter(mm, file_rss);

	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}
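
/*
 * For nonlinear VMAs the page's offset into the file says nothing about
 * where it is mapped in the virtual address space, so try_to_unmap_file()
 * cannot compute the address directly.  Instead we scan the vma in
 * clusters of CLUSTER_SIZE, unmapping every file page we find, and
 * remember the scan position in vma->vm_private_data.
 */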
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))

static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
		struct vm_area_struct *vma, struct page *check_page)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;
	int ret = SWAP_AGAIN;
	int locked_vma = 0;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return ret;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return ret;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return ret;

	/*
	 * If we can acquire the mmap_sem for read, and the vma is
	 * VM_LOCKED, keep the sem while scanning the cluster for
	 * mlocking pages.
	 */
	if (MLOCK_PAGES && down_read_trylock(&vma->vm_mm->mmap_sem)) {
		locked_vma = (vma->vm_flags & VM_LOCKED);
		if (!locked_vma)
			up_read(&vma->vm_mm->mmap_sem);
	}

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (locked_vma) {
			mlock_vma_page(page);
			if (page == check_page)
				ret = SWAP_MLOCK;
			continue;
		}

		if (ptep_clear_flush_young_notify(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush_notify(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
	if (locked_vma)
		up_read(&vma->vm_mm->mmap_sem);
	return ret;
}
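
/*
 * common handling for pages mapped in VM_LOCKED vmas
 */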
static int try_to_mlock_page(struct page *page, struct vm_area_struct *vma)
{
	int mlocked = 0;

	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		if (vma->vm_flags & VM_LOCKED) {
			mlock_vma_page(page);
			mlocked++;
		}
		up_read(&vma->vm_mm->mmap_sem);
	}
	return mlocked;
}
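
/**
 * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
 * rmap method
 * @page: the page to unmap/unlock
 * @flags: action and flags
 *
 * Find all the mappings of a page using the mapping pointer and the vma
 * chains contained in the anon_vma struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * anonymous pages.
 */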
static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	unsigned int mlocked = 0;
	int ret = SWAP_AGAIN;
	int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;

	if (MLOCK_PAGES && unlikely(unlock))
		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		if (MLOCK_PAGES && unlikely(unlock)) {
			if (!((vma->vm_flags & VM_LOCKED) &&
			      page_mapped_in_vma(page, vma)))
				continue;	/* must visit all vmas */
			ret = SWAP_MLOCK;
		} else {
			ret = try_to_unmap_one(page, vma, flags);
			if (ret == SWAP_FAIL || !page_mapped(page))
				break;
		}
		if (ret == SWAP_MLOCK) {
			mlocked = try_to_mlock_page(page, vma);
			if (mlocked)
				break;	/* stop if actually mlocked page */
		}
	}

	page_unlock_anon_vma(anon_vma);

	if (mlocked)
		ret = SWAP_MLOCK;	/* actually mlocked the page */
	else if (ret == SWAP_MLOCK)
		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma */

	return ret;
}
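
/**
 * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
 * @page: the page to unmap/unlock
 * @flags: action and flags
 *
 * Find all the mappings of a page using the mapping pointer and the vma
 * chains contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap/try_to_munlock for
 * object-based pages.
 */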
static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;
	unsigned int mlocked = 0;
	int unlock = TTU_ACTION(flags) == TTU_MUNLOCK;

	if (MLOCK_PAGES && unlikely(unlock))
		ret = SWAP_SUCCESS;	/* default for try_to_munlock() */

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (MLOCK_PAGES && unlikely(unlock)) {
			if (!((vma->vm_flags & VM_LOCKED) &&
			      page_mapped_in_vma(page, vma)))
				continue;	/* must visit all vmas */
			ret = SWAP_MLOCK;
		} else {
			ret = try_to_unmap_one(page, vma, flags);
			if (ret == SWAP_FAIL || !page_mapped(page))
				goto out;
		}
		if (ret == SWAP_MLOCK) {
			mlocked = try_to_mlock_page(page, vma);
			if (mlocked)
				break;	/* stop if actually mlocked page */
		}
	}

	if (mlocked)
		goto out;

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if (MLOCK_PAGES && unlikely(unlock)) {
			if (!(vma->vm_flags & VM_LOCKED))
				continue;	/* must visit all vmas */
			ret = SWAP_MLOCK;
			goto out;		/* no need to look further */
		}
		if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
			(vma->vm_flags & VM_LOCKED))
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if (!MLOCK_PAGES && !(flags & TTU_IGNORE_MLOCK) &&
			    (vma->vm_flags & VM_LOCKED))
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while ( cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				ret = try_to_unmap_cluster(cursor, &mapcount,
								vma, page);
				if (ret == SWAP_MLOCK)
					mlocked = 2;	/* to return below */
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	if (mlocked)
		ret = SWAP_MLOCK;	/* actually mlocked the page */
	else if (ret == SWAP_MLOCK)
		ret = SWAP_AGAIN;	/* saw VM_LOCKED vma */
	return ret;
}
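
/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 * @flags: action and flags
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 * SWAP_MLOCK	- page is mlocked.
 */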
int try_to_unmap(struct page *page, enum ttu_flags flags)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page, flags);
	else
		ret = try_to_unmap_file(page, flags);
	if (ret != SWAP_MLOCK && !page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}
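
/**
 * try_to_munlock - try to munlock a page
 * @page: the page to be munlocked
 *
 * Called from munlock code.  Checks all of the VMAs mapping the page
 * to make sure nobody else has this page mlocked.  The page will be
 * returned with PG_mlocked cleared if no other vmas have it mlocked.
 *
 * Return values are:
 *
 * SWAP_SUCCESS	- no vma's holding page mlocked.
 * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
 * SWAP_MLOCK	- page is now mlocked.
 */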
int try_to_munlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page) || PageLRU(page));

	if (PageAnon(page))
		return try_to_unmap_anon(page, TTU_MUNLOCK);
	else
		return try_to_unmap_file(page, TTU_MUNLOCK);
}