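/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */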
#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"
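
/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local()
 */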
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}

bool isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct address_space *mapping;

	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (unlikely(!get_page_unless_zero(page)))
		goto out;

	/*
	 * Check PageMovable before holding a PG_lock because page's owner
	 * assumes anybody doesn't touch PG_lock of newly allocated page
	 * so unconditionally grabbing the lock ruins page's owner side.
	 */
	if (unlikely(!__PageMovable(page)))
		goto out_putpage;
	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as race against the releasing a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * lets be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!trylock_page(page)))
		goto out_putpage;

	if (!PageMovable(page) || PageIsolated(page))
		goto out_no_isolated;

	mapping = page_mapping(page);
	VM_BUG_ON_PAGE(!mapping, page);

	if (!mapping->a_ops->isolate_page(page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use PG_isolated bit of page->flags */
	WARN_ON_ONCE(PageIsolated(page));
	__SetPageIsolated(page);
	unlock_page(page);

	return true;

out_no_isolated:
	unlock_page(page);
out_putpage:
	put_page(page);
out:
	return false;
}

/* It should be called on page which is PG_movable */
void putback_movable_page(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	mapping = page_mapping(page);
	mapping->a_ops->putback_page(page);
	__ClearPageIsolated(page);
}
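
/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
 * and isolate_huge_page().
 */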
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		dec_node_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		/*
		 * We isolated non-lru movable page so here we can use
		 * __PageMovable because LRU page's mapping cannot have
		 * PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__PageMovable(page))) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);
			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				__ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		} else {
			putback_lru_page(page);
		}
	}
}
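
/*
 * Restore a potential migration pte to a working pte entry
 */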
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(new))) {
		ptep = huge_pte_offset(mm, addr);
		if (!ptep)
			goto out;
		ptl = huge_pte_lockptr(hstate_vma(vma), mm, ptep);
	} else {
		pmd = mm_find_pmd(mm, addr);
		if (!pmd)
			goto out;

		ptep = pte_offset_map(pmd, addr);

		/*
		 * Peek to check is_swap_pte() before taking ptlock?  No, we
		 * can race mremap's move_ptes(), which skips anon_vma lock.
		 */

		ptl = pte_lockptr(mm, pmd);
	}

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto unlock;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) ||
	    migration_entry_to_page(entry) != old)
		goto unlock;

	get_page(new);
	pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
	if (pte_swp_soft_dirty(*ptep))
		pte = pte_mksoft_dirty(pte);

	/* Recheck VMA as permissions can change since migration started */
	if (is_write_migration_entry(entry))
		pte = maybe_mkwrite(pte, vma);

#ifdef CONFIG_HUGETLB_PAGE
	if (PageHuge(new)) {
		pte = pte_mkhuge(pte);
		pte = arch_make_huge_pte(pte, vma, new, 0);
	}
#endif
	flush_dcache_page(new);
	set_pte_at(mm, addr, ptep, pte);

	if (PageHuge(new)) {
		if (PageAnon(new))
			hugepage_add_anon_rmap(new, vma, addr);
		else
			page_dup_rmap(new, true);
	} else if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr, false);
	else
		page_add_file_rmap(new, false);

	if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
		mlock_vma_page(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, ptep);
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return SWAP_AGAIN;
}
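
/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */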
void remove_migration_ptes(struct page *old, struct page *new, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = old,
	};

	if (locked)
		rmap_walk_locked(new, &rwc);
	else
		rmap_walk(new, &rwc);
}
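
/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */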
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;
	struct page *page;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once radix-tree replacement of page migration started, page_count
	 * *must* be zero. And, we don't want to call wait_on_page_locked()
	 * against a page without get_page().
	 * So, we use get_page_unless_zero(), here. Even failed, page fault
	 * will occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
	__migration_entry_wait(mm, pte, ptl);
}

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			get_bh(bh);
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		get_bh(bh);
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks
			 */
			struct buffer_head *failed_bh = bh;
			put_bh(failed_bh);
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				put_bh(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}
#else
static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	return true;
}
#endif /* CONFIG_BLOCK */
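
/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */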
int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count)
{
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = 1 + extra_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newpage->index = page->index;
		newpage->mapping = page->mapping;
		if (PageSwapBacked(page))
			__SetPageSwapBacked(newpage);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = page_zone(page);
	newzone = page_zone(newpage);

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
 					page_index(page));

	expected_count += 1 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * In the async migration case of moving a page with buffers, lock the
	 * buffers using trylock before the mapping is moved. If the mapping
	 * was moved, we later failed to lock the buffers and could not move
	 * the mapping back due to an elevated page count, we would have to
	 * block waiting on other references to be dropped.
	 */
	if (mode == MIGRATE_ASYNC && head &&
			!buffer_migrate_lock_buffers(head, mode)) {
		page_ref_unfreeze(page, expected_count);
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page:
	 * no turning back from here.
	 */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		__SetPageSwapBacked(newpage);

	get_page(newpage);	/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = PageDirty(page);
	if (dirty) {
		ClearPageDirty(page);
		SetPageDirty(newpage);
	}

	radix_tree_replace_slot(pslot, newpage);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	page_ref_unfreeze(page, expected_count - 1);

	spin_unlock(&mapping->tree_lock);
	/* Leave irq disabled to prevent preemption while updating stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		__dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
		__inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
		if (PageSwapBacked(page) && !PageSwapCache(page)) {
			__dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
			__inc_node_state(newzone->zone_pgdat, NR_SHMEM);
		}
		if (dirty && mapping_cap_account_dirty(mapping)) {
			__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
			__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page_move_mapping);
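
/*
 * The expected number of remaining references is the same as that
 * of migrate_page_move_mapping().
 */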
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	newpage->index = page->index;
	newpage->mapping = page->mapping;

	get_page(newpage);

	radix_tree_replace_slot(pslot, newpage);

	page_ref_unfreeze(page, expected_count - 1);

	spin_unlock_irq(&mapping->tree_lock);

	return MIGRATEPAGE_SUCCESS;
}
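
/*
 * Gigantic pages are so large that we do not guarantee that page++ pointer
 * arithmetic will work across the entire page.  We need something more
 * specialized.
 */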
static void __copy_gigantic_page(struct page *dst, struct page *src,
				int nr_pages)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < nr_pages; ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

static void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	int nr_pages;

	if (PageHuge(src)) {
		/* hugetlbfs page */
		struct hstate *h = page_hstate(src);
		nr_pages = pages_per_huge_page(h);

		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
			__copy_gigantic_page(dst, src, nr_pages);
			return;
		}
	} else {
		/* thp page */
		BUG_ON(!PageTransHuge(src));
		nr_pages = hpage_nr_pages(src);
	}

	for (i = 0; i < nr_pages; i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}
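
/*
 * Copy the page to its new location
 */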
void migrate_page_copy(struct page *newpage, struct page *page)
{
	int cpupid;

	if (PageHuge(page) || PageTransHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON_PAGE(PageUnevictable(page), page);
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	/* Move dirty on pages not done by migrate_page_move_mapping() */
	if (PageDirty(page))
		SetPageDirty(newpage);

	if (page_is_young(page))
		set_page_young(newpage);
	if (page_is_idle(page))
		set_page_idle(newpage);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(page, -1);
	page_cpupid_xchg_last(newpage, cpupid);

	ksm_migrate_page(newpage, page);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (PageSwapCache(page))
		ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);

	copy_page_owner(page, newpage);

	mem_cgroup_migrate(page, newpage);
}
EXPORT_SYMBOL(migrate_page_copy);
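
/************************************************************
 *                    Migration functions
 ***********************************************************/

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */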
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
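/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */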
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * In the async case, migrate_page_move_mapping locked the buffers
	 * with an IRQ-safe spinlock held. In the sync case, the buffers
	 * need to be locked now
	 */
	if (mode != MIGRATE_ASYNC)
		BUG_ON(!buffer_migrate_lock_buffers(head, mode));

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif /* CONFIG_BLOCK */
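
/*
 * Writeback a page to clean the dirty state
 */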
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page, false);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}
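
/*
 * Default handling if a filesystem does not provide a migration function.
 */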
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page, enum migrate_mode mode)
{
	if (PageDirty(page)) {
		/* Only writeback pages in full synchronous migration */
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		return writeout(mapping, page);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page, mode);
}
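
/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */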
static int move_to_new_page(struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc = -EAGAIN;
	bool is_lru = !__PageMovable(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	mapping = page_mapping(page);

	if (likely(is_lru)) {
		if (!mapping)
			rc = migrate_page(mapping, newpage, page, mode);
		else if (mapping->a_ops->migratepage)
			/*
			 * Most pages have a mapping and most filesystems
			 * provide a migratepage callback. Anonymous pages
			 * are part of swap space which also has its own
			 * migratepage callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migratepage(mapping, newpage,
							page, mode);
		else
			rc = fallback_migrate_page(mapping, newpage,
							page, mode);
	} else {
		/*
		 * In case of non-lru page, it could be released after
		 * isolation step. In that case, we shouldn't try migration.
		 */
		VM_BUG_ON_PAGE(!PageIsolated(page), page);
		if (!PageMovable(page)) {
			rc = MIGRATEPAGE_SUCCESS;
			__ClearPageIsolated(page);
			goto out;
		}

		rc = mapping->a_ops->migratepage(mapping, newpage,
						page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
			!PageIsolated(page));
	}

	/*
	 * When successful, old pagecache page->mapping must be cleared before
	 * page is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__PageMovable(page)) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);

			/*
			 * We clear PG_movable under page_lock so any compactor
			 * cannot try to migrate this page.
			 */
			__ClearPageIsolated(page);
		}

		/*
		 * Anonymous and movable page->mapping will be cleared by
		 * free_pages_prepare so don't reset it here for keeping
		 * the type to work PageAnon, for example.
		 */
		if (!PageMappingFlags(page))
			page->mapping = NULL;
	}
out:
	return rc;
}

static int __unmap_and_move(struct page *page, struct page *newpage,
				int force, enum migrate_mode mode)
{
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__PageMovable(page);

	if (!trylock_page(page)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readpages). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * just go with a simple trylock and back out if the
		 * page cannot be locked.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		lock_page(page);
	}

	if (PageWriteback(page)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much
		 */
		if (mode != MIGRATE_SYNC) {
			rc = -EBUSY;
			goto out_unlock;
		}
		if (!force)
			goto out_unlock;
		wait_on_page_writeback(page);
	}

	/*
	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing anon_vma pointer until the end
	 * of migration. File cache pages are no problem because of page_lock()
	 * File Caches may use write_page() or lock_page() in migration, then,
	 * just care Anon page here.
	 *
	 * Only page_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside one of its mms.
	 * But if we cannot get anon_vma, then we won't need it anyway,
	 * because that implies that the anon page is no longer mapped
	 * (and cannot be remapped so long as we hold the page lock).
	 */
	if (PageAnon(page) && !PageKsm(page))
		anon_vma = page_get_anon_vma(page);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to newpage at this point. We used to have a BUG
	 * here if trylock_page(newpage) fails, but would like to allow for
	 * cases where there might be a race with the previous use of newpage.
	 * This is much like races on refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!trylock_page(newpage)))
		goto out_unlock;

	if (unlikely(!is_lru)) {
		rc = move_to_new_page(newpage, page, mode);
		goto out_unlock_both;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG.  So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining.  Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated.  So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON_PAGE(PageAnon(page), page);
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto out_unlock_both;
		}
	} else if (page_mapped(page)) {
		/* Establish migration ptes */
		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
				page);
		try_to_unmap(page,
			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
		page_was_mapped = 1;
	}

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, mode);

	if (page_was_mapped)
		remove_migration_ptes(page,
			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);

out_unlock_both:
	unlock_page(newpage);
out_unlock:
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	unlock_page(page);
out:
	/*
	 * If migration is successful, decrease refcount of the newpage
	 * which will not free the page because new page owner increased
	 * refcounter. As well, if it is LRU page, add the page to LRU
	 * list in here.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (unlikely(__PageMovable(newpage)))
			put_page(newpage);
		else
			putback_lru_page(newpage);
	}

	return rc;
}

/*
 * gcc 4.7 and 4.8 on arm get an ICE when inlining unmap_and_move().
 * Work around it.
 */
#if (GCC_VERSION >= 40700 && GCC_VERSION < 40900) && defined(CONFIG_ARM)
#define ICE_noinline noinline
#else
#define ICE_noinline
#endif
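
/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */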
static ICE_noinline int unmap_and_move(new_page_t get_new_page,
				   free_page_t put_new_page,
				   unsigned long private, struct page *page,
				   int force, enum migrate_mode mode,
				   enum migrate_reason reason)
{
	int rc = MIGRATEPAGE_SUCCESS;
	int *result = NULL;
	struct page *newpage;

	newpage = get_new_page(page, private, &result);
	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		ClearPageActive(page);
		ClearPageUnevictable(page);
		if (unlikely(__PageMovable(page))) {
			lock_page(page);
			if (!PageMovable(page))
				__ClearPageIsolated(page);
			unlock_page(page);
		}
		if (put_new_page)
			put_new_page(newpage, private);
		else
			put_page(newpage);
		goto out;
	}

	if (unlikely(PageTransHuge(page))) {
		lock_page(page);
		rc = split_huge_page(page);
		unlock_page(page);
		if (rc)
			goto out;
	}

	rc = __unmap_and_move(page, newpage, force, mode);
	if (rc == MIGRATEPAGE_SUCCESS)
		set_page_owner_migrate_reason(newpage, reason);

out:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be restored.
		 */
		list_del(&page->lru);
		dec_node_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
	}

	/*
	 * If migration is successful, releases reference grabbed during
	 * isolation. Otherwise, restore the page to right list unless
	 * we want to retry.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		put_page(page);
		if (reason == MR_MEMORY_FAILURE) {
			/*
			 * Set PG_HWPoison on just freed page
			 * intentionally. Although it's rather weird,
			 * it's how HWPoison flag works at the moment.
			 */
			if (!test_set_page_hwpoison(page))
				num_poisoned_pages_inc();
		}
	} else {
		if (rc != -EAGAIN) {
			if (likely(!__PageMovable(page))) {
				putback_lru_page(page);
				goto put_new;
			}

			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				__ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		}
put_new:
		if (put_new_page)
			put_new_page(newpage, private);
		else
			put_page(newpage);
	}

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}
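
/*
 * Counterpart of unmap_and_move() for hugepage migration.
 *
 * This function doesn't wait the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and writeback status of all subpages are counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then pte is replaced with migration swap entry and direct I/O code
 * will wait in the page fault for migration to complete.
 */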
static int unmap_and_move_huge_page(new_page_t get_new_page,
				free_page_t put_new_page, unsigned long private,
				struct page *hpage, int force,
				enum migrate_mode mode, int reason)
{
	int rc = -EAGAIN;
	int *result = NULL;
	int page_was_mapped = 0;
	struct page *new_hpage;
	struct anon_vma *anon_vma = NULL;

	/*
	 * Migratability of hugepages depends on architectures and their size.
	 * This check is necessary because some callers of hugepage migration
	 * like soft offline and memory hotremove don't walk through page
	 * tables or check whether the hugepage is pmd-based or not before
	 * kicking migration.
	 */
	if (!hugepage_migration_supported(page_hstate(hpage))) {
		putback_active_hugepage(hpage);
		return -ENOSYS;
	}

	new_hpage = get_new_page(hpage, private, &result);
	if (!new_hpage)
		return -ENOMEM;

	if (!trylock_page(hpage)) {
		if (!force || mode != MIGRATE_SYNC)
			goto out;
		lock_page(hpage);
	}

	if (PageAnon(hpage))
		anon_vma = page_get_anon_vma(hpage);

	if (unlikely(!trylock_page(new_hpage)))
		goto put_anon;

	if (page_mapped(hpage)) {
		try_to_unmap(hpage,
			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
		page_was_mapped = 1;
	}

	if (!page_mapped(hpage))
		rc = move_to_new_page(new_hpage, hpage, mode);

	if (page_was_mapped)
		remove_migration_ptes(hpage,
			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);

	unlock_page(new_hpage);

put_anon:
	if (anon_vma)
		put_anon_vma(anon_vma);

	if (rc == MIGRATEPAGE_SUCCESS) {
		hugetlb_cgroup_migrate(hpage, new_hpage);
		put_new_page = NULL;
		set_page_owner_migrate_reason(new_hpage, reason);
	}

	unlock_page(hpage);
out:
	if (rc != -EAGAIN)
		putback_active_hugepage(hpage);

	/*
	 * If migration was not successful and there's a freeing callback, use
	 * it.  Otherwise, putback_active_hugepage() will put the new hugepage
	 * back onto the active list.
	 */
	if (put_new_page)
		put_new_page(new_hpage, private);
	else
		putback_active_hugepage(new_hpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(new_hpage);
	}
	return rc;
}
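
/*
 * migrate_pages - migrate the pages specified in a list, to the free pages
 *		   supplied as the target for the page migration
 *
 * @from:		The list of pages to be migrated.
 * @get_new_page:	The function used to allocate free pages to be used
 *			as the target of the page migration.
 * @put_new_page:	The function used to free target pages if migration
 *			fails, or NULL if no special handling is necessary.
 * @private:		Private data to be passed on to get_new_page()
 * @mode:		The migration mode that specifies the conditions for
 *			page migration.
 * @reason:		The reason for page migration.
 *
 * The function returns after 10 attempts or if no pages are movable any more
 * because the list has become empty or no retryable pages exist any more.
 * The caller should call putback_movable_pages() to return pages to the LRU
 * or free list only if ret != 0.
 *
 * Returns the number of pages that were not migrated, or an error code.
 */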
int migrate_pages(struct list_head *from, new_page_t get_new_page,
		free_page_t put_new_page, unsigned long private,
		enum migrate_mode mode, int reason)
{
	int retry = 1;
	int nr_failed = 0;
	int nr_succeeded = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for(pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			if (PageHuge(page))
				rc = unmap_and_move_huge_page(get_new_page,
						put_new_page, private, page,
						pass > 2, mode, reason);
			else
				rc = unmap_and_move(get_new_page, put_new_page,
						private, page, pass > 2, mode,
						reason);

			switch(rc) {
			case -ENOMEM:
				nr_failed++;
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case MIGRATEPAGE_SUCCESS:
				nr_succeeded++;
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, -ENOSYS, etc.):
				 * unlike -EAGAIN case, the failed page is
				 * removed from migration page list and not
				 * retried in the next outer loop.
				 */
				nr_failed++;
				break;
			}
		}
	}
	nr_failed += retry;
	rc = nr_failed;
out:
	if (nr_succeeded)
		count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
	if (nr_failed)
		count_vm_events(PGMIGRATE_FAIL, nr_failed);
	trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return rc;
}

#ifdef CONFIG_NUMA
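/*
 * Move a list of individual pages
 */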
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	if (PageHuge(p))
		return alloc_huge_page_node(page_hstate(compound_head(p)),
					pm->node);
	else
		return __alloc_pages_node(pm->node,
				GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
}
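
/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node field to the target node of the page.
 *
 * The pm array ends with node = MAX_NUMNODES.
 */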
static int do_move_page_to_node_array(struct mm_struct *mm,
				      struct page_to_node *pm,
				      int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

	down_read(&mm->mmap_sem);

	/*
	 * Build a list of pages to migrate
	 */
	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
			goto set_status;

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, pp->addr,
				FOLL_GET | FOLL_SPLIT | FOLL_DUMP);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			/*
			 * Node already in the right place
			 */
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		if (PageHuge(page)) {
			if (PageHead(page))
				isolate_huge_page(page, &pagelist);
			goto put_and_set;
		}

		err = isolate_lru_page(page);
		if (!err) {
			list_add_tail(&page->lru, &pagelist);
			inc_node_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
put_and_set:
		/*
		 * Drop the reference taken by follow_page(FOLL_GET). If the
		 * page was isolated, the pagelist now holds its own reference.
		 */
		put_page(page);
set_status:
		pp->status = err;
	}

	err = 0;
	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_page_node, NULL,
				(unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	up_read(&mm->mmap_sem);
	return err;
}
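
/*
 * Migrate an array of page address onto an array of nodes and fill
 * the corresponding array of status.
 */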
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	struct page_to_node *pm;
	unsigned long chunk_nr_pages;
	unsigned long chunk_start;
	int err;

	err = -ENOMEM;
	pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
	if (!pm)
		goto out;

	migrate_prep();

	/*
	 * Store a chunk of page_to_node array in a page,
	 * but keep the last one as a marker
	 */
	chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;

	for (chunk_start = 0;
	     chunk_start < nr_pages;
	     chunk_start += chunk_nr_pages) {
		int j;

		if (chunk_start + chunk_nr_pages > nr_pages)
			chunk_nr_pages = nr_pages - chunk_start;

		/* fill the chunk pm with addrs and nodes from user-space */
		for (j = 0; j < chunk_nr_pages; j++) {
			const void __user *p;
			int node;

			err = -EFAULT;
			if (get_user(p, pages + j + chunk_start))
				goto out_pm;
			pm[j].addr = (unsigned long) p;

			if (get_user(node, nodes + j + chunk_start))
				goto out_pm;

			err = -ENODEV;
			if (node < 0 || node >= MAX_NUMNODES)
				goto out_pm;

			if (!node_state(node, N_MEMORY))
				goto out_pm;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out_pm;

			pm[j].node = node;
		}

		/* End marker for this chunk */
		pm[chunk_nr_pages].node = MAX_NUMNODES;

		/* Migrate this chunk */
		err = do_move_page_to_node_array(mm, pm,
						 flags & MPOL_MF_MOVE_ALL);
		if (err < 0)
			goto out_pm;

		/* Return status information */
		for (j = 0; j < chunk_nr_pages; j++)
			if (put_user(pm[j].status, status + j + chunk_start)) {
				err = -EFAULT;
				goto out_pm;
			}
	}
	err = 0;

out_pm:
	free_page((unsigned long)pm);
out:
	return err;
}
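
/*
 * Determine the nodes of an array of pages and store it in an array of status.
 */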
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	down_read(&mm->mmap_sem);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			goto set_status;

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, addr, FOLL_DUMP);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = page ? page_to_nid(page) : -ENOENT;
set_status:
		*status = err;

		pages++;
		status++;
	}

	up_read(&mm->mmap_sem);
}
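
/*
 * Determine the nodes of a user array of pages and store it in
 * a user array of status.
 */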
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
			break;

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}
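
/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */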
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;
	struct mm_struct *mm;
	int err;
	nodemask_t task_nodes;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		return -ESRCH;
	}
	get_task_struct(task);

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * uid as the target process.
	 */
	tcred = __task_cred(task);
	if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->uid,  tcred->suid) && !uid_eq(cred->uid,  tcred->uid) &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	err = security_task_movememory(task);
	if (err)
		goto out;

	task_nodes = cpuset_mems_allowed(task);
	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm)
		return -EINVAL;

	if (nodes)
		err = do_pages_move(mm, task_nodes, nr_pages, pages,
				    nodes, status, flags);
	else
		err = do_pages_stat(mm, nr_pages, pages, status);

	mmput(mm);
	return err;

out:
	put_task_struct(task);
	return err;
}

#ifdef CONFIG_NUMA_BALANCING
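/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages. Currently it only checks the watermarks, which is crude.
 */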
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
				   unsigned long nr_migrate_pages)
{
	int z;

	if (!pgdat_reclaimable(pgdat))
		return false;

	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (!populated_zone(zone))
			continue;

		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
		if (!zone_watermark_ok(zone, 0,
				       high_wmark_pages(zone) +
				       nr_migrate_pages,
				       0, 0))
			continue;
		return true;
	}
	return false;
}

static struct page *alloc_misplaced_dst_page(struct page *page,
					   unsigned long data,
					   int **result)
{
	int nid = (int) data;
	struct page *newpage;

	newpage = __alloc_pages_node(nid,
					 (GFP_HIGHUSER_MOVABLE |
					  __GFP_THISNODE | __GFP_NOMEMALLOC |
					  __GFP_NORETRY | __GFP_NOWARN) &
					 ~__GFP_RECLAIM, 0);

	return newpage;
}
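
/*
 * page migration rate limiting control.
 * Do not migrate more than @pages_to_migrate in a @migrate_interval_millisecs
 * window of time. Default here says do not migrate more than 1280M per second.
 */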
static unsigned int migrate_interval_millisecs __read_mostly = 100;
static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);

/* Returns true if the node is migrate rate-limited after the update */
static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
					unsigned long nr_pages)
{
	/*
	 * Rate-limit the amount of data that is being migrated to a node.
	 * Optimal placement is no good if the memory bus is saturated and
	 * all the time is being spent migrating!
	 */
	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
		spin_lock(&pgdat->numabalancing_migrate_lock);
		pgdat->numabalancing_migrate_nr_pages = 0;
		pgdat->numabalancing_migrate_next_window = jiffies +
			msecs_to_jiffies(migrate_interval_millisecs);
		spin_unlock(&pgdat->numabalancing_migrate_lock);
	}
	if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
		trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
								nr_pages);
		return true;
	}

	/*
	 * This is an unlocked non-atomic update so errors are possible.
	 * The consequences are failing to migrate when we potentially should
	 * have, which is not severe enough to warrant locking. If it is ever
	 * worth correcting, we will establish full locking for the counter
	 * rather than this racy update.
	 */
	pgdat->numabalancing_migrate_nr_pages += nr_pages;
	return false;
}

static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
	int page_lru;

	VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
		return 0;

	if (isolate_lru_page(page))
		return 0;

	/*
	 * migrate_misplaced_transhuge_page() skips page migration's usual
	 * check on page_count(), so we must do it here, now that the page
	 * has been isolated: a GUP pin, or any other pin, prevents migration.
	 * The expected page count is 3: 1 for page's mapcount and 1 for the
	 * caller's pin and 1 for the reference taken by isolate_lru_page().
	 */
	if (PageTransHuge(page) && page_count(page) != 3) {
		putback_lru_page(page);
		return 0;
	}

	page_lru = page_is_file_cache(page);
	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
				hpage_nr_pages(page));

	/*
	 * Isolating the page has taken another reference, so the
	 * caller's reference can be safely dropped without the page
	 * disappearing underneath us during migration.
	 */
	put_page(page);
	return 1;
}

bool pmd_trans_migrating(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	return PageLocked(page);
}
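
/*
 * Attempt to migrate a misplaced page to the specified destination
 * node. Caller is expected to have an elevated reference count on
 * the page that will be dropped by this function before returning.
 */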
int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
			   int node)
{
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated;
	int nr_remaining;
	LIST_HEAD(migratepages);

	/*
	 * Don't migrate file pages that are mapped in multiple processes
	 * with execute permissions as they are probably shared libraries.
	 */
	if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
	    (vma->vm_flags & VM_EXEC))
		goto out;

	/*
	 * Rate-limit the amount of data that is being migrated to a node.
	 * Optimal placement is no good if the memory bus is saturated and
	 * all the time is being spent migrating!
	 */
	if (numamigrate_update_ratelimit(pgdat, 1))
		goto out;

	isolated = numamigrate_isolate_page(pgdat, page);
	if (!isolated)
		goto out;

	list_add(&page->lru, &migratepages);
	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
				     NULL, node, MIGRATE_ASYNC,
				     MR_NUMA_MISPLACED);
	if (nr_remaining) {
		if (!list_empty(&migratepages)) {
			list_del(&page->lru);
			dec_node_page_state(page, NR_ISOLATED_ANON +
					page_is_file_cache(page));
			putback_lru_page(page);
		}
		isolated = 0;
	} else
		count_vm_numa_event(NUMA_PAGE_MIGRATE);
	BUG_ON(!list_empty(&migratepages));
	return isolated;

out:
	put_page(page);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
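/*
 * Migrates a THP to a given target node. page must be locked and is unlocked
 * before returning.
 */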
int migrate_misplaced_transhuge_page(struct mm_struct *mm,
				struct vm_area_struct *vma,
				pmd_t *pmd, pmd_t entry,
				unsigned long address,
				struct page *page, int node)
{
	spinlock_t *ptl;
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated = 0;
	struct page *new_page = NULL;
	int page_lru = page_is_file_cache(page);
	unsigned long mmun_start = address & HPAGE_PMD_MASK;
	unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
	pmd_t orig_entry;

	/*
	 * Rate-limit the amount of data that is being migrated to a node.
	 * Optimal placement is no good if the memory bus is saturated and
	 * all the time is being spent migrating!
	 */
	if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
		goto out_dropref;

	new_page = alloc_pages_node(node,
		(GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
		HPAGE_PMD_ORDER);
	if (!new_page)
		goto out_fail;
	prep_transhuge_page(new_page);

	isolated = numamigrate_isolate_page(pgdat, page);
	if (!isolated) {
		put_page(new_page);
		goto out_fail;
	}

	/*
	 * We are not sure a pending tlb flush here is for a huge page
	 * mapping or not. Hence use the tlb range variant
	 */
	if (mm_tlb_flush_pending(mm))
		flush_tlb_range(vma, mmun_start, mmun_end);

	/* Prepare a page as a migration target */
	__SetPageLocked(new_page);
	__SetPageSwapBacked(new_page);

	/* anon mapping, we can simply copy page->mapping to the new page */
	new_page->mapping = page->mapping;
	new_page->index = page->index;
	migrate_page_copy(new_page, page);
	WARN_ON(PageLRU(new_page));

	/* Recheck the target pmd */
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
fail_putback:
		spin_unlock(ptl);
		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

		/* Reverse changes made by migrate_page_copy() */
		if (TestClearPageActive(new_page))
			SetPageActive(page);
		if (TestClearPageUnevictable(new_page))
			SetPageUnevictable(page);

		unlock_page(new_page);
		put_page(new_page);		/* Free it */

		/* Retake the callers reference and putback on LRU */
		get_page(page);
		putback_lru_page(page);
		mod_node_page_state(page_pgdat(page),
			 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);

		goto out_unlock;
	}

	orig_entry = *pmd;
	entry = mk_huge_pmd(new_page, vma->vm_page_prot);
	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);

	/*
	 * Clear the old entry under pagetable lock and establish the new PTE.
	 * Any parallel GUP will either observe the old page blocking on the
	 * page lock, block on the page table lock or observe the new page.
	 * The SetPageUptodate on the new page and page_add_new_anon_rmap
	 * guarantee the copy is visible before the pagetable update.
	 */
	flush_cache_range(vma, mmun_start, mmun_end);
	page_add_anon_rmap(new_page, vma, mmun_start, true);
	pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
	set_pmd_at(mm, mmun_start, pmd, entry);
	update_mmu_cache_pmd(vma, address, &entry);

	if (page_count(page) != 2) {
		set_pmd_at(mm, mmun_start, pmd, orig_entry);
		flush_pmd_tlb_range(vma, mmun_start, mmun_end);
		mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
		update_mmu_cache_pmd(vma, address, &entry);
		page_remove_rmap(new_page, true);
		goto fail_putback;
	}

	mlock_migrate_page(new_page, page);
	page_remove_rmap(page, true);
	set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);

	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	/* Take an "isolate" reference and put new page on the LRU. */
	get_page(new_page);
	putback_lru_page(new_page);

	unlock_page(new_page);
	unlock_page(page);
	put_page(page);			/* Drop the rmap reference */
	put_page(page);			/* Drop the LRU isolation reference */

	count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
	count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);

	mod_node_page_state(page_pgdat(page),
			NR_ISOLATED_ANON + page_lru,
			-HPAGE_PMD_NR);
	return isolated;

out_fail:
	count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
out_dropref:
	ptl = pmd_lock(mm, pmd);
	if (pmd_same(*pmd, entry)) {
		entry = pmd_modify(entry, vma->vm_page_prot);
		set_pmd_at(mm, mmun_start, pmd, entry);
		update_mmu_cache_pmd(vma, address, &entry);
	}
	spin_unlock(ptl);

out_unlock:
	unlock_page(page);
	put_page(page);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* CONFIG_NUMA */