/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */
#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs
 * is undesirable, use migrate_prep_local().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}

int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct address_space *mapping;

	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (unlikely(!get_page_unless_zero(page)))
		goto out;

	/*
	 * Check PageMovable before holding a PG_lock because page's owner
	 * assumes anybody doesn't touch PG_lock of newly allocated page
	 * so unconditionally grabbing the lock ruins page's owner side.
	 */
	if (unlikely(!__PageMovable(page)))
		goto out_putpage;

	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as race against the releasing of a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!trylock_page(page)))
		goto out_putpage;

	if (!PageMovable(page) || PageIsolated(page))
		goto out_no_isolated;

	mapping = page_mapping(page);
	VM_BUG_ON_PAGE(!mapping, page);

	if (!mapping->a_ops->isolate_page(page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use PG_isolated bit of page->flags */
	WARN_ON_ONCE(PageIsolated(page));
	__SetPageIsolated(page);
	unlock_page(page);

	return 0;

out_no_isolated:
	unlock_page(page);
out_putpage:
	put_page(page);
out:
	return -EBUSY;
}

/* It should be called on page which is PG_movable */
void putback_movable_page(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	mapping = page_mapping(page);
	mapping->a_ops->putback_page(page);
	__ClearPageIsolated(page);
}
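
/*
 * Example (illustrative only, not part of this file): a driver that wants
 * its non-LRU pages to go through isolate_movable_page() and
 * putback_movable_page() registers movable-page callbacks and marks each
 * page movable before publishing it. The my_* names are placeholders; see
 * the balloon and zsmalloc drivers for in-tree users of this protocol.
 *
 *	static const struct address_space_operations my_aops = {
 *		.isolate_page	= my_isolate_page,
 *		.migratepage	= my_migratepage,
 *		.putback_page	= my_putback_page,
 *	};
 *
 *	// at allocation time, before the page is visible to compaction:
 *	__SetPageMovable(page, my_mapping);	// my_mapping->a_ops = &my_aops
 */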

/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
 * and isolate_huge_page().
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		/*
		 * We isolated a non-lru movable page so here we can use
		 * __PageMovable because an LRU page's mapping cannot have
		 * PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__PageMovable(page))) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);
			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				__ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		} else {
			dec_node_page_state(page, NR_ISOLATED_ANON +
					page_is_file_cache(page));
			putback_lru_page(page);
		}
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct page_vma_mapped_walk pvmw = {
		.page = old,
		.vma = vma,
		.address = addr,
		.flags = PVMW_SYNC | PVMW_MIGRATION,
	};
	struct page *new;
	pte_t pte;
	swp_entry_t entry;

	VM_BUG_ON_PAGE(PageTail(page), page);
	while (page_vma_mapped_walk(&pvmw)) {
		if (PageKsm(page))
			new = page;
		else
			new = page - pvmw.page->index +
				linear_page_index(vma, pvmw.address);

		get_page(new);
		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
		if (pte_swp_soft_dirty(*pvmw.pte))
			pte = pte_mksoft_dirty(pte);

		/*
		 * Recheck VMA as permissions can change since migration started
		 */
		entry = pte_to_swp_entry(*pvmw.pte);
		if (is_write_migration_entry(entry))
			pte = maybe_mkwrite(pte, vma);

		flush_dcache_page(new);
#ifdef CONFIG_HUGETLB_PAGE
		if (PageHuge(new)) {
			pte = pte_mkhuge(pte);
			pte = arch_make_huge_pte(pte, vma, new, 0);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
			if (PageAnon(new))
				hugepage_add_anon_rmap(new, vma, pvmw.address);
			else
				page_dup_rmap(new, true);
		} else
#endif
		{
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);

			if (PageAnon(new))
				page_add_anon_rmap(new, vma, pvmw.address, false);
			else
				page_add_file_rmap(new, false);
		}
		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
			mlock_vma_page(new);

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct page *old, struct page *new, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = old,
	};

	if (locked)
		rmap_walk_locked(new, &rwc);
	else
		rmap_walk(new, &rwc);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 */
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
				spinlock_t *ptl)
{
	pte_t pte;
	swp_entry_t entry;
	struct page *page;

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once radix-tree replacement of page migration started, page_count
	 * *must* be zero. And, we don't want to call wait_on_page_locked()
	 * against a page without get_page().
	 * So, we use get_page_unless_zero(), here. Even if it fails, the
	 * page fault will occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}

void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *ptep = pte_offset_map(pmd, address);
	__migration_entry_wait(mm, ptep, ptl);
}

void migration_entry_wait_huge(struct vm_area_struct *vma,
		struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
	__migration_entry_wait(mm, pte, ptl);
}
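
/*
 * Consumer-side sketch (assumption: condensed from do_swap_page() in
 * mm/memory.c): when a fault hits a migration entry, the handler waits
 * for the migration to finish and then retries the fault:
 *
 *	entry = pte_to_swp_entry(orig_pte);
 *	if (unlikely(non_swap_entry(entry))) {
 *		if (is_migration_entry(entry))
 *			migration_entry_wait(mm, pmd, address);
 *		...
 *	}
 */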

#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Simple case, sync compaction */
	if (mode != MIGRATE_ASYNC) {
		do {
			get_bh(bh);
			lock_buffer(bh);
			bh = bh->b_this_page;

		} while (bh != head);

		return true;
	}

	/* async case, we cannot block on lock_buffer so use trylock_buffer */
	do {
		get_bh(bh);
		if (!trylock_buffer(bh)) {
			/*
			 * We failed to lock the buffer and cannot stall in
			 * async migration. Release the taken locks
			 */
			struct buffer_head *failed_bh = bh;
			put_bh(failed_bh);
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				put_bh(bh);
				bh = bh->b_this_page;
			}
			return false;
		}

		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}
#else
static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
							enum migrate_mode mode)
{
	return true;
}
#endif /* CONFIG_BLOCK */

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page,
		struct buffer_head *head, enum migrate_mode mode,
		int extra_count)
{
	struct zone *oldzone, *newzone;
	int dirty;
	int expected_count = 1 + extra_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != expected_count)
			return -EAGAIN;

		/* No turning back from here */
		newpage->index = page->index;
		newpage->mapping = page->mapping;
		if (PageSwapBacked(page))
			__SetPageSwapBacked(newpage);

		return MIGRATEPAGE_SUCCESS;
	}

	oldzone = page_zone(page);
	newzone = page_zone(newpage);

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count += 1 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * In the async migration case of moving a page with buffers, lock the
	 * buffers using trylock before the mapping is moved. If the mapping
	 * was moved, we later failed to lock the buffers and could not move
	 * the mapping back due to an elevated page count, we would have to
	 * block waiting on other references to be dropped.
	 */
	if (mode == MIGRATE_ASYNC && head &&
			!buffer_migrate_lock_buffers(head, mode)) {
		page_ref_unfreeze(page, expected_count);
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page:
	 * no turning back from here.
	 */
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	get_page(newpage);	/* add cache reference */
	if (PageSwapBacked(page)) {
		__SetPageSwapBacked(newpage);
		if (PageSwapCache(page)) {
			SetPageSwapCache(newpage);
			set_page_private(newpage, page_private(page));
		}
	} else {
		VM_BUG_ON_PAGE(PageSwapCache(page), page);
	}

	/* Move dirty while page refs frozen and newpage not yet exposed */
	dirty = PageDirty(page);
	if (dirty) {
		ClearPageDirty(page);
		SetPageDirty(newpage);
	}

	radix_tree_replace_slot(&mapping->page_tree, pslot, newpage);

	/*
	 * Drop cache reference from old page by unfreezing
	 * to one less reference.
	 * We know this isn't the last reference.
	 */
	page_ref_unfreeze(page, expected_count - 1);

	spin_unlock(&mapping->tree_lock);
	/* Leave irq disabled to prevent preemption while migrating zone stats */

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
	 * are mapped to swap space.
	 */
	if (newzone != oldzone) {
		__dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
		__inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
		if (PageSwapBacked(page) && !PageSwapCache(page)) {
			__dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
			__inc_node_state(newzone->zone_pgdat, NR_SHMEM);
		}
		if (dirty && mapping_cap_account_dirty(mapping)) {
			__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
			__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
			__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
			__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
		}
	}
	local_irq_enable();

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page_move_mapping);

/*
 * The expected number of remaining references is the same
 * as that of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_ref_freeze(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	newpage->index = page->index;
	newpage->mapping = page->mapping;

	get_page(newpage);

	radix_tree_replace_slot(&mapping->page_tree, pslot, newpage);

	page_ref_unfreeze(page, expected_count - 1);

	spin_unlock_irq(&mapping->tree_lock);

	return MIGRATEPAGE_SUCCESS;
}

/*
 * Gigantic pages are so large that we do not guarantee that page++ pointer
 * arithmetic will work across the entire page.  We need something more
 * specialized.
 */
static void __copy_gigantic_page(struct page *dst, struct page *src,
				int nr_pages)
{
	int i;
	struct page *dst_base = dst;
	struct page *src_base = src;

	for (i = 0; i < nr_pages; ) {
		cond_resched();
		copy_highpage(dst, src);

		i++;
		dst = mem_map_next(dst, dst_base, i);
		src = mem_map_next(src, src_base, i);
	}
}

static void copy_huge_page(struct page *dst, struct page *src)
{
	int i;
	int nr_pages;

	if (PageHuge(src)) {
		/* hugetlbfs page */
		struct hstate *h = page_hstate(src);
		nr_pages = pages_per_huge_page(h);

		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
			__copy_gigantic_page(dst, src, nr_pages);
			return;
		}
	} else {
		/* thp page */
		BUG_ON(!PageTransHuge(src));
		nr_pages = hpage_nr_pages(src);
	}

	for (i = 0; i < nr_pages; i++) {
		cond_resched();
		copy_highpage(dst + i, src + i);
	}
}

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	int cpupid;

	if (PageHuge(page) || PageTransHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON_PAGE(PageUnevictable(page), page);
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	/* Move dirty on pages not done by migrate_page_move_mapping() */
	if (PageDirty(page))
		SetPageDirty(newpage);

	if (page_is_young(page))
		set_page_young(newpage);
	if (page_is_idle(page))
		set_page_idle(newpage);

	/*
	 * Copy NUMA information to the new page, to prevent over-eager
	 * future migrations of this same page.
	 */
	cpupid = page_cpupid_xchg_last(page, -1);
	page_cpupid_xchg_last(newpage, cpupid);

	ksm_migrate_page(newpage, page);
	/*
	 * Please do not reorder this without considering how mm/ksm.c's
	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
	 */
	if (PageSwapCache(page))
		ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);

	copy_page_owner(page, newpage);

	mem_cgroup_migrate(page, newpage);
}
EXPORT_SYMBOL(migrate_page_copy);

/************************************************************
 *                    Migration functions
 ***********************************************************/

/*
 * Common logic to directly migrate a single LRU page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	migrate_page_copy(newpage, page);
	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);
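
/*
 * Filesystems whose pages carry no fs-private state can use migrate_page()
 * directly as their migration callback, e.g. (sketch; my_aops is a
 * placeholder name):
 *
 *	static const struct address_space_operations my_aops = {
 *		...
 *		.migratepage	= migrate_page,
 *	};
 */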

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page, mode);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);

	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * In the async case, migrate_page_move_mapping locked the buffers
	 * with an IRQ-safe spinlock held. In the sync case, the buffers
	 * need to be locked now
	 */
	if (mode != MIGRATE_ASYNC)
		BUG_ON(!buffer_migrate_lock_buffers(head, mode));

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page, false);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page, enum migrate_mode mode)
{
	if (PageDirty(page)) {
		/* Only writeback pages in full synchronous migration */
		if (mode != MIGRATE_SYNC)
			return -EBUSY;
		return writeout(mapping, page);
	}

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page, mode);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  MIGRATEPAGE_SUCCESS - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	struct address_space *mapping;
	int rc = -EAGAIN;
	bool is_lru = !__PageMovable(page);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	mapping = page_mapping(page);

	if (likely(is_lru)) {
		if (!mapping)
			rc = migrate_page(mapping, newpage, page, mode);
		else if (mapping->a_ops->migratepage)
			/*
			 * Most pages have a mapping and most filesystems
			 * provide a migratepage callback. Anonymous pages
			 * are part of swap space which also has its own
			 * migratepage callback. This is the most common path
			 * for page migration.
			 */
			rc = mapping->a_ops->migratepage(mapping, newpage,
							page, mode);
		else
			rc = fallback_migrate_page(mapping, newpage,
							page, mode);
	} else {
		/*
		 * In case of a non-lru page, it could be released after
		 * the isolation step. In that case, we shouldn't try migration.
		 */
		VM_BUG_ON_PAGE(!PageIsolated(page), page);
		if (!PageMovable(page)) {
			rc = MIGRATEPAGE_SUCCESS;
			__ClearPageIsolated(page);
			goto out;
		}

		rc = mapping->a_ops->migratepage(mapping, newpage,
						page, mode);
		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
			!PageIsolated(page));
	}

	/*
	 * When successful, old pagecache page->mapping must be cleared before
	 * page is freed; but stats require that PageAnon be left as PageAnon.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (__PageMovable(page)) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);

			/*
			 * We clear PG_movable under page_lock so any compactor
			 * cannot try to migrate this page.
			 */
			__ClearPageIsolated(page);
		}

		/*
		 * Anonymous and movable page->mapping will be cleared by
		 * free_pages_prepare so don't reset it here for keeping
		 * the type to work PageAnon, for example.
		 */
		if (!PageMappingFlags(page))
			page->mapping = NULL;
	}
out:
	return rc;
}

static int __unmap_and_move(struct page *page, struct page *newpage,
				int force, enum migrate_mode mode)
{
	int rc = -EAGAIN;
	int page_was_mapped = 0;
	struct anon_vma *anon_vma = NULL;
	bool is_lru = !__PageMovable(page);

	if (!trylock_page(page)) {
		if (!force || mode == MIGRATE_ASYNC)
			goto out;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readpages). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * avoid the use of lock_page for direct compaction
		 * altogether.
		 */
		if (current->flags & PF_MEMALLOC)
			goto out;

		lock_page(page);
	}

	if (PageWriteback(page)) {
		/*
		 * Only in the case of a full synchronous migration is it
		 * necessary to wait for PageWriteback. In the async case,
		 * the retry loop is too short and in the sync-light case,
		 * the overhead of stalling is too much
		 */
		if (mode != MIGRATE_SYNC) {
			rc = -EBUSY;
			goto out_unlock;
		}
		if (!force)
			goto out_unlock;
		wait_on_page_writeback(page);
	}

	/*
	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing the anon_vma pointer until the
	 * end of migration. File cache pages are no problem because of
	 * page_lock(): file caches may use write_page() or lock_page() in
	 * migration, so only anon pages need care here.
	 *
	 * Only page_get_anon_vma() understands the subtleties of
	 * getting a hold on an anon_vma from outside the migration locks.
	 * A concurrent invalidation could release page->mapping, so we
	 * must take care here.
	 */
	if (PageAnon(page) && !PageKsm(page))
		anon_vma = page_get_anon_vma(page);

	/*
	 * Block others from accessing the new page when we get around to
	 * establishing additional references. We are usually the only one
	 * holding a reference to newpage at this point. We used to have a BUG
	 * here if trylock_page(newpage) fails, but would like to allow for
	 * cases where there might be a race with the previous use of newpage.
	 * This is much like races on refcount of oldpage: just don't BUG().
	 */
	if (unlikely(!trylock_page(newpage)))
		goto out_unlock;

	if (unlikely(!is_lru)) {
		rc = move_to_new_page(newpage, page, mode);
		goto out_unlock_both;
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG.  So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining.  Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated.  So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON_PAGE(PageAnon(page), page);
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto out_unlock_both;
		}
	} else if (page_mapped(page)) {
		/* Establish migration ptes */
		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
				page);
		try_to_unmap(page,
			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
		page_was_mapped = 1;
	}

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, mode);

	if (page_was_mapped)
		remove_migration_ptes(page,
			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);

out_unlock_both:
	unlock_page(newpage);
out_unlock:
	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		put_anon_vma(anon_vma);
	unlock_page(page);
out:
	/*
	 * If migration is successful, decrease refcount of the newpage
	 * which will not free the page because the new page owner increased
	 * the refcounter. As well, if it is an LRU page, add the page to the
	 * LRU list here.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		if (unlikely(__PageMovable(newpage)))
			put_page(newpage);
		else
			putback_lru_page(newpage);
	}

	return rc;
}

/*
 * gcc 4.7 and 4.8 on arm get an ICE when inlining unmap_and_move().
 * Work around it.
 */
#if (GCC_VERSION >= 40700 && GCC_VERSION < 40900) && defined(CONFIG_ARM)
#define ICE_noinline noinline
#else
#define ICE_noinline
#endif

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static ICE_noinline int unmap_and_move(new_page_t get_new_page,
				   free_page_t put_new_page,
				   unsigned long private, struct page *page,
				   int force, enum migrate_mode mode,
				   enum migrate_reason reason)
{
	int rc = MIGRATEPAGE_SUCCESS;
	int *result = NULL;
	struct page *newpage;

	newpage = get_new_page(page, private, &result);
	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		ClearPageActive(page);
		ClearPageUnevictable(page);
		if (unlikely(__PageMovable(page))) {
			lock_page(page);
			if (!PageMovable(page))
				__ClearPageIsolated(page);
			unlock_page(page);
		}
		if (put_new_page)
			put_new_page(newpage, private);
		else
			put_page(newpage);
		goto out;
	}

	if (unlikely(PageTransHuge(page))) {
		lock_page(page);
		rc = split_huge_page(page);
		unlock_page(page);
		if (rc)
			goto out;
	}

	rc = __unmap_and_move(page, newpage, force, mode);
	if (rc == MIGRATEPAGE_SUCCESS)
		set_page_owner_migrate_reason(newpage, reason);

out:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be restored.
		 */
		list_del(&page->lru);

		/*
		 * Compaction can migrate also non-LRU pages which are
		 * not accounted to NR_ISOLATED_*. They can be recognized
		 * as __PageMovable
		 */
		if (likely(!__PageMovable(page)))
			dec_node_page_state(page, NR_ISOLATED_ANON +
					page_is_file_cache(page));
	}

	/*
	 * If migration is successful, release the reference grabbed during
	 * isolation. Otherwise, restore the page to the right list unless
	 * we want to retry.
	 */
	if (rc == MIGRATEPAGE_SUCCESS) {
		put_page(page);
		if (reason == MR_MEMORY_FAILURE) {
			/*
			 * Set PG_HWPoison on the just-freed page
			 * intentionally. Although it's rather weird,
			 * it's how the HWPoison flag works at the moment.
			 */
			if (!test_set_page_hwpoison(page))
				num_poisoned_pages_inc();
		}
	} else {
		if (rc != -EAGAIN) {
			if (likely(!__PageMovable(page))) {
				putback_lru_page(page);
				goto put_new;
			}

			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				__ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		}
put_new:
		if (put_new_page)
			put_new_page(newpage, private);
		else
			put_page(newpage);
	}

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * Counterpart of unmap_and_move() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and the writeback status of all subpages is counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and direct I/O
 * code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				free_page_t put_new_page, unsigned long private,
				struct page *hpage, int force,
				enum migrate_mode mode, int reason)
{
	int rc = -EAGAIN;
	int *result = NULL;
	int page_was_mapped = 0;
	struct page *new_hpage;
	struct anon_vma *anon_vma = NULL;

	/*
	 * Movability of hugepages depends on architectures and hugepage size.
	 * This check is necessary because some callers of hugepage migration
	 * like soft offline and memory hotremove don't support hugepage for
	 * now.
	 */
	if (!hugepage_migration_supported(page_hstate(hpage))) {
		putback_active_hugepage(hpage);
		return -ENOSYS;
	}

	new_hpage = get_new_page(hpage, private, &result);
	if (!new_hpage)
		return -ENOMEM;

	if (!trylock_page(hpage)) {
		if (!force || mode != MIGRATE_SYNC)
			goto out;
		lock_page(hpage);
	}

	if (PageAnon(hpage))
		anon_vma = page_get_anon_vma(hpage);

	if (unlikely(!trylock_page(new_hpage)))
		goto put_anon;

	if (page_mapped(hpage)) {
		try_to_unmap(hpage,
			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
		page_was_mapped = 1;
	}

	if (!page_mapped(hpage))
		rc = move_to_new_page(new_hpage, hpage, mode);

	if (page_was_mapped)
		remove_migration_ptes(hpage,
			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);

	unlock_page(new_hpage);

put_anon:
	if (anon_vma)
		put_anon_vma(anon_vma);

	if (rc == MIGRATEPAGE_SUCCESS) {
		hugetlb_cgroup_migrate(hpage, new_hpage);
		put_new_page = NULL;
		set_page_owner_migrate_reason(new_hpage, reason);
	}

	unlock_page(hpage);
out:
	if (rc != -EAGAIN)
		putback_active_hugepage(hpage);
	if (reason == MR_MEMORY_FAILURE && !test_set_page_hwpoison(hpage))
		num_poisoned_pages_inc();

	/*
	 * If migration was not successful and there's a freeing callback, use
	 * it.  Otherwise, putback_active_hugepage() will put the new hugepage
	 * back onto the right list.
	 */
	if (put_new_page)
		put_new_page(new_hpage, private);
	else
		putback_active_hugepage(new_hpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(new_hpage);
	}
	return rc;
}

/*
 * migrate_pages - migrate the pages specified in a list, to the free pages
 *		   supplied as the target for the page migration
 *
 * @from:		The list of pages to be migrated.
 * @get_new_page:	The function used to allocate free pages to be used
 *			as the target of the page migration.
 * @put_new_page:	The function used to free target pages if migration
 *			fails, or NULL if no special handling is necessary.
 * @private:		Private data to be passed on to get_new_page()
 * @mode:		The migration mode that specifies the constraints for
 *			page migration, if any.
 * @reason:		The reason for page migration.
 *
 * The function returns after 10 attempts or if no pages are movable any more
 * because the list has become empty or no retryable pages exist any more.
 * The caller should call putback_movable_pages() to return pages to the LRU
 * or free list only if ret != 0.
 *
 * Returns the number of pages that were not migrated, or an error code.
 */
int migrate_pages(struct list_head *from, new_page_t get_new_page,
		free_page_t put_new_page, unsigned long private,
		enum migrate_mode mode, int reason)
{
	int retry = 1;
	int nr_failed = 0;
	int nr_succeeded = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			if (PageHuge(page))
				rc = unmap_and_move_huge_page(get_new_page,
						put_new_page, private, page,
						pass > 2, mode, reason);
			else
				rc = unmap_and_move(get_new_page, put_new_page,
						private, page, pass > 2, mode,
						reason);

			switch (rc) {
			case -ENOMEM:
				nr_failed++;
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case MIGRATEPAGE_SUCCESS:
				nr_succeeded++;
				break;
			default:
				/*
				 * Permanent failure (-EBUSY, -ENOSYS, etc.):
				 * unlike the -EAGAIN case, the failed page is
				 * removed from the migration page list and not
				 * retried in the next outer loop.
				 */
				nr_failed++;
				break;
			}
		}
	}
	nr_failed += retry;
	rc = nr_failed;
out:
	if (nr_succeeded)
		count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
	if (nr_failed)
		count_vm_events(PGMIGRATE_FAIL, nr_failed);
	trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);

	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	return rc;
}
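
/*
 * Typical calling pattern for migrate_pages(), as a hedged sketch (the
 * new_node_page() allocator and the surrounding code are illustrative,
 * loosely modelled on the memory-hotplug caller): isolate pages onto a
 * private list, migrate them, and put back whatever could not be moved.
 *
 *	static struct page *new_node_page(struct page *page,
 *				unsigned long nid, int **result)
 *	{
 *		return __alloc_pages_node((int)nid,
 *				GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
 *	}
 *
 *	LIST_HEAD(pagelist);
 *	// ...isolate_lru_page() each candidate and list_add_tail() it...
 *	ret = migrate_pages(&pagelist, new_node_page, NULL, target_nid,
 *			    MIGRATE_SYNC, MR_MEMORY_HOTPLUG);
 *	if (ret)
 *		putback_movable_pages(&pagelist);
 */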

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	if (PageHuge(p))
		return alloc_huge_page_node(page_hstate(compound_head(p)),
					pm->node);
	else
		return __alloc_pages_node(pm->node,
				GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 * The pm array ends with node = MAX_NUMNODES.
 */
static int do_move_page_to_node_array(struct mm_struct *mm,
				      struct page_to_node *pm,
				      int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

	down_read(&mm->mmap_sem);

	/*
	 * Build a list of pages to migrate
	 */
	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
			goto set_status;

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, pp->addr,
				FOLL_GET | FOLL_SPLIT | FOLL_DUMP);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			/*
			 * Node already in the right place
			 */
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		if (PageHuge(page)) {
			if (PageHead(page))
				isolate_huge_page(page, &pagelist);
			goto put_and_set;
		}

		err = isolate_lru_page(page);
		if (!err) {
			list_add_tail(&page->lru, &pagelist);
			inc_node_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
put_and_set:
		/*
		 * Either remove the duplicate refcount from
		 * isolate_lru_page() or drop the page ref if it was
		 * not isolated.
		 */
		put_page(page);
set_status:
		pp->status = err;
	}

	err = 0;
	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_page_node, NULL,
				(unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL);
		if (err)
			putback_movable_pages(&pagelist);
	}

	up_read(&mm->mmap_sem);
	return err;
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	struct page_to_node *pm;
	unsigned long chunk_nr_pages;
	unsigned long chunk_start;
	int err;

	err = -ENOMEM;
	pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
	if (!pm)
		goto out;

	migrate_prep();

	/*
	 * Store a chunk of the page_to_node array in a page,
	 * but keep the last one as a marker
	 */
	chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;

	for (chunk_start = 0;
	     chunk_start < nr_pages;
	     chunk_start += chunk_nr_pages) {
		int j;

		if (chunk_start + chunk_nr_pages > nr_pages)
			chunk_nr_pages = nr_pages - chunk_start;

		/* fill the chunk pm with addrs and nodes from user-space */
		for (j = 0; j < chunk_nr_pages; j++) {
			const void __user *p;
			int node;

			err = -EFAULT;
			if (get_user(p, pages + j + chunk_start))
				goto out_pm;
			pm[j].addr = (unsigned long) p;

			if (get_user(node, nodes + j + chunk_start))
				goto out_pm;

			err = -ENODEV;
			if (node < 0 || node >= MAX_NUMNODES)
				goto out_pm;

			if (!node_state(node, N_MEMORY))
				goto out_pm;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out_pm;

			pm[j].node = node;
		}

		/* End marker in the chunk */
		pm[chunk_nr_pages].node = MAX_NUMNODES;

		/* Migrate this chunk */
		err = do_move_page_to_node_array(mm, pm,
						 flags & MPOL_MF_MOVE_ALL);
		if (err < 0)
			goto out_pm;

		/* Return status information */
		for (j = 0; j < chunk_nr_pages; j++)
			if (put_user(pm[j].status, status + j + chunk_start)) {
				err = -EFAULT;
				goto out_pm;
			}
	}
	err = 0;

out_pm:
	free_page((unsigned long)pm);
out:
	return err;
}

/*
 * Determine the nodes of an array of pages and store it in an array of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	down_read(&mm->mmap_sem);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			goto set_status;

		/* FOLL_DUMP to ignore special (like zero) pages */
		page = follow_page(vma, addr, FOLL_DUMP);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = page ? page_to_nid(page) : -ENOENT;
set_status:
		*status = err;

		pages++;
		status++;
	}

	up_read(&mm->mmap_sem);
}

/*
 * Determine the nodes of a user array of pages and store it in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
			break;

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	struct task_struct *task;
	struct mm_struct *mm;
	int err;
	nodemask_t task_nodes;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		return -ESRCH;
	}
	get_task_struct(task);

	/*
	 * Check if this process has the right to modify the specified
	 * process. Use the regular "ptrace_may_access()" checks.
	 */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	err = security_task_movememory(task);
	if (err)
		goto out;

	task_nodes = cpuset_mems_allowed(task);
	mm = get_task_mm(task);
	put_task_struct(task);

	if (!mm)
		return -EINVAL;

	if (nodes)
		err = do_pages_move(mm, task_nodes, nr_pages, pages,
				    nodes, status, flags);
	else
		err = do_pages_stat(mm, nr_pages, pages, status);

	mmput(mm);
	return err;

out:
	put_task_struct(task);
	return err;
}
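
/*
 * Userspace view of the syscall above, as a self-contained sketch (compile
 * with -lnuma; moving to node 0 is an arbitrary choice):
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		void *pages[1] = { malloc(4096) };
 *		int nodes[1] = { 0 };
 *		int status[1];
 *
 *		((char *)pages[0])[0] = 1;	// fault the page in first
 *		if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE))
 *			perror("move_pages");
 *		else
 *			printf("page is now on node %d\n", status[0]);
 *		return 0;
 *	}
 *
 * Passing pid 0 operates on the calling process; with nodes == NULL the
 * call only reports each page's current node in status[].
 */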

#ifdef CONFIG_NUMA_BALANCING
/*
 * Returns true if this is a safe migration target node for misplaced NUMA
 * pages. Currently it only checks the watermarks, which is crude.
 */
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
				   unsigned long nr_migrate_pages)
{
	int z;

	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
		struct zone *zone = pgdat->node_zones + z;

		if (!populated_zone(zone))
			continue;

		/* Avoid waking kswapd by allocating pages_to_migrate pages. */
		if (!zone_watermark_ok(zone, 0,
				       high_wmark_pages(zone) +
				       nr_migrate_pages,
				       0, 0))
			continue;
		return true;
	}
	return false;
}

static struct page *alloc_misplaced_dst_page(struct page *page,
					   unsigned long data,
					   int **result)
{
	int nid = (int) data;
	struct page *newpage;

	newpage = __alloc_pages_node(nid,
				     (GFP_HIGHUSER_MOVABLE |
				      __GFP_THISNODE | __GFP_NOMEMALLOC |
				      __GFP_NORETRY | __GFP_NOWARN) &
				     ~__GFP_RECLAIM, 0);

	return newpage;
}

/*
 * Page migration rate limiting control.
 * Do not migrate more than @ratelimit_pages in a @migrate_interval_millisecs
 * window of time. Rate limiting prevents NUMA balancing from flooding a
 * node's memory bandwidth with migrations.
 */
static unsigned int migrate_interval_millisecs __read_mostly = 100;
static unsigned int ratelimit_pages __read_mostly = 128 << (20 - PAGE_SHIFT);

/* Returns true if the node is migrate rate-limited after the update */
static bool numamigrate_update_ratelimit(pg_data_t *pgdat,
					unsigned long nr_pages)
{
	/*
	 * Rate-limit the amount of data that is being migrated to a node.
	 * Optimal placement is no good if the memory is bus saturated and
	 * all the time is being spent migrating!
	 */
	if (time_after(jiffies, pgdat->numabalancing_migrate_next_window)) {
		spin_lock(&pgdat->numabalancing_migrate_lock);
		pgdat->numabalancing_migrate_nr_pages = 0;
		pgdat->numabalancing_migrate_next_window = jiffies +
			msecs_to_jiffies(migrate_interval_millisecs);
		spin_unlock(&pgdat->numabalancing_migrate_lock);
	}
	if (pgdat->numabalancing_migrate_nr_pages > ratelimit_pages) {
		trace_mm_numa_migrate_ratelimit(current, pgdat->node_id,
								nr_pages);
		return true;
	}

	/*
	 * This is an unlocked non-atomic update so errors are possible.
	 * The consequence is failing to migrate when we potentially should
	 * have, which is not severe enough to warrant locking. If it is ever
	 * worth worrying about, we could do the check under a lock.
	 */
	pgdat->numabalancing_migrate_nr_pages += nr_pages;
	return false;
}

static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
	int page_lru;

	VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);

	/* Avoid migrating to a node that is nearly full */
	if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page)))
		return 0;

	if (isolate_lru_page(page))
		return 0;

	/*
	 * migrate_misplaced_transhuge_page() skips page migration's usual
	 * check on page_count(), so we must do it here, now that the page
	 * has been isolated: a GUP pin, or any other pin, prevents migration.
	 * The expected page count is 3: 1 for page's mapcount and 1 for the
	 * caller's pin and 1 for the reference taken by isolate_lru_page().
	 */
	if (PageTransHuge(page) && page_count(page) != 3) {
		putback_lru_page(page);
		return 0;
	}

	page_lru = page_is_file_cache(page);
	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
				hpage_nr_pages(page));

	/*
	 * Isolating the page has taken another reference, so the
	 * caller's reference can be safely dropped without the page
	 * disappearing underneath us during migration.
	 */
	put_page(page);
	return 1;
}

bool pmd_trans_migrating(pmd_t pmd)
{
	struct page *page = pmd_page(pmd);
	return PageLocked(page);
}

/*
 * Attempt to migrate a misplaced page to the specified destination
 * node. Caller is expected to have an elevated reference count on
 * the page that will be dropped by this function before returning.
 */
int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
			   int node)
{
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated;
	int nr_remaining;
	LIST_HEAD(migratepages);

	/*
	 * Don't migrate file pages that are mapped in multiple processes
	 * with execute permissions as they are probably shared libraries.
	 */
	if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
	    (vma->vm_flags & VM_EXEC))
		goto out;

	/*
	 * Rate-limit the amount of data that is being migrated to a node.
	 * Optimal placement is no good if the memory is bus saturated and
	 * all the time is being spent migrating!
	 */
	if (numamigrate_update_ratelimit(pgdat, 1))
		goto out;

	isolated = numamigrate_isolate_page(pgdat, page);
	if (!isolated)
		goto out;

	list_add(&page->lru, &migratepages);
	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
				     NULL, node, MIGRATE_ASYNC,
				     MR_NUMA_MISPLACED);
	if (nr_remaining) {
		if (!list_empty(&migratepages)) {
			list_del(&page->lru);
			dec_node_page_state(page, NR_ISOLATED_ANON +
					page_is_file_cache(page));
			putback_lru_page(page);
		}
		isolated = 0;
	} else
		count_vm_numa_event(NUMA_PAGE_MIGRATE);
	BUG_ON(!list_empty(&migratepages));
	return isolated;

out:
	put_page(page);
	return 0;
}
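
/*
 * Hinting-fault caller sketch (assumption: condensed from do_numa_page()
 * in mm/memory.c): the fault handler picks a target node and hands the
 * page to migrate_misplaced_page(), which either migrates it or drops the
 * caller's reference:
 *
 *	target_nid = mpol_misplaced(page, vma, addr);
 *	if (target_nid != -1 &&
 *	    migrate_misplaced_page(page, vma, target_nid))
 *		page_nid = target_nid;
 */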
#endif /* CONFIG_NUMA_BALANCING */

#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * Migrates a THP to a given target node. page must be locked and is unlocked
 * before returning.
 */
int migrate_misplaced_transhuge_page(struct mm_struct *mm,
				struct vm_area_struct *vma,
				pmd_t *pmd, pmd_t entry,
				unsigned long address,
				struct page *page, int node)
{
	spinlock_t *ptl;
	pg_data_t *pgdat = NODE_DATA(node);
	int isolated = 0;
	struct page *new_page = NULL;
	int page_lru = page_is_file_cache(page);
	unsigned long mmun_start = address & HPAGE_PMD_MASK;
	unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;

	/*
	 * Rate-limit the amount of data that is being migrated to a node.
	 * Optimal placement is no good if the memory is bus saturated and
	 * all the time is being spent migrating!
	 */
	if (numamigrate_update_ratelimit(pgdat, HPAGE_PMD_NR))
		goto out_dropref;

	new_page = alloc_pages_node(node,
		(GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
		HPAGE_PMD_ORDER);
	if (!new_page)
		goto out_fail;
	prep_transhuge_page(new_page);

	isolated = numamigrate_isolate_page(pgdat, page);
	if (!isolated) {
		put_page(new_page);
		goto out_fail;
	}

	/* Prepare a page as a migration target */
	__SetPageLocked(new_page);
	if (PageSwapBacked(page))
		__SetPageSwapBacked(new_page);

	/* anon mapping, we can simply copy page->mapping to the new page */
	new_page->mapping = page->mapping;
	new_page->index = page->index;
	migrate_page_copy(new_page, page);
	WARN_ON(PageLRU(new_page));

	/* Recheck the target PMD */
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
		spin_unlock(ptl);
		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

		/* Reverse changes made by migrate_page_copy() */
		if (TestClearPageActive(new_page))
			SetPageActive(page);
		if (TestClearPageUnevictable(new_page))
			SetPageUnevictable(page);

		unlock_page(new_page);
		put_page(new_page);		/* Free it */

		/* Retake the callers reference and putback on LRU */
		get_page(page);
		putback_lru_page(page);
		mod_node_page_state(page_pgdat(page),
			 NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);

		goto out_unlock;
	}

	entry = mk_huge_pmd(new_page, vma->vm_page_prot);
	entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);

	/*
	 * Clear the old entry under pagetable lock and establish the new PTE.
	 * Any parallel GUP will either observe the old page blocking on the
	 * page lock, block on the page table lock or observe the new page.
	 * The SetPageUptodate on the new page and page_add_new_anon_rmap
	 * guarantee the copy is visible before the pagetable update.
	 */
	flush_cache_range(vma, mmun_start, mmun_end);
	page_add_anon_rmap(new_page, vma, mmun_start, true);
	pmdp_huge_clear_flush_notify(vma, mmun_start, pmd);
	set_pmd_at(mm, mmun_start, pmd, entry);
	update_mmu_cache_pmd(vma, address, &entry);

	page_ref_unfreeze(page, 2);
	mlock_migrate_page(new_page, page);
	page_remove_rmap(page, true);
	set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);

	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	/* Take an "isolate" reference and put new page on the LRU. */
	get_page(new_page);
	putback_lru_page(new_page);

	unlock_page(new_page);
	unlock_page(page);
	put_page(page);			/* Drop the rmap reference */
	put_page(page);			/* Drop the LRU isolation reference */

	count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
	count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);

	mod_node_page_state(page_pgdat(page),
			NR_ISOLATED_ANON + page_lru,
			-HPAGE_PMD_NR);
	return isolated;

out_fail:
	count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
out_dropref:
	/* Restore the PMD */
	ptl = pmd_lock(mm, pmd);
	if (pmd_same(*pmd, entry)) {
		entry = pmd_modify(entry, vma->vm_page_prot);
		set_pmd_at(mm, mmun_start, pmd, entry);
		update_mmu_cache_pmd(vma, address, &entry);
	}
	spin_unlock(ptl);

out_unlock:
	unlock_page(page);
	put_page(page);
	return 0;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* CONFIG_NUMA */