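/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Moves the contents of pages to newly allocated target pages and updates
 * all references (page tables, swap cache, radix tree slots) to point at
 * the new pages.
 */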
#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/gfp.h>

#include <asm/tlbflush.h>

#include "internal.h"

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
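/*
 * migrate_prep() needs to be called once before pages are isolated for
 * migration: it drains the per-CPU LRU pagevecs on all CPUs so that the
 * pages cached there show up on the LRU lists and can be isolated.
 */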
int migrate_prep(void)
{
	lru_add_drain_all();

	return 0;
}
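/* Do the work of migrate_prep() for the local CPU only. */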
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}
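/*
 * Put previously isolated pages back onto the appropriate LRU lists
 * and drop the per-zone isolation counts.
 */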
void putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
}
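/*
 * Restore a potential migration pte to a working pte entry: if the pte at
 * @addr still holds a migration entry referring to @old, replace it with a
 * present pte mapping @new and add the corresponding rmap entry.
 */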
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(new))) {
		ptep = huge_pte_offset(mm, addr);
		if (!ptep)
			goto out;
		ptl = &mm->page_table_lock;
	} else {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd))
			goto out;

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud))
			goto out;

		pmd = pmd_offset(pud, addr);
		if (pmd_trans_huge(*pmd))
			goto out;
		if (!pmd_present(*pmd))
			goto out;

		ptep = pte_offset_map(pmd, addr);

		if (!is_swap_pte(*ptep)) {
			pte_unmap(ptep);
			goto out;
		}

		ptl = pte_lockptr(mm, pmd);
	}

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto unlock;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) ||
	    migration_entry_to_page(entry) != old)
		goto unlock;

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
#ifdef CONFIG_HUGETLB_PAGE
	if (PageHuge(new))
		pte = pte_mkhuge(pte);
#endif
	flush_cache_page(vma, addr, pte_pfn(pte));
	set_pte_at(mm, addr, ptep, pte);

	if (PageHuge(new)) {
		if (PageAnon(new))
			hugepage_add_anon_rmap(new, vma, addr);
		else
			page_dup_rmap(new);
	} else if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	update_mmu_cache(vma, addr, ptep);
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return SWAP_AGAIN;
}
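/*
 * Get rid of all migration entries that point to the old page by walking
 * the rmap of the new page and rewriting each entry to reference it.
 */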
static void remove_migration_ptes(struct page *old, struct page *new)
{
	rmap_walk(new, remove_migration_pte, old);
}
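/*
 * Something used the pte of a page under migration. We need to get to
 * the page and wait until migration is finished.  When we return from
 * this function the fault will be retried.
 */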
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	pte_t *ptep, pte;
	spinlock_t *ptl;
	swp_entry_t entry;
	struct page *page;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);
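	/*
	 * Once the radix-tree replacement has started the page count may
	 * already have been frozen to zero, and wait_on_page_locked() must
	 * not be called on a page we hold no reference to, so pin the page
	 * with get_page_unless_zero(); if that fails the fault is simply
	 * retried.
	 */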
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}
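/*
 * Replace the page in the mapping's radix tree.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping,
 * 2 for pages with a mapping,
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 *
 * The NR_FILE_PAGES/NR_SHMEM zone counters are transferred to the new
 * page's zone here as well.
 */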
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	get_page(newpage);
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);

	__put_page(page);

	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	if (PageSwapBacked(page)) {
		__dec_zone_page_state(page, NR_SHMEM);
		__inc_zone_page_state(newpage, NR_SHMEM);
	}
	spin_unlock_irq(&mapping->tree_lock);

	return 0;
}
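/*
 * The expected number of remaining references is the same as that of
 * migrate_page_move_mapping(); this hugetlb variant skips the swap-cache
 * and zone-statistics handling.
 */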
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	get_page(newpage);

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);

	__put_page(page);

	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}
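/*
 * Copy the page to its new location and transfer the various page flags
 * (error, referenced, uptodate, active/unevictable, checked, dirty, ...)
 * from the old page, then detach the old page from its mapping.
 */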
void migrate_page_copy(struct page *newpage, struct page *page)
{
	if (PageHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		__set_page_dirty_nobuffers(newpage);
	}

	mlock_migrate_page(newpage, page);
	ksm_migrate_page(newpage, page);

	ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}
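/*
 * Migration functions for different kinds of mappings.
 */

/* Always fail migration. Used for mappings that are not movable. */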
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);
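/*
 * Common logic to directly migrate a single page that has no private data
 * (no buffers).  Pages are locked upon entry and exit.
 */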
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);
	return 0;
}
EXPORT_SYMBOL(migrate_page);

#ifdef CONFIG_BLOCK
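/*
 * Migration function for pages with buffers. This function can only be
 * used if the underlying filesystem guarantees that no other references
 * to "page" exist.
 */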
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif
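/*
 * Writeback a page to clean the dirty state; used when a dirty page has to
 * be migrated through a mapping with no migratepage method.  Returns
 * -EAGAIN so the (now clean) page can be retried later, or -EIO on error.
 */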
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		return -EAGAIN;

	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}
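/*
 * Default handling if a filesystem does not provide a migration function:
 * write out dirty pages first, drop fs-private buffers if possible, then
 * fall back to the generic migrate_page().
 */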
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page)
{
	if (PageDirty(page))
		return writeout(mapping, page);

	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page);
}
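/*
 * Move a page to a newly allocated page.  The page is locked and all ptes
 * have been replaced by migration entries pointing at the old page.
 *
 * The new page gets the mapping/index of the old one, the contents and
 * state are copied via the address_space migratepage method (or the
 * generic/fallback paths), and on success the migration ptes are rewritten
 * to map the new page.
 *
 * Return: < 0 on error, 0 on success.
 */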
static int move_to_new_page(struct page *newpage, struct page *page,
					int remap_swapcache, bool sync)
{
	struct address_space *mapping;
	int rc;

	if (!trylock_page(newpage))
		BUG();

	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		SetPageSwapBacked(newpage);

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page);
	else {
		if (PageDirty(page) && !sync &&
		    mapping->a_ops->migratepage != migrate_page)
			rc = -EBUSY;
		else if (mapping->a_ops->migratepage)
			rc = mapping->a_ops->migratepage(mapping,
							newpage, page);
		else
			rc = fallback_migrate_page(mapping, newpage, page);
	}

	if (rc) {
		newpage->mapping = NULL;
	} else {
		if (remap_swapcache)
			remove_migration_ptes(page, newpage);
	}

	unlock_page(newpage);

	return rc;
}
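/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */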
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force, bool offlining, bool sync)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);
	int remap_swapcache = 1;
	int charge = 0;
	struct mem_cgroup *mem;
	struct anon_vma *anon_vma = NULL;

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		goto move_newpage;
	}
	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page(page)))
			goto move_newpage;

	rc = -EAGAIN;

	if (!trylock_page(page)) {
		if (!force || !sync)
			goto move_newpage;

		if (current->flags & PF_MEMALLOC)
			goto move_newpage;

		lock_page(page);
	}
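	/*
	 * Only the memory-offlining path (offlining == true) is prepared to
	 * migrate KSM pages; everybody else skips them here, now that the
	 * page lock is held and the KSM state can be trusted.
	 */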
	if (PageKsm(page) && !offlining) {
		rc = -EBUSY;
		goto unlock;
	}

	charge = mem_cgroup_prepare_migration(page, newpage, &mem, GFP_KERNEL);
	if (charge == -ENOMEM) {
		rc = -ENOMEM;
		goto unlock;
	}
	BUG_ON(charge);

	if (PageWriteback(page)) {
		if (!sync) {
			rc = -EBUSY;
			goto uncharge;
		}
		if (!force)
			goto uncharge;
		wait_on_page_writeback(page);
	}
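	/*
	 * For anonymous pages, pin the anon_vma so it cannot be freed while
	 * the page is unmapped and the migration ptes still need to be
	 * removed.  An anon page in the swap cache whose anon_vma is already
	 * gone can only be migrated if its ptes need not be remapped
	 * (remap_swapcache = 0); otherwise give up.
	 */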
	if (PageAnon(page)) {
		anon_vma = page_lock_anon_vma(page);
		if (anon_vma) {
			get_anon_vma(anon_vma);
			page_unlock_anon_vma(anon_vma);
		} else if (PageSwapCache(page)) {
			remap_swapcache = 0;
		} else {
			goto uncharge;
		}
	}
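	/*
	 * Corner cases: a freshly added swap-cache page can have a NULL
	 * ->mapping and no rmap yet, so it must not be passed to
	 * try_to_unmap(); and an orphaned page may still carry fs-private
	 * buffer metadata that should be freed instead of migrated.
	 */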
	if (!page->mapping) {
		VM_BUG_ON(PageAnon(page));
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto uncharge;
		}
		goto skip_unmap;
	}

	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

skip_unmap:
	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, remap_swapcache, sync);

	if (rc && remap_swapcache)
		remove_migration_ptes(page, page);

	if (anon_vma)
		put_anon_vma(anon_vma);

uncharge:
	if (!charge)
		mem_cgroup_end_migration(mem, page, newpage, rc == 0);
unlock:
	unlock_page(page);

move_newpage:
	if (rc != -EAGAIN) {
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}

	putback_lru_page(newpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}
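/*
 * Counterpart of unmap_and_move() for hugetlb pages.
 *
 * There is no PageWriteback wait here: a hugepage that still has extra
 * references after try_to_unmap() (for example because of I/O in flight)
 * simply fails migration with -EAGAIN and stays on the list.
 */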
static int unmap_and_move_huge_page(new_page_t get_new_page,
				unsigned long private, struct page *hpage,
				int force, bool offlining, bool sync)
{
	int rc = 0;
	int *result = NULL;
	struct page *new_hpage = get_new_page(hpage, private, &result);
	struct anon_vma *anon_vma = NULL;

	if (!new_hpage)
		return -ENOMEM;

	rc = -EAGAIN;

	if (!trylock_page(hpage)) {
		if (!force || !sync)
			goto out;
		lock_page(hpage);
	}

	if (PageAnon(hpage)) {
		anon_vma = page_lock_anon_vma(hpage);
		if (anon_vma) {
			get_anon_vma(anon_vma);
			page_unlock_anon_vma(anon_vma);
		}
	}

	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

	if (!page_mapped(hpage))
		rc = move_to_new_page(new_hpage, hpage, 1, sync);

	if (rc)
		remove_migration_ptes(hpage, hpage);

	if (anon_vma)
		put_anon_vma(anon_vma);
out:
	unlock_page(hpage);

	if (rc != -EAGAIN) {
		list_del(&hpage->lru);
		put_page(hpage);
	}

	put_page(new_hpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(new_hpage);
	}
	return rc;
}
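/*
 * migrate_pages - migrate the pages on a list to the new pages supplied by
 * get_new_page() and its private argument.
 *
 * Up to 10 passes are made over the list; pages that return -EAGAIN are
 * retried in later passes.  The caller should call putback_lru_pages() to
 * return the remaining pages to the LRU.
 *
 * Return: number of pages not migrated, or an error code.
 */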
int migrate_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private, bool offlining,
		bool sync)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for(pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move(get_new_page, private,
						page, pass > 2, offlining,
						sync);

			switch(rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:
	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	if (rc)
		return rc;

	return nr_failed + retry;
}
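/*
 * migrate_huge_pages - same as migrate_pages(), but for lists of hugetlb
 * pages; it does not toggle PF_SWAPWRITE on the calling task.
 */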
int migrate_huge_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private, bool offlining,
		bool sync)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int rc;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move_huge_page(get_new_page,
					private, page, pass > 2, offlining,
					sync);

			switch(rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:
	if (rc)
		return rc;

	return nr_failed + retry;
}

#ifdef CONFIG_NUMA
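/*
 * Support for moving individual pages of a process to specific nodes,
 * used by the move_pages() system call below.
 */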
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};
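/*
 * Allocation callback for migrate_pages(): look up the page_to_node entry
 * for @p in the array passed via @private and allocate the new page on the
 * node requested there.
 */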
static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	return alloc_pages_exact_node(pm->node,
				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}
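/*
 * Move a set of pages as indicated in the pm array. The addr field must be
 * set to the virtual address of the page to be moved and the node field
 * has to indicate the node the page should be moved to.  The status field
 * of each entry is filled with the page's node or an error code.
 */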
static int do_move_page_to_node_array(struct mm_struct *mm,
				      struct page_to_node *pm,
				      int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

	down_read(&mm->mmap_sem);

	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
			goto set_status;

		page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		if (PageReserved(page) || PageKsm(page))
			goto put_and_set;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		err = isolate_lru_page(page);
		if (!err) {
			list_add_tail(&page->lru, &pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
put_and_set:
		put_page(page);
set_status:
		pp->status = err;
	}

	err = 0;
	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_page_node,
				(unsigned long)pm, 0, true);
		if (err)
			putback_lru_pages(&pagelist);
	}

	up_read(&mm->mmap_sem);
	return err;
}
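/*
 * Migrate an array of page addresses onto an array of nodes and fill the
 * corresponding array of status, processing the request in page-sized
 * chunks of page_to_node entries.
 */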
static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	struct page_to_node *pm;
	nodemask_t task_nodes;
	unsigned long chunk_nr_pages;
	unsigned long chunk_start;
	int err;

	task_nodes = cpuset_mems_allowed(task);

	err = -ENOMEM;
	pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
	if (!pm)
		goto out;

	migrate_prep();

	chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;

	for (chunk_start = 0;
	     chunk_start < nr_pages;
	     chunk_start += chunk_nr_pages) {
		int j;

		if (chunk_start + chunk_nr_pages > nr_pages)
			chunk_nr_pages = nr_pages - chunk_start;

		for (j = 0; j < chunk_nr_pages; j++) {
			const void __user *p;
			int node;

			err = -EFAULT;
			if (get_user(p, pages + j + chunk_start))
				goto out_pm;
			pm[j].addr = (unsigned long) p;

			if (get_user(node, nodes + j + chunk_start))
				goto out_pm;

			err = -ENODEV;
			if (node < 0 || node >= MAX_NUMNODES)
				goto out_pm;

			if (!node_state(node, N_HIGH_MEMORY))
				goto out_pm;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out_pm;

			pm[j].node = node;
		}

		pm[chunk_nr_pages].node = MAX_NUMNODES;

		err = do_move_page_to_node_array(mm, pm,
						 flags & MPOL_MF_MOVE_ALL);
		if (err < 0)
			goto out_pm;

		for (j = 0; j < chunk_nr_pages; j++)
			if (put_user(pm[j].status, status + j + chunk_start)) {
				err = -EFAULT;
				goto out_pm;
			}
	}
	err = 0;

out_pm:
	free_page((unsigned long)pm);
out:
	return err;
}
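/*
 * Determine the nodes of an array of pages and store them in an array of
 * status values.
 */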
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	down_read(&mm->mmap_sem);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			goto set_status;

		page = follow_page(vma, addr, 0);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page || PageReserved(page) || PageKsm(page))
			goto set_status;

		err = page_to_nid(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	up_read(&mm->mmap_sem);
}
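/*
 * Determine the nodes of a user array of pages and store them in a user
 * array of status values, in chunks of DO_PAGES_STAT_CHUNK_NR entries.
 */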
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
			break;

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}
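/*
 * Move a list of pages in the address space of the given process.
 *
 * Userspace typically reaches this through the move_pages(2) wrapper in
 * libnuma, roughly:
 *   long move_pages(int pid, unsigned long count, void **pages,
 *                   const int *nodes, int *status, int flags);
 * (the wrapper name and exact prototype belong to the library, not to this
 * file).
 */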
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;
	struct mm_struct *mm;
	int err;

	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		return -ESRCH;
	}
	mm = get_task_mm(task);
	rcu_read_unlock();

	if (!mm)
		return -EINVAL;
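	/*
	 * Check if this process has the right to modify the specified
	 * process: the caller must either share (real or effective) uid
	 * credentials with the target task or have CAP_SYS_NICE.
	 */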
	rcu_read_lock();
	tcred = __task_cred(task);
	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
	    cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	err = security_task_movememory(task);
	if (err)
		goto out;

	if (nodes) {
		err = do_pages_move(mm, task, nr_pages, pages, nodes, status,
				    flags);
	} else {
		err = do_pages_stat(mm, nr_pages, pages, status);
	}

out:
	mmput(mm);
	return err;
}
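/*
 * Call the migrate callback, if any, in each vma's vm_operations so that
 * mappings without an underlying struct page can perform their own
 * migration between the given node masks.
 */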
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
	const nodemask_t *from, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			err = vma->vm_ops->migrate(vma, to, from, flags);
			if (err)
				break;
		}
	}
	return err;
}
#endif