/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/memcontrol.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/gfp.h>

#include <asm/tlbflush.h>

#include "internal.h"

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

/*
 * migrate_prep() needs to be called before we start compiling a list of
 * pages to be migrated using isolate_lru_page(). If scheduling work on
 * other CPUs is undesirable, use migrate_prep_local().
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}

/*
 * Add isolated pages on the list back to the LRU under page lock
 * to avoid leaking evictable pages back onto the unevictable list.
 */
void putback_lru_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}
}
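
/*
 * Illustrative sketch (not part of the original file): the typical life
 * cycle of a migration request as driven by callers such as the move_pages
 * and migrate_pages syscalls. Everything except the migrate.c/vmscan APIs
 * named here is hypothetical:
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();				(drain per-cpu LRU pagevecs)
 *	if (!isolate_lru_page(page)) {		(take the page off its LRU)
 *		list_add_tail(&page->lru, &pagelist);
 *		inc_zone_page_state(page, NR_ISOLATED_ANON +
 *					page_is_file_cache(page));
 *	}
 *	if (migrate_pages(&pagelist, alloc_target_page, 0, false, true))
 *		putback_lru_pages(&pagelist);	(re-add stragglers on failure)
 */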

/*
 * Restore a potential migration pte to a working pte entry
 */
static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
				 unsigned long addr, void *old)
{
	struct mm_struct *mm = vma->vm_mm;
	swp_entry_t entry;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;
	spinlock_t *ptl;

	if (unlikely(PageHuge(new))) {
		ptep = huge_pte_offset(mm, addr);
		if (!ptep)
			goto out;
		ptl = &mm->page_table_lock;
	} else {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd))
			goto out;

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud))
			goto out;

		pmd = pmd_offset(pud, addr);
		if (pmd_trans_huge(*pmd))
			goto out;
		if (!pmd_present(*pmd))
			goto out;

		ptep = pte_offset_map(pmd, addr);

		if (!is_swap_pte(*ptep)) {
			pte_unmap(ptep);
			goto out;
		}

		ptl = pte_lockptr(mm, pmd);
	}

	spin_lock(ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto unlock;

	entry = pte_to_swp_entry(pte);

	if (!is_migration_entry(entry) ||
	    migration_entry_to_page(entry) != old)
		goto unlock;

	get_page(new);
	pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
	if (is_write_migration_entry(entry))
		pte = pte_mkwrite(pte);
#ifdef CONFIG_HUGETLB_PAGE
	if (PageHuge(new))
		pte = pte_mkhuge(pte);
#endif
	flush_cache_page(vma, addr, pte_pfn(pte));
	set_pte_at(mm, addr, ptep, pte);

	if (PageHuge(new)) {
		if (PageAnon(new))
			hugepage_add_anon_rmap(new, vma, addr);
		else
			page_dup_rmap(new);
	} else if (PageAnon(new))
		page_add_anon_rmap(new, vma, addr);
	else
		page_add_file_rmap(new);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, addr, ptep);
unlock:
	pte_unmap_unlock(ptep, ptl);
out:
	return SWAP_AGAIN;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
static void remove_migration_ptes(struct page *old, struct page *new)
{
	rmap_walk(new, remove_migration_pte, old);
}

/*
 * Something used the pte of a page under migration. We need to
 * get to the page and wait until migration is finished.
 * When we return from this function the fault will be retried.
 *
 * This function is called from do_swap_page().
 */
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				unsigned long address)
{
	pte_t *ptep, pte;
	spinlock_t *ptl;
	swp_entry_t entry;
	struct page *page;

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!is_swap_pte(pte))
		goto out;

	entry = pte_to_swp_entry(pte);
	if (!is_migration_entry(entry))
		goto out;

	page = migration_entry_to_page(entry);

	/*
	 * Once radix-tree replacement of page migration started, page_count
	 * *must* be zero. And, we don't want to call wait_on_page_locked()
	 * against a page without get_page().
	 * So, we use get_page_unless_zero(), here. Even if it failed, the
	 * page fault will occur again.
	 */
	if (!get_page_unless_zero(page))
		goto out;
	pte_unmap_unlock(ptep, ptl);
	wait_on_page_locked(page);
	put_page(page);
	return;
out:
	pte_unmap_unlock(ptep, ptl);
}
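
/*
 * Context sketch (shape approximate, names from mm/memory.c): the fault
 * path reaches migration_entry_wait() from do_swap_page() when the "swap"
 * entry it finds is really a migration entry:
 *
 *	entry = pte_to_swp_entry(orig_pte);
 *	if (unlikely(non_swap_entry(entry))) {
 *		if (is_migration_entry(entry))
 *			migration_entry_wait(mm, pmd, address);
 *		...
 *	}
 */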

/*
 * Replace the page in the mapping.
 *
 * The number of remaining references must be:
 * 1 for anonymous pages without a mapping
 * 2 for pages with a mapping
 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
 */
static int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	/*
	 * Now we know that no one else is looking at the page.
	 */
	get_page(newpage);	/* add cache reference */
	if (PageSwapCache(page)) {
		SetPageSwapCache(newpage);
		set_page_private(newpage, page_private(page));
	}

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);
	/*
	 * Drop cache reference from old page.
	 * We know this isn't the last reference.
	 */
	__put_page(page);

	/*
	 * If moved to a different zone then also account
	 * the page for that zone. Other VM counters will be
	 * taken care of when we establish references to the
	 * new page and drop references to the old page.
	 *
	 * Note that anonymous pages are accounted for
	 * via NR_FILE_PAGES and NR_SHMEM as well, even though
	 * they are not exposed to userspace as file pages.
	 */
	__dec_zone_page_state(page, NR_FILE_PAGES);
	__inc_zone_page_state(newpage, NR_FILE_PAGES);
	if (PageSwapBacked(page)) {
		__dec_zone_page_state(page, NR_SHMEM);
		__inc_zone_page_state(newpage, NR_SHMEM);
	}
	spin_unlock_irq(&mapping->tree_lock);

	return 0;
}

/*
 * The expected number of remaining references is the same
 * as that of migrate_page_move_mapping().
 */
int migrate_huge_page_move_mapping(struct address_space *mapping,
				   struct page *newpage, struct page *page)
{
	int expected_count;
	void **pslot;

	if (!mapping) {
		if (page_count(page) != 1)
			return -EAGAIN;
		return 0;
	}

	spin_lock_irq(&mapping->tree_lock);

	pslot = radix_tree_lookup_slot(&mapping->page_tree,
					page_index(page));

	expected_count = 2 + page_has_private(page);
	if (page_count(page) != expected_count ||
		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return -EAGAIN;
	}

	get_page(newpage);

	radix_tree_replace_slot(pslot, newpage);

	page_unfreeze_refs(page, expected_count);

	__put_page(page);

	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * Copy the page to its new location
 */
void migrate_page_copy(struct page *newpage, struct page *page)
{
	if (PageHuge(page))
		copy_huge_page(newpage, page);
	else
		copy_highpage(newpage, page);

	if (PageError(page))
		SetPageError(newpage);
	if (PageReferenced(page))
		SetPageReferenced(newpage);
	if (PageUptodate(page))
		SetPageUptodate(newpage);
	if (TestClearPageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		SetPageActive(newpage);
	} else if (TestClearPageUnevictable(page))
		SetPageUnevictable(newpage);
	if (PageChecked(page))
		SetPageChecked(newpage);
	if (PageMappedToDisk(page))
		SetPageMappedToDisk(newpage);

	if (PageDirty(page)) {
		clear_page_dirty_for_io(page);
		/*
		 * Want to mark the page and the radix tree as dirty, and
		 * redo the accounting that clear_page_dirty_for_io undid,
		 * but we can't use set_page_dirty because that function
		 * is actually a signal that all of the page has become dirty.
		 * Whereas only part of our page may be dirty.
		 */
		__set_page_dirty_nobuffers(newpage);
	}

	mlock_migrate_page(newpage, page);
	ksm_migrate_page(newpage, page);

	ClearPageSwapCache(page);
	ClearPagePrivate(page);
	set_page_private(page, 0);
	page->mapping = NULL;

	/*
	 * If any waiters have accumulated on the new page then
	 * wake them up.
	 */
	if (PageWriteback(newpage))
		end_page_writeback(newpage);
}

/************************************************************
 *                    Migration functions
 ***********************************************************/

/* Always fail migration. Used for mappings that are not movable */
int fail_migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page)
{
	return -EIO;
}
EXPORT_SYMBOL(fail_migrate_page);

/*
 * Common logic to directly migrate a single page suitable for
 * pages that do not use PagePrivate/PagePrivate2.
 *
 * Pages are locked upon entry and exit.
 */
int migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	int rc;

	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	migrate_page_copy(newpage, page);
	return 0;
}
EXPORT_SYMBOL(migrate_page);
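
/*
 * Illustrative sketch (example_aops and its other methods are hypothetical):
 * a filesystem whose pagecache pages carry no private state can make them
 * movable simply by wiring migrate_page() into its address_space_operations:
 *
 *	static const struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.writepage	= example_writepage,
 *		.migratepage	= migrate_page,
 *	};
 *
 * Block-device-backed filesystems using buffer_heads would instead point
 * .migratepage at buffer_migrate_page() below.
 */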

#ifdef CONFIG_BLOCK
/*
 * Migration function for pages with buffers. This function can only be used
 * if the underlying filesystem guarantees that no other references to "page"
 * exist.
 */
int buffer_migrate_page(struct address_space *mapping,
		struct page *newpage, struct page *page)
{
	struct buffer_head *bh, *head;
	int rc;

	if (!page_has_buffers(page))
		return migrate_page(mapping, newpage, page);

	head = page_buffers(page);

	rc = migrate_page_move_mapping(mapping, newpage, page);

	if (rc)
		return rc;

	bh = head;
	do {
		get_bh(bh);
		lock_buffer(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	ClearPagePrivate(page);
	set_page_private(newpage, page_private(page));
	set_page_private(page, 0);
	put_page(page);
	get_page(newpage);

	bh = head;
	do {
		set_bh_page(bh, newpage, bh_offset(bh));
		bh = bh->b_this_page;

	} while (bh != head);

	SetPagePrivate(newpage);

	migrate_page_copy(newpage, page);

	bh = head;
	do {
		unlock_buffer(bh);
		put_bh(bh);
		bh = bh->b_this_page;

	} while (bh != head);

	return 0;
}
EXPORT_SYMBOL(buffer_migrate_page);
#endif

/*
 * Writeback a page to clean the dirty state
 */
static int writeout(struct address_space *mapping, struct page *page)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = 1,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1
	};
	int rc;

	if (!mapping->a_ops->writepage)
		/* No write method for the address space */
		return -EINVAL;

	if (!clear_page_dirty_for_io(page))
		/* Someone else already triggered a write */
		return -EAGAIN;

	/*
	 * A dirty page may imply that the underlying filesystem has
	 * the page on some queue. So the page must be clean for
	 * migration. Writeout may mean we lose the lock and the
	 * page state is no longer what we checked for earlier.
	 * At this point we know that the migration attempt cannot
	 * be successful.
	 */
	remove_migration_ptes(page, page);

	rc = mapping->a_ops->writepage(page, &wbc);

	if (rc != AOP_WRITEPAGE_ACTIVATE)
		/* unlocked. Relock */
		lock_page(page);

	return (rc < 0) ? -EIO : -EAGAIN;
}

/*
 * Default handling if a filesystem does not provide a migration function.
 */
static int fallback_migrate_page(struct address_space *mapping,
	struct page *newpage, struct page *page)
{
	if (PageDirty(page))
		return writeout(mapping, page);

	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (page_has_private(page) &&
	    !try_to_release_page(page, GFP_KERNEL))
		return -EAGAIN;

	return migrate_page(mapping, newpage, page);
}

/*
 * Move a page to a newly allocated page
 * The page is locked and all ptes have been successfully removed.
 *
 * The new page will have replaced the old page if this function
 * is successful.
 *
 * Return value:
 *   < 0 - error code
 *  == 0 - success
 */
static int move_to_new_page(struct page *newpage, struct page *page,
					int remap_swapcache)
{
	struct address_space *mapping;
	int rc;

	/*
	 * Block others from accessing the page when we get around to
	 * establishing additional references. We are the only one
	 * holding a reference to the new page at this point.
	 */
	if (!trylock_page(newpage))
		BUG();

	/* Prepare mapping for the new page.*/
	newpage->index = page->index;
	newpage->mapping = page->mapping;
	if (PageSwapBacked(page))
		SetPageSwapBacked(newpage);

	mapping = page_mapping(page);
	if (!mapping)
		rc = migrate_page(mapping, newpage, page);
	else if (mapping->a_ops->migratepage)
		/*
		 * Most pages have a mapping and most filesystems provide a
		 * migratepage callback. Anonymous pages are part of swap
		 * space which also has its own migratepage callback. This
		 * is the most common path for page migration.
		 */
		rc = mapping->a_ops->migratepage(mapping,
						newpage, page);
	else
		rc = fallback_migrate_page(mapping, newpage, page);

	if (rc) {
		newpage->mapping = NULL;
	} else {
		if (remap_swapcache)
			remove_migration_ptes(page, newpage);
	}

	unlock_page(newpage);

	return rc;
}

/*
 * Obtain the lock on page, remove all ptes and migrate the page
 * to the newly allocated page in newpage.
 */
static int unmap_and_move(new_page_t get_new_page, unsigned long private,
			struct page *page, int force, bool offlining, bool sync)
{
	int rc = 0;
	int *result = NULL;
	struct page *newpage = get_new_page(page, private, &result);
	int remap_swapcache = 1;
	int charge = 0;
	struct mem_cgroup *mem = NULL;
	struct anon_vma *anon_vma = NULL;

	if (!newpage)
		return -ENOMEM;

	if (page_count(page) == 1) {
		/* page was freed from under us. So we are done. */
		goto move_newpage;
	}
	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page(page)))
			goto move_newpage;

	/* prepare cgroup just returns 0 or -ENOMEM */
	rc = -EAGAIN;

	if (!trylock_page(page)) {
		if (!force)
			goto move_newpage;

		/*
		 * It's not safe for direct compaction to call lock_page.
		 * For example, during page readahead pages are added locked
		 * to the LRU. Later, when the IO completes the pages are
		 * marked uptodate and unlocked. However, the queueing
		 * could be merging multiple pages for one bio (e.g.
		 * mpage_readpages). If an allocation happens for the
		 * second or third page, the process can end up locking
		 * the same page twice and deadlocking. Rather than
		 * trying to be clever about what pages can be locked,
		 * just avoid the use of lock_page for direct compaction
		 * indirectly.
		 */
		if (current->flags & PF_MEMALLOC)
			goto move_newpage;

		lock_page(page);
	}

	/*
	 * Only memory hotplug's offline_pages() caller has locked out KSM,
	 * and can safely migrate a KSM page. The other cases have skipped
	 * PageKsm along with PageReserved.
	 */
	if (PageKsm(page) && !offlining) {
		rc = -EBUSY;
		goto unlock;
	}

	/* charge against new page */
	charge = mem_cgroup_prepare_migration(page, newpage, &mem);
	if (charge == -ENOMEM) {
		rc = -ENOMEM;
		goto unlock;
	}
	BUG_ON(charge);

	if (PageWriteback(page)) {
		if (!force || !sync)
			goto uncharge;
		wait_on_page_writeback(page);
	}
	/*
	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
	 * we cannot notice that anon_vma is freed while we migrate a page.
	 * This get_anon_vma() delays freeing the anon_vma pointer until the
	 * end of migration. File cache pages are no problem because of
	 * page_lock(), so only anonymous pages need this care here.
	 */
	if (PageAnon(page)) {
		/*
		 * Only page_lock_anon_vma() understands the subtleties of
		 * getting a hold on an anon_vma from outside one of its mms.
		 */
		anon_vma = page_lock_anon_vma(page);
		if (anon_vma) {
			/*
			 * Take a reference count on the anon_vma if the
			 * page is mapped so that it is guaranteed to
			 * exist when the page is remapped later.
			 */
			get_anon_vma(anon_vma);
			page_unlock_anon_vma(anon_vma);
		} else if (PageSwapCache(page)) {
			/*
			 * We cannot be sure that the anon_vma of an unmapped
			 * swapcache page is safe to use because we don't
			 * know in advance if the VMA that this page belonged
			 * to still exists. If the VMA and others sharing the
			 * data have been freed, then the anon_vma could
			 * already be invalid.
			 *
			 * To avoid this possibility, swapcache pages get
			 * migrated but are not remapped when migration
			 * completes.
			 */
			remap_swapcache = 0;
		} else {
			goto uncharge;
		}
	}

	/*
	 * Corner case handling:
	 * 1. When a new swap-cache page is read into, it is added to the LRU
	 * and treated as swapcache but it has no rmap yet.
	 * Calling try_to_unmap() against a page->mapping==NULL page will
	 * trigger a BUG. So handle it here.
	 * 2. An orphaned page (see truncate_complete_page) might have
	 * fs-private metadata. The page can be picked up due to memory
	 * offlining. Everywhere else except page reclaim, the page is
	 * invisible to the vm, so the page can not be migrated. So try to
	 * free the metadata, so the page can be freed.
	 */
	if (!page->mapping) {
		VM_BUG_ON(PageAnon(page));
		if (page_has_private(page)) {
			try_to_free_buffers(page);
			goto uncharge;
		}
		goto skip_unmap;
	}

	/* Establish migration ptes or remove ptes */
	try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

skip_unmap:
	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, remap_swapcache);

	if (rc && remap_swapcache)
		remove_migration_ptes(page, page);

	/* Drop an anon_vma reference if we took one */
	if (anon_vma)
		drop_anon_vma(anon_vma);

uncharge:
	if (!charge)
		mem_cgroup_end_migration(mem, page, newpage, rc == 0);
unlock:
	unlock_page(page);

move_newpage:
	if (rc != -EAGAIN) {
		/*
		 * A page that has been migrated has all references
		 * removed and will be freed. A page that has not been
		 * migrated will have kept its references and be
		 * restored.
		 */
		list_del(&page->lru);
		dec_zone_page_state(page, NR_ISOLATED_ANON +
				page_is_file_cache(page));
		putback_lru_page(page);
	}

	/*
	 * Move the new page to the LRU. If migration was not successful
	 * then this will free the page.
	 */
	putback_lru_page(newpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(newpage);
	}
	return rc;
}

/*
 * Counterpart of unmap_and_move() for hugepage migration.
 *
 * This function doesn't wait for the completion of hugepage I/O
 * because there is no race between I/O and migration for hugepage.
 * Note that currently hugepage I/O occurs only in direct I/O
 * where no lock is held and PG_writeback is irrelevant,
 * and writeback status of all subpages are counted in the reference
 * count of the head page (i.e. if all subpages of a 2MB hugepage are
 * under direct I/O, the reference of the head page is 512 and a bit more.)
 * This means that when we try to migrate a hugepage whose subpages are
 * doing direct I/O, some references remain after try_to_unmap() and
 * hugepage migration fails without data corruption.
 *
 * There is also no race when direct I/O is issued on the page under migration,
 * because then the pte is replaced with a migration swap entry and direct I/O
 * code will wait in the page fault for migration to complete.
 */
static int unmap_and_move_huge_page(new_page_t get_new_page,
				unsigned long private, struct page *hpage,
				int force, bool offlining, bool sync)
{
	int rc = 0;
	int *result = NULL;
	struct page *new_hpage = get_new_page(hpage, private, &result);
	struct anon_vma *anon_vma = NULL;

	if (!new_hpage)
		return -ENOMEM;

	rc = -EAGAIN;

	if (!trylock_page(hpage)) {
		if (!force || !sync)
			goto out;
		lock_page(hpage);
	}

	if (PageAnon(hpage)) {
		anon_vma = page_lock_anon_vma(hpage);
		if (anon_vma) {
			get_anon_vma(anon_vma);
			page_unlock_anon_vma(anon_vma);
		}
	}

	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);

	if (!page_mapped(hpage))
		rc = move_to_new_page(new_hpage, hpage, 1);

	if (rc)
		remove_migration_ptes(hpage, hpage);

	if (anon_vma)
		drop_anon_vma(anon_vma);
out:
	unlock_page(hpage);

	if (rc != -EAGAIN) {
		list_del(&hpage->lru);
		put_page(hpage);
	}

	put_page(new_hpage);

	if (result) {
		if (rc)
			*result = rc;
		else
			*result = page_to_nid(new_hpage);
	}
	return rc;
}

/*
 * migrate_pages
 *
 * The function takes one list of pages to migrate and a function
 * that determines from the page to be migrated and the private data
 * the target of the move and allocates the page.
 *
 * The function returns after 10 attempts or if no pages
 * are movable any more because the list has become empty
 * or no retryable pages exist any more.
 * The caller should call putback_lru_pages() to return pages to the LRU
 * or free list only if ret != 0.
 *
 * Return: Number of pages not migrated or error code.
 */
int migrate_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private, bool offlining,
		bool sync)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int swapwrite = current->flags & PF_SWAPWRITE;
	int rc;

	if (!swapwrite)
		current->flags |= PF_SWAPWRITE;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move(get_new_page, private,
						page, pass > 2, offlining,
						sync);

			switch (rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:
	if (!swapwrite)
		current->flags &= ~PF_SWAPWRITE;

	if (rc)
		return rc;

	return nr_failed + retry;
}
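
/*
 * Illustrative sketch of a new_page_t allocation callback for
 * migrate_pages(); the name and the fixed-node policy are hypothetical
 * (compare new_page_node() below for the real move_pages variant):
 *
 *	static struct page *alloc_target_page(struct page *page,
 *				unsigned long private, int **result)
 *	{
 *		return alloc_pages_node((int)private,
 *					GFP_HIGHUSER_MOVABLE, 0);
 *	}
 */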

int migrate_huge_pages(struct list_head *from,
		new_page_t get_new_page, unsigned long private, bool offlining,
		bool sync)
{
	int retry = 1;
	int nr_failed = 0;
	int pass = 0;
	struct page *page;
	struct page *page2;
	int rc;

	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;

		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();

			rc = unmap_and_move_huge_page(get_new_page,
					private, page, pass > 2, offlining,
					sync);

			switch (rc) {
			case -ENOMEM:
				goto out;
			case -EAGAIN:
				retry++;
				break;
			case 0:
				break;
			default:
				/* Permanent failure */
				nr_failed++;
				break;
			}
		}
	}
	rc = 0;
out:
	if (rc)
		return rc;

	return nr_failed + retry;
}
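
/*
 * Context sketch (shape approximate): the expected caller of
 * migrate_huge_pages() is hugepage soft-offlining in mm/memory-failure.c,
 * which puts the hugepage on a private list and retries migration the same
 * way the base-page path does:
 *
 *	LIST_HEAD(pagelist);
 *
 *	list_add(&hpage->lru, &pagelist);
 *	ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
 *					false, true);
 */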

#ifdef CONFIG_NUMA
/*
 * Move a list of individual pages
 */
struct page_to_node {
	unsigned long addr;
	struct page *page;
	int node;
	int status;
};

static struct page *new_page_node(struct page *p, unsigned long private,
		int **result)
{
	struct page_to_node *pm = (struct page_to_node *)private;

	while (pm->node != MAX_NUMNODES && pm->page != p)
		pm++;

	if (pm->node == MAX_NUMNODES)
		return NULL;

	*result = &pm->status;

	return alloc_pages_exact_node(pm->node,
				GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
}

/*
 * Move a set of pages as indicated in the pm array. The addr
 * field must be set to the virtual address of the page to be moved
 * and the node number must contain a valid target node.
 * The pm array ends with node = MAX_NUMNODES.
 */
static int do_move_page_to_node_array(struct mm_struct *mm,
				      struct page_to_node *pm,
				      int migrate_all)
{
	int err;
	struct page_to_node *pp;
	LIST_HEAD(pagelist);

	down_read(&mm->mmap_sem);

	/*
	 * Build a list of pages to migrate
	 */
	for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
		struct vm_area_struct *vma;
		struct page *page;

		err = -EFAULT;
		vma = find_vma(mm, pp->addr);
		if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma))
			goto set_status;

		page = follow_page(vma, pp->addr, FOLL_GET|FOLL_SPLIT);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		if (!page)
			goto set_status;

		/* Use PageReserved to check for zero page */
		if (PageReserved(page) || PageKsm(page))
			goto put_and_set;

		pp->page = page;
		err = page_to_nid(page);

		if (err == pp->node)
			/*
			 * Node already in the right place
			 */
			goto put_and_set;

		err = -EACCES;
		if (page_mapcount(page) > 1 &&
				!migrate_all)
			goto put_and_set;

		err = isolate_lru_page(page);
		if (!err) {
			list_add_tail(&page->lru, &pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
put_and_set:
		/*
		 * Either remove the duplicate refcount from
		 * isolate_lru_page() or drop the page ref if it was
		 * not isolated.
		 */
		put_page(page);
set_status:
		pp->status = err;
	}

	err = 0;
	if (!list_empty(&pagelist)) {
		err = migrate_pages(&pagelist, new_page_node,
				(unsigned long)pm, 0, true);
		if (err)
			putback_lru_pages(&pagelist);
	}

	up_read(&mm->mmap_sem);
	return err;
}

/*
 * Migrate an array of page addresses onto an array of nodes and fill
 * the corresponding array of status.
 */
static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
			 unsigned long nr_pages,
			 const void __user * __user *pages,
			 const int __user *nodes,
			 int __user *status, int flags)
{
	struct page_to_node *pm;
	nodemask_t task_nodes;
	unsigned long chunk_nr_pages;
	unsigned long chunk_start;
	int err;

	task_nodes = cpuset_mems_allowed(task);

	err = -ENOMEM;
	pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
	if (!pm)
		goto out;

	migrate_prep();

	/*
	 * Store a chunk of the page_to_node array in a page,
	 * but keep the last one as a marker
	 */
	chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;

	for (chunk_start = 0;
	     chunk_start < nr_pages;
	     chunk_start += chunk_nr_pages) {
		int j;

		if (chunk_start + chunk_nr_pages > nr_pages)
			chunk_nr_pages = nr_pages - chunk_start;

		/* fill the chunk pm with addrs and nodes from user-space */
		for (j = 0; j < chunk_nr_pages; j++) {
			const void __user *p;
			int node;

			err = -EFAULT;
			if (get_user(p, pages + j + chunk_start))
				goto out_pm;
			pm[j].addr = (unsigned long) p;

			if (get_user(node, nodes + j + chunk_start))
				goto out_pm;

			err = -ENODEV;
			if (node < 0 || node >= MAX_NUMNODES)
				goto out_pm;

			if (!node_state(node, N_HIGH_MEMORY))
				goto out_pm;

			err = -EACCES;
			if (!node_isset(node, task_nodes))
				goto out_pm;

			pm[j].node = node;
		}

		/* End marker in the chunk */
		pm[chunk_nr_pages].node = MAX_NUMNODES;

		/* Migrate this chunk */
		err = do_move_page_to_node_array(mm, pm,
						 flags & MPOL_MF_MOVE_ALL);
		if (err < 0)
			goto out_pm;

		/* Return status information */
		for (j = 0; j < chunk_nr_pages; j++)
			if (put_user(pm[j].status, status + j + chunk_start)) {
				err = -EFAULT;
				goto out_pm;
			}
	}
	err = 0;

out_pm:
	free_page((unsigned long)pm);
out:
	return err;
}

/*
 * Determine the nodes of an array of pages and store it in an array of status.
 */
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
				const void __user **pages, int *status)
{
	unsigned long i;

	down_read(&mm->mmap_sem);

	for (i = 0; i < nr_pages; i++) {
		unsigned long addr = (unsigned long)(*pages);
		struct vm_area_struct *vma;
		struct page *page;
		int err = -EFAULT;

		vma = find_vma(mm, addr);
		if (!vma || addr < vma->vm_start)
			goto set_status;

		page = follow_page(vma, addr, 0);

		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto set_status;

		err = -ENOENT;
		/* Use PageReserved to check for zero page */
		if (!page || PageReserved(page) || PageKsm(page))
			goto set_status;

		err = page_to_nid(page);
set_status:
		*status = err;

		pages++;
		status++;
	}

	up_read(&mm->mmap_sem);
}

/*
 * Determine the nodes of a user array of pages and store it in
 * a user array of status.
 */
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
			 const void __user * __user *pages,
			 int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
	int chunk_status[DO_PAGES_STAT_CHUNK_NR];

	while (nr_pages) {
		unsigned long chunk_nr;

		chunk_nr = nr_pages;
		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
			chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
			break;

		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);

		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
			break;

		pages += chunk_nr;
		status += chunk_nr;
		nr_pages -= chunk_nr;
	}
	return nr_pages ? -EFAULT : 0;
}

/*
 * Move a list of pages in the address space of the currently executing
 * process.
 */
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
		const void __user * __user *, pages,
		const int __user *, nodes,
		int __user *, status, int, flags)
{
	const struct cred *cred = current_cred(), *tcred;
	struct task_struct *task;
	struct mm_struct *mm;
	int err;

	/* Check flags */
	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
		return -EINVAL;

	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
		return -EPERM;

	/* Find the mm_struct */
	rcu_read_lock();
	task = pid ? find_task_by_vpid(pid) : current;
	if (!task) {
		rcu_read_unlock();
		return -ESRCH;
	}
	mm = get_task_mm(task);
	rcu_read_unlock();

	if (!mm)
		return -EINVAL;

	/*
	 * Check if this process has the right to modify the specified
	 * process. The right exists if the process has administrative
	 * capabilities, superuser privileges or the same
	 * uid as the target process.
	 */
	rcu_read_lock();
	tcred = __task_cred(task);
	if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
	    cred->uid != tcred->suid && cred->uid != tcred->uid &&
	    !capable(CAP_SYS_NICE)) {
		rcu_read_unlock();
		err = -EPERM;
		goto out;
	}
	rcu_read_unlock();

	err = security_task_movememory(task);
	if (err)
		goto out;

	if (nodes) {
		err = do_pages_move(mm, task, nr_pages, pages, nodes, status,
				    flags);
	} else {
		err = do_pages_stat(mm, nr_pages, pages, status);
	}

out:
	mmput(mm);
	return err;
}
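
/*
 * Userspace view (sketch): the syscall above backs move_pages(2), usually
 * reached through libnuma's numa_move_pages(). With a non-NULL nodes array
 * it moves the pages; with nodes == NULL it only reports each page's
 * current node in status[]. Error handling omitted:
 *
 *	void *pages[1] = { addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *
 *	syscall(__NR_move_pages, 0, 1, pages, nodes, status, MPOL_MF_MOVE);
 */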

/*
 * Call migration functions in the vma_ops that may prepare
 * memory in a vm for migration. migration functions may perform
 * the migration for vmas that do not have an underlying
 * migration function. This is typically used for hotplugging
 * and hotremoval of registered nodes.
 */
int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
	const nodemask_t *from, unsigned long flags)
{
	struct vm_area_struct *vma;
	int err = 0;

	for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops->migrate) {
			err = vma->vm_ops->migrate(vma, to, from, flags);
			if (err)
				break;
		}
	}
	return err;
}
#endif