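/*
 * NUMA memory policy support.
 *
 * A memory policy is attached to a task (set_mempolicy()), to a VMA
 * (mbind()) or to a shared memory object (shared_policy).  Four modes
 * are supported: MPOL_DEFAULT (fall back to the surrounding or system
 * default policy), MPOL_PREFERRED (try one node first), MPOL_BIND
 * (allocate only from a set of nodes) and MPOL_INTERLEAVE (round-robin
 * allocations over a set of nodes).
 */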
68#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69
70#include <linux/mempolicy.h>
71#include <linux/mm.h>
72#include <linux/highmem.h>
73#include <linux/hugetlb.h>
74#include <linux/kernel.h>
75#include <linux/sched.h>
76#include <linux/nodemask.h>
77#include <linux/cpuset.h>
78#include <linux/slab.h>
79#include <linux/string.h>
80#include <linux/export.h>
81#include <linux/nsproxy.h>
82#include <linux/interrupt.h>
83#include <linux/init.h>
84#include <linux/compat.h>
85#include <linux/swap.h>
86#include <linux/seq_file.h>
87#include <linux/proc_fs.h>
88#include <linux/migrate.h>
89#include <linux/ksm.h>
90#include <linux/rmap.h>
91#include <linux/security.h>
92#include <linux/syscalls.h>
93#include <linux/ctype.h>
94#include <linux/mm_inline.h>
95#include <linux/mmu_notifier.h>
96#include <linux/printk.h>
97
98#include <asm/tlbflush.h>
99#include <asm/uaccess.h>
100#include <linux/random.h>
101
102#include "internal.h"
103
104
105#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)
106#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)
107
108static struct kmem_cache *policy_cache;
109static struct kmem_cache *sn_cache;
110
111
112
113enum zone_type policy_zone = 0;
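/* Run-time system-wide default policy: local allocation. */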
118static struct mempolicy default_policy = {
119 .refcnt = ATOMIC_INIT(1),
120 .mode = MPOL_PREFERRED,
121 .flags = MPOL_F_LOCAL,
122};
123
124static struct mempolicy preferred_node_policy[MAX_NUMNODES];
125
126struct mempolicy *get_task_policy(struct task_struct *p)
127{
128 struct mempolicy *pol = p->mempolicy;
129 int node;
130
131 if (pol)
132 return pol;
133
134 node = numa_node_id();
135 if (node != NUMA_NO_NODE) {
136 pol = &preferred_node_policy[node];
137
138 if (pol->mode)
139 return pol;
140 }
141
142 return &default_policy;
143}
144
145static const struct mempolicy_operations {
146 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
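	/*
	 * rebind() updates the policy's node mask when the owning
	 * cpuset's mems_allowed changes.  MPOL_REBIND_ONCE rebinds in a
	 * single pass; MPOL_REBIND_STEP1/STEP2 split the update so the
	 * mask is never left empty in between.
	 */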
161 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
162 enum mpol_rebind_step step);
163} mpol_ops[MPOL_MAX];
164
165static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
166{
167 return pol->flags & MPOL_MODE_FLAGS;
168}
169
170static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
171 const nodemask_t *rel)
172{
173 nodemask_t tmp;
174 nodes_fold(tmp, *orig, nodes_weight(*rel));
175 nodes_onto(*ret, tmp, *rel);
176}
177
178static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
179{
180 if (nodes_empty(*nodes))
181 return -EINVAL;
182 pol->v.nodes = *nodes;
183 return 0;
184}
185
186static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
187{
188 if (!nodes)
189 pol->flags |= MPOL_F_LOCAL;
190 else if (nodes_empty(*nodes))
191 return -EINVAL;
192 else
193 pol->v.preferred_node = first_node(*nodes);
194 return 0;
195}
196
197static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
198{
199 if (nodes_empty(*nodes))
200 return -EINVAL;
201 pol->v.nodes = *nodes;
202 return 0;
203}
204
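/*
 * mpol_set_nodemask() is called after mpol_new() to install the nodemask
 * for the new policy, restricted to the current cpuset's mems_allowed
 * (or remapped onto it for MPOL_F_RELATIVE_NODES).  Callers hold
 * task_lock() so that mems_allowed cannot change underneath us.
 */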
214static int mpol_set_nodemask(struct mempolicy *pol,
215 const nodemask_t *nodes, struct nodemask_scratch *nsc)
216{
217 int ret;
218
219
220 if (pol == NULL)
221 return 0;
222
223 nodes_and(nsc->mask1,
224 cpuset_current_mems_allowed, node_states[N_MEMORY]);
225
226 VM_BUG_ON(!nodes);
227 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
228 nodes = NULL;
229 else {
230 if (pol->flags & MPOL_F_RELATIVE_NODES)
231 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
232 else
233 nodes_and(nsc->mask2, *nodes, nsc->mask1);
234
235 if (mpol_store_user_nodemask(pol))
236 pol->w.user_nodemask = *nodes;
237 else
238 pol->w.cpuset_mems_allowed =
239 cpuset_current_mems_allowed;
240 }
241
242 if (nodes)
243 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
244 else
245 ret = mpol_ops[pol->mode].create(pol, NULL);
246 return ret;
247}
248
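/*
 * mpol_new() allocates a new policy and performs basic validation of the
 * mode/flags/nodes combination.  The caller must still invoke
 * mpol_set_nodemask() to install the node mask.
 */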
253static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
254 nodemask_t *nodes)
255{
256 struct mempolicy *policy;
257
258 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
259 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
260
261 if (mode == MPOL_DEFAULT) {
262 if (nodes && !nodes_empty(*nodes))
263 return ERR_PTR(-EINVAL);
264 return NULL;
265 }
266 VM_BUG_ON(!nodes);
273 if (mode == MPOL_PREFERRED) {
274 if (nodes_empty(*nodes)) {
275 if (((flags & MPOL_F_STATIC_NODES) ||
276 (flags & MPOL_F_RELATIVE_NODES)))
277 return ERR_PTR(-EINVAL);
278 }
279 } else if (mode == MPOL_LOCAL) {
280 if (!nodes_empty(*nodes))
281 return ERR_PTR(-EINVAL);
282 mode = MPOL_PREFERRED;
283 } else if (nodes_empty(*nodes))
284 return ERR_PTR(-EINVAL);
285 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
286 if (!policy)
287 return ERR_PTR(-ENOMEM);
288 atomic_set(&policy->refcnt, 1);
289 policy->mode = mode;
290 policy->flags = flags;
291
292 return policy;
293}
294
295
296void __mpol_put(struct mempolicy *p)
297{
298 if (!atomic_dec_and_test(&p->refcnt))
299 return;
300 kmem_cache_free(policy_cache, p);
301}
302
303static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
304 enum mpol_rebind_step step)
305{
306}
314static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
315 enum mpol_rebind_step step)
316{
317 nodemask_t tmp;
318
319 if (pol->flags & MPOL_F_STATIC_NODES)
320 nodes_and(tmp, pol->w.user_nodemask, *nodes);
321 else if (pol->flags & MPOL_F_RELATIVE_NODES)
322 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
323 else {
324
325
326
327
328 if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
329 nodes_remap(tmp, pol->v.nodes,
330 pol->w.cpuset_mems_allowed, *nodes);
331 pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
332 } else if (step == MPOL_REBIND_STEP2) {
333 tmp = pol->w.cpuset_mems_allowed;
334 pol->w.cpuset_mems_allowed = *nodes;
335 } else
336 BUG();
337 }
338
339 if (nodes_empty(tmp))
340 tmp = *nodes;
341
342 if (step == MPOL_REBIND_STEP1)
343 nodes_or(pol->v.nodes, pol->v.nodes, tmp);
344 else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
345 pol->v.nodes = tmp;
346 else
347 BUG();
348
349 if (!node_isset(current->il_next, tmp)) {
350 current->il_next = next_node(current->il_next, tmp);
351 if (current->il_next >= MAX_NUMNODES)
352 current->il_next = first_node(tmp);
353 if (current->il_next >= MAX_NUMNODES)
354 current->il_next = numa_node_id();
355 }
356}
357
358static void mpol_rebind_preferred(struct mempolicy *pol,
359 const nodemask_t *nodes,
360 enum mpol_rebind_step step)
361{
362 nodemask_t tmp;
363
364 if (pol->flags & MPOL_F_STATIC_NODES) {
365 int node = first_node(pol->w.user_nodemask);
366
367 if (node_isset(node, *nodes)) {
368 pol->v.preferred_node = node;
369 pol->flags &= ~MPOL_F_LOCAL;
370 } else
371 pol->flags |= MPOL_F_LOCAL;
372 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
373 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
374 pol->v.preferred_node = first_node(tmp);
375 } else if (!(pol->flags & MPOL_F_LOCAL)) {
376 pol->v.preferred_node = node_remap(pol->v.preferred_node,
377 pol->w.cpuset_mems_allowed,
378 *nodes);
379 pol->w.cpuset_mems_allowed = *nodes;
380 }
381}
382
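/*
 * mpol_rebind_policy - rebind @pol to the new cpuset mems_allowed in
 * @newmask.  Policies that did not remember a user nodemask are skipped
 * when nothing changed.  With MPOL_REBIND_STEP1/STEP2 the rebind happens
 * in two passes (tracked via MPOL_F_REBINDING) so that the policy's node
 * mask is never empty while the cpuset's mems are being moved.
 */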
399static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
400 enum mpol_rebind_step step)
401{
402 if (!pol)
403 return;
404 if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
405 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
406 return;
407
408 if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
409 return;
410
411 if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
412 BUG();
413
414 if (step == MPOL_REBIND_STEP1)
415 pol->flags |= MPOL_F_REBINDING;
416 else if (step == MPOL_REBIND_STEP2)
417 pol->flags &= ~MPOL_F_REBINDING;
418 else if (step >= MPOL_REBIND_NSTEP)
419 BUG();
420
421 mpol_ops[pol->mode].rebind(pol, newmask, step);
422}
431void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
432 enum mpol_rebind_step step)
433{
434 mpol_rebind_policy(tsk->mempolicy, new, step);
435}
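/*
 * Rebind each VMA policy in @mm to the new node mask.  Takes mmap_sem
 * for writing so that the VMA list and policies stay stable.
 */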
443void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
444{
445 struct vm_area_struct *vma;
446
447 down_write(&mm->mmap_sem);
448 for (vma = mm->mmap; vma; vma = vma->vm_next)
449 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
450 up_write(&mm->mmap_sem);
451}
452
453static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
454 [MPOL_DEFAULT] = {
455 .rebind = mpol_rebind_default,
456 },
457 [MPOL_INTERLEAVE] = {
458 .create = mpol_new_interleave,
459 .rebind = mpol_rebind_nodemask,
460 },
461 [MPOL_PREFERRED] = {
462 .create = mpol_new_preferred,
463 .rebind = mpol_rebind_preferred,
464 },
465 [MPOL_BIND] = {
466 .create = mpol_new_bind,
467 .rebind = mpol_rebind_nodemask,
468 },
469};
470
471static void migrate_page_add(struct page *page, struct list_head *pagelist,
472 unsigned long flags);
473
474struct queue_pages {
475 struct list_head *pagelist;
476 unsigned long flags;
477 nodemask_t *nmask;
478 struct vm_area_struct *prev;
479};
480
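/*
 * Scan the PTEs covered by @pmd in [addr, end), splitting transparent
 * huge pages as needed, and queue pages that match the caller's node
 * mask test for migration.
 */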
485static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
486 unsigned long end, struct mm_walk *walk)
487{
488 struct vm_area_struct *vma = walk->vma;
489 struct page *page;
490 struct queue_pages *qp = walk->private;
491 unsigned long flags = qp->flags;
492 int nid, ret;
493 pte_t *pte;
494 spinlock_t *ptl;
495
496 if (pmd_trans_huge(*pmd)) {
497 ptl = pmd_lock(walk->mm, pmd);
498 if (pmd_trans_huge(*pmd)) {
499 page = pmd_page(*pmd);
500 if (is_huge_zero_page(page)) {
501 spin_unlock(ptl);
502 split_huge_pmd(vma, pmd, addr);
503 } else {
504 get_page(page);
505 spin_unlock(ptl);
506 lock_page(page);
507 ret = split_huge_page(page);
508 unlock_page(page);
509 put_page(page);
510 if (ret)
511 return 0;
512 }
513 } else {
514 spin_unlock(ptl);
515 }
516 }
517
518retry:
519 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
520 for (; addr != end; pte++, addr += PAGE_SIZE) {
521 if (!pte_present(*pte))
522 continue;
523 page = vm_normal_page(vma, addr, *pte);
524 if (!page)
525 continue;
526
527
528
529
530 if (PageReserved(page))
531 continue;
532 nid = page_to_nid(page);
533 if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
534 continue;
535 if (PageTransCompound(page) && PageAnon(page)) {
536 get_page(page);
537 pte_unmap_unlock(pte, ptl);
538 lock_page(page);
539 ret = split_huge_page(page);
540 unlock_page(page);
541 put_page(page);
542
543 if (ret) {
544 pte = pte_offset_map_lock(walk->mm, pmd,
545 addr, &ptl);
546 continue;
547 }
548 goto retry;
549 }
550
551 migrate_page_add(page, qp->pagelist, flags);
552 }
553 pte_unmap_unlock(pte - 1, ptl);
554 cond_resched();
555 return 0;
556}
557
558static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
559 unsigned long addr, unsigned long end,
560 struct mm_walk *walk)
561{
562#ifdef CONFIG_HUGETLB_PAGE
563 struct queue_pages *qp = walk->private;
564 unsigned long flags = qp->flags;
565 int nid;
566 struct page *page;
567 spinlock_t *ptl;
568 pte_t entry;
569
570 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
571 entry = huge_ptep_get(pte);
572 if (!pte_present(entry))
573 goto unlock;
574 page = pte_page(entry);
575 nid = page_to_nid(page);
576 if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
577 goto unlock;
578
579 if (flags & (MPOL_MF_MOVE_ALL) ||
580 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
581 isolate_huge_page(page, qp->pagelist);
582unlock:
583 spin_unlock(ptl);
584#else
585 BUG();
586#endif
587 return 0;
588}
589
590#ifdef CONFIG_NUMA_BALANCING
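/*
 * Mark the range [addr, end) of @vma inaccessible so that future
 * accesses raise NUMA hinting faults.  Returns the number of PTEs
 * updated.  Only available with CONFIG_NUMA_BALANCING.
 */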
600unsigned long change_prot_numa(struct vm_area_struct *vma,
601 unsigned long addr, unsigned long end)
602{
603 int nr_updated;
604
605 nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
606 if (nr_updated)
607 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
608
609 return nr_updated;
610}
611#else
612static unsigned long change_prot_numa(struct vm_area_struct *vma,
613 unsigned long addr, unsigned long end)
614{
615 return 0;
616}
617#endif
618
619static int queue_pages_test_walk(unsigned long start, unsigned long end,
620 struct mm_walk *walk)
621{
622 struct vm_area_struct *vma = walk->vma;
623 struct queue_pages *qp = walk->private;
624 unsigned long endvma = vma->vm_end;
625 unsigned long flags = qp->flags;
626
627 if (!vma_migratable(vma))
628 return 1;
629
630 if (endvma > end)
631 endvma = end;
632 if (vma->vm_start > start)
633 start = vma->vm_start;
634
635 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
636 if (!vma->vm_next && vma->vm_end < end)
637 return -EFAULT;
638 if (qp->prev && qp->prev->vm_end < vma->vm_start)
639 return -EFAULT;
640 }
641
642 qp->prev = vma;
643
644 if (flags & MPOL_MF_LAZY) {
645
646 if (!is_vm_hugetlb_page(vma) &&
647 (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
648 !(vma->vm_flags & VM_MIXEDMAP))
649 change_prot_numa(vma, start, endvma);
650 return 1;
651 }
652
653
654 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
655 return 0;
656 return 1;
657}
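/*
 * Walk the page tables of @mm in [start, end) and add pages that match
 * (or, with MPOL_MF_INVERT, do not match) @nodes to @pagelist.
 * Returns 0 on success, or a negative error such as -EFAULT when an
 * unmapped hole is found and MPOL_MF_DISCONTIG_OK is not set.
 */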
666static int
667queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
668 nodemask_t *nodes, unsigned long flags,
669 struct list_head *pagelist)
670{
671 struct queue_pages qp = {
672 .pagelist = pagelist,
673 .flags = flags,
674 .nmask = nodes,
675 .prev = NULL,
676 };
677 struct mm_walk queue_pages_walk = {
678 .hugetlb_entry = queue_pages_hugetlb,
679 .pmd_entry = queue_pages_pte_range,
680 .test_walk = queue_pages_test_walk,
681 .mm = mm,
682 .private = &qp,
683 };
684
685 return walk_page_range(start, end, &queue_pages_walk);
686}
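/*
 * Install @pol on a single VMA, calling the vma's set_policy() hook for
 * mappings that provide one.  Caller holds mmap_sem for writing.
 */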
692static int vma_replace_policy(struct vm_area_struct *vma,
693 struct mempolicy *pol)
694{
695 int err;
696 struct mempolicy *old;
697 struct mempolicy *new;
698
699 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
700 vma->vm_start, vma->vm_end, vma->vm_pgoff,
701 vma->vm_ops, vma->vm_file,
702 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
703
704 new = mpol_dup(pol);
705 if (IS_ERR(new))
706 return PTR_ERR(new);
707
708 if (vma->vm_ops && vma->vm_ops->set_policy) {
709 err = vma->vm_ops->set_policy(vma, new);
710 if (err)
711 goto err_out;
712 }
713
714 old = vma->vm_policy;
715 vma->vm_policy = new;
716 mpol_put(old);
717
718 return 0;
719 err_out:
720 mpol_put(new);
721 return err;
722}
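/*
 * Apply @new_pol to every VMA overlapping [start, end), merging with
 * neighbours or splitting VMAs at the range boundaries as required.
 */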
725static int mbind_range(struct mm_struct *mm, unsigned long start,
726 unsigned long end, struct mempolicy *new_pol)
727{
728 struct vm_area_struct *next;
729 struct vm_area_struct *prev;
730 struct vm_area_struct *vma;
731 int err = 0;
732 pgoff_t pgoff;
733 unsigned long vmstart;
734 unsigned long vmend;
735
736 vma = find_vma(mm, start);
737 if (!vma || vma->vm_start > start)
738 return -EFAULT;
739
740 prev = vma->vm_prev;
741 if (start > vma->vm_start)
742 prev = vma;
743
744 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
745 next = vma->vm_next;
746 vmstart = max(start, vma->vm_start);
747 vmend = min(end, vma->vm_end);
748
749 if (mpol_equal(vma_policy(vma), new_pol))
750 continue;
751
752 pgoff = vma->vm_pgoff +
753 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
754 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
755 vma->anon_vma, vma->vm_file, pgoff,
756 new_pol, vma->vm_userfaultfd_ctx);
757 if (prev) {
758 vma = prev;
759 next = vma->vm_next;
760 if (mpol_equal(vma_policy(vma), new_pol))
761 continue;
762
763 goto replace;
764 }
765 if (vma->vm_start != vmstart) {
766 err = split_vma(vma->vm_mm, vma, vmstart, 1);
767 if (err)
768 goto out;
769 }
770 if (vma->vm_end != vmend) {
771 err = split_vma(vma->vm_mm, vma, vmend, 0);
772 if (err)
773 goto out;
774 }
775 replace:
776 err = vma_replace_policy(vma, new_pol);
777 if (err)
778 goto out;
779 }
780
781 out:
782 return err;
783}
784
785
786static long do_set_mempolicy(unsigned short mode, unsigned short flags,
787 nodemask_t *nodes)
788{
789 struct mempolicy *new, *old;
790 NODEMASK_SCRATCH(scratch);
791 int ret;
792
793 if (!scratch)
794 return -ENOMEM;
795
796 new = mpol_new(mode, flags, nodes);
797 if (IS_ERR(new)) {
798 ret = PTR_ERR(new);
799 goto out;
800 }
801
802 task_lock(current);
803 ret = mpol_set_nodemask(new, nodes, scratch);
804 if (ret) {
805 task_unlock(current);
806 mpol_put(new);
807 goto out;
808 }
809 old = current->mempolicy;
810 current->mempolicy = new;
811 if (new && new->mode == MPOL_INTERLEAVE &&
812 nodes_weight(new->v.nodes))
813 current->il_next = first_node(new->v.nodes);
814 task_unlock(current);
815 mpol_put(old);
816 ret = 0;
817out:
818 NODEMASK_SCRATCH_FREE(scratch);
819 return ret;
820}
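/*
 * Fill @nodes with the node mask that get_mempolicy() should report for
 * policy @p.  Called while holding task_lock() so the policy cannot
 * change underneath us.
 */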
827static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
828{
829 nodes_clear(*nodes);
830 if (p == &default_policy)
831 return;
832
833 switch (p->mode) {
834 case MPOL_BIND:
835
836 case MPOL_INTERLEAVE:
837 *nodes = p->v.nodes;
838 break;
839 case MPOL_PREFERRED:
840 if (!(p->flags & MPOL_F_LOCAL))
841 node_set(p->v.preferred_node, *nodes);
842
843 break;
844 default:
845 BUG();
846 }
847}
848
849static int lookup_node(unsigned long addr)
850{
851 struct page *p;
852 int err;
853
854 err = get_user_pages(addr & PAGE_MASK, 1, 0, 0, &p, NULL);
855 if (err >= 0) {
856 err = page_to_nid(p);
857 put_page(p);
858 }
859 return err;
860}
861
862
863static long do_get_mempolicy(int *policy, nodemask_t *nmask,
864 unsigned long addr, unsigned long flags)
865{
866 int err;
867 struct mm_struct *mm = current->mm;
868 struct vm_area_struct *vma = NULL;
869 struct mempolicy *pol = current->mempolicy;
870
871 if (flags &
872 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
873 return -EINVAL;
874
875 if (flags & MPOL_F_MEMS_ALLOWED) {
876 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
877 return -EINVAL;
878 *policy = 0;
879 task_lock(current);
880 *nmask = cpuset_current_mems_allowed;
881 task_unlock(current);
882 return 0;
883 }
884
885 if (flags & MPOL_F_ADDR) {
891 down_read(&mm->mmap_sem);
892 vma = find_vma_intersection(mm, addr, addr+1);
893 if (!vma) {
894 up_read(&mm->mmap_sem);
895 return -EFAULT;
896 }
897 if (vma->vm_ops && vma->vm_ops->get_policy)
898 pol = vma->vm_ops->get_policy(vma, addr);
899 else
900 pol = vma->vm_policy;
901 } else if (addr)
902 return -EINVAL;
903
904 if (!pol)
905 pol = &default_policy;
906
907 if (flags & MPOL_F_NODE) {
908 if (flags & MPOL_F_ADDR) {
909 err = lookup_node(addr);
910 if (err < 0)
911 goto out;
912 *policy = err;
913 } else if (pol == current->mempolicy &&
914 pol->mode == MPOL_INTERLEAVE) {
915 *policy = current->il_next;
916 } else {
917 err = -EINVAL;
918 goto out;
919 }
920 } else {
921 *policy = pol == &default_policy ? MPOL_DEFAULT :
922 pol->mode;
923
924
925
926
927 *policy |= (pol->flags & MPOL_MODE_FLAGS);
928 }
929
930 if (vma) {
		up_read(&current->mm->mmap_sem);
932 vma = NULL;
933 }
934
935 err = 0;
936 if (nmask) {
937 if (mpol_store_user_nodemask(pol)) {
938 *nmask = pol->w.user_nodemask;
939 } else {
940 task_lock(current);
941 get_policy_nodemask(pol, nmask);
942 task_unlock(current);
943 }
944 }
945
946 out:
947 mpol_cond_put(pol);
948 if (vma)
		up_read(&current->mm->mmap_sem);
950 return err;
951}
952
953#ifdef CONFIG_MIGRATION
954
955
956
957static void migrate_page_add(struct page *page, struct list_head *pagelist,
958 unsigned long flags)
959{
960
961
962
963 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
964 if (!isolate_lru_page(page)) {
965 list_add_tail(&page->lru, pagelist);
966 inc_zone_page_state(page, NR_ISOLATED_ANON +
967 page_is_file_cache(page));
968 }
969 }
970}
971
972static struct page *new_node_page(struct page *page, unsigned long node, int **x)
973{
974 if (PageHuge(page))
975 return alloc_huge_page_node(page_hstate(compound_head(page)),
976 node);
977 else
978 return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
979 __GFP_THISNODE, 0);
980}
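/*
 * Migrate the task's pages that currently live on node @source to node
 * @dest.  Returns the number of pages that could not be moved, or a
 * negative error.
 */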
986static int migrate_to_node(struct mm_struct *mm, int source, int dest,
987 int flags)
988{
989 nodemask_t nmask;
990 LIST_HEAD(pagelist);
991 int err = 0;
992
993 nodes_clear(nmask);
994 node_set(source, nmask);
1001 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1002 queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
1003 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1004
1005 if (!list_empty(&pagelist)) {
1006 err = migrate_pages(&pagelist, new_node_page, NULL, dest,
1007 MIGRATE_SYNC, MR_SYSCALL);
1008 if (err)
1009 putback_movable_pages(&pagelist);
1010 }
1011
1012 return err;
1013}
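/*
 * Move pages of @mm from the nodes in @from to the nodes in @to,
 * preserving the relative node relationship where possible.  Returns
 * the number of pages that could not be moved, or a negative error.
 */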
1021int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1022 const nodemask_t *to, int flags)
1023{
1024 int busy = 0;
1025 int err;
1026 nodemask_t tmp;
1027
1028 err = migrate_prep();
1029 if (err)
1030 return err;
1031
1032 down_read(&mm->mmap_sem);
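	/*
	 * Repeatedly pick one source node whose pages still have to move
	 * and the destination that node_remap() maps it to, migrate that
	 * pair, then clear the source from @tmp.  Moving one pair at a
	 * time keeps pages from piling up on a node that is itself still
	 * due to be emptied.
	 */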
1065 tmp = *from;
1066 while (!nodes_empty(tmp)) {
1067 int s,d;
1068 int source = NUMA_NO_NODE;
1069 int dest = 0;
1070
1071 for_each_node_mask(s, tmp) {
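			/*
			 * If the number of source and destination nodes
			 * differs, the relative node relationship cannot be
			 * preserved, so skip source nodes that also appear
			 * in the destination mask; their pages would only be
			 * shuffled needlessly.
			 */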
1088 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1089 (node_isset(s, *to)))
1090 continue;
1091
1092 d = node_remap(s, *from, *to);
1093 if (s == d)
1094 continue;
1095
1096 source = s;
1097 dest = d;
1098
1099
1100 if (!node_isset(dest, tmp))
1101 break;
1102 }
1103 if (source == NUMA_NO_NODE)
1104 break;
1105
1106 node_clear(source, tmp);
1107 err = migrate_to_node(mm, source, dest, flags);
1108 if (err > 0)
1109 busy += err;
1110 if (err < 0)
1111 break;
1112 }
1113 up_read(&mm->mmap_sem);
1114 if (err < 0)
1115 return err;
1116 return busy;
1117
1118}
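/*
 * Allocate a migration target page "near" the VMA that maps @page,
 * searching the VMA list from @start.  Hugetlb pages get a hugetlb
 * target; otherwise alloc_page_vma() applies the VMA (or task/system
 * default) policy.
 */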
1127static struct page *new_page(struct page *page, unsigned long start, int **x)
1128{
1129 struct vm_area_struct *vma;
1130 unsigned long uninitialized_var(address);
1131
1132 vma = find_vma(current->mm, start);
1133 while (vma) {
1134 address = page_address_in_vma(page, vma);
1135 if (address != -EFAULT)
1136 break;
1137 vma = vma->vm_next;
1138 }
1139
1140 if (PageHuge(page)) {
1141 BUG_ON(!vma);
1142 return alloc_huge_page_noerr(vma, address, 1);
1143 }
1144
1145
1146
1147 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1148}
1149#else
1150
1151static void migrate_page_add(struct page *page, struct list_head *pagelist,
1152 unsigned long flags)
1153{
1154}
1155
1156int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1157 const nodemask_t *to, int flags)
1158{
1159 return -ENOSYS;
1160}
1161
1162static struct page *new_page(struct page *page, unsigned long start, int **x)
1163{
1164 return NULL;
1165}
1166#endif
1167
1168static long do_mbind(unsigned long start, unsigned long len,
1169 unsigned short mode, unsigned short mode_flags,
1170 nodemask_t *nmask, unsigned long flags)
1171{
1172 struct mm_struct *mm = current->mm;
1173 struct mempolicy *new;
1174 unsigned long end;
1175 int err;
1176 LIST_HEAD(pagelist);
1177
1178 if (flags & ~(unsigned long)MPOL_MF_VALID)
1179 return -EINVAL;
1180 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1181 return -EPERM;
1182
1183 if (start & ~PAGE_MASK)
1184 return -EINVAL;
1185
1186 if (mode == MPOL_DEFAULT)
1187 flags &= ~MPOL_MF_STRICT;
1188
1189 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1190 end = start + len;
1191
1192 if (end < start)
1193 return -EINVAL;
1194 if (end == start)
1195 return 0;
1196
1197 new = mpol_new(mode, mode_flags, nmask);
1198 if (IS_ERR(new))
1199 return PTR_ERR(new);
1200
1201 if (flags & MPOL_MF_LAZY)
1202 new->flags |= MPOL_F_MOF;
1203
1204
1205
1206
1207
1208 if (!new)
1209 flags |= MPOL_MF_DISCONTIG_OK;
1210
1211 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1212 start, start + len, mode, mode_flags,
1213 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1214
1215 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1216
1217 err = migrate_prep();
1218 if (err)
1219 goto mpol_out;
1220 }
1221 {
1222 NODEMASK_SCRATCH(scratch);
1223 if (scratch) {
1224 down_write(&mm->mmap_sem);
1225 task_lock(current);
1226 err = mpol_set_nodemask(new, nmask, scratch);
1227 task_unlock(current);
1228 if (err)
1229 up_write(&mm->mmap_sem);
1230 } else
1231 err = -ENOMEM;
1232 NODEMASK_SCRATCH_FREE(scratch);
1233 }
1234 if (err)
1235 goto mpol_out;
1236
1237 err = queue_pages_range(mm, start, end, nmask,
1238 flags | MPOL_MF_INVERT, &pagelist);
1239 if (!err)
1240 err = mbind_range(mm, start, end, new);
1241
1242 if (!err) {
1243 int nr_failed = 0;
1244
1245 if (!list_empty(&pagelist)) {
1246 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1247 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1248 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1249 if (nr_failed)
1250 putback_movable_pages(&pagelist);
1251 }
1252
1253 if (nr_failed && (flags & MPOL_MF_STRICT))
1254 err = -EIO;
1255 } else
1256 putback_movable_pages(&pagelist);
1257
1258 up_write(&mm->mmap_sem);
1259 mpol_out:
1260 mpol_put(new);
1261 return err;
1262}
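/*
 * User-space interface with variable-sized bitmaps for node masks:
 * copy a node mask of up to @maxnode bits from user space into @nodes,
 * rejecting masks with bits set beyond what the kernel supports.
 */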
1269static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1270 unsigned long maxnode)
1271{
1272 unsigned long k;
1273 unsigned long nlongs;
1274 unsigned long endmask;
1275
1276 --maxnode;
1277 nodes_clear(*nodes);
1278 if (maxnode == 0 || !nmask)
1279 return 0;
1280 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1281 return -EINVAL;
1282
1283 nlongs = BITS_TO_LONGS(maxnode);
1284 if ((maxnode % BITS_PER_LONG) == 0)
1285 endmask = ~0UL;
1286 else
1287 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
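	/*
	 * When the user specified more nodes than the kernel supports,
	 * just verify that the unsupported part of the mask is all zero.
	 */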
1291 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1292 if (nlongs > PAGE_SIZE/sizeof(long))
1293 return -EINVAL;
1294 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1295 unsigned long t;
1296 if (get_user(t, nmask + k))
1297 return -EFAULT;
1298 if (k == nlongs - 1) {
1299 if (t & endmask)
1300 return -EINVAL;
1301 } else if (t)
1302 return -EINVAL;
1303 }
1304 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1305 endmask = ~0UL;
1306 }
1307
1308 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1309 return -EFAULT;
1310 nodes_addr(*nodes)[nlongs-1] &= endmask;
1311 return 0;
1312}
1313
1314
1315static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1316 nodemask_t *nodes)
1317{
1318 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1319 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1320
1321 if (copy > nbytes) {
1322 if (copy > PAGE_SIZE)
1323 return -EINVAL;
1324 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1325 return -EFAULT;
1326 copy = nbytes;
1327 }
1328 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1329}
1330
1331SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1332 unsigned long, mode, const unsigned long __user *, nmask,
1333 unsigned long, maxnode, unsigned, flags)
1334{
1335 nodemask_t nodes;
1336 int err;
1337 unsigned short mode_flags;
1338
1339 mode_flags = mode & MPOL_MODE_FLAGS;
1340 mode &= ~MPOL_MODE_FLAGS;
1341 if (mode >= MPOL_MAX)
1342 return -EINVAL;
1343 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1344 (mode_flags & MPOL_F_RELATIVE_NODES))
1345 return -EINVAL;
1346 err = get_nodes(&nodes, nmask, maxnode);
1347 if (err)
1348 return err;
1349 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1350}
1351
1352
1353SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1354 unsigned long, maxnode)
1355{
1356 int err;
1357 nodemask_t nodes;
1358 unsigned short flags;
1359
1360 flags = mode & MPOL_MODE_FLAGS;
1361 mode &= ~MPOL_MODE_FLAGS;
1362 if ((unsigned int)mode >= MPOL_MAX)
1363 return -EINVAL;
1364 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1365 return -EINVAL;
1366 err = get_nodes(&nodes, nmask, maxnode);
1367 if (err)
1368 return err;
1369 return do_set_mempolicy(mode, flags, &nodes);
1370}
1371
1372SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1373 const unsigned long __user *, old_nodes,
1374 const unsigned long __user *, new_nodes)
1375{
1376 const struct cred *cred = current_cred(), *tcred;
1377 struct mm_struct *mm = NULL;
1378 struct task_struct *task;
1379 nodemask_t task_nodes;
1380 int err;
1381 nodemask_t *old;
1382 nodemask_t *new;
1383 NODEMASK_SCRATCH(scratch);
1384
1385 if (!scratch)
1386 return -ENOMEM;
1387
1388 old = &scratch->mask1;
1389 new = &scratch->mask2;
1390
1391 err = get_nodes(old, old_nodes, maxnode);
1392 if (err)
1393 goto out;
1394
1395 err = get_nodes(new, new_nodes, maxnode);
1396 if (err)
1397 goto out;
1398
1399
1400 rcu_read_lock();
1401 task = pid ? find_task_by_vpid(pid) : current;
1402 if (!task) {
1403 rcu_read_unlock();
1404 err = -ESRCH;
1405 goto out;
1406 }
1407 get_task_struct(task);
1408
1409 err = -EINVAL;
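	/*
	 * Check if this process has the right to modify the specified
	 * process: it needs CAP_SYS_NICE, or its uid/euid must match the
	 * target's uid or saved uid.
	 */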
1417 tcred = __task_cred(task);
1418 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1419 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
1420 !capable(CAP_SYS_NICE)) {
1421 rcu_read_unlock();
1422 err = -EPERM;
1423 goto out_put;
1424 }
1425 rcu_read_unlock();
1426
1427 task_nodes = cpuset_mems_allowed(task);
1428
1429 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1430 err = -EPERM;
1431 goto out_put;
1432 }
1433
1434 if (!nodes_subset(*new, node_states[N_MEMORY])) {
1435 err = -EINVAL;
1436 goto out_put;
1437 }
1438
1439 err = security_task_movememory(task);
1440 if (err)
1441 goto out_put;
1442
1443 mm = get_task_mm(task);
1444 put_task_struct(task);
1445
1446 if (!mm) {
1447 err = -EINVAL;
1448 goto out;
1449 }
1450
1451 err = do_migrate_pages(mm, old, new,
1452 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1453
1454 mmput(mm);
1455out:
1456 NODEMASK_SCRATCH_FREE(scratch);
1457
1458 return err;
1459
1460out_put:
1461 put_task_struct(task);
1462 goto out;
1463
1464}
1465
1466
1467
1468SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1469 unsigned long __user *, nmask, unsigned long, maxnode,
1470 unsigned long, addr, unsigned long, flags)
1471{
1472 int err;
1473 int uninitialized_var(pval);
1474 nodemask_t nodes;
1475
1476 if (nmask != NULL && maxnode < MAX_NUMNODES)
1477 return -EINVAL;
1478
1479 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1480
1481 if (err)
1482 return err;
1483
1484 if (policy && put_user(pval, policy))
1485 return -EFAULT;
1486
1487 if (nmask)
1488 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1489
1490 return err;
1491}
1492
1493#ifdef CONFIG_COMPAT
1494
1495COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1496 compat_ulong_t __user *, nmask,
1497 compat_ulong_t, maxnode,
1498 compat_ulong_t, addr, compat_ulong_t, flags)
1499{
1500 long err;
1501 unsigned long __user *nm = NULL;
1502 unsigned long nr_bits, alloc_size;
1503 DECLARE_BITMAP(bm, MAX_NUMNODES);
1504
1505 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1506 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1507
1508 if (nmask)
1509 nm = compat_alloc_user_space(alloc_size);
1510
1511 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1512
1513 if (!err && nmask) {
1514 unsigned long copy_size;
1515 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1516 err = copy_from_user(bm, nm, copy_size);
1517
1518 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1519 err |= compat_put_bitmap(nmask, bm, nr_bits);
1520 }
1521
1522 return err;
1523}
1524
1525COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1526 compat_ulong_t, maxnode)
1527{
1528 long err = 0;
1529 unsigned long __user *nm = NULL;
1530 unsigned long nr_bits, alloc_size;
1531 DECLARE_BITMAP(bm, MAX_NUMNODES);
1532
1533 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1534 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1535
1536 if (nmask) {
1537 err = compat_get_bitmap(bm, nmask, nr_bits);
1538 nm = compat_alloc_user_space(alloc_size);
1539 err |= copy_to_user(nm, bm, alloc_size);
1540 }
1541
1542 if (err)
1543 return -EFAULT;
1544
1545 return sys_set_mempolicy(mode, nm, nr_bits+1);
1546}
1547
1548COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1549 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1550 compat_ulong_t, maxnode, compat_ulong_t, flags)
1551{
1552 long err = 0;
1553 unsigned long __user *nm = NULL;
1554 unsigned long nr_bits, alloc_size;
1555 nodemask_t bm;
1556
1557 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1558 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1559
1560 if (nmask) {
1561 err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
1562 nm = compat_alloc_user_space(alloc_size);
1563 err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
1564 }
1565
1566 if (err)
1567 return -EFAULT;
1568
1569 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1570}
1571
1572#endif
1573
1574struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1575 unsigned long addr)
1576{
1577 struct mempolicy *pol = NULL;
1578
1579 if (vma) {
1580 if (vma->vm_ops && vma->vm_ops->get_policy) {
1581 pol = vma->vm_ops->get_policy(vma, addr);
1582 } else if (vma->vm_policy) {
1583 pol = vma->vm_policy;
1591 if (mpol_needs_cond_ref(pol))
1592 mpol_get(pol);
1593 }
1594 }
1595
1596 return pol;
1597}
1598
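/*
 * get_vma_policy(@vma, @addr) - return the effective policy for @addr
 * in @vma, falling back to the current task's policy and finally the
 * system default.  Shared policies are returned with an extra reference
 * that the caller must drop with mpol_cond_put().
 */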
1611static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1612 unsigned long addr)
1613{
1614 struct mempolicy *pol = __get_vma_policy(vma, addr);
1615
1616 if (!pol)
1617 pol = get_task_policy(current);
1618
1619 return pol;
1620}
1621
1622bool vma_policy_mof(struct vm_area_struct *vma)
1623{
1624 struct mempolicy *pol;
1625
1626 if (vma->vm_ops && vma->vm_ops->get_policy) {
1627 bool ret = false;
1628
1629 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1630 if (pol && (pol->flags & MPOL_F_MOF))
1631 ret = true;
1632 mpol_cond_put(pol);
1633
1634 return ret;
1635 }
1636
1637 pol = vma->vm_policy;
1638 if (!pol)
1639 pol = get_task_policy(current);
1640
1641 return pol->flags & MPOL_F_MOF;
1642}
1643
1644static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1645{
1646 enum zone_type dynamic_policy_zone = policy_zone;
1647
1648 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1649
1658 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1659 dynamic_policy_zone = ZONE_MOVABLE;
1660
1661 return zone >= dynamic_policy_zone;
1662}
1663
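/*
 * Return the nodemask that should filter the zonelist for @policy, or
 * NULL when no filtering is needed.  Only MPOL_BIND ever returns a mask
 * here; the other modes are expressed through the zonelist itself.
 */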
1668static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1669{
1670
1671 if (unlikely(policy->mode == MPOL_BIND) &&
1672 apply_policy_zone(policy, gfp_zone(gfp)) &&
1673 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1674 return &policy->v.nodes;
1675
1676 return NULL;
1677}
1678
1679
1680static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1681 int nd)
1682{
1683 switch (policy->mode) {
1684 case MPOL_PREFERRED:
1685 if (!(policy->flags & MPOL_F_LOCAL))
1686 nd = policy->v.preferred_node;
1687 break;
1688 case MPOL_BIND:
1689
1690
1691
1692
1693
1694
1695 if (unlikely(gfp & __GFP_THISNODE) &&
1696 unlikely(!node_isset(nd, policy->v.nodes)))
1697 nd = first_node(policy->v.nodes);
1698 break;
1699 default:
1700 BUG();
1701 }
1702 return node_zonelist(nd, gfp);
1703}
1704
1705
1706static unsigned interleave_nodes(struct mempolicy *policy)
1707{
1708 unsigned nid, next;
1709 struct task_struct *me = current;
1710
1711 nid = me->il_next;
1712 next = next_node(nid, policy->v.nodes);
1713 if (next >= MAX_NUMNODES)
1714 next = first_node(policy->v.nodes);
1715 if (next < MAX_NUMNODES)
1716 me->il_next = next;
1717 return nid;
1718}
1719
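/*
 * Pick the node from which the next slab allocation should come,
 * according to the current task's memory policy.
 */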
1724unsigned int mempolicy_slab_node(void)
1725{
1726 struct mempolicy *policy;
1727 int node = numa_mem_id();
1728
1729 if (in_interrupt())
1730 return node;
1731
1732 policy = current->mempolicy;
1733 if (!policy || policy->flags & MPOL_F_LOCAL)
1734 return node;
1735
1736 switch (policy->mode) {
1737 case MPOL_PREFERRED:
1738
1739
1740
1741 return policy->v.preferred_node;
1742
1743 case MPOL_INTERLEAVE:
1744 return interleave_nodes(policy);
1745
1746 case MPOL_BIND: {
1747
1748
1749
1750
1751 struct zonelist *zonelist;
1752 struct zone *zone;
1753 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1754 zonelist = &NODE_DATA(node)->node_zonelists[0];
1755 (void)first_zones_zonelist(zonelist, highest_zoneidx,
1756 &policy->v.nodes,
1757 &zone);
1758 return zone ? zone->node : node;
1759 }
1760
1761 default:
1762 BUG();
1763 }
1764}
1765
1766
1767static unsigned offset_il_node(struct mempolicy *pol,
1768 struct vm_area_struct *vma, unsigned long off)
1769{
1770 unsigned nnodes = nodes_weight(pol->v.nodes);
1771 unsigned target;
1772 int c;
1773 int nid = NUMA_NO_NODE;
1774
1775 if (!nnodes)
1776 return numa_node_id();
1777 target = (unsigned int)off % nnodes;
1778 c = 0;
1779 do {
1780 nid = next_node(nid, pol->v.nodes);
1781 c++;
1782 } while (c <= target);
1783 return nid;
1784}
1785
1786
1787static inline unsigned interleave_nid(struct mempolicy *pol,
1788 struct vm_area_struct *vma, unsigned long addr, int shift)
1789{
1790 if (vma) {
1791 unsigned long off;
1792
1793
1794
1795
1796
1797
1798
1799
1800 BUG_ON(shift < PAGE_SHIFT);
1801 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1802 off += (addr - vma->vm_start) >> shift;
1803 return offset_il_node(pol, vma, off);
1804 } else
1805 return interleave_nodes(pol);
1806}
1807
1808
1809
1810
1811
1812int node_random(const nodemask_t *maskp)
1813{
1814 int w, bit = NUMA_NO_NODE;
1815
1816 w = nodes_weight(*maskp);
1817 if (w)
1818 bit = bitmap_ord_to_pos(maskp->bits,
1819 get_random_int() % w, MAX_NUMNODES);
1820 return bit;
1821}
1822
1823#ifdef CONFIG_HUGETLBFS
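/*
 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol, @nodemask) - return the
 * zonelist to use for a huge page allocation at @addr, and pass back the
 * applicable policy and, for MPOL_BIND, its nodemask.  For
 * MPOL_INTERLEAVE the node is derived from the huge-page-aligned offset.
 * *@mpol may carry a reference that the caller must release.
 */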
1839struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1840 gfp_t gfp_flags, struct mempolicy **mpol,
1841 nodemask_t **nodemask)
1842{
1843 struct zonelist *zl;
1844
1845 *mpol = get_vma_policy(vma, addr);
1846 *nodemask = NULL;
1847
1848 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1849 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1850 huge_page_shift(hstate_vma(vma))), gfp_flags);
1851 } else {
1852 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
1853 if ((*mpol)->mode == MPOL_BIND)
1854 *nodemask = &(*mpol)->v.nodes;
1855 }
1856 return zl;
1857}
1858
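/*
 * init_nodemask_of_mempolicy - if the current task has a memory policy,
 * initialise @mask to the node(s) that policy allocates from and return
 * true; otherwise return false.  Used for per-node hugetlb pool sizing.
 */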
1875bool init_nodemask_of_mempolicy(nodemask_t *mask)
1876{
1877 struct mempolicy *mempolicy;
1878 int nid;
1879
1880 if (!(mask && current->mempolicy))
1881 return false;
1882
1883 task_lock(current);
1884 mempolicy = current->mempolicy;
1885 switch (mempolicy->mode) {
1886 case MPOL_PREFERRED:
1887 if (mempolicy->flags & MPOL_F_LOCAL)
1888 nid = numa_node_id();
1889 else
1890 nid = mempolicy->v.preferred_node;
1891 init_nodemask_of_node(mask, nid);
1892 break;
1893
1894 case MPOL_BIND:
1895
1896 case MPOL_INTERLEAVE:
1897 *mask = mempolicy->v.nodes;
1898 break;
1899
1900 default:
1901 BUG();
1902 }
1903 task_unlock(current);
1904
1905 return true;
1906}
1907#endif
1908
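/*
 * mempolicy_nodemask_intersects - return true unless @tsk's BIND or
 * INTERLEAVE policy is disjoint from @mask.  MPOL_PREFERRED always
 * counts as intersecting, since the task may fall back to other nodes.
 */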
1919bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1920 const nodemask_t *mask)
1921{
1922 struct mempolicy *mempolicy;
1923 bool ret = true;
1924
1925 if (!mask)
1926 return ret;
1927 task_lock(tsk);
1928 mempolicy = tsk->mempolicy;
1929 if (!mempolicy)
1930 goto out;
1931
1932 switch (mempolicy->mode) {
1933 case MPOL_PREFERRED:
1934
1935
1936
1937
1938
1939
1940 break;
1941 case MPOL_BIND:
1942 case MPOL_INTERLEAVE:
1943 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1944 break;
1945 default:
1946 BUG();
1947 }
1948out:
1949 task_unlock(tsk);
1950 return ret;
1951}
1952
1953
1954
1955static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1956 unsigned nid)
1957{
1958 struct zonelist *zl;
1959 struct page *page;
1960
1961 zl = node_zonelist(nid, gfp);
1962 page = __alloc_pages(gfp, order, zl);
1963 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1964 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1965 return page;
1966}
1967
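/*
 * alloc_pages_vma - allocate a page for a VMA according to the NUMA
 * policy in effect at @addr.  @node is the preferred node hint and
 * @hugepage marks a transparent-huge-page-aligned allocation, which
 * tries to stay on a single node.  The allocation is retried if the
 * cpuset's mems_allowed changed while we were allocating.
 */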
1991struct page *
1992alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1993 unsigned long addr, int node, bool hugepage)
1994{
1995 struct mempolicy *pol;
1996 struct page *page;
1997 unsigned int cpuset_mems_cookie;
1998 struct zonelist *zl;
1999 nodemask_t *nmask;
2000
2001retry_cpuset:
2002 pol = get_vma_policy(vma, addr);
2003 cpuset_mems_cookie = read_mems_allowed_begin();
2004
2005 if (pol->mode == MPOL_INTERLEAVE) {
2006 unsigned nid;
2007
2008 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2009 mpol_cond_put(pol);
2010 page = alloc_page_interleave(gfp, order, nid);
2011 goto out;
2012 }
2013
2014 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2015 int hpage_node = node;
2027 if (pol->mode == MPOL_PREFERRED &&
2028 !(pol->flags & MPOL_F_LOCAL))
2029 hpage_node = pol->v.preferred_node;
2030
2031 nmask = policy_nodemask(gfp, pol);
2032 if (!nmask || node_isset(hpage_node, *nmask)) {
2033 mpol_cond_put(pol);
2034 page = __alloc_pages_node(hpage_node,
2035 gfp | __GFP_THISNODE, order);
2036 goto out;
2037 }
2038 }
2039
2040 nmask = policy_nodemask(gfp, pol);
2041 zl = policy_zonelist(gfp, pol, node);
2042 mpol_cond_put(pol);
2043 page = __alloc_pages_nodemask(gfp, order, zl, nmask);
2044out:
2045 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2046 goto retry_cpuset;
2047 return page;
2048}
2049
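/*
 * alloc_pages_current - allocate pages using the current task's policy,
 * or the system default policy in interrupt context or for
 * __GFP_THISNODE allocations.
 */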
2069struct page *alloc_pages_current(gfp_t gfp, unsigned order)
2070{
2071 struct mempolicy *pol = &default_policy;
2072 struct page *page;
2073 unsigned int cpuset_mems_cookie;
2074
2075 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2076 pol = get_task_policy(current);
2077
2078retry_cpuset:
2079 cpuset_mems_cookie = read_mems_allowed_begin();
2080
2081
2082
2083
2084
2085 if (pol->mode == MPOL_INTERLEAVE)
2086 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2087 else
2088 page = __alloc_pages_nodemask(gfp, order,
2089 policy_zonelist(gfp, pol, numa_node_id()),
2090 policy_nodemask(gfp, pol));
2091
2092 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2093 goto retry_cpuset;
2094
2095 return page;
2096}
2097EXPORT_SYMBOL(alloc_pages_current);
2098
2099int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2100{
2101 struct mempolicy *pol = mpol_dup(vma_policy(src));
2102
2103 if (IS_ERR(pol))
2104 return PTR_ERR(pol);
2105 dst->vm_policy = pol;
2106 return 0;
2107}
2108
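/*
 * Slow path of mempolicy duplication: allocate a copy of @old, rebinding
 * it to the current cpuset's mems_allowed if a cpuset rebind is in
 * progress, and return it with a fresh reference.
 */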
2121struct mempolicy *__mpol_dup(struct mempolicy *old)
2122{
2123 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2124
2125 if (!new)
2126 return ERR_PTR(-ENOMEM);
2127
2128
2129 if (old == current->mempolicy) {
2130 task_lock(current);
2131 *new = *old;
2132 task_unlock(current);
2133 } else
2134 *new = *old;
2135
2136 if (current_cpuset_is_being_rebound()) {
2137 nodemask_t mems = cpuset_mems_allowed(current);
2138 if (new->flags & MPOL_F_REBINDING)
2139 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2140 else
2141 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
2142 }
2143 atomic_set(&new->refcnt, 1);
2144 return new;
2145}
2146
2147
2148bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2149{
2150 if (!a || !b)
2151 return false;
2152 if (a->mode != b->mode)
2153 return false;
2154 if (a->flags != b->flags)
2155 return false;
2156 if (mpol_store_user_nodemask(a))
2157 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2158 return false;
2159
2160 switch (a->mode) {
2161 case MPOL_BIND:
2162
2163 case MPOL_INTERLEAVE:
2164 return !!nodes_equal(a->v.nodes, b->v.nodes);
2165 case MPOL_PREFERRED:
2166 return a->v.preferred_node == b->v.preferred_node;
2167 default:
2168 BUG();
2169 return false;
2170 }
2171}
2172
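/*
 * Shared memory backing store policy support: the policies attached to
 * ranges of a shared object are kept in a red-black tree of sp_node
 * entries protected by sp->lock.  sp_lookup() returns the first node
 * intersecting [start, end); the caller holds sp->lock.
 */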
2186static struct sp_node *
2187sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2188{
2189 struct rb_node *n = sp->root.rb_node;
2190
2191 while (n) {
2192 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2193
2194 if (start >= p->end)
2195 n = n->rb_right;
2196 else if (end <= p->start)
2197 n = n->rb_left;
2198 else
2199 break;
2200 }
2201 if (!n)
2202 return NULL;
2203 for (;;) {
2204 struct sp_node *w = NULL;
2205 struct rb_node *prev = rb_prev(n);
2206 if (!prev)
2207 break;
2208 w = rb_entry(prev, struct sp_node, nd);
2209 if (w->end <= start)
2210 break;
2211 n = prev;
2212 }
2213 return rb_entry(n, struct sp_node, nd);
2214}
2215
2216
2217
2218
2219
2220static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2221{
2222 struct rb_node **p = &sp->root.rb_node;
2223 struct rb_node *parent = NULL;
2224 struct sp_node *nd;
2225
2226 while (*p) {
2227 parent = *p;
2228 nd = rb_entry(parent, struct sp_node, nd);
2229 if (new->start < nd->start)
2230 p = &(*p)->rb_left;
2231 else if (new->end > nd->end)
2232 p = &(*p)->rb_right;
2233 else
2234 BUG();
2235 }
2236 rb_link_node(&new->nd, parent, p);
2237 rb_insert_color(&new->nd, &sp->root);
2238 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2239 new->policy ? new->policy->mode : 0);
2240}
2241
2242
2243struct mempolicy *
2244mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2245{
2246 struct mempolicy *pol = NULL;
2247 struct sp_node *sn;
2248
2249 if (!sp->root.rb_node)
2250 return NULL;
2251 read_lock(&sp->lock);
2252 sn = sp_lookup(sp, idx, idx+1);
2253 if (sn) {
2254 mpol_get(sn->policy);
2255 pol = sn->policy;
2256 }
2257 read_unlock(&sp->lock);
2258 return pol;
2259}
2260
2261static void sp_free(struct sp_node *n)
2262{
2263 mpol_put(n->policy);
2264 kmem_cache_free(sn_cache, n);
2265}
2266
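/*
 * mpol_misplaced - check whether @page sits on the node that the policy
 * for @addr in @vma wants it on.  Returns the node the page should be
 * migrated to, or -1 if it is already placed correctly or should not be
 * moved.  Called from NUMA-balancing hinting faults.
 */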
2284int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2285{
2286 struct mempolicy *pol;
2287 struct zone *zone;
2288 int curnid = page_to_nid(page);
2289 unsigned long pgoff;
2290 int thiscpu = raw_smp_processor_id();
2291 int thisnid = cpu_to_node(thiscpu);
2292 int polnid = -1;
2293 int ret = -1;
2294
2295 BUG_ON(!vma);
2296
2297 pol = get_vma_policy(vma, addr);
2298 if (!(pol->flags & MPOL_F_MOF))
2299 goto out;
2300
2301 switch (pol->mode) {
2302 case MPOL_INTERLEAVE:
2303 BUG_ON(addr >= vma->vm_end);
2304 BUG_ON(addr < vma->vm_start);
2305
2306 pgoff = vma->vm_pgoff;
2307 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2308 polnid = offset_il_node(pol, vma, pgoff);
2309 break;
2310
2311 case MPOL_PREFERRED:
2312 if (pol->flags & MPOL_F_LOCAL)
2313 polnid = numa_node_id();
2314 else
2315 polnid = pol->v.preferred_node;
2316 break;
2317
2318 case MPOL_BIND:
2325 if (node_isset(curnid, pol->v.nodes))
2326 goto out;
2327 (void)first_zones_zonelist(
2328 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2329 gfp_zone(GFP_HIGHUSER),
2330 &pol->v.nodes, &zone);
2331 polnid = zone->node;
2332 break;
2333
2334 default:
2335 BUG();
2336 }
2337
2338
2339 if (pol->flags & MPOL_F_MORON) {
2340 polnid = thisnid;
2341
2342 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2343 goto out;
2344 }
2345
2346 if (curnid != polnid)
2347 ret = polnid;
2348out:
2349 mpol_cond_put(pol);
2350
2351 return ret;
2352}
2353
2354static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2355{
	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2357 rb_erase(&n->nd, &sp->root);
2358 sp_free(n);
2359}
2360
2361static void sp_node_init(struct sp_node *node, unsigned long start,
2362 unsigned long end, struct mempolicy *pol)
2363{
2364 node->start = start;
2365 node->end = end;
2366 node->policy = pol;
2367}
2368
2369static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2370 struct mempolicy *pol)
2371{
2372 struct sp_node *n;
2373 struct mempolicy *newpol;
2374
2375 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2376 if (!n)
2377 return NULL;
2378
2379 newpol = mpol_dup(pol);
2380 if (IS_ERR(newpol)) {
2381 kmem_cache_free(sn_cache, n);
2382 return NULL;
2383 }
2384 newpol->flags |= MPOL_F_SHARED;
2385 sp_node_init(n, start, end, newpol);
2386
2387 return n;
2388}
2389
2390
2391static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2392 unsigned long end, struct sp_node *new)
2393{
2394 struct sp_node *n;
2395 struct sp_node *n_new = NULL;
2396 struct mempolicy *mpol_new = NULL;
2397 int ret = 0;
2398
2399restart:
2400 write_lock(&sp->lock);
2401 n = sp_lookup(sp, start, end);
2402
2403 while (n && n->start < end) {
2404 struct rb_node *next = rb_next(&n->nd);
2405 if (n->start >= start) {
2406 if (n->end <= end)
2407 sp_delete(sp, n);
2408 else
2409 n->start = end;
2410 } else {
2411
2412 if (n->end > end) {
2413 if (!n_new)
2414 goto alloc_new;
2415
2416 *mpol_new = *n->policy;
2417 atomic_set(&mpol_new->refcnt, 1);
2418 sp_node_init(n_new, end, n->end, mpol_new);
2419 n->end = start;
2420 sp_insert(sp, n_new);
2421 n_new = NULL;
2422 mpol_new = NULL;
2423 break;
2424 } else
2425 n->end = start;
2426 }
2427 if (!next)
2428 break;
2429 n = rb_entry(next, struct sp_node, nd);
2430 }
2431 if (new)
2432 sp_insert(sp, new);
2433 write_unlock(&sp->lock);
2434 ret = 0;
2435
2436err_out:
2437 if (mpol_new)
2438 mpol_put(mpol_new);
2439 if (n_new)
2440 kmem_cache_free(sn_cache, n_new);
2441
2442 return ret;
2443
2444alloc_new:
2445 write_unlock(&sp->lock);
2446 ret = -ENOMEM;
2447 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2448 if (!n_new)
2449 goto err_out;
2450 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2451 if (!mpol_new)
2452 goto err_out;
2453 goto restart;
2454}
2455
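/*
 * mpol_shared_policy_init - initialise shared policy @sp and, if @mpol
 * was supplied (for example from a tmpfs mount option), install it as
 * the policy for the whole object via a pseudo-VMA.  The reference on
 * @mpol is dropped before returning.
 */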
2466void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2467{
2468 int ret;
2469
2470 sp->root = RB_ROOT;
2471 rwlock_init(&sp->lock);
2472
2473 if (mpol) {
2474 struct vm_area_struct pvma;
2475 struct mempolicy *new;
2476 NODEMASK_SCRATCH(scratch);
2477
2478 if (!scratch)
2479 goto put_mpol;
2480
2481 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2482 if (IS_ERR(new))
2483 goto free_scratch;
2484
2485 task_lock(current);
2486 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2487 task_unlock(current);
2488 if (ret)
2489 goto put_new;
2490
2491
2492 memset(&pvma, 0, sizeof(struct vm_area_struct));
2493 pvma.vm_end = TASK_SIZE;
2494 mpol_set_shared_policy(sp, &pvma, new);
2495
2496put_new:
2497 mpol_put(new);
2498free_scratch:
2499 NODEMASK_SCRATCH_FREE(scratch);
2500put_mpol:
2501 mpol_put(mpol);
2502 }
2503}
2504
2505int mpol_set_shared_policy(struct shared_policy *info,
2506 struct vm_area_struct *vma, struct mempolicy *npol)
2507{
2508 int err;
2509 struct sp_node *new = NULL;
2510 unsigned long sz = vma_pages(vma);
2511
2512 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2513 vma->vm_pgoff,
2514 sz, npol ? npol->mode : -1,
2515 npol ? npol->flags : -1,
2516 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
2517
2518 if (npol) {
2519 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2520 if (!new)
2521 return -ENOMEM;
2522 }
2523 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2524 if (err && new)
2525 sp_free(new);
2526 return err;
2527}
2528
2529
2530void mpol_free_shared_policy(struct shared_policy *p)
2531{
2532 struct sp_node *n;
2533 struct rb_node *next;
2534
2535 if (!p->root.rb_node)
2536 return;
2537 write_lock(&p->lock);
2538 next = rb_first(&p->root);
2539 while (next) {
2540 n = rb_entry(next, struct sp_node, nd);
2541 next = rb_next(&n->nd);
2542 sp_delete(p, n);
2543 }
2544 write_unlock(&p->lock);
2545}
2546
2547#ifdef CONFIG_NUMA_BALANCING
2548static int __initdata numabalancing_override;
2549
2550static void __init check_numabalancing_enable(void)
2551{
2552 bool numabalancing_default = false;
2553
2554 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2555 numabalancing_default = true;
2556
2557
2558 if (numabalancing_override)
2559 set_numabalancing_state(numabalancing_override == 1);
2560
2561 if (num_online_nodes() > 1 && !numabalancing_override) {
2562 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2563 numabalancing_default ? "Enabling" : "Disabling");
2564 set_numabalancing_state(numabalancing_default);
2565 }
2566}
2567
2568static int __init setup_numabalancing(char *str)
2569{
2570 int ret = 0;
2571 if (!str)
2572 goto out;
2573
2574 if (!strcmp(str, "enable")) {
2575 numabalancing_override = 1;
2576 ret = 1;
2577 } else if (!strcmp(str, "disable")) {
2578 numabalancing_override = -1;
2579 ret = 1;
2580 }
2581out:
2582 if (!ret)
2583 pr_warn("Unable to parse numa_balancing=\n");
2584
2585 return ret;
2586}
2587__setup("numa_balancing=", setup_numabalancing);
2588#else
2589static inline void __init check_numabalancing_enable(void)
2590{
2591}
2592#endif
2593
2594
2595void __init numa_policy_init(void)
2596{
2597 nodemask_t interleave_nodes;
2598 unsigned long largest = 0;
2599 int nid, prefer = 0;
2600
2601 policy_cache = kmem_cache_create("numa_policy",
2602 sizeof(struct mempolicy),
2603 0, SLAB_PANIC, NULL);
2604
2605 sn_cache = kmem_cache_create("shared_policy_node",
2606 sizeof(struct sp_node),
2607 0, SLAB_PANIC, NULL);
2608
2609 for_each_node(nid) {
2610 preferred_node_policy[nid] = (struct mempolicy) {
2611 .refcnt = ATOMIC_INIT(1),
2612 .mode = MPOL_PREFERRED,
2613 .flags = MPOL_F_MOF | MPOL_F_MORON,
2614 .v = { .preferred_node = nid, },
2615 };
2616 }
2617
2618
2619
2620
2621
2622
2623 nodes_clear(interleave_nodes);
2624 for_each_node_state(nid, N_MEMORY) {
2625 unsigned long total_pages = node_present_pages(nid);
2626
2627
2628 if (largest < total_pages) {
2629 largest = total_pages;
2630 prefer = nid;
2631 }
2632
2633
2634 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2635 node_set(nid, interleave_nodes);
2636 }
2637
2638
2639 if (unlikely(nodes_empty(interleave_nodes)))
2640 node_set(prefer, interleave_nodes);
2641
2642 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2643 pr_err("%s: interleaving failed\n", __func__);
2644
2645 check_numabalancing_enable();
2646}
2647
2648
2649void numa_default_policy(void)
2650{
2651 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2652}
2653
2661static const char * const policy_modes[] =
2662{
2663 [MPOL_DEFAULT] = "default",
2664 [MPOL_PREFERRED] = "prefer",
2665 [MPOL_BIND] = "bind",
2666 [MPOL_INTERLEAVE] = "interleave",
2667 [MPOL_LOCAL] = "local",
2668};
2669
2670
2671#ifdef CONFIG_TMPFS
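/*
 * mpol_parse_str - parse a policy string of the form
 * <mode>[=<flags>][:<nodelist>] (as used for tmpfs mount options) into
 * a mempolicy.  On success *@mpol points to the new policy and 0 is
 * returned; on failure the input string is restored and 1 is returned.
 */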
2682int mpol_parse_str(char *str, struct mempolicy **mpol)
2683{
2684 struct mempolicy *new = NULL;
2685 unsigned short mode;
2686 unsigned short mode_flags;
2687 nodemask_t nodes;
2688 char *nodelist = strchr(str, ':');
2689 char *flags = strchr(str, '=');
2690 int err = 1;
2691
2692 if (nodelist) {
2693
2694 *nodelist++ = '\0';
2695 if (nodelist_parse(nodelist, nodes))
2696 goto out;
2697 if (!nodes_subset(nodes, node_states[N_MEMORY]))
2698 goto out;
2699 } else
2700 nodes_clear(nodes);
2701
2702 if (flags)
2703 *flags++ = '\0';
2704
2705 for (mode = 0; mode < MPOL_MAX; mode++) {
2706 if (!strcmp(str, policy_modes[mode])) {
2707 break;
2708 }
2709 }
2710 if (mode >= MPOL_MAX)
2711 goto out;
2712
2713 switch (mode) {
2714 case MPOL_PREFERRED:
2715
2716
2717
2718 if (nodelist) {
2719 char *rest = nodelist;
2720 while (isdigit(*rest))
2721 rest++;
2722 if (*rest)
2723 goto out;
2724 }
2725 break;
2726 case MPOL_INTERLEAVE:
2727
2728
2729
2730 if (!nodelist)
2731 nodes = node_states[N_MEMORY];
2732 break;
2733 case MPOL_LOCAL:
2734
2735
2736
2737 if (nodelist)
2738 goto out;
2739 mode = MPOL_PREFERRED;
2740 break;
2741 case MPOL_DEFAULT:
2742
2743
2744
2745 if (!nodelist)
2746 err = 0;
2747 goto out;
2748 case MPOL_BIND:
2749
2750
2751
2752 if (!nodelist)
2753 goto out;
2754 }
2755
2756 mode_flags = 0;
2757 if (flags) {
2758
2759
2760
2761
2762 if (!strcmp(flags, "static"))
2763 mode_flags |= MPOL_F_STATIC_NODES;
2764 else if (!strcmp(flags, "relative"))
2765 mode_flags |= MPOL_F_RELATIVE_NODES;
2766 else
2767 goto out;
2768 }
2769
2770 new = mpol_new(mode, mode_flags, &nodes);
2771 if (IS_ERR(new))
2772 goto out;
2773
2774
2775
2776
2777
2778 if (mode != MPOL_PREFERRED)
2779 new->v.nodes = nodes;
2780 else if (nodelist)
2781 new->v.preferred_node = first_node(nodes);
2782 else
2783 new->flags |= MPOL_F_LOCAL;
2784
2785
2786
2787
2788
2789 new->w.user_nodemask = nodes;
2790
2791 err = 0;
2792
2793out:
2794
2795 if (nodelist)
2796 *--nodelist = ':';
2797 if (flags)
2798 *--flags = '=';
2799 if (!err)
2800 *mpol = new;
2801 return err;
2802}
2803#endif
2804
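/*
 * mpol_to_str - format @pol into @buffer as <mode>[=<flags>][:<nodelist>].
 * @maxlen must leave room for the longest mode name, an optional flag
 * suffix and the node list.
 */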
2815void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2816{
2817 char *p = buffer;
2818 nodemask_t nodes = NODE_MASK_NONE;
2819 unsigned short mode = MPOL_DEFAULT;
2820 unsigned short flags = 0;
2821
2822 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2823 mode = pol->mode;
2824 flags = pol->flags;
2825 }
2826
2827 switch (mode) {
2828 case MPOL_DEFAULT:
2829 break;
2830 case MPOL_PREFERRED:
2831 if (flags & MPOL_F_LOCAL)
2832 mode = MPOL_LOCAL;
2833 else
2834 node_set(pol->v.preferred_node, nodes);
2835 break;
2836 case MPOL_BIND:
2837 case MPOL_INTERLEAVE:
2838 nodes = pol->v.nodes;
2839 break;
2840 default:
2841 WARN_ON_ONCE(1);
2842 snprintf(p, maxlen, "unknown");
2843 return;
2844 }
2845
2846 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
2847
2848 if (flags & MPOL_MODE_FLAGS) {
2849 p += snprintf(p, buffer + maxlen - p, "=");
2850
2851
2852
2853
2854 if (flags & MPOL_F_STATIC_NODES)
2855 p += snprintf(p, buffer + maxlen - p, "static");
2856 else if (flags & MPOL_F_RELATIVE_NODES)
2857 p += snprintf(p, buffer + maxlen - p, "relative");
2858 }
2859
2860 if (!nodes_empty(nodes))
2861 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
2862 nodemask_pr_args(&nodes));
2863}
2864