/*
 * Simple NUMA memory policy for the Linux kernel.
 *
 * Supports the default (local allocation), preferred, bind and
 * interleave policies, applied per task, per VMA or per shared
 * memory segment.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. A specific allocation for a zone below that is not
   policied. */
enum zone_type policy_zone = 0;

/*
 * run-time system-wide default policy => local allocation
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1),	/* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];
127
128struct mempolicy *get_task_policy(struct task_struct *p)
129{
130 struct mempolicy *pol = p->mempolicy;
131 int node;
132
133 if (pol)
134 return pol;
135
136 node = numa_node_id();
137 if (node != NUMA_NO_NODE) {
138 pol = &preferred_node_policy[node];
139
140 if (pol->mode)
141 return pol;
142 }
143
144 return &default_policy;
145}
146
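/*
 * Per-policy-mode operations: how to validate and install a nodemask when
 * the policy is created, and how to rebind it when cpuset mems change.
 * The table itself (mpol_ops[]) is defined after the individual handlers.
 */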
147static const struct mempolicy_operations {
148 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
164 enum mpol_rebind_step step);
165} mpol_ops[MPOL_MAX];
166
167static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
168{
169 return pol->flags & MPOL_MODE_FLAGS;
170}
171
172static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
173 const nodemask_t *rel)
174{
175 nodemask_t tmp;
176 nodes_fold(tmp, *orig, nodes_weight(*rel));
177 nodes_onto(*ret, tmp, *rel);
178}
179
180static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
181{
182 if (nodes_empty(*nodes))
183 return -EINVAL;
184 pol->v.nodes = *nodes;
185 return 0;
186}
187
188static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
189{
190 if (!nodes)
191 pol->flags |= MPOL_F_LOCAL;
192 else if (nodes_empty(*nodes))
193 return -EINVAL;
194 else
195 pol->v.preferred_node = first_node(*nodes);
196 return 0;
197}
198
199static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
200{
201 if (nodes_empty(*nodes))
202 return -EINVAL;
203 pol->v.nodes = *nodes;
204 return 0;
205}
/*
 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 * any, for the new policy.  mpol_new() has already validated the nodes
 * parameter with respect to the policy mode and flags.  But, we need to
 * handle an empty nodemask with MPOL_PREFERRED here.
 *
 * Must be called holding task's alloc_lock to protect task's mems_allowed
 * and mempolicy.  May also be called holding the mmap_semaphore for write.
 */
216static int mpol_set_nodemask(struct mempolicy *pol,
217 const nodemask_t *nodes, struct nodemask_scratch *nsc)
218{
219 int ret;
220
221
222 if (pol == NULL)
223 return 0;
224
225 nodes_and(nsc->mask1,
226 cpuset_current_mems_allowed, node_states[N_MEMORY]);
227
228 VM_BUG_ON(!nodes);
229 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
230 nodes = NULL;
231 else {
232 if (pol->flags & MPOL_F_RELATIVE_NODES)
233 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
234 else
235 nodes_and(nsc->mask2, *nodes, nsc->mask1);
236
237 if (mpol_store_user_nodemask(pol))
238 pol->w.user_nodemask = *nodes;
239 else
240 pol->w.cpuset_mems_allowed =
241 cpuset_current_mems_allowed;
242 }
243
244 if (nodes)
245 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
246 else
247 ret = mpol_ops[pol->mode].create(pol, NULL);
248 return ret;
249}
250
251
252
253
254
255static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
256 nodemask_t *nodes)
257{
258 struct mempolicy *policy;
259
260 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
261 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
262
263 if (mode == MPOL_DEFAULT) {
264 if (nodes && !nodes_empty(*nodes))
265 return ERR_PTR(-EINVAL);
266 return NULL;
267 }
268 VM_BUG_ON(!nodes);
269
270
271
272
273
274
275 if (mode == MPOL_PREFERRED) {
276 if (nodes_empty(*nodes)) {
277 if (((flags & MPOL_F_STATIC_NODES) ||
278 (flags & MPOL_F_RELATIVE_NODES)))
279 return ERR_PTR(-EINVAL);
280 }
281 } else if (mode == MPOL_LOCAL) {
282 if (!nodes_empty(*nodes) ||
283 (flags & MPOL_F_STATIC_NODES) ||
284 (flags & MPOL_F_RELATIVE_NODES))
285 return ERR_PTR(-EINVAL);
286 mode = MPOL_PREFERRED;
287 } else if (nodes_empty(*nodes))
288 return ERR_PTR(-EINVAL);
289 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
290 if (!policy)
291 return ERR_PTR(-ENOMEM);
292 atomic_set(&policy->refcnt, 1);
293 policy->mode = mode;
294 policy->flags = flags;
295
296 return policy;
297}
298
299
300void __mpol_put(struct mempolicy *p)
301{
302 if (!atomic_dec_and_test(&p->refcnt))
303 return;
304 kmem_cache_free(policy_cache, p);
305}
306
307static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
308 enum mpol_rebind_step step)
309{
310}
311
/*
 * step:
 *	MPOL_REBIND_ONCE  - do rebind work at once
 *	MPOL_REBIND_STEP1 - set all the new nodes
 *	MPOL_REBIND_STEP2 - clean all the disallowed nodes
 */
318static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
319 enum mpol_rebind_step step)
320{
321 nodemask_t tmp;
322
323 if (pol->flags & MPOL_F_STATIC_NODES)
324 nodes_and(tmp, pol->w.user_nodemask, *nodes);
325 else if (pol->flags & MPOL_F_RELATIVE_NODES)
326 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
327 else {
328
329
330
331
332 if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
333 nodes_remap(tmp, pol->v.nodes,
334 pol->w.cpuset_mems_allowed, *nodes);
335 pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
336 } else if (step == MPOL_REBIND_STEP2) {
337 tmp = pol->w.cpuset_mems_allowed;
338 pol->w.cpuset_mems_allowed = *nodes;
339 } else
340 BUG();
341 }
342
343 if (nodes_empty(tmp))
344 tmp = *nodes;
345
346 if (step == MPOL_REBIND_STEP1)
347 nodes_or(pol->v.nodes, pol->v.nodes, tmp);
348 else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
349 pol->v.nodes = tmp;
350 else
351 BUG();
352
353 if (!node_isset(current->il_next, tmp)) {
354 current->il_next = next_node_in(current->il_next, tmp);
355 if (current->il_next >= MAX_NUMNODES)
356 current->il_next = numa_node_id();
357 }
358}
359
360static void mpol_rebind_preferred(struct mempolicy *pol,
361 const nodemask_t *nodes,
362 enum mpol_rebind_step step)
363{
364 nodemask_t tmp;
365
366 if (pol->flags & MPOL_F_STATIC_NODES) {
367 int node = first_node(pol->w.user_nodemask);
368
369 if (node_isset(node, *nodes)) {
370 pol->v.preferred_node = node;
371 pol->flags &= ~MPOL_F_LOCAL;
372 } else
373 pol->flags |= MPOL_F_LOCAL;
374 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
375 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
376 pol->v.preferred_node = first_node(tmp);
377 } else if (!(pol->flags & MPOL_F_LOCAL)) {
378 pol->v.preferred_node = node_remap(pol->v.preferred_node,
379 pol->w.cpuset_mems_allowed,
380 *nodes);
381 pol->w.cpuset_mems_allowed = *nodes;
382 }
383}
384
/*
 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 *
 * If the read side has no lock to protect task->mempolicy, the write side
 * rebinds task->mempolicy in two steps: first set all the new nodes, then
 * clean up all the disallowed nodes.  This way the page allocator never
 * sees an empty nodemask while a rebind is in progress.
 *
 * If a lock does protect task->mempolicy on the read side, the rebind is
 * done directly in one step (MPOL_REBIND_ONCE).
 */
401static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
402 enum mpol_rebind_step step)
403{
404 if (!pol)
405 return;
406 if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
407 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
408 return;
409
410 if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
411 return;
412
413 if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
414 BUG();
415
416 if (step == MPOL_REBIND_STEP1)
417 pol->flags |= MPOL_F_REBINDING;
418 else if (step == MPOL_REBIND_STEP2)
419 pol->flags &= ~MPOL_F_REBINDING;
420 else if (step >= MPOL_REBIND_NSTEP)
421 BUG();
422
423 mpol_ops[pol->mode].rebind(pol, newmask, step);
424}
425
426
427
428
429
430
431
432
433void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
434 enum mpol_rebind_step step)
435{
436 mpol_rebind_policy(tsk->mempolicy, new, step);
437}
438
439
440
441
442
443
444
445void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
446{
447 struct vm_area_struct *vma;
448
449 down_write(&mm->mmap_sem);
450 for (vma = mm->mmap; vma; vma = vma->vm_next)
451 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
452 up_write(&mm->mmap_sem);
453}
454
455static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
456 [MPOL_DEFAULT] = {
457 .rebind = mpol_rebind_default,
458 },
459 [MPOL_INTERLEAVE] = {
460 .create = mpol_new_interleave,
461 .rebind = mpol_rebind_nodemask,
462 },
463 [MPOL_PREFERRED] = {
464 .create = mpol_new_preferred,
465 .rebind = mpol_rebind_preferred,
466 },
467 [MPOL_BIND] = {
468 .create = mpol_new_bind,
469 .rebind = mpol_rebind_nodemask,
470 },
471};
472
473static void migrate_page_add(struct page *page, struct list_head *pagelist,
474 unsigned long flags);
475
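/*
 * State shared by the queue_pages_*() page-table walk callbacks: the target
 * nodemask and MPOL_MF_* flags, plus the list that matching pages are
 * collected on for later migration.
 */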
476struct queue_pages {
477 struct list_head *pagelist;
478 unsigned long flags;
479 nodemask_t *nmask;
480 struct vm_area_struct *prev;
481};
482
/*
 * Scan through pages checking if pages follow certain conditions,
 * and move them to the pagelist if they do.
 */
487static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
488 unsigned long end, struct mm_walk *walk)
489{
490 struct vm_area_struct *vma = walk->vma;
491 struct page *page;
492 struct queue_pages *qp = walk->private;
493 unsigned long flags = qp->flags;
494 int nid, ret;
495 pte_t *pte;
496 spinlock_t *ptl;
497
498 if (pmd_trans_huge(*pmd)) {
499 ptl = pmd_lock(walk->mm, pmd);
500 if (pmd_trans_huge(*pmd)) {
501 page = pmd_page(*pmd);
502 if (is_huge_zero_page(page)) {
503 spin_unlock(ptl);
504 __split_huge_pmd(vma, pmd, addr, false, NULL);
505 } else {
506 get_page(page);
507 spin_unlock(ptl);
508 lock_page(page);
509 ret = split_huge_page(page);
510 unlock_page(page);
511 put_page(page);
512 if (ret)
513 return 0;
514 }
515 } else {
516 spin_unlock(ptl);
517 }
518 }
519
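	/* The huge pmd (if any) was split above; skip pmds that are unstable. */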
520 if (pmd_trans_unstable(pmd))
521 return 0;
522retry:
523 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
524 for (; addr != end; pte++, addr += PAGE_SIZE) {
525 if (!pte_present(*pte))
526 continue;
527 page = vm_normal_page(vma, addr, *pte);
528 if (!page)
529 continue;
530
531
532
533
534 if (PageReserved(page))
535 continue;
536 nid = page_to_nid(page);
537 if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
538 continue;
539 if (PageTransCompound(page)) {
540 get_page(page);
541 pte_unmap_unlock(pte, ptl);
542 lock_page(page);
543 ret = split_huge_page(page);
544 unlock_page(page);
545 put_page(page);
546
547 if (ret) {
548 pte = pte_offset_map_lock(walk->mm, pmd,
549 addr, &ptl);
550 continue;
551 }
552 goto retry;
553 }
554
555 migrate_page_add(page, qp->pagelist, flags);
556 }
557 pte_unmap_unlock(pte - 1, ptl);
558 cond_resched();
559 return 0;
560}
561
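/*
 * hugetlb counterpart of queue_pages_pte_range(): isolate huge pages that
 * match the nodemask/flags and add them to the pagelist.
 */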
562static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
563 unsigned long addr, unsigned long end,
564 struct mm_walk *walk)
565{
566#ifdef CONFIG_HUGETLB_PAGE
567 struct queue_pages *qp = walk->private;
568 unsigned long flags = qp->flags;
569 int nid;
570 struct page *page;
571 spinlock_t *ptl;
572 pte_t entry;
573
574 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
575 entry = huge_ptep_get(pte);
576 if (!pte_present(entry))
577 goto unlock;
578 page = pte_page(entry);
579 nid = page_to_nid(page);
580 if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
581 goto unlock;
582
583 if (flags & (MPOL_MF_MOVE_ALL) ||
584 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
585 isolate_huge_page(page, qp->pagelist);
586unlock:
587 spin_unlock(ptl);
588#else
589 BUG();
590#endif
591 return 0;
592}
593
594#ifdef CONFIG_NUMA_BALANCING
/*
 * This is used to mark a range of virtual addresses to be inaccessible.
 * These are later cleared by a NUMA hinting fault. Depending on these
 * faults, pages may be migrated for better NUMA placement.
 *
 * This is assuming that NUMA faults are handled using PROT_NONE. If
 * an architecture makes a different choice, it will need further
 * changes to the core.
 */
604unsigned long change_prot_numa(struct vm_area_struct *vma,
605 unsigned long addr, unsigned long end)
606{
607 int nr_updated;
608
609 nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
610 if (nr_updated)
611 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
612
613 return nr_updated;
614}
615#else
616static unsigned long change_prot_numa(struct vm_area_struct *vma,
617 unsigned long addr, unsigned long end)
618{
619 return 0;
620}
621#endif
622
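/*
 * walk_page_range() test callback: decide whether a vma should be scanned
 * at all, apply the MPOL_MF_LAZY protection change, and enforce the
 * "no holes" check unless MPOL_MF_DISCONTIG_OK is set.
 */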
623static int queue_pages_test_walk(unsigned long start, unsigned long end,
624 struct mm_walk *walk)
625{
626 struct vm_area_struct *vma = walk->vma;
627 struct queue_pages *qp = walk->private;
628 unsigned long endvma = vma->vm_end;
629 unsigned long flags = qp->flags;
630
631 if (!vma_migratable(vma))
632 return 1;
633
634 if (endvma > end)
635 endvma = end;
636 if (vma->vm_start > start)
637 start = vma->vm_start;
638
639 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
640 if (!vma->vm_next && vma->vm_end < end)
641 return -EFAULT;
642 if (qp->prev && qp->prev->vm_end < vma->vm_start)
643 return -EFAULT;
644 }
645
646 qp->prev = vma;
647
648 if (flags & MPOL_MF_LAZY) {
649
650 if (!is_vm_hugetlb_page(vma) &&
651 (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
652 !(vma->vm_flags & VM_MIXEDMAP))
653 change_prot_numa(vma, start, endvma);
654 return 1;
655 }
656
657
658 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
659 return 0;
660 return 1;
661}
662
663
664
665
666
667
668
669
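/*
 * Walk through page tables and collect pages to be migrated.
 *
 * If pages found in a given range are on a set of nodes (determined by
 * @nodes and @flags), they are isolated and queued to the pagelist which
 * is returned via @pagelist.
 */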
670static int
671queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
672 nodemask_t *nodes, unsigned long flags,
673 struct list_head *pagelist)
674{
675 struct queue_pages qp = {
676 .pagelist = pagelist,
677 .flags = flags,
678 .nmask = nodes,
679 .prev = NULL,
680 };
681 struct mm_walk queue_pages_walk = {
682 .hugetlb_entry = queue_pages_hugetlb,
683 .pmd_entry = queue_pages_pte_range,
684 .test_walk = queue_pages_test_walk,
685 .mm = mm,
686 .private = &qp,
687 };
688
689 return walk_page_range(start, end, &queue_pages_walk);
690}
691
692
693
694
695
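/*
 * Apply policy to a single VMA.
 * This must be called with the mmap_sem held for writing.
 */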
696static int vma_replace_policy(struct vm_area_struct *vma,
697 struct mempolicy *pol)
698{
699 int err;
700 struct mempolicy *old;
701 struct mempolicy *new;
702
703 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
704 vma->vm_start, vma->vm_end, vma->vm_pgoff,
705 vma->vm_ops, vma->vm_file,
706 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
707
708 new = mpol_dup(pol);
709 if (IS_ERR(new))
710 return PTR_ERR(new);
711
712 if (vma->vm_ops && vma->vm_ops->set_policy) {
713 err = vma->vm_ops->set_policy(vma, new);
714 if (err)
715 goto err_out;
716 }
717
718 old = vma->vm_policy;
719 vma->vm_policy = new;
720 mpol_put(old);
721
722 return 0;
723 err_out:
724 mpol_put(new);
725 return err;
726}
727
728
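/* Step 2: apply policy to a range and do splits. */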
729static int mbind_range(struct mm_struct *mm, unsigned long start,
730 unsigned long end, struct mempolicy *new_pol)
731{
732 struct vm_area_struct *next;
733 struct vm_area_struct *prev;
734 struct vm_area_struct *vma;
735 int err = 0;
736 pgoff_t pgoff;
737 unsigned long vmstart;
738 unsigned long vmend;
739
740 vma = find_vma(mm, start);
741 if (!vma || vma->vm_start > start)
742 return -EFAULT;
743
744 prev = vma->vm_prev;
745 if (start > vma->vm_start)
746 prev = vma;
747
748 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
749 next = vma->vm_next;
750 vmstart = max(start, vma->vm_start);
751 vmend = min(end, vma->vm_end);
752
753 if (mpol_equal(vma_policy(vma), new_pol))
754 continue;
755
756 pgoff = vma->vm_pgoff +
757 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
758 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
759 vma->anon_vma, vma->vm_file, pgoff,
760 new_pol, vma->vm_userfaultfd_ctx);
761 if (prev) {
762 vma = prev;
763 next = vma->vm_next;
764 if (mpol_equal(vma_policy(vma), new_pol))
765 continue;
766
767 goto replace;
768 }
769 if (vma->vm_start != vmstart) {
770 err = split_vma(vma->vm_mm, vma, vmstart, 1);
771 if (err)
772 goto out;
773 }
774 if (vma->vm_end != vmend) {
775 err = split_vma(vma->vm_mm, vma, vmend, 0);
776 if (err)
777 goto out;
778 }
779 replace:
780 err = vma_replace_policy(vma, new_pol);
781 if (err)
782 goto out;
783 }
784
785 out:
786 return err;
787}
788
789
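/* Set the process memory policy */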
790static long do_set_mempolicy(unsigned short mode, unsigned short flags,
791 nodemask_t *nodes)
792{
793 struct mempolicy *new, *old;
794 NODEMASK_SCRATCH(scratch);
795 int ret;
796
797 if (!scratch)
798 return -ENOMEM;
799
800 new = mpol_new(mode, flags, nodes);
801 if (IS_ERR(new)) {
802 ret = PTR_ERR(new);
803 goto out;
804 }
805
806 task_lock(current);
807 ret = mpol_set_nodemask(new, nodes, scratch);
808 if (ret) {
809 task_unlock(current);
810 mpol_put(new);
811 goto out;
812 }
813 old = current->mempolicy;
814 current->mempolicy = new;
815 if (new && new->mode == MPOL_INTERLEAVE &&
816 nodes_weight(new->v.nodes))
817 current->il_next = first_node(new->v.nodes);
818 task_unlock(current);
819 mpol_put(old);
820 ret = 0;
821out:
822 NODEMASK_SCRATCH_FREE(scratch);
823 return ret;
824}
825
826
827
828
829
830
831static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
832{
833 nodes_clear(*nodes);
834 if (p == &default_policy)
835 return;
836
837 switch (p->mode) {
838 case MPOL_BIND:
839
840 case MPOL_INTERLEAVE:
841 *nodes = p->v.nodes;
842 break;
843 case MPOL_PREFERRED:
844 if (!(p->flags & MPOL_F_LOCAL))
845 node_set(p->v.preferred_node, *nodes);
846
847 break;
848 default:
849 BUG();
850 }
851}
852
853static int lookup_node(unsigned long addr)
854{
855 struct page *p;
856 int err;
857
858 err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
859 if (err >= 0) {
860 err = page_to_nid(p);
861 put_page(p);
862 }
863 return err;
864}
865
866
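/* Retrieve NUMA policy */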
867static long do_get_mempolicy(int *policy, nodemask_t *nmask,
868 unsigned long addr, unsigned long flags)
869{
870 int err;
871 struct mm_struct *mm = current->mm;
872 struct vm_area_struct *vma = NULL;
873 struct mempolicy *pol = current->mempolicy;
874
875 if (flags &
876 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
877 return -EINVAL;
878
879 if (flags & MPOL_F_MEMS_ALLOWED) {
880 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
881 return -EINVAL;
882 *policy = 0;
883 task_lock(current);
884 *nmask = cpuset_current_mems_allowed;
885 task_unlock(current);
886 return 0;
887 }
888
889 if (flags & MPOL_F_ADDR) {
890
891
892
893
894
895 down_read(&mm->mmap_sem);
896 vma = find_vma_intersection(mm, addr, addr+1);
897 if (!vma) {
898 up_read(&mm->mmap_sem);
899 return -EFAULT;
900 }
901 if (vma->vm_ops && vma->vm_ops->get_policy)
902 pol = vma->vm_ops->get_policy(vma, addr);
903 else
904 pol = vma->vm_policy;
905 } else if (addr)
906 return -EINVAL;
907
908 if (!pol)
909 pol = &default_policy;
910
911 if (flags & MPOL_F_NODE) {
912 if (flags & MPOL_F_ADDR) {
913 err = lookup_node(addr);
914 if (err < 0)
915 goto out;
916 *policy = err;
917 } else if (pol == current->mempolicy &&
918 pol->mode == MPOL_INTERLEAVE) {
919 *policy = current->il_next;
920 } else {
921 err = -EINVAL;
922 goto out;
923 }
924 } else {
925 *policy = pol == &default_policy ? MPOL_DEFAULT :
926 pol->mode;
927
928
929
930
931 *policy |= (pol->flags & MPOL_MODE_FLAGS);
932 }
933
934 if (vma) {
		up_read(&current->mm->mmap_sem);
936 vma = NULL;
937 }
938
939 err = 0;
940 if (nmask) {
941 if (mpol_store_user_nodemask(pol)) {
942 *nmask = pol->w.user_nodemask;
943 } else {
944 task_lock(current);
945 get_policy_nodemask(pol, nmask);
946 task_unlock(current);
947 }
948 }
949
950 out:
951 mpol_cond_put(pol);
952 if (vma)
		up_read(&current->mm->mmap_sem);
954 return err;
955}
956
957#ifdef CONFIG_MIGRATION
958
959
960
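/*
 * page migration: isolate a page so it can be moved to another node.
 * Pages mapped by more than one process are only moved if MPOL_MF_MOVE_ALL
 * was specified.
 */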
961static void migrate_page_add(struct page *page, struct list_head *pagelist,
962 unsigned long flags)
963{
964
965
966
967 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
968 if (!isolate_lru_page(page)) {
969 list_add_tail(&page->lru, pagelist);
970 inc_node_page_state(page, NR_ISOLATED_ANON +
971 page_is_file_cache(page));
972 }
973 }
974}
975
976static struct page *new_node_page(struct page *page, unsigned long node, int **x)
977{
978 if (PageHuge(page))
979 return alloc_huge_page_node(page_hstate(compound_head(page)),
980 node);
981 else
982 return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
983 __GFP_THISNODE, 0);
984}
985
986
987
988
989
990static int migrate_to_node(struct mm_struct *mm, int source, int dest,
991 int flags)
992{
993 nodemask_t nmask;
994 LIST_HEAD(pagelist);
995 int err = 0;
996
997 nodes_clear(nmask);
998 node_set(source, nmask);
999
1000
1001
1002
1003
1004
1005 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1006 queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
1007 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1008
1009 if (!list_empty(&pagelist)) {
1010 err = migrate_pages(&pagelist, new_node_page, NULL, dest,
1011 MIGRATE_SYNC, MR_SYSCALL);
1012 if (err)
1013 putback_movable_pages(&pagelist);
1014 }
1015
1016 return err;
1017}
1018
1019
1020
1021
1022
1023
1024
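/*
 * Move pages between a set of nodes.
 *
 * Returns the number of pages that could not be moved.
 */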
1025int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1026 const nodemask_t *to, int flags)
1027{
1028 int busy = 0;
1029 int err;
1030 nodemask_t tmp;
1031
1032 err = migrate_prep();
1033 if (err)
1034 return err;
1035
1036 down_read(&mm->mmap_sem);
1037
	/*
	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
	 * bit in 'tmp', and return that <source, dest> pair for migration.
	 * The pair of nodemasks 'to' and 'from' define the map.
	 *
	 * If no pair of bits is found that way, fallback to picking some
	 * pair of 'source' and 'dest' bits that are not the same.  If the
	 * 'source' and 'dest' bits are the same, this represents a node
	 * that will be migrating to itself, so no pages need move.
	 *
	 * If no bits are left in 'tmp', or if all remaining bits left
	 * in 'tmp' correspond to the same bit in 'to', we break out of
	 * the loop (nothing left to migrate).
	 *
	 * This lets us pick a pair of nodes to migrate between, such that
	 * if possible the dest node is not already occupied by any other
	 * source node, minimizing the risk of overloading the memory on a
	 * node that would happen if we migrated incoming memory to a node
	 * before migrating outgoing memory source from that same node.
	 */
1069 tmp = *from;
1070 while (!nodes_empty(tmp)) {
1071 int s,d;
1072 int source = NUMA_NO_NODE;
1073 int dest = 0;
1074
1075 for_each_node_mask(s, tmp) {
			/*
			 * do_migrate_pages() tries to maintain the relative
			 * node relationship of the pages established between
			 * threads and memory areas.
			 *
			 * However, if the number of source nodes is not equal
			 * to the number of destination nodes we can not
			 * preserve this node relative relationship.  In that
			 * case, skip copying memory from a node that is in the
			 * destination mask.
			 *
			 * Example: [2,3,4] -> [3,4,5] moves everything.
			 *          [0-7]   -> [3,4,5] moves only 0,1,2,6,7.
			 */
1092 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1093 (node_isset(s, *to)))
1094 continue;
1095
1096 d = node_remap(s, *from, *to);
1097 if (s == d)
1098 continue;
1099
1100 source = s;
1101 dest = d;
1102
1103
1104 if (!node_isset(dest, tmp))
1105 break;
1106 }
1107 if (source == NUMA_NO_NODE)
1108 break;
1109
1110 node_clear(source, tmp);
1111 err = migrate_to_node(mm, source, dest, flags);
1112 if (err > 0)
1113 busy += err;
1114 if (err < 0)
1115 break;
1116 }
1117 up_read(&mm->mmap_sem);
1118 if (err < 0)
1119 return err;
1120 return busy;
1121
1122}
1123
1124
1125
1126
1127
1128
1129
1130
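/*
 * Allocate a new page for page migration, based on the policy of the vma
 * containing @start (or of a following vma that the page belongs to).
 */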
1131static struct page *new_page(struct page *page, unsigned long start, int **x)
1132{
1133 struct vm_area_struct *vma;
1134 unsigned long uninitialized_var(address);
1135
1136 vma = find_vma(current->mm, start);
1137 while (vma) {
1138 address = page_address_in_vma(page, vma);
1139 if (address != -EFAULT)
1140 break;
1141 vma = vma->vm_next;
1142 }
1143
1144 if (PageHuge(page)) {
1145 BUG_ON(!vma);
1146 return alloc_huge_page_noerr(vma, address, 1);
1147 }
1148
1149
1150
1151 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1152}
1153#else
1154
1155static void migrate_page_add(struct page *page, struct list_head *pagelist,
1156 unsigned long flags)
1157{
1158}
1159
1160int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1161 const nodemask_t *to, int flags)
1162{
1163 return -ENOSYS;
1164}
1165
1166static struct page *new_page(struct page *page, unsigned long start, int **x)
1167{
1168 return NULL;
1169}
1170#endif
1171
1172static long do_mbind(unsigned long start, unsigned long len,
1173 unsigned short mode, unsigned short mode_flags,
1174 nodemask_t *nmask, unsigned long flags)
1175{
1176 struct mm_struct *mm = current->mm;
1177 struct mempolicy *new;
1178 unsigned long end;
1179 int err;
1180 LIST_HEAD(pagelist);
1181
1182 if (flags & ~(unsigned long)MPOL_MF_VALID)
1183 return -EINVAL;
1184 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1185 return -EPERM;
1186
1187 if (start & ~PAGE_MASK)
1188 return -EINVAL;
1189
1190 if (mode == MPOL_DEFAULT)
1191 flags &= ~MPOL_MF_STRICT;
1192
1193 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1194 end = start + len;
1195
1196 if (end < start)
1197 return -EINVAL;
1198 if (end == start)
1199 return 0;
1200
1201 new = mpol_new(mode, mode_flags, nmask);
1202 if (IS_ERR(new))
1203 return PTR_ERR(new);
1204
1205 if (flags & MPOL_MF_LAZY)
1206 new->flags |= MPOL_F_MOF;
1207
1208
1209
1210
1211
1212 if (!new)
1213 flags |= MPOL_MF_DISCONTIG_OK;
1214
1215 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1216 start, start + len, mode, mode_flags,
1217 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1218
1219 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1220
1221 err = migrate_prep();
1222 if (err)
1223 goto mpol_out;
1224 }
1225 {
1226 NODEMASK_SCRATCH(scratch);
1227 if (scratch) {
1228 down_write(&mm->mmap_sem);
1229 task_lock(current);
1230 err = mpol_set_nodemask(new, nmask, scratch);
1231 task_unlock(current);
1232 if (err)
1233 up_write(&mm->mmap_sem);
1234 } else
1235 err = -ENOMEM;
1236 NODEMASK_SCRATCH_FREE(scratch);
1237 }
1238 if (err)
1239 goto mpol_out;
1240
1241 err = queue_pages_range(mm, start, end, nmask,
1242 flags | MPOL_MF_INVERT, &pagelist);
1243 if (!err)
1244 err = mbind_range(mm, start, end, new);
1245
1246 if (!err) {
1247 int nr_failed = 0;
1248
1249 if (!list_empty(&pagelist)) {
1250 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1251 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1252 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1253 if (nr_failed)
1254 putback_movable_pages(&pagelist);
1255 }
1256
1257 if (nr_failed && (flags & MPOL_MF_STRICT))
1258 err = -EIO;
1259 } else
1260 putback_movable_pages(&pagelist);
1261
1262 up_write(&mm->mmap_sem);
1263 mpol_out:
1264 mpol_put(new);
1265 return err;
1266}
1267
1268
1269
1270
1271
1272
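/*
 * User space interface with variable sized bitmaps for nodelists.
 *
 * Copy a node mask from user space, zero-checking any bits beyond the
 * kernel's MAX_NUMNODES.
 */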
1273static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1274 unsigned long maxnode)
1275{
1276 unsigned long k;
1277 unsigned long nlongs;
1278 unsigned long endmask;
1279
1280 --maxnode;
1281 nodes_clear(*nodes);
1282 if (maxnode == 0 || !nmask)
1283 return 0;
1284 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1285 return -EINVAL;
1286
1287 nlongs = BITS_TO_LONGS(maxnode);
1288 if ((maxnode % BITS_PER_LONG) == 0)
1289 endmask = ~0UL;
1290 else
1291 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1292
1293
1294
1295 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1296 if (nlongs > PAGE_SIZE/sizeof(long))
1297 return -EINVAL;
1298 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1299 unsigned long t;
1300 if (get_user(t, nmask + k))
1301 return -EFAULT;
1302 if (k == nlongs - 1) {
1303 if (t & endmask)
1304 return -EINVAL;
1305 } else if (t)
1306 return -EINVAL;
1307 }
1308 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1309 endmask = ~0UL;
1310 }
1311
1312 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1313 return -EFAULT;
1314 nodes_addr(*nodes)[nlongs-1] &= endmask;
1315 return 0;
1316}
1317
1318
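/* Copy a kernel node mask to user space */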
1319static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1320 nodemask_t *nodes)
1321{
1322 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1323 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1324
1325 if (copy > nbytes) {
1326 if (copy > PAGE_SIZE)
1327 return -EINVAL;
1328 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1329 return -EFAULT;
1330 copy = nbytes;
1331 }
1332 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1333}
1334
1335SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1336 unsigned long, mode, const unsigned long __user *, nmask,
1337 unsigned long, maxnode, unsigned, flags)
1338{
1339 nodemask_t nodes;
1340 int err;
1341 unsigned short mode_flags;
1342
1343 mode_flags = mode & MPOL_MODE_FLAGS;
1344 mode &= ~MPOL_MODE_FLAGS;
1345 if (mode >= MPOL_MAX)
1346 return -EINVAL;
1347 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1348 (mode_flags & MPOL_F_RELATIVE_NODES))
1349 return -EINVAL;
1350 err = get_nodes(&nodes, nmask, maxnode);
1351 if (err)
1352 return err;
1353 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1354}
1355
1356
1357SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1358 unsigned long, maxnode)
1359{
1360 int err;
1361 nodemask_t nodes;
1362 unsigned short flags;
1363
1364 flags = mode & MPOL_MODE_FLAGS;
1365 mode &= ~MPOL_MODE_FLAGS;
1366 if ((unsigned int)mode >= MPOL_MAX)
1367 return -EINVAL;
1368 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1369 return -EINVAL;
1370 err = get_nodes(&nodes, nmask, maxnode);
1371 if (err)
1372 return err;
1373 return do_set_mempolicy(mode, flags, &nodes);
1374}
1375
1376SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1377 const unsigned long __user *, old_nodes,
1378 const unsigned long __user *, new_nodes)
1379{
1380 const struct cred *cred = current_cred(), *tcred;
1381 struct mm_struct *mm = NULL;
1382 struct task_struct *task;
1383 nodemask_t task_nodes;
1384 int err;
1385 nodemask_t *old;
1386 nodemask_t *new;
1387 NODEMASK_SCRATCH(scratch);
1388
1389 if (!scratch)
1390 return -ENOMEM;
1391
1392 old = &scratch->mask1;
1393 new = &scratch->mask2;
1394
1395 err = get_nodes(old, old_nodes, maxnode);
1396 if (err)
1397 goto out;
1398
1399 err = get_nodes(new, new_nodes, maxnode);
1400 if (err)
1401 goto out;
1402
1403
1404 rcu_read_lock();
1405 task = pid ? find_task_by_vpid(pid) : current;
1406 if (!task) {
1407 rcu_read_unlock();
1408 err = -ESRCH;
1409 goto out;
1410 }
1411 get_task_struct(task);
1412
1413 err = -EINVAL;
1414
1415
1416
1417
1418
1419
1420
1421 tcred = __task_cred(task);
1422 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1423 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
1424 !capable(CAP_SYS_NICE)) {
1425 rcu_read_unlock();
1426 err = -EPERM;
1427 goto out_put;
1428 }
1429 rcu_read_unlock();
1430
1431 task_nodes = cpuset_mems_allowed(task);
1432
1433 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1434 err = -EPERM;
1435 goto out_put;
1436 }
1437
1438 if (!nodes_subset(*new, node_states[N_MEMORY])) {
1439 err = -EINVAL;
1440 goto out_put;
1441 }
1442
1443 err = security_task_movememory(task);
1444 if (err)
1445 goto out_put;
1446
1447 mm = get_task_mm(task);
1448 put_task_struct(task);
1449
1450 if (!mm) {
1451 err = -EINVAL;
1452 goto out;
1453 }
1454
1455 err = do_migrate_pages(mm, old, new,
1456 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1457
1458 mmput(mm);
1459out:
1460 NODEMASK_SCRATCH_FREE(scratch);
1461
1462 return err;
1463
1464out_put:
1465 put_task_struct(task);
1466 goto out;
1467
1468}
1469
1470
1471
1472SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1473 unsigned long __user *, nmask, unsigned long, maxnode,
1474 unsigned long, addr, unsigned long, flags)
1475{
1476 int err;
1477 int uninitialized_var(pval);
1478 nodemask_t nodes;
1479
1480 if (nmask != NULL && maxnode < MAX_NUMNODES)
1481 return -EINVAL;
1482
1483 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1484
1485 if (err)
1486 return err;
1487
1488 if (policy && put_user(pval, policy))
1489 return -EFAULT;
1490
1491 if (nmask)
1492 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1493
1494 return err;
1495}
1496
1497#ifdef CONFIG_COMPAT
1498
1499COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1500 compat_ulong_t __user *, nmask,
1501 compat_ulong_t, maxnode,
1502 compat_ulong_t, addr, compat_ulong_t, flags)
1503{
1504 long err;
1505 unsigned long __user *nm = NULL;
1506 unsigned long nr_bits, alloc_size;
1507 DECLARE_BITMAP(bm, MAX_NUMNODES);
1508
1509 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1510 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1511
1512 if (nmask)
1513 nm = compat_alloc_user_space(alloc_size);
1514
1515 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1516
1517 if (!err && nmask) {
1518 unsigned long copy_size;
1519 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1520 err = copy_from_user(bm, nm, copy_size);
1521
1522 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1523 err |= compat_put_bitmap(nmask, bm, nr_bits);
1524 }
1525
1526 return err;
1527}
1528
1529COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1530 compat_ulong_t, maxnode)
1531{
1532 unsigned long __user *nm = NULL;
1533 unsigned long nr_bits, alloc_size;
1534 DECLARE_BITMAP(bm, MAX_NUMNODES);
1535
1536 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1537 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1538
1539 if (nmask) {
1540 if (compat_get_bitmap(bm, nmask, nr_bits))
1541 return -EFAULT;
1542 nm = compat_alloc_user_space(alloc_size);
1543 if (copy_to_user(nm, bm, alloc_size))
1544 return -EFAULT;
1545 }
1546
1547 return sys_set_mempolicy(mode, nm, nr_bits+1);
1548}
1549
1550COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1551 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1552 compat_ulong_t, maxnode, compat_ulong_t, flags)
1553{
1554 unsigned long __user *nm = NULL;
1555 unsigned long nr_bits, alloc_size;
1556 nodemask_t bm;
1557
1558 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1559 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1560
1561 if (nmask) {
1562 if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1563 return -EFAULT;
1564 nm = compat_alloc_user_space(alloc_size);
1565 if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1566 return -EFAULT;
1567 }
1568
1569 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1570}
1571
1572#endif
1573
1574struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1575 unsigned long addr)
1576{
1577 struct mempolicy *pol = NULL;
1578
1579 if (vma) {
1580 if (vma->vm_ops && vma->vm_ops->get_policy) {
1581 pol = vma->vm_ops->get_policy(vma, addr);
1582 } else if (vma->vm_policy) {
1583 pol = vma->vm_policy;
1584
1585
1586
1587
1588
1589
1590
1591 if (mpol_needs_cond_ref(pol))
1592 mpol_get(pol);
1593 }
1594 }
1595
1596 return pol;
1597}
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
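/*
 * get_vma_policy(@vma, @addr)
 * @vma: virtual memory area whose policy is sought
 * @addr: address in @vma for shared policy lookup
 *
 * Returns effective policy for a VMA at specified address.
 * Falls back to current->mempolicy or system default policy, as necessary.
 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
 * count--added by the get_policy() vm_op, as appropriate--to protect against
 * freeing by another task.  It is the caller's responsibility to free the
 * extra reference for shared policies.
 */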
1611static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1612 unsigned long addr)
1613{
1614 struct mempolicy *pol = __get_vma_policy(vma, addr);
1615
1616 if (!pol)
1617 pol = get_task_policy(current);
1618
1619 return pol;
1620}
1621
1622bool vma_policy_mof(struct vm_area_struct *vma)
1623{
1624 struct mempolicy *pol;
1625
1626 if (vma->vm_ops && vma->vm_ops->get_policy) {
1627 bool ret = false;
1628
1629 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1630 if (pol && (pol->flags & MPOL_F_MOF))
1631 ret = true;
1632 mpol_cond_put(pol);
1633
1634 return ret;
1635 }
1636
1637 pol = vma->vm_policy;
1638 if (!pol)
1639 pol = get_task_policy(current);
1640
1641 return pol->flags & MPOL_F_MOF;
1642}
1643
1644static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1645{
1646 enum zone_type dynamic_policy_zone = policy_zone;
1647
1648 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1659 dynamic_policy_zone = ZONE_MOVABLE;
1660
1661 return zone >= dynamic_policy_zone;
1662}
1663
1664
1665
1666
1667
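/*
 * Return a nodemask representing a mempolicy for filtering nodes for
 * page allocation
 */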
1668static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1669{
1670
1671 if (unlikely(policy->mode == MPOL_BIND) &&
1672 apply_policy_zone(policy, gfp_zone(gfp)) &&
1673 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1674 return &policy->v.nodes;
1675
1676 return NULL;
1677}
1678
1679
1680static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1681 int nd)
1682{
1683 if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
1684 nd = policy->v.preferred_node;
1685 else {
1686
1687
1688
1689
1690
1691 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1692 }
1693
1694 return node_zonelist(nd, gfp);
1695}
1696
1697
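/* Do dynamic interleaving for a process */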
1698static unsigned interleave_nodes(struct mempolicy *policy)
1699{
1700 unsigned nid, next;
1701 struct task_struct *me = current;
1702
1703 nid = me->il_next;
1704 next = next_node_in(nid, policy->v.nodes);
1705 if (next < MAX_NUMNODES)
1706 me->il_next = next;
1707 return nid;
1708}
1709
1710
1711
1712
1713
1714unsigned int mempolicy_slab_node(void)
1715{
1716 struct mempolicy *policy;
1717 int node = numa_mem_id();
1718
1719 if (in_interrupt())
1720 return node;
1721
1722 policy = current->mempolicy;
1723 if (!policy || policy->flags & MPOL_F_LOCAL)
1724 return node;
1725
1726 switch (policy->mode) {
1727 case MPOL_PREFERRED:
1728
1729
1730
1731 return policy->v.preferred_node;
1732
1733 case MPOL_INTERLEAVE:
1734 return interleave_nodes(policy);
1735
1736 case MPOL_BIND: {
1737 struct zoneref *z;
1738
1739
1740
1741
1742
1743 struct zonelist *zonelist;
1744 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1745 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1746 z = first_zones_zonelist(zonelist, highest_zoneidx,
1747 &policy->v.nodes);
1748 return z->zone ? z->zone->node : node;
1749 }
1750
1751 default:
1752 BUG();
1753 }
1754}
1755
1756
1757
1758
1759
1760
1761static unsigned offset_il_node(struct mempolicy *pol,
1762 struct vm_area_struct *vma, unsigned long n)
1763{
1764 unsigned nnodes = nodes_weight(pol->v.nodes);
1765 unsigned target;
1766 int i;
1767 int nid;
1768
1769 if (!nnodes)
1770 return numa_node_id();
1771 target = (unsigned int)n % nnodes;
1772 nid = first_node(pol->v.nodes);
1773 for (i = 0; i < target; i++)
1774 nid = next_node(nid, pol->v.nodes);
1775 return nid;
1776}
1777
1778
1779static inline unsigned interleave_nid(struct mempolicy *pol,
1780 struct vm_area_struct *vma, unsigned long addr, int shift)
1781{
1782 if (vma) {
1783 unsigned long off;
1784
1785
1786
1787
1788
1789
1790
1791
1792 BUG_ON(shift < PAGE_SHIFT);
1793 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1794 off += (addr - vma->vm_start) >> shift;
1795 return offset_il_node(pol, vma, off);
1796 } else
1797 return interleave_nodes(pol);
1798}
1799
1800#ifdef CONFIG_HUGETLBFS
/*
 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
 * @vma: virtual memory area whose policy is sought
 * @addr: address in @vma for shared policy lookup and interleave policy
 * @gfp_flags: for requested zone
 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
 *
 * Returns a zonelist suitable for a huge page allocation and a pointer
 * to the struct mempolicy for conditional unref after allocation.
 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
 * @nodemask for filtering the zonelist.
 *
 * Must be protected by read_mems_allowed_begin()
 */
1816struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1817 gfp_t gfp_flags, struct mempolicy **mpol,
1818 nodemask_t **nodemask)
1819{
1820 struct zonelist *zl;
1821
1822 *mpol = get_vma_policy(vma, addr);
1823 *nodemask = NULL;
1824
1825 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1826 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1827 huge_page_shift(hstate_vma(vma))), gfp_flags);
1828 } else {
1829 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
1830 if ((*mpol)->mode == MPOL_BIND)
1831 *nodemask = &(*mpol)->v.nodes;
1832 }
1833 return zl;
1834}
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848
1849
1850
1851
1852bool init_nodemask_of_mempolicy(nodemask_t *mask)
1853{
1854 struct mempolicy *mempolicy;
1855 int nid;
1856
1857 if (!(mask && current->mempolicy))
1858 return false;
1859
1860 task_lock(current);
1861 mempolicy = current->mempolicy;
1862 switch (mempolicy->mode) {
1863 case MPOL_PREFERRED:
1864 if (mempolicy->flags & MPOL_F_LOCAL)
1865 nid = numa_node_id();
1866 else
1867 nid = mempolicy->v.preferred_node;
1868 init_nodemask_of_node(mask, nid);
1869 break;
1870
1871 case MPOL_BIND:
1872
1873 case MPOL_INTERLEAVE:
1874 *mask = mempolicy->v.nodes;
1875 break;
1876
1877 default:
1878 BUG();
1879 }
1880 task_unlock(current);
1881
1882 return true;
1883}
1884#endif
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1897 const nodemask_t *mask)
1898{
1899 struct mempolicy *mempolicy;
1900 bool ret = true;
1901
1902 if (!mask)
1903 return ret;
1904 task_lock(tsk);
1905 mempolicy = tsk->mempolicy;
1906 if (!mempolicy)
1907 goto out;
1908
1909 switch (mempolicy->mode) {
1910 case MPOL_PREFERRED:
1911
1912
1913
1914
1915
1916
1917 break;
1918 case MPOL_BIND:
1919 case MPOL_INTERLEAVE:
1920 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1921 break;
1922 default:
1923 BUG();
1924 }
1925out:
1926 task_unlock(tsk);
1927 return ret;
1928}
1929
1930
1931
1932static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1933 unsigned nid)
1934{
1935 struct zonelist *zl;
1936 struct page *page;
1937
1938 zl = node_zonelist(nid, gfp);
1939 page = __alloc_pages(gfp, order, zl);
1940 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1941 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1942 return page;
1943}
1944
/**
 * alloc_pages_vma - Allocate a page for a VMA.
 *
 * @gfp: GFP flags for the allocation (e.g. %GFP_USER, %GFP_HIGHUSER_MOVABLE).
 * @order: Order of the GFP allocation.
 * @vma: Pointer to VMA or NULL if not available.
 * @addr: Virtual address of the allocation.  Must be inside @vma.
 * @node: Which node to prefer for allocation (modulo policy).
 * @hugepage: For hugepages try only the preferred node if possible.
 *
 * This function allocates a page from the kernel page pool and applies
 * a NUMA policy associated with the VMA or the current process.
 * When @vma is not NULL the caller must hold the mmap_sem of the VMA's
 * mm_struct for reading to prevent it from going away.  Should be used
 * for all allocations for pages that will be mapped into user space.
 * Returns NULL when no page can be allocated.
 */
1968struct page *
1969alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1970 unsigned long addr, int node, bool hugepage)
1971{
1972 struct mempolicy *pol;
1973 struct page *page;
1974 unsigned int cpuset_mems_cookie;
1975 struct zonelist *zl;
1976 nodemask_t *nmask;
1977
1978retry_cpuset:
1979 pol = get_vma_policy(vma, addr);
1980 cpuset_mems_cookie = read_mems_allowed_begin();
1981
1982 if (pol->mode == MPOL_INTERLEAVE) {
1983 unsigned nid;
1984
1985 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
1986 mpol_cond_put(pol);
1987 page = alloc_page_interleave(gfp, order, nid);
1988 goto out;
1989 }
1990
1991 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
1992 int hpage_node = node;
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004 if (pol->mode == MPOL_PREFERRED &&
2005 !(pol->flags & MPOL_F_LOCAL))
2006 hpage_node = pol->v.preferred_node;
2007
2008 nmask = policy_nodemask(gfp, pol);
2009 if (!nmask || node_isset(hpage_node, *nmask)) {
2010 mpol_cond_put(pol);
2011 page = __alloc_pages_node(hpage_node,
2012 gfp | __GFP_THISNODE, order);
2013 goto out;
2014 }
2015 }
2016
2017 nmask = policy_nodemask(gfp, pol);
2018 zl = policy_zonelist(gfp, pol, node);
2019 page = __alloc_pages_nodemask(gfp, order, zl, nmask);
2020 mpol_cond_put(pol);
2021out:
2022 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2023 goto retry_cpuset;
2024 return page;
2025}
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046struct page *alloc_pages_current(gfp_t gfp, unsigned order)
2047{
2048 struct mempolicy *pol = &default_policy;
2049 struct page *page;
2050 unsigned int cpuset_mems_cookie;
2051
2052 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2053 pol = get_task_policy(current);
2054
2055retry_cpuset:
2056 cpuset_mems_cookie = read_mems_allowed_begin();
2057
2058
2059
2060
2061
2062 if (pol->mode == MPOL_INTERLEAVE)
2063 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2064 else
2065 page = __alloc_pages_nodemask(gfp, order,
2066 policy_zonelist(gfp, pol, numa_node_id()),
2067 policy_nodemask(gfp, pol));
2068
2069 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2070 goto retry_cpuset;
2071
2072 return page;
2073}
2074EXPORT_SYMBOL(alloc_pages_current);
2075
2076int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2077{
2078 struct mempolicy *pol = mpol_dup(vma_policy(src));
2079
2080 if (IS_ERR(pol))
2081 return PTR_ERR(pol);
2082 dst->vm_policy = pol;
2083 return 0;
2084}
2085
/*
 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
 * with the mems_allowed returned by cpuset_mems_allowed().  This
 * keeps mempolicies cpuset relative after its cpuset moves.  See
 * further kernel/cpuset.c update_nodemask().
 *
 * current's mempolicy may be rebound by another task (the task that
 * changes the cpuset's mems), so we needn't do rebind work for current.
 */

/* Slow path of a mempolicy duplicate */
2098struct mempolicy *__mpol_dup(struct mempolicy *old)
2099{
2100 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2101
2102 if (!new)
2103 return ERR_PTR(-ENOMEM);
2104
2105
2106 if (old == current->mempolicy) {
2107 task_lock(current);
2108 *new = *old;
2109 task_unlock(current);
2110 } else
2111 *new = *old;
2112
2113 if (current_cpuset_is_being_rebound()) {
2114 nodemask_t mems = cpuset_mems_allowed(current);
2115 if (new->flags & MPOL_F_REBINDING)
2116 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2117 else
2118 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
2119 }
2120 atomic_set(&new->refcnt, 1);
2121 return new;
2122}
2123
2124
2125bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2126{
2127 if (!a || !b)
2128 return false;
2129 if (a->mode != b->mode)
2130 return false;
2131 if (a->flags != b->flags)
2132 return false;
2133 if (mpol_store_user_nodemask(a))
2134 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2135 return false;
2136
2137 switch (a->mode) {
2138 case MPOL_BIND:
2139
2140 case MPOL_INTERLEAVE:
2141 return !!nodes_equal(a->v.nodes, b->v.nodes);
2142 case MPOL_PREFERRED:
2143 return a->v.preferred_node == b->v.preferred_node;
2144 default:
2145 BUG();
2146 return false;
2147 }
2148}
2149
/*
 * Shared memory backing store policy support.
 *
 * Remember policies even when nobody has shared memory mapped.
 * The policies are kept in Red-Black tree linked from the inode.
 * They are protected by the sp->lock rwlock, which should be held
 * for any accesses to the tree.
 */

/*
 * lookup first element intersecting start-end.  Caller holds sp->lock for
 * read lock.
 */
2163static struct sp_node *
2164sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2165{
2166 struct rb_node *n = sp->root.rb_node;
2167
2168 while (n) {
2169 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2170
2171 if (start >= p->end)
2172 n = n->rb_right;
2173 else if (end <= p->start)
2174 n = n->rb_left;
2175 else
2176 break;
2177 }
2178 if (!n)
2179 return NULL;
2180 for (;;) {
2181 struct sp_node *w = NULL;
2182 struct rb_node *prev = rb_prev(n);
2183 if (!prev)
2184 break;
2185 w = rb_entry(prev, struct sp_node, nd);
2186 if (w->end <= start)
2187 break;
2188 n = prev;
2189 }
2190 return rb_entry(n, struct sp_node, nd);
2191}
2192
2193
2194
2195
2196
2197static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2198{
2199 struct rb_node **p = &sp->root.rb_node;
2200 struct rb_node *parent = NULL;
2201 struct sp_node *nd;
2202
2203 while (*p) {
2204 parent = *p;
2205 nd = rb_entry(parent, struct sp_node, nd);
2206 if (new->start < nd->start)
2207 p = &(*p)->rb_left;
2208 else if (new->end > nd->end)
2209 p = &(*p)->rb_right;
2210 else
2211 BUG();
2212 }
2213 rb_link_node(&new->nd, parent, p);
2214 rb_insert_color(&new->nd, &sp->root);
2215 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2216 new->policy ? new->policy->mode : 0);
2217}
2218
2219
2220struct mempolicy *
2221mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2222{
2223 struct mempolicy *pol = NULL;
2224 struct sp_node *sn;
2225
2226 if (!sp->root.rb_node)
2227 return NULL;
2228 read_lock(&sp->lock);
2229 sn = sp_lookup(sp, idx, idx+1);
2230 if (sn) {
2231 mpol_get(sn->policy);
2232 pol = sn->policy;
2233 }
2234 read_unlock(&sp->lock);
2235 return pol;
2236}
2237
2238static void sp_free(struct sp_node *n)
2239{
2240 mpol_put(n->policy);
2241 kmem_cache_free(sn_cache, n);
2242}
2243
/**
 * mpol_misplaced - check whether current page node is valid in policy
 *
 * @page: page to be checked
 * @vma: vm area where page mapped
 * @addr: virtual address where page mapped
 *
 * Lookup current policy node id for vma,addr and "compare to" page's
 * node id.
 *
 * Returns:
 *	-1	- not misplaced, page is in the right node
 *	node	- node id where the page should be
 *
 * Policy determination "mimics" alloc_page_vma().
 * Called from fault path where we know the vma and faulting address.
 */
2261int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2262{
2263 struct mempolicy *pol;
2264 struct zoneref *z;
2265 int curnid = page_to_nid(page);
2266 unsigned long pgoff;
2267 int thiscpu = raw_smp_processor_id();
2268 int thisnid = cpu_to_node(thiscpu);
2269 int polnid = -1;
2270 int ret = -1;
2271
2272 BUG_ON(!vma);
2273
2274 pol = get_vma_policy(vma, addr);
2275 if (!(pol->flags & MPOL_F_MOF))
2276 goto out;
2277
2278 switch (pol->mode) {
2279 case MPOL_INTERLEAVE:
2280 BUG_ON(addr >= vma->vm_end);
2281 BUG_ON(addr < vma->vm_start);
2282
2283 pgoff = vma->vm_pgoff;
2284 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2285 polnid = offset_il_node(pol, vma, pgoff);
2286 break;
2287
2288 case MPOL_PREFERRED:
2289 if (pol->flags & MPOL_F_LOCAL)
2290 polnid = numa_node_id();
2291 else
2292 polnid = pol->v.preferred_node;
2293 break;
2294
2295 case MPOL_BIND:
2296
2297
2298
2299
2300
2301
2302
2303 if (node_isset(curnid, pol->v.nodes))
2304 goto out;
2305 z = first_zones_zonelist(
2306 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2307 gfp_zone(GFP_HIGHUSER),
2308 &pol->v.nodes);
2309 polnid = z->zone->node;
2310 break;
2311
2312 default:
2313 BUG();
2314 }
2315
2316
2317 if (pol->flags & MPOL_F_MORON) {
2318 polnid = thisnid;
2319
2320 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2321 goto out;
2322 }
2323
2324 if (curnid != polnid)
2325 ret = polnid;
2326out:
2327 mpol_cond_put(pol);
2328
2329 return ret;
2330}
2331
2332
2333
2334
2335
2336
2337
2338void mpol_put_task_policy(struct task_struct *task)
2339{
2340 struct mempolicy *pol;
2341
2342 task_lock(task);
2343 pol = task->mempolicy;
2344 task->mempolicy = NULL;
2345 task_unlock(task);
2346 mpol_put(pol);
2347}
2348
2349static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2350{
	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2352 rb_erase(&n->nd, &sp->root);
2353 sp_free(n);
2354}
2355
2356static void sp_node_init(struct sp_node *node, unsigned long start,
2357 unsigned long end, struct mempolicy *pol)
2358{
2359 node->start = start;
2360 node->end = end;
2361 node->policy = pol;
2362}
2363
2364static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2365 struct mempolicy *pol)
2366{
2367 struct sp_node *n;
2368 struct mempolicy *newpol;
2369
2370 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2371 if (!n)
2372 return NULL;
2373
2374 newpol = mpol_dup(pol);
2375 if (IS_ERR(newpol)) {
2376 kmem_cache_free(sn_cache, n);
2377 return NULL;
2378 }
2379 newpol->flags |= MPOL_F_SHARED;
2380 sp_node_init(n, start, end, newpol);
2381
2382 return n;
2383}
2384
2385
2386static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2387 unsigned long end, struct sp_node *new)
2388{
2389 struct sp_node *n;
2390 struct sp_node *n_new = NULL;
2391 struct mempolicy *mpol_new = NULL;
2392 int ret = 0;
2393
2394restart:
2395 write_lock(&sp->lock);
2396 n = sp_lookup(sp, start, end);
2397
2398 while (n && n->start < end) {
2399 struct rb_node *next = rb_next(&n->nd);
2400 if (n->start >= start) {
2401 if (n->end <= end)
2402 sp_delete(sp, n);
2403 else
2404 n->start = end;
2405 } else {
2406
2407 if (n->end > end) {
2408 if (!n_new)
2409 goto alloc_new;
2410
2411 *mpol_new = *n->policy;
2412 atomic_set(&mpol_new->refcnt, 1);
2413 sp_node_init(n_new, end, n->end, mpol_new);
2414 n->end = start;
2415 sp_insert(sp, n_new);
2416 n_new = NULL;
2417 mpol_new = NULL;
2418 break;
2419 } else
2420 n->end = start;
2421 }
2422 if (!next)
2423 break;
2424 n = rb_entry(next, struct sp_node, nd);
2425 }
2426 if (new)
2427 sp_insert(sp, new);
2428 write_unlock(&sp->lock);
2429 ret = 0;
2430
2431err_out:
2432 if (mpol_new)
2433 mpol_put(mpol_new);
2434 if (n_new)
2435 kmem_cache_free(sn_cache, n_new);
2436
2437 return ret;
2438
2439alloc_new:
2440 write_unlock(&sp->lock);
2441 ret = -ENOMEM;
2442 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2443 if (!n_new)
2444 goto err_out;
2445 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2446 if (!mpol_new)
2447 goto err_out;
2448 goto restart;
2449}
2450
2451
2452
2453
2454
2455
2456
2457
2458
2459
2460
2461void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2462{
2463 int ret;
2464
2465 sp->root = RB_ROOT;
2466 rwlock_init(&sp->lock);
2467
2468 if (mpol) {
2469 struct vm_area_struct pvma;
2470 struct mempolicy *new;
2471 NODEMASK_SCRATCH(scratch);
2472
2473 if (!scratch)
2474 goto put_mpol;
2475
2476 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2477 if (IS_ERR(new))
2478 goto free_scratch;
2479
2480 task_lock(current);
2481 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2482 task_unlock(current);
2483 if (ret)
2484 goto put_new;
2485
2486
2487 memset(&pvma, 0, sizeof(struct vm_area_struct));
2488 pvma.vm_end = TASK_SIZE;
2489 mpol_set_shared_policy(sp, &pvma, new);
2490
2491put_new:
2492 mpol_put(new);
2493free_scratch:
2494 NODEMASK_SCRATCH_FREE(scratch);
2495put_mpol:
2496 mpol_put(mpol);
2497 }
2498}
2499
2500int mpol_set_shared_policy(struct shared_policy *info,
2501 struct vm_area_struct *vma, struct mempolicy *npol)
2502{
2503 int err;
2504 struct sp_node *new = NULL;
2505 unsigned long sz = vma_pages(vma);
2506
2507 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2508 vma->vm_pgoff,
2509 sz, npol ? npol->mode : -1,
2510 npol ? npol->flags : -1,
2511 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
2512
2513 if (npol) {
2514 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2515 if (!new)
2516 return -ENOMEM;
2517 }
2518 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2519 if (err && new)
2520 sp_free(new);
2521 return err;
2522}
2523
2524
2525void mpol_free_shared_policy(struct shared_policy *p)
2526{
2527 struct sp_node *n;
2528 struct rb_node *next;
2529
2530 if (!p->root.rb_node)
2531 return;
2532 write_lock(&p->lock);
2533 next = rb_first(&p->root);
2534 while (next) {
2535 n = rb_entry(next, struct sp_node, nd);
2536 next = rb_next(&n->nd);
2537 sp_delete(p, n);
2538 }
2539 write_unlock(&p->lock);
2540}
2541
2542#ifdef CONFIG_NUMA_BALANCING
2543static int __initdata numabalancing_override;
2544
2545static void __init check_numabalancing_enable(void)
2546{
2547 bool numabalancing_default = false;
2548
2549 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2550 numabalancing_default = true;
2551
2552
2553 if (numabalancing_override)
2554 set_numabalancing_state(numabalancing_override == 1);
2555
2556 if (num_online_nodes() > 1 && !numabalancing_override) {
2557 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2558 numabalancing_default ? "Enabling" : "Disabling");
2559 set_numabalancing_state(numabalancing_default);
2560 }
2561}
2562
2563static int __init setup_numabalancing(char *str)
2564{
2565 int ret = 0;
2566 if (!str)
2567 goto out;
2568
2569 if (!strcmp(str, "enable")) {
2570 numabalancing_override = 1;
2571 ret = 1;
2572 } else if (!strcmp(str, "disable")) {
2573 numabalancing_override = -1;
2574 ret = 1;
2575 }
2576out:
2577 if (!ret)
2578 pr_warn("Unable to parse numa_balancing=\n");
2579
2580 return ret;
2581}
2582__setup("numa_balancing=", setup_numabalancing);
2583#else
2584static inline void __init check_numabalancing_enable(void)
2585{
2586}
2587#endif
2588
2589
2590void __init numa_policy_init(void)
2591{
2592 nodemask_t interleave_nodes;
2593 unsigned long largest = 0;
2594 int nid, prefer = 0;
2595
2596 policy_cache = kmem_cache_create("numa_policy",
2597 sizeof(struct mempolicy),
2598 0, SLAB_PANIC, NULL);
2599
2600 sn_cache = kmem_cache_create("shared_policy_node",
2601 sizeof(struct sp_node),
2602 0, SLAB_PANIC, NULL);
2603
2604 for_each_node(nid) {
2605 preferred_node_policy[nid] = (struct mempolicy) {
2606 .refcnt = ATOMIC_INIT(1),
2607 .mode = MPOL_PREFERRED,
2608 .flags = MPOL_F_MOF | MPOL_F_MORON,
2609 .v = { .preferred_node = nid, },
2610 };
2611 }
2612
2613
2614
2615
2616
2617
2618 nodes_clear(interleave_nodes);
2619 for_each_node_state(nid, N_MEMORY) {
2620 unsigned long total_pages = node_present_pages(nid);
2621
2622
2623 if (largest < total_pages) {
2624 largest = total_pages;
2625 prefer = nid;
2626 }
2627
2628
2629 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2630 node_set(nid, interleave_nodes);
2631 }
2632
2633
2634 if (unlikely(nodes_empty(interleave_nodes)))
2635 node_set(prefer, interleave_nodes);
2636
2637 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2638 pr_err("%s: interleaving failed\n", __func__);
2639
2640 check_numabalancing_enable();
2641}
2642
2643
2644void numa_default_policy(void)
2645{
2646 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2647}
2648
2649
2650
2651
2652
2653
2654
2655
2656static const char * const policy_modes[] =
2657{
2658 [MPOL_DEFAULT] = "default",
2659 [MPOL_PREFERRED] = "prefer",
2660 [MPOL_BIND] = "bind",
2661 [MPOL_INTERLEAVE] = "interleave",
2662 [MPOL_LOCAL] = "local",
2663};
2664
2665
2666#ifdef CONFIG_TMPFS
2667
2668
2669
2670
2671
2672
2673
2674
2675
2676
2677int mpol_parse_str(char *str, struct mempolicy **mpol)
2678{
2679 struct mempolicy *new = NULL;
2680 unsigned short mode;
2681 unsigned short mode_flags;
2682 nodemask_t nodes;
2683 char *nodelist = strchr(str, ':');
2684 char *flags = strchr(str, '=');
2685 int err = 1;
2686
2687 if (nodelist) {
2688
2689 *nodelist++ = '\0';
2690 if (nodelist_parse(nodelist, nodes))
2691 goto out;
2692 if (!nodes_subset(nodes, node_states[N_MEMORY]))
2693 goto out;
2694 } else
2695 nodes_clear(nodes);
2696
2697 if (flags)
2698 *flags++ = '\0';
2699
2700 for (mode = 0; mode < MPOL_MAX; mode++) {
2701 if (!strcmp(str, policy_modes[mode])) {
2702 break;
2703 }
2704 }
2705 if (mode >= MPOL_MAX)
2706 goto out;
2707
2708 switch (mode) {
2709 case MPOL_PREFERRED:
2710
2711
2712
2713 if (nodelist) {
2714 char *rest = nodelist;
2715 while (isdigit(*rest))
2716 rest++;
2717 if (*rest)
2718 goto out;
2719 }
2720 break;
2721 case MPOL_INTERLEAVE:
2722
2723
2724
2725 if (!nodelist)
2726 nodes = node_states[N_MEMORY];
2727 break;
2728 case MPOL_LOCAL:
2729
2730
2731
2732 if (nodelist)
2733 goto out;
2734 mode = MPOL_PREFERRED;
2735 break;
2736 case MPOL_DEFAULT:
2737
2738
2739
2740 if (!nodelist)
2741 err = 0;
2742 goto out;
2743 case MPOL_BIND:
2744
2745
2746
2747 if (!nodelist)
2748 goto out;
2749 }
2750
2751 mode_flags = 0;
2752 if (flags) {
2753
2754
2755
2756
2757 if (!strcmp(flags, "static"))
2758 mode_flags |= MPOL_F_STATIC_NODES;
2759 else if (!strcmp(flags, "relative"))
2760 mode_flags |= MPOL_F_RELATIVE_NODES;
2761 else
2762 goto out;
2763 }
2764
2765 new = mpol_new(mode, mode_flags, &nodes);
2766 if (IS_ERR(new))
2767 goto out;
2768
2769
2770
2771
2772
2773 if (mode != MPOL_PREFERRED)
2774 new->v.nodes = nodes;
2775 else if (nodelist)
2776 new->v.preferred_node = first_node(nodes);
2777 else
2778 new->flags |= MPOL_F_LOCAL;
2779
2780
2781
2782
2783
2784 new->w.user_nodemask = nodes;
2785
2786 err = 0;
2787
2788out:
2789
2790 if (nodelist)
2791 *--nodelist = ':';
2792 if (flags)
2793 *--flags = '=';
2794 if (!err)
2795 *mpol = new;
2796 return err;
2797}
2798#endif
2799
2800
2801
2802
2803
2804
2805
2806
2807
2808
2809
2810void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2811{
2812 char *p = buffer;
2813 nodemask_t nodes = NODE_MASK_NONE;
2814 unsigned short mode = MPOL_DEFAULT;
2815 unsigned short flags = 0;
2816
2817 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2818 mode = pol->mode;
2819 flags = pol->flags;
2820 }
2821
2822 switch (mode) {
2823 case MPOL_DEFAULT:
2824 break;
2825 case MPOL_PREFERRED:
2826 if (flags & MPOL_F_LOCAL)
2827 mode = MPOL_LOCAL;
2828 else
2829 node_set(pol->v.preferred_node, nodes);
2830 break;
2831 case MPOL_BIND:
2832 case MPOL_INTERLEAVE:
2833 nodes = pol->v.nodes;
2834 break;
2835 default:
2836 WARN_ON_ONCE(1);
2837 snprintf(p, maxlen, "unknown");
2838 return;
2839 }
2840
2841 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
2842
2843 if (flags & MPOL_MODE_FLAGS) {
2844 p += snprintf(p, buffer + maxlen - p, "=");
2845
2846
2847
2848
2849 if (flags & MPOL_F_STATIC_NODES)
2850 p += snprintf(p, buffer + maxlen - p, "static");
2851 else if (flags & MPOL_F_RELATIVE_NODES)
2852 p += snprintf(p, buffer + maxlen - p, "relative");
2853 }
2854
2855 if (!nodes_empty(nodes))
2856 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
2857 nodemask_pr_args(&nodes));
2858}
2859