#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/nodemask.h>
#include <linux/cpuset.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/migrate.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ctype.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/printk.h>

#include <asm/tlbflush.h>
#include <linux/uaccess.h>

#include "internal.h"

/* Internal flags used by queue_pages_range() */
#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */

static struct kmem_cache *policy_cache;
static struct kmem_cache *sn_cache;

/* Highest zone. An allocation for a zone below this is not policied. */
enum zone_type policy_zone = 0;

/*
 * Run-time system-wide default policy: local allocation.
 */
static struct mempolicy default_policy = {
	.refcnt = ATOMIC_INIT(1),	/* never free it */
	.mode = MPOL_PREFERRED,
	.flags = MPOL_F_LOCAL,
};

static struct mempolicy preferred_node_policy[MAX_NUMNODES];
128struct mempolicy *get_task_policy(struct task_struct *p)
129{
130 struct mempolicy *pol = p->mempolicy;
131 int node;
132
133 if (pol)
134 return pol;
135
136 node = numa_node_id();
137 if (node != NUMA_NO_NODE) {
138 pol = &preferred_node_policy[node];
139
140 if (pol->mode)
141 return pol;
142 }
143
144 return &default_policy;
145}
146
147static const struct mempolicy_operations {
148 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
149 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
150} mpol_ops[MPOL_MAX];
151
152static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
153{
154 return pol->flags & MPOL_MODE_FLAGS;
155}
156
157static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
158 const nodemask_t *rel)
159{
160 nodemask_t tmp;
161 nodes_fold(tmp, *orig, nodes_weight(*rel));
162 nodes_onto(*ret, tmp, *rel);
163}
164
165static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
166{
167 if (nodes_empty(*nodes))
168 return -EINVAL;
169 pol->v.nodes = *nodes;
170 return 0;
171}
172
173static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
174{
175 if (!nodes)
176 pol->flags |= MPOL_F_LOCAL;
177 else if (nodes_empty(*nodes))
178 return -EINVAL;
179 else
180 pol->v.preferred_node = first_node(*nodes);
181 return 0;
182}
183
184static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
185{
186 if (nodes_empty(*nodes))
187 return -EINVAL;
188 pol->v.nodes = *nodes;
189 return 0;
190}
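
/*
 * mpol_set_nodemask() is called after mpol_new() to set up the policy's
 * effective nodemask: the user-supplied nodes are intersected with (or,
 * for MPOL_F_RELATIVE_NODES, folded onto) the cpuset-allowed memory
 * nodes before the per-mode create() hook runs.  Callers hold
 * task_lock() so cpuset_current_mems_allowed stays stable.
 */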
201static int mpol_set_nodemask(struct mempolicy *pol,
202 const nodemask_t *nodes, struct nodemask_scratch *nsc)
203{
204 int ret;
205
206
207 if (pol == NULL)
208 return 0;
209
210 nodes_and(nsc->mask1,
211 cpuset_current_mems_allowed, node_states[N_MEMORY]);
212
213 VM_BUG_ON(!nodes);
214 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
215 nodes = NULL;
216 else {
217 if (pol->flags & MPOL_F_RELATIVE_NODES)
218 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
219 else
220 nodes_and(nsc->mask2, *nodes, nsc->mask1);
221
222 if (mpol_store_user_nodemask(pol))
223 pol->w.user_nodemask = *nodes;
224 else
225 pol->w.cpuset_mems_allowed =
226 cpuset_current_mems_allowed;
227 }
228
229 if (nodes)
230 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
231 else
232 ret = mpol_ops[pol->mode].create(pol, NULL);
233 return ret;
234}
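
/*
 * Allocate and minimally initialise a new mempolicy; the caller must
 * still invoke mpol_set_nodemask() to install the effective nodemask.
 */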
240static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
241 nodemask_t *nodes)
242{
243 struct mempolicy *policy;
244
245 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
246 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
247
248 if (mode == MPOL_DEFAULT) {
249 if (nodes && !nodes_empty(*nodes))
250 return ERR_PTR(-EINVAL);
251 return NULL;
252 }
253 VM_BUG_ON(!nodes);
254
260 if (mode == MPOL_PREFERRED) {
261 if (nodes_empty(*nodes)) {
262 if (((flags & MPOL_F_STATIC_NODES) ||
263 (flags & MPOL_F_RELATIVE_NODES)))
264 return ERR_PTR(-EINVAL);
265 }
266 } else if (mode == MPOL_LOCAL) {
267 if (!nodes_empty(*nodes) ||
268 (flags & MPOL_F_STATIC_NODES) ||
269 (flags & MPOL_F_RELATIVE_NODES))
270 return ERR_PTR(-EINVAL);
271 mode = MPOL_PREFERRED;
272 } else if (nodes_empty(*nodes))
273 return ERR_PTR(-EINVAL);
274 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
275 if (!policy)
276 return ERR_PTR(-ENOMEM);
277 atomic_set(&policy->refcnt, 1);
278 policy->mode = mode;
279 policy->flags = flags;
280
281 return policy;
282}
283
284
285void __mpol_put(struct mempolicy *p)
286{
287 if (!atomic_dec_and_test(&p->refcnt))
288 return;
289 kmem_cache_free(policy_cache, p);
290}
291
292static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
293{
294}
295
296static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
297{
298 nodemask_t tmp;
299
300 if (pol->flags & MPOL_F_STATIC_NODES)
301 nodes_and(tmp, pol->w.user_nodemask, *nodes);
302 else if (pol->flags & MPOL_F_RELATIVE_NODES)
303 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
304 else {
		nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
			    *nodes);
307 pol->w.cpuset_mems_allowed = tmp;
308 }
309
310 if (nodes_empty(tmp))
311 tmp = *nodes;
312
313 pol->v.nodes = tmp;
314}
315
316static void mpol_rebind_preferred(struct mempolicy *pol,
317 const nodemask_t *nodes)
318{
319 nodemask_t tmp;
320
321 if (pol->flags & MPOL_F_STATIC_NODES) {
322 int node = first_node(pol->w.user_nodemask);
323
324 if (node_isset(node, *nodes)) {
325 pol->v.preferred_node = node;
326 pol->flags &= ~MPOL_F_LOCAL;
327 } else
328 pol->flags |= MPOL_F_LOCAL;
329 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
330 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
331 pol->v.preferred_node = first_node(tmp);
332 } else if (!(pol->flags & MPOL_F_LOCAL)) {
333 pol->v.preferred_node = node_remap(pol->v.preferred_node,
334 pol->w.cpuset_mems_allowed,
335 *nodes);
336 pol->w.cpuset_mems_allowed = *nodes;
337 }
338}
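
/*
 * mpol_rebind_policy() - migrate a policy to a different set of nodes.
 * Returns early when the policy does not remember a user nodemask and
 * the cpuset-allowed nodes are unchanged; otherwise dispatches to the
 * per-mode rebind hook.
 */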
347static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
348{
349 if (!pol)
350 return;
351 if (!mpol_store_user_nodemask(pol) &&
352 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
353 return;
354
355 mpol_ops[pol->mode].rebind(pol, newmask);
356}
365void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
366{
367 mpol_rebind_policy(tsk->mempolicy, new);
368}
376void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
377{
378 struct vm_area_struct *vma;
379
380 down_write(&mm->mmap_sem);
381 for (vma = mm->mmap; vma; vma = vma->vm_next)
382 mpol_rebind_policy(vma->vm_policy, new);
383 up_write(&mm->mmap_sem);
384}
385
386static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
387 [MPOL_DEFAULT] = {
388 .rebind = mpol_rebind_default,
389 },
390 [MPOL_INTERLEAVE] = {
391 .create = mpol_new_interleave,
392 .rebind = mpol_rebind_nodemask,
393 },
394 [MPOL_PREFERRED] = {
395 .create = mpol_new_preferred,
396 .rebind = mpol_rebind_preferred,
397 },
398 [MPOL_BIND] = {
399 .create = mpol_new_bind,
400 .rebind = mpol_rebind_nodemask,
401 },
402};
403
404static void migrate_page_add(struct page *page, struct list_head *pagelist,
405 unsigned long flags);
406
407struct queue_pages {
408 struct list_head *pagelist;
409 unsigned long flags;
410 nodemask_t *nmask;
411 struct vm_area_struct *prev;
412};
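
/*
 * Scan the pte range, check each present page's node against qp->nmask
 * (sense inverted when MPOL_MF_INVERT is set) and add matching pages to
 * qp->pagelist for migration.  Transparent huge pages are split first.
 */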
418static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
419 unsigned long end, struct mm_walk *walk)
420{
421 struct vm_area_struct *vma = walk->vma;
422 struct page *page;
423 struct queue_pages *qp = walk->private;
424 unsigned long flags = qp->flags;
425 int nid, ret;
426 pte_t *pte;
427 spinlock_t *ptl;
428
429 if (pmd_trans_huge(*pmd)) {
430 ptl = pmd_lock(walk->mm, pmd);
431 if (pmd_trans_huge(*pmd)) {
432 page = pmd_page(*pmd);
433 if (is_huge_zero_page(page)) {
434 spin_unlock(ptl);
435 __split_huge_pmd(vma, pmd, addr, false, NULL);
436 } else {
437 get_page(page);
438 spin_unlock(ptl);
439 lock_page(page);
440 ret = split_huge_page(page);
441 unlock_page(page);
442 put_page(page);
443 if (ret)
444 return 0;
445 }
446 } else {
447 spin_unlock(ptl);
448 }
449 }
450
451 if (pmd_trans_unstable(pmd))
452 return 0;
453retry:
454 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
455 for (; addr != end; pte++, addr += PAGE_SIZE) {
456 if (!pte_present(*pte))
457 continue;
458 page = vm_normal_page(vma, addr, *pte);
459 if (!page)
460 continue;
465 if (PageReserved(page))
466 continue;
467 nid = page_to_nid(page);
468 if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
469 continue;
470 if (PageTransCompound(page)) {
471 get_page(page);
472 pte_unmap_unlock(pte, ptl);
473 lock_page(page);
474 ret = split_huge_page(page);
475 unlock_page(page);
476 put_page(page);
477
478 if (ret) {
479 pte = pte_offset_map_lock(walk->mm, pmd,
480 addr, &ptl);
481 continue;
482 }
483 goto retry;
484 }
485
486 migrate_page_add(page, qp->pagelist, flags);
487 }
488 pte_unmap_unlock(pte - 1, ptl);
489 cond_resched();
490 return 0;
491}
492
493static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
494 unsigned long addr, unsigned long end,
495 struct mm_walk *walk)
496{
497#ifdef CONFIG_HUGETLB_PAGE
498 struct queue_pages *qp = walk->private;
499 unsigned long flags = qp->flags;
500 int nid;
501 struct page *page;
502 spinlock_t *ptl;
503 pte_t entry;
504
505 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
506 entry = huge_ptep_get(pte);
507 if (!pte_present(entry))
508 goto unlock;
509 page = pte_page(entry);
510 nid = page_to_nid(page);
511 if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
512 goto unlock;
513
514 if (flags & (MPOL_MF_MOVE_ALL) ||
515 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
516 isolate_huge_page(page, qp->pagelist);
517unlock:
518 spin_unlock(ptl);
519#else
520 BUG();
521#endif
522 return 0;
523}
524
525#ifdef CONFIG_NUMA_BALANCING
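
/*
 * Mark the page table entries in [addr, end) prot_numa so that later
 * accesses take NUMA hinting faults, which drive automatic NUMA
 * balancing.  Returns the number of entries updated.
 */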
535unsigned long change_prot_numa(struct vm_area_struct *vma,
536 unsigned long addr, unsigned long end)
537{
538 int nr_updated;
539
540 nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
541 if (nr_updated)
542 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
543
544 return nr_updated;
545}
546#else
547static unsigned long change_prot_numa(struct vm_area_struct *vma,
548 unsigned long addr, unsigned long end)
549{
550 return 0;
551}
552#endif
553
554static int queue_pages_test_walk(unsigned long start, unsigned long end,
555 struct mm_walk *walk)
556{
557 struct vm_area_struct *vma = walk->vma;
558 struct queue_pages *qp = walk->private;
559 unsigned long endvma = vma->vm_end;
560 unsigned long flags = qp->flags;
561
562 if (!vma_migratable(vma))
563 return 1;
564
565 if (endvma > end)
566 endvma = end;
567 if (vma->vm_start > start)
568 start = vma->vm_start;
569
570 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
571 if (!vma->vm_next && vma->vm_end < end)
572 return -EFAULT;
573 if (qp->prev && qp->prev->vm_end < vma->vm_start)
574 return -EFAULT;
575 }
576
577 qp->prev = vma;
578
579 if (flags & MPOL_MF_LAZY) {
580
581 if (!is_vm_hugetlb_page(vma) &&
582 (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
583 !(vma->vm_flags & VM_MIXEDMAP))
584 change_prot_numa(vma, start, endvma);
585 return 1;
586 }
587
588
589 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
590 return 0;
591 return 1;
592}
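
/*
 * Walk the page tables in [start, end) and collect pages matching @nodes
 * and @flags onto @pagelist.  Returns 0 on success or a negative error,
 * e.g. -EFAULT when the range is discontiguous and MPOL_MF_DISCONTIG_OK
 * is not set.
 */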
601static int
602queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
603 nodemask_t *nodes, unsigned long flags,
604 struct list_head *pagelist)
605{
606 struct queue_pages qp = {
607 .pagelist = pagelist,
608 .flags = flags,
609 .nmask = nodes,
610 .prev = NULL,
611 };
612 struct mm_walk queue_pages_walk = {
613 .hugetlb_entry = queue_pages_hugetlb,
614 .pmd_entry = queue_pages_pte_range,
615 .test_walk = queue_pages_test_walk,
616 .mm = mm,
617 .private = &qp,
618 };
619
620 return walk_page_range(start, end, &queue_pages_walk);
621}
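
/*
 * Apply @pol to a single VMA; used by mbind_range() with mmap_sem held
 * for writing.  The old policy's reference is dropped on success.
 */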
627static int vma_replace_policy(struct vm_area_struct *vma,
628 struct mempolicy *pol)
629{
630 int err;
631 struct mempolicy *old;
632 struct mempolicy *new;
633
634 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
635 vma->vm_start, vma->vm_end, vma->vm_pgoff,
636 vma->vm_ops, vma->vm_file,
637 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
638
639 new = mpol_dup(pol);
640 if (IS_ERR(new))
641 return PTR_ERR(new);
642
643 if (vma->vm_ops && vma->vm_ops->set_policy) {
644 err = vma->vm_ops->set_policy(vma, new);
645 if (err)
646 goto err_out;
647 }
648
649 old = vma->vm_policy;
650 vma->vm_policy = new;
651 mpol_put(old);
652
653 return 0;
654 err_out:
655 mpol_put(new);
656 return err;
657}
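
/* Step 2: apply policy to a range, splitting or merging VMAs as needed. */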
660static int mbind_range(struct mm_struct *mm, unsigned long start,
661 unsigned long end, struct mempolicy *new_pol)
662{
663 struct vm_area_struct *next;
664 struct vm_area_struct *prev;
665 struct vm_area_struct *vma;
666 int err = 0;
667 pgoff_t pgoff;
668 unsigned long vmstart;
669 unsigned long vmend;
670
671 vma = find_vma(mm, start);
672 if (!vma || vma->vm_start > start)
673 return -EFAULT;
674
675 prev = vma->vm_prev;
676 if (start > vma->vm_start)
677 prev = vma;
678
679 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
680 next = vma->vm_next;
681 vmstart = max(start, vma->vm_start);
682 vmend = min(end, vma->vm_end);
683
684 if (mpol_equal(vma_policy(vma), new_pol))
685 continue;
686
687 pgoff = vma->vm_pgoff +
688 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
689 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
690 vma->anon_vma, vma->vm_file, pgoff,
691 new_pol, vma->vm_userfaultfd_ctx);
692 if (prev) {
693 vma = prev;
694 next = vma->vm_next;
695 if (mpol_equal(vma_policy(vma), new_pol))
696 continue;
697
698 goto replace;
699 }
700 if (vma->vm_start != vmstart) {
701 err = split_vma(vma->vm_mm, vma, vmstart, 1);
702 if (err)
703 goto out;
704 }
705 if (vma->vm_end != vmend) {
706 err = split_vma(vma->vm_mm, vma, vmend, 0);
707 if (err)
708 goto out;
709 }
710 replace:
711 err = vma_replace_policy(vma, new_pol);
712 if (err)
713 goto out;
714 }
715
716 out:
717 return err;
718}
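
/* Set the process memory policy */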
721static long do_set_mempolicy(unsigned short mode, unsigned short flags,
722 nodemask_t *nodes)
723{
724 struct mempolicy *new, *old;
725 NODEMASK_SCRATCH(scratch);
726 int ret;
727
728 if (!scratch)
729 return -ENOMEM;
730
731 new = mpol_new(mode, flags, nodes);
732 if (IS_ERR(new)) {
733 ret = PTR_ERR(new);
734 goto out;
735 }
736
737 task_lock(current);
738 ret = mpol_set_nodemask(new, nodes, scratch);
739 if (ret) {
740 task_unlock(current);
741 mpol_put(new);
742 goto out;
743 }
744 old = current->mempolicy;
745 current->mempolicy = new;
746 if (new && new->mode == MPOL_INTERLEAVE)
747 current->il_prev = MAX_NUMNODES-1;
748 task_unlock(current);
749 mpol_put(old);
750 ret = 0;
751out:
752 NODEMASK_SCRATCH_FREE(scratch);
753 return ret;
754}
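
/* Fill @nodes with the nodemask of policy @p for a get_mempolicy() query. */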
761static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
762{
763 nodes_clear(*nodes);
764 if (p == &default_policy)
765 return;
766
767 switch (p->mode) {
768 case MPOL_BIND:
769
770 case MPOL_INTERLEAVE:
771 *nodes = p->v.nodes;
772 break;
773 case MPOL_PREFERRED:
774 if (!(p->flags & MPOL_F_LOCAL))
775 node_set(p->v.preferred_node, *nodes);
776
777 break;
778 default:
779 BUG();
780 }
781}
782
783static int lookup_node(unsigned long addr)
784{
785 struct page *p;
786 int err;
787
788 err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
789 if (err >= 0) {
790 err = page_to_nid(p);
791 put_page(p);
792 }
793 return err;
794}
797static long do_get_mempolicy(int *policy, nodemask_t *nmask,
798 unsigned long addr, unsigned long flags)
799{
800 int err;
801 struct mm_struct *mm = current->mm;
802 struct vm_area_struct *vma = NULL;
803 struct mempolicy *pol = current->mempolicy;
804
805 if (flags &
806 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
807 return -EINVAL;
808
809 if (flags & MPOL_F_MEMS_ALLOWED) {
810 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
811 return -EINVAL;
812 *policy = 0;
813 task_lock(current);
814 *nmask = cpuset_current_mems_allowed;
815 task_unlock(current);
816 return 0;
817 }
818
819 if (flags & MPOL_F_ADDR) {
825 down_read(&mm->mmap_sem);
826 vma = find_vma_intersection(mm, addr, addr+1);
827 if (!vma) {
828 up_read(&mm->mmap_sem);
829 return -EFAULT;
830 }
831 if (vma->vm_ops && vma->vm_ops->get_policy)
832 pol = vma->vm_ops->get_policy(vma, addr);
833 else
834 pol = vma->vm_policy;
835 } else if (addr)
836 return -EINVAL;
837
838 if (!pol)
839 pol = &default_policy;
840
841 if (flags & MPOL_F_NODE) {
842 if (flags & MPOL_F_ADDR) {
843 err = lookup_node(addr);
844 if (err < 0)
845 goto out;
846 *policy = err;
847 } else if (pol == current->mempolicy &&
848 pol->mode == MPOL_INTERLEAVE) {
849 *policy = next_node_in(current->il_prev, pol->v.nodes);
850 } else {
851 err = -EINVAL;
852 goto out;
853 }
854 } else {
855 *policy = pol == &default_policy ? MPOL_DEFAULT :
856 pol->mode;
861 *policy |= (pol->flags & MPOL_MODE_FLAGS);
862 }
863
864 err = 0;
865 if (nmask) {
866 if (mpol_store_user_nodemask(pol)) {
867 *nmask = pol->w.user_nodemask;
868 } else {
869 task_lock(current);
870 get_policy_nodemask(pol, nmask);
871 task_unlock(current);
872 }
873 }
874
 out:
	mpol_cond_put(pol);
	if (vma)
		up_read(&current->mm->mmap_sem);
	return err;
}
881
882#ifdef CONFIG_MIGRATION
886static void migrate_page_add(struct page *page, struct list_head *pagelist,
887 unsigned long flags)
888{
892 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
893 if (!isolate_lru_page(page)) {
894 list_add_tail(&page->lru, pagelist);
895 inc_node_page_state(page, NR_ISOLATED_ANON +
896 page_is_file_cache(page));
897 }
898 }
899}
900
901static struct page *new_node_page(struct page *page, unsigned long node, int **x)
902{
903 if (PageHuge(page))
904 return alloc_huge_page_node(page_hstate(compound_head(page)),
905 node);
906 else
907 return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
908 __GFP_THISNODE, 0);
909}
915static int migrate_to_node(struct mm_struct *mm, int source, int dest,
916 int flags)
917{
918 nodemask_t nmask;
919 LIST_HEAD(pagelist);
920 int err = 0;
921
922 nodes_clear(nmask);
923 node_set(source, nmask);
930 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
931 queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
932 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
933
934 if (!list_empty(&pagelist)) {
935 err = migrate_pages(&pagelist, new_node_page, NULL, dest,
936 MIGRATE_SYNC, MR_SYSCALL);
937 if (err)
938 putback_movable_pages(&pagelist);
939 }
940
941 return err;
942}
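
/*
 * Move pages between the two nodesets so as to preserve the physical
 * layout as much as possible.  Returns the number of pages that could
 * not be moved, or a negative error.
 */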
950int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
951 const nodemask_t *to, int flags)
952{
953 int busy = 0;
954 int err;
955 nodemask_t tmp;
956
957 err = migrate_prep();
958 if (err)
959 return err;
960
961 down_read(&mm->mmap_sem);
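
	/*
	 * Peel off one source node at a time: map it to its destination with
	 * node_remap(), preferring moves whose destination is not itself a
	 * pending source, migrate it, then clear it from the working mask.
	 */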
994 tmp = *from;
995 while (!nodes_empty(tmp)) {
996 int s,d;
997 int source = NUMA_NO_NODE;
998 int dest = 0;
999
1000 for_each_node_mask(s, tmp) {
1017 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1018 (node_isset(s, *to)))
1019 continue;
1020
1021 d = node_remap(s, *from, *to);
1022 if (s == d)
1023 continue;
1024
1025 source = s;
1026 dest = d;
1027
1028
1029 if (!node_isset(dest, tmp))
1030 break;
1031 }
1032 if (source == NUMA_NO_NODE)
1033 break;
1034
1035 node_clear(source, tmp);
1036 err = migrate_to_node(mm, source, dest, flags);
1037 if (err > 0)
1038 busy += err;
1039 if (err < 0)
1040 break;
1041 }
1042 up_read(&mm->mmap_sem);
1043 if (err < 0)
1044 return err;
1045 return busy;
1046
1047}
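
/*
 * Allocate a new page for page migration, based on the policy of the
 * first vma at or after @start that maps @page; falls back to a movable
 * highuser allocation under that policy.
 */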
1056static struct page *new_page(struct page *page, unsigned long start, int **x)
1057{
1058 struct vm_area_struct *vma;
1059 unsigned long uninitialized_var(address);
1060
1061 vma = find_vma(current->mm, start);
1062 while (vma) {
1063 address = page_address_in_vma(page, vma);
1064 if (address != -EFAULT)
1065 break;
1066 vma = vma->vm_next;
1067 }
1068
1069 if (PageHuge(page)) {
1070 BUG_ON(!vma);
1071 return alloc_huge_page_noerr(vma, address, 1);
1072 }
1076 return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
1077 vma, address);
1078}
1079#else
1080
1081static void migrate_page_add(struct page *page, struct list_head *pagelist,
1082 unsigned long flags)
1083{
1084}
1085
1086int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1087 const nodemask_t *to, int flags)
1088{
1089 return -ENOSYS;
1090}
1091
1092static struct page *new_page(struct page *page, unsigned long start, int **x)
1093{
1094 return NULL;
1095}
1096#endif
1097
1098static long do_mbind(unsigned long start, unsigned long len,
1099 unsigned short mode, unsigned short mode_flags,
1100 nodemask_t *nmask, unsigned long flags)
1101{
1102 struct mm_struct *mm = current->mm;
1103 struct mempolicy *new;
1104 unsigned long end;
1105 int err;
1106 LIST_HEAD(pagelist);
1107
1108 if (flags & ~(unsigned long)MPOL_MF_VALID)
1109 return -EINVAL;
1110 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1111 return -EPERM;
1112
1113 if (start & ~PAGE_MASK)
1114 return -EINVAL;
1115
1116 if (mode == MPOL_DEFAULT)
1117 flags &= ~MPOL_MF_STRICT;
1118
1119 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1120 end = start + len;
1121
1122 if (end < start)
1123 return -EINVAL;
1124 if (end == start)
1125 return 0;
1126
1127 new = mpol_new(mode, mode_flags, nmask);
1128 if (IS_ERR(new))
1129 return PTR_ERR(new);
1130
1131 if (flags & MPOL_MF_LAZY)
1132 new->flags |= MPOL_F_MOF;
1138 if (!new)
1139 flags |= MPOL_MF_DISCONTIG_OK;
1140
1141 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1142 start, start + len, mode, mode_flags,
1143 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1144
1145 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1146
1147 err = migrate_prep();
1148 if (err)
1149 goto mpol_out;
1150 }
1151 {
1152 NODEMASK_SCRATCH(scratch);
1153 if (scratch) {
1154 down_write(&mm->mmap_sem);
1155 task_lock(current);
1156 err = mpol_set_nodemask(new, nmask, scratch);
1157 task_unlock(current);
1158 if (err)
1159 up_write(&mm->mmap_sem);
1160 } else
1161 err = -ENOMEM;
1162 NODEMASK_SCRATCH_FREE(scratch);
1163 }
1164 if (err)
1165 goto mpol_out;
1166
1167 err = queue_pages_range(mm, start, end, nmask,
1168 flags | MPOL_MF_INVERT, &pagelist);
1169 if (!err)
1170 err = mbind_range(mm, start, end, new);
1171
1172 if (!err) {
1173 int nr_failed = 0;
1174
1175 if (!list_empty(&pagelist)) {
1176 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1177 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1178 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1179 if (nr_failed)
1180 putback_movable_pages(&pagelist);
1181 }
1182
1183 if (nr_failed && (flags & MPOL_MF_STRICT))
1184 err = -EIO;
1185 } else
1186 putback_movable_pages(&pagelist);
1187
1188 up_write(&mm->mmap_sem);
1189 mpol_out:
1190 mpol_put(new);
1191 return err;
1192}
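
/*
 * User space interface with variable sized bitmaps for nodemasks.
 *
 * Copy a node mask from user space, checking that any bits beyond
 * MAX_NUMNODES are zero.
 */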
1199static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1200 unsigned long maxnode)
1201{
1202 unsigned long k;
1203 unsigned long nlongs;
1204 unsigned long endmask;
1205
1206 --maxnode;
1207 nodes_clear(*nodes);
1208 if (maxnode == 0 || !nmask)
1209 return 0;
1210 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1211 return -EINVAL;
1212
1213 nlongs = BITS_TO_LONGS(maxnode);
1214 if ((maxnode % BITS_PER_LONG) == 0)
1215 endmask = ~0UL;
1216 else
1217 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1221 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1222 if (nlongs > PAGE_SIZE/sizeof(long))
1223 return -EINVAL;
1224 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1225 unsigned long t;
1226 if (get_user(t, nmask + k))
1227 return -EFAULT;
1228 if (k == nlongs - 1) {
1229 if (t & endmask)
1230 return -EINVAL;
1231 } else if (t)
1232 return -EINVAL;
1233 }
1234 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1235 endmask = ~0UL;
1236 }
1237
1238 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1239 return -EFAULT;
1240 nodes_addr(*nodes)[nlongs-1] &= endmask;
1241 return 0;
1242}
1245static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1246 nodemask_t *nodes)
1247{
1248 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1249 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1250
1251 if (copy > nbytes) {
1252 if (copy > PAGE_SIZE)
1253 return -EINVAL;
1254 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1255 return -EFAULT;
1256 copy = nbytes;
1257 }
1258 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1259}
1260
1261SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1262 unsigned long, mode, const unsigned long __user *, nmask,
1263 unsigned long, maxnode, unsigned, flags)
1264{
1265 nodemask_t nodes;
1266 int err;
1267 unsigned short mode_flags;
1268
1269 mode_flags = mode & MPOL_MODE_FLAGS;
1270 mode &= ~MPOL_MODE_FLAGS;
1271 if (mode >= MPOL_MAX)
1272 return -EINVAL;
1273 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1274 (mode_flags & MPOL_F_RELATIVE_NODES))
1275 return -EINVAL;
1276 err = get_nodes(&nodes, nmask, maxnode);
1277 if (err)
1278 return err;
1279 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1280}
1281
1282
1283SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1284 unsigned long, maxnode)
1285{
1286 int err;
1287 nodemask_t nodes;
1288 unsigned short flags;
1289
1290 flags = mode & MPOL_MODE_FLAGS;
1291 mode &= ~MPOL_MODE_FLAGS;
1292 if ((unsigned int)mode >= MPOL_MAX)
1293 return -EINVAL;
1294 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1295 return -EINVAL;
1296 err = get_nodes(&nodes, nmask, maxnode);
1297 if (err)
1298 return err;
1299 return do_set_mempolicy(mode, flags, &nodes);
1300}
1301
1302SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1303 const unsigned long __user *, old_nodes,
1304 const unsigned long __user *, new_nodes)
1305{
1306 const struct cred *cred = current_cred(), *tcred;
1307 struct mm_struct *mm = NULL;
1308 struct task_struct *task;
1309 nodemask_t task_nodes;
1310 int err;
1311 nodemask_t *old;
1312 nodemask_t *new;
1313 NODEMASK_SCRATCH(scratch);
1314
1315 if (!scratch)
1316 return -ENOMEM;
1317
1318 old = &scratch->mask1;
1319 new = &scratch->mask2;
1320
1321 err = get_nodes(old, old_nodes, maxnode);
1322 if (err)
1323 goto out;
1324
1325 err = get_nodes(new, new_nodes, maxnode);
1326 if (err)
1327 goto out;
1328
1329
1330 rcu_read_lock();
1331 task = pid ? find_task_by_vpid(pid) : current;
1332 if (!task) {
1333 rcu_read_unlock();
1334 err = -ESRCH;
1335 goto out;
1336 }
1337 get_task_struct(task);
1338
1339 err = -EINVAL;
1347 tcred = __task_cred(task);
1348 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1349 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
1350 !capable(CAP_SYS_NICE)) {
1351 rcu_read_unlock();
1352 err = -EPERM;
1353 goto out_put;
1354 }
1355 rcu_read_unlock();
1356
1357 task_nodes = cpuset_mems_allowed(task);
1358
1359 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1360 err = -EPERM;
1361 goto out_put;
1362 }
1363
1364 if (!nodes_subset(*new, node_states[N_MEMORY])) {
1365 err = -EINVAL;
1366 goto out_put;
1367 }
1368
1369 err = security_task_movememory(task);
1370 if (err)
1371 goto out_put;
1372
1373 mm = get_task_mm(task);
1374 put_task_struct(task);
1375
1376 if (!mm) {
1377 err = -EINVAL;
1378 goto out;
1379 }
1380
1381 err = do_migrate_pages(mm, old, new,
1382 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1383
1384 mmput(mm);
1385out:
1386 NODEMASK_SCRATCH_FREE(scratch);
1387
1388 return err;
1389
1390out_put:
1391 put_task_struct(task);
1392 goto out;
1393
1394}
1398SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1399 unsigned long __user *, nmask, unsigned long, maxnode,
1400 unsigned long, addr, unsigned long, flags)
1401{
1402 int err;
1403 int uninitialized_var(pval);
1404 nodemask_t nodes;
1405
1406 if (nmask != NULL && maxnode < MAX_NUMNODES)
1407 return -EINVAL;
1408
1409 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1410
1411 if (err)
1412 return err;
1413
1414 if (policy && put_user(pval, policy))
1415 return -EFAULT;
1416
1417 if (nmask)
1418 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1419
1420 return err;
1421}
1422
1423#ifdef CONFIG_COMPAT
1424
1425COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1426 compat_ulong_t __user *, nmask,
1427 compat_ulong_t, maxnode,
1428 compat_ulong_t, addr, compat_ulong_t, flags)
1429{
1430 long err;
1431 unsigned long __user *nm = NULL;
1432 unsigned long nr_bits, alloc_size;
1433 DECLARE_BITMAP(bm, MAX_NUMNODES);
1434
1435 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1436 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1437
1438 if (nmask)
1439 nm = compat_alloc_user_space(alloc_size);
1440
1441 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1442
1443 if (!err && nmask) {
1444 unsigned long copy_size;
1445 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1446 err = copy_from_user(bm, nm, copy_size);
1447
1448 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1449 err |= compat_put_bitmap(nmask, bm, nr_bits);
1450 }
1451
1452 return err;
1453}
1454
1455COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1456 compat_ulong_t, maxnode)
1457{
1458 unsigned long __user *nm = NULL;
1459 unsigned long nr_bits, alloc_size;
1460 DECLARE_BITMAP(bm, MAX_NUMNODES);
1461
1462 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1463 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1464
1465 if (nmask) {
1466 if (compat_get_bitmap(bm, nmask, nr_bits))
1467 return -EFAULT;
1468 nm = compat_alloc_user_space(alloc_size);
1469 if (copy_to_user(nm, bm, alloc_size))
1470 return -EFAULT;
1471 }
1472
1473 return sys_set_mempolicy(mode, nm, nr_bits+1);
1474}
1475
1476COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1477 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1478 compat_ulong_t, maxnode, compat_ulong_t, flags)
1479{
1480 unsigned long __user *nm = NULL;
1481 unsigned long nr_bits, alloc_size;
1482 nodemask_t bm;
1483
1484 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1485 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1486
1487 if (nmask) {
1488 if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1489 return -EFAULT;
1490 nm = compat_alloc_user_space(alloc_size);
1491 if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1492 return -EFAULT;
1493 }
1494
1495 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1496}
1497
1498#endif
1499
1500struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1501 unsigned long addr)
1502{
1503 struct mempolicy *pol = NULL;
1504
1505 if (vma) {
1506 if (vma->vm_ops && vma->vm_ops->get_policy) {
1507 pol = vma->vm_ops->get_policy(vma, addr);
1508 } else if (vma->vm_policy) {
1509 pol = vma->vm_policy;
1517 if (mpol_needs_cond_ref(pol))
1518 mpol_get(pol);
1519 }
1520 }
1521
1522 return pol;
1523}
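
/*
 * Return the effective policy for a VMA at @addr, falling back to the
 * task policy or the system default.  Shared policies (MPOL_F_SHARED)
 * come back with an extra reference that the caller must drop with
 * mpol_cond_put().
 */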
1537static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1538 unsigned long addr)
1539{
1540 struct mempolicy *pol = __get_vma_policy(vma, addr);
1541
1542 if (!pol)
1543 pol = get_task_policy(current);
1544
1545 return pol;
1546}
1547
1548bool vma_policy_mof(struct vm_area_struct *vma)
1549{
1550 struct mempolicy *pol;
1551
1552 if (vma->vm_ops && vma->vm_ops->get_policy) {
1553 bool ret = false;
1554
1555 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1556 if (pol && (pol->flags & MPOL_F_MOF))
1557 ret = true;
1558 mpol_cond_put(pol);
1559
1560 return ret;
1561 }
1562
1563 pol = vma->vm_policy;
1564 if (!pol)
1565 pol = get_task_policy(current);
1566
1567 return pol->flags & MPOL_F_MOF;
1568}
1569
1570static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1571{
1572 enum zone_type dynamic_policy_zone = policy_zone;
1573
1574 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1585 dynamic_policy_zone = ZONE_MOVABLE;
1586
1587 return zone >= dynamic_policy_zone;
1588}
1594static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1595{
1596
1597 if (unlikely(policy->mode == MPOL_BIND) &&
1598 apply_policy_zone(policy, gfp_zone(gfp)) &&
1599 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1600 return &policy->v.nodes;
1601
1602 return NULL;
1603}
1604
1605
1606static int policy_node(gfp_t gfp, struct mempolicy *policy,
1607 int nd)
1608{
1609 if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
1610 nd = policy->v.preferred_node;
1611 else {
1617 WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1618 }
1619
1620 return nd;
1621}
1624static unsigned interleave_nodes(struct mempolicy *policy)
1625{
1626 unsigned next;
1627 struct task_struct *me = current;
1628
1629 next = next_node_in(me->il_prev, policy->v.nodes);
1630 if (next < MAX_NUMNODES)
1631 me->il_prev = next;
1632 return next;
1633}
1639unsigned int mempolicy_slab_node(void)
1640{
1641 struct mempolicy *policy;
1642 int node = numa_mem_id();
1643
1644 if (in_interrupt())
1645 return node;
1646
1647 policy = current->mempolicy;
1648 if (!policy || policy->flags & MPOL_F_LOCAL)
1649 return node;
1650
1651 switch (policy->mode) {
1652 case MPOL_PREFERRED:
1656 return policy->v.preferred_node;
1657
1658 case MPOL_INTERLEAVE:
1659 return interleave_nodes(policy);
1660
1661 case MPOL_BIND: {
1662 struct zoneref *z;
1668 struct zonelist *zonelist;
1669 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1670 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1671 z = first_zones_zonelist(zonelist, highest_zoneidx,
1672 &policy->v.nodes);
1673 return z->zone ? z->zone->node : node;
1674 }
1675
1676 default:
1677 BUG();
1678 }
1679}
1686static unsigned offset_il_node(struct mempolicy *pol,
1687 struct vm_area_struct *vma, unsigned long n)
1688{
1689 unsigned nnodes = nodes_weight(pol->v.nodes);
1690 unsigned target;
1691 int i;
1692 int nid;
1693
1694 if (!nnodes)
1695 return numa_node_id();
1696 target = (unsigned int)n % nnodes;
1697 nid = first_node(pol->v.nodes);
1698 for (i = 0; i < target; i++)
1699 nid = next_node(nid, pol->v.nodes);
1700 return nid;
1701}
1704static inline unsigned interleave_nid(struct mempolicy *pol,
1705 struct vm_area_struct *vma, unsigned long addr, int shift)
1706{
1707 if (vma) {
1708 unsigned long off;
1717 BUG_ON(shift < PAGE_SHIFT);
1718 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1719 off += (addr - vma->vm_start) >> shift;
1720 return offset_il_node(pol, vma, off);
1721 } else
1722 return interleave_nodes(pol);
1723}
1724
1725#ifdef CONFIG_HUGETLBFS
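
/*
 * huge_node() - pick the NUMA node for a hugetlb allocation at @addr.
 * Sets *mpol to the effective policy (drop with mpol_cond_put() if it is
 * shared) and, for MPOL_BIND, points *nodemask at the policy's nodemask
 * so the caller can filter the zonelist.
 */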
1741int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
1742 struct mempolicy **mpol, nodemask_t **nodemask)
1743{
1744 int nid;
1745
1746 *mpol = get_vma_policy(vma, addr);
1747 *nodemask = NULL;
1748
1749 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1750 nid = interleave_nid(*mpol, vma, addr,
1751 huge_page_shift(hstate_vma(vma)));
1752 } else {
1753 nid = policy_node(gfp_flags, *mpol, numa_node_id());
1754 if ((*mpol)->mode == MPOL_BIND)
1755 *nodemask = &(*mpol)->v.nodes;
1756 }
1757 return nid;
1758}
1776bool init_nodemask_of_mempolicy(nodemask_t *mask)
1777{
1778 struct mempolicy *mempolicy;
1779 int nid;
1780
1781 if (!(mask && current->mempolicy))
1782 return false;
1783
1784 task_lock(current);
1785 mempolicy = current->mempolicy;
1786 switch (mempolicy->mode) {
1787 case MPOL_PREFERRED:
1788 if (mempolicy->flags & MPOL_F_LOCAL)
1789 nid = numa_node_id();
1790 else
1791 nid = mempolicy->v.preferred_node;
1792 init_nodemask_of_node(mask, nid);
1793 break;
1794
1795 case MPOL_BIND:
1796
1797 case MPOL_INTERLEAVE:
1798 *mask = mempolicy->v.nodes;
1799 break;
1800
1801 default:
1802 BUG();
1803 }
1804 task_unlock(current);
1805
1806 return true;
1807}
1808#endif
1820bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1821 const nodemask_t *mask)
1822{
1823 struct mempolicy *mempolicy;
1824 bool ret = true;
1825
1826 if (!mask)
1827 return ret;
1828 task_lock(tsk);
1829 mempolicy = tsk->mempolicy;
1830 if (!mempolicy)
1831 goto out;
1832
1833 switch (mempolicy->mode) {
1834 case MPOL_PREFERRED:
1841 break;
1842 case MPOL_BIND:
1843 case MPOL_INTERLEAVE:
1844 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1845 break;
1846 default:
1847 BUG();
1848 }
1849out:
1850 task_unlock(tsk);
1851 return ret;
1852}
1856static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1857 unsigned nid)
1858{
1859 struct page *page;
1860
1861 page = __alloc_pages(gfp, order, nid);
1862 if (page && page_to_nid(page) == nid)
1863 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1864 return page;
1865}
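
/*
 * alloc_pages_vma() - allocate a page for a VMA, honouring the NUMA
 * policy of @vma (or the current task's policy when the VMA has none).
 * Interleave policies derive the node from the mapping offset; THP
 * allocations that name a preferred node are first attempted with
 * __GFP_THISNODE.
 */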
1890struct page *
1891alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1892 unsigned long addr, int node, bool hugepage)
1893{
1894 struct mempolicy *pol;
1895 struct page *page;
1896 int preferred_nid;
1897 nodemask_t *nmask;
1898
1899 pol = get_vma_policy(vma, addr);
1900
1901 if (pol->mode == MPOL_INTERLEAVE) {
1902 unsigned nid;
1903
1904 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
1905 mpol_cond_put(pol);
1906 page = alloc_page_interleave(gfp, order, nid);
1907 goto out;
1908 }
1909
1910 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
1911 int hpage_node = node;
1923 if (pol->mode == MPOL_PREFERRED &&
1924 !(pol->flags & MPOL_F_LOCAL))
1925 hpage_node = pol->v.preferred_node;
1926
1927 nmask = policy_nodemask(gfp, pol);
1928 if (!nmask || node_isset(hpage_node, *nmask)) {
1929 mpol_cond_put(pol);
1930 page = __alloc_pages_node(hpage_node,
1931 gfp | __GFP_THISNODE, order);
1932 goto out;
1933 }
1934 }
1935
1936 nmask = policy_nodemask(gfp, pol);
1937 preferred_nid = policy_node(gfp, pol, node);
1938 page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
1939 mpol_cond_put(pol);
1940out:
1941 return page;
1942}
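
/*
 * alloc_pages_current() - allocate pages using the current task's NUMA
 * policy.  Outside interrupt context, and unless __GFP_THISNODE is set,
 * the task policy is applied; otherwise the system default policy is
 * used.  Returns NULL when no page can be allocated.
 */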
1959struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1960{
1961 struct mempolicy *pol = &default_policy;
1962 struct page *page;
1963
1964 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
1965 pol = get_task_policy(current);
1971 if (pol->mode == MPOL_INTERLEAVE)
1972 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
1973 else
1974 page = __alloc_pages_nodemask(gfp, order,
1975 policy_node(gfp, pol, numa_node_id()),
1976 policy_nodemask(gfp, pol));
1977
1978 return page;
1979}
1980EXPORT_SYMBOL(alloc_pages_current);
1981
1982int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
1983{
1984 struct mempolicy *pol = mpol_dup(vma_policy(src));
1985
1986 if (IS_ERR(pol))
1987 return PTR_ERR(pol);
1988 dst->vm_policy = pol;
1989 return 0;
1990}
2004struct mempolicy *__mpol_dup(struct mempolicy *old)
2005{
2006 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2007
2008 if (!new)
2009 return ERR_PTR(-ENOMEM);
2010
2011
2012 if (old == current->mempolicy) {
2013 task_lock(current);
2014 *new = *old;
2015 task_unlock(current);
2016 } else
2017 *new = *old;
2018
2019 if (current_cpuset_is_being_rebound()) {
2020 nodemask_t mems = cpuset_mems_allowed(current);
2021 mpol_rebind_policy(new, &mems);
2022 }
2023 atomic_set(&new->refcnt, 1);
2024 return new;
2025}
2028bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2029{
2030 if (!a || !b)
2031 return false;
2032 if (a->mode != b->mode)
2033 return false;
2034 if (a->flags != b->flags)
2035 return false;
2036 if (mpol_store_user_nodemask(a))
2037 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2038 return false;
2039
2040 switch (a->mode) {
2041 case MPOL_BIND:
2042
2043 case MPOL_INTERLEAVE:
2044 return !!nodes_equal(a->v.nodes, b->v.nodes);
2045 case MPOL_PREFERRED:
2046 return a->v.preferred_node == b->v.preferred_node;
2047 default:
2048 BUG();
2049 return false;
2050 }
2051}
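
/*
 * Shared memory backing store policy support: policies attached to
 * shared memory segments are kept in a red-black tree hanging off the
 * shared_policy structure, protected by sp->lock.
 *
 * Find the first shared policy node overlapping [start, end).
 */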
2066static struct sp_node *
2067sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2068{
2069 struct rb_node *n = sp->root.rb_node;
2070
2071 while (n) {
2072 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2073
2074 if (start >= p->end)
2075 n = n->rb_right;
2076 else if (end <= p->start)
2077 n = n->rb_left;
2078 else
2079 break;
2080 }
2081 if (!n)
2082 return NULL;
2083 for (;;) {
2084 struct sp_node *w = NULL;
2085 struct rb_node *prev = rb_prev(n);
2086 if (!prev)
2087 break;
2088 w = rb_entry(prev, struct sp_node, nd);
2089 if (w->end <= start)
2090 break;
2091 n = prev;
2092 }
2093 return rb_entry(n, struct sp_node, nd);
2094}
2100static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2101{
2102 struct rb_node **p = &sp->root.rb_node;
2103 struct rb_node *parent = NULL;
2104 struct sp_node *nd;
2105
2106 while (*p) {
2107 parent = *p;
2108 nd = rb_entry(parent, struct sp_node, nd);
2109 if (new->start < nd->start)
2110 p = &(*p)->rb_left;
2111 else if (new->end > nd->end)
2112 p = &(*p)->rb_right;
2113 else
2114 BUG();
2115 }
2116 rb_link_node(&new->nd, parent, p);
2117 rb_insert_color(&new->nd, &sp->root);
2118 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2119 new->policy ? new->policy->mode : 0);
2120}
2121
2122
2123struct mempolicy *
2124mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2125{
2126 struct mempolicy *pol = NULL;
2127 struct sp_node *sn;
2128
2129 if (!sp->root.rb_node)
2130 return NULL;
2131 read_lock(&sp->lock);
2132 sn = sp_lookup(sp, idx, idx+1);
2133 if (sn) {
2134 mpol_get(sn->policy);
2135 pol = sn->policy;
2136 }
2137 read_unlock(&sp->lock);
2138 return pol;
2139}
2140
2141static void sp_free(struct sp_node *n)
2142{
2143 mpol_put(n->policy);
2144 kmem_cache_free(sn_cache, n);
2145}
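
/*
 * mpol_misplaced() - check whether @page sits on a node permitted by the
 * memory policy applying at (@vma, @addr).
 *
 * Returns -1 if the page is on an acceptable node, otherwise the node id
 * a replacement page should be allocated from; used by NUMA balancing to
 * decide whether to migrate the page.
 */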
2164int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2165{
2166 struct mempolicy *pol;
2167 struct zoneref *z;
2168 int curnid = page_to_nid(page);
2169 unsigned long pgoff;
2170 int thiscpu = raw_smp_processor_id();
2171 int thisnid = cpu_to_node(thiscpu);
2172 int polnid = -1;
2173 int ret = -1;
2174
2175 BUG_ON(!vma);
2176
2177 pol = get_vma_policy(vma, addr);
2178 if (!(pol->flags & MPOL_F_MOF))
2179 goto out;
2180
2181 switch (pol->mode) {
2182 case MPOL_INTERLEAVE:
2183 BUG_ON(addr >= vma->vm_end);
2184 BUG_ON(addr < vma->vm_start);
2185
2186 pgoff = vma->vm_pgoff;
2187 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2188 polnid = offset_il_node(pol, vma, pgoff);
2189 break;
2190
2191 case MPOL_PREFERRED:
2192 if (pol->flags & MPOL_F_LOCAL)
2193 polnid = numa_node_id();
2194 else
2195 polnid = pol->v.preferred_node;
2196 break;
2197
2198 case MPOL_BIND:
2206 if (node_isset(curnid, pol->v.nodes))
2207 goto out;
2208 z = first_zones_zonelist(
2209 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2210 gfp_zone(GFP_HIGHUSER),
2211 &pol->v.nodes);
2212 polnid = z->zone->node;
2213 break;
2214
2215 default:
2216 BUG();
2217 }
2220 if (pol->flags & MPOL_F_MORON) {
2221 polnid = thisnid;
2222
2223 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2224 goto out;
2225 }
2226
2227 if (curnid != polnid)
2228 ret = polnid;
2229out:
2230 mpol_cond_put(pol);
2231
2232 return ret;
2233}
2241void mpol_put_task_policy(struct task_struct *task)
2242{
2243 struct mempolicy *pol;
2244
2245 task_lock(task);
2246 pol = task->mempolicy;
2247 task->mempolicy = NULL;
2248 task_unlock(task);
2249 mpol_put(pol);
2250}
2251
2252static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2253{
	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2255 rb_erase(&n->nd, &sp->root);
2256 sp_free(n);
2257}
2258
2259static void sp_node_init(struct sp_node *node, unsigned long start,
2260 unsigned long end, struct mempolicy *pol)
2261{
2262 node->start = start;
2263 node->end = end;
2264 node->policy = pol;
2265}
2266
2267static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2268 struct mempolicy *pol)
2269{
2270 struct sp_node *n;
2271 struct mempolicy *newpol;
2272
2273 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2274 if (!n)
2275 return NULL;
2276
2277 newpol = mpol_dup(pol);
2278 if (IS_ERR(newpol)) {
2279 kmem_cache_free(sn_cache, n);
2280 return NULL;
2281 }
2282 newpol->flags |= MPOL_F_SHARED;
2283 sp_node_init(n, start, end, newpol);
2284
2285 return n;
2286}
2289static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2290 unsigned long end, struct sp_node *new)
2291{
2292 struct sp_node *n;
2293 struct sp_node *n_new = NULL;
2294 struct mempolicy *mpol_new = NULL;
2295 int ret = 0;
2296
2297restart:
2298 write_lock(&sp->lock);
2299 n = sp_lookup(sp, start, end);
2300
2301 while (n && n->start < end) {
2302 struct rb_node *next = rb_next(&n->nd);
2303 if (n->start >= start) {
2304 if (n->end <= end)
2305 sp_delete(sp, n);
2306 else
2307 n->start = end;
2308 } else {
2309
2310 if (n->end > end) {
2311 if (!n_new)
2312 goto alloc_new;
2313
2314 *mpol_new = *n->policy;
2315 atomic_set(&mpol_new->refcnt, 1);
2316 sp_node_init(n_new, end, n->end, mpol_new);
2317 n->end = start;
2318 sp_insert(sp, n_new);
2319 n_new = NULL;
2320 mpol_new = NULL;
2321 break;
2322 } else
2323 n->end = start;
2324 }
2325 if (!next)
2326 break;
2327 n = rb_entry(next, struct sp_node, nd);
2328 }
2329 if (new)
2330 sp_insert(sp, new);
2331 write_unlock(&sp->lock);
2332 ret = 0;
2333
2334err_out:
2335 if (mpol_new)
2336 mpol_put(mpol_new);
2337 if (n_new)
2338 kmem_cache_free(sn_cache, n_new);
2339
2340 return ret;
2341
2342alloc_new:
2343 write_unlock(&sp->lock);
2344 ret = -ENOMEM;
2345 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2346 if (!n_new)
2347 goto err_out;
2348 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2349 if (!mpol_new)
2350 goto err_out;
2351 goto restart;
2352}
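
/*
 * mpol_shared_policy_init() - initialise the shared policy for an inode,
 * installing @mpol (e.g. a tmpfs mount-option policy) over the whole
 * range if one was supplied; the reference to @mpol is dropped before
 * returning.
 */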
2364void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2365{
2366 int ret;
2367
2368 sp->root = RB_ROOT;
2369 rwlock_init(&sp->lock);
2370
2371 if (mpol) {
2372 struct vm_area_struct pvma;
2373 struct mempolicy *new;
2374 NODEMASK_SCRATCH(scratch);
2375
2376 if (!scratch)
2377 goto put_mpol;
2378
2379 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2380 if (IS_ERR(new))
2381 goto free_scratch;
2382
2383 task_lock(current);
2384 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2385 task_unlock(current);
2386 if (ret)
2387 goto put_new;
2388
2389
2390 memset(&pvma, 0, sizeof(struct vm_area_struct));
2391 pvma.vm_end = TASK_SIZE;
2392 mpol_set_shared_policy(sp, &pvma, new);
2393
2394put_new:
2395 mpol_put(new);
2396free_scratch:
2397 NODEMASK_SCRATCH_FREE(scratch);
2398put_mpol:
2399 mpol_put(mpol);
2400 }
2401}
2402
2403int mpol_set_shared_policy(struct shared_policy *info,
2404 struct vm_area_struct *vma, struct mempolicy *npol)
2405{
2406 int err;
2407 struct sp_node *new = NULL;
2408 unsigned long sz = vma_pages(vma);
2409
2410 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2411 vma->vm_pgoff,
2412 sz, npol ? npol->mode : -1,
2413 npol ? npol->flags : -1,
2414 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
2415
2416 if (npol) {
2417 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2418 if (!new)
2419 return -ENOMEM;
2420 }
2421 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2422 if (err && new)
2423 sp_free(new);
2424 return err;
2425}
2426
2427
2428void mpol_free_shared_policy(struct shared_policy *p)
2429{
2430 struct sp_node *n;
2431 struct rb_node *next;
2432
2433 if (!p->root.rb_node)
2434 return;
2435 write_lock(&p->lock);
2436 next = rb_first(&p->root);
2437 while (next) {
2438 n = rb_entry(next, struct sp_node, nd);
2439 next = rb_next(&n->nd);
2440 sp_delete(p, n);
2441 }
2442 write_unlock(&p->lock);
2443}
2444
2445#ifdef CONFIG_NUMA_BALANCING
2446static int __initdata numabalancing_override;
2447
2448static void __init check_numabalancing_enable(void)
2449{
2450 bool numabalancing_default = false;
2451
2452 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2453 numabalancing_default = true;
2454
2455
2456 if (numabalancing_override)
2457 set_numabalancing_state(numabalancing_override == 1);
2458
2459 if (num_online_nodes() > 1 && !numabalancing_override) {
2460 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2461 numabalancing_default ? "Enabling" : "Disabling");
2462 set_numabalancing_state(numabalancing_default);
2463 }
2464}
2465
2466static int __init setup_numabalancing(char *str)
2467{
2468 int ret = 0;
2469 if (!str)
2470 goto out;
2471
2472 if (!strcmp(str, "enable")) {
2473 numabalancing_override = 1;
2474 ret = 1;
2475 } else if (!strcmp(str, "disable")) {
2476 numabalancing_override = -1;
2477 ret = 1;
2478 }
2479out:
2480 if (!ret)
2481 pr_warn("Unable to parse numa_balancing=\n");
2482
2483 return ret;
2484}
2485__setup("numa_balancing=", setup_numabalancing);
2486#else
2487static inline void __init check_numabalancing_enable(void)
2488{
2489}
2490#endif
2491
2492
2493void __init numa_policy_init(void)
2494{
2495 nodemask_t interleave_nodes;
2496 unsigned long largest = 0;
2497 int nid, prefer = 0;
2498
2499 policy_cache = kmem_cache_create("numa_policy",
2500 sizeof(struct mempolicy),
2501 0, SLAB_PANIC, NULL);
2502
2503 sn_cache = kmem_cache_create("shared_policy_node",
2504 sizeof(struct sp_node),
2505 0, SLAB_PANIC, NULL);
2506
2507 for_each_node(nid) {
2508 preferred_node_policy[nid] = (struct mempolicy) {
2509 .refcnt = ATOMIC_INIT(1),
2510 .mode = MPOL_PREFERRED,
2511 .flags = MPOL_F_MOF | MPOL_F_MORON,
2512 .v = { .preferred_node = nid, },
2513 };
2514 }
2521 nodes_clear(interleave_nodes);
2522 for_each_node_state(nid, N_MEMORY) {
2523 unsigned long total_pages = node_present_pages(nid);
2524
2525
2526 if (largest < total_pages) {
2527 largest = total_pages;
2528 prefer = nid;
2529 }
2530
2531
2532 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2533 node_set(nid, interleave_nodes);
2534 }
2535
2536
2537 if (unlikely(nodes_empty(interleave_nodes)))
2538 node_set(prefer, interleave_nodes);
2539
2540 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2541 pr_err("%s: interleaving failed\n", __func__);
2542
2543 check_numabalancing_enable();
2544}
2545
2546
2547void numa_default_policy(void)
2548{
2549 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2550}
2559static const char * const policy_modes[] =
2560{
2561 [MPOL_DEFAULT] = "default",
2562 [MPOL_PREFERRED] = "prefer",
2563 [MPOL_BIND] = "bind",
2564 [MPOL_INTERLEAVE] = "interleave",
2565 [MPOL_LOCAL] = "local",
2566};
2567
2568
2569#ifdef CONFIG_TMPFS
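
/*
 * mpol_parse_str() - parse a string of the form <mode>[=<flags>][:<nodelist>]
 * into a mempolicy, as used for the tmpfs mpol= mount option.
 *
 * Returns 0 and sets *mpol on success, 1 on failure.
 */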
2580int mpol_parse_str(char *str, struct mempolicy **mpol)
2581{
2582 struct mempolicy *new = NULL;
2583 unsigned short mode;
2584 unsigned short mode_flags;
2585 nodemask_t nodes;
2586 char *nodelist = strchr(str, ':');
2587 char *flags = strchr(str, '=');
2588 int err = 1;
2589
2590 if (nodelist) {
2591
2592 *nodelist++ = '\0';
2593 if (nodelist_parse(nodelist, nodes))
2594 goto out;
2595 if (!nodes_subset(nodes, node_states[N_MEMORY]))
2596 goto out;
2597 } else
2598 nodes_clear(nodes);
2599
2600 if (flags)
2601 *flags++ = '\0';
2602
2603 for (mode = 0; mode < MPOL_MAX; mode++) {
2604 if (!strcmp(str, policy_modes[mode])) {
2605 break;
2606 }
2607 }
2608 if (mode >= MPOL_MAX)
2609 goto out;
2610
2611 switch (mode) {
2612 case MPOL_PREFERRED:
2616 if (nodelist) {
2617 char *rest = nodelist;
2618 while (isdigit(*rest))
2619 rest++;
2620 if (*rest)
2621 goto out;
2622 }
2623 break;
2624 case MPOL_INTERLEAVE:
2628 if (!nodelist)
2629 nodes = node_states[N_MEMORY];
2630 break;
2631 case MPOL_LOCAL:
2635 if (nodelist)
2636 goto out;
2637 mode = MPOL_PREFERRED;
2638 break;
2639 case MPOL_DEFAULT:
2643 if (!nodelist)
2644 err = 0;
2645 goto out;
2646 case MPOL_BIND:
2650 if (!nodelist)
2651 goto out;
2652 }
2653
2654 mode_flags = 0;
2655 if (flags) {
2660 if (!strcmp(flags, "static"))
2661 mode_flags |= MPOL_F_STATIC_NODES;
2662 else if (!strcmp(flags, "relative"))
2663 mode_flags |= MPOL_F_RELATIVE_NODES;
2664 else
2665 goto out;
2666 }
2667
2668 new = mpol_new(mode, mode_flags, &nodes);
2669 if (IS_ERR(new))
2670 goto out;
2676 if (mode != MPOL_PREFERRED)
2677 new->v.nodes = nodes;
2678 else if (nodelist)
2679 new->v.preferred_node = first_node(nodes);
2680 else
2681 new->flags |= MPOL_F_LOCAL;
2687 new->w.user_nodemask = nodes;
2688
2689 err = 0;
2690
2691out:
2692
2693 if (nodelist)
2694 *--nodelist = ':';
2695 if (flags)
2696 *--flags = '=';
2697 if (!err)
2698 *mpol = new;
2699 return err;
2700}
2701#endif
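
/*
 * mpol_to_str() - format a mempolicy for printing into @buffer (at most
 * @maxlen bytes, truncated if necessary), e.g. for /proc/<pid>/numa_maps
 * and tmpfs mount options.
 */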
2713void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2714{
2715 char *p = buffer;
2716 nodemask_t nodes = NODE_MASK_NONE;
2717 unsigned short mode = MPOL_DEFAULT;
2718 unsigned short flags = 0;
2719
2720 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2721 mode = pol->mode;
2722 flags = pol->flags;
2723 }
2724
2725 switch (mode) {
2726 case MPOL_DEFAULT:
2727 break;
2728 case MPOL_PREFERRED:
2729 if (flags & MPOL_F_LOCAL)
2730 mode = MPOL_LOCAL;
2731 else
2732 node_set(pol->v.preferred_node, nodes);
2733 break;
2734 case MPOL_BIND:
2735 case MPOL_INTERLEAVE:
2736 nodes = pol->v.nodes;
2737 break;
2738 default:
2739 WARN_ON_ONCE(1);
2740 snprintf(p, maxlen, "unknown");
2741 return;
2742 }
2743
2744 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
2745
2746 if (flags & MPOL_MODE_FLAGS) {
2747 p += snprintf(p, buffer + maxlen - p, "=");
2752 if (flags & MPOL_F_STATIC_NODES)
2753 p += snprintf(p, buffer + maxlen - p, "static");
2754 else if (flags & MPOL_F_RELATIVE_NODES)
2755 p += snprintf(p, buffer + maxlen - p, "relative");
2756 }
2757
2758 if (!nodes_empty(nodes))
2759 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
2760 nodemask_pr_args(&nodes));
2761}
2762