linux/mm/mempolicy.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Simple NUMA memory policy for the Linux kernel.
   4 *
   5 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
   6 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
   7 *
   8 * NUMA policy allows the user to give hints in which node(s) memory should
   9 * be allocated.
  10 *
  11 * Support four policies per VMA and per process:
  12 *
  13 * The VMA policy has priority over the process policy for a page fault.
  14 *
  15 * interleave     Allocate memory interleaved over a set of nodes,
  16 *                with normal fallback if it fails.
  17 *                For VMA based allocations this interleaves based on the
  18 *                offset into the backing object or offset into the mapping
  19 *                for anonymous memory. For process policy an process counter
  20 *                is used.
  21 *
  22 * bind           Only allocate memory on a specific set of nodes,
  23 *                no fallback.
  24 *                FIXME: memory is allocated starting with the first node
  25 *                to the last. It would be better if bind would truly restrict
  26 *                the allocation to memory nodes instead
  27 *
  28 * preferred       Try a specific node first before normal fallback.
  29 *                As a special case NUMA_NO_NODE here means do the allocation
  30 *                on the local CPU. This is normally identical to default,
  31 *                but useful to set in a VMA when you have a non default
  32 *                process policy.
  33 *
  34 * default        Allocate on the local node first, or when on a VMA
  35 *                use the process policy. This is what Linux always did
  36 *                in a NUMA aware kernel and still does by, ahem, default.
  37 *
  38 * The process policy is applied for most non interrupt memory allocations
  39 * in that process' context. Interrupts ignore the policies and always
  40 * try to allocate on the local CPU. The VMA policy is only applied for memory
  41 * allocations for a VMA in the VM.
  42 *
  43 * Currently there are a few corner cases in swapping where the policy
  44 * is not applied, but the majority should be handled. When process policy
  45 * is used it is not remembered over swap outs/swap ins.
  46 *
  47 * Only the highest zone in the zone hierarchy gets policied. Allocations
  48 * requesting a lower zone just use default policy. This implies that
  49 * on systems with highmem kernel lowmem allocation don't get policied.
  50 * Same with GFP_DMA allocations.
  51 *
  52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
  53 * all users and remembered even when nobody has memory mapped.
  54 */
  55
  56/* Notebook:
  57   fix mmap readahead to honour policy and enable policy for any page cache
  58   object
  59   statistics for bigpages
  60   global policy for page cache? currently it uses process policy. Requires
  61   first item above.
  62   handle mremap for shared memory (currently ignored for the policy)
  63   grows down?
  64   make bind policy root only? It can trigger oom much faster and the
  65   kernel is not always grateful with that.
  66*/
  67
  68#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  69
  70#include <linux/mempolicy.h>
  71#include <linux/pagewalk.h>
  72#include <linux/highmem.h>
  73#include <linux/hugetlb.h>
  74#include <linux/kernel.h>
  75#include <linux/sched.h>
  76#include <linux/sched/mm.h>
  77#include <linux/sched/numa_balancing.h>
  78#include <linux/sched/task.h>
  79#include <linux/nodemask.h>
  80#include <linux/cpuset.h>
  81#include <linux/slab.h>
  82#include <linux/string.h>
  83#include <linux/export.h>
  84#include <linux/nsproxy.h>
  85#include <linux/interrupt.h>
  86#include <linux/init.h>
  87#include <linux/compat.h>
  88#include <linux/ptrace.h>
  89#include <linux/swap.h>
  90#include <linux/seq_file.h>
  91#include <linux/proc_fs.h>
  92#include <linux/migrate.h>
  93#include <linux/ksm.h>
  94#include <linux/rmap.h>
  95#include <linux/security.h>
  96#include <linux/syscalls.h>
  97#include <linux/ctype.h>
  98#include <linux/mm_inline.h>
  99#include <linux/mmu_notifier.h>
 100#include <linux/printk.h>
 101#include <linux/swapops.h>
 102
 103#include <asm/tlbflush.h>
 104#include <linux/uaccess.h>
 105
 106#include "internal.h"
 107
 108/* Internal flags */
 109#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)    /* Skip checks for continuous vmas */
 110#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)          /* Invert check for nodemask */
 111
 112static struct kmem_cache *policy_cache;
 113static struct kmem_cache *sn_cache;
 114
 115/* Highest zone. An specific allocation for a zone below that is not
 116   policied. */
 117enum zone_type policy_zone = 0;
 118
 119/*
 120 * run-time system-wide default policy => local allocation
 121 */
 122static struct mempolicy default_policy = {
 123        .refcnt = ATOMIC_INIT(1), /* never free it */
 124        .mode = MPOL_PREFERRED,
 125        .flags = MPOL_F_LOCAL,
 126};
 127
 128static struct mempolicy preferred_node_policy[MAX_NUMNODES];
 129
 130/**
 131 * numa_map_to_online_node - Find closest online node
 132 * @node: Node id to start the search
 133 *
 134 * Lookup the next closest node by distance if @nid is not online.
 135 */
 136int numa_map_to_online_node(int node)
 137{
 138        int min_dist = INT_MAX, dist, n, min_node;
 139
 140        if (node == NUMA_NO_NODE || node_online(node))
 141                return node;
 142
 143        min_node = node;
 144        for_each_online_node(n) {
 145                dist = node_distance(node, n);
 146                if (dist < min_dist) {
 147                        min_dist = dist;
 148                        min_node = n;
 149                }
 150        }
 151
 152        return min_node;
 153}
 154EXPORT_SYMBOL_GPL(numa_map_to_online_node);
 155
 156struct mempolicy *get_task_policy(struct task_struct *p)
 157{
 158        struct mempolicy *pol = p->mempolicy;
 159        int node;
 160
 161        if (pol)
 162                return pol;
 163
 164        node = numa_node_id();
 165        if (node != NUMA_NO_NODE) {
 166                pol = &preferred_node_policy[node];
 167                /* preferred_node_policy is not initialised early in boot */
 168                if (pol->mode)
 169                        return pol;
 170        }
 171
 172        return &default_policy;
 173}
 174
 175static const struct mempolicy_operations {
 176        int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
 177        void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
 178} mpol_ops[MPOL_MAX];
 179
 180static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
 181{
 182        return pol->flags & MPOL_MODE_FLAGS;
 183}
 184
 185static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
 186                                   const nodemask_t *rel)
 187{
 188        nodemask_t tmp;
 189        nodes_fold(tmp, *orig, nodes_weight(*rel));
 190        nodes_onto(*ret, tmp, *rel);
 191}
 192
 193static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
 194{
 195        if (nodes_empty(*nodes))
 196                return -EINVAL;
 197        pol->v.nodes = *nodes;
 198        return 0;
 199}
 200
 201static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
 202{
 203        if (!nodes)
 204                pol->flags |= MPOL_F_LOCAL;     /* local allocation */
 205        else if (nodes_empty(*nodes))
 206                return -EINVAL;                 /*  no allowed nodes */
 207        else
 208                pol->v.preferred_node = first_node(*nodes);
 209        return 0;
 210}
 211
 212static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
 213{
 214        if (nodes_empty(*nodes))
 215                return -EINVAL;
 216        pol->v.nodes = *nodes;
 217        return 0;
 218}
 219
 220/*
 221 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 222 * any, for the new policy.  mpol_new() has already validated the nodes
 223 * parameter with respect to the policy mode and flags.  But, we need to
 224 * handle an empty nodemask with MPOL_PREFERRED here.
 225 *
 226 * Must be called holding task's alloc_lock to protect task's mems_allowed
 227 * and mempolicy.  May also be called holding the mmap_lock for write.
 228 */
 229static int mpol_set_nodemask(struct mempolicy *pol,
 230                     const nodemask_t *nodes, struct nodemask_scratch *nsc)
 231{
 232        int ret;
 233
 234        /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
 235        if (pol == NULL)
 236                return 0;
 237        /* Check N_MEMORY */
 238        nodes_and(nsc->mask1,
 239                  cpuset_current_mems_allowed, node_states[N_MEMORY]);
 240
 241        VM_BUG_ON(!nodes);
 242        if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
 243                nodes = NULL;   /* explicit local allocation */
 244        else {
 245                if (pol->flags & MPOL_F_RELATIVE_NODES)
 246                        mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
 247                else
 248                        nodes_and(nsc->mask2, *nodes, nsc->mask1);
 249
 250                if (mpol_store_user_nodemask(pol))
 251                        pol->w.user_nodemask = *nodes;
 252                else
 253                        pol->w.cpuset_mems_allowed =
 254                                                cpuset_current_mems_allowed;
 255        }
 256
 257        if (nodes)
 258                ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
 259        else
 260                ret = mpol_ops[pol->mode].create(pol, NULL);
 261        return ret;
 262}
 263
 264/*
 265 * This function just creates a new policy, does some check and simple
 266 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 267 */
 268static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
 269                                  nodemask_t *nodes)
 270{
 271        struct mempolicy *policy;
 272
 273        pr_debug("setting mode %d flags %d nodes[0] %lx\n",
 274                 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
 275
 276        if (mode == MPOL_DEFAULT) {
 277                if (nodes && !nodes_empty(*nodes))
 278                        return ERR_PTR(-EINVAL);
 279                return NULL;
 280        }
 281        VM_BUG_ON(!nodes);
 282
 283        /*
 284         * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
 285         * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
 286         * All other modes require a valid pointer to a non-empty nodemask.
 287         */
 288        if (mode == MPOL_PREFERRED) {
 289                if (nodes_empty(*nodes)) {
 290                        if (((flags & MPOL_F_STATIC_NODES) ||
 291                             (flags & MPOL_F_RELATIVE_NODES)))
 292                                return ERR_PTR(-EINVAL);
 293                }
 294        } else if (mode == MPOL_LOCAL) {
 295                if (!nodes_empty(*nodes) ||
 296                    (flags & MPOL_F_STATIC_NODES) ||
 297                    (flags & MPOL_F_RELATIVE_NODES))
 298                        return ERR_PTR(-EINVAL);
 299                mode = MPOL_PREFERRED;
 300        } else if (nodes_empty(*nodes))
 301                return ERR_PTR(-EINVAL);
 302        policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
 303        if (!policy)
 304                return ERR_PTR(-ENOMEM);
 305        atomic_set(&policy->refcnt, 1);
 306        policy->mode = mode;
 307        policy->flags = flags;
 308
 309        return policy;
 310}
 311
 312/* Slow path of a mpol destructor. */
 313void __mpol_put(struct mempolicy *p)
 314{
 315        if (!atomic_dec_and_test(&p->refcnt))
 316                return;
 317        kmem_cache_free(policy_cache, p);
 318}
 319
 320static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
 321{
 322}
 323
 324static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
 325{
 326        nodemask_t tmp;
 327
 328        if (pol->flags & MPOL_F_STATIC_NODES)
 329                nodes_and(tmp, pol->w.user_nodemask, *nodes);
 330        else if (pol->flags & MPOL_F_RELATIVE_NODES)
 331                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
 332        else {
 333                nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed,
 334                                                                *nodes);
 335                pol->w.cpuset_mems_allowed = *nodes;
 336        }
 337
 338        if (nodes_empty(tmp))
 339                tmp = *nodes;
 340
 341        pol->v.nodes = tmp;
 342}
 343
 344static void mpol_rebind_preferred(struct mempolicy *pol,
 345                                                const nodemask_t *nodes)
 346{
 347        nodemask_t tmp;
 348
 349        if (pol->flags & MPOL_F_STATIC_NODES) {
 350                int node = first_node(pol->w.user_nodemask);
 351
 352                if (node_isset(node, *nodes)) {
 353                        pol->v.preferred_node = node;
 354                        pol->flags &= ~MPOL_F_LOCAL;
 355                } else
 356                        pol->flags |= MPOL_F_LOCAL;
 357        } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
 358                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
 359                pol->v.preferred_node = first_node(tmp);
 360        } else if (!(pol->flags & MPOL_F_LOCAL)) {
 361                pol->v.preferred_node = node_remap(pol->v.preferred_node,
 362                                                   pol->w.cpuset_mems_allowed,
 363                                                   *nodes);
 364                pol->w.cpuset_mems_allowed = *nodes;
 365        }
 366}
 367
 368/*
 369 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 370 *
 371 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 372 * policies are protected by task->mems_allowed_seq to prevent a premature
 373 * OOM/allocation failure due to parallel nodemask modification.
 374 */
 375static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
 376{
 377        if (!pol)
 378                return;
 379        if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
 380            nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
 381                return;
 382
 383        mpol_ops[pol->mode].rebind(pol, newmask);
 384}
 385
 386/*
 387 * Wrapper for mpol_rebind_policy() that just requires task
 388 * pointer, and updates task mempolicy.
 389 *
 390 * Called with task's alloc_lock held.
 391 */
 392
 393void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
 394{
 395        mpol_rebind_policy(tsk->mempolicy, new);
 396}
 397
 398/*
 399 * Rebind each vma in mm to new nodemask.
 400 *
 401 * Call holding a reference to mm.  Takes mm->mmap_lock during call.
 402 */
 403
 404void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
 405{
 406        struct vm_area_struct *vma;
 407
 408        mmap_write_lock(mm);
 409        for (vma = mm->mmap; vma; vma = vma->vm_next)
 410                mpol_rebind_policy(vma->vm_policy, new);
 411        mmap_write_unlock(mm);
 412}
 413
 414static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
 415        [MPOL_DEFAULT] = {
 416                .rebind = mpol_rebind_default,
 417        },
 418        [MPOL_INTERLEAVE] = {
 419                .create = mpol_new_interleave,
 420                .rebind = mpol_rebind_nodemask,
 421        },
 422        [MPOL_PREFERRED] = {
 423                .create = mpol_new_preferred,
 424                .rebind = mpol_rebind_preferred,
 425        },
 426        [MPOL_BIND] = {
 427                .create = mpol_new_bind,
 428                .rebind = mpol_rebind_nodemask,
 429        },
 430};
 431
 432static int migrate_page_add(struct page *page, struct list_head *pagelist,
 433                                unsigned long flags);
 434
 435struct queue_pages {
 436        struct list_head *pagelist;
 437        unsigned long flags;
 438        nodemask_t *nmask;
 439        unsigned long start;
 440        unsigned long end;
 441        struct vm_area_struct *first;
 442};
 443
 444/*
 445 * Check if the page's nid is in qp->nmask.
 446 *
 447 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
 448 * in the invert of qp->nmask.
 449 */
 450static inline bool queue_pages_required(struct page *page,
 451                                        struct queue_pages *qp)
 452{
 453        int nid = page_to_nid(page);
 454        unsigned long flags = qp->flags;
 455
 456        return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
 457}
 458
 459/*
 460 * queue_pages_pmd() has four possible return values:
 461 * 0 - pages are placed on the right node or queued successfully.
 462 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 463 *     specified.
 464 * 2 - THP was split.
 465 * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
 466 *        existing page was already on a node that does not follow the
 467 *        policy.
 468 */
 469static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 470                                unsigned long end, struct mm_walk *walk)
 471        __releases(ptl)
 472{
 473        int ret = 0;
 474        struct page *page;
 475        struct queue_pages *qp = walk->private;
 476        unsigned long flags;
 477
 478        if (unlikely(is_pmd_migration_entry(*pmd))) {
 479                ret = -EIO;
 480                goto unlock;
 481        }
 482        page = pmd_page(*pmd);
 483        if (is_huge_zero_page(page)) {
 484                spin_unlock(ptl);
 485                __split_huge_pmd(walk->vma, pmd, addr, false, NULL);
 486                ret = 2;
 487                goto out;
 488        }
 489        if (!queue_pages_required(page, qp))
 490                goto unlock;
 491
 492        flags = qp->flags;
 493        /* go to thp migration */
 494        if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
 495                if (!vma_migratable(walk->vma) ||
 496                    migrate_page_add(page, qp->pagelist, flags)) {
 497                        ret = 1;
 498                        goto unlock;
 499                }
 500        } else
 501                ret = -EIO;
 502unlock:
 503        spin_unlock(ptl);
 504out:
 505        return ret;
 506}
 507
 508/*
 509 * Scan through pages checking if pages follow certain conditions,
 510 * and move them to the pagelist if they do.
 511 *
 512 * queue_pages_pte_range() has three possible return values:
 513 * 0 - pages are placed on the right node or queued successfully.
 514 * 1 - there is unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 515 *     specified.
 516 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 517 *        on a node that does not follow the policy.
 518 */
 519static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 520                        unsigned long end, struct mm_walk *walk)
 521{
 522        struct vm_area_struct *vma = walk->vma;
 523        struct page *page;
 524        struct queue_pages *qp = walk->private;
 525        unsigned long flags = qp->flags;
 526        int ret;
 527        bool has_unmovable = false;
 528        pte_t *pte, *mapped_pte;
 529        spinlock_t *ptl;
 530
 531        ptl = pmd_trans_huge_lock(pmd, vma);
 532        if (ptl) {
 533                ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
 534                if (ret != 2)
 535                        return ret;
 536        }
 537        /* THP was split, fall through to pte walk */
 538
 539        if (pmd_trans_unstable(pmd))
 540                return 0;
 541
 542        mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 543        for (; addr != end; pte++, addr += PAGE_SIZE) {
 544                if (!pte_present(*pte))
 545                        continue;
 546                page = vm_normal_page(vma, addr, *pte);
 547                if (!page)
 548                        continue;
 549                /*
 550                 * vm_normal_page() filters out zero pages, but there might
 551                 * still be PageReserved pages to skip, perhaps in a VDSO.
 552                 */
 553                if (PageReserved(page))
 554                        continue;
 555                if (!queue_pages_required(page, qp))
 556                        continue;
 557                if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
 558                        /* MPOL_MF_STRICT must be specified if we get here */
 559                        if (!vma_migratable(vma)) {
 560                                has_unmovable = true;
 561                                break;
 562                        }
 563
 564                        /*
 565                         * Do not abort immediately since there may be
 566                         * temporary off LRU pages in the range.  Still
 567                         * need migrate other LRU pages.
 568                         */
 569                        if (migrate_page_add(page, qp->pagelist, flags))
 570                                has_unmovable = true;
 571                } else
 572                        break;
 573        }
 574        pte_unmap_unlock(mapped_pte, ptl);
 575        cond_resched();
 576
 577        if (has_unmovable)
 578                return 1;
 579
 580        return addr != end ? -EIO : 0;
 581}
 582
 583static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
 584                               unsigned long addr, unsigned long end,
 585                               struct mm_walk *walk)
 586{
 587        int ret = 0;
 588#ifdef CONFIG_HUGETLB_PAGE
 589        struct queue_pages *qp = walk->private;
 590        unsigned long flags = (qp->flags & MPOL_MF_VALID);
 591        struct page *page;
 592        spinlock_t *ptl;
 593        pte_t entry;
 594
 595        ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
 596        entry = huge_ptep_get(pte);
 597        if (!pte_present(entry))
 598                goto unlock;
 599        page = pte_page(entry);
 600        if (!queue_pages_required(page, qp))
 601                goto unlock;
 602
 603        if (flags == MPOL_MF_STRICT) {
 604                /*
 605                 * STRICT alone means only detecting misplaced page and no
 606                 * need to further check other vma.
 607                 */
 608                ret = -EIO;
 609                goto unlock;
 610        }
 611
 612        if (!vma_migratable(walk->vma)) {
 613                /*
 614                 * Must be STRICT with MOVE*, otherwise .test_walk() have
 615                 * stopped walking current vma.
 616                 * Detecting misplaced page but allow migrating pages which
 617                 * have been queued.
 618                 */
 619                ret = 1;
 620                goto unlock;
 621        }
 622
 623        /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
 624        if (flags & (MPOL_MF_MOVE_ALL) ||
 625            (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
 626                if (!isolate_huge_page(page, qp->pagelist) &&
 627                        (flags & MPOL_MF_STRICT))
 628                        /*
 629                         * Failed to isolate page but allow migrating pages
 630                         * which have been queued.
 631                         */
 632                        ret = 1;
 633        }
 634unlock:
 635        spin_unlock(ptl);
 636#else
 637        BUG();
 638#endif
 639        return ret;
 640}
 641
 642#ifdef CONFIG_NUMA_BALANCING
 643/*
 644 * This is used to mark a range of virtual addresses to be inaccessible.
 645 * These are later cleared by a NUMA hinting fault. Depending on these
 646 * faults, pages may be migrated for better NUMA placement.
 647 *
 648 * This is assuming that NUMA faults are handled using PROT_NONE. If
 649 * an architecture makes a different choice, it will need further
 650 * changes to the core.
 651 */
 652unsigned long change_prot_numa(struct vm_area_struct *vma,
 653                        unsigned long addr, unsigned long end)
 654{
 655        int nr_updated;
 656
 657        nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
 658        if (nr_updated)
 659                count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
 660
 661        return nr_updated;
 662}
 663#else
 664static unsigned long change_prot_numa(struct vm_area_struct *vma,
 665                        unsigned long addr, unsigned long end)
 666{
 667        return 0;
 668}
 669#endif /* CONFIG_NUMA_BALANCING */
 670
 671static int queue_pages_test_walk(unsigned long start, unsigned long end,
 672                                struct mm_walk *walk)
 673{
 674        struct vm_area_struct *vma = walk->vma;
 675        struct queue_pages *qp = walk->private;
 676        unsigned long endvma = vma->vm_end;
 677        unsigned long flags = qp->flags;
 678
 679        /* range check first */
 680        VM_BUG_ON_VMA((vma->vm_start > start) || (vma->vm_end < end), vma);
 681
 682        if (!qp->first) {
 683                qp->first = vma;
 684                if (!(flags & MPOL_MF_DISCONTIG_OK) &&
 685                        (qp->start < vma->vm_start))
 686                        /* hole at head side of range */
 687                        return -EFAULT;
 688        }
 689        if (!(flags & MPOL_MF_DISCONTIG_OK) &&
 690                ((vma->vm_end < qp->end) &&
 691                (!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
 692                /* hole at middle or tail of range */
 693                return -EFAULT;
 694
 695        /*
 696         * Need check MPOL_MF_STRICT to return -EIO if possible
 697         * regardless of vma_migratable
 698         */
 699        if (!vma_migratable(vma) &&
 700            !(flags & MPOL_MF_STRICT))
 701                return 1;
 702
 703        if (endvma > end)
 704                endvma = end;
 705
 706        if (flags & MPOL_MF_LAZY) {
 707                /* Similar to task_numa_work, skip inaccessible VMAs */
 708                if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
 709                        !(vma->vm_flags & VM_MIXEDMAP))
 710                        change_prot_numa(vma, start, endvma);
 711                return 1;
 712        }
 713
 714        /* queue pages from current vma */
 715        if (flags & MPOL_MF_VALID)
 716                return 0;
 717        return 1;
 718}
 719
 720static const struct mm_walk_ops queue_pages_walk_ops = {
 721        .hugetlb_entry          = queue_pages_hugetlb,
 722        .pmd_entry              = queue_pages_pte_range,
 723        .test_walk              = queue_pages_test_walk,
 724};
 725
 726/*
 727 * Walk through page tables and collect pages to be migrated.
 728 *
 729 * If pages found in a given range are on a set of nodes (determined by
 730 * @nodes and @flags,) it's isolated and queued to the pagelist which is
 731 * passed via @private.
 732 *
 733 * queue_pages_range() has three possible return values:
 734 * 1 - there is unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
 735 *     specified.
 736 * 0 - queue pages successfully or no misplaced page.
 737 * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
 738 *         memory range specified by nodemask and maxnode points outside
 739 *         your accessible address space (-EFAULT)
 740 */
 741static int
 742queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 743                nodemask_t *nodes, unsigned long flags,
 744                struct list_head *pagelist)
 745{
 746        int err;
 747        struct queue_pages qp = {
 748                .pagelist = pagelist,
 749                .flags = flags,
 750                .nmask = nodes,
 751                .start = start,
 752                .end = end,
 753                .first = NULL,
 754        };
 755
 756        err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
 757
 758        if (!qp.first)
 759                /* whole range in hole */
 760                err = -EFAULT;
 761
 762        return err;
 763}
 764
 765/*
 766 * Apply policy to a single VMA
 767 * This must be called with the mmap_lock held for writing.
 768 */
 769static int vma_replace_policy(struct vm_area_struct *vma,
 770                                                struct mempolicy *pol)
 771{
 772        int err;
 773        struct mempolicy *old;
 774        struct mempolicy *new;
 775
 776        pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
 777                 vma->vm_start, vma->vm_end, vma->vm_pgoff,
 778                 vma->vm_ops, vma->vm_file,
 779                 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
 780
 781        new = mpol_dup(pol);
 782        if (IS_ERR(new))
 783                return PTR_ERR(new);
 784
 785        if (vma->vm_ops && vma->vm_ops->set_policy) {
 786                err = vma->vm_ops->set_policy(vma, new);
 787                if (err)
 788                        goto err_out;
 789        }
 790
 791        old = vma->vm_policy;
 792        vma->vm_policy = new; /* protected by mmap_lock */
 793        mpol_put(old);
 794
 795        return 0;
 796 err_out:
 797        mpol_put(new);
 798        return err;
 799}
 800
 801/* Step 2: apply policy to a range and do splits. */
 802static int mbind_range(struct mm_struct *mm, unsigned long start,
 803                       unsigned long end, struct mempolicy *new_pol)
 804{
 805        struct vm_area_struct *next;
 806        struct vm_area_struct *prev;
 807        struct vm_area_struct *vma;
 808        int err = 0;
 809        pgoff_t pgoff;
 810        unsigned long vmstart;
 811        unsigned long vmend;
 812
 813        vma = find_vma(mm, start);
 814        VM_BUG_ON(!vma);
 815
 816        prev = vma->vm_prev;
 817        if (start > vma->vm_start)
 818                prev = vma;
 819
 820        for (; vma && vma->vm_start < end; prev = vma, vma = next) {
 821                next = vma->vm_next;
 822                vmstart = max(start, vma->vm_start);
 823                vmend   = min(end, vma->vm_end);
 824
 825                if (mpol_equal(vma_policy(vma), new_pol))
 826                        continue;
 827
 828                pgoff = vma->vm_pgoff +
 829                        ((vmstart - vma->vm_start) >> PAGE_SHIFT);
 830                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
 831                                 vma->anon_vma, vma->vm_file, pgoff,
 832                                 new_pol, vma->vm_userfaultfd_ctx);
 833                if (prev) {
 834                        vma = prev;
 835                        next = vma->vm_next;
 836                        if (mpol_equal(vma_policy(vma), new_pol))
 837                                continue;
 838                        /* vma_merge() joined vma && vma->next, case 8 */
 839                        goto replace;
 840                }
 841                if (vma->vm_start != vmstart) {
 842                        err = split_vma(vma->vm_mm, vma, vmstart, 1);
 843                        if (err)
 844                                goto out;
 845                }
 846                if (vma->vm_end != vmend) {
 847                        err = split_vma(vma->vm_mm, vma, vmend, 0);
 848                        if (err)
 849                                goto out;
 850                }
 851 replace:
 852                err = vma_replace_policy(vma, new_pol);
 853                if (err)
 854                        goto out;
 855        }
 856
 857 out:
 858        return err;
 859}
 860
 861/* Set the process memory policy */
 862static long do_set_mempolicy(unsigned short mode, unsigned short flags,
 863                             nodemask_t *nodes)
 864{
 865        struct mempolicy *new, *old;
 866        NODEMASK_SCRATCH(scratch);
 867        int ret;
 868
 869        if (!scratch)
 870                return -ENOMEM;
 871
 872        new = mpol_new(mode, flags, nodes);
 873        if (IS_ERR(new)) {
 874                ret = PTR_ERR(new);
 875                goto out;
 876        }
 877
 878        ret = mpol_set_nodemask(new, nodes, scratch);
 879        if (ret) {
 880                mpol_put(new);
 881                goto out;
 882        }
 883        task_lock(current);
 884        old = current->mempolicy;
 885        current->mempolicy = new;
 886        if (new && new->mode == MPOL_INTERLEAVE)
 887                current->il_prev = MAX_NUMNODES-1;
 888        task_unlock(current);
 889        mpol_put(old);
 890        ret = 0;
 891out:
 892        NODEMASK_SCRATCH_FREE(scratch);
 893        return ret;
 894}
 895
 896/*
 897 * Return nodemask for policy for get_mempolicy() query
 898 *
 899 * Called with task's alloc_lock held
 900 */
 901static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
 902{
 903        nodes_clear(*nodes);
 904        if (p == &default_policy)
 905                return;
 906
 907        switch (p->mode) {
 908        case MPOL_BIND:
 909        case MPOL_INTERLEAVE:
 910                *nodes = p->v.nodes;
 911                break;
 912        case MPOL_PREFERRED:
 913                if (!(p->flags & MPOL_F_LOCAL))
 914                        node_set(p->v.preferred_node, *nodes);
 915                /* else return empty node mask for local allocation */
 916                break;
 917        default:
 918                BUG();
 919        }
 920}
 921
 922static int lookup_node(struct mm_struct *mm, unsigned long addr)
 923{
 924        struct page *p = NULL;
 925        int err;
 926
 927        int locked = 1;
 928        err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
 929        if (err > 0) {
 930                err = page_to_nid(p);
 931                put_page(p);
 932        }
 933        if (locked)
 934                mmap_read_unlock(mm);
 935        return err;
 936}
 937
 938/* Retrieve NUMA policy */
 939static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 940                             unsigned long addr, unsigned long flags)
 941{
 942        int err;
 943        struct mm_struct *mm = current->mm;
 944        struct vm_area_struct *vma = NULL;
 945        struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
 946
 947        if (flags &
 948                ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
 949                return -EINVAL;
 950
 951        if (flags & MPOL_F_MEMS_ALLOWED) {
 952                if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
 953                        return -EINVAL;
 954                *policy = 0;    /* just so it's initialized */
 955                task_lock(current);
 956                *nmask  = cpuset_current_mems_allowed;
 957                task_unlock(current);
 958                return 0;
 959        }
 960
 961        if (flags & MPOL_F_ADDR) {
 962                /*
 963                 * Do NOT fall back to task policy if the
 964                 * vma/shared policy at addr is NULL.  We
 965                 * want to return MPOL_DEFAULT in this case.
 966                 */
 967                mmap_read_lock(mm);
 968                vma = find_vma_intersection(mm, addr, addr+1);
 969                if (!vma) {
 970                        mmap_read_unlock(mm);
 971                        return -EFAULT;
 972                }
 973                if (vma->vm_ops && vma->vm_ops->get_policy)
 974                        pol = vma->vm_ops->get_policy(vma, addr);
 975                else
 976                        pol = vma->vm_policy;
 977        } else if (addr)
 978                return -EINVAL;
 979
 980        if (!pol)
 981                pol = &default_policy;  /* indicates default behavior */
 982
 983        if (flags & MPOL_F_NODE) {
 984                if (flags & MPOL_F_ADDR) {
 985                        /*
 986                         * Take a refcount on the mpol, lookup_node()
 987                         * wil drop the mmap_lock, so after calling
 988                         * lookup_node() only "pol" remains valid, "vma"
 989                         * is stale.
 990                         */
 991                        pol_refcount = pol;
 992                        vma = NULL;
 993                        mpol_get(pol);
 994                        err = lookup_node(mm, addr);
 995                        if (err < 0)
 996                                goto out;
 997                        *policy = err;
 998                } else if (pol == current->mempolicy &&
 999                                pol->mode == MPOL_INTERLEAVE) {
1000                        *policy = next_node_in(current->il_prev, pol->v.nodes);
1001                } else {
1002                        err = -EINVAL;
1003                        goto out;
1004                }
1005        } else {
1006                *policy = pol == &default_policy ? MPOL_DEFAULT :
1007                                                pol->mode;
1008                /*
1009                 * Internal mempolicy flags must be masked off before exposing
1010                 * the policy to userspace.
1011                 */
1012                *policy |= (pol->flags & MPOL_MODE_FLAGS);
1013        }
1014
1015        err = 0;
1016        if (nmask) {
1017                if (mpol_store_user_nodemask(pol)) {
1018                        *nmask = pol->w.user_nodemask;
1019                } else {
1020                        task_lock(current);
1021                        get_policy_nodemask(pol, nmask);
1022                        task_unlock(current);
1023                }
1024        }
1025
1026 out:
1027        mpol_cond_put(pol);
1028        if (vma)
1029                mmap_read_unlock(mm);
1030        if (pol_refcount)
1031                mpol_put(pol_refcount);
1032        return err;
1033}
1034
1035#ifdef CONFIG_MIGRATION
1036/*
1037 * page migration, thp tail pages can be passed.
1038 */
1039static int migrate_page_add(struct page *page, struct list_head *pagelist,
1040                                unsigned long flags)
1041{
1042        struct page *head = compound_head(page);
1043        /*
1044         * Avoid migrating a page that is shared with others.
1045         */
1046        if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
1047                if (!isolate_lru_page(head)) {
1048                        list_add_tail(&head->lru, pagelist);
1049                        mod_node_page_state(page_pgdat(head),
1050                                NR_ISOLATED_ANON + page_is_file_lru(head),
1051                                thp_nr_pages(head));
1052                } else if (flags & MPOL_MF_STRICT) {
1053                        /*
1054                         * Non-movable page may reach here.  And, there may be
1055                         * temporary off LRU pages or non-LRU movable pages.
1056                         * Treat them as unmovable pages since they can't be
1057                         * isolated, so they can't be moved at the moment.  It
1058                         * should return -EIO for this case too.
1059                         */
1060                        return -EIO;
1061                }
1062        }
1063
1064        return 0;
1065}
1066
1067/*
1068 * Migrate pages from one node to a target node.
1069 * Returns error or the number of pages not migrated.
1070 */
1071static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1072                           int flags)
1073{
1074        nodemask_t nmask;
1075        LIST_HEAD(pagelist);
1076        int err = 0;
1077        struct migration_target_control mtc = {
1078                .nid = dest,
1079                .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1080        };
1081
1082        nodes_clear(nmask);
1083        node_set(source, nmask);
1084
1085        /*
1086         * This does not "check" the range but isolates all pages that
1087         * need migration.  Between passing in the full user address
1088         * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1089         */
1090        VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1091        queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
1092                        flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1093
1094        if (!list_empty(&pagelist)) {
1095                err = migrate_pages(&pagelist, alloc_migration_target, NULL,
1096                                (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
1097                if (err)
1098                        putback_movable_pages(&pagelist);
1099        }
1100
1101        return err;
1102}
1103
1104/*
1105 * Move pages between the two nodesets so as to preserve the physical
1106 * layout as much as possible.
1107 *
1108 * Returns the number of page that could not be moved.
1109 */
1110int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1111                     const nodemask_t *to, int flags)
1112{
1113        int busy = 0;
1114        int err;
1115        nodemask_t tmp;
1116
1117        err = migrate_prep();
1118        if (err)
1119                return err;
1120
1121        mmap_read_lock(mm);
1122
1123        /*
1124         * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1125         * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
1126         * bit in 'tmp', and return that <source, dest> pair for migration.
1127         * The pair of nodemasks 'to' and 'from' define the map.
1128         *
1129         * If no pair of bits is found that way, fallback to picking some
1130         * pair of 'source' and 'dest' bits that are not the same.  If the
1131         * 'source' and 'dest' bits are the same, this represents a node
1132         * that will be migrating to itself, so no pages need move.
1133         *
1134         * If no bits are left in 'tmp', or if all remaining bits left
1135         * in 'tmp' correspond to the same bit in 'to', return false
1136         * (nothing left to migrate).
1137         *
1138         * This lets us pick a pair of nodes to migrate between, such that
1139         * if possible the dest node is not already occupied by some other
1140         * source node, minimizing the risk of overloading the memory on a
1141         * node that would happen if we migrated incoming memory to a node
1142         * before migrating outgoing memory source that same node.
1143         *
1144         * A single scan of tmp is sufficient.  As we go, we remember the
1145         * most recent <s, d> pair that moved (s != d).  If we find a pair
1146         * that not only moved, but what's better, moved to an empty slot
1147         * (d is not set in tmp), then we break out then, with that pair.
1148         * Otherwise when we finish scanning from_tmp, we at least have the
1149         * most recent <s, d> pair that moved.  If we get all the way through
1150         * the scan of tmp without finding any node that moved, much less
1151         * moved to an empty node, then there is nothing left worth migrating.
1152         */
1153
1154        tmp = *from;
1155        while (!nodes_empty(tmp)) {
1156                int s,d;
1157                int source = NUMA_NO_NODE;
1158                int dest = 0;
1159
1160                for_each_node_mask(s, tmp) {
1161
1162                        /*
1163                         * do_migrate_pages() tries to maintain the relative
1164                         * node relationship of the pages established between
1165                         * threads and memory areas.
1166                         *
1167                         * However if the number of source nodes is not equal to
1168                         * the number of destination nodes we can not preserve
1169                         * this node relative relationship.  In that case, skip
1170                         * copying memory from a node that is in the destination
1171                         * mask.
1172                         *
1173                         * Example: [2,3,4] -> [3,4,5] moves everything.
1174                         *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1175                         */
1176
1177                        if ((nodes_weight(*from) != nodes_weight(*to)) &&
1178                                                (node_isset(s, *to)))
1179                                continue;
1180
1181                        d = node_remap(s, *from, *to);
1182                        if (s == d)
1183                                continue;
1184
1185                        source = s;     /* Node moved. Memorize */
1186                        dest = d;
1187
1188                        /* dest not in remaining from nodes? */
1189                        if (!node_isset(dest, tmp))
1190                                break;
1191                }
1192                if (source == NUMA_NO_NODE)
1193                        break;
1194
1195                node_clear(source, tmp);
1196                err = migrate_to_node(mm, source, dest, flags);
1197                if (err > 0)
1198                        busy += err;
1199                if (err < 0)
1200                        break;
1201        }
1202        mmap_read_unlock(mm);
1203        if (err < 0)
1204                return err;
1205        return busy;
1206
1207}
1208
1209/*
1210 * Allocate a new page for page migration based on vma policy.
1211 * Start by assuming the page is mapped by the same vma as contains @start.
1212 * Search forward from there, if not.  N.B., this assumes that the
1213 * list of pages handed to migrate_pages()--which is how we get here--
1214 * is in virtual address order.
1215 */
1216static struct page *new_page(struct page *page, unsigned long start)
1217{
1218        struct vm_area_struct *vma;
1219        unsigned long address;
1220
1221        vma = find_vma(current->mm, start);
1222        while (vma) {
1223                address = page_address_in_vma(page, vma);
1224                if (address != -EFAULT)
1225                        break;
1226                vma = vma->vm_next;
1227        }
1228
1229        if (PageHuge(page)) {
1230                return alloc_huge_page_vma(page_hstate(compound_head(page)),
1231                                vma, address);
1232        } else if (PageTransHuge(page)) {
1233                struct page *thp;
1234
1235                thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1236                                         HPAGE_PMD_ORDER);
1237                if (!thp)
1238                        return NULL;
1239                prep_transhuge_page(thp);
1240                return thp;
1241        }
1242        /*
1243         * if !vma, alloc_page_vma() will use task or system default policy
1244         */
1245        return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
1246                        vma, address);
1247}
1248#else
1249
1250static int migrate_page_add(struct page *page, struct list_head *pagelist,
1251                                unsigned long flags)
1252{
1253        return -EIO;
1254}
1255
1256int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1257                     const nodemask_t *to, int flags)
1258{
1259        return -ENOSYS;
1260}
1261
1262static struct page *new_page(struct page *page, unsigned long start)
1263{
1264        return NULL;
1265}
1266#endif
1267
1268static long do_mbind(unsigned long start, unsigned long len,
1269                     unsigned short mode, unsigned short mode_flags,
1270                     nodemask_t *nmask, unsigned long flags)
1271{
1272        struct mm_struct *mm = current->mm;
1273        struct mempolicy *new;
1274        unsigned long end;
1275        int err;
1276        int ret;
1277        LIST_HEAD(pagelist);
1278
1279        if (flags & ~(unsigned long)MPOL_MF_VALID)
1280                return -EINVAL;
1281        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1282                return -EPERM;
1283
1284        if (start & ~PAGE_MASK)
1285                return -EINVAL;
1286
1287        if (mode == MPOL_DEFAULT)
1288                flags &= ~MPOL_MF_STRICT;
1289
1290        len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1291        end = start + len;
1292
1293        if (end < start)
1294                return -EINVAL;
1295        if (end == start)
1296                return 0;
1297
1298        new = mpol_new(mode, mode_flags, nmask);
1299        if (IS_ERR(new))
1300                return PTR_ERR(new);
1301
1302        if (flags & MPOL_MF_LAZY)
1303                new->flags |= MPOL_F_MOF;
1304
1305        /*
1306         * If we are using the default policy then operation
1307         * on discontinuous address spaces is okay after all
1308         */
1309        if (!new)
1310                flags |= MPOL_MF_DISCONTIG_OK;
1311
1312        pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1313                 start, start + len, mode, mode_flags,
1314                 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1315
1316        if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1317
1318                err = migrate_prep();
1319                if (err)
1320                        goto mpol_out;
1321        }
1322        {
1323                NODEMASK_SCRATCH(scratch);
1324                if (scratch) {
1325                        mmap_write_lock(mm);
1326                        err = mpol_set_nodemask(new, nmask, scratch);
1327                        if (err)
1328                                mmap_write_unlock(mm);
1329                } else
1330                        err = -ENOMEM;
1331                NODEMASK_SCRATCH_FREE(scratch);
1332        }
1333        if (err)
1334                goto mpol_out;
1335
1336        ret = queue_pages_range(mm, start, end, nmask,
1337                          flags | MPOL_MF_INVERT, &pagelist);
1338
1339        if (ret < 0) {
1340                err = ret;
1341                goto up_out;
1342        }
1343
1344        err = mbind_range(mm, start, end, new);
1345
1346        if (!err) {
1347                int nr_failed = 0;
1348
1349                if (!list_empty(&pagelist)) {
1350                        WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1351                        nr_failed = migrate_pages(&pagelist, new_page, NULL,
1352                                start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1353                        if (nr_failed)
1354                                putback_movable_pages(&pagelist);
1355                }
1356
1357                if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
1358                        err = -EIO;
1359        } else {
1360up_out:
1361                if (!list_empty(&pagelist))
1362                        putback_movable_pages(&pagelist);
1363        }
1364
1365        mmap_write_unlock(mm);
1366mpol_out:
1367        mpol_put(new);
1368        return err;
1369}
1370
1371/*
1372 * User space interface with variable sized bitmaps for nodelists.
1373 */
1374
1375/* Copy a node mask from user space. */
1376static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1377                     unsigned long maxnode)
1378{
1379        unsigned long k;
1380        unsigned long t;
1381        unsigned long nlongs;
1382        unsigned long endmask;
1383
1384        --maxnode;
1385        nodes_clear(*nodes);
1386        if (maxnode == 0 || !nmask)
1387                return 0;
1388        if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1389                return -EINVAL;
1390
1391        nlongs = BITS_TO_LONGS(maxnode);
1392        if ((maxnode % BITS_PER_LONG) == 0)
1393                endmask = ~0UL;
1394        else
1395                endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1396
1397        /*
1398         * When the user specified more nodes than supported just check
1399         * if the non supported part is all zero.
1400         *
1401         * If maxnode have more longs than MAX_NUMNODES, check
1402         * the bits in that area first. And then go through to
1403         * check the rest bits which equal or bigger than MAX_NUMNODES.
1404         * Otherwise, just check bits [MAX_NUMNODES, maxnode).
1405         */
1406        if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1407                for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1408                        if (get_user(t, nmask + k))
1409                                return -EFAULT;
1410                        if (k == nlongs - 1) {
1411                                if (t & endmask)
1412                                        return -EINVAL;
1413                        } else if (t)
1414                                return -EINVAL;
1415                }
1416                nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1417                endmask = ~0UL;
1418        }
1419
1420        if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
1421                unsigned long valid_mask = endmask;
1422
1423                valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1424                if (get_user(t, nmask + nlongs - 1))
1425                        return -EFAULT;
1426                if (t & valid_mask)
1427                        return -EINVAL;
1428        }
1429
1430        if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1431                return -EFAULT;
1432        nodes_addr(*nodes)[nlongs-1] &= endmask;
1433        return 0;
1434}
1435
1436/* Copy a kernel node mask to user space */
1437static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1438                              nodemask_t *nodes)
1439{
1440        unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1441        unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1442
1443        if (copy > nbytes) {
1444                if (copy > PAGE_SIZE)
1445                        return -EINVAL;
1446                if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1447                        return -EFAULT;
1448                copy = nbytes;
1449        }
1450        return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1451}
1452
1453static long kernel_mbind(unsigned long start, unsigned long len,
1454                         unsigned long mode, const unsigned long __user *nmask,
1455                         unsigned long maxnode, unsigned int flags)
1456{
1457        nodemask_t nodes;
1458        int err;
1459        unsigned short mode_flags;
1460
1461        start = untagged_addr(start);
1462        mode_flags = mode & MPOL_MODE_FLAGS;
1463        mode &= ~MPOL_MODE_FLAGS;
1464        if (mode >= MPOL_MAX)
1465                return -EINVAL;
1466        if ((mode_flags & MPOL_F_STATIC_NODES) &&
1467            (mode_flags & MPOL_F_RELATIVE_NODES))
1468                return -EINVAL;
1469        err = get_nodes(&nodes, nmask, maxnode);
1470        if (err)
1471                return err;
1472        return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1473}
1474
1475SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1476                unsigned long, mode, const unsigned long __user *, nmask,
1477                unsigned long, maxnode, unsigned int, flags)
1478{
1479        return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1480}
1481
1482/* Set the process memory policy */
1483static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1484                                 unsigned long maxnode)
1485{
1486        int err;
1487        nodemask_t nodes;
1488        unsigned short flags;
1489
1490        flags = mode & MPOL_MODE_FLAGS;
1491        mode &= ~MPOL_MODE_FLAGS;
1492        if ((unsigned int)mode >= MPOL_MAX)
1493                return -EINVAL;
1494        if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1495                return -EINVAL;
1496        err = get_nodes(&nodes, nmask, maxnode);
1497        if (err)
1498                return err;
1499        return do_set_mempolicy(mode, flags, &nodes);
1500}
1501
1502SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1503                unsigned long, maxnode)
1504{
1505        return kernel_set_mempolicy(mode, nmask, maxnode);
1506}
1507
1508static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1509                                const unsigned long __user *old_nodes,
1510                                const unsigned long __user *new_nodes)
1511{
1512        struct mm_struct *mm = NULL;
1513        struct task_struct *task;
1514        nodemask_t task_nodes;
1515        int err;
1516        nodemask_t *old;
1517        nodemask_t *new;
1518        NODEMASK_SCRATCH(scratch);
1519
1520        if (!scratch)
1521                return -ENOMEM;
1522
1523        old = &scratch->mask1;
1524        new = &scratch->mask2;
1525
1526        err = get_nodes(old, old_nodes, maxnode);
1527        if (err)
1528                goto out;
1529
1530        err = get_nodes(new, new_nodes, maxnode);
1531        if (err)
1532                goto out;
1533
1534        /* Find the mm_struct */
1535        rcu_read_lock();
1536        task = pid ? find_task_by_vpid(pid) : current;
1537        if (!task) {
1538                rcu_read_unlock();
1539                err = -ESRCH;
1540                goto out;
1541        }
1542        get_task_struct(task);
1543
1544        err = -EINVAL;
1545
1546        /*
1547         * Check if this process has the right to modify the specified process.
1548         * Use the regular "ptrace_may_access()" checks.
1549         */
1550        if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1551                rcu_read_unlock();
1552                err = -EPERM;
1553                goto out_put;
1554        }
1555        rcu_read_unlock();
1556
1557        task_nodes = cpuset_mems_allowed(task);
1558        /* Is the user allowed to access the target nodes? */
1559        if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1560                err = -EPERM;
1561                goto out_put;
1562        }
1563
1564        task_nodes = cpuset_mems_allowed(current);
1565        nodes_and(*new, *new, task_nodes);
1566        if (nodes_empty(*new))
1567                goto out_put;
1568
1569        err = security_task_movememory(task);
1570        if (err)
1571                goto out_put;
1572
1573        mm = get_task_mm(task);
1574        put_task_struct(task);
1575
1576        if (!mm) {
1577                err = -EINVAL;
1578                goto out;
1579        }
1580
1581        err = do_migrate_pages(mm, old, new,
1582                capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1583
1584        mmput(mm);
1585out:
1586        NODEMASK_SCRATCH_FREE(scratch);
1587
1588        return err;
1589
1590out_put:
1591        put_task_struct(task);
1592        goto out;
1593
1594}
1595
1596SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1597                const unsigned long __user *, old_nodes,
1598                const unsigned long __user *, new_nodes)
1599{
1600        return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1601}
1602
1603
1604/* Retrieve NUMA policy */
1605static int kernel_get_mempolicy(int __user *policy,
1606                                unsigned long __user *nmask,
1607                                unsigned long maxnode,
1608                                unsigned long addr,
1609                                unsigned long flags)
1610{
1611        int err;
1612        int pval;
1613        nodemask_t nodes;
1614
1615        if (nmask != NULL && maxnode < nr_node_ids)
1616                return -EINVAL;
1617
1618        addr = untagged_addr(addr);
1619
1620        err = do_get_mempolicy(&pval, &nodes, addr, flags);
1621
1622        if (err)
1623                return err;
1624
1625        if (policy && put_user(pval, policy))
1626                return -EFAULT;
1627
1628        if (nmask)
1629                err = copy_nodes_to_user(nmask, maxnode, &nodes);
1630
1631        return err;
1632}
1633
1634SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1635                unsigned long __user *, nmask, unsigned long, maxnode,
1636                unsigned long, addr, unsigned long, flags)
1637{
1638        return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1639}
1640
1641#ifdef CONFIG_COMPAT
1642
1643COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1644                       compat_ulong_t __user *, nmask,
1645                       compat_ulong_t, maxnode,
1646                       compat_ulong_t, addr, compat_ulong_t, flags)
1647{
1648        long err;
1649        unsigned long __user *nm = NULL;
1650        unsigned long nr_bits, alloc_size;
1651        DECLARE_BITMAP(bm, MAX_NUMNODES);
1652
1653        nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
1654        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1655
1656        if (nmask)
1657                nm = compat_alloc_user_space(alloc_size);
1658
1659        err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1660
1661        if (!err && nmask) {
1662                unsigned long copy_size;
1663                copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1664                err = copy_from_user(bm, nm, copy_size);
1665                /* ensure entire bitmap is zeroed */
1666                err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1667                err |= compat_put_bitmap(nmask, bm, nr_bits);
1668        }
1669
1670        return err;
1671}
1672
1673COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1674                       compat_ulong_t, maxnode)
1675{
1676        unsigned long __user *nm = NULL;
1677        unsigned long nr_bits, alloc_size;
1678        DECLARE_BITMAP(bm, MAX_NUMNODES);
1679
1680        nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1681        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1682
1683        if (nmask) {
1684                if (compat_get_bitmap(bm, nmask, nr_bits))
1685                        return -EFAULT;
1686                nm = compat_alloc_user_space(alloc_size);
1687                if (copy_to_user(nm, bm, alloc_size))
1688                        return -EFAULT;
1689        }
1690
1691        return kernel_set_mempolicy(mode, nm, nr_bits+1);
1692}
1693
1694COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1695                       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1696                       compat_ulong_t, maxnode, compat_ulong_t, flags)
1697{
1698        unsigned long __user *nm = NULL;
1699        unsigned long nr_bits, alloc_size;
1700        nodemask_t bm;
1701
1702        nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1703        alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1704
1705        if (nmask) {
1706                if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1707                        return -EFAULT;
1708                nm = compat_alloc_user_space(alloc_size);
1709                if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1710                        return -EFAULT;
1711        }
1712
1713        return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
1714}
1715
1716COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
1717                       compat_ulong_t, maxnode,
1718                       const compat_ulong_t __user *, old_nodes,
1719                       const compat_ulong_t __user *, new_nodes)
1720{
1721        unsigned long __user *old = NULL;
1722        unsigned long __user *new = NULL;
1723        nodemask_t tmp_mask;
1724        unsigned long nr_bits;
1725        unsigned long size;
1726
1727        nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
1728        size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1729        if (old_nodes) {
1730                if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
1731                        return -EFAULT;
1732                old = compat_alloc_user_space(new_nodes ? size * 2 : size);
1733                if (new_nodes)
1734                        new = old + size / sizeof(unsigned long);
1735                if (copy_to_user(old, nodes_addr(tmp_mask), size))
1736                        return -EFAULT;
1737        }
1738        if (new_nodes) {
1739                if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
1740                        return -EFAULT;
1741                if (new == NULL)
1742                        new = compat_alloc_user_space(size);
1743                if (copy_to_user(new, nodes_addr(tmp_mask), size))
1744                        return -EFAULT;
1745        }
1746        return kernel_migrate_pages(pid, nr_bits + 1, old, new);
1747}
1748
1749#endif /* CONFIG_COMPAT */
1750
1751bool vma_migratable(struct vm_area_struct *vma)
1752{
1753        if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1754                return false;
1755
1756        /*
1757         * DAX device mappings require predictable access latency, so avoid
1758         * incurring periodic faults.
1759         */
1760        if (vma_is_dax(vma))
1761                return false;
1762
1763        if (is_vm_hugetlb_page(vma) &&
1764                !hugepage_migration_supported(hstate_vma(vma)))
1765                return false;
1766
1767        /*
1768         * Migration allocates pages in the highest zone. If we cannot
1769         * do so then migration (at least from node to node) is not
1770         * possible.
1771         */
1772        if (vma->vm_file &&
1773                gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1774                        < policy_zone)
1775                return false;
1776        return true;
1777}
1778
1779struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1780                                                unsigned long addr)
1781{
1782        struct mempolicy *pol = NULL;
1783
1784        if (vma) {
1785                if (vma->vm_ops && vma->vm_ops->get_policy) {
1786                        pol = vma->vm_ops->get_policy(vma, addr);
1787                } else if (vma->vm_policy) {
1788                        pol = vma->vm_policy;
1789
1790                        /*
1791                         * shmem_alloc_page() passes MPOL_F_SHARED policy with
1792                         * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1793                         * count on these policies which will be dropped by
1794                         * mpol_cond_put() later
1795                         */
1796                        if (mpol_needs_cond_ref(pol))
1797                                mpol_get(pol);
1798                }
1799        }
1800
1801        return pol;
1802}
1803
1804/*
1805 * get_vma_policy(@vma, @addr)
1806 * @vma: virtual memory area whose policy is sought
1807 * @addr: address in @vma for shared policy lookup
1808 *
1809 * Returns effective policy for a VMA at specified address.
1810 * Falls back to current->mempolicy or system default policy, as necessary.
1811 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1812 * count--added by the get_policy() vm_op, as appropriate--to protect against
1813 * freeing by another task.  It is the caller's responsibility to free the
1814 * extra reference for shared policies.
1815 */
1816static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1817                                                unsigned long addr)
1818{
1819        struct mempolicy *pol = __get_vma_policy(vma, addr);
1820
1821        if (!pol)
1822                pol = get_task_policy(current);
1823
1824        return pol;
1825}
1826
1827bool vma_policy_mof(struct vm_area_struct *vma)
1828{
1829        struct mempolicy *pol;
1830
1831        if (vma->vm_ops && vma->vm_ops->get_policy) {
1832                bool ret = false;
1833
1834                pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1835                if (pol && (pol->flags & MPOL_F_MOF))
1836                        ret = true;
1837                mpol_cond_put(pol);
1838
1839                return ret;
1840        }
1841
1842        pol = vma->vm_policy;
1843        if (!pol)
1844                pol = get_task_policy(current);
1845
1846        return pol->flags & MPOL_F_MOF;
1847}
1848
1849static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1850{
1851        enum zone_type dynamic_policy_zone = policy_zone;
1852
1853        BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1854
1855        /*
1856         * if policy->v.nodes has movable memory only,
1857         * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only.
1858         *
1859         * policy->v.nodes is intersect with node_states[N_MEMORY].
1860         * so if the following test faile, it implies
1861         * policy->v.nodes has movable memory only.
1862         */
1863        if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1864                dynamic_policy_zone = ZONE_MOVABLE;
1865
1866        return zone >= dynamic_policy_zone;
1867}
1868
1869/*
1870 * Return a nodemask representing a mempolicy for filtering nodes for
1871 * page allocation
1872 */
1873nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1874{
1875        /* Lower zones don't get a nodemask applied for MPOL_BIND */
1876        if (unlikely(policy->mode == MPOL_BIND) &&
1877                        apply_policy_zone(policy, gfp_zone(gfp)) &&
1878                        cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1879                return &policy->v.nodes;
1880
1881        return NULL;
1882}
1883
1884/* Return the node id preferred by the given mempolicy, or the given id */
1885static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
1886{
1887        if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
1888                nd = policy->v.preferred_node;
1889        else {
1890                /*
1891                 * __GFP_THISNODE shouldn't even be used with the bind policy
1892                 * because we might easily break the expectation to stay on the
1893                 * requested node and not break the policy.
1894                 */
1895                WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1896        }
1897
1898        return nd;
1899}
1900
1901/* Do dynamic interleaving for a process */
1902static unsigned interleave_nodes(struct mempolicy *policy)
1903{
1904        unsigned next;
1905        struct task_struct *me = current;
1906
1907        next = next_node_in(me->il_prev, policy->v.nodes);
1908        if (next < MAX_NUMNODES)
1909                me->il_prev = next;
1910        return next;
1911}
1912
1913/*
1914 * Depending on the memory policy provide a node from which to allocate the
1915 * next slab entry.
1916 */
1917unsigned int mempolicy_slab_node(void)
1918{
1919        struct mempolicy *policy;
1920        int node = numa_mem_id();
1921
1922        if (in_interrupt())
1923                return node;
1924
1925        policy = current->mempolicy;
1926        if (!policy || policy->flags & MPOL_F_LOCAL)
1927                return node;
1928
1929        switch (policy->mode) {
1930        case MPOL_PREFERRED:
1931                /*
1932                 * handled MPOL_F_LOCAL above
1933                 */
1934                return policy->v.preferred_node;
1935
1936        case MPOL_INTERLEAVE:
1937                return interleave_nodes(policy);
1938
1939        case MPOL_BIND: {
1940                struct zoneref *z;
1941
1942                /*
1943                 * Follow bind policy behavior and start allocation at the
1944                 * first node.
1945                 */
1946                struct zonelist *zonelist;
1947                enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1948                zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1949                z = first_zones_zonelist(zonelist, highest_zoneidx,
1950                                                        &policy->v.nodes);
1951                return z->zone ? zone_to_nid(z->zone) : node;
1952        }
1953
1954        default:
1955                BUG();
1956        }
1957}
1958
1959/*
1960 * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1961 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1962 * number of present nodes.
1963 */
1964static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1965{
1966        unsigned nnodes = nodes_weight(pol->v.nodes);
1967        unsigned target;
1968        int i;
1969        int nid;
1970
1971        if (!nnodes)
1972                return numa_node_id();
1973        target = (unsigned int)n % nnodes;
1974        nid = first_node(pol->v.nodes);
1975        for (i = 0; i < target; i++)
1976                nid = next_node(nid, pol->v.nodes);
1977        return nid;
1978}
1979
1980/* Determine a node number for interleave */
1981static inline unsigned interleave_nid(struct mempolicy *pol,
1982                 struct vm_area_struct *vma, unsigned long addr, int shift)
1983{
1984        if (vma) {
1985                unsigned long off;
1986
1987                /*
1988                 * for small pages, there is no difference between
1989                 * shift and PAGE_SHIFT, so the bit-shift is safe.
1990                 * for huge pages, since vm_pgoff is in units of small
1991                 * pages, we need to shift off the always 0 bits to get
1992                 * a useful offset.
1993                 */
1994                BUG_ON(shift < PAGE_SHIFT);
1995                off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1996                off += (addr - vma->vm_start) >> shift;
1997                return offset_il_node(pol, off);
1998        } else
1999                return interleave_nodes(pol);
2000}
2001
2002#ifdef CONFIG_HUGETLBFS
2003/*
2004 * huge_node(@vma, @addr, @gfp_flags, @mpol)
2005 * @vma: virtual memory area whose policy is sought
2006 * @addr: address in @vma for shared policy lookup and interleave policy
2007 * @gfp_flags: for requested zone
2008 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2009 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
2010 *
2011 * Returns a nid suitable for a huge page allocation and a pointer
2012 * to the struct mempolicy for conditional unref after allocation.
2013 * If the effective policy is 'BIND, returns a pointer to the mempolicy's
2014 * @nodemask for filtering the zonelist.
2015 *
2016 * Must be protected by read_mems_allowed_begin()
2017 */
2018int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
2019                                struct mempolicy **mpol, nodemask_t **nodemask)
2020{
2021        int nid;
2022
2023        *mpol = get_vma_policy(vma, addr);
2024        *nodemask = NULL;       /* assume !MPOL_BIND */
2025
2026        if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
2027                nid = interleave_nid(*mpol, vma, addr,
2028                                        huge_page_shift(hstate_vma(vma)));
2029        } else {
2030                nid = policy_node(gfp_flags, *mpol, numa_node_id());
2031                if ((*mpol)->mode == MPOL_BIND)
2032                        *nodemask = &(*mpol)->v.nodes;
2033        }
2034        return nid;
2035}
2036
2037/*
2038 * init_nodemask_of_mempolicy
2039 *
2040 * If the current task's mempolicy is "default" [NULL], return 'false'
2041 * to indicate default policy.  Otherwise, extract the policy nodemask
2042 * for 'bind' or 'interleave' policy into the argument nodemask, or
2043 * initialize the argument nodemask to contain the single node for
2044 * 'preferred' or 'local' policy and return 'true' to indicate presence
2045 * of non-default mempolicy.
2046 *
2047 * We don't bother with reference counting the mempolicy [mpol_get/put]
2048 * because the current task is examining it's own mempolicy and a task's
2049 * mempolicy is only ever changed by the task itself.
2050 *
2051 * N.B., it is the caller's responsibility to free a returned nodemask.
2052 */
2053bool init_nodemask_of_mempolicy(nodemask_t *mask)
2054{
2055        struct mempolicy *mempolicy;
2056        int nid;
2057
2058        if (!(mask && current->mempolicy))
2059                return false;
2060
2061        task_lock(current);
2062        mempolicy = current->mempolicy;
2063        switch (mempolicy->mode) {
2064        case MPOL_PREFERRED:
2065                if (mempolicy->flags & MPOL_F_LOCAL)
2066                        nid = numa_node_id();
2067                else
2068                        nid = mempolicy->v.preferred_node;
2069                init_nodemask_of_node(mask, nid);
2070                break;
2071
2072        case MPOL_BIND:
2073        case MPOL_INTERLEAVE:
2074                *mask =  mempolicy->v.nodes;
2075                break;
2076
2077        default:
2078                BUG();
2079        }
2080        task_unlock(current);
2081
2082        return true;
2083}
2084#endif
2085
2086/*
2087 * mempolicy_nodemask_intersects
2088 *
2089 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
2090 * policy.  Otherwise, check for intersection between mask and the policy
2091 * nodemask for 'bind' or 'interleave' policy.  For 'perferred' or 'local'
2092 * policy, always return true since it may allocate elsewhere on fallback.
2093 *
2094 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2095 */
2096bool mempolicy_nodemask_intersects(struct task_struct *tsk,
2097                                        const nodemask_t *mask)
2098{
2099        struct mempolicy *mempolicy;
2100        bool ret = true;
2101
2102        if (!mask)
2103                return ret;
2104        task_lock(tsk);
2105        mempolicy = tsk->mempolicy;
2106        if (!mempolicy)
2107                goto out;
2108
2109        switch (mempolicy->mode) {
2110        case MPOL_PREFERRED:
2111                /*
2112                 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
2113                 * allocate from, they may fallback to other nodes when oom.
2114                 * Thus, it's possible for tsk to have allocated memory from
2115                 * nodes in mask.
2116                 */
2117                break;
2118        case MPOL_BIND:
2119        case MPOL_INTERLEAVE:
2120                ret = nodes_intersects(mempolicy->v.nodes, *mask);
2121                break;
2122        default:
2123                BUG();
2124        }
2125out:
2126        task_unlock(tsk);
2127        return ret;
2128}
2129
2130/* Allocate a page in interleaved policy.
2131   Own path because it needs to do special accounting. */
2132static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2133                                        unsigned nid)
2134{
2135        struct page *page;
2136
2137        page = __alloc_pages(gfp, order, nid);
2138        /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2139        if (!static_branch_likely(&vm_numa_stat_key))
2140                return page;
2141        if (page && page_to_nid(page) == nid) {
2142                preempt_disable();
2143                __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
2144                preempt_enable();
2145        }
2146        return page;
2147}
2148
2149/**
2150 *      alloc_pages_vma - Allocate a page for a VMA.
2151 *
2152 *      @gfp:
2153 *      %GFP_USER    user allocation.
2154 *      %GFP_KERNEL  kernel allocations,
2155 *      %GFP_HIGHMEM highmem/user allocations,
2156 *      %GFP_FS      allocation should not call back into a file system.
2157 *      %GFP_ATOMIC  don't sleep.
2158 *
2159 *      @order:Order of the GFP allocation.
2160 *      @vma:  Pointer to VMA or NULL if not available.
2161 *      @addr: Virtual Address of the allocation. Must be inside the VMA.
2162 *      @node: Which node to prefer for allocation (modulo policy).
2163 *      @hugepage: for hugepages try only the preferred node if possible
2164 *
2165 *      This function allocates a page from the kernel page pool and applies
2166 *      a NUMA policy associated with the VMA or the current process.
2167 *      When VMA is not NULL caller must read-lock the mmap_lock of the
2168 *      mm_struct of the VMA to prevent it from going away. Should be used for
2169 *      all allocations for pages that will be mapped into user space. Returns
2170 *      NULL when no page can be allocated.
2171 */
2172struct page *
2173alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2174                unsigned long addr, int node, bool hugepage)
2175{
2176        struct mempolicy *pol;
2177        struct page *page;
2178        int preferred_nid;
2179        nodemask_t *nmask;
2180
2181        pol = get_vma_policy(vma, addr);
2182
2183        if (pol->mode == MPOL_INTERLEAVE) {
2184                unsigned nid;
2185
2186                nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2187                mpol_cond_put(pol);
2188                page = alloc_page_interleave(gfp, order, nid);
2189                goto out;
2190        }
2191
2192        if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2193                int hpage_node = node;
2194
2195                /*
2196                 * For hugepage allocation and non-interleave policy which
2197                 * allows the current node (or other explicitly preferred
2198                 * node) we only try to allocate from the current/preferred
2199                 * node and don't fall back to other nodes, as the cost of
2200                 * remote accesses would likely offset THP benefits.
2201                 *
2202                 * If the policy is interleave, or does not allow the current
2203                 * node in its nodemask, we allocate the standard way.
2204                 */
2205                if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
2206                        hpage_node = pol->v.preferred_node;
2207
2208                nmask = policy_nodemask(gfp, pol);
2209                if (!nmask || node_isset(hpage_node, *nmask)) {
2210                        mpol_cond_put(pol);
2211                        /*
2212                         * First, try to allocate THP only on local node, but
2213                         * don't reclaim unnecessarily, just compact.
2214                         */
2215                        page = __alloc_pages_node(hpage_node,
2216                                gfp | __GFP_THISNODE | __GFP_NORETRY, order);
2217
2218                        /*
2219                         * If hugepage allocations are configured to always
2220                         * synchronous compact or the vma has been madvised
2221                         * to prefer hugepage backing, retry allowing remote
2222                         * memory with both reclaim and compact as well.
2223                         */
2224                        if (!page && (gfp & __GFP_DIRECT_RECLAIM))
2225                                page = __alloc_pages_node(hpage_node,
2226                                                                gfp, order);
2227
2228                        goto out;
2229                }
2230        }
2231
2232        nmask = policy_nodemask(gfp, pol);
2233        preferred_nid = policy_node(gfp, pol, node);
2234        page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
2235        mpol_cond_put(pol);
2236out:
2237        return page;
2238}
2239EXPORT_SYMBOL(alloc_pages_vma);
2240
2241/**
2242 *      alloc_pages_current - Allocate pages.
2243 *
2244 *      @gfp:
2245 *              %GFP_USER   user allocation,
2246 *              %GFP_KERNEL kernel allocation,
2247 *              %GFP_HIGHMEM highmem allocation,
2248 *              %GFP_FS     don't call back into a file system.
2249 *              %GFP_ATOMIC don't sleep.
2250 *      @order: Power of two of allocation size in pages. 0 is a single page.
2251 *
2252 *      Allocate a page from the kernel page pool.  When not in
2253 *      interrupt context and apply the current process NUMA policy.
2254 *      Returns NULL when no page can be allocated.
2255 */
2256struct page *alloc_pages_current(gfp_t gfp, unsigned order)
2257{
2258        struct mempolicy *pol = &default_policy;
2259        struct page *page;
2260
2261        if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2262                pol = get_task_policy(current);
2263
2264        /*
2265         * No reference counting needed for current->mempolicy
2266         * nor system default_policy
2267         */
2268        if (pol->mode == MPOL_INTERLEAVE)
2269                page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2270        else
2271                page = __alloc_pages_nodemask(gfp, order,
2272                                policy_node(gfp, pol, numa_node_id()),
2273                                policy_nodemask(gfp, pol));
2274
2275        return page;
2276}
2277EXPORT_SYMBOL(alloc_pages_current);
2278
2279int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2280{
2281        struct mempolicy *pol = mpol_dup(vma_policy(src));
2282
2283        if (IS_ERR(pol))
2284                return PTR_ERR(pol);
2285        dst->vm_policy = pol;
2286        return 0;
2287}
2288
2289/*
2290 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2291 * rebinds the mempolicy its copying by calling mpol_rebind_policy()
2292 * with the mems_allowed returned by cpuset_mems_allowed().  This
2293 * keeps mempolicies cpuset relative after its cpuset moves.  See
2294 * further kernel/cpuset.c update_nodemask().
2295 *
2296 * current's mempolicy may be rebinded by the other task(the task that changes
2297 * cpuset's mems), so we needn't do rebind work for current task.
2298 */
2299
2300/* Slow path of a mempolicy duplicate */
2301struct mempolicy *__mpol_dup(struct mempolicy *old)
2302{
2303        struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2304
2305        if (!new)
2306                return ERR_PTR(-ENOMEM);
2307
2308        /* task's mempolicy is protected by alloc_lock */
2309        if (old == current->mempolicy) {
2310                task_lock(current);
2311                *new = *old;
2312                task_unlock(current);
2313        } else
2314                *new = *old;
2315
2316        if (current_cpuset_is_being_rebound()) {
2317                nodemask_t mems = cpuset_mems_allowed(current);
2318                mpol_rebind_policy(new, &mems);
2319        }
2320        atomic_set(&new->refcnt, 1);
2321        return new;
2322}
2323
2324/* Slow path of a mempolicy comparison */
2325bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2326{
2327        if (!a || !b)
2328                return false;
2329        if (a->mode != b->mode)
2330                return false;
2331        if (a->flags != b->flags)
2332                return false;
2333        if (mpol_store_user_nodemask(a))
2334                if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2335                        return false;
2336
2337        switch (a->mode) {
2338        case MPOL_BIND:
2339        case MPOL_INTERLEAVE:
2340                return !!nodes_equal(a->v.nodes, b->v.nodes);
2341        case MPOL_PREFERRED:
2342                /* a's ->flags is the same as b's */
2343                if (a->flags & MPOL_F_LOCAL)
2344                        return true;
2345                return a->v.preferred_node == b->v.preferred_node;
2346        default:
2347                BUG();
2348                return false;
2349        }
2350}
2351
2352/*
2353 * Shared memory backing store policy support.
2354 *
2355 * Remember policies even when nobody has shared memory mapped.
2356 * The policies are kept in Red-Black tree linked from the inode.
2357 * They are protected by the sp->lock rwlock, which should be held
2358 * for any accesses to the tree.
2359 */
2360
2361/*
2362 * lookup first element intersecting start-end.  Caller holds sp->lock for
2363 * reading or for writing
2364 */
2365static struct sp_node *
2366sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2367{
2368        struct rb_node *n = sp->root.rb_node;
2369
2370        while (n) {
2371                struct sp_node *p = rb_entry(n, struct sp_node, nd);
2372
2373                if (start >= p->end)
2374                        n = n->rb_right;
2375                else if (end <= p->start)
2376                        n = n->rb_left;
2377                else
2378                        break;
2379        }
2380        if (!n)
2381                return NULL;
2382        for (;;) {
2383                struct sp_node *w = NULL;
2384                struct rb_node *prev = rb_prev(n);
2385                if (!prev)
2386                        break;
2387                w = rb_entry(prev, struct sp_node, nd);
2388                if (w->end <= start)
2389                        break;
2390                n = prev;
2391        }
2392        return rb_entry(n, struct sp_node, nd);
2393}
2394
2395/*
2396 * Insert a new shared policy into the list.  Caller holds sp->lock for
2397 * writing.
2398 */
2399static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2400{
2401        struct rb_node **p = &sp->root.rb_node;
2402        struct rb_node *parent = NULL;
2403        struct sp_node *nd;
2404
2405        while (*p) {
2406                parent = *p;
2407                nd = rb_entry(parent, struct sp_node, nd);
2408                if (new->start < nd->start)
2409                        p = &(*p)->rb_left;
2410                else if (new->end > nd->end)
2411                        p = &(*p)->rb_right;
2412                else
2413                        BUG();
2414        }
2415        rb_link_node(&new->nd, parent, p);
2416        rb_insert_color(&new->nd, &sp->root);
2417        pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2418                 new->policy ? new->policy->mode : 0);
2419}
2420
2421/* Find shared policy intersecting idx */
2422struct mempolicy *
2423mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2424{
2425        struct mempolicy *pol = NULL;
2426        struct sp_node *sn;
2427
2428        if (!sp->root.rb_node)
2429                return NULL;
2430        read_lock(&sp->lock);
2431        sn = sp_lookup(sp, idx, idx+1);
2432        if (sn) {
2433                mpol_get(sn->policy);
2434                pol = sn->policy;
2435        }
2436        read_unlock(&sp->lock);
2437        return pol;
2438}
2439
2440static void sp_free(struct sp_node *n)
2441{
2442        mpol_put(n->policy);
2443        kmem_cache_free(sn_cache, n);
2444}
2445
2446/**
2447 * mpol_misplaced - check whether current page node is valid in policy
2448 *
2449 * @page: page to be checked
2450 * @vma: vm area where page mapped
2451 * @addr: virtual address where page mapped
2452 *
2453 * Lookup current policy node id for vma,addr and "compare to" page's
2454 * node id.
2455 *
2456 * Returns:
2457 *      -1      - not misplaced, page is in the right node
2458 *      node    - node id where the page should be
2459 *
2460 * Policy determination "mimics" alloc_page_vma().
2461 * Called from fault path where we know the vma and faulting address.
2462 */
2463int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2464{
2465        struct mempolicy *pol;
2466        struct zoneref *z;
2467        int curnid = page_to_nid(page);
2468        unsigned long pgoff;
2469        int thiscpu = raw_smp_processor_id();
2470        int thisnid = cpu_to_node(thiscpu);
2471        int polnid = NUMA_NO_NODE;
2472        int ret = -1;
2473
2474        pol = get_vma_policy(vma, addr);
2475        if (!(pol->flags & MPOL_F_MOF))
2476                goto out;
2477
2478        switch (pol->mode) {
2479        case MPOL_INTERLEAVE:
2480                pgoff = vma->vm_pgoff;
2481                pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2482                polnid = offset_il_node(pol, pgoff);
2483                break;
2484
2485        case MPOL_PREFERRED:
2486                if (pol->flags & MPOL_F_LOCAL)
2487                        polnid = numa_node_id();
2488                else
2489                        polnid = pol->v.preferred_node;
2490                break;
2491
2492        case MPOL_BIND:
2493
2494                /*
2495                 * allows binding to multiple nodes.
2496                 * use current page if in policy nodemask,
2497                 * else select nearest allowed node, if any.
2498                 * If no allowed nodes, use current [!misplaced].
2499                 */
2500                if (node_isset(curnid, pol->v.nodes))
2501                        goto out;
2502                z = first_zones_zonelist(
2503                                node_zonelist(numa_node_id(), GFP_HIGHUSER),
2504                                gfp_zone(GFP_HIGHUSER),
2505                                &pol->v.nodes);
2506                polnid = zone_to_nid(z->zone);
2507                break;
2508
2509        default:
2510                BUG();
2511        }
2512
2513        /* Migrate the page towards the node whose CPU is referencing it */
2514        if (pol->flags & MPOL_F_MORON) {
2515                polnid = thisnid;
2516
2517                if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2518                        goto out;
2519        }
2520
2521        if (curnid != polnid)
2522                ret = polnid;
2523out:
2524        mpol_cond_put(pol);
2525
2526        return ret;
2527}
2528
2529/*
2530 * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2531 * dropped after task->mempolicy is set to NULL so that any allocation done as
2532 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2533 * policy.
2534 */
2535void mpol_put_task_policy(struct task_struct *task)
2536{
2537        struct mempolicy *pol;
2538
2539        task_lock(task);
2540        pol = task->mempolicy;
2541        task->mempolicy = NULL;
2542        task_unlock(task);
2543        mpol_put(pol);
2544}
2545
2546static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2547{
2548        pr_debug("deleting %lx-l%lx\n", n->start, n->end);
2549        rb_erase(&n->nd, &sp->root);
2550        sp_free(n);
2551}
2552
2553static void sp_node_init(struct sp_node *node, unsigned long start,
2554                        unsigned long end, struct mempolicy *pol)
2555{
2556        node->start = start;
2557        node->end = end;
2558        node->policy = pol;
2559}
2560
2561static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2562                                struct mempolicy *pol)
2563{
2564        struct sp_node *n;
2565        struct mempolicy *newpol;
2566
2567        n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2568        if (!n)
2569                return NULL;
2570
2571        newpol = mpol_dup(pol);
2572        if (IS_ERR(newpol)) {
2573                kmem_cache_free(sn_cache, n);
2574                return NULL;
2575        }
2576        newpol->flags |= MPOL_F_SHARED;
2577        sp_node_init(n, start, end, newpol);
2578
2579        return n;
2580}
2581
2582/* Replace a policy range. */
2583static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2584                                 unsigned long end, struct sp_node *new)
2585{
2586        struct sp_node *n;
2587        struct sp_node *n_new = NULL;
2588        struct mempolicy *mpol_new = NULL;
2589        int ret = 0;
2590
2591restart:
2592        write_lock(&sp->lock);
2593        n = sp_lookup(sp, start, end);
2594        /* Take care of old policies in the same range. */
2595        while (n && n->start < end) {
2596                struct rb_node *next = rb_next(&n->nd);
2597                if (n->start >= start) {
2598                        if (n->end <= end)
2599                                sp_delete(sp, n);
2600                        else
2601                                n->start = end;
2602                } else {
2603                        /* Old policy spanning whole new range. */
2604                        if (n->end > end) {
2605                                if (!n_new)
2606                                        goto alloc_new;
2607
2608                                *mpol_new = *n->policy;
2609                                atomic_set(&mpol_new->refcnt, 1);
2610                                sp_node_init(n_new, end, n->end, mpol_new);
2611                                n->end = start;
2612                                sp_insert(sp, n_new);
2613                                n_new = NULL;
2614                                mpol_new = NULL;
2615                                break;
2616                        } else
2617                                n->end = start;
2618                }
2619                if (!next)
2620                        break;
2621                n = rb_entry(next, struct sp_node, nd);
2622        }
2623        if (new)
2624                sp_insert(sp, new);
2625        write_unlock(&sp->lock);
2626        ret = 0;
2627
2628err_out:
2629        if (mpol_new)
2630                mpol_put(mpol_new);
2631        if (n_new)
2632                kmem_cache_free(sn_cache, n_new);
2633
2634        return ret;
2635
2636alloc_new:
2637        write_unlock(&sp->lock);
2638        ret = -ENOMEM;
2639        n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2640        if (!n_new)
2641                goto err_out;
2642        mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2643        if (!mpol_new)
2644                goto err_out;
2645        goto restart;
2646}
2647
2648/**
2649 * mpol_shared_policy_init - initialize shared policy for inode
2650 * @sp: pointer to inode shared policy
2651 * @mpol:  struct mempolicy to install
2652 *
2653 * Install non-NULL @mpol in inode's shared policy rb-tree.
2654 * On entry, the current task has a reference on a non-NULL @mpol.
2655 * This must be released on exit.
2656 * This is called at get_inode() calls and we can use GFP_KERNEL.
2657 */
2658void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2659{
2660        int ret;
2661
2662        sp->root = RB_ROOT;             /* empty tree == default mempolicy */
2663        rwlock_init(&sp->lock);
2664
2665        if (mpol) {
2666                struct vm_area_struct pvma;
2667                struct mempolicy *new;
2668                NODEMASK_SCRATCH(scratch);
2669
2670                if (!scratch)
2671                        goto put_mpol;
2672                /* contextualize the tmpfs mount point mempolicy */
2673                new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2674                if (IS_ERR(new))
2675                        goto free_scratch; /* no valid nodemask intersection */
2676
2677                task_lock(current);
2678                ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2679                task_unlock(current);
2680                if (ret)
2681                        goto put_new;
2682
2683                /* Create pseudo-vma that contains just the policy */
2684                vma_init(&pvma, NULL);
2685                pvma.vm_end = TASK_SIZE;        /* policy covers entire file */
2686                mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2687
2688put_new:
2689                mpol_put(new);                  /* drop initial ref */
2690free_scratch:
2691                NODEMASK_SCRATCH_FREE(scratch);
2692put_mpol:
2693                mpol_put(mpol); /* drop our incoming ref on sb mpol */
2694        }
2695}
2696
2697int mpol_set_shared_policy(struct shared_policy *info,
2698                        struct vm_area_struct *vma, struct mempolicy *npol)
2699{
2700        int err;
2701        struct sp_node *new = NULL;
2702        unsigned long sz = vma_pages(vma);
2703
2704        pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2705                 vma->vm_pgoff,
2706                 sz, npol ? npol->mode : -1,
2707                 npol ? npol->flags : -1,
2708                 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
2709
2710        if (npol) {
2711                new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2712                if (!new)
2713                        return -ENOMEM;
2714        }
2715        err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2716        if (err && new)
2717                sp_free(new);
2718        return err;
2719}
2720
2721/* Free a backing policy store on inode delete. */
2722void mpol_free_shared_policy(struct shared_policy *p)
2723{
2724        struct sp_node *n;
2725        struct rb_node *next;
2726
2727        if (!p->root.rb_node)
2728                return;
2729        write_lock(&p->lock);
2730        next = rb_first(&p->root);
2731        while (next) {
2732                n = rb_entry(next, struct sp_node, nd);
2733                next = rb_next(&n->nd);
2734                sp_delete(p, n);
2735        }
2736        write_unlock(&p->lock);
2737}
2738
2739#ifdef CONFIG_NUMA_BALANCING
2740static int __initdata numabalancing_override;
2741
2742static void __init check_numabalancing_enable(void)
2743{
2744        bool numabalancing_default = false;
2745
2746        if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2747                numabalancing_default = true;
2748
2749        /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2750        if (numabalancing_override)
2751                set_numabalancing_state(numabalancing_override == 1);
2752
2753        if (num_online_nodes() > 1 && !numabalancing_override) {
2754                pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2755                        numabalancing_default ? "Enabling" : "Disabling");
2756                set_numabalancing_state(numabalancing_default);
2757        }
2758}
2759
2760static int __init setup_numabalancing(char *str)
2761{
2762        int ret = 0;
2763        if (!str)
2764                goto out;
2765
2766        if (!strcmp(str, "enable")) {
2767                numabalancing_override = 1;
2768                ret = 1;
2769        } else if (!strcmp(str, "disable")) {
2770                numabalancing_override = -1;
2771                ret = 1;
2772        }
2773out:
2774        if (!ret)
2775                pr_warn("Unable to parse numa_balancing=\n");
2776
2777        return ret;
2778}
2779__setup("numa_balancing=", setup_numabalancing);
2780#else
2781static inline void __init check_numabalancing_enable(void)
2782{
2783}
2784#endif /* CONFIG_NUMA_BALANCING */
2785
2786/* assumes fs == KERNEL_DS */
2787void __init numa_policy_init(void)
2788{
2789        nodemask_t interleave_nodes;
2790        unsigned long largest = 0;
2791        int nid, prefer = 0;
2792
2793        policy_cache = kmem_cache_create("numa_policy",
2794                                         sizeof(struct mempolicy),
2795                                         0, SLAB_PANIC, NULL);
2796
2797        sn_cache = kmem_cache_create("shared_policy_node",
2798                                     sizeof(struct sp_node),
2799                                     0, SLAB_PANIC, NULL);
2800
2801        for_each_node(nid) {
2802                preferred_node_policy[nid] = (struct mempolicy) {
2803                        .refcnt = ATOMIC_INIT(1),
2804                        .mode = MPOL_PREFERRED,
2805                        .flags = MPOL_F_MOF | MPOL_F_MORON,
2806                        .v = { .preferred_node = nid, },
2807                };
2808        }
2809
2810        /*
2811         * Set interleaving policy for system init. Interleaving is only
2812         * enabled across suitably sized nodes (default is >= 16MB), or
2813         * fall back to the largest node if they're all smaller.
2814         */
2815        nodes_clear(interleave_nodes);
2816        for_each_node_state(nid, N_MEMORY) {
2817                unsigned long total_pages = node_present_pages(nid);
2818
2819                /* Preserve the largest node */
2820                if (largest < total_pages) {
2821                        largest = total_pages;
2822                        prefer = nid;
2823                }
2824
2825                /* Interleave this node? */
2826                if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2827                        node_set(nid, interleave_nodes);
2828        }
2829
2830        /* All too small, use the largest */
2831        if (unlikely(nodes_empty(interleave_nodes)))
2832                node_set(prefer, interleave_nodes);
2833
2834        if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2835                pr_err("%s: interleaving failed\n", __func__);
2836
2837        check_numabalancing_enable();
2838}
2839
2840/* Reset policy of current process to default */
2841void numa_default_policy(void)
2842{
2843        do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2844}
2845
2846/*
2847 * Parse and format mempolicy from/to strings
2848 */
2849
2850/*
2851 * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
2852 */
2853static const char * const policy_modes[] =
2854{
2855        [MPOL_DEFAULT]    = "default",
2856        [MPOL_PREFERRED]  = "prefer",
2857        [MPOL_BIND]       = "bind",
2858        [MPOL_INTERLEAVE] = "interleave",
2859        [MPOL_LOCAL]      = "local",
2860};
2861
2862
2863#ifdef CONFIG_TMPFS
2864/**
2865 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2866 * @str:  string containing mempolicy to parse
2867 * @mpol:  pointer to struct mempolicy pointer, returned on success.
2868 *
2869 * Format of input:
2870 *      <mode>[=<flags>][:<nodelist>]
2871 *
2872 * On success, returns 0, else 1
2873 */
2874int mpol_parse_str(char *str, struct mempolicy **mpol)
2875{
2876        struct mempolicy *new = NULL;
2877        unsigned short mode_flags;
2878        nodemask_t nodes;
2879        char *nodelist = strchr(str, ':');
2880        char *flags = strchr(str, '=');
2881        int err = 1, mode;
2882
2883        if (flags)
2884                *flags++ = '\0';        /* terminate mode string */
2885
2886        if (nodelist) {
2887                /* NUL-terminate mode or flags string */
2888                *nodelist++ = '\0';
2889                if (nodelist_parse(nodelist, nodes))
2890                        goto out;
2891                if (!nodes_subset(nodes, node_states[N_MEMORY]))
2892                        goto out;
2893        } else
2894                nodes_clear(nodes);
2895
2896        mode = match_string(policy_modes, MPOL_MAX, str);
2897        if (mode < 0)
2898                goto out;
2899
2900        switch (mode) {
2901        case MPOL_PREFERRED:
2902                /*
2903                 * Insist on a nodelist of one node only, although later
2904                 * we use first_node(nodes) to grab a single node, so here
2905                 * nodelist (or nodes) cannot be empty.
2906                 */
2907                if (nodelist) {
2908                        char *rest = nodelist;
2909                        while (isdigit(*rest))
2910                                rest++;
2911                        if (*rest)
2912                                goto out;
2913                        if (nodes_empty(nodes))
2914                                goto out;
2915                }
2916                break;
2917        case MPOL_INTERLEAVE:
2918                /*
2919                 * Default to online nodes with memory if no nodelist
2920                 */
2921                if (!nodelist)
2922                        nodes = node_states[N_MEMORY];
2923                break;
2924        case MPOL_LOCAL:
2925                /*
2926                 * Don't allow a nodelist;  mpol_new() checks flags
2927                 */
2928                if (nodelist)
2929                        goto out;
2930                mode = MPOL_PREFERRED;
2931                break;
2932        case MPOL_DEFAULT:
2933                /*
2934                 * Insist on a empty nodelist
2935                 */
2936                if (!nodelist)
2937                        err = 0;
2938                goto out;
2939        case MPOL_BIND:
2940                /*
2941                 * Insist on a nodelist
2942                 */
2943                if (!nodelist)
2944                        goto out;
2945        }
2946
2947        mode_flags = 0;
2948        if (flags) {
2949                /*
2950                 * Currently, we only support two mutually exclusive
2951                 * mode flags.
2952                 */
2953                if (!strcmp(flags, "static"))
2954                        mode_flags |= MPOL_F_STATIC_NODES;
2955                else if (!strcmp(flags, "relative"))
2956                        mode_flags |= MPOL_F_RELATIVE_NODES;
2957                else
2958                        goto out;
2959        }
2960
2961        new = mpol_new(mode, mode_flags, &nodes);
2962        if (IS_ERR(new))
2963                goto out;
2964
2965        /*
2966         * Save nodes for mpol_to_str() to show the tmpfs mount options
2967         * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2968         */
2969        if (mode != MPOL_PREFERRED)
2970                new->v.nodes = nodes;
2971        else if (nodelist)
2972                new->v.preferred_node = first_node(nodes);
2973        else
2974                new->flags |= MPOL_F_LOCAL;
2975
2976        /*
2977         * Save nodes for contextualization: this will be used to "clone"
2978         * the mempolicy in a specific context [cpuset] at a later time.
2979         */
2980        new->w.user_nodemask = nodes;
2981
2982        err = 0;
2983
2984out:
2985        /* Restore string for error message */
2986        if (nodelist)
2987                *--nodelist = ':';
2988        if (flags)
2989                *--flags = '=';
2990        if (!err)
2991                *mpol = new;
2992        return err;
2993}
2994#endif /* CONFIG_TMPFS */
2995
2996/**
2997 * mpol_to_str - format a mempolicy structure for printing
2998 * @buffer:  to contain formatted mempolicy string
2999 * @maxlen:  length of @buffer
3000 * @pol:  pointer to mempolicy to be formatted
3001 *
3002 * Convert @pol into a string.  If @buffer is too short, truncate the string.
3003 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3004 * longest flag, "relative", and to display at least a few node ids.
3005 */
3006void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
3007{
3008        char *p = buffer;
3009        nodemask_t nodes = NODE_MASK_NONE;
3010        unsigned short mode = MPOL_DEFAULT;
3011        unsigned short flags = 0;
3012
3013        if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
3014                mode = pol->mode;
3015                flags = pol->flags;
3016        }
3017
3018        switch (mode) {
3019        case MPOL_DEFAULT:
3020                break;
3021        case MPOL_PREFERRED:
3022                if (flags & MPOL_F_LOCAL)
3023                        mode = MPOL_LOCAL;
3024                else
3025                        node_set(pol->v.preferred_node, nodes);
3026                break;
3027        case MPOL_BIND:
3028        case MPOL_INTERLEAVE:
3029                nodes = pol->v.nodes;
3030                break;
3031        default:
3032                WARN_ON_ONCE(1);
3033                snprintf(p, maxlen, "unknown");
3034                return;
3035        }
3036
3037        p += snprintf(p, maxlen, "%s", policy_modes[mode]);
3038
3039        if (flags & MPOL_MODE_FLAGS) {
3040                p += snprintf(p, buffer + maxlen - p, "=");
3041
3042                /*
3043                 * Currently, the only defined flags are mutually exclusive
3044                 */
3045                if (flags & MPOL_F_STATIC_NODES)
3046                        p += snprintf(p, buffer + maxlen - p, "static");
3047                else if (flags & MPOL_F_RELATIVE_NODES)
3048                        p += snprintf(p, buffer + maxlen - p, "relative");
3049        }
3050
3051        if (!nodes_empty(nodes))
3052                p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3053                               nodemask_pr_args(&nodes));
3054}
3055