linux/mm/mempolicy.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Simple NUMA memory policy for the Linux kernel.
   4 *
   5 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
   6 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
   7 *
   8 * NUMA policy allows the user to give hints in which node(s) memory should
   9 * be allocated.
  10 *
   11 * Support multiple policies per VMA and per process:
  12 *
  13 * The VMA policy has priority over the process policy for a page fault.
  14 *
  15 * interleave     Allocate memory interleaved over a set of nodes,
  16 *                with normal fallback if it fails.
  17 *                For VMA based allocations this interleaves based on the
  18 *                offset into the backing object or offset into the mapping
   19 *                for anonymous memory. For process policy, a per-process counter
   20 *                is used.
  21 *
  22 * bind           Only allocate memory on a specific set of nodes,
  23 *                no fallback.
  24 *                FIXME: memory is allocated starting with the first node
  25 *                to the last. It would be better if bind would truly restrict
  26 *                the allocation to memory nodes instead
  27 *
   28 * preferred      Try a specific node first before normal fallback.
  29 *                As a special case NUMA_NO_NODE here means do the allocation
  30 *                on the local CPU. This is normally identical to default,
  31 *                but useful to set in a VMA when you have a non default
  32 *                process policy.
  33 *
  34 * preferred many Try a set of nodes first before normal fallback. This is
  35 *                similar to preferred without the special case.
  36 *
  37 * default        Allocate on the local node first, or when on a VMA
  38 *                use the process policy. This is what Linux always did
  39 *                in a NUMA aware kernel and still does by, ahem, default.
  40 *
   41 * The process policy is applied for most non-interrupt memory allocations
  42 * in that process' context. Interrupts ignore the policies and always
  43 * try to allocate on the local CPU. The VMA policy is only applied for memory
  44 * allocations for a VMA in the VM.
  45 *
  46 * Currently there are a few corner cases in swapping where the policy
  47 * is not applied, but the majority should be handled. When process policy
  48 * is used it is not remembered over swap outs/swap ins.
  49 *
  50 * Only the highest zone in the zone hierarchy gets policied. Allocations
   51 * requesting a lower zone just use the default policy. This implies that
   52 * on systems with highmem, kernel lowmem allocations don't get policied.
   53 * The same applies to GFP_DMA allocations.
  54 *
  55 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
  56 * all users and remembered even when nobody has memory mapped.
  57 */
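/*
 * A minimal userspace sketch of how these policies are typically requested,
 * via the set_mempolicy()/mbind() wrappers declared in libnuma's <numaif.h>.
 * The node numbers, buf and len below are hypothetical placeholders and
 * error handling is omitted.
 *
 *	unsigned long nodes = (1UL << 0) | (1UL << 1);	// nodes 0 and 1
 *
 *	// Interleave this task's future allocations across nodes 0 and 1.
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes, sizeof(nodes) * 8);
 *
 *	// Restrict an existing mapping to node 0, migrating misplaced pages.
 *	unsigned long node0 = 1UL << 0;
 *	mbind(buf, len, MPOL_BIND, &node0, sizeof(node0) * 8,
 *	      MPOL_MF_MOVE | MPOL_MF_STRICT);
 */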
  58
  59/* Notebook:
  60   fix mmap readahead to honour policy and enable policy for any page cache
  61   object
  62   statistics for bigpages
  63   global policy for page cache? currently it uses process policy. Requires
  64   first item above.
  65   handle mremap for shared memory (currently ignored for the policy)
  66   grows down?
  67   make bind policy root only? It can trigger oom much faster and the
   68   kernel is not always graceful about that.
  69*/
  70
  71#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  72
  73#include <linux/mempolicy.h>
  74#include <linux/pagewalk.h>
  75#include <linux/highmem.h>
  76#include <linux/hugetlb.h>
  77#include <linux/kernel.h>
  78#include <linux/sched.h>
  79#include <linux/sched/mm.h>
  80#include <linux/sched/numa_balancing.h>
  81#include <linux/sched/task.h>
  82#include <linux/nodemask.h>
  83#include <linux/cpuset.h>
  84#include <linux/slab.h>
  85#include <linux/string.h>
  86#include <linux/export.h>
  87#include <linux/nsproxy.h>
  88#include <linux/interrupt.h>
  89#include <linux/init.h>
  90#include <linux/compat.h>
  91#include <linux/ptrace.h>
  92#include <linux/swap.h>
  93#include <linux/seq_file.h>
  94#include <linux/proc_fs.h>
  95#include <linux/migrate.h>
  96#include <linux/ksm.h>
  97#include <linux/rmap.h>
  98#include <linux/security.h>
  99#include <linux/syscalls.h>
 100#include <linux/ctype.h>
 101#include <linux/mm_inline.h>
 102#include <linux/mmu_notifier.h>
 103#include <linux/printk.h>
 104#include <linux/swapops.h>
 105
 106#include <asm/tlbflush.h>
 107#include <linux/uaccess.h>
 108
 109#include "internal.h"
 110
 111/* Internal flags */
 112#define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)    /* Skip checks for continuous vmas */
 113#define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)          /* Invert check for nodemask */
 114
 115static struct kmem_cache *policy_cache;
 116static struct kmem_cache *sn_cache;
 117
  118/* Highest zone. A specific allocation for a zone below that is not
 119   policied. */
 120enum zone_type policy_zone = 0;
 121
 122/*
 123 * run-time system-wide default policy => local allocation
 124 */
 125static struct mempolicy default_policy = {
 126        .refcnt = ATOMIC_INIT(1), /* never free it */
 127        .mode = MPOL_LOCAL,
 128};
 129
 130static struct mempolicy preferred_node_policy[MAX_NUMNODES];
 131
 132/**
 133 * numa_map_to_online_node - Find closest online node
 134 * @node: Node id to start the search
 135 *
  136 * Look up the next closest node by distance if @node is not online.
 137 */
 138int numa_map_to_online_node(int node)
 139{
 140        int min_dist = INT_MAX, dist, n, min_node;
 141
 142        if (node == NUMA_NO_NODE || node_online(node))
 143                return node;
 144
 145        min_node = node;
 146        for_each_online_node(n) {
 147                dist = node_distance(node, n);
 148                if (dist < min_dist) {
 149                        min_dist = dist;
 150                        min_node = n;
 151                }
 152        }
 153
 154        return min_node;
 155}
 156EXPORT_SYMBOL_GPL(numa_map_to_online_node);
 157
 158struct mempolicy *get_task_policy(struct task_struct *p)
 159{
 160        struct mempolicy *pol = p->mempolicy;
 161        int node;
 162
 163        if (pol)
 164                return pol;
 165
 166        node = numa_node_id();
 167        if (node != NUMA_NO_NODE) {
 168                pol = &preferred_node_policy[node];
 169                /* preferred_node_policy is not initialised early in boot */
 170                if (pol->mode)
 171                        return pol;
 172        }
 173
 174        return &default_policy;
 175}
 176
 177static const struct mempolicy_operations {
 178        int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
 179        void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
 180} mpol_ops[MPOL_MAX];
 181
 182static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
 183{
 184        return pol->flags & MPOL_MODE_FLAGS;
 185}
 186
 187static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
 188                                   const nodemask_t *rel)
 189{
 190        nodemask_t tmp;
 191        nodes_fold(tmp, *orig, nodes_weight(*rel));
 192        nodes_onto(*ret, tmp, *rel);
 193}
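/*
 * A worked example of the MPOL_F_RELATIVE_NODES remap done above, with
 * hypothetical masks: for a relative user nodemask of {0,2} and a cpuset
 * mems_allowed of {4,6,8}, nodes_fold() keeps {0,2} (both below the weight
 * of 3) and nodes_onto() maps them onto the 1st and 3rd allowed nodes,
 * giving {4,8}.
 */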
 194
 195static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
 196{
 197        if (nodes_empty(*nodes))
 198                return -EINVAL;
 199        pol->nodes = *nodes;
 200        return 0;
 201}
 202
 203static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
 204{
 205        if (nodes_empty(*nodes))
 206                return -EINVAL;
 207
 208        nodes_clear(pol->nodes);
 209        node_set(first_node(*nodes), pol->nodes);
 210        return 0;
 211}
 212
 213/*
 214 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
 215 * any, for the new policy.  mpol_new() has already validated the nodes
 216 * parameter with respect to the policy mode and flags.
 217 *
 218 * Must be called holding task's alloc_lock to protect task's mems_allowed
 219 * and mempolicy.  May also be called holding the mmap_lock for write.
 220 */
 221static int mpol_set_nodemask(struct mempolicy *pol,
 222                     const nodemask_t *nodes, struct nodemask_scratch *nsc)
 223{
 224        int ret;
 225
 226        /*
  227         * Default (pol==NULL) and local memory policies are not
  228         * subject to any remapping. They also do not need any special
 229         * constructor.
 230         */
 231        if (!pol || pol->mode == MPOL_LOCAL)
 232                return 0;
 233
 234        /* Check N_MEMORY */
 235        nodes_and(nsc->mask1,
 236                  cpuset_current_mems_allowed, node_states[N_MEMORY]);
 237
 238        VM_BUG_ON(!nodes);
 239
 240        if (pol->flags & MPOL_F_RELATIVE_NODES)
 241                mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
 242        else
 243                nodes_and(nsc->mask2, *nodes, nsc->mask1);
 244
 245        if (mpol_store_user_nodemask(pol))
 246                pol->w.user_nodemask = *nodes;
 247        else
 248                pol->w.cpuset_mems_allowed = cpuset_current_mems_allowed;
 249
 250        ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
 251        return ret;
 252}
 253
 254/*
  255 * This function just creates a new policy, does some checks and simple
 256 * initialization. You must invoke mpol_set_nodemask() to set nodes.
 257 */
 258static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
 259                                  nodemask_t *nodes)
 260{
 261        struct mempolicy *policy;
 262
 263        pr_debug("setting mode %d flags %d nodes[0] %lx\n",
 264                 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
 265
 266        if (mode == MPOL_DEFAULT) {
 267                if (nodes && !nodes_empty(*nodes))
 268                        return ERR_PTR(-EINVAL);
 269                return NULL;
 270        }
 271        VM_BUG_ON(!nodes);
 272
 273        /*
 274         * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
 275         * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
 276         * All other modes require a valid pointer to a non-empty nodemask.
 277         */
 278        if (mode == MPOL_PREFERRED) {
 279                if (nodes_empty(*nodes)) {
 280                        if (((flags & MPOL_F_STATIC_NODES) ||
 281                             (flags & MPOL_F_RELATIVE_NODES)))
 282                                return ERR_PTR(-EINVAL);
 283
 284                        mode = MPOL_LOCAL;
 285                }
 286        } else if (mode == MPOL_LOCAL) {
 287                if (!nodes_empty(*nodes) ||
 288                    (flags & MPOL_F_STATIC_NODES) ||
 289                    (flags & MPOL_F_RELATIVE_NODES))
 290                        return ERR_PTR(-EINVAL);
 291        } else if (nodes_empty(*nodes))
 292                return ERR_PTR(-EINVAL);
 293        policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
 294        if (!policy)
 295                return ERR_PTR(-ENOMEM);
 296        atomic_set(&policy->refcnt, 1);
 297        policy->mode = mode;
 298        policy->flags = flags;
 299
 300        return policy;
 301}
 302
 303/* Slow path of a mpol destructor. */
 304void __mpol_put(struct mempolicy *p)
 305{
 306        if (!atomic_dec_and_test(&p->refcnt))
 307                return;
 308        kmem_cache_free(policy_cache, p);
 309}
 310
 311static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
 312{
 313}
 314
 315static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
 316{
 317        nodemask_t tmp;
 318
 319        if (pol->flags & MPOL_F_STATIC_NODES)
 320                nodes_and(tmp, pol->w.user_nodemask, *nodes);
 321        else if (pol->flags & MPOL_F_RELATIVE_NODES)
 322                mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
 323        else {
 324                nodes_remap(tmp, pol->nodes, pol->w.cpuset_mems_allowed,
 325                                                                *nodes);
 326                pol->w.cpuset_mems_allowed = *nodes;
 327        }
 328
 329        if (nodes_empty(tmp))
 330                tmp = *nodes;
 331
 332        pol->nodes = tmp;
 333}
 334
 335static void mpol_rebind_preferred(struct mempolicy *pol,
 336                                                const nodemask_t *nodes)
 337{
 338        pol->w.cpuset_mems_allowed = *nodes;
 339}
 340
 341/*
 342 * mpol_rebind_policy - Migrate a policy to a different set of nodes
 343 *
 344 * Per-vma policies are protected by mmap_lock. Allocations using per-task
 345 * policies are protected by task->mems_allowed_seq to prevent a premature
 346 * OOM/allocation failure due to parallel nodemask modification.
 347 */
 348static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
 349{
 350        if (!pol)
 351                return;
 352        if (!mpol_store_user_nodemask(pol) &&
 353            nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
 354                return;
 355
 356        mpol_ops[pol->mode].rebind(pol, newmask);
 357}
 358
 359/*
 360 * Wrapper for mpol_rebind_policy() that just requires task
 361 * pointer, and updates task mempolicy.
 362 *
 363 * Called with task's alloc_lock held.
 364 */
 365
 366void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
 367{
 368        mpol_rebind_policy(tsk->mempolicy, new);
 369}
 370
 371/*
 372 * Rebind each vma in mm to new nodemask.
 373 *
 374 * Call holding a reference to mm.  Takes mm->mmap_lock during call.
 375 */
 376
 377void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
 378{
 379        struct vm_area_struct *vma;
 380
 381        mmap_write_lock(mm);
 382        for (vma = mm->mmap; vma; vma = vma->vm_next)
 383                mpol_rebind_policy(vma->vm_policy, new);
 384        mmap_write_unlock(mm);
 385}
 386
 387static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
 388        [MPOL_DEFAULT] = {
 389                .rebind = mpol_rebind_default,
 390        },
 391        [MPOL_INTERLEAVE] = {
 392                .create = mpol_new_nodemask,
 393                .rebind = mpol_rebind_nodemask,
 394        },
 395        [MPOL_PREFERRED] = {
 396                .create = mpol_new_preferred,
 397                .rebind = mpol_rebind_preferred,
 398        },
 399        [MPOL_BIND] = {
 400                .create = mpol_new_nodemask,
 401                .rebind = mpol_rebind_nodemask,
 402        },
 403        [MPOL_LOCAL] = {
 404                .rebind = mpol_rebind_default,
 405        },
 406        [MPOL_PREFERRED_MANY] = {
 407                .create = mpol_new_nodemask,
 408                .rebind = mpol_rebind_preferred,
 409        },
 410};
 411
 412static int migrate_page_add(struct page *page, struct list_head *pagelist,
 413                                unsigned long flags);
 414
 415struct queue_pages {
 416        struct list_head *pagelist;
 417        unsigned long flags;
 418        nodemask_t *nmask;
 419        unsigned long start;
 420        unsigned long end;
 421        struct vm_area_struct *first;
 422};
 423
 424/*
 425 * Check if the page's nid is in qp->nmask.
 426 *
  427 * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
  428 * not in qp->nmask instead.
 429 */
 430static inline bool queue_pages_required(struct page *page,
 431                                        struct queue_pages *qp)
 432{
 433        int nid = page_to_nid(page);
 434        unsigned long flags = qp->flags;
 435
 436        return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
 437}
 438
 439/*
 440 * queue_pages_pmd() has four possible return values:
  441 * 0 - pages are placed on the right node or queued successfully, or a
  442 *     special page is met, i.e. the huge zero page.
  443 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
  444 *     specified.
  445 * 2 - THP was split.
  446 * -EIO - the entry is a migration entry, or only MPOL_MF_STRICT was
  447 *        specified and an existing page was already on a node that does
  448 *        not follow the policy.
 449 */
 450static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 451                                unsigned long end, struct mm_walk *walk)
 452        __releases(ptl)
 453{
 454        int ret = 0;
 455        struct page *page;
 456        struct queue_pages *qp = walk->private;
 457        unsigned long flags;
 458
 459        if (unlikely(is_pmd_migration_entry(*pmd))) {
 460                ret = -EIO;
 461                goto unlock;
 462        }
 463        page = pmd_page(*pmd);
 464        if (is_huge_zero_page(page)) {
 465                spin_unlock(ptl);
 466                walk->action = ACTION_CONTINUE;
 467                goto out;
 468        }
 469        if (!queue_pages_required(page, qp))
 470                goto unlock;
 471
 472        flags = qp->flags;
 473        /* go to thp migration */
 474        if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
 475                if (!vma_migratable(walk->vma) ||
 476                    migrate_page_add(page, qp->pagelist, flags)) {
 477                        ret = 1;
 478                        goto unlock;
 479                }
 480        } else
 481                ret = -EIO;
 482unlock:
 483        spin_unlock(ptl);
 484out:
 485        return ret;
 486}
 487
 488/*
 489 * Scan through pages checking if pages follow certain conditions,
 490 * and move them to the pagelist if they do.
 491 *
 492 * queue_pages_pte_range() has three possible return values:
  493 * 0 - pages are placed on the right node or queued successfully, or a
  494 *     special page is met, i.e. the zero page.
  495 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
 496 *     specified.
 497 * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
 498 *        on a node that does not follow the policy.
 499 */
 500static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 501                        unsigned long end, struct mm_walk *walk)
 502{
 503        struct vm_area_struct *vma = walk->vma;
 504        struct page *page;
 505        struct queue_pages *qp = walk->private;
 506        unsigned long flags = qp->flags;
 507        int ret;
 508        bool has_unmovable = false;
 509        pte_t *pte, *mapped_pte;
 510        spinlock_t *ptl;
 511
 512        ptl = pmd_trans_huge_lock(pmd, vma);
 513        if (ptl) {
 514                ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
 515                if (ret != 2)
 516                        return ret;
 517        }
 518        /* THP was split, fall through to pte walk */
 519
 520        if (pmd_trans_unstable(pmd))
 521                return 0;
 522
 523        mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
 524        for (; addr != end; pte++, addr += PAGE_SIZE) {
 525                if (!pte_present(*pte))
 526                        continue;
 527                page = vm_normal_page(vma, addr, *pte);
 528                if (!page)
 529                        continue;
 530                /*
 531                 * vm_normal_page() filters out zero pages, but there might
 532                 * still be PageReserved pages to skip, perhaps in a VDSO.
 533                 */
 534                if (PageReserved(page))
 535                        continue;
 536                if (!queue_pages_required(page, qp))
 537                        continue;
 538                if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
 539                        /* MPOL_MF_STRICT must be specified if we get here */
 540                        if (!vma_migratable(vma)) {
 541                                has_unmovable = true;
 542                                break;
 543                        }
 544
 545                        /*
 546                         * Do not abort immediately since there may be
  547                         * temporarily off-LRU pages in the range.  Still
  548                         * need to migrate other LRU pages.
 549                         */
 550                        if (migrate_page_add(page, qp->pagelist, flags))
 551                                has_unmovable = true;
 552                } else
 553                        break;
 554        }
 555        pte_unmap_unlock(mapped_pte, ptl);
 556        cond_resched();
 557
 558        if (has_unmovable)
 559                return 1;
 560
 561        return addr != end ? -EIO : 0;
 562}
 563
 564static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
 565                               unsigned long addr, unsigned long end,
 566                               struct mm_walk *walk)
 567{
 568        int ret = 0;
 569#ifdef CONFIG_HUGETLB_PAGE
 570        struct queue_pages *qp = walk->private;
 571        unsigned long flags = (qp->flags & MPOL_MF_VALID);
 572        struct page *page;
 573        spinlock_t *ptl;
 574        pte_t entry;
 575
 576        ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
 577        entry = huge_ptep_get(pte);
 578        if (!pte_present(entry))
 579                goto unlock;
 580        page = pte_page(entry);
 581        if (!queue_pages_required(page, qp))
 582                goto unlock;
 583
 584        if (flags == MPOL_MF_STRICT) {
 585                /*
  586                 * STRICT alone means only detecting misplaced pages and no
  587                 * need to check other vmas further.
 588                 */
 589                ret = -EIO;
 590                goto unlock;
 591        }
 592
 593        if (!vma_migratable(walk->vma)) {
 594                /*
  595                 * Must be STRICT with MOVE*, otherwise .test_walk() would
  596                 * have stopped walking the current vma.
  597                 * A misplaced page was detected, but allow migrating pages
  598                 * which have already been queued.
 599                 */
 600                ret = 1;
 601                goto unlock;
 602        }
 603
 604        /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
 605        if (flags & (MPOL_MF_MOVE_ALL) ||
 606            (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
 607                if (!isolate_huge_page(page, qp->pagelist) &&
 608                        (flags & MPOL_MF_STRICT))
 609                        /*
  610                         * Failed to isolate the page, but allow migrating
  611                         * pages which have already been queued.
 612                         */
 613                        ret = 1;
 614        }
 615unlock:
 616        spin_unlock(ptl);
 617#else
 618        BUG();
 619#endif
 620        return ret;
 621}
 622
 623#ifdef CONFIG_NUMA_BALANCING
 624/*
  625 * This is used to mark a range of virtual addresses as inaccessible.
  626 * The protections are later cleared by a NUMA hinting fault. Depending on these
 627 * faults, pages may be migrated for better NUMA placement.
 628 *
 629 * This is assuming that NUMA faults are handled using PROT_NONE. If
 630 * an architecture makes a different choice, it will need further
 631 * changes to the core.
 632 */
 633unsigned long change_prot_numa(struct vm_area_struct *vma,
 634                        unsigned long addr, unsigned long end)
 635{
 636        int nr_updated;
 637
 638        nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
 639        if (nr_updated)
 640                count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
 641
 642        return nr_updated;
 643}
 644#else
 645static unsigned long change_prot_numa(struct vm_area_struct *vma,
 646                        unsigned long addr, unsigned long end)
 647{
 648        return 0;
 649}
 650#endif /* CONFIG_NUMA_BALANCING */
 651
 652static int queue_pages_test_walk(unsigned long start, unsigned long end,
 653                                struct mm_walk *walk)
 654{
 655        struct vm_area_struct *vma = walk->vma;
 656        struct queue_pages *qp = walk->private;
 657        unsigned long endvma = vma->vm_end;
 658        unsigned long flags = qp->flags;
 659
 660        /* range check first */
 661        VM_BUG_ON_VMA(!range_in_vma(vma, start, end), vma);
 662
 663        if (!qp->first) {
 664                qp->first = vma;
 665                if (!(flags & MPOL_MF_DISCONTIG_OK) &&
 666                        (qp->start < vma->vm_start))
 667                        /* hole at head side of range */
 668                        return -EFAULT;
 669        }
 670        if (!(flags & MPOL_MF_DISCONTIG_OK) &&
 671                ((vma->vm_end < qp->end) &&
 672                (!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
 673                /* hole at middle or tail of range */
 674                return -EFAULT;
 675
 676        /*
  677         * Need to check MPOL_MF_STRICT to return -EIO if possible,
  678         * regardless of vma_migratable
 679         */
 680        if (!vma_migratable(vma) &&
 681            !(flags & MPOL_MF_STRICT))
 682                return 1;
 683
 684        if (endvma > end)
 685                endvma = end;
 686
 687        if (flags & MPOL_MF_LAZY) {
 688                /* Similar to task_numa_work, skip inaccessible VMAs */
 689                if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
 690                        !(vma->vm_flags & VM_MIXEDMAP))
 691                        change_prot_numa(vma, start, endvma);
 692                return 1;
 693        }
 694
 695        /* queue pages from current vma */
 696        if (flags & MPOL_MF_VALID)
 697                return 0;
 698        return 1;
 699}
 700
 701static const struct mm_walk_ops queue_pages_walk_ops = {
 702        .hugetlb_entry          = queue_pages_hugetlb,
 703        .pmd_entry              = queue_pages_pte_range,
 704        .test_walk              = queue_pages_test_walk,
 705};
 706
 707/*
 708 * Walk through page tables and collect pages to be migrated.
 709 *
  710 * If pages found in a given range are on a set of nodes (determined by
  711 * @nodes and @flags), they are isolated and queued onto the pagelist
  712 * passed via @private.
 713 *
 714 * queue_pages_range() has three possible return values:
  715 * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
  716 *     specified.
 717 * 0 - queue pages successfully or no misplaced page.
  718 * errno - e.g. misplaced pages with MPOL_MF_STRICT specified (-EIO), or the
  719 *         memory range specified by nodemask and maxnode points outside
  720 *         your accessible address space (-EFAULT)
 721 */
 722static int
 723queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 724                nodemask_t *nodes, unsigned long flags,
 725                struct list_head *pagelist)
 726{
 727        int err;
 728        struct queue_pages qp = {
 729                .pagelist = pagelist,
 730                .flags = flags,
 731                .nmask = nodes,
 732                .start = start,
 733                .end = end,
 734                .first = NULL,
 735        };
 736
 737        err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
 738
 739        if (!qp.first)
 740                /* whole range in hole */
 741                err = -EFAULT;
 742
 743        return err;
 744}
 745
 746/*
 747 * Apply policy to a single VMA
 748 * This must be called with the mmap_lock held for writing.
 749 */
 750static int vma_replace_policy(struct vm_area_struct *vma,
 751                                                struct mempolicy *pol)
 752{
 753        int err;
 754        struct mempolicy *old;
 755        struct mempolicy *new;
 756
 757        pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
 758                 vma->vm_start, vma->vm_end, vma->vm_pgoff,
 759                 vma->vm_ops, vma->vm_file,
 760                 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
 761
 762        new = mpol_dup(pol);
 763        if (IS_ERR(new))
 764                return PTR_ERR(new);
 765
 766        if (vma->vm_ops && vma->vm_ops->set_policy) {
 767                err = vma->vm_ops->set_policy(vma, new);
 768                if (err)
 769                        goto err_out;
 770        }
 771
 772        old = vma->vm_policy;
 773        vma->vm_policy = new; /* protected by mmap_lock */
 774        mpol_put(old);
 775
 776        return 0;
 777 err_out:
 778        mpol_put(new);
 779        return err;
 780}
 781
 782/* Step 2: apply policy to a range and do splits. */
 783static int mbind_range(struct mm_struct *mm, unsigned long start,
 784                       unsigned long end, struct mempolicy *new_pol)
 785{
 786        struct vm_area_struct *next;
 787        struct vm_area_struct *prev;
 788        struct vm_area_struct *vma;
 789        int err = 0;
 790        pgoff_t pgoff;
 791        unsigned long vmstart;
 792        unsigned long vmend;
 793
 794        vma = find_vma(mm, start);
 795        VM_BUG_ON(!vma);
 796
 797        prev = vma->vm_prev;
 798        if (start > vma->vm_start)
 799                prev = vma;
 800
 801        for (; vma && vma->vm_start < end; prev = vma, vma = next) {
 802                next = vma->vm_next;
 803                vmstart = max(start, vma->vm_start);
 804                vmend   = min(end, vma->vm_end);
 805
 806                if (mpol_equal(vma_policy(vma), new_pol))
 807                        continue;
 808
 809                pgoff = vma->vm_pgoff +
 810                        ((vmstart - vma->vm_start) >> PAGE_SHIFT);
 811                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
 812                                 vma->anon_vma, vma->vm_file, pgoff,
 813                                 new_pol, vma->vm_userfaultfd_ctx);
 814                if (prev) {
 815                        vma = prev;
 816                        next = vma->vm_next;
 817                        if (mpol_equal(vma_policy(vma), new_pol))
 818                                continue;
 819                        /* vma_merge() joined vma && vma->next, case 8 */
 820                        goto replace;
 821                }
 822                if (vma->vm_start != vmstart) {
 823                        err = split_vma(vma->vm_mm, vma, vmstart, 1);
 824                        if (err)
 825                                goto out;
 826                }
 827                if (vma->vm_end != vmend) {
 828                        err = split_vma(vma->vm_mm, vma, vmend, 0);
 829                        if (err)
 830                                goto out;
 831                }
 832 replace:
 833                err = vma_replace_policy(vma, new_pol);
 834                if (err)
 835                        goto out;
 836        }
 837
 838 out:
 839        return err;
 840}
 841
 842/* Set the process memory policy */
 843static long do_set_mempolicy(unsigned short mode, unsigned short flags,
 844                             nodemask_t *nodes)
 845{
 846        struct mempolicy *new, *old;
 847        NODEMASK_SCRATCH(scratch);
 848        int ret;
 849
 850        if (!scratch)
 851                return -ENOMEM;
 852
 853        new = mpol_new(mode, flags, nodes);
 854        if (IS_ERR(new)) {
 855                ret = PTR_ERR(new);
 856                goto out;
 857        }
 858
 859        ret = mpol_set_nodemask(new, nodes, scratch);
 860        if (ret) {
 861                mpol_put(new);
 862                goto out;
 863        }
 864        task_lock(current);
 865        old = current->mempolicy;
 866        current->mempolicy = new;
 867        if (new && new->mode == MPOL_INTERLEAVE)
 868                current->il_prev = MAX_NUMNODES-1;
 869        task_unlock(current);
 870        mpol_put(old);
 871        ret = 0;
 872out:
 873        NODEMASK_SCRATCH_FREE(scratch);
 874        return ret;
 875}
 876
 877/*
 878 * Return nodemask for policy for get_mempolicy() query
 879 *
 880 * Called with task's alloc_lock held
 881 */
 882static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
 883{
 884        nodes_clear(*nodes);
 885        if (p == &default_policy)
 886                return;
 887
 888        switch (p->mode) {
 889        case MPOL_BIND:
 890        case MPOL_INTERLEAVE:
 891        case MPOL_PREFERRED:
 892        case MPOL_PREFERRED_MANY:
 893                *nodes = p->nodes;
 894                break;
 895        case MPOL_LOCAL:
 896                /* return empty node mask for local allocation */
 897                break;
 898        default:
 899                BUG();
 900        }
 901}
 902
 903static int lookup_node(struct mm_struct *mm, unsigned long addr)
 904{
 905        struct page *p = NULL;
 906        int err;
 907
 908        int locked = 1;
 909        err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
 910        if (err > 0) {
 911                err = page_to_nid(p);
 912                put_page(p);
 913        }
 914        if (locked)
 915                mmap_read_unlock(mm);
 916        return err;
 917}
 918
 919/* Retrieve NUMA policy */
 920static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 921                             unsigned long addr, unsigned long flags)
 922{
 923        int err;
 924        struct mm_struct *mm = current->mm;
 925        struct vm_area_struct *vma = NULL;
 926        struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
 927
 928        if (flags &
 929                ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
 930                return -EINVAL;
 931
 932        if (flags & MPOL_F_MEMS_ALLOWED) {
 933                if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
 934                        return -EINVAL;
 935                *policy = 0;    /* just so it's initialized */
 936                task_lock(current);
 937                *nmask  = cpuset_current_mems_allowed;
 938                task_unlock(current);
 939                return 0;
 940        }
 941
 942        if (flags & MPOL_F_ADDR) {
 943                /*
 944                 * Do NOT fall back to task policy if the
 945                 * vma/shared policy at addr is NULL.  We
 946                 * want to return MPOL_DEFAULT in this case.
 947                 */
 948                mmap_read_lock(mm);
 949                vma = vma_lookup(mm, addr);
 950                if (!vma) {
 951                        mmap_read_unlock(mm);
 952                        return -EFAULT;
 953                }
 954                if (vma->vm_ops && vma->vm_ops->get_policy)
 955                        pol = vma->vm_ops->get_policy(vma, addr);
 956                else
 957                        pol = vma->vm_policy;
 958        } else if (addr)
 959                return -EINVAL;
 960
 961        if (!pol)
 962                pol = &default_policy;  /* indicates default behavior */
 963
 964        if (flags & MPOL_F_NODE) {
 965                if (flags & MPOL_F_ADDR) {
 966                        /*
 967                         * Take a refcount on the mpol, lookup_node()
 968                         * will drop the mmap_lock, so after calling
 969                         * lookup_node() only "pol" remains valid, "vma"
 970                         * is stale.
 971                         */
 972                        pol_refcount = pol;
 973                        vma = NULL;
 974                        mpol_get(pol);
 975                        err = lookup_node(mm, addr);
 976                        if (err < 0)
 977                                goto out;
 978                        *policy = err;
 979                } else if (pol == current->mempolicy &&
 980                                pol->mode == MPOL_INTERLEAVE) {
 981                        *policy = next_node_in(current->il_prev, pol->nodes);
 982                } else {
 983                        err = -EINVAL;
 984                        goto out;
 985                }
 986        } else {
 987                *policy = pol == &default_policy ? MPOL_DEFAULT :
 988                                                pol->mode;
 989                /*
 990                 * Internal mempolicy flags must be masked off before exposing
 991                 * the policy to userspace.
 992                 */
 993                *policy |= (pol->flags & MPOL_MODE_FLAGS);
 994        }
 995
 996        err = 0;
 997        if (nmask) {
 998                if (mpol_store_user_nodemask(pol)) {
 999                        *nmask = pol->w.user_nodemask;
1000                } else {
1001                        task_lock(current);
1002                        get_policy_nodemask(pol, nmask);
1003                        task_unlock(current);
1004                }
1005        }
1006
1007 out:
1008        mpol_cond_put(pol);
1009        if (vma)
1010                mmap_read_unlock(mm);
1011        if (pol_refcount)
1012                mpol_put(pol_refcount);
1013        return err;
1014}
1015
1016#ifdef CONFIG_MIGRATION
1017/*
 1018 * page migration; THP tail pages can be passed.
1019 */
1020static int migrate_page_add(struct page *page, struct list_head *pagelist,
1021                                unsigned long flags)
1022{
1023        struct page *head = compound_head(page);
1024        /*
1025         * Avoid migrating a page that is shared with others.
1026         */
1027        if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
1028                if (!isolate_lru_page(head)) {
1029                        list_add_tail(&head->lru, pagelist);
1030                        mod_node_page_state(page_pgdat(head),
1031                                NR_ISOLATED_ANON + page_is_file_lru(head),
1032                                thp_nr_pages(head));
1033                } else if (flags & MPOL_MF_STRICT) {
1034                        /*
 1035                         * A non-movable page may reach here.  Also, there may
 1036                         * be temporarily off-LRU pages or non-LRU movable pages.
 1037                         * Treat them as unmovable pages since they can't be
 1038                         * isolated, so they can't be moved at the moment.  We
 1039                         * should return -EIO for this case too.
1040                         */
1041                        return -EIO;
1042                }
1043        }
1044
1045        return 0;
1046}
1047
1048/*
1049 * Migrate pages from one node to a target node.
1050 * Returns error or the number of pages not migrated.
1051 */
1052static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1053                           int flags)
1054{
1055        nodemask_t nmask;
1056        LIST_HEAD(pagelist);
1057        int err = 0;
1058        struct migration_target_control mtc = {
1059                .nid = dest,
1060                .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1061        };
1062
1063        nodes_clear(nmask);
1064        node_set(source, nmask);
1065
1066        /*
1067         * This does not "check" the range but isolates all pages that
1068         * need migration.  Between passing in the full user address
 1069         * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
1070         */
1071        VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1072        queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
1073                        flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1074
1075        if (!list_empty(&pagelist)) {
1076                err = migrate_pages(&pagelist, alloc_migration_target, NULL,
1077                                (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1078                if (err)
1079                        putback_movable_pages(&pagelist);
1080        }
1081
1082        return err;
1083}
1084
1085/*
1086 * Move pages between the two nodesets so as to preserve the physical
1087 * layout as much as possible.
1088 *
 1089 * Returns the number of pages that could not be moved.
1090 */
1091int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1092                     const nodemask_t *to, int flags)
1093{
1094        int busy = 0;
1095        int err = 0;
1096        nodemask_t tmp;
1097
1098        lru_cache_disable();
1099
1100        mmap_read_lock(mm);
1101
1102        /*
1103         * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1104         * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
1105         * bit in 'tmp', and return that <source, dest> pair for migration.
1106         * The pair of nodemasks 'to' and 'from' define the map.
1107         *
 1108         * If no pair of bits is found that way, fall back to picking some
1109         * pair of 'source' and 'dest' bits that are not the same.  If the
1110         * 'source' and 'dest' bits are the same, this represents a node
1111         * that will be migrating to itself, so no pages need move.
1112         *
1113         * If no bits are left in 'tmp', or if all remaining bits left
1114         * in 'tmp' correspond to the same bit in 'to', return false
1115         * (nothing left to migrate).
1116         *
1117         * This lets us pick a pair of nodes to migrate between, such that
1118         * if possible the dest node is not already occupied by some other
1119         * source node, minimizing the risk of overloading the memory on a
1120         * node that would happen if we migrated incoming memory to a node
 1121         * before migrating outgoing memory from that same node.
1122         *
1123         * A single scan of tmp is sufficient.  As we go, we remember the
1124         * most recent <s, d> pair that moved (s != d).  If we find a pair
1125         * that not only moved, but what's better, moved to an empty slot
 1126         * (d is not set in tmp), we break out immediately with that pair.
 1127         * Otherwise when we finish scanning tmp, we at least have the
1128         * most recent <s, d> pair that moved.  If we get all the way through
1129         * the scan of tmp without finding any node that moved, much less
1130         * moved to an empty node, then there is nothing left worth migrating.
1131         */
1132
1133        tmp = *from;
1134        while (!nodes_empty(tmp)) {
1135                int s, d;
1136                int source = NUMA_NO_NODE;
1137                int dest = 0;
1138
1139                for_each_node_mask(s, tmp) {
1140
1141                        /*
1142                         * do_migrate_pages() tries to maintain the relative
1143                         * node relationship of the pages established between
1144                         * threads and memory areas.
1145                         *
 1146                         * However, if the number of source nodes is not equal
 1147                         * to the number of destination nodes, we cannot preserve
 1148                         * this node-relative relationship.  In that case, skip
1149                         * copying memory from a node that is in the destination
1150                         * mask.
1151                         *
1152                         * Example: [2,3,4] -> [3,4,5] moves everything.
 1153                         *          [0-7] -> [3,4,5] moves only 0,1,2,6,7.
1154                         */
1155
1156                        if ((nodes_weight(*from) != nodes_weight(*to)) &&
1157                                                (node_isset(s, *to)))
1158                                continue;
1159
1160                        d = node_remap(s, *from, *to);
1161                        if (s == d)
1162                                continue;
1163
1164                        source = s;     /* Node moved. Memorize */
1165                        dest = d;
1166
1167                        /* dest not in remaining from nodes? */
1168                        if (!node_isset(dest, tmp))
1169                                break;
1170                }
1171                if (source == NUMA_NO_NODE)
1172                        break;
1173
1174                node_clear(source, tmp);
1175                err = migrate_to_node(mm, source, dest, flags);
1176                if (err > 0)
1177                        busy += err;
1178                if (err < 0)
1179                        break;
1180        }
1181        mmap_read_unlock(mm);
1182
1183        lru_cache_enable();
1184        if (err < 0)
1185                return err;
1186        return busy;
1187
1188}
1189
1190/*
1191 * Allocate a new page for page migration based on vma policy.
 1192 * Start by assuming the page is mapped by the same vma that contains @start.
1193 * Search forward from there, if not.  N.B., this assumes that the
1194 * list of pages handed to migrate_pages()--which is how we get here--
1195 * is in virtual address order.
1196 */
1197static struct page *new_page(struct page *page, unsigned long start)
1198{
1199        struct vm_area_struct *vma;
1200        unsigned long address;
1201
1202        vma = find_vma(current->mm, start);
1203        while (vma) {
1204                address = page_address_in_vma(page, vma);
1205                if (address != -EFAULT)
1206                        break;
1207                vma = vma->vm_next;
1208        }
1209
1210        if (PageHuge(page)) {
1211                return alloc_huge_page_vma(page_hstate(compound_head(page)),
1212                                vma, address);
1213        } else if (PageTransHuge(page)) {
1214                struct page *thp;
1215
1216                thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1217                                         HPAGE_PMD_ORDER);
1218                if (!thp)
1219                        return NULL;
1220                prep_transhuge_page(thp);
1221                return thp;
1222        }
1223        /*
1224         * if !vma, alloc_page_vma() will use task or system default policy
1225         */
1226        return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
1227                        vma, address);
1228}
1229#else
1230
1231static int migrate_page_add(struct page *page, struct list_head *pagelist,
1232                                unsigned long flags)
1233{
1234        return -EIO;
1235}
1236
1237int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1238                     const nodemask_t *to, int flags)
1239{
1240        return -ENOSYS;
1241}
1242
1243static struct page *new_page(struct page *page, unsigned long start)
1244{
1245        return NULL;
1246}
1247#endif
1248
1249static long do_mbind(unsigned long start, unsigned long len,
1250                     unsigned short mode, unsigned short mode_flags,
1251                     nodemask_t *nmask, unsigned long flags)
1252{
1253        struct mm_struct *mm = current->mm;
1254        struct mempolicy *new;
1255        unsigned long end;
1256        int err;
1257        int ret;
1258        LIST_HEAD(pagelist);
1259
1260        if (flags & ~(unsigned long)MPOL_MF_VALID)
1261                return -EINVAL;
1262        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1263                return -EPERM;
1264
1265        if (start & ~PAGE_MASK)
1266                return -EINVAL;
1267
1268        if (mode == MPOL_DEFAULT)
1269                flags &= ~MPOL_MF_STRICT;
1270
1271        len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1272        end = start + len;
1273
1274        if (end < start)
1275                return -EINVAL;
1276        if (end == start)
1277                return 0;
1278
1279        new = mpol_new(mode, mode_flags, nmask);
1280        if (IS_ERR(new))
1281                return PTR_ERR(new);
1282
1283        if (flags & MPOL_MF_LAZY)
1284                new->flags |= MPOL_F_MOF;
1285
1286        /*
 1287         * If we are using the default policy then operating
 1288         * on discontinuous address spaces is okay after all.
1289         */
1290        if (!new)
1291                flags |= MPOL_MF_DISCONTIG_OK;
1292
1293        pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1294                 start, start + len, mode, mode_flags,
1295                 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1296
1297        if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1298
1299                lru_cache_disable();
1300        }
1301        {
1302                NODEMASK_SCRATCH(scratch);
1303                if (scratch) {
1304                        mmap_write_lock(mm);
1305                        err = mpol_set_nodemask(new, nmask, scratch);
1306                        if (err)
1307                                mmap_write_unlock(mm);
1308                } else
1309                        err = -ENOMEM;
1310                NODEMASK_SCRATCH_FREE(scratch);
1311        }
1312        if (err)
1313                goto mpol_out;
1314
1315        ret = queue_pages_range(mm, start, end, nmask,
1316                          flags | MPOL_MF_INVERT, &pagelist);
1317
1318        if (ret < 0) {
1319                err = ret;
1320                goto up_out;
1321        }
1322
1323        err = mbind_range(mm, start, end, new);
1324
1325        if (!err) {
1326                int nr_failed = 0;
1327
1328                if (!list_empty(&pagelist)) {
1329                        WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1330                        nr_failed = migrate_pages(&pagelist, new_page, NULL,
1331                                start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
1332                        if (nr_failed)
1333                                putback_movable_pages(&pagelist);
1334                }
1335
1336                if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
1337                        err = -EIO;
1338        } else {
1339up_out:
1340                if (!list_empty(&pagelist))
1341                        putback_movable_pages(&pagelist);
1342        }
1343
1344        mmap_write_unlock(mm);
1345mpol_out:
1346        mpol_put(new);
1347        if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1348                lru_cache_enable();
1349        return err;
1350}
1351
1352/*
1353 * User space interface with variable sized bitmaps for nodelists.
1354 */
1355static int get_bitmap(unsigned long *mask, const unsigned long __user *nmask,
1356                      unsigned long maxnode)
1357{
1358        unsigned long nlongs = BITS_TO_LONGS(maxnode);
1359        int ret;
1360
1361        if (in_compat_syscall())
1362                ret = compat_get_bitmap(mask,
1363                                        (const compat_ulong_t __user *)nmask,
1364                                        maxnode);
1365        else
1366                ret = copy_from_user(mask, nmask,
1367                                     nlongs * sizeof(unsigned long));
1368
1369        if (ret)
1370                return -EFAULT;
1371
1372        if (maxnode % BITS_PER_LONG)
1373                mask[nlongs - 1] &= (1UL << (maxnode % BITS_PER_LONG)) - 1;
1374
1375        return 0;
1376}
1377
1378/* Copy a node mask from user space. */
1379static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1380                     unsigned long maxnode)
1381{
1382        --maxnode;
1383        nodes_clear(*nodes);
1384        if (maxnode == 0 || !nmask)
1385                return 0;
1386        if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1387                return -EINVAL;
1388
1389        /*
 1390         * When the user specified more nodes than supported, just check
 1391         * that the unsupported part is all zero, one word at a time,
1392         * starting at the end.
1393         */
1394        while (maxnode > MAX_NUMNODES) {
1395                unsigned long bits = min_t(unsigned long, maxnode, BITS_PER_LONG);
1396                unsigned long t;
1397
1398                if (get_bitmap(&t, &nmask[maxnode / BITS_PER_LONG], bits))
1399                        return -EFAULT;
1400
1401                if (maxnode - bits >= MAX_NUMNODES) {
1402                        maxnode -= bits;
1403                } else {
1404                        maxnode = MAX_NUMNODES;
1405                        t &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1406                }
1407                if (t)
1408                        return -EINVAL;
1409        }
1410
1411        return get_bitmap(nodes_addr(*nodes), nmask, maxnode);
1412}
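/*
 * A rough worked example of the check above (assuming MAX_NUMNODES == 1024):
 * a call with maxnode == 4096 is accepted as long as the bits at or above
 * MAX_NUMNODES that the user asks the kernel to look at are all clear; a set
 * bit in that upper region makes get_nodes() return -EINVAL.
 */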
1413
1414/* Copy a kernel node mask to user space */
1415static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1416                              nodemask_t *nodes)
1417{
1418        unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1419        unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1420        bool compat = in_compat_syscall();
1421
1422        if (compat)
1423                nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t);
1424
1425        if (copy > nbytes) {
1426                if (copy > PAGE_SIZE)
1427                        return -EINVAL;
1428                if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1429                        return -EFAULT;
1430                copy = nbytes;
1431                maxnode = nr_node_ids;
1432        }
1433
1434        if (compat)
1435                return compat_put_bitmap((compat_ulong_t __user *)mask,
1436                                         nodes_addr(*nodes), maxnode);
1437
1438        return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1439}
1440
1441/* Basic parameter sanity check used by both mbind() and set_mempolicy() */
1442static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
1443{
1444        *flags = *mode & MPOL_MODE_FLAGS;
1445        *mode &= ~MPOL_MODE_FLAGS;
1446
1447        if ((unsigned int)(*mode) >=  MPOL_MAX)
1448                return -EINVAL;
1449        if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
1450                return -EINVAL;
1451        if (*flags & MPOL_F_NUMA_BALANCING) {
1452                if (*mode != MPOL_BIND)
1453                        return -EINVAL;
1454                *flags |= (MPOL_F_MOF | MPOL_F_MORON);
1455        }
1456        return 0;
1457}
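/*
 * A small sketch of the packing that sanitize_mpol_flags() undoes: userspace
 * passes the optional mode flags OR'ed into the mode argument itself (nodes
 * and maxnode below are placeholders), e.g.
 *
 *	set_mempolicy(MPOL_BIND | MPOL_F_STATIC_NODES, &nodes, maxnode);
 *
 * which leaves MPOL_BIND in *mode and MPOL_F_STATIC_NODES in *flags.
 */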
1458
1459static long kernel_mbind(unsigned long start, unsigned long len,
1460                         unsigned long mode, const unsigned long __user *nmask,
1461                         unsigned long maxnode, unsigned int flags)
1462{
1463        unsigned short mode_flags;
1464        nodemask_t nodes;
1465        int lmode = mode;
1466        int err;
1467
1468        start = untagged_addr(start);
1469        err = sanitize_mpol_flags(&lmode, &mode_flags);
1470        if (err)
1471                return err;
1472
1473        err = get_nodes(&nodes, nmask, maxnode);
1474        if (err)
1475                return err;
1476
1477        return do_mbind(start, len, lmode, mode_flags, &nodes, flags);
1478}
1479
1480SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1481                unsigned long, mode, const unsigned long __user *, nmask,
1482                unsigned long, maxnode, unsigned int, flags)
1483{
1484        return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1485}
1486
1487/* Set the process memory policy */
1488static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1489                                 unsigned long maxnode)
1490{
1491        unsigned short mode_flags;
1492        nodemask_t nodes;
1493        int lmode = mode;
1494        int err;
1495
1496        err = sanitize_mpol_flags(&lmode, &mode_flags);
1497        if (err)
1498                return err;
1499
1500        err = get_nodes(&nodes, nmask, maxnode);
1501        if (err)
1502                return err;
1503
1504        return do_set_mempolicy(lmode, mode_flags, &nodes);
1505}
1506
1507SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1508                unsigned long, maxnode)
1509{
1510        return kernel_set_mempolicy(mode, nmask, maxnode);
1511}
1512
1513static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1514                                const unsigned long __user *old_nodes,
1515                                const unsigned long __user *new_nodes)
1516{
1517        struct mm_struct *mm = NULL;
1518        struct task_struct *task;
1519        nodemask_t task_nodes;
1520        int err;
1521        nodemask_t *old;
1522        nodemask_t *new;
1523        NODEMASK_SCRATCH(scratch);
1524
1525        if (!scratch)
1526                return -ENOMEM;
1527
1528        old = &scratch->mask1;
1529        new = &scratch->mask2;
1530
1531        err = get_nodes(old, old_nodes, maxnode);
1532        if (err)
1533                goto out;
1534
1535        err = get_nodes(new, new_nodes, maxnode);
1536        if (err)
1537                goto out;
1538
1539        /* Find the mm_struct */
1540        rcu_read_lock();
1541        task = pid ? find_task_by_vpid(pid) : current;
1542        if (!task) {
1543                rcu_read_unlock();
1544                err = -ESRCH;
1545                goto out;
1546        }
1547        get_task_struct(task);
1548
1549        err = -EINVAL;
1550
1551        /*
1552         * Check if this process has the right to modify the specified process.
1553         * Use the regular "ptrace_may_access()" checks.
1554         */
1555        if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1556                rcu_read_unlock();
1557                err = -EPERM;
1558                goto out_put;
1559        }
1560        rcu_read_unlock();
1561
1562        task_nodes = cpuset_mems_allowed(task);
1563        /* Is the user allowed to access the target nodes? */
1564        if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1565                err = -EPERM;
1566                goto out_put;
1567        }
1568
1569        task_nodes = cpuset_mems_allowed(current);
1570        nodes_and(*new, *new, task_nodes);
1571        if (nodes_empty(*new))
1572                goto out_put;
1573
1574        err = security_task_movememory(task);
1575        if (err)
1576                goto out_put;
1577
1578        mm = get_task_mm(task);
1579        put_task_struct(task);
1580
1581        if (!mm) {
1582                err = -EINVAL;
1583                goto out;
1584        }
1585
1586        err = do_migrate_pages(mm, old, new,
1587                capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1588
1589        mmput(mm);
1590out:
1591        NODEMASK_SCRATCH_FREE(scratch);
1592
1593        return err;
1594
1595out_put:
1596        put_task_struct(task);
1597        goto out;
1598
1599}
1600
1601SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1602                const unsigned long __user *, old_nodes,
1603                const unsigned long __user *, new_nodes)
1604{
1605        return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1606}
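/*
 * Illustrative userspace sketch (untested): moving a target task's pages from
 * node 0 to node 1 through the migrate_pages(2) wrapper from <numaif.h>.
 * "target_pid" and the node numbers are placeholders.
 *
 *	#include <numaif.h>
 *
 *	unsigned long old_nodes = 1UL << 0;
 *	unsigned long new_nodes = 1UL << 1;
 *	migrate_pages(target_pid, sizeof(old_nodes) * 8,
 *		      &old_nodes, &new_nodes);
 */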
1607
1608
1609/* Retrieve NUMA policy */
1610static int kernel_get_mempolicy(int __user *policy,
1611                                unsigned long __user *nmask,
1612                                unsigned long maxnode,
1613                                unsigned long addr,
1614                                unsigned long flags)
1615{
1616        int err;
1617        int pval;
1618        nodemask_t nodes;
1619
1620        if (nmask != NULL && maxnode < nr_node_ids)
1621                return -EINVAL;
1622
1623        addr = untagged_addr(addr);
1624
1625        err = do_get_mempolicy(&pval, &nodes, addr, flags);
1626
1627        if (err)
1628                return err;
1629
1630        if (policy && put_user(pval, policy))
1631                return -EFAULT;
1632
1633        if (nmask)
1634                err = copy_nodes_to_user(nmask, maxnode, &nodes);
1635
1636        return err;
1637}
1638
1639SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1640                unsigned long __user *, nmask, unsigned long, maxnode,
1641                unsigned long, addr, unsigned long, flags)
1642{
1643        return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1644}
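/*
 * Illustrative userspace sketch (untested): querying which policy governs a
 * particular address with the get_mempolicy(2) wrapper from <numaif.h>.
 * MPOL_F_ADDR selects the VMA policy covering "addr" (a placeholder), and a
 * single unsigned long nodemask assumes nr_node_ids <= BITS_PER_LONG.
 *
 *	#include <numaif.h>
 *
 *	int mode;
 *	unsigned long nodemask;
 *	get_mempolicy(&mode, &nodemask, sizeof(nodemask) * 8,
 *		      addr, MPOL_F_ADDR);
 */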
1645
1646bool vma_migratable(struct vm_area_struct *vma)
1647{
1648        if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1649                return false;
1650
1651        /*
1652         * DAX device mappings require predictable access latency, so avoid
1653         * incurring periodic faults.
1654         */
1655        if (vma_is_dax(vma))
1656                return false;
1657
1658        if (is_vm_hugetlb_page(vma) &&
1659                !hugepage_migration_supported(hstate_vma(vma)))
1660                return false;
1661
1662        /*
1663         * Migration allocates pages in the highest zone. If we cannot
1664         * do so then migration (at least from node to node) is not
1665         * possible.
1666         */
1667        if (vma->vm_file &&
1668                gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1669                        < policy_zone)
1670                return false;
1671        return true;
1672}
1673
1674struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1675                                                unsigned long addr)
1676{
1677        struct mempolicy *pol = NULL;
1678
1679        if (vma) {
1680                if (vma->vm_ops && vma->vm_ops->get_policy) {
1681                        pol = vma->vm_ops->get_policy(vma, addr);
1682                } else if (vma->vm_policy) {
1683                        pol = vma->vm_policy;
1684
1685                        /*
1686                         * shmem_alloc_page() passes MPOL_F_SHARED policy with
1687                         * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1688                         * count on these policies which will be dropped by
1689                         * mpol_cond_put() later
1690                         */
1691                        if (mpol_needs_cond_ref(pol))
1692                                mpol_get(pol);
1693                }
1694        }
1695
1696        return pol;
1697}
1698
1699/*
1700 * get_vma_policy(@vma, @addr)
1701 * @vma: virtual memory area whose policy is sought
1702 * @addr: address in @vma for shared policy lookup
1703 *
1704 * Returns effective policy for a VMA at specified address.
1705 * Falls back to current->mempolicy or system default policy, as necessary.
1706 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1707 * count--added by the get_policy() vm_op, as appropriate--to protect against
1708 * freeing by another task.  It is the caller's responsibility to free the
1709 * extra reference for shared policies.
1710 */
1711static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1712                                                unsigned long addr)
1713{
1714        struct mempolicy *pol = __get_vma_policy(vma, addr);
1715
1716        if (!pol)
1717                pol = get_task_policy(current);
1718
1719        return pol;
1720}
1721
1722bool vma_policy_mof(struct vm_area_struct *vma)
1723{
1724        struct mempolicy *pol;
1725
1726        if (vma->vm_ops && vma->vm_ops->get_policy) {
1727                bool ret = false;
1728
1729                pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1730                if (pol && (pol->flags & MPOL_F_MOF))
1731                        ret = true;
1732                mpol_cond_put(pol);
1733
1734                return ret;
1735        }
1736
1737        pol = vma->vm_policy;
1738        if (!pol)
1739                pol = get_task_policy(current);
1740
1741        return pol->flags & MPOL_F_MOF;
1742}
1743
1744static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1745{
1746        enum zone_type dynamic_policy_zone = policy_zone;
1747
1748        BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1749
1750        /*
1751         * If policy->nodes has movable memory only, apply the policy
1752         * only when gfp_zone(gfp) == ZONE_MOVABLE.
1753         *
1754         * policy->nodes has already been intersected with
1755         * node_states[N_MEMORY], so if the following test fails it implies
1756         * that policy->nodes has movable memory only.
1757         */
1758        if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))
1759                dynamic_policy_zone = ZONE_MOVABLE;
1760
1761        return zone >= dynamic_policy_zone;
1762}
1763
1764/*
1765 * Return a nodemask representing a mempolicy for filtering nodes for
1766 * page allocation
1767 */
1768nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1769{
1770        int mode = policy->mode;
1771
1772        /* Lower zones don't get a nodemask applied for MPOL_BIND */
1773        if (unlikely(mode == MPOL_BIND) &&
1774                apply_policy_zone(policy, gfp_zone(gfp)) &&
1775                cpuset_nodemask_valid_mems_allowed(&policy->nodes))
1776                return &policy->nodes;
1777
1778        if (mode == MPOL_PREFERRED_MANY)
1779                return &policy->nodes;
1780
1781        return NULL;
1782}
1783
1784/*
1785 * Return the preferred node id for the 'prefer' mempolicy, and return
1786 * the given id for all other policies.
1787 *
1788 * policy_node() is always coupled with policy_nodemask(), which
1789 * secures the nodemask limit for 'bind' and 'prefer-many' policy.
1790 */
1791static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
1792{
1793        if (policy->mode == MPOL_PREFERRED) {
1794                nd = first_node(policy->nodes);
1795        } else {
1796                /*
1797                 * __GFP_THISNODE shouldn't even be used with the bind policy,
1798                 * because it could easily break the expectation of staying on
1799                 * the requested node and thereby violate the policy.
1800                 */
1801                WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1802        }
1803
1804        return nd;
1805}
1806
1807/* Do dynamic interleaving for a process */
1808static unsigned interleave_nodes(struct mempolicy *policy)
1809{
1810        unsigned next;
1811        struct task_struct *me = current;
1812
1813        next = next_node_in(me->il_prev, policy->nodes);
1814        if (next < MAX_NUMNODES)
1815                me->il_prev = next;
1816        return next;
1817}
1818
1819/*
1820 * Depending on the memory policy provide a node from which to allocate the
1821 * next slab entry.
1822 */
1823unsigned int mempolicy_slab_node(void)
1824{
1825        struct mempolicy *policy;
1826        int node = numa_mem_id();
1827
1828        if (!in_task())
1829                return node;
1830
1831        policy = current->mempolicy;
1832        if (!policy)
1833                return node;
1834
1835        switch (policy->mode) {
1836        case MPOL_PREFERRED:
1837                return first_node(policy->nodes);
1838
1839        case MPOL_INTERLEAVE:
1840                return interleave_nodes(policy);
1841
1842        case MPOL_BIND:
1843        case MPOL_PREFERRED_MANY:
1844        {
1845                struct zoneref *z;
1846
1847                /*
1848                 * Follow bind policy behavior and start allocation at the
1849                 * first node.
1850                 */
1851                struct zonelist *zonelist;
1852                enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1853                zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1854                z = first_zones_zonelist(zonelist, highest_zoneidx,
1855                                                        &policy->nodes);
1856                return z->zone ? zone_to_nid(z->zone) : node;
1857        }
1858        case MPOL_LOCAL:
1859                return node;
1860
1861        default:
1862                BUG();
1863        }
1864}
1865
1866/*
1867 * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1868 * node in pol->nodes (starting from n=0), wrapping around if n exceeds the
1869 * number of present nodes.
1870 */
1871static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1872{
1873        nodemask_t nodemask = pol->nodes;
1874        unsigned int target, nnodes;
1875        int i;
1876        int nid;
1877        /*
1878         * The barrier will stabilize the nodemask in a register or on
1879         * the stack so that it will stop changing under the code.
1880         *
1881         * Between first_node() and next_node(), pol->nodes could be changed
1882         * by other threads, so we work on a local stack copy of pol->nodes.
1883         */
1884        barrier();
1885
1886        nnodes = nodes_weight(nodemask);
1887        if (!nnodes)
1888                return numa_node_id();
1889        target = (unsigned int)n % nnodes;
1890        nid = first_node(nodemask);
1891        for (i = 0; i < target; i++)
1892                nid = next_node(nid, nodemask);
1893        return nid;
1894}
1895
1896/* Determine a node number for interleave */
1897static inline unsigned interleave_nid(struct mempolicy *pol,
1898                 struct vm_area_struct *vma, unsigned long addr, int shift)
1899{
1900        if (vma) {
1901                unsigned long off;
1902
1903                /*
1904                 * for small pages, there is no difference between
1905                 * shift and PAGE_SHIFT, so the bit-shift is safe.
1906                 * for huge pages, since vm_pgoff is in units of small
1907                 * pages, we need to shift off the always 0 bits to get
1908                 * a useful offset.
1909                 */
1910                BUG_ON(shift < PAGE_SHIFT);
1911                off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1912                off += (addr - vma->vm_start) >> shift;
1913                return offset_il_node(pol, off);
1914        } else
1915                return interleave_nodes(pol);
1916}
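/*
 * Worked example (illustrative): for a hugetlb VMA with a 2MB page size
 * (shift == 21), vm_pgoff == 0 and a fault at vma->vm_start + 5MB,
 * off = 5MB >> 21 = 2, so offset_il_node() picks the third node set in
 * pol->nodes (counting from 0), wrapping around if fewer nodes are set.
 */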
1917
1918#ifdef CONFIG_HUGETLBFS
1919/*
1920 * huge_node(@vma, @addr, @gfp_flags, @mpol)
1921 * @vma: virtual memory area whose policy is sought
1922 * @addr: address in @vma for shared policy lookup and interleave policy
1923 * @gfp_flags: for requested zone
1924 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1925 * @nodemask: pointer to nodemask pointer for 'bind' and 'prefer-many' policy
1926 *
1927 * Returns a nid suitable for a huge page allocation and a pointer
1928 * to the struct mempolicy for conditional unref after allocation.
1929 * If the effective policy is 'bind' or 'prefer-many', returns a pointer
1930 * to the mempolicy's @nodemask for filtering the zonelist.
1931 *
1932 * Must be protected by read_mems_allowed_begin()
1933 */
1934int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
1935                                struct mempolicy **mpol, nodemask_t **nodemask)
1936{
1937        int nid;
1938        int mode;
1939
1940        *mpol = get_vma_policy(vma, addr);
1941        *nodemask = NULL;
1942        mode = (*mpol)->mode;
1943
1944        if (unlikely(mode == MPOL_INTERLEAVE)) {
1945                nid = interleave_nid(*mpol, vma, addr,
1946                                        huge_page_shift(hstate_vma(vma)));
1947        } else {
1948                nid = policy_node(gfp_flags, *mpol, numa_node_id());
1949                if (mode == MPOL_BIND || mode == MPOL_PREFERRED_MANY)
1950                        *nodemask = &(*mpol)->nodes;
1951        }
1952        return nid;
1953}
1954
1955/*
1956 * init_nodemask_of_mempolicy
1957 *
1958 * If the current task's mempolicy is "default" [NULL], return 'false'
1959 * to indicate default policy.  Otherwise, extract the policy nodemask
1960 * for 'bind' or 'interleave' policy into the argument nodemask, or
1961 * initialize the argument nodemask to contain the single node for
1962 * 'preferred' or 'local' policy and return 'true' to indicate presence
1963 * of non-default mempolicy.
1964 *
1965 * We don't bother with reference counting the mempolicy [mpol_get/put]
1966 * because the current task is examining its own mempolicy and a task's
1967 * mempolicy is only ever changed by the task itself.
1968 *
1969 * N.B., it is the caller's responsibility to free a returned nodemask.
1970 */
1971bool init_nodemask_of_mempolicy(nodemask_t *mask)
1972{
1973        struct mempolicy *mempolicy;
1974
1975        if (!(mask && current->mempolicy))
1976                return false;
1977
1978        task_lock(current);
1979        mempolicy = current->mempolicy;
1980        switch (mempolicy->mode) {
1981        case MPOL_PREFERRED:
1982        case MPOL_PREFERRED_MANY:
1983        case MPOL_BIND:
1984        case MPOL_INTERLEAVE:
1985                *mask = mempolicy->nodes;
1986                break;
1987
1988        case MPOL_LOCAL:
1989                init_nodemask_of_node(mask, numa_node_id());
1990                break;
1991
1992        default:
1993                BUG();
1994        }
1995        task_unlock(current);
1996
1997        return true;
1998}
1999#endif
2000
2001/*
2002 * mempolicy_in_oom_domain
2003 *
2004 * If tsk's mempolicy is "bind", check for intersection between mask and
2005 * the policy nodemask. Otherwise, return true for all other policies
2006 * including "interleave", as a tsk with "interleave" policy may have
2007 * memory allocated from all nodes in system.
2008 *
2009 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2010 */
2011bool mempolicy_in_oom_domain(struct task_struct *tsk,
2012                                        const nodemask_t *mask)
2013{
2014        struct mempolicy *mempolicy;
2015        bool ret = true;
2016
2017        if (!mask)
2018                return ret;
2019
2020        task_lock(tsk);
2021        mempolicy = tsk->mempolicy;
2022        if (mempolicy && mempolicy->mode == MPOL_BIND)
2023                ret = nodes_intersects(mempolicy->nodes, *mask);
2024        task_unlock(tsk);
2025
2026        return ret;
2027}
2028
2029/* Allocate a page in interleaved policy.
2030   Own path because it needs to do special accounting. */
2031static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2032                                        unsigned nid)
2033{
2034        struct page *page;
2035
2036        page = __alloc_pages(gfp, order, nid, NULL);
2037        /* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2038        if (!static_branch_likely(&vm_numa_stat_key))
2039                return page;
2040        if (page && page_to_nid(page) == nid) {
2041                preempt_disable();
2042                __count_numa_event(page_zone(page), NUMA_INTERLEAVE_HIT);
2043                preempt_enable();
2044        }
2045        return page;
2046}
2047
2048static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order,
2049                                                int nid, struct mempolicy *pol)
2050{
2051        struct page *page;
2052        gfp_t preferred_gfp;
2053
2054        /*
2055         * This is a two pass approach. The first pass will only try the
2056         * preferred nodes but skip the direct reclaim and allow the
2057         * allocation to fail, while the second pass will try all the
2058         * nodes in the system.
2059         */
2060        preferred_gfp = gfp | __GFP_NOWARN;
2061        preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2062        page = __alloc_pages(preferred_gfp, order, nid, &pol->nodes);
2063        if (!page)
2064                page = __alloc_pages(gfp, order, numa_node_id(), NULL);
2065
2066        return page;
2067}
2068
2069/**
2070 * alloc_pages_vma - Allocate a page for a VMA.
2071 * @gfp: GFP flags.
2072 * @order: Order of the GFP allocation.
2073 * @vma: Pointer to VMA or NULL if not available.
2074 * @addr: Virtual address of the allocation.  Must be inside @vma.
2075 * @node: Which node to prefer for allocation (modulo policy).
2076 * @hugepage: For hugepages try only the preferred node if possible.
2077 *
2078 * Allocate a page for a specific address in @vma, using the appropriate
2079 * NUMA policy.  When @vma is not NULL the caller must hold the mmap_lock
2080 * of the mm_struct of the VMA to prevent it from going away.  Should be
2081 * used for all allocations for pages that will be mapped into user space.
2082 *
2083 * Return: The page on success or NULL if allocation fails.
2084 */
2085struct page *alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2086                unsigned long addr, int node, bool hugepage)
2087{
2088        struct mempolicy *pol;
2089        struct page *page;
2090        int preferred_nid;
2091        nodemask_t *nmask;
2092
2093        pol = get_vma_policy(vma, addr);
2094
2095        if (pol->mode == MPOL_INTERLEAVE) {
2096                unsigned nid;
2097
2098                nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2099                mpol_cond_put(pol);
2100                page = alloc_page_interleave(gfp, order, nid);
2101                goto out;
2102        }
2103
2104        if (pol->mode == MPOL_PREFERRED_MANY) {
2105                page = alloc_pages_preferred_many(gfp, order, node, pol);
2106                mpol_cond_put(pol);
2107                goto out;
2108        }
2109
2110        if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2111                int hpage_node = node;
2112
2113                /*
2114                 * For hugepage allocation, with a non-interleave policy that
2115                 * allows the current node (or another explicitly preferred
2116                 * node), we only try to allocate from the current/preferred
2117                 * node and don't fall back to other nodes, as the cost of
2118                 * remote accesses would likely offset THP benefits.
2119                 *
2120                 * If the policy is interleave or does not allow the current
2121                 * node in its nodemask, we allocate the standard way.
2122                 */
2123                if (pol->mode == MPOL_PREFERRED)
2124                        hpage_node = first_node(pol->nodes);
2125
2126                nmask = policy_nodemask(gfp, pol);
2127                if (!nmask || node_isset(hpage_node, *nmask)) {
2128                        mpol_cond_put(pol);
2129                        /*
2130                         * First, try to allocate THP only on local node, but
2131                         * don't reclaim unnecessarily, just compact.
2132                         */
2133                        page = __alloc_pages_node(hpage_node,
2134                                gfp | __GFP_THISNODE | __GFP_NORETRY, order);
2135
2136                        /*
2137                         * If hugepage allocations are configured to always use
2138                         * synchronous compaction, or the vma has been madvised
2139                         * to prefer hugepage backing, retry allowing remote
2140                         * memory with both reclaim and compaction as well.
2141                         */
2142                        if (!page && (gfp & __GFP_DIRECT_RECLAIM))
2143                                page = __alloc_pages_node(hpage_node,
2144                                                                gfp, order);
2145
2146                        goto out;
2147                }
2148        }
2149
2150        nmask = policy_nodemask(gfp, pol);
2151        preferred_nid = policy_node(gfp, pol, node);
2152        page = __alloc_pages(gfp, order, preferred_nid, nmask);
2153        mpol_cond_put(pol);
2154out:
2155        return page;
2156}
2157EXPORT_SYMBOL(alloc_pages_vma);
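/*
 * Minimal in-kernel usage sketch (illustrative only; assumes a fault path
 * that already holds the mmap_lock for vma):
 *
 *	page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
 *			       numa_node_id(), false);
 *	if (!page)
 *		return VM_FAULT_OOM;
 */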
2158
2159/**
2160 * alloc_pages - Allocate pages.
2161 * @gfp: GFP flags.
2162 * @order: Power of two of number of pages to allocate.
2163 *
2164 * Allocate 1 << @order contiguous pages.  The physical address of the
2165 * first page is naturally aligned (eg an order-3 allocation will be aligned
2166 * to a multiple of 8 * PAGE_SIZE bytes).  The NUMA policy of the current
2167 * process is honoured when in process context.
2168 *
2169 * Context: Can be called from any context, providing the appropriate GFP
2170 * flags are used.
2171 * Return: The page on success or NULL if allocation fails.
2172 */
2173struct page *alloc_pages(gfp_t gfp, unsigned order)
2174{
2175        struct mempolicy *pol = &default_policy;
2176        struct page *page;
2177
2178        if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2179                pol = get_task_policy(current);
2180
2181        /*
2182         * No reference counting needed for current->mempolicy
2183         * nor system default_policy
2184         */
2185        if (pol->mode == MPOL_INTERLEAVE)
2186                page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2187        else if (pol->mode == MPOL_PREFERRED_MANY)
2188                page = alloc_pages_preferred_many(gfp, order,
2189                                numa_node_id(), pol);
2190        else
2191                page = __alloc_pages(gfp, order,
2192                                policy_node(gfp, pol, numa_node_id()),
2193                                policy_nodemask(gfp, pol));
2194
2195        return page;
2196}
2197EXPORT_SYMBOL(alloc_pages);
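/*
 * Minimal usage sketch (illustrative): an order-2 allocation from process
 * context, which therefore honours the caller's task policy.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	if (page)
 *		__free_pages(page, 2);
 */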
2198
2199int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2200{
2201        struct mempolicy *pol = mpol_dup(vma_policy(src));
2202
2203        if (IS_ERR(pol))
2204                return PTR_ERR(pol);
2205        dst->vm_policy = pol;
2206        return 0;
2207}
2208
2209/*
2210 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2211 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2212 * with the mems_allowed returned by cpuset_mems_allowed().  This
2213 * keeps mempolicies cpuset-relative after their cpuset moves.  See
2214 * further kernel/cpuset.c update_nodemask().
2215 *
2216 * current's mempolicy may be rebound by another task (the task that changes
2217 * the cpuset's mems), so we needn't do rebind work for the current task.
2218 */
2219
2220/* Slow path of a mempolicy duplicate */
2221struct mempolicy *__mpol_dup(struct mempolicy *old)
2222{
2223        struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2224
2225        if (!new)
2226                return ERR_PTR(-ENOMEM);
2227
2228        /* task's mempolicy is protected by alloc_lock */
2229        if (old == current->mempolicy) {
2230                task_lock(current);
2231                *new = *old;
2232                task_unlock(current);
2233        } else
2234                *new = *old;
2235
2236        if (current_cpuset_is_being_rebound()) {
2237                nodemask_t mems = cpuset_mems_allowed(current);
2238                mpol_rebind_policy(new, &mems);
2239        }
2240        atomic_set(&new->refcnt, 1);
2241        return new;
2242}
2243
2244/* Slow path of a mempolicy comparison */
2245bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2246{
2247        if (!a || !b)
2248                return false;
2249        if (a->mode != b->mode)
2250                return false;
2251        if (a->flags != b->flags)
2252                return false;
2253        if (mpol_store_user_nodemask(a))
2254                if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2255                        return false;
2256
2257        switch (a->mode) {
2258        case MPOL_BIND:
2259        case MPOL_INTERLEAVE:
2260        case MPOL_PREFERRED:
2261        case MPOL_PREFERRED_MANY:
2262                return !!nodes_equal(a->nodes, b->nodes);
2263        case MPOL_LOCAL:
2264                return true;
2265        default:
2266                BUG();
2267                return false;
2268        }
2269}
2270
2271/*
2272 * Shared memory backing store policy support.
2273 *
2274 * Remember policies even when nobody has shared memory mapped.
2275 * The policies are kept in Red-Black tree linked from the inode.
2276 * They are protected by the sp->lock rwlock, which should be held
2277 * for any accesses to the tree.
2278 */
2279
2280/*
2281 * lookup first element intersecting start-end.  Caller holds sp->lock for
2282 * reading or for writing
2283 */
2284static struct sp_node *
2285sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2286{
2287        struct rb_node *n = sp->root.rb_node;
2288
2289        while (n) {
2290                struct sp_node *p = rb_entry(n, struct sp_node, nd);
2291
2292                if (start >= p->end)
2293                        n = n->rb_right;
2294                else if (end <= p->start)
2295                        n = n->rb_left;
2296                else
2297                        break;
2298        }
2299        if (!n)
2300                return NULL;
2301        for (;;) {
2302                struct sp_node *w = NULL;
2303                struct rb_node *prev = rb_prev(n);
2304                if (!prev)
2305                        break;
2306                w = rb_entry(prev, struct sp_node, nd);
2307                if (w->end <= start)
2308                        break;
2309                n = prev;
2310        }
2311        return rb_entry(n, struct sp_node, nd);
2312}
2313
2314/*
2315 * Insert a new shared policy into the list.  Caller holds sp->lock for
2316 * writing.
2317 */
2318static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2319{
2320        struct rb_node **p = &sp->root.rb_node;
2321        struct rb_node *parent = NULL;
2322        struct sp_node *nd;
2323
2324        while (*p) {
2325                parent = *p;
2326                nd = rb_entry(parent, struct sp_node, nd);
2327                if (new->start < nd->start)
2328                        p = &(*p)->rb_left;
2329                else if (new->end > nd->end)
2330                        p = &(*p)->rb_right;
2331                else
2332                        BUG();
2333        }
2334        rb_link_node(&new->nd, parent, p);
2335        rb_insert_color(&new->nd, &sp->root);
2336        pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2337                 new->policy ? new->policy->mode : 0);
2338}
2339
2340/* Find shared policy intersecting idx */
2341struct mempolicy *
2342mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2343{
2344        struct mempolicy *pol = NULL;
2345        struct sp_node *sn;
2346
2347        if (!sp->root.rb_node)
2348                return NULL;
2349        read_lock(&sp->lock);
2350        sn = sp_lookup(sp, idx, idx+1);
2351        if (sn) {
2352                mpol_get(sn->policy);
2353                pol = sn->policy;
2354        }
2355        read_unlock(&sp->lock);
2356        return pol;
2357}
2358
2359static void sp_free(struct sp_node *n)
2360{
2361        mpol_put(n->policy);
2362        kmem_cache_free(sn_cache, n);
2363}
2364
2365/**
2366 * mpol_misplaced - check whether current page node is valid in policy
2367 *
2368 * @page: page to be checked
2369 * @vma: vm area where page mapped
2370 * @addr: virtual address where page mapped
2371 *
2372 * Lookup current policy node id for vma,addr and "compare to" page's
2373 * node id.  Policy determination "mimics" alloc_page_vma().
2374 * Called from fault path where we know the vma and faulting address.
2375 *
2376 * Return: NUMA_NO_NODE if the page is in a node that is valid for this
2377 * policy, or a suitable node ID to allocate a replacement page from.
2378 */
2379int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2380{
2381        struct mempolicy *pol;
2382        struct zoneref *z;
2383        int curnid = page_to_nid(page);
2384        unsigned long pgoff;
2385        int thiscpu = raw_smp_processor_id();
2386        int thisnid = cpu_to_node(thiscpu);
2387        int polnid = NUMA_NO_NODE;
2388        int ret = NUMA_NO_NODE;
2389
2390        pol = get_vma_policy(vma, addr);
2391        if (!(pol->flags & MPOL_F_MOF))
2392                goto out;
2393
2394        switch (pol->mode) {
2395        case MPOL_INTERLEAVE:
2396                pgoff = vma->vm_pgoff;
2397                pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2398                polnid = offset_il_node(pol, pgoff);
2399                break;
2400
2401        case MPOL_PREFERRED:
2402                if (node_isset(curnid, pol->nodes))
2403                        goto out;
2404                polnid = first_node(pol->nodes);
2405                break;
2406
2407        case MPOL_LOCAL:
2408                polnid = numa_node_id();
2409                break;
2410
2411        case MPOL_BIND:
2412                /* Optimize placement among multiple nodes via NUMA balancing */
2413                if (pol->flags & MPOL_F_MORON) {
2414                        if (node_isset(thisnid, pol->nodes))
2415                                break;
2416                        goto out;
2417                }
2418                fallthrough;
2419
2420        case MPOL_PREFERRED_MANY:
2421                /*
2422                 * use current page if in policy nodemask,
2423                 * else select nearest allowed node, if any.
2424                 * If no allowed nodes, use current [!misplaced].
2425                 */
2426                if (node_isset(curnid, pol->nodes))
2427                        goto out;
2428                z = first_zones_zonelist(
2429                                node_zonelist(numa_node_id(), GFP_HIGHUSER),
2430                                gfp_zone(GFP_HIGHUSER),
2431                                &pol->nodes);
2432                polnid = zone_to_nid(z->zone);
2433                break;
2434
2435        default:
2436                BUG();
2437        }
2438
2439        /* Migrate the page towards the node whose CPU is referencing it */
2440        if (pol->flags & MPOL_F_MORON) {
2441                polnid = thisnid;
2442
2443                if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2444                        goto out;
2445        }
2446
2447        if (curnid != polnid)
2448                ret = polnid;
2449out:
2450        mpol_cond_put(pol);
2451
2452        return ret;
2453}
2454
2455/*
2456 * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2457 * dropped after task->mempolicy is set to NULL so that any allocation done as
2458 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2459 * policy.
2460 */
2461void mpol_put_task_policy(struct task_struct *task)
2462{
2463        struct mempolicy *pol;
2464
2465        task_lock(task);
2466        pol = task->mempolicy;
2467        task->mempolicy = NULL;
2468        task_unlock(task);
2469        mpol_put(pol);
2470}
2471
2472static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2473{
2474        pr_debug("deleting %lx-%lx\n", n->start, n->end);
2475        rb_erase(&n->nd, &sp->root);
2476        sp_free(n);
2477}
2478
2479static void sp_node_init(struct sp_node *node, unsigned long start,
2480                        unsigned long end, struct mempolicy *pol)
2481{
2482        node->start = start;
2483        node->end = end;
2484        node->policy = pol;
2485}
2486
2487static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2488                                struct mempolicy *pol)
2489{
2490        struct sp_node *n;
2491        struct mempolicy *newpol;
2492
2493        n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2494        if (!n)
2495                return NULL;
2496
2497        newpol = mpol_dup(pol);
2498        if (IS_ERR(newpol)) {
2499                kmem_cache_free(sn_cache, n);
2500                return NULL;
2501        }
2502        newpol->flags |= MPOL_F_SHARED;
2503        sp_node_init(n, start, end, newpol);
2504
2505        return n;
2506}
2507
2508/* Replace a policy range. */
2509static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2510                                 unsigned long end, struct sp_node *new)
2511{
2512        struct sp_node *n;
2513        struct sp_node *n_new = NULL;
2514        struct mempolicy *mpol_new = NULL;
2515        int ret = 0;
2516
2517restart:
2518        write_lock(&sp->lock);
2519        n = sp_lookup(sp, start, end);
2520        /* Take care of old policies in the same range. */
2521        while (n && n->start < end) {
2522                struct rb_node *next = rb_next(&n->nd);
2523                if (n->start >= start) {
2524                        if (n->end <= end)
2525                                sp_delete(sp, n);
2526                        else
2527                                n->start = end;
2528                } else {
2529                        /* Old policy spanning whole new range. */
2530                        if (n->end > end) {
2531                                if (!n_new)
2532                                        goto alloc_new;
2533
2534                                *mpol_new = *n->policy;
2535                                atomic_set(&mpol_new->refcnt, 1);
2536                                sp_node_init(n_new, end, n->end, mpol_new);
2537                                n->end = start;
2538                                sp_insert(sp, n_new);
2539                                n_new = NULL;
2540                                mpol_new = NULL;
2541                                break;
2542                        } else
2543                                n->end = start;
2544                }
2545                if (!next)
2546                        break;
2547                n = rb_entry(next, struct sp_node, nd);
2548        }
2549        if (new)
2550                sp_insert(sp, new);
2551        write_unlock(&sp->lock);
2552        ret = 0;
2553
2554err_out:
2555        if (mpol_new)
2556                mpol_put(mpol_new);
2557        if (n_new)
2558                kmem_cache_free(sn_cache, n_new);
2559
2560        return ret;
2561
2562alloc_new:
2563        write_unlock(&sp->lock);
2564        ret = -ENOMEM;
2565        n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2566        if (!n_new)
2567                goto err_out;
2568        mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2569        if (!mpol_new)
2570                goto err_out;
2571        goto restart;
2572}
2573
2574/**
2575 * mpol_shared_policy_init - initialize shared policy for inode
2576 * @sp: pointer to inode shared policy
2577 * @mpol:  struct mempolicy to install
2578 *
2579 * Install non-NULL @mpol in inode's shared policy rb-tree.
2580 * On entry, the current task has a reference on a non-NULL @mpol.
2581 * This must be released on exit.
2582 * This is called during get_inode() calls, so we can use GFP_KERNEL.
2583 */
2584void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2585{
2586        int ret;
2587
2588        sp->root = RB_ROOT;             /* empty tree == default mempolicy */
2589        rwlock_init(&sp->lock);
2590
2591        if (mpol) {
2592                struct vm_area_struct pvma;
2593                struct mempolicy *new;
2594                NODEMASK_SCRATCH(scratch);
2595
2596                if (!scratch)
2597                        goto put_mpol;
2598                /* contextualize the tmpfs mount point mempolicy */
2599                new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2600                if (IS_ERR(new))
2601                        goto free_scratch; /* no valid nodemask intersection */
2602
2603                task_lock(current);
2604                ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2605                task_unlock(current);
2606                if (ret)
2607                        goto put_new;
2608
2609                /* Create pseudo-vma that contains just the policy */
2610                vma_init(&pvma, NULL);
2611                pvma.vm_end = TASK_SIZE;        /* policy covers entire file */
2612                mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2613
2614put_new:
2615                mpol_put(new);                  /* drop initial ref */
2616free_scratch:
2617                NODEMASK_SCRATCH_FREE(scratch);
2618put_mpol:
2619                mpol_put(mpol); /* drop our incoming ref on sb mpol */
2620        }
2621}
2622
2623int mpol_set_shared_policy(struct shared_policy *info,
2624                        struct vm_area_struct *vma, struct mempolicy *npol)
2625{
2626        int err;
2627        struct sp_node *new = NULL;
2628        unsigned long sz = vma_pages(vma);
2629
2630        pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2631                 vma->vm_pgoff,
2632                 sz, npol ? npol->mode : -1,
2633                 npol ? npol->flags : -1,
2634                 npol ? nodes_addr(npol->nodes)[0] : NUMA_NO_NODE);
2635
2636        if (npol) {
2637                new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2638                if (!new)
2639                        return -ENOMEM;
2640        }
2641        err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2642        if (err && new)
2643                sp_free(new);
2644        return err;
2645}
2646
2647/* Free a backing policy store on inode delete. */
2648void mpol_free_shared_policy(struct shared_policy *p)
2649{
2650        struct sp_node *n;
2651        struct rb_node *next;
2652
2653        if (!p->root.rb_node)
2654                return;
2655        write_lock(&p->lock);
2656        next = rb_first(&p->root);
2657        while (next) {
2658                n = rb_entry(next, struct sp_node, nd);
2659                next = rb_next(&n->nd);
2660                sp_delete(p, n);
2661        }
2662        write_unlock(&p->lock);
2663}
2664
2665#ifdef CONFIG_NUMA_BALANCING
2666static int __initdata numabalancing_override;
2667
2668static void __init check_numabalancing_enable(void)
2669{
2670        bool numabalancing_default = false;
2671
2672        if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2673                numabalancing_default = true;
2674
2675        /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2676        if (numabalancing_override)
2677                set_numabalancing_state(numabalancing_override == 1);
2678
2679        if (num_online_nodes() > 1 && !numabalancing_override) {
2680                pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2681                        numabalancing_default ? "Enabling" : "Disabling");
2682                set_numabalancing_state(numabalancing_default);
2683        }
2684}
2685
2686static int __init setup_numabalancing(char *str)
2687{
2688        int ret = 0;
2689        if (!str)
2690                goto out;
2691
2692        if (!strcmp(str, "enable")) {
2693                numabalancing_override = 1;
2694                ret = 1;
2695        } else if (!strcmp(str, "disable")) {
2696                numabalancing_override = -1;
2697                ret = 1;
2698        }
2699out:
2700        if (!ret)
2701                pr_warn("Unable to parse numa_balancing=\n");
2702
2703        return ret;
2704}
2705__setup("numa_balancing=", setup_numabalancing);
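/*
 * Usage note (illustrative): automatic NUMA balancing can be forced on or off
 * from the kernel command line, e.g. "numa_balancing=disable", which sets
 * numabalancing_override before check_numabalancing_enable() runs.
 */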
2706#else
2707static inline void __init check_numabalancing_enable(void)
2708{
2709}
2710#endif /* CONFIG_NUMA_BALANCING */
2711
2712/* assumes fs == KERNEL_DS */
2713void __init numa_policy_init(void)
2714{
2715        nodemask_t interleave_nodes;
2716        unsigned long largest = 0;
2717        int nid, prefer = 0;
2718
2719        policy_cache = kmem_cache_create("numa_policy",
2720                                         sizeof(struct mempolicy),
2721                                         0, SLAB_PANIC, NULL);
2722
2723        sn_cache = kmem_cache_create("shared_policy_node",
2724                                     sizeof(struct sp_node),
2725                                     0, SLAB_PANIC, NULL);
2726
2727        for_each_node(nid) {
2728                preferred_node_policy[nid] = (struct mempolicy) {
2729                        .refcnt = ATOMIC_INIT(1),
2730                        .mode = MPOL_PREFERRED,
2731                        .flags = MPOL_F_MOF | MPOL_F_MORON,
2732                        .nodes = nodemask_of_node(nid),
2733                };
2734        }
2735
2736        /*
2737         * Set interleaving policy for system init. Interleaving is only
2738         * enabled across suitably sized nodes (default is >= 16MB); if all
2739         * nodes are smaller than that, fall back to the largest node.
2740         */
2741        nodes_clear(interleave_nodes);
2742        for_each_node_state(nid, N_MEMORY) {
2743                unsigned long total_pages = node_present_pages(nid);
2744
2745                /* Preserve the largest node */
2746                if (largest < total_pages) {
2747                        largest = total_pages;
2748                        prefer = nid;
2749                }
2750
2751                /* Interleave this node? */
2752                if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2753                        node_set(nid, interleave_nodes);
2754        }
2755
2756        /* All too small, use the largest */
2757        if (unlikely(nodes_empty(interleave_nodes)))
2758                node_set(prefer, interleave_nodes);
2759
2760        if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2761                pr_err("%s: interleaving failed\n", __func__);
2762
2763        check_numabalancing_enable();
2764}
2765
2766/* Reset policy of current process to default */
2767void numa_default_policy(void)
2768{
2769        do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2770}
2771
2772/*
2773 * Parse and format mempolicy from/to strings
2774 */
2775
2776static const char * const policy_modes[] =
2777{
2778        [MPOL_DEFAULT]    = "default",
2779        [MPOL_PREFERRED]  = "prefer",
2780        [MPOL_BIND]       = "bind",
2781        [MPOL_INTERLEAVE] = "interleave",
2782        [MPOL_LOCAL]      = "local",
2783        [MPOL_PREFERRED_MANY]  = "prefer (many)",
2784};
2785
2786
2787#ifdef CONFIG_TMPFS
2788/**
2789 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2790 * @str:  string containing mempolicy to parse
2791 * @mpol:  pointer to struct mempolicy pointer, returned on success.
2792 *
2793 * Format of input:
2794 *      <mode>[=<flags>][:<nodelist>]
2795 *
2796 * On success, returns 0, else 1
2797 */
2798int mpol_parse_str(char *str, struct mempolicy **mpol)
2799{
2800        struct mempolicy *new = NULL;
2801        unsigned short mode_flags;
2802        nodemask_t nodes;
2803        char *nodelist = strchr(str, ':');
2804        char *flags = strchr(str, '=');
2805        int err = 1, mode;
2806
2807        if (flags)
2808                *flags++ = '\0';        /* terminate mode string */
2809
2810        if (nodelist) {
2811                /* NUL-terminate mode or flags string */
2812                *nodelist++ = '\0';
2813                if (nodelist_parse(nodelist, nodes))
2814                        goto out;
2815                if (!nodes_subset(nodes, node_states[N_MEMORY]))
2816                        goto out;
2817        } else
2818                nodes_clear(nodes);
2819
2820        mode = match_string(policy_modes, MPOL_MAX, str);
2821        if (mode < 0)
2822                goto out;
2823
2824        switch (mode) {
2825        case MPOL_PREFERRED:
2826                /*
2827                 * Insist on a nodelist of one node only; later we use
2828                 * first_node(nodes) to grab a single node, so the
2829                 * nodelist (and hence nodes) cannot be empty here.
2830                 */
2831                if (nodelist) {
2832                        char *rest = nodelist;
2833                        while (isdigit(*rest))
2834                                rest++;
2835                        if (*rest)
2836                                goto out;
2837                        if (nodes_empty(nodes))
2838                                goto out;
2839                }
2840                break;
2841        case MPOL_INTERLEAVE:
2842                /*
2843                 * Default to online nodes with memory if no nodelist
2844                 */
2845                if (!nodelist)
2846                        nodes = node_states[N_MEMORY];
2847                break;
2848        case MPOL_LOCAL:
2849                /*
2850                 * Don't allow a nodelist;  mpol_new() checks flags
2851                 */
2852                if (nodelist)
2853                        goto out;
2854                break;
2855        case MPOL_DEFAULT:
2856                /*
2857                 * Insist on an empty nodelist
2858                 */
2859                if (!nodelist)
2860                        err = 0;
2861                goto out;
2862        case MPOL_PREFERRED_MANY:
2863        case MPOL_BIND:
2864                /*
2865                 * Insist on a nodelist
2866                 */
2867                if (!nodelist)
2868                        goto out;
2869        }
2870
2871        mode_flags = 0;
2872        if (flags) {
2873                /*
2874                 * Currently, we only support two mutually exclusive
2875                 * mode flags.
2876                 */
2877                if (!strcmp(flags, "static"))
2878                        mode_flags |= MPOL_F_STATIC_NODES;
2879                else if (!strcmp(flags, "relative"))
2880                        mode_flags |= MPOL_F_RELATIVE_NODES;
2881                else
2882                        goto out;
2883        }
2884
2885        new = mpol_new(mode, mode_flags, &nodes);
2886        if (IS_ERR(new))
2887                goto out;
2888
2889        /*
2890         * Save nodes for mpol_to_str() to show the tmpfs mount options
2891         * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2892         */
2893        if (mode != MPOL_PREFERRED) {
2894                new->nodes = nodes;
2895        } else if (nodelist) {
2896                nodes_clear(new->nodes);
2897                node_set(first_node(nodes), new->nodes);
2898        } else {
2899                new->mode = MPOL_LOCAL;
2900        }
2901
2902        /*
2903         * Save nodes for contextualization: this will be used to "clone"
2904         * the mempolicy in a specific context [cpuset] at a later time.
2905         */
2906        new->w.user_nodemask = nodes;
2907
2908        err = 0;
2909
2910out:
2911        /* Restore string for error message */
2912        if (nodelist)
2913                *--nodelist = ':';
2914        if (flags)
2915                *--flags = '=';
2916        if (!err)
2917                *mpol = new;
2918        return err;
2919}
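/*
 * Example inputs (illustrative): "interleave:0-3" interleaves over nodes 0-3,
 * "bind=static:0,2" binds to nodes 0 and 2 with the static flag, and "local"
 * takes neither flags nor a nodelist.  A typical source of such a string is
 * the tmpfs mount option, e.g.:
 *
 *	mount -t tmpfs -o mpol=interleave:0-3 tmpfs /mnt
 */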
2920#endif /* CONFIG_TMPFS */
2921
2922/**
2923 * mpol_to_str - format a mempolicy structure for printing
2924 * @buffer:  to contain formatted mempolicy string
2925 * @maxlen:  length of @buffer
2926 * @pol:  pointer to mempolicy to be formatted
2927 *
2928 * Convert @pol into a string.  If @buffer is too short, truncate the string.
2929 * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
2930 * longest flag, "relative", and to display at least a few node ids.
2931 */
2932void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2933{
2934        char *p = buffer;
2935        nodemask_t nodes = NODE_MASK_NONE;
2936        unsigned short mode = MPOL_DEFAULT;
2937        unsigned short flags = 0;
2938
2939        if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2940                mode = pol->mode;
2941                flags = pol->flags;
2942        }
2943
2944        switch (mode) {
2945        case MPOL_DEFAULT:
2946        case MPOL_LOCAL:
2947                break;
2948        case MPOL_PREFERRED:
2949        case MPOL_PREFERRED_MANY:
2950        case MPOL_BIND:
2951        case MPOL_INTERLEAVE:
2952                nodes = pol->nodes;
2953                break;
2954        default:
2955                WARN_ON_ONCE(1);
2956                snprintf(p, maxlen, "unknown");
2957                return;
2958        }
2959
2960        p += snprintf(p, maxlen, "%s", policy_modes[mode]);
2961
2962        if (flags & MPOL_MODE_FLAGS) {
2963                p += snprintf(p, buffer + maxlen - p, "=");
2964
2965                /*
2966                 * Currently, the only defined flags are mutually exclusive
2967                 */
2968                if (flags & MPOL_F_STATIC_NODES)
2969                        p += snprintf(p, buffer + maxlen - p, "static");
2970                else if (flags & MPOL_F_RELATIVE_NODES)
2971                        p += snprintf(p, buffer + maxlen - p, "relative");
2972        }
2973
2974        if (!nodes_empty(nodes))
2975                p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
2976                               nodemask_pr_args(&nodes));
2977}
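/*
 * Example outputs (illustrative): "default", "local", "bind:0,2" and
 * "interleave=relative:0-3", mirroring the syntax accepted by
 * mpol_parse_str() above.
 */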
2978
2979bool numa_demotion_enabled = false;
2980
2981#ifdef CONFIG_SYSFS
2982static ssize_t numa_demotion_enabled_show(struct kobject *kobj,
2983                                          struct kobj_attribute *attr, char *buf)
2984{
2985        return sysfs_emit(buf, "%s\n",
2986                          numa_demotion_enabled ? "true" : "false");
2987}
2988
2989static ssize_t numa_demotion_enabled_store(struct kobject *kobj,
2990                                           struct kobj_attribute *attr,
2991                                           const char *buf, size_t count)
2992{
2993        if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
2994                numa_demotion_enabled = true;
2995        else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
2996                numa_demotion_enabled = false;
2997        else
2998                return -EINVAL;
2999
3000        return count;
3001}
3002
3003static struct kobj_attribute numa_demotion_enabled_attr =
3004        __ATTR(demotion_enabled, 0644, numa_demotion_enabled_show,
3005               numa_demotion_enabled_store);
3006
3007static struct attribute *numa_attrs[] = {
3008        &numa_demotion_enabled_attr.attr,
3009        NULL,
3010};
3011
3012static const struct attribute_group numa_attr_group = {
3013        .attrs = numa_attrs,
3014};
3015
3016static int __init numa_init_sysfs(void)
3017{
3018        int err;
3019        struct kobject *numa_kobj;
3020
3021        numa_kobj = kobject_create_and_add("numa", mm_kobj);
3022        if (!numa_kobj) {
3023                pr_err("failed to create numa kobject\n");
3024                return -ENOMEM;
3025        }
3026        err = sysfs_create_group(numa_kobj, &numa_attr_group);
3027        if (err) {
3028                pr_err("failed to register numa group\n");
3029                goto delete_obj;
3030        }
3031        return 0;
3032
3033delete_obj:
3034        kobject_put(numa_kobj);
3035        return err;
3036}
3037subsys_initcall(numa_init_sysfs);
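/*
 * Usage sketch (illustrative): the attribute registered above appears as
 * /sys/kernel/mm/numa/demotion_enabled, so demotion can be toggled and
 * inspected from userspace with, e.g.:
 *
 *	echo true > /sys/kernel/mm/numa/demotion_enabled
 *	cat /sys/kernel/mm/numa/demotion_enabled
 */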
3038#endif
3039