linux/mm/util.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const.
 *
 * Return: source string if it is in .rodata section otherwise
 * fallback to kstrdup.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
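
/*
 * Illustrative sketch, not part of upstream util.c: a hypothetical caller
 * showing how kstrdup_const() is paired with kfree_const().  The struct
 * and helper names below are made up.
 *
 *	struct attr {
 *		const char *name;
 *	};
 *
 *	static int attr_set_name(struct attr *a, const char *name)
 *	{
 *		a->name = kstrdup_const(name, GFP_KERNEL);
 *		return a->name ? 0 : -ENOMEM;
 *	}
 *
 *	static void attr_release(struct attr *a)
 *	{
 *		kfree_const(a->name);	// only frees if a copy was actually made
 *	}
 */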

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
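
/*
 * Illustrative sketch, not part of upstream util.c: a hypothetical ioctl
 * path copying a fixed-size argument block with memdup_user() and
 * unwrapping the ERR_PTR() convention.  "struct foo_args" and "uarg" are
 * made-up names.
 *
 *	struct foo_args *args;
 *
 *	args = memdup_user(uarg, sizeof(*args));
 *	if (IS_ERR(args))
 *		return PTR_ERR(args);	// -ENOMEM or -EFAULT
 *	// ...use args...
 *	kfree(args);			// result is physically contiguous
 */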

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result may not be
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
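
/*
 * Illustrative sketch, not part of upstream util.c: duplicating a
 * user-supplied, NUL-terminated string with an upper bound.  The variable
 * names are placeholders.
 *
 *	char *name;
 *
 *	name = strndup_user(uname, PATH_MAX);
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);	// -EFAULT or -EINVAL
 *	// ...use name...
 *	kfree(name);
 */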

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		next = mm->mmap;
		mm->mmap = vma;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev, *next;

	next = vma->vm_next;
	prev = vma->vm_prev;
	if (prev)
		prev->vm_next = next;
	else
		mm->mmap = next;
	if (next)
		next->vm_prev = prev;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32-bit? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_sem is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	lockdep_assert_held_write(&mm->mmap_sem);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	down_write(&mm->mmap_sem);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	up_write(&mm->mmap_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
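
/*
 * Illustrative sketch, not part of upstream util.c: a hypothetical driver
 * charging pinned pages against RLIMIT_MEMLOCK on setup and un-charging
 * them on teardown.  The helper names are made up.
 *
 *	static int pin_setup(struct mm_struct *mm, unsigned long npages)
 *	{
 *		int ret = account_locked_vm(mm, npages, true);
 *
 *		if (ret)
 *			return ret;	// would exceed RLIMIT_MEMLOCK
 *		// ...pin the pages...
 *		return 0;
 *	}
 *
 *	static void pin_teardown(struct mm_struct *mm, unsigned long npages)
 *	{
 *		account_locked_vm(mm, npages, false);
 *	}
 */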

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate, &uf);
		up_write(&mm->mmap_sem);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
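
/*
 * Illustrative sketch, not part of upstream util.c: mapping part of a file
 * into the current task's address space from kernel code, in the style of
 * the binfmt loaders.  "file" and "len" are placeholders.
 *
 *	unsigned long addr;
 *
 *	addr = vm_mmap(file, 0, len, PROT_READ | PROT_EXEC, MAP_PRIVATE, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return (int)addr;	// negative errno is encoded in the value
 */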

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that any request whose gfp flags are not compatible with
 * GFP_KERNEL will never fall back to vmalloc.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page tables)
	 * so the given set of flags has to be compatible.
	 */
	if ((flags & GFP_KERNEL) != GFP_KERNEL)
		return kmalloc_node(size, flags, node);

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub-page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	return __vmalloc_node_flags_caller(size, node, flags,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
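
/*
 * Illustrative sketch, not part of upstream util.c: allocating a
 * possibly-large table with kvmalloc() (kvmalloc_node() with NUMA_NO_NODE)
 * and releasing it with kvfree().  "struct entry" and "nr_entries" are
 * placeholders.
 *
 *	struct entry *tbl;
 *
 *	tbl = kvmalloc(array_size(nr_entries, sizeof(*tbl)), GFP_KERNEL);
 *	if (!tbl)
 *		return -ENOMEM;
 *	// ...use tbl...
 *	kvfree(tbl);	// correct for both kmalloc and vmalloc backing
 */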

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < compound_nr(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains the total number of mappings
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

/*
 * Make sure vm_committed_as is in its own cacheline and not shared with
 * other variables.  It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
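
/*
 * Illustrative sketch, not part of upstream util.c: a hypothetical debug
 * helper dumping the start of another task's command line.  Arguments are
 * separated by embedded NULs, so %s stops at the first one, and the copy
 * must be terminated by the caller.
 *
 *	char buf[256];
 *	int n;
 *
 *	n = get_cmdline(task, buf, sizeof(buf) - 1);
 *	buf[n] = '\0';
 *	pr_info("pid %d argv[0]: %s\n", task->pid, buf);
 */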

int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}