linux/mm/util.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <linux/uaccess.h>

#include "internal.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
        if (!is_kernel_rodata((unsigned long)x))
                kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strlen(s) + 1;
        buf = kmalloc_track_caller(len, gfp);
        if (buf)
                memcpy(buf, s, len);
        return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const.
 *
 * Return: source string if it is in the .rodata section, otherwise
 * fall back to kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
        if (is_kernel_rodata((unsigned long)s))
                return s;

        return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

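/*
 * Illustrative sketch (not part of upstream util.c): a typical
 * kstrdup_const() caller stores a name that is usually a string literal,
 * so the .rodata fast path avoids a copy. "demo_object", "demo_set_name"
 * and "demo_put_name" are made-up names for this example.
 */
struct demo_object {
        const char *name;       /* .rodata pointer or kstrdup() copy */
};

static int demo_set_name(struct demo_object *obj, const char *name)
{
        obj->name = kstrdup_const(name, GFP_KERNEL);
        return obj->name ? 0 : -ENOMEM;
}

static void demo_put_name(struct demo_object *obj)
{
        kfree_const(obj->name);         /* correct for both cases */
}
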
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strnlen(s, max);
        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = kmalloc_track_caller(len, gfp);
        if (p)
                memcpy(p, src, len);
        return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
        char *buf;

        if (!s)
                return NULL;

        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

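/*
 * Illustrative sketch (not part of upstream util.c): kmemdup_nul() is the
 * natural way to turn a counted, possibly unterminated byte range (here a
 * hypothetical "raw"/"raw_len" pair) into a regular C string.
 */
static char *demo_range_to_string(const char *raw, size_t raw_len)
{
        /* Copies exactly raw_len bytes and appends the terminating '\0'. */
        return kmemdup_nul(raw, raw_len, GFP_KERNEL);
}
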
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
        void *p;

        p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(memdup_user);

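/*
 * Illustrative sketch (not part of upstream util.c): the usual calling
 * pattern for memdup_user() in an ioctl-style handler. "demo_ioctl_copy",
 * "uptr" and "len" are made-up names for the example.
 */
static int demo_ioctl_copy(const void __user *uptr, size_t len)
{
        void *buf;

        buf = memdup_user(uptr, len);
        if (IS_ERR(buf))
                return PTR_ERR(buf);    /* -ENOMEM or -EFAULT */

        /* ... use the physically contiguous copy in buf ... */

        kfree(buf);
        return 0;
}
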
/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result may not be
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
        void *p;

        p = kvmalloc(len, GFP_USER);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kvfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(vmemdup_user);

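/*
 * Illustrative sketch (not part of upstream util.c): vmemdup_user() suits
 * larger, caller-sized buffers where physical contiguity is not required;
 * the copy must be released with kvfree(), never plain kfree().
 * "demo_copy_big_table" is a made-up name.
 */
static int demo_copy_big_table(const void __user *uptr, size_t len)
{
        void *table;

        table = vmemdup_user(uptr, len);
        if (IS_ERR(table))
                return PTR_ERR(table);

        /* ... parse the table ... */

        kvfree(table);
        return 0;
}
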
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
        char *p;
        long length;

        length = strnlen_user(s, n);

        if (!length)
                return ERR_PTR(-EFAULT);

        if (length > n)
                return ERR_PTR(-EINVAL);

        p = memdup_user(s, length);

        if (IS_ERR(p))
                return p;

        p[length - 1] = '\0';

        return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
        char *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause a page fault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len + 1, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }
        p[len] = '\0';

        return p;
}
EXPORT_SYMBOL(memdup_user_nul);

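/*
 * Illustrative sketch (not part of upstream util.c): memdup_user_nul() is
 * the usual helper for text written through a procfs/sysfs-style write
 * handler, where the user buffer is not NUL-terminated. "demo_write" is a
 * made-up name.
 */
static ssize_t demo_write(const char __user *ubuf, size_t count)
{
        char *cmd;

        cmd = memdup_user_nul(ubuf, count);
        if (IS_ERR(cmd))
                return PTR_ERR(cmd);

        /* ... cmd is now a NUL-terminated kernel string ... */

        kfree(cmd);
        return count;
}
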
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
{
        struct vm_area_struct *next;

        vma->vm_prev = prev;
        if (prev) {
                next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
                        next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
                        next = NULL;
        }
        vma->vm_next = next;
        if (next)
                next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
        struct task_struct * __maybe_unused t = current;

        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_sem is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
                        struct task_struct *task, bool bypass_rlim)
{
        unsigned long locked_vm, limit;
        int ret = 0;

        lockdep_assert_held_write(&mm->mmap_sem);

        locked_vm = mm->locked_vm;
        if (inc) {
                if (!bypass_rlim) {
                        limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
                        if (locked_vm + pages > limit)
                                ret = -ENOMEM;
                }
                if (!ret)
                        mm->locked_vm = locked_vm + pages;
        } else {
                WARN_ON_ONCE(pages > locked_vm);
                mm->locked_vm = locked_vm - pages;
        }

        pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
                 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
                 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
                 ret ? " - exceeded" : "");

        return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
        int ret;

        if (pages == 0 || !mm)
                return 0;

        down_write(&mm->mmap_sem);
        ret = __account_locked_vm(mm, pages, inc, current,
                                  capable(CAP_IPC_LOCK));
        up_write(&mm->mmap_sem);

        return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);

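/*
 * Illustrative sketch (not part of upstream util.c): drivers that pin user
 * pages typically charge them against RLIMIT_MEMLOCK before pinning and
 * undo the charge on teardown. "demo_charge_pinned" and "npages" are
 * made-up names for the example.
 */
static int demo_charge_pinned(struct mm_struct *mm, unsigned long npages)
{
        int ret;

        ret = account_locked_vm(mm, npages, true);      /* may return -ENOMEM */
        if (ret)
                return ret;

        /* ... pin the pages; on failure undo the accounting with: */
        /* account_locked_vm(mm, npages, false); */

        return 0;
}
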
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff)
{
        unsigned long ret;
        struct mm_struct *mm = current->mm;
        unsigned long populate;
        LIST_HEAD(uf);

        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
                if (down_write_killable(&mm->mmap_sem))
                        return -EINTR;
                ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
                                    &populate, &uf);
                up_write(&mm->mmap_sem);
                userfaultfd_unmap_complete(mm, &uf);
                if (populate)
                        mm_populate(ret, populate);
        }
        return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long offset)
{
        if (unlikely(offset + PAGE_ALIGN(len) < offset))
                return -EINVAL;
        if (unlikely(offset_in_page(offset)))
                return -EINVAL;

        return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

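/*
 * Illustrative sketch (not part of upstream util.c): vm_mmap() returns
 * either a userspace address or a negative errno encoded in the value,
 * so callers test it with IS_ERR_VALUE() rather than IS_ERR().
 * "demo_map_file" is a made-up name.
 */
static unsigned long demo_map_file(struct file *file, unsigned long size)
{
        unsigned long addr;

        addr = vm_mmap(file, 0, size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, 0);
        if (IS_ERR_VALUE(addr))
                return addr;            /* negative errno */

        return addr;                    /* start of the new mapping */
}
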
/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that any use of gfp flags outside of GFP_KERNEL never falls
 * back to vmalloc; such requests are served by kmalloc alone.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
        gfp_t kmalloc_flags = flags;
        void *ret;

        /*
         * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page
         * tables), so the given set of flags has to be compatible.
         */
        if ((flags & GFP_KERNEL) != GFP_KERNEL)
                return kmalloc_node(size, flags, node);

        /*
         * We want to attempt a large physically contiguous block first because
         * it is less likely to fragment multiple larger blocks and therefore
         * contributes less to long-term fragmentation than the vmalloc
         * fallback. However, make sure that larger requests are not too
         * disruptive - no OOM killer and no allocation failure warnings, as we
         * have a fallback.
         */
        if (size > PAGE_SIZE) {
                kmalloc_flags |= __GFP_NOWARN;

                if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
                        kmalloc_flags |= __GFP_NORETRY;
        }

        ret = kmalloc_node(size, kmalloc_flags, node);

        /*
         * It doesn't really make sense to fall back to vmalloc for sub-page
         * requests.
         */
        if (ret || size <= PAGE_SIZE)
                return ret;

        return __vmalloc_node_flags_caller(size, node, flags,
                        __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);

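/*
 * Illustrative sketch (not part of upstream util.c): callers normally go
 * through the kvmalloc()/kvmalloc_array() wrappers, which call
 * kvmalloc_node() with NUMA_NO_NODE, and free with kvfree() because the
 * memory may have come from either allocator. "demo_alloc_index" and
 * "nr_entries" are made-up names.
 */
static void *demo_alloc_index(size_t nr_entries)
{
        void *index;

        index = kvmalloc_array(nr_entries, sizeof(u64), GFP_KERNEL | __GFP_ZERO);
        if (!index)
                return NULL;

        /* ... use index; later release it with kvfree(index) ... */
        return index;
}
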
/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
EXPORT_SYMBOL(kvfree);

static inline void *__page_rmapping(struct page *page)
{
        unsigned long mapping;

        mapping = (unsigned long)page->mapping;
        mapping &= ~PAGE_MAPPING_FLAGS;

        return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
        page = compound_head(page);
        return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For a compound page it returns true if any subpage of the compound page
 * is mapped.
 */
bool page_mapped(struct page *page)
{
        int i;

        if (likely(!PageCompound(page)))
                return atomic_read(&page->_mapcount) >= 0;
        page = compound_head(page);
        if (atomic_read(compound_mapcount_ptr(page)) >= 0)
                return true;
        if (PageHuge(page))
                return false;
        for (i = 0; i < (1 << compound_order(page)); i++) {
                if (atomic_read(&page[i]._mapcount) >= 0)
                        return true;
        }
        return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
        unsigned long mapping;

        page = compound_head(page);
        mapping = (unsigned long)page->mapping;
        if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                return NULL;
        return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
        struct address_space *mapping;

        page = compound_head(page);

        /* This happens if someone calls flush_dcache_page on a slab page */
        if (unlikely(PageSlab(page)))
                return NULL;

        if (unlikely(PageSwapCache(page))) {
                swp_entry_t entry;

                entry.val = page_private(page);
                return swap_address_space(entry);
        }

        mapping = page->mapping;
        if ((unsigned long)mapping & PAGE_MAPPING_ANON)
                return NULL;

        return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
        if (unlikely(PageSwapCache(page)))
                return NULL;
        return page_mapping(page);
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
        int ret;

        ret = atomic_read(&page->_mapcount) + 1;
        /*
         * For file THP, page->_mapcount contains the total number of mappings
         * of the page; no need to look into compound_mapcount.
         */
        if (!PageAnon(page) && !PageHuge(page))
                return ret;
        page = compound_head(page);
        ret += atomic_read(compound_mapcount_ptr(page)) + 1;
        if (PageDoubleMap(page))
                ret--;
        return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos)
{
        int ret;

        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_kbytes = 0;
        return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos)
{
        int ret;

        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_ratio = 0;
        return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
        unsigned long allowed;

        if (sysctl_overcommit_kbytes)
                allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
        else
                allowed = ((totalram_pages() - hugetlb_total_pages())
                           * sysctl_overcommit_ratio / 100);
        allowed += total_swap_pages;

        return allowed;
}

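/*
 * Worked example (illustrative, with assumed numbers): on a 4 KiB-page
 * machine with 4 GiB of RAM (1048576 pages), no hugetlb pages, 2 GiB of
 * swap (524288 pages) and the default overcommit_ratio of 50, the commit
 * limit is 1048576 * 50 / 100 + 524288 = 1048576 pages, i.e. 4 GiB.
 * Setting overcommit_kbytes replaces the ratio-based term entirely.
 */
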
/*
 * Make sure vm_committed_as is in its own cacheline and not shared with
 * other variables, since it can be updated frequently by several CPUs.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
        return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
        long allowed;

        VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
                        -(s64)vm_committed_as_batch * num_online_cpus(),
                        "memory commitment underflow");

        vm_acct_memory(pages);

        /*
         * Sometimes we want to use more memory than we have
         */
        if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
                return 0;

        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
                if (pages > totalram_pages() + total_swap_pages)
                        goto error;
                return 0;
        }

        allowed = vm_commit_limit();
        /*
         * Reserve some for root
         */
        if (!cap_sys_admin)
                allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

        /*
         * Don't let a single process grow so big a user can't recover
         */
        if (mm) {
                long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

                allowed -= min_t(long, mm->total_vm / 32, reserve);
        }

        if (percpu_counter_read_positive(&vm_committed_as) < allowed)
                return 0;
error:
        vm_unacct_memory(pages);

        return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee a terminating NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
        int res = 0;
        unsigned int len;
        struct mm_struct *mm = get_task_mm(task);
        unsigned long arg_start, arg_end, env_start, env_end;

        if (!mm)
                goto out;
        if (!mm->arg_end)
                goto out_mm;    /* Shh! No looking before we're done */

        spin_lock(&mm->arg_lock);
        arg_start = mm->arg_start;
        arg_end = mm->arg_end;
        env_start = mm->env_start;
        env_end = mm->env_end;
        spin_unlock(&mm->arg_lock);

        len = arg_end - arg_start;

        if (len > buflen)
                len = buflen;

        res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

        /*
         * If the nul at the end of args has been overwritten, then
         * assume the application is using setproctitle(3).
         */
        if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
                len = strnlen(buffer, res);
                if (len < res) {
                        res = len;
                } else {
                        len = env_end - env_start;
                        if (len > buflen - res)
                                len = buflen - res;
                        res += access_process_vm(task, env_start,
                                                 buffer+res, len,
                                                 FOLL_FORCE);
                        res = strnlen(buffer, res);
                }
        }
out_mm:
        mmput(mm);
out:
        return res;
}

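/*
 * Illustrative sketch (not part of upstream util.c): get_cmdline() does not
 * guarantee NUL termination, so callers terminate the buffer themselves.
 * Arguments are separated by NUL bytes, so %s prints only argv[0] here.
 * "demo_print_cmdline" is a made-up name.
 */
static void demo_print_cmdline(struct task_struct *task)
{
        char buf[128];
        int len;

        len = get_cmdline(task, buf, sizeof(buf) - 1);
        buf[len] = '\0';
        pr_info("cmdline of pid %d: %s\n", task->pid, buf);
}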