linux/mm/util.c
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
        return addr >= (unsigned long)__start_rodata &&
                addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
        if (!is_kernel_rodata((unsigned long)x))
                kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strlen(s) + 1;
        buf = kmalloc_track_caller(len, gfp);
        if (buf)
                memcpy(buf, s, len);
        return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Returns the source string if it is in the .rodata section, otherwise it
 * falls back to kstrdup().
 * Strings allocated by kstrdup_const should be freed by kfree_const.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
        if (is_kernel_rodata((unsigned long)s))
                return s;

        return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
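
/*
 * Illustrative sketch (not part of the original file): the intended pairing
 * of kstrdup_const()/kfree_const(). A caller that may receive either a
 * string literal (living in .rodata) or a dynamically built name can store
 * it with kstrdup_const() and release it with kfree_const(); only the
 * non-.rodata case actually allocates and frees. The struct and function
 * names below are hypothetical.
 *
 *	struct example_obj {
 *		const char *name;
 *	};
 *
 *	static int example_set_name(struct example_obj *obj, const char *name)
 *	{
 *		obj->name = kstrdup_const(name, GFP_KERNEL);
 *		return obj->name ? 0 : -ENOMEM;
 *	}
 *
 *	static void example_free_name(struct example_obj *obj)
 *	{
 *		kfree_const(obj->name);
 *	}
 */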

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strnlen(s, max);
        buf = kmalloc_track_caller(len+1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = kmalloc_track_caller(len, gfp);
        if (p)
                memcpy(p, src, len);
        return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
        char *buf;

        if (!s)
                return NULL;

        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kmemdup_nul);
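
/*
 * Illustrative sketch (not part of the original file), expanding on the note
 * in the kstrndup() kernel-doc above: when the caller already knows the exact
 * length, kmemdup_nul() avoids the extra strnlen() walk that kstrndup()
 * performs. The source buffer and length below are hypothetical.
 *
 *	const char *data = ...;  // possibly unterminated source bytes
 *	size_t len = ...;        // exact number of bytes, already known
 *
 *	char *a = kstrndup(data, len, GFP_KERNEL);    // rescans up to len bytes
 *	char *b = kmemdup_nul(data, len, GFP_KERNEL); // copies len bytes directly
 */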

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
        void *p;

        p = kmalloc_track_caller(len, GFP_USER);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(memdup_user);
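
/*
 * Illustrative sketch (not part of the original file): the usual calling
 * pattern for memdup_user(). Because failure is reported via ERR_PTR(), the
 * result must be checked with IS_ERR()/PTR_ERR() rather than against NULL,
 * and the copy is freed with kfree(). vmemdup_user() below follows the same
 * pattern but must be freed with kvfree(). The ioctl-style helper and its
 * arguments are hypothetical.
 *
 *	static int example_ioctl_set_blob(const void __user *uptr, size_t len)
 *	{
 *		void *blob = memdup_user(uptr, len);
 *
 *		if (IS_ERR(blob))
 *			return PTR_ERR(blob);
 *		// ... use the kernel copy ...
 *		kfree(blob);
 *		return 0;
 *	}
 */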

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.  Result may not be
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
        void *p;

        p = kvmalloc(len, GFP_USER);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kvfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
        char *p;
        long length;

        length = strnlen_user(s, n);

        if (!length)
                return ERR_PTR(-EFAULT);

        if (length > n)
                return ERR_PTR(-EINVAL);

        p = memdup_user(s, length);

        if (IS_ERR(p))
                return p;

        p[length - 1] = '\0';

        return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
        char *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause a page fault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len + 1, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }
        p[len] = '\0';

        return p;
}
EXPORT_SYMBOL(memdup_user_nul);
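
/*
 * Illustrative sketch (not part of the original file): memdup_user_nul() is
 * typically used in write() handlers that want to parse the user buffer as a
 * C string without worrying about termination. The handler below is a
 * hypothetical procfs-style example; only the memdup_user_nul()/kfree()
 * pattern is the point.
 *
 *	static ssize_t example_write(struct file *file, const char __user *ubuf,
 *				     size_t count, loff_t *ppos)
 *	{
 *		char *kbuf = memdup_user_nul(ubuf, count);
 *
 *		if (IS_ERR(kbuf))
 *			return PTR_ERR(kbuf);
 *		// kbuf is now NUL-terminated; safe for strcmp()/kstrtoul() etc.
 *		kfree(kbuf);
 *		return count;
 *	}
 */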

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
{
        struct vm_area_struct *next;

        vma->vm_prev = prev;
        if (prev) {
                next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
                        next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
                        next = NULL;
        }
        vma->vm_next = next;
        if (next)
                next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
        struct task_struct * __maybe_unused t = current;

        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * Unlike get_user_pages_fast(), this always returns the number of pages
 * pinned: 0 if no pages were pinned.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
                                 int nr_pages, int write, struct page **pages)
{
        return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:      starting user address
 * @nr_pages:   number of pages from start to pin
 * @write:      whether pages will be written to
 * @pages:      array that receives pointers to the pages pinned.
 *              Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However,
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However, if the
 * pages have to be faulted in, it may turn out to be slightly slower, so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
                                int nr_pages, int write, struct page **pages)
{
        return get_user_pages_unlocked(start, nr_pages, pages,
                                       write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
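
/*
 * Illustrative sketch (not part of the original file): a typical
 * get_user_pages_fast() call site pins a small batch of pages, uses them,
 * and drops the references with put_page(). Per the kernel-doc above, the
 * return value may be fewer pages than requested and must be checked. The
 * local variables (user_addr, pages) are hypothetical.
 *
 *	struct page *pages[16];
 *	int nr = get_user_pages_fast(user_addr, 16, 1, pages);  // 1 => write
 *
 *	if (nr < 0)
 *		return nr;              // -errno, nothing was pinned
 *	// ... access the nr pinned pages (e.g. via kmap()) ...
 *	while (nr--)
 *		put_page(pages[nr]);
 */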

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff)
{
        unsigned long ret;
        struct mm_struct *mm = current->mm;
        unsigned long populate;
        LIST_HEAD(uf);

        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
                if (down_write_killable(&mm->mmap_sem))
                        return -EINTR;
                ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
                                    &populate, &uf);
                up_write(&mm->mmap_sem);
                userfaultfd_unmap_complete(mm, &uf);
                if (populate)
                        mm_populate(ret, populate);
        }
        return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long offset)
{
        if (unlikely(offset + PAGE_ALIGN(len) < offset))
                return -EINVAL;
        if (unlikely(offset_in_page(offset)))
                return -EINVAL;

        return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory, but if the allocation fails it falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Note that any request with gfp flags that are not GFP_KERNEL compatible is
 * served by kmalloc alone and will never fall back to vmalloc.
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
        gfp_t kmalloc_flags = flags;
        void *ret;

        /*
         * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page tables)
         * so the given set of flags has to be compatible.
         */
        if ((flags & GFP_KERNEL) != GFP_KERNEL)
                return kmalloc_node(size, flags, node);

        /*
         * We want to attempt a large physically contiguous block first because
         * it is less likely to fragment multiple larger blocks and therefore
         * contributes less to long-term fragmentation than the vmalloc
         * fallback. However, make sure that larger requests are not too
         * disruptive - no OOM killer and no allocation failure warnings, as we
         * have a fallback.
         */
        if (size > PAGE_SIZE) {
                kmalloc_flags |= __GFP_NOWARN;

                if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
                        kmalloc_flags |= __GFP_NORETRY;
        }

        ret = kmalloc_node(size, kmalloc_flags, node);

        /*
         * It doesn't really make sense to fall back to vmalloc for sub-page
         * requests.
         */
        if (ret || size <= PAGE_SIZE)
                return ret;

        return __vmalloc_node_flags_caller(size, node, flags,
                        __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
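
/*
 * Illustrative sketch (not part of the original file): kvmalloc() and friends
 * (which route through kvmalloc_node()) are the usual choice for allocations
 * whose size is user- or workload-controlled and may exceed what kmalloc can
 * reliably provide. The table structure and nr_entries below are hypothetical.
 *
 *	struct example_entry *table;
 *
 *	table = kvmalloc_array(nr_entries, sizeof(*table), GFP_KERNEL | __GFP_ZERO);
 *	if (!table)
 *		return -ENOMEM;
 *	// ... table may be kmalloc- or vmalloc-backed; both are fine here ...
 *	kvfree(table);
 */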

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * which one to use.
 *
 * Context: Any context except NMI.
 */
void kvfree(const void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
EXPORT_SYMBOL(kvfree);

static inline void *__page_rmapping(struct page *page)
{
        unsigned long mapping;

        mapping = (unsigned long)page->mapping;
        mapping &= ~PAGE_MAPPING_FLAGS;

        return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
        page = compound_head(page);
        return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound pages it returns true if any subpage is mapped.
 */
bool page_mapped(struct page *page)
{
        int i;

        if (likely(!PageCompound(page)))
                return atomic_read(&page->_mapcount) >= 0;
        page = compound_head(page);
        if (atomic_read(compound_mapcount_ptr(page)) >= 0)
                return true;
        if (PageHuge(page))
                return false;
        for (i = 0; i < hpage_nr_pages(page); i++) {
                if (atomic_read(&page[i]._mapcount) >= 0)
                        return true;
        }
        return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
        unsigned long mapping;

        page = compound_head(page);
        mapping = (unsigned long)page->mapping;
        if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                return NULL;
        return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
        struct address_space *mapping;

        page = compound_head(page);

        /* This happens if someone calls flush_dcache_page on slab page */
        if (unlikely(PageSlab(page)))
                return NULL;

        if (unlikely(PageSwapCache(page))) {
                swp_entry_t entry;

                entry.val = page_private(page);
                return swap_address_space(entry);
        }

        mapping = page->mapping;
        if ((unsigned long)mapping & PAGE_MAPPING_ANON)
                return NULL;

        return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
        if (unlikely(PageSwapCache(page)))
                return NULL;
        return page_mapping(page);
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
        int ret;

        ret = atomic_read(&page->_mapcount) + 1;
        /*
         * For file THP page->_mapcount contains the total number of mappings
         * of the page: no need to look into compound_mapcount.
         */
        if (!PageAnon(page) && !PageHuge(page))
                return ret;
        page = compound_head(page);
        ret += atomic_read(compound_mapcount_ptr(page)) + 1;
        if (PageDoubleMap(page))
                ret--;
        return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos)
{
        int ret;

        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_kbytes = 0;
        return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos)
{
        int ret;

        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_ratio = 0;
        return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
        unsigned long allowed;

        if (sysctl_overcommit_kbytes)
                allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
        else
                allowed = ((totalram_pages - hugetlb_total_pages())
                           * sysctl_overcommit_ratio / 100);
        allowed += total_swap_pages;

        return allowed;
}
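
/*
 * Illustrative worked example (not part of the original file): with the
 * default vm.overcommit_ratio of 50, vm.overcommit_kbytes unset, 4 GiB of
 * RAM with no hugetlb pages, and 2 GiB of swap, the OVERCOMMIT_NEVER limit
 * computed above is
 *
 *	allowed = totalram_pages * 50 / 100 + total_swap_pages
 *	        = 2 GiB + 2 GiB = 4 GiB worth of pages,
 *
 * i.e. new commitments are refused once the committed address space reaches
 * about 4 GiB (minus the root/user reserves applied in __vm_enough_memory()
 * below).
 */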

/*
 * Make sure vm_committed_as is in its own cacheline and not shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
        return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
        long free, allowed, reserve;

        VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
                        -(s64)vm_committed_as_batch * num_online_cpus(),
                        "memory commitment underflow");

        vm_acct_memory(pages);

        /*
         * Sometimes we want to use more memory than we have
         */
        if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
                return 0;

        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
                free = global_zone_page_state(NR_FREE_PAGES);
                free += global_node_page_state(NR_FILE_PAGES);

                /*
                 * shmem pages shouldn't be counted as free in this
                 * case: they can't be purged, only swapped out, and
                 * that won't affect the overall amount of available
                 * memory in the system.
                 */
                free -= global_node_page_state(NR_SHMEM);

                free += get_nr_swap_pages();

                /*
                 * Any slabs which are created with the
                 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
                 * which are reclaimable, under pressure.  The dentry
                 * cache and most inode caches should fall into this
                 * category.
                 */
                free += global_node_page_state(NR_SLAB_RECLAIMABLE);

                /*
                 * Part of the kernel memory, which can be released
                 * under memory pressure.
                 */
                free += global_node_page_state(
                        NR_INDIRECTLY_RECLAIMABLE_BYTES) >> PAGE_SHIFT;

                /*
                 * Leave the reserved pages alone; they are not available
                 * for anonymous mappings.
                 */
                if (free <= totalreserve_pages)
                        goto error;
                else
                        free -= totalreserve_pages;

                /*
                 * Reserve some for root
                 */
                if (!cap_sys_admin)
                        free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

                if (free > pages)
                        return 0;

                goto error;
        }

        allowed = vm_commit_limit();
        /*
         * Reserve some for root
         */
        if (!cap_sys_admin)
                allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

        /*
         * Don't let a single process grow so big a user can't recover
         */
        if (mm) {
                reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
                allowed -= min_t(long, mm->total_vm / 32, reserve);
        }

        if (percpu_counter_read_positive(&vm_committed_as) < allowed)
                return 0;
error:
        vm_unacct_memory(pages);

        return -ENOMEM;
}
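
/*
 * Illustrative sketch (not part of the original file): as the comment above
 * notes, __vm_enough_memory() is meant to be called from an LSM's
 * vm_enough_memory hook. A minimal hook would decide whether the caller gets
 * the root reserve and then delegate the accounting here. The hook name below
 * is hypothetical and the privilege check is simplified.
 *
 *	static int example_vm_enough_memory(struct mm_struct *mm, long pages)
 *	{
 *		int cap_sys_admin = 0;
 *
 *		if (capable(CAP_SYS_ADMIN))
 *			cap_sys_admin = 1;
 *		return __vm_enough_memory(mm, pages, cap_sys_admin);
 *	}
 */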

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee a terminating NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
        int res = 0;
        unsigned int len;
        struct mm_struct *mm = get_task_mm(task);
        unsigned long arg_start, arg_end, env_start, env_end;
        if (!mm)
                goto out;
        if (!mm->arg_end)
                goto out_mm;    /* Shh! No looking before we're done */

        down_read(&mm->mmap_sem);
        arg_start = mm->arg_start;
        arg_end = mm->arg_end;
        env_start = mm->env_start;
        env_end = mm->env_end;
        up_read(&mm->mmap_sem);

        len = arg_end - arg_start;

        if (len > buflen)
                len = buflen;

        res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

        /*
         * If the nul at the end of args has been overwritten, then
         * assume the application is using setproctitle(3).
         */
        if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
                len = strnlen(buffer, res);
                if (len < res) {
                        res = len;
                } else {
                        len = env_end - env_start;
                        if (len > buflen - res)
                                len = buflen - res;
                        res += access_process_vm(task, env_start,
                                                 buffer+res, len,
                                                 FOLL_FORCE);
                        res = strnlen(buffer, res);
                }
        }
out_mm:
        mmput(mm);
out:
        return res;
}
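
/*
 * Illustrative sketch (not part of the original file): a caller of
 * get_cmdline() must size the buffer itself and, because the copy is not
 * guaranteed to be NUL-terminated, terminate it explicitly before treating
 * it as a C string. The task pointer and buffer size are hypothetical.
 *
 *	char cmdline[256];
 *	int len = get_cmdline(task, cmdline, sizeof(cmdline) - 1);
 *
 *	cmdline[len] = '\0';
 *	pr_debug("cmdline: %s\n", cmdline);
 */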