linux/mm/util.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <linux/uaccess.h>

#include "internal.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * This function calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const() should be freed by kfree_const().
 *
 * Return: the source string if it is in the .rodata section, otherwise a
 * newly allocated copy made by falling back to kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
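
/*
 * Illustrative sketch (not part of this file): a hypothetical caller
 * pairing kstrdup_const() with kfree_const(). When @name already lives
 * in .rodata no copy is made, so the matching free must go through
 * kfree_const(). The struct and function names below are made up.
 *
 *	struct widget {
 *		const char *name;
 *	};
 *
 *	static int widget_set_name(struct widget *w, const char *name)
 *	{
 *		w->name = kstrdup_const(name, GFP_KERNEL);
 *		return w->name ? 0 : -ENOMEM;
 *	}
 *
 *	static void widget_release(struct widget *w)
 *	{
 *		kfree_const(w->name);
 *	}
 */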

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);
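
/*
 * Illustrative sketch (hypothetical caller): kmemdup_nul() suits the case
 * where the length is already known exactly, e.g. stringifying a counted,
 * unterminated buffer received from hardware or a wire protocol:
 *
 *	static char *label_from_blob(const void *blob, size_t blob_len)
 *	{
 *		// copies blob_len bytes and appends the trailing NUL
 *		return kmemdup_nul(blob, blob_len, GFP_KERNEL);
 *	}
 */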

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
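
/*
 * Illustrative sketch (hypothetical ioctl path): memdup_user() reports
 * failure through ERR_PTR() rather than %NULL, so callers test the result
 * with IS_ERR()/PTR_ERR() and free the copy with plain kfree(). The
 * struct and function names below are invented for illustration.
 *
 *	static long frob_ioctl_set(void __user *arg)
 *	{
 *		struct frob_params *p = memdup_user(arg, sizeof(*p));
 *
 *		if (IS_ERR(p))
 *			return PTR_ERR(p);
 *		// ... validate and apply *p ...
 *		kfree(p);
 *		return 0;
 *	}
 */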

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result may not be
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);
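
/*
 * Illustrative sketch: the same calling pattern as memdup_user(), but for
 * a potentially large, user-controlled size where the vmalloc fallback is
 * acceptable; the only caller-visible difference is that the result must
 * be freed with kvfree() (names below are made up):
 *
 *	static int load_user_table(const void __user *uptr, size_t len)
 *	{
 *		void *table = vmemdup_user(uptr, len);
 *
 *		if (IS_ERR(table))
 *			return PTR_ERR(table);
 *		// ... consume table ...
 *		kvfree(table);
 *		return 0;
 *	}
 */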

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
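
/*
 * Illustrative sketch (hypothetical caller): strndup_user() bounds the
 * copy and guarantees NUL termination, with errors reported as
 * ERR_PTR() values:
 *
 *	static int set_tag_from_user(const char __user *utag)
 *	{
 *		char *tag = strndup_user(utag, PAGE_SIZE);
 *
 *		if (IS_ERR(tag))
 *			return PTR_ERR(tag);
 *		// ... store or act on tag ...
 *		kfree(tag);
 *		return 0;
 *	}
 */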

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause a page fault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
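
/*
 * Illustrative sketch: a classic consumer of memdup_user_nul() is a
 * procfs/debugfs write handler that wants to run string parsers over a
 * @count-byte user buffer (the handler name is invented):
 *
 *	static ssize_t knob_write(struct file *file, const char __user *ubuf,
 *				  size_t count, loff_t *ppos)
 *	{
 *		char *buf = memdup_user_nul(ubuf, count);
 *		int val, err;
 *
 *		if (IS_ERR(buf))
 *			return PTR_ERR(buf);
 *		err = kstrtoint(strstrip(buf), 0, &val);
 *		kfree(buf);
 *		return err ? err : count;
 *	}
 */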

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * Note a difference with get_user_pages_fast: this always returns the
 * number of pages pinned, 0 if no pages were pinned.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 *
 * Return: number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, unsigned int gup_flags,
				struct page **pages)
{
	return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
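
/*
 * Illustrative sketch (hypothetical caller): pin a run of pages, operate
 * on them, then drop every reference with put_page(). Note the call can
 * pin fewer pages than requested:
 *
 *	static int pin_and_use(unsigned long uaddr, int nr, struct page **pages)
 *	{
 *		int i, pinned;
 *
 *		pinned = get_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);
 *		if (pinned < 0)
 *			return pinned;
 *		// ... kmap()/use pages[0..pinned-1] ...
 *		for (i = 0; i < pinned; i++)
 *			put_page(pages[i]);
 *		return pinned;
 *	}
 */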

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate, &uf);
		up_write(&mm->mmap_sem);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that any request with gfp flags that are not compatible with
 * GFP_KERNEL will never fall back to vmalloc.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
	 * so the given set of flags has to be compatible.
	 */
	if ((flags & GFP_KERNEL) != GFP_KERNEL)
		return kmalloc_node(size, flags, node);

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	return __vmalloc_node_flags_caller(size, node, flags,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
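
/*
 * Illustrative sketch: kvmalloc() and kvmalloc_array() (the NUMA_NO_NODE
 * wrappers around kvmalloc_node()) are the usual entry points when the
 * size is driven by external input and may be large; the matching free
 * is always kvfree(). The helper name below is made up.
 *
 *	static u32 *alloc_index_table(size_t nents)
 *	{
 *		// overflow-checked nents * sizeof(u32), zeroed
 *		return kvmalloc_array(nents, sizeof(u32),
 *				      GFP_KERNEL | __GFP_ZERO);
 *	}
 *
 *	// ... later: kvfree(table);
 */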

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For a compound page, it returns true if any subpage is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < (1 << compound_order(page)); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on a slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP, page->_mapcount contains the total number of
	 * mappings of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
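
/*
 * Worked example (illustrative numbers): with 8 GiB of RAM, no hugetlb
 * pages, vm.overcommit_ratio = 50 and 2 GiB of swap, the limit computed
 * above is 8 GiB * 50/100 + 2 GiB = 6 GiB of committable memory. A
 * non-zero vm.overcommit_kbytes replaces the ratio-based term entirely.
 */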

/*
 * Make sure vm_committed_as is in one cacheline and not cacheline shared
 * with other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
			-(s64)vm_committed_as_batch * num_online_cpus(),
			"memory commitment underflow");

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

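/*
 * Illustrative sketch (hypothetical caller): because the result is not
 * guaranteed to be NUL-terminated, print it with a length-limited format:
 *
 *	char buf[256];
 *	int n = get_cmdline(task, buf, sizeof(buf));
 *
 *	pr_info("cmdline: %.*s\n", n, buf);
 */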