linux/mm/util.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * This function calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: the source string if it is in the .rodata section; otherwise
 * fall back to kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

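/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a caller replaces a possibly-.rodata name with kstrdup_const() and
 * releases the old one with kfree_const(), so string literals are never
 * copied or freed. The function name is hypothetical.
 */
static __maybe_unused const char *example_replace_name(const char *old_name,
							const char *new_name)
{
	const char *copy = kstrdup_const(new_name, GFP_KERNEL);

	if (!copy)
		return NULL;

	/* Safe even if @old_name points into .rodata. */
	kfree_const(old_name);
	return copy;
}
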
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result may not be
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

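/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): the result of memdup_user()/vmemdup_user() must be checked with
 * IS_ERR() rather than against NULL, and freed with kfree()/kvfree()
 * respectively. The function name and parameters are hypothetical.
 */
static __maybe_unused int example_copy_user_blob(const void __user *uptr,
						 size_t len)
{
	void *buf = vmemdup_user(uptr, len);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */

	/* ... operate on the kernel copy of the user data ... */

	/* kvfree(), because the copy may be vmalloc-backed. */
	kvfree(buf);
	return 0;
}
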
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		next = mm->mmap;
		mm->mmap = vma;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev, *next;

	next = vma->vm_next;
	prev = vma->vm_prev;
	if (prev)
		prev->vm_next = next;
	else
		mm->mmap = next;
	if (next)
		next->vm_prev = prev;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP         (SZ_128M)
#define MAX_GAP         (STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);

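/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a driver that pins user pages typically charges them against
 * RLIMIT_MEMLOCK with account_locked_vm(..., true) and must undo the charge
 * with the same page count on teardown or on a later error. The pinning
 * itself is elided; the function name is hypothetical.
 */
static __maybe_unused int example_charge_pinned_pages(struct mm_struct *mm,
						      unsigned long nr_pages)
{
	int ret = account_locked_vm(mm, nr_pages, true);

	if (ret)
		return ret;	/* -ENOMEM: RLIMIT_MEMLOCK would be exceeded */

	/* ... pin the pages; if that fails, or on teardown: ... */
	account_locked_vm(mm, nr_pages, false);
	return 0;
}
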
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * GFP_NOWAIT and GFP_ATOMIC are not supported, nor is the __GFP_NORETRY modifier.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contributes less to long-term fragmentation than the vmalloc
	 * fallback. However, make sure that larger requests are not too
	 * disruptive - no OOM killer and no allocation failure warnings as we
	 * have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;

		/* nofail semantic is implemented by the vmalloc fallback */
		kmalloc_flags &= ~__GFP_NOFAIL;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	return __vmalloc_node(size, 1, flags, node,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

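/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): kvmalloc_array() first tries a physically contiguous kmalloc and
 * transparently falls back to vmalloc for larger sizes; either way the
 * buffer is released with kvfree(). The table type and function names are
 * hypothetical.
 */
static __maybe_unused u64 *example_alloc_table(size_t nr_entries)
{
	/* May be kmalloc- or vmalloc-backed, depending on the size. */
	return kvmalloc_array(nr_entries, sizeof(u64), GFP_KERNEL | __GFP_ZERO);
}

static __maybe_unused void example_free_table(u64 *table)
{
	kvfree(table);	/* correct for both kmalloc and vmalloc backing */
}
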
/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);

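/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a buffer holding secrets, for example key material copied in from
 * user space, is released with kvfree_sensitive() so its contents are
 * zeroed before the memory is freed. The function name is hypothetical.
 */
static __maybe_unused int example_use_secret(const void __user *ukey, size_t len)
{
	void *key = memdup_user(ukey, len);

	if (IS_ERR(key))
		return PTR_ERR(key);

	/* ... use the key material ... */

	kvfree_sensitive(key, len);	/* memzero_explicit() + kvfree() */
	return 0;
}
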
void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
{
	void *newp;

	if (oldsize >= newsize)
		return (void *)p;
	newp = kvmalloc(newsize, flags);
	if (!newp)
		return NULL;
	memcpy(newp, p, oldsize);
	kvfree(p);
	return newp;
}
EXPORT_SYMBOL(kvrealloc);

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	return folio_raw_mapping(page_folio(page));
}

/**
 * folio_mapped - Is this folio mapped into userspace?
 * @folio: The folio.
 *
 * Return: True if any page in this folio is referenced by user page tables.
 */
bool folio_mapped(struct folio *folio)
{
	long i, nr;

	if (!folio_test_large(folio))
		return atomic_read(&folio->_mapcount) >= 0;
	if (atomic_read(folio_mapcount_ptr(folio)) >= 0)
		return true;
	if (folio_test_hugetlb(folio))
		return false;

	nr = folio_nr_pages(folio);
	for (i = 0; i < nr; i++) {
		if (atomic_read(&folio_page(folio, i)->_mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(folio_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	struct folio *folio = page_folio(page);
	unsigned long mapping = (unsigned long)folio->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (void *)(mapping - PAGE_MAPPING_ANON);
}

/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */
struct address_space *folio_mapping(struct folio *folio)
{
	struct address_space *mapping;

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(folio_test_slab(folio)))
		return NULL;

	if (unlikely(folio_test_swapcache(folio)))
		return swap_address_space(folio_swap_entry(folio));

	mapping = folio->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(folio_mapping);

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains total number of mapping
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */
void folio_copy(struct folio *dst, struct folio *src)
{
	long i = 0;
	long nr = folio_nr_pages(src);

	for (;;) {
		copy_highpage(folio_page(dst, i), folio_page(src, i));
		if (++i == nr)
			break;
		cond_resched();
	}
}

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with loose policies
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to the
	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
	 * with the strict "NEVER", and to avoid a possible race condition
	 * (even though the user usually won't switch to OVERCOMMIT_NEVER very
	 * frequently), the switch is done in the following order:
	 *      1. changing the batch
	 *      2. sync percpu count on each CPU
	 *      3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

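/*
 * Worked example (editor's addition, not part of the original file): with
 * OVERCOMMIT_NEVER, 8 GiB of RAM, no hugetlb pages, 2 GiB of swap and the
 * default overcommit_ratio of 50, the limit above evaluates to
 * 8 GiB * 50% + 2 GiB = 6 GiB (expressed in pages). A non-zero
 * overcommit_kbytes overrides the ratio entirely.
 */
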
/*
 * Make sure vm_committed_as is in one cacheline and not cacheline shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate.  The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
	const char *type;

	if (kmem_valid_obj(object)) {
		kmem_dump_obj(object);
		return;
	}

	if (vmalloc_dump_obj(object))
		return;

	if (virt_addr_valid(object))
		type = "non-slab/vmalloc memory";
	else if (object == NULL)
		type = "NULL pointer";
	else if (object == ZERO_SIZE_PTR)
		type = "zero-size pointer";
	else
		type = "non-paged memory";

	pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif
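/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): mem_dump_obj() continues the current console line with pr_cont(),
 * so the caller prints its own preamble first. The function name is
 * hypothetical.
 */
static __maybe_unused void example_report_bad_object(void *object)
{
	pr_alert("unexpected object %px:", object);
	mem_dump_obj(object);	/* appends the provenance and a newline */
}
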

/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
 * synchronize with such drivers, achieving that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() is used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
	down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
	up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
	down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
	up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);
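/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a PFN walker in the style of /proc/kcore brackets its check and
 * read with page_offline_freeze()/page_offline_thaw() so that a page it has
 * just found to be online cannot become PageOffline() (and thus unsafe to
 * touch) while it is being read. The function name is hypothetical.
 */
static __maybe_unused bool example_page_safe_to_read(struct page *page)
{
	bool safe;

	page_offline_freeze();
	safe = !PageOffline(page);
	/* ... read the page contents here, still under the freeze ... */
	page_offline_thaw();

	return safe;
}
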

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
void flush_dcache_folio(struct folio *folio)
{
	long i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++)
		flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif