linux/include/linux/mm.h
<<
>>
Prefs
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _LINUX_MM_H
   3#define _LINUX_MM_H
   4
   5#include <linux/errno.h>
   6
   7#ifdef __KERNEL__
   8
   9#include <linux/mmdebug.h>
  10#include <linux/gfp.h>
  11#include <linux/bug.h>
  12#include <linux/list.h>
  13#include <linux/mmzone.h>
  14#include <linux/rbtree.h>
  15#include <linux/atomic.h>
  16#include <linux/debug_locks.h>
  17#include <linux/mm_types.h>
  18#include <linux/range.h>
  19#include <linux/pfn.h>
  20#include <linux/percpu-refcount.h>
  21#include <linux/bit_spinlock.h>
  22#include <linux/shrinker.h>
  23#include <linux/resource.h>
  24#include <linux/page_ext.h>
  25#include <linux/err.h>
  26#include <linux/page_ref.h>
  27#include <linux/memremap.h>
  28
  29struct mempolicy;
  30struct anon_vma;
  31struct anon_vma_chain;
  32struct file_ra_state;
  33struct user_struct;
  34struct writeback_control;
  35struct bdi_writeback;
  36
  37void init_mm_internals(void);
  38
  39#ifndef CONFIG_NEED_MULTIPLE_NODES      /* Don't use mapnrs, do it properly */
  40extern unsigned long max_mapnr;
  41
  42static inline void set_max_mapnr(unsigned long limit)
  43{
  44        max_mapnr = limit;
  45}
  46#else
  47static inline void set_max_mapnr(unsigned long limit) { }
  48#endif
  49
  50extern unsigned long totalram_pages;
  51extern void * high_memory;
  52extern int page_cluster;
  53
  54#ifdef CONFIG_SYSCTL
  55extern int sysctl_legacy_va_layout;
  56#else
  57#define sysctl_legacy_va_layout 0
  58#endif
  59
  60#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
  61extern const int mmap_rnd_bits_min;
  62extern const int mmap_rnd_bits_max;
  63extern int mmap_rnd_bits __read_mostly;
  64#endif
  65#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
  66extern const int mmap_rnd_compat_bits_min;
  67extern const int mmap_rnd_compat_bits_max;
  68extern int mmap_rnd_compat_bits __read_mostly;
  69#endif
  70
  71#include <asm/page.h>
  72#include <asm/pgtable.h>
  73#include <asm/processor.h>
  74
  75#ifndef __pa_symbol
  76#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
  77#endif
  78
  79#ifndef page_to_virt
  80#define page_to_virt(x) __va(PFN_PHYS(page_to_pfn(x)))
  81#endif
  82
  83#ifndef lm_alias
  84#define lm_alias(x)     __va(__pa_symbol(x))
  85#endif
  86
  87/*
  88 * To prevent common memory management code establishing
  89 * a zero page mapping on a read fault.
  90 * This macro should be defined within <asm/pgtable.h>.
  91 * s390 does this to prevent multiplexing of hardware bits
  92 * related to the physical page in case of virtualization.
  93 */
  94#ifndef mm_forbids_zeropage
  95#define mm_forbids_zeropage(X)  (0)
  96#endif
  97
  98/*
  99 * Default maximum number of active map areas, this limits the number of vmas
 100 * per mm struct. Users can overwrite this number by sysctl but there is a
 101 * problem.
 102 *
 103 * When a program's coredump is generated as ELF format, a section is created
 104 * per a vma. In ELF, the number of sections is represented in unsigned short.
 105 * This means the number of sections should be smaller than 65535 at coredump.
 106 * Because the kernel adds some informative sections to a image of program at
 107 * generating coredump, we need some margin. The number of extra sections is
 108 * 1-3 now and depends on arch. We use "5" as safe margin, here.
 109 *
 110 * ELF extended numbering allows more than 65535 sections, so 16-bit bound is
 111 * not a hard limit any more. Although some userspace tools can be surprised by
 112 * that.
 113 */
 114#define MAPCOUNT_ELF_CORE_MARGIN        (5)
 115#define DEFAULT_MAX_MAP_COUNT   (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
 116
 117extern int sysctl_max_map_count;
 118
 119extern unsigned long sysctl_user_reserve_kbytes;
 120extern unsigned long sysctl_admin_reserve_kbytes;
 121
 122extern int sysctl_overcommit_memory;
 123extern int sysctl_overcommit_ratio;
 124extern unsigned long sysctl_overcommit_kbytes;
 125
 126extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
 127                                    size_t *, loff_t *);
 128extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
 129                                    size_t *, loff_t *);
 130
 131#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 132
 133/* to align the pointer to the (next) page boundary */
 134#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
 135
 136/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
 137#define PAGE_ALIGNED(addr)      IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
 138
 139/*
 140 * Linux kernel virtual memory manager primitives.
 141 * The idea being to have a "virtual" mm in the same way
 142 * we have a virtual fs - giving a cleaner interface to the
 143 * mm details, and allowing different kinds of memory mappings
 144 * (from shared memory to executable loading to arbitrary
 145 * mmap() functions).
 146 */
 147
 148extern struct kmem_cache *vm_area_cachep;
 149
 150#ifndef CONFIG_MMU
 151extern struct rb_root nommu_region_tree;
 152extern struct rw_semaphore nommu_region_sem;
 153
 154extern unsigned int kobjsize(const void *objp);
 155#endif
 156
 157/*
 158 * vm_flags in vm_area_struct, see mm_types.h.
 159 * When changing, update also include/trace/events/mmflags.h
 160 */
 161#define VM_NONE         0x00000000
 162
 163#define VM_READ         0x00000001      /* currently active flags */
 164#define VM_WRITE        0x00000002
 165#define VM_EXEC         0x00000004
 166#define VM_SHARED       0x00000008
 167
 168/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
 169#define VM_MAYREAD      0x00000010      /* limits for mprotect() etc */
 170#define VM_MAYWRITE     0x00000020
 171#define VM_MAYEXEC      0x00000040
 172#define VM_MAYSHARE     0x00000080
 173
 174#define VM_GROWSDOWN    0x00000100      /* general info on the segment */
 175#define VM_UFFD_MISSING 0x00000200      /* missing pages tracking */
 176#define VM_PFNMAP       0x00000400      /* Page-ranges managed without "struct page", just pure PFN */
 177#define VM_DENYWRITE    0x00000800      /* ETXTBSY on write attempts.. */
 178#define VM_UFFD_WP      0x00001000      /* wrprotect pages tracking */
 179
 180#define VM_LOCKED       0x00002000
 181#define VM_IO           0x00004000      /* Memory mapped I/O or similar */
 182
 183                                        /* Used by sys_madvise() */
 184#define VM_SEQ_READ     0x00008000      /* App will access data sequentially */
 185#define VM_RAND_READ    0x00010000      /* App will not benefit from clustered reads */
 186
 187#define VM_DONTCOPY     0x00020000      /* Do not copy this vma on fork */
 188#define VM_DONTEXPAND   0x00040000      /* Cannot expand with mremap() */
 189#define VM_LOCKONFAULT  0x00080000      /* Lock the pages covered when they are faulted in */
 190#define VM_ACCOUNT      0x00100000      /* Is a VM accounted object */
 191#define VM_NORESERVE    0x00200000      /* should the VM suppress accounting */
 192#define VM_HUGETLB      0x00400000      /* Huge TLB Page VM */
 193#define VM_ARCH_1       0x01000000      /* Architecture-specific flag */
 194#define VM_WIPEONFORK   0x02000000      /* Wipe VMA contents in child. */
 195#define VM_DONTDUMP     0x04000000      /* Do not include in the core dump */
 196
 197#ifdef CONFIG_MEM_SOFT_DIRTY
 198# define VM_SOFTDIRTY   0x08000000      /* Not soft dirty clean area */
 199#else
 200# define VM_SOFTDIRTY   0
 201#endif
 202
 203#define VM_MIXEDMAP     0x10000000      /* Can contain "struct page" and pure PFN pages */
 204#define VM_HUGEPAGE     0x20000000      /* MADV_HUGEPAGE marked this vma */
 205#define VM_NOHUGEPAGE   0x40000000      /* MADV_NOHUGEPAGE marked this vma */
 206#define VM_MERGEABLE    0x80000000      /* KSM may merge identical pages */
 207
 208#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
 209#define VM_HIGH_ARCH_BIT_0      32      /* bit only usable on 64-bit architectures */
 210#define VM_HIGH_ARCH_BIT_1      33      /* bit only usable on 64-bit architectures */
 211#define VM_HIGH_ARCH_BIT_2      34      /* bit only usable on 64-bit architectures */
 212#define VM_HIGH_ARCH_BIT_3      35      /* bit only usable on 64-bit architectures */
 213#define VM_HIGH_ARCH_BIT_4      36      /* bit only usable on 64-bit architectures */
 214#define VM_HIGH_ARCH_0  BIT(VM_HIGH_ARCH_BIT_0)
 215#define VM_HIGH_ARCH_1  BIT(VM_HIGH_ARCH_BIT_1)
 216#define VM_HIGH_ARCH_2  BIT(VM_HIGH_ARCH_BIT_2)
 217#define VM_HIGH_ARCH_3  BIT(VM_HIGH_ARCH_BIT_3)
 218#define VM_HIGH_ARCH_4  BIT(VM_HIGH_ARCH_BIT_4)
 219#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
 220
 221#if defined(CONFIG_X86)
 222# define VM_PAT         VM_ARCH_1       /* PAT reserves whole VMA at once (x86) */
 223#if defined (CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
 224# define VM_PKEY_SHIFT  VM_HIGH_ARCH_BIT_0
 225# define VM_PKEY_BIT0   VM_HIGH_ARCH_0  /* A protection key is a 4-bit value */
 226# define VM_PKEY_BIT1   VM_HIGH_ARCH_1
 227# define VM_PKEY_BIT2   VM_HIGH_ARCH_2
 228# define VM_PKEY_BIT3   VM_HIGH_ARCH_3
 229#endif
 230#elif defined(CONFIG_PPC)
 231# define VM_SAO         VM_ARCH_1       /* Strong Access Ordering (powerpc) */
 232#elif defined(CONFIG_PARISC)
 233# define VM_GROWSUP     VM_ARCH_1
 234#elif defined(CONFIG_METAG)
 235# define VM_GROWSUP     VM_ARCH_1
 236#elif defined(CONFIG_IA64)
 237# define VM_GROWSUP     VM_ARCH_1
 238#elif !defined(CONFIG_MMU)
 239# define VM_MAPPED_COPY VM_ARCH_1       /* T if mapped copy of data (nommu mmap) */
 240#endif
 241
 242#if defined(CONFIG_X86_INTEL_MPX)
 243/* MPX specific bounds table or bounds directory */
 244# define VM_MPX         VM_HIGH_ARCH_4
 245#else
 246# define VM_MPX         VM_NONE
 247#endif
 248
 249#ifndef VM_GROWSUP
 250# define VM_GROWSUP     VM_NONE
 251#endif
 252
 253/* Bits set in the VMA until the stack is in its final location */
 254#define VM_STACK_INCOMPLETE_SETUP       (VM_RAND_READ | VM_SEQ_READ)
 255
 256#ifndef VM_STACK_DEFAULT_FLAGS          /* arch can override this */
 257#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
 258#endif
 259
 260#ifdef CONFIG_STACK_GROWSUP
 261#define VM_STACK        VM_GROWSUP
 262#else
 263#define VM_STACK        VM_GROWSDOWN
 264#endif
 265
 266#define VM_STACK_FLAGS  (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
 267
 268/*
 269 * Special vmas that are non-mergable, non-mlock()able.
 270 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 271 */
 272#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
 273
 274/* This mask defines which mm->def_flags a process can inherit its parent */
 275#define VM_INIT_DEF_MASK        VM_NOHUGEPAGE
 276
 277/* This mask is used to clear all the VMA flags used by mlock */
 278#define VM_LOCKED_CLEAR_MASK    (~(VM_LOCKED | VM_LOCKONFAULT))
 279
 280/*
 281 * mapping from the currently active vm_flags protection bits (the
 282 * low four bits) to a page protection mask..
 283 */
 284extern pgprot_t protection_map[16];
 285
 286#define FAULT_FLAG_WRITE        0x01    /* Fault was a write access */
 287#define FAULT_FLAG_MKWRITE      0x02    /* Fault was mkwrite of existing pte */
 288#define FAULT_FLAG_ALLOW_RETRY  0x04    /* Retry fault if blocking */
 289#define FAULT_FLAG_RETRY_NOWAIT 0x08    /* Don't drop mmap_sem and wait when retrying */
 290#define FAULT_FLAG_KILLABLE     0x10    /* The fault task is in SIGKILL killable region */
 291#define FAULT_FLAG_TRIED        0x20    /* Second try */
 292#define FAULT_FLAG_USER         0x40    /* The fault originated in userspace */
 293#define FAULT_FLAG_REMOTE       0x80    /* faulting for non current tsk/mm */
 294#define FAULT_FLAG_INSTRUCTION  0x100   /* The fault was during an instruction fetch */
 295
 296#define FAULT_FLAG_TRACE \
 297        { FAULT_FLAG_WRITE,             "WRITE" }, \
 298        { FAULT_FLAG_MKWRITE,           "MKWRITE" }, \
 299        { FAULT_FLAG_ALLOW_RETRY,       "ALLOW_RETRY" }, \
 300        { FAULT_FLAG_RETRY_NOWAIT,      "RETRY_NOWAIT" }, \
 301        { FAULT_FLAG_KILLABLE,          "KILLABLE" }, \
 302        { FAULT_FLAG_TRIED,             "TRIED" }, \
 303        { FAULT_FLAG_USER,              "USER" }, \
 304        { FAULT_FLAG_REMOTE,            "REMOTE" }, \
 305        { FAULT_FLAG_INSTRUCTION,       "INSTRUCTION" }
 306
 307/*
 308 * vm_fault is filled by the the pagefault handler and passed to the vma's
 309 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 310 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 311 *
 312 * MM layer fills up gfp_mask for page allocations but fault handler might
 313 * alter it if its implementation requires a different allocation context.
 314 *
 315 * pgoff should be used in favour of virtual_address, if possible.
 316 */
 317struct vm_fault {
 318        struct vm_area_struct *vma;     /* Target VMA */
 319        unsigned int flags;             /* FAULT_FLAG_xxx flags */
 320        gfp_t gfp_mask;                 /* gfp mask to be used for allocations */
 321        pgoff_t pgoff;                  /* Logical page offset based on vma */
 322        unsigned long address;          /* Faulting virtual address */
 323        pmd_t *pmd;                     /* Pointer to pmd entry matching
 324                                         * the 'address' */
 325        pud_t *pud;                     /* Pointer to pud entry matching
 326                                         * the 'address'
 327                                         */
 328        pte_t orig_pte;                 /* Value of PTE at the time of fault */
 329
 330        struct page *cow_page;          /* Page handler may use for COW fault */
 331        struct mem_cgroup *memcg;       /* Cgroup cow_page belongs to */
 332        struct page *page;              /* ->fault handlers should return a
 333                                         * page here, unless VM_FAULT_NOPAGE
 334                                         * is set (which is also implied by
 335                                         * VM_FAULT_ERROR).
 336                                         */
 337        /* These three entries are valid only while holding ptl lock */
 338        pte_t *pte;                     /* Pointer to pte entry matching
 339                                         * the 'address'. NULL if the page
 340                                         * table hasn't been allocated.
 341                                         */
 342        spinlock_t *ptl;                /* Page table lock.
 343                                         * Protects pte page table if 'pte'
 344                                         * is not NULL, otherwise pmd.
 345                                         */
 346        pgtable_t prealloc_pte;         /* Pre-allocated pte page table.
 347                                         * vm_ops->map_pages() calls
 348                                         * alloc_set_pte() from atomic context.
 349                                         * do_fault_around() pre-allocates
 350                                         * page table to avoid allocation from
 351                                         * atomic context.
 352                                         */
 353};
 354
 355/* page entry size for vm->huge_fault() */
 356enum page_entry_size {
 357        PE_SIZE_PTE = 0,
 358        PE_SIZE_PMD,
 359        PE_SIZE_PUD,
 360};
 361
 362/*
 363 * These are the virtual MM functions - opening of an area, closing and
 364 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 365 * to the functions called when a no-page or a wp-page exception occurs. 
 366 */
 367struct vm_operations_struct {
 368        void (*open)(struct vm_area_struct * area);
 369        void (*close)(struct vm_area_struct * area);
 370        int (*mremap)(struct vm_area_struct * area);
 371        int (*fault)(struct vm_fault *vmf);
 372        int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
 373        void (*map_pages)(struct vm_fault *vmf,
 374                        pgoff_t start_pgoff, pgoff_t end_pgoff);
 375
 376        /* notification that a previously read-only page is about to become
 377         * writable, if an error is returned it will cause a SIGBUS */
 378        int (*page_mkwrite)(struct vm_fault *vmf);
 379
 380        /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
 381        int (*pfn_mkwrite)(struct vm_fault *vmf);
 382
 383        /* called by access_process_vm when get_user_pages() fails, typically
 384         * for use by special VMAs that can switch between memory and hardware
 385         */
 386        int (*access)(struct vm_area_struct *vma, unsigned long addr,
 387                      void *buf, int len, int write);
 388
 389        /* Called by the /proc/PID/maps code to ask the vma whether it
 390         * has a special name.  Returning non-NULL will also cause this
 391         * vma to be dumped unconditionally. */
 392        const char *(*name)(struct vm_area_struct *vma);
 393
 394#ifdef CONFIG_NUMA
 395        /*
 396         * set_policy() op must add a reference to any non-NULL @new mempolicy
 397         * to hold the policy upon return.  Caller should pass NULL @new to
 398         * remove a policy and fall back to surrounding context--i.e. do not
 399         * install a MPOL_DEFAULT policy, nor the task or system default
 400         * mempolicy.
 401         */
 402        int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
 403
 404        /*
 405         * get_policy() op must add reference [mpol_get()] to any policy at
 406         * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
 407         * in mm/mempolicy.c will do this automatically.
 408         * get_policy() must NOT add a ref if the policy at (vma,addr) is not
 409         * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
 410         * If no [shared/vma] mempolicy exists at the addr, get_policy() op
 411         * must return NULL--i.e., do not "fallback" to task or system default
 412         * policy.
 413         */
 414        struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
 415                                        unsigned long addr);
 416#endif
 417        /*
 418         * Called by vm_normal_page() for special PTEs to find the
 419         * page for @addr.  This is useful if the default behavior
 420         * (using pte_page()) would not find the correct page.
 421         */
 422        struct page *(*find_special_page)(struct vm_area_struct *vma,
 423                                          unsigned long addr);
 424};
 425
 426struct mmu_gather;
 427struct inode;
 428
 429#define page_private(page)              ((page)->private)
 430#define set_page_private(page, v)       ((page)->private = (v))
 431
 432#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
 433static inline int pmd_devmap(pmd_t pmd)
 434{
 435        return 0;
 436}
 437static inline int pud_devmap(pud_t pud)
 438{
 439        return 0;
 440}
 441static inline int pgd_devmap(pgd_t pgd)
 442{
 443        return 0;
 444}
 445#endif
 446
 447/*
 448 * FIXME: take this include out, include page-flags.h in
 449 * files which need it (119 of them)
 450 */
 451#include <linux/page-flags.h>
 452#include <linux/huge_mm.h>
 453
 454/*
 455 * Methods to modify the page usage count.
 456 *
 457 * What counts for a page usage:
 458 * - cache mapping   (page->mapping)
 459 * - private data    (page->private)
 460 * - page mapped in a task's page tables, each mapping
 461 *   is counted separately
 462 *
 463 * Also, many kernel routines increase the page count before a critical
 464 * routine so they can be sure the page doesn't go away from under them.
 465 */
 466
 467/*
 468 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 469 */
 470static inline int put_page_testzero(struct page *page)
 471{
 472        VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
 473        return page_ref_dec_and_test(page);
 474}
 475
 476/*
 477 * Try to grab a ref unless the page has a refcount of zero, return false if
 478 * that is the case.
 479 * This can be called when MMU is off so it must not access
 480 * any of the virtual mappings.
 481 */
 482static inline int get_page_unless_zero(struct page *page)
 483{
 484        return page_ref_add_unless(page, 1, 0);
 485}
 486
 487extern int page_is_ram(unsigned long pfn);
 488
 489enum {
 490        REGION_INTERSECTS,
 491        REGION_DISJOINT,
 492        REGION_MIXED,
 493};
 494
 495int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
 496                      unsigned long desc);
 497
 498/* Support for virtually mapped pages */
 499struct page *vmalloc_to_page(const void *addr);
 500unsigned long vmalloc_to_pfn(const void *addr);
 501
 502/*
 503 * Determine if an address is within the vmalloc range
 504 *
 505 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 506 * is no special casing required.
 507 */
 508static inline bool is_vmalloc_addr(const void *x)
 509{
 510#ifdef CONFIG_MMU
 511        unsigned long addr = (unsigned long)x;
 512
 513        return addr >= VMALLOC_START && addr < VMALLOC_END;
 514#else
 515        return false;
 516#endif
 517}
 518#ifdef CONFIG_MMU
 519extern int is_vmalloc_or_module_addr(const void *x);
 520#else
 521static inline int is_vmalloc_or_module_addr(const void *x)
 522{
 523        return 0;
 524}
 525#endif
 526
 527extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
 528static inline void *kvmalloc(size_t size, gfp_t flags)
 529{
 530        return kvmalloc_node(size, flags, NUMA_NO_NODE);
 531}
 532static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
 533{
 534        return kvmalloc_node(size, flags | __GFP_ZERO, node);
 535}
 536static inline void *kvzalloc(size_t size, gfp_t flags)
 537{
 538        return kvmalloc(size, flags | __GFP_ZERO);
 539}
 540
 541static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
 542{
 543        if (size != 0 && n > SIZE_MAX / size)
 544                return NULL;
 545
 546        return kvmalloc(n * size, flags);
 547}
 548
 549extern void kvfree(const void *addr);
 550
 551static inline atomic_t *compound_mapcount_ptr(struct page *page)
 552{
 553        return &page[1].compound_mapcount;
 554}
 555
 556static inline int compound_mapcount(struct page *page)
 557{
 558        VM_BUG_ON_PAGE(!PageCompound(page), page);
 559        page = compound_head(page);
 560        return atomic_read(compound_mapcount_ptr(page)) + 1;
 561}
 562
 563/*
 564 * The atomic page->_mapcount, starts from -1: so that transitions
 565 * both from it and to it can be tracked, using atomic_inc_and_test
 566 * and atomic_add_negative(-1).
 567 */
 568static inline void page_mapcount_reset(struct page *page)
 569{
 570        atomic_set(&(page)->_mapcount, -1);
 571}
 572
 573int __page_mapcount(struct page *page);
 574
 575static inline int page_mapcount(struct page *page)
 576{
 577        VM_BUG_ON_PAGE(PageSlab(page), page);
 578
 579        if (unlikely(PageCompound(page)))
 580                return __page_mapcount(page);
 581        return atomic_read(&page->_mapcount) + 1;
 582}
 583
 584#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 585int total_mapcount(struct page *page);
 586int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
 587#else
 588static inline int total_mapcount(struct page *page)
 589{
 590        return page_mapcount(page);
 591}
 592static inline int page_trans_huge_mapcount(struct page *page,
 593                                           int *total_mapcount)
 594{
 595        int mapcount = page_mapcount(page);
 596        if (total_mapcount)
 597                *total_mapcount = mapcount;
 598        return mapcount;
 599}
 600#endif
 601
 602static inline struct page *virt_to_head_page(const void *x)
 603{
 604        struct page *page = virt_to_page(x);
 605
 606        return compound_head(page);
 607}
 608
 609void __put_page(struct page *page);
 610
 611void put_pages_list(struct list_head *pages);
 612
 613void split_page(struct page *page, unsigned int order);
 614
 615/*
 616 * Compound pages have a destructor function.  Provide a
 617 * prototype for that function and accessor functions.
 618 * These are _only_ valid on the head of a compound page.
 619 */
 620typedef void compound_page_dtor(struct page *);
 621
 622/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
 623enum compound_dtor_id {
 624        NULL_COMPOUND_DTOR,
 625        COMPOUND_PAGE_DTOR,
 626#ifdef CONFIG_HUGETLB_PAGE
 627        HUGETLB_PAGE_DTOR,
 628#endif
 629#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 630        TRANSHUGE_PAGE_DTOR,
 631#endif
 632        NR_COMPOUND_DTORS,
 633};
 634extern compound_page_dtor * const compound_page_dtors[];
 635
 636static inline void set_compound_page_dtor(struct page *page,
 637                enum compound_dtor_id compound_dtor)
 638{
 639        VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
 640        page[1].compound_dtor = compound_dtor;
 641}
 642
 643static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
 644{
 645        VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
 646        return compound_page_dtors[page[1].compound_dtor];
 647}
 648
 649static inline unsigned int compound_order(struct page *page)
 650{
 651        if (!PageHead(page))
 652                return 0;
 653        return page[1].compound_order;
 654}
 655
 656static inline void set_compound_order(struct page *page, unsigned int order)
 657{
 658        page[1].compound_order = order;
 659}
 660
 661void free_compound_page(struct page *page);
 662
 663#ifdef CONFIG_MMU
 664/*
 665 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 666 * servicing faults for write access.  In the normal case, do always want
 667 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 668 * that do not have writing enabled, when used by access_process_vm.
 669 */
 670static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 671{
 672        if (likely(vma->vm_flags & VM_WRITE))
 673                pte = pte_mkwrite(pte);
 674        return pte;
 675}
 676
 677int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 678                struct page *page);
 679int finish_fault(struct vm_fault *vmf);
 680int finish_mkwrite_fault(struct vm_fault *vmf);
 681#endif
 682
 683/*
 684 * Multiple processes may "see" the same page. E.g. for untouched
 685 * mappings of /dev/null, all processes see the same page full of
 686 * zeroes, and text pages of executables and shared libraries have
 687 * only one copy in memory, at most, normally.
 688 *
 689 * For the non-reserved pages, page_count(page) denotes a reference count.
 690 *   page_count() == 0 means the page is free. page->lru is then used for
 691 *   freelist management in the buddy allocator.
 692 *   page_count() > 0  means the page has been allocated.
 693 *
 694 * Pages are allocated by the slab allocator in order to provide memory
 695 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 696 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 697 * unless a particular usage is carefully commented. (the responsibility of
 698 * freeing the kmalloc memory is the caller's, of course).
 699 *
 700 * A page may be used by anyone else who does a __get_free_page().
 701 * In this case, page_count still tracks the references, and should only
 702 * be used through the normal accessor functions. The top bits of page->flags
 703 * and page->virtual store page management information, but all other fields
 704 * are unused and could be used privately, carefully. The management of this
 705 * page is the responsibility of the one who allocated it, and those who have
 706 * subsequently been given references to it.
 707 *
 708 * The other pages (we may call them "pagecache pages") are completely
 709 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 710 * The following discussion applies only to them.
 711 *
 712 * A pagecache page contains an opaque `private' member, which belongs to the
 713 * page's address_space. Usually, this is the address of a circular list of
 714 * the page's disk buffers. PG_private must be set to tell the VM to call
 715 * into the filesystem to release these pages.
 716 *
 717 * A page may belong to an inode's memory mapping. In this case, page->mapping
 718 * is the pointer to the inode, and page->index is the file offset of the page,
 719 * in units of PAGE_SIZE.
 720 *
 721 * If pagecache pages are not associated with an inode, they are said to be
 722 * anonymous pages. These may become associated with the swapcache, and in that
 723 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 724 *
 725 * In either case (swapcache or inode backed), the pagecache itself holds one
 726 * reference to the page. Setting PG_private should also increment the
 727 * refcount. The each user mapping also has a reference to the page.
 728 *
 729 * The pagecache pages are stored in a per-mapping radix tree, which is
 730 * rooted at mapping->page_tree, and indexed by offset.
 731 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 732 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 733 *
 734 * All pagecache pages may be subject to I/O:
 735 * - inode pages may need to be read from disk,
 736 * - inode pages which have been modified and are MAP_SHARED may need
 737 *   to be written back to the inode on disk,
 738 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 739 *   modified may need to be swapped out to swap space and (later) to be read
 740 *   back into memory.
 741 */
 742
 743/*
 744 * The zone field is never updated after free_area_init_core()
 745 * sets it, so none of the operations on it need to be atomic.
 746 */
 747
 748/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
 749#define SECTIONS_PGOFF          ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
 750#define NODES_PGOFF             (SECTIONS_PGOFF - NODES_WIDTH)
 751#define ZONES_PGOFF             (NODES_PGOFF - ZONES_WIDTH)
 752#define LAST_CPUPID_PGOFF       (ZONES_PGOFF - LAST_CPUPID_WIDTH)
 753
 754/*
 755 * Define the bit shifts to access each section.  For non-existent
 756 * sections we define the shift as 0; that plus a 0 mask ensures
 757 * the compiler will optimise away reference to them.
 758 */
 759#define SECTIONS_PGSHIFT        (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
 760#define NODES_PGSHIFT           (NODES_PGOFF * (NODES_WIDTH != 0))
 761#define ZONES_PGSHIFT           (ZONES_PGOFF * (ZONES_WIDTH != 0))
 762#define LAST_CPUPID_PGSHIFT     (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
 763
 764/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
 765#ifdef NODE_NOT_IN_PAGE_FLAGS
 766#define ZONEID_SHIFT            (SECTIONS_SHIFT + ZONES_SHIFT)
 767#define ZONEID_PGOFF            ((SECTIONS_PGOFF < ZONES_PGOFF)? \
 768                                                SECTIONS_PGOFF : ZONES_PGOFF)
 769#else
 770#define ZONEID_SHIFT            (NODES_SHIFT + ZONES_SHIFT)
 771#define ZONEID_PGOFF            ((NODES_PGOFF < ZONES_PGOFF)? \
 772                                                NODES_PGOFF : ZONES_PGOFF)
 773#endif
 774
 775#define ZONEID_PGSHIFT          (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
 776
 777#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
 778#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
 779#endif
 780
 781#define ZONES_MASK              ((1UL << ZONES_WIDTH) - 1)
 782#define NODES_MASK              ((1UL << NODES_WIDTH) - 1)
 783#define SECTIONS_MASK           ((1UL << SECTIONS_WIDTH) - 1)
 784#define LAST_CPUPID_MASK        ((1UL << LAST_CPUPID_SHIFT) - 1)
 785#define ZONEID_MASK             ((1UL << ZONEID_SHIFT) - 1)
 786
 787static inline enum zone_type page_zonenum(const struct page *page)
 788{
 789        return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 790}
 791
 792#ifdef CONFIG_ZONE_DEVICE
 793static inline bool is_zone_device_page(const struct page *page)
 794{
 795        return page_zonenum(page) == ZONE_DEVICE;
 796}
 797#else
 798static inline bool is_zone_device_page(const struct page *page)
 799{
 800        return false;
 801}
 802#endif
 803
 804#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
 805void put_zone_device_private_or_public_page(struct page *page);
 806DECLARE_STATIC_KEY_FALSE(device_private_key);
 807#define IS_HMM_ENABLED static_branch_unlikely(&device_private_key)
 808static inline bool is_device_private_page(const struct page *page);
 809static inline bool is_device_public_page(const struct page *page);
 810#else /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
 811static inline void put_zone_device_private_or_public_page(struct page *page)
 812{
 813}
 814#define IS_HMM_ENABLED 0
 815static inline bool is_device_private_page(const struct page *page)
 816{
 817        return false;
 818}
 819static inline bool is_device_public_page(const struct page *page)
 820{
 821        return false;
 822}
 823#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
 824
 825
 826static inline void get_page(struct page *page)
 827{
 828        page = compound_head(page);
 829        /*
 830         * Getting a normal page or the head of a compound page
 831         * requires to already have an elevated page->_refcount.
 832         */
 833        VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
 834        page_ref_inc(page);
 835}
 836
 837static inline void put_page(struct page *page)
 838{
 839        page = compound_head(page);
 840
 841        /*
 842         * For private device pages we need to catch refcount transition from
 843         * 2 to 1, when refcount reach one it means the private device page is
 844         * free and we need to inform the device driver through callback. See
 845         * include/linux/memremap.h and HMM for details.
 846         */
 847        if (IS_HMM_ENABLED && unlikely(is_device_private_page(page) ||
 848            unlikely(is_device_public_page(page)))) {
 849                put_zone_device_private_or_public_page(page);
 850                return;
 851        }
 852
 853        if (put_page_testzero(page))
 854                __put_page(page);
 855}
 856
 857#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 858#define SECTION_IN_PAGE_FLAGS
 859#endif
 860
 861/*
 862 * The identification function is mainly used by the buddy allocator for
 863 * determining if two pages could be buddies. We are not really identifying
 864 * the zone since we could be using the section number id if we do not have
 865 * node id available in page flags.
 866 * We only guarantee that it will return the same value for two combinable
 867 * pages in a zone.
 868 */
 869static inline int page_zone_id(struct page *page)
 870{
 871        return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
 872}
 873
 874static inline int zone_to_nid(struct zone *zone)
 875{
 876#ifdef CONFIG_NUMA
 877        return zone->node;
 878#else
 879        return 0;
 880#endif
 881}
 882
 883#ifdef NODE_NOT_IN_PAGE_FLAGS
 884extern int page_to_nid(const struct page *page);
 885#else
 886static inline int page_to_nid(const struct page *page)
 887{
 888        return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
 889}
 890#endif
 891
 892#ifdef CONFIG_NUMA_BALANCING
 893static inline int cpu_pid_to_cpupid(int cpu, int pid)
 894{
 895        return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
 896}
 897
 898static inline int cpupid_to_pid(int cpupid)
 899{
 900        return cpupid & LAST__PID_MASK;
 901}
 902
 903static inline int cpupid_to_cpu(int cpupid)
 904{
 905        return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
 906}
 907
 908static inline int cpupid_to_nid(int cpupid)
 909{
 910        return cpu_to_node(cpupid_to_cpu(cpupid));
 911}
 912
 913static inline bool cpupid_pid_unset(int cpupid)
 914{
 915        return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
 916}
 917
 918static inline bool cpupid_cpu_unset(int cpupid)
 919{
 920        return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
 921}
 922
 923static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
 924{
 925        return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
 926}
 927
 928#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
 929#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
 930static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
 931{
 932        return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
 933}
 934
 935static inline int page_cpupid_last(struct page *page)
 936{
 937        return page->_last_cpupid;
 938}
 939static inline void page_cpupid_reset_last(struct page *page)
 940{
 941        page->_last_cpupid = -1 & LAST_CPUPID_MASK;
 942}
 943#else
 944static inline int page_cpupid_last(struct page *page)
 945{
 946        return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
 947}
 948
 949extern int page_cpupid_xchg_last(struct page *page, int cpupid);
 950
 951static inline void page_cpupid_reset_last(struct page *page)
 952{
 953        page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
 954}
 955#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
 956#else /* !CONFIG_NUMA_BALANCING */
 957static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
 958{
 959        return page_to_nid(page); /* XXX */
 960}
 961
 962static inline int page_cpupid_last(struct page *page)
 963{
 964        return page_to_nid(page); /* XXX */
 965}
 966
 967static inline int cpupid_to_nid(int cpupid)
 968{
 969        return -1;
 970}
 971
 972static inline int cpupid_to_pid(int cpupid)
 973{
 974        return -1;
 975}
 976
 977static inline int cpupid_to_cpu(int cpupid)
 978{
 979        return -1;
 980}
 981
 982static inline int cpu_pid_to_cpupid(int nid, int pid)
 983{
 984        return -1;
 985}
 986
 987static inline bool cpupid_pid_unset(int cpupid)
 988{
 989        return 1;
 990}
 991
 992static inline void page_cpupid_reset_last(struct page *page)
 993{
 994}
 995
 996static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
 997{
 998        return false;
 999}
1000#endif /* CONFIG_NUMA_BALANCING */
1001
1002static inline struct zone *page_zone(const struct page *page)
1003{
1004        return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
1005}
1006
1007static inline pg_data_t *page_pgdat(const struct page *page)
1008{
1009        return NODE_DATA(page_to_nid(page));
1010}
1011
1012#ifdef SECTION_IN_PAGE_FLAGS
1013static inline void set_page_section(struct page *page, unsigned long section)
1014{
1015        page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
1016        page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
1017}
1018
1019static inline unsigned long page_to_section(const struct page *page)
1020{
1021        return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
1022}
1023#endif
1024
1025static inline void set_page_zone(struct page *page, enum zone_type zone)
1026{
1027        page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
1028        page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
1029}
1030
1031static inline void set_page_node(struct page *page, unsigned long node)
1032{
1033        page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
1034        page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
1035}
1036
1037static inline void set_page_links(struct page *page, enum zone_type zone,
1038        unsigned long node, unsigned long pfn)
1039{
1040        set_page_zone(page, zone);
1041        set_page_node(page, node);
1042#ifdef SECTION_IN_PAGE_FLAGS
1043        set_page_section(page, pfn_to_section_nr(pfn));
1044#endif
1045}
1046
1047#ifdef CONFIG_MEMCG
1048static inline struct mem_cgroup *page_memcg(struct page *page)
1049{
1050        return page->mem_cgroup;
1051}
1052static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
1053{
1054        WARN_ON_ONCE(!rcu_read_lock_held());
1055        return READ_ONCE(page->mem_cgroup);
1056}
1057#else
1058static inline struct mem_cgroup *page_memcg(struct page *page)
1059{
1060        return NULL;
1061}
1062static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
1063{
1064        WARN_ON_ONCE(!rcu_read_lock_held());
1065        return NULL;
1066}
1067#endif
1068
1069/*
1070 * Some inline functions in vmstat.h depend on page_zone()
1071 */
1072#include <linux/vmstat.h>
1073
1074static __always_inline void *lowmem_page_address(const struct page *page)
1075{
1076        return page_to_virt(page);
1077}
1078
1079#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
1080#define HASHED_PAGE_VIRTUAL
1081#endif
1082
1083#if defined(WANT_PAGE_VIRTUAL)
1084static inline void *page_address(const struct page *page)
1085{
1086        return page->virtual;
1087}
1088static inline void set_page_address(struct page *page, void *address)
1089{
1090        page->virtual = address;
1091}
1092#define page_address_init()  do { } while(0)
1093#endif
1094
1095#if defined(HASHED_PAGE_VIRTUAL)
1096void *page_address(const struct page *page);
1097void set_page_address(struct page *page, void *virtual);
1098void page_address_init(void);
1099#endif
1100
1101#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
1102#define page_address(page) lowmem_page_address(page)
1103#define set_page_address(page, address)  do { } while(0)
1104#define page_address_init()  do { } while(0)
1105#endif
1106
1107extern void *page_rmapping(struct page *page);
1108extern struct anon_vma *page_anon_vma(struct page *page);
1109extern struct address_space *page_mapping(struct page *page);
1110
1111extern struct address_space *__page_file_mapping(struct page *);
1112
1113static inline
1114struct address_space *page_file_mapping(struct page *page)
1115{
1116        if (unlikely(PageSwapCache(page)))
1117                return __page_file_mapping(page);
1118
1119        return page->mapping;
1120}
1121
1122extern pgoff_t __page_file_index(struct page *page);
1123
1124/*
1125 * Return the pagecache index of the passed page.  Regular pagecache pages
1126 * use ->index whereas swapcache pages use swp_offset(->private)
1127 */
1128static inline pgoff_t page_index(struct page *page)
1129{
1130        if (unlikely(PageSwapCache(page)))
1131                return __page_file_index(page);
1132        return page->index;
1133}
1134
1135bool page_mapped(struct page *page);
1136struct address_space *page_mapping(struct page *page);
1137
1138/*
1139 * Return true only if the page has been allocated with
1140 * ALLOC_NO_WATERMARKS and the low watermark was not
1141 * met implying that the system is under some pressure.
1142 */
1143static inline bool page_is_pfmemalloc(struct page *page)
1144{
1145        /*
1146         * Page index cannot be this large so this must be
1147         * a pfmemalloc page.
1148         */
1149        return page->index == -1UL;
1150}
1151
1152/*
1153 * Only to be called by the page allocator on a freshly allocated
1154 * page.
1155 */
1156static inline void set_page_pfmemalloc(struct page *page)
1157{
1158        page->index = -1UL;
1159}
1160
1161static inline void clear_page_pfmemalloc(struct page *page)
1162{
1163        page->index = 0;
1164}
1165
1166/*
1167 * Different kinds of faults, as returned by handle_mm_fault().
1168 * Used to decide whether a process gets delivered SIGBUS or
1169 * just gets major/minor fault counters bumped up.
1170 */
1171
1172#define VM_FAULT_OOM    0x0001
1173#define VM_FAULT_SIGBUS 0x0002
1174#define VM_FAULT_MAJOR  0x0004
1175#define VM_FAULT_WRITE  0x0008  /* Special case for get_user_pages */
1176#define VM_FAULT_HWPOISON 0x0010        /* Hit poisoned small page */
1177#define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
1178#define VM_FAULT_SIGSEGV 0x0040
1179
1180#define VM_FAULT_NOPAGE 0x0100  /* ->fault installed the pte, not return page */
1181#define VM_FAULT_LOCKED 0x0200  /* ->fault locked the returned page */
1182#define VM_FAULT_RETRY  0x0400  /* ->fault blocked, must retry */
1183#define VM_FAULT_FALLBACK 0x0800        /* huge page fault failed, fall back to small */
1184#define VM_FAULT_DONE_COW   0x1000      /* ->fault has fully handled COW */
1185
1186#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
1187
1188#define VM_FAULT_ERROR  (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
1189                         VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
1190                         VM_FAULT_FALLBACK)
1191
1192#define VM_FAULT_RESULT_TRACE \
1193        { VM_FAULT_OOM,                 "OOM" }, \
1194        { VM_FAULT_SIGBUS,              "SIGBUS" }, \
1195        { VM_FAULT_MAJOR,               "MAJOR" }, \
1196        { VM_FAULT_WRITE,               "WRITE" }, \
1197        { VM_FAULT_HWPOISON,            "HWPOISON" }, \
1198        { VM_FAULT_HWPOISON_LARGE,      "HWPOISON_LARGE" }, \
1199        { VM_FAULT_SIGSEGV,             "SIGSEGV" }, \
1200        { VM_FAULT_NOPAGE,              "NOPAGE" }, \
1201        { VM_FAULT_LOCKED,              "LOCKED" }, \
1202        { VM_FAULT_RETRY,               "RETRY" }, \
1203        { VM_FAULT_FALLBACK,            "FALLBACK" }, \
1204        { VM_FAULT_DONE_COW,            "DONE_COW" }
1205
1206/* Encode hstate index for a hwpoisoned large page */
1207#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
1208#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
1209
1210/*
1211 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
1212 */
1213extern void pagefault_out_of_memory(void);
1214
1215#define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)
1216
1217/*
1218 * Flags passed to show_mem() and show_free_areas() to suppress output in
1219 * various contexts.
1220 */
1221#define SHOW_MEM_FILTER_NODES           (0x0001u)       /* disallowed nodes */
1222
1223extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
1224
1225extern bool can_do_mlock(void);
1226extern int user_shm_lock(size_t, struct user_struct *);
1227extern void user_shm_unlock(size_t, struct user_struct *);
1228
1229/*
1230 * Parameter block passed down to zap_pte_range in exceptional cases.
1231 */
1232struct zap_details {
1233        struct address_space *check_mapping;    /* Check page->mapping if set */
1234        pgoff_t first_index;                    /* Lowest page->index to unmap */
1235        pgoff_t last_index;                     /* Highest page->index to unmap */
1236};
1237
1238struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1239                             pte_t pte, bool with_public_device);
1240#define vm_normal_page(vma, addr, pte) _vm_normal_page(vma, addr, pte, false)
1241
1242struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
1243                                pmd_t pmd);
1244
1245int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1246                unsigned long size);
1247void zap_page_range(struct vm_area_struct *vma, unsigned long address,
1248                unsigned long size);
1249void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
1250                unsigned long start, unsigned long end);
1251
1252/**
1253 * mm_walk - callbacks for walk_page_range
1254 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
1255 *             this handler should only handle pud_trans_huge() puds.
1256 *             the pmd_entry or pte_entry callbacks will be used for
1257 *             regular PUDs.
1258 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
1259 *             this handler is required to be able to handle
1260 *             pmd_trans_huge() pmds.  They may simply choose to
1261 *             split_huge_page() instead of handling it explicitly.
1262 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
1263 * @pte_hole: if set, called for each hole at all levels
1264 * @hugetlb_entry: if set, called for each hugetlb entry
1265 * @test_walk: caller specific callback function to determine whether
1266 *             we walk over the current vma or not. Returning 0
1267 *             value means "do page table walk over the current vma,"
1268 *             and a negative one means "abort current page table walk
1269 *             right now." 1 means "skip the current vma."
1270 * @mm:        mm_struct representing the target process of page table walk
1271 * @vma:       vma currently walked (NULL if walking outside vmas)
1272 * @private:   private data for callbacks' usage
1273 *
1274 * (see the comment on walk_page_range() for more details)
1275 */
1276struct mm_walk {
1277        int (*pud_entry)(pud_t *pud, unsigned long addr,
1278                         unsigned long next, struct mm_walk *walk);
1279        int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
1280                         unsigned long next, struct mm_walk *walk);
1281        int (*pte_entry)(pte_t *pte, unsigned long addr,
1282                         unsigned long next, struct mm_walk *walk);
1283        int (*pte_hole)(unsigned long addr, unsigned long next,
1284                        struct mm_walk *walk);
1285        int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
1286                             unsigned long addr, unsigned long next,
1287                             struct mm_walk *walk);
1288        int (*test_walk)(unsigned long addr, unsigned long next,
1289                        struct mm_walk *walk);
1290        struct mm_struct *mm;
1291        struct vm_area_struct *vma;
1292        void *private;
1293};
1294
1295int walk_page_range(unsigned long addr, unsigned long end,
1296                struct mm_walk *walk);
1297int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
1298void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
1299                unsigned long end, unsigned long floor, unsigned long ceiling);
1300int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
1301                        struct vm_area_struct *vma);
1302void unmap_mapping_range(struct address_space *mapping,
1303                loff_t const holebegin, loff_t const holelen, int even_cows);
1304int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
1305                             unsigned long *start, unsigned long *end,
1306                             pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
1307int follow_pfn(struct vm_area_struct *vma, unsigned long address,
1308        unsigned long *pfn);
1309int follow_phys(struct vm_area_struct *vma, unsigned long address,
1310                unsigned int flags, unsigned long *prot, resource_size_t *phys);
1311int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
1312                        void *buf, int len, int write);
1313
1314static inline void unmap_shared_mapping_range(struct address_space *mapping,
1315                loff_t const holebegin, loff_t const holelen)
1316{
1317        unmap_mapping_range(mapping, holebegin, holelen, 0);
1318}
1319
1320extern void truncate_pagecache(struct inode *inode, loff_t new);
1321extern void truncate_setsize(struct inode *inode, loff_t newsize);
1322void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
1323void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
1324int truncate_inode_page(struct address_space *mapping, struct page *page);
1325int generic_error_remove_page(struct address_space *mapping, struct page *page);
1326int invalidate_inode_page(struct page *page);
1327
1328#ifdef CONFIG_MMU
1329extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
1330                unsigned int flags);
1331extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
1332                            unsigned long address, unsigned int fault_flags,
1333                            bool *unlocked);
1334#else
1335static inline int handle_mm_fault(struct vm_area_struct *vma,
1336                unsigned long address, unsigned int flags)
1337{
1338        /* should never happen if there's no MMU */
1339        BUG();
1340        return VM_FAULT_SIGBUS;
1341}
1342static inline int fixup_user_fault(struct task_struct *tsk,
1343                struct mm_struct *mm, unsigned long address,
1344                unsigned int fault_flags, bool *unlocked)
1345{
1346        /* should never happen if there's no MMU */
1347        BUG();
1348        return -EFAULT;
1349}
1350#endif
1351
1352extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
1353                unsigned int gup_flags);
1354extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1355                void *buf, int len, unsigned int gup_flags);
1356extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
1357                unsigned long addr, void *buf, int len, unsigned int gup_flags);
1358
1359long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
1360                            unsigned long start, unsigned long nr_pages,
1361                            unsigned int gup_flags, struct page **pages,
1362                            struct vm_area_struct **vmas, int *locked);
1363long get_user_pages(unsigned long start, unsigned long nr_pages,
1364                            unsigned int gup_flags, struct page **pages,
1365                            struct vm_area_struct **vmas);
1366long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
1367                    unsigned int gup_flags, struct page **pages, int *locked);
1368long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
1369                    struct page **pages, unsigned int gup_flags);
1370int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1371                        struct page **pages);
1372
1373/* Container for pinned pfns / pages */
1374struct frame_vector {
1375        unsigned int nr_allocated;      /* Number of frames we have space for */
1376        unsigned int nr_frames; /* Number of frames stored in ptrs array */
1377        bool got_ref;           /* Did we pin pages by getting page ref? */
1378        bool is_pfns;           /* Does array contain pages or pfns? */
1379        void *ptrs[0];          /* Array of pinned pfns / pages. Use
1380                                 * pfns_vector_pages() or pfns_vector_pfns()
1381                                 * for access */
1382};
1383
1384struct frame_vector *frame_vector_create(unsigned int nr_frames);
1385void frame_vector_destroy(struct frame_vector *vec);
1386int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
1387                     unsigned int gup_flags, struct frame_vector *vec);
1388void put_vaddr_frames(struct frame_vector *vec);
1389int frame_vector_to_pages(struct frame_vector *vec);
1390void frame_vector_to_pfns(struct frame_vector *vec);
1391
1392static inline unsigned int frame_vector_count(struct frame_vector *vec)
1393{
1394        return vec->nr_frames;
1395}
1396
1397static inline struct page **frame_vector_pages(struct frame_vector *vec)
1398{
1399        if (vec->is_pfns) {
1400                int err = frame_vector_to_pages(vec);
1401
1402                if (err)
1403                        return ERR_PTR(err);
1404        }
1405        return (struct page **)(vec->ptrs);
1406}
1407
1408static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
1409{
1410        if (!vec->is_pfns)
1411                frame_vector_to_pfns(vec);
1412        return (unsigned long *)(vec->ptrs);
1413}
1414
1415struct kvec;
1416int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
1417                        struct page **pages);
1418int get_kernel_page(unsigned long start, int write, struct page **pages);
1419struct page *get_dump_page(unsigned long addr);
1420
1421extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
1422extern void do_invalidatepage(struct page *page, unsigned int offset,
1423                              unsigned int length);
1424
1425int __set_page_dirty_nobuffers(struct page *page);
1426int __set_page_dirty_no_writeback(struct page *page);
1427int redirty_page_for_writepage(struct writeback_control *wbc,
1428                                struct page *page);
1429void account_page_dirtied(struct page *page, struct address_space *mapping);
1430void account_page_cleaned(struct page *page, struct address_space *mapping,
1431                          struct bdi_writeback *wb);
1432int set_page_dirty(struct page *page);
1433int set_page_dirty_lock(struct page *page);
1434void cancel_dirty_page(struct page *page);
1435int clear_page_dirty_for_io(struct page *page);
1436
1437int get_cmdline(struct task_struct *task, char *buffer, int buflen);
1438
1439static inline bool vma_is_anonymous(struct vm_area_struct *vma)
1440{
1441        return !vma->vm_ops;
1442}
1443
1444#ifdef CONFIG_SHMEM
1445/*
1446 * The vma_is_shmem is not inline because it is used only by slow
1447 * paths in userfault.
1448 */
1449bool vma_is_shmem(struct vm_area_struct *vma);
1450#else
1451static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
1452#endif
1453
1454int vma_is_stack_for_current(struct vm_area_struct *vma);
1455
1456extern unsigned long move_page_tables(struct vm_area_struct *vma,
1457                unsigned long old_addr, struct vm_area_struct *new_vma,
1458                unsigned long new_addr, unsigned long len,
1459                bool need_rmap_locks);
1460extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
1461                              unsigned long end, pgprot_t newprot,
1462                              int dirty_accountable, int prot_numa);
1463extern int mprotect_fixup(struct vm_area_struct *vma,
1464                          struct vm_area_struct **pprev, unsigned long start,
1465                          unsigned long end, unsigned long newflags);
1466
1467/*
1468 * doesn't attempt to fault and will return short.
1469 */
1470int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1471                          struct page **pages);
1472/*
1473 * per-process(per-mm_struct) statistics.
1474 */
1475static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
1476{
1477        long val = atomic_long_read(&mm->rss_stat.count[member]);
1478
1479#ifdef SPLIT_RSS_COUNTING
1480        /*
1481         * counter is updated in asynchronous manner and may go to minus.
1482         * But it's never be expected number for users.
1483         */
1484        if (val < 0)
1485                val = 0;
1486#endif
1487        return (unsigned long)val;
1488}
1489
1490static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1491{
1492        atomic_long_add(value, &mm->rss_stat.count[member]);
1493}
1494
1495static inline void inc_mm_counter(struct mm_struct *mm, int member)
1496{
1497        atomic_long_inc(&mm->rss_stat.count[member]);
1498}
1499
1500static inline void dec_mm_counter(struct mm_struct *mm, int member)
1501{
1502        atomic_long_dec(&mm->rss_stat.count[member]);
1503}
1504
1505/* Optimized variant when page is already known not to be PageAnon */
1506static inline int mm_counter_file(struct page *page)
1507{
1508        if (PageSwapBacked(page))
1509                return MM_SHMEMPAGES;
1510        return MM_FILEPAGES;
1511}
1512
1513static inline int mm_counter(struct page *page)
1514{
1515        if (PageAnon(page))
1516                return MM_ANONPAGES;
1517        return mm_counter_file(page);
1518}
1519
1520static inline unsigned long get_mm_rss(struct mm_struct *mm)
1521{
1522        return get_mm_counter(mm, MM_FILEPAGES) +
1523                get_mm_counter(mm, MM_ANONPAGES) +
1524                get_mm_counter(mm, MM_SHMEMPAGES);
1525}
1526
1527static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
1528{
1529        return max(mm->hiwater_rss, get_mm_rss(mm));
1530}
1531
1532static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
1533{
1534        return max(mm->hiwater_vm, mm->total_vm);
1535}
1536
1537static inline void update_hiwater_rss(struct mm_struct *mm)
1538{
1539        unsigned long _rss = get_mm_rss(mm);
1540
1541        if ((mm)->hiwater_rss < _rss)
1542                (mm)->hiwater_rss = _rss;
1543}
1544
1545static inline void update_hiwater_vm(struct mm_struct *mm)
1546{
1547        if (mm->hiwater_vm < mm->total_vm)
1548                mm->hiwater_vm = mm->total_vm;
1549}
1550
1551static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
1552{
1553        mm->hiwater_rss = get_mm_rss(mm);
1554}
1555
1556static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
1557                                         struct mm_struct *mm)
1558{
1559        unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
1560
1561        if (*maxrss < hiwater_rss)
1562                *maxrss = hiwater_rss;
1563}
1564
1565#if defined(SPLIT_RSS_COUNTING)
1566void sync_mm_rss(struct mm_struct *mm);
1567#else
1568static inline void sync_mm_rss(struct mm_struct *mm)
1569{
1570}
1571#endif
1572
1573#ifndef __HAVE_ARCH_PTE_DEVMAP
1574static inline int pte_devmap(pte_t pte)
1575{
1576        return 0;
1577}
1578#endif
1579
1580int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
1581
1582extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1583                               spinlock_t **ptl);
1584static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
1585                                    spinlock_t **ptl)
1586{
1587        pte_t *ptep;
1588        __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
1589        return ptep;
1590}
1591
1592#ifdef __PAGETABLE_P4D_FOLDED
1593static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
1594                                                unsigned long address)
1595{
1596        return 0;
1597}
1598#else
1599int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
1600#endif
1601
1602#ifdef __PAGETABLE_PUD_FOLDED
1603static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
1604                                                unsigned long address)
1605{
1606        return 0;
1607}
1608#else
1609int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
1610#endif
1611
1612#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
1613static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
1614                                                unsigned long address)
1615{
1616        return 0;
1617}
1618
1619static inline void mm_nr_pmds_init(struct mm_struct *mm) {}
1620
1621static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
1622{
1623        return 0;
1624}
1625
1626static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
1627static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
1628
1629#else
1630int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
1631
1632static inline void mm_nr_pmds_init(struct mm_struct *mm)
1633{
1634        atomic_long_set(&mm->nr_pmds, 0);
1635}
1636
1637static inline unsigned long mm_nr_pmds(struct mm_struct *mm)
1638{
1639        return atomic_long_read(&mm->nr_pmds);
1640}
1641
1642static inline void mm_inc_nr_pmds(struct mm_struct *mm)
1643{
1644        atomic_long_inc(&mm->nr_pmds);
1645}
1646
1647static inline void mm_dec_nr_pmds(struct mm_struct *mm)
1648{
1649        atomic_long_dec(&mm->nr_pmds);
1650}
1651#endif
1652
1653int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
1654int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
1655
1656/*
1657 * The following ifdef needed to get the 4level-fixup.h header to work.
1658 * Remove it when 4level-fixup.h has been removed.
1659 */
1660#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
1661
1662#ifndef __ARCH_HAS_5LEVEL_HACK
1663static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
1664                unsigned long address)
1665{
1666        return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
1667                NULL : p4d_offset(pgd, address);
1668}
1669
1670static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
1671                unsigned long address)
1672{
1673        return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
1674                NULL : pud_offset(p4d, address);
1675}
1676#endif /* !__ARCH_HAS_5LEVEL_HACK */
1677
1678static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
1679{
1680        return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
1681                NULL: pmd_offset(pud, address);
1682}
1683#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
1684
1685#if USE_SPLIT_PTE_PTLOCKS
1686#if ALLOC_SPLIT_PTLOCKS
1687void __init ptlock_cache_init(void);
1688extern bool ptlock_alloc(struct page *page);
1689extern void ptlock_free(struct page *page);
1690
1691static inline spinlock_t *ptlock_ptr(struct page *page)
1692{
1693        return page->ptl;
1694}
1695#else /* ALLOC_SPLIT_PTLOCKS */
1696static inline void ptlock_cache_init(void)
1697{
1698}
1699
1700static inline bool ptlock_alloc(struct page *page)
1701{
1702        return true;
1703}
1704
1705static inline void ptlock_free(struct page *page)
1706{
1707}
1708
1709static inline spinlock_t *ptlock_ptr(struct page *page)
1710{
1711        return &page->ptl;
1712}
1713#endif /* ALLOC_SPLIT_PTLOCKS */
1714
1715static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1716{
1717        return ptlock_ptr(pmd_page(*pmd));
1718}
1719
1720static inline bool ptlock_init(struct page *page)
1721{
1722        /*
1723         * prep_new_page() initialize page->private (and therefore page->ptl)
1724         * with 0. Make sure nobody took it in use in between.
1725         *
1726         * It can happen if arch try to use slab for page table allocation:
1727         * slab code uses page->slab_cache, which share storage with page->ptl.
1728         */
1729        VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
1730        if (!ptlock_alloc(page))
1731                return false;
1732        spin_lock_init(ptlock_ptr(page));
1733        return true;
1734}
1735
1736/* Reset page->mapping so free_pages_check won't complain. */
1737static inline void pte_lock_deinit(struct page *page)
1738{
1739        page->mapping = NULL;
1740        ptlock_free(page);
1741}
1742
1743#else   /* !USE_SPLIT_PTE_PTLOCKS */
1744/*
1745 * We use mm->page_table_lock to guard all pagetable pages of the mm.
1746 */
1747static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1748{
1749        return &mm->page_table_lock;
1750}
1751static inline void ptlock_cache_init(void) {}
1752static inline bool ptlock_init(struct page *page) { return true; }
1753static inline void pte_lock_deinit(struct page *page) {}
1754#endif /* USE_SPLIT_PTE_PTLOCKS */
1755
1756static inline void pgtable_init(void)
1757{
1758        ptlock_cache_init();
1759        pgtable_cache_init();
1760}
1761
1762static inline bool pgtable_page_ctor(struct page *page)
1763{
1764        if (!ptlock_init(page))
1765                return false;
1766        inc_zone_page_state(page, NR_PAGETABLE);
1767        return true;
1768}
1769
1770static inline void pgtable_page_dtor(struct page *page)
1771{
1772        pte_lock_deinit(page);
1773        dec_zone_page_state(page, NR_PAGETABLE);
1774}
1775
1776#define pte_offset_map_lock(mm, pmd, address, ptlp)     \
1777({                                                      \
1778        spinlock_t *__ptl = pte_lockptr(mm, pmd);       \
1779        pte_t *__pte = pte_offset_map(pmd, address);    \
1780        *(ptlp) = __ptl;                                \
1781        spin_lock(__ptl);                               \
1782        __pte;                                          \
1783})
1784
1785#define pte_unmap_unlock(pte, ptl)      do {            \
1786        spin_unlock(ptl);                               \
1787        pte_unmap(pte);                                 \
1788} while (0)
1789
1790#define pte_alloc(mm, pmd, address)                     \
1791        (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))
1792
1793#define pte_alloc_map(mm, pmd, address)                 \
1794        (pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address))
1795
1796#define pte_alloc_map_lock(mm, pmd, address, ptlp)      \
1797        (pte_alloc(mm, pmd, address) ?                  \
1798                 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
1799
1800#define pte_alloc_kernel(pmd, address)                  \
1801        ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
1802                NULL: pte_offset_kernel(pmd, address))
1803
1804#if USE_SPLIT_PMD_PTLOCKS
1805
1806static struct page *pmd_to_page(pmd_t *pmd)
1807{
1808        unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
1809        return virt_to_page((void *)((unsigned long) pmd & mask));
1810}
1811
1812static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1813{
1814        return ptlock_ptr(pmd_to_page(pmd));
1815}
1816
1817static inline bool pgtable_pmd_page_ctor(struct page *page)
1818{
1819#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1820        page->pmd_huge_pte = NULL;
1821#endif
1822        return ptlock_init(page);
1823}
1824
1825static inline void pgtable_pmd_page_dtor(struct page *page)
1826{
1827#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1828        VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
1829#endif
1830        ptlock_free(page);
1831}
1832
1833#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
1834
1835#else
1836
1837static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1838{
1839        return &mm->page_table_lock;
1840}
1841
1842static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
1843static inline void pgtable_pmd_page_dtor(struct page *page) {}
1844
1845#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
1846
1847#endif
1848
1849static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
1850{
1851        spinlock_t *ptl = pmd_lockptr(mm, pmd);
1852        spin_lock(ptl);
1853        return ptl;
1854}
1855
1856/*
1857 * No scalability reason to split PUD locks yet, but follow the same pattern
1858 * as the PMD locks to make it easier if we decide to.  The VM should not be
1859 * considered ready to switch to split PUD locks yet; there may be places
1860 * which need to be converted from page_table_lock.
1861 */
1862static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
1863{
1864        return &mm->page_table_lock;
1865}
1866
1867static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
1868{
1869        spinlock_t *ptl = pud_lockptr(mm, pud);
1870
1871        spin_lock(ptl);
1872        return ptl;
1873}
1874
1875extern void __init pagecache_init(void);
1876extern void free_area_init(unsigned long * zones_size);
1877extern void free_area_init_node(int nid, unsigned long * zones_size,
1878                unsigned long zone_start_pfn, unsigned long *zholes_size);
1879extern void free_initmem(void);
1880
1881/*
1882 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
1883 * into the buddy system. The freed pages will be poisoned with pattern
1884 * "poison" if it's within range [0, UCHAR_MAX].
1885 * Return pages freed into the buddy system.
1886 */
1887extern unsigned long free_reserved_area(void *start, void *end,
1888                                        int poison, char *s);
1889
1890#ifdef  CONFIG_HIGHMEM
1891/*
1892 * Free a highmem page into the buddy system, adjusting totalhigh_pages
1893 * and totalram_pages.
1894 */
1895extern void free_highmem_page(struct page *page);
1896#endif
1897
1898extern void adjust_managed_page_count(struct page *page, long count);
1899extern void mem_init_print_info(const char *str);
1900
1901extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
1902
1903/* Free the reserved page into the buddy system, so it gets managed. */
1904static inline void __free_reserved_page(struct page *page)
1905{
1906        ClearPageReserved(page);
1907        init_page_count(page);
1908        __free_page(page);
1909}
1910
1911static inline void free_reserved_page(struct page *page)
1912{
1913        __free_reserved_page(page);
1914        adjust_managed_page_count(page, 1);
1915}
1916
1917static inline void mark_page_reserved(struct page *page)
1918{
1919        SetPageReserved(page);
1920        adjust_managed_page_count(page, -1);
1921}
1922
1923/*
1924 * Default method to free all the __init memory into the buddy system.
1925 * The freed pages will be poisoned with pattern "poison" if it's within
1926 * range [0, UCHAR_MAX].
1927 * Return pages freed into the buddy system.
1928 */
1929static inline unsigned long free_initmem_default(int poison)
1930{
1931        extern char __init_begin[], __init_end[];
1932
1933        return free_reserved_area(&__init_begin, &__init_end,
1934                                  poison, "unused kernel");
1935}
1936
1937static inline unsigned long get_num_physpages(void)
1938{
1939        int nid;
1940        unsigned long phys_pages = 0;
1941
1942        for_each_online_node(nid)
1943                phys_pages += node_present_pages(nid);
1944
1945        return phys_pages;
1946}
1947
1948#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1949/*
1950 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
1951 * zones, allocate the backing mem_map and account for memory holes in a more
1952 * architecture independent manner. This is a substitute for creating the
1953 * zone_sizes[] and zholes_size[] arrays and passing them to
1954 * free_area_init_node()
1955 *
1956 * An architecture is expected to register range of page frames backed by
1957 * physical memory with memblock_add[_node]() before calling
1958 * free_area_init_nodes() passing in the PFN each zone ends at. At a basic
1959 * usage, an architecture is expected to do something like
1960 *
1961 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
1962 *                                                       max_highmem_pfn};
1963 * for_each_valid_physical_page_range()
1964 *      memblock_add_node(base, size, nid)
1965 * free_area_init_nodes(max_zone_pfns);
1966 *
1967 * free_bootmem_with_active_regions() calls free_bootmem_node() for each
1968 * registered physical page range.  Similarly
1969 * sparse_memory_present_with_active_regions() calls memory_present() for
1970 * each range when SPARSEMEM is enabled.
1971 *
1972 * See mm/page_alloc.c for more information on each function exposed by
1973 * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
1974 */
1975extern void free_area_init_nodes(unsigned long *max_zone_pfn);
1976unsigned long node_map_pfn_alignment(void);
1977unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
1978                                                unsigned long end_pfn);
1979extern unsigned long absent_pages_in_range(unsigned long start_pfn,
1980                                                unsigned long end_pfn);
1981extern void get_pfn_range_for_nid(unsigned int nid,
1982                        unsigned long *start_pfn, unsigned long *end_pfn);
1983extern unsigned long find_min_pfn_with_active_regions(void);
1984extern void free_bootmem_with_active_regions(int nid,
1985                                                unsigned long max_low_pfn);
1986extern void sparse_memory_present_with_active_regions(int nid);
1987
1988#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
1989
1990#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
1991    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
1992static inline int __early_pfn_to_nid(unsigned long pfn,
1993                                        struct mminit_pfnnid_cache *state)
1994{
1995        return 0;
1996}
1997#else
1998/* please see mm/page_alloc.c */
1999extern int __meminit early_pfn_to_nid(unsigned long pfn);
2000/* there is a per-arch backend function. */
2001extern int __meminit __early_pfn_to_nid(unsigned long pfn,
2002                                        struct mminit_pfnnid_cache *state);
2003#endif
2004
2005extern void set_dma_reserve(unsigned long new_dma_reserve);
2006extern void memmap_init_zone(unsigned long, int, unsigned long,
2007                                unsigned long, enum memmap_context);
2008extern void setup_per_zone_wmarks(void);
2009extern int __meminit init_per_zone_wmark_min(void);
2010extern void mem_init(void);
2011extern void __init mmap_init(void);
2012extern void show_mem(unsigned int flags, nodemask_t *nodemask);
2013extern long si_mem_available(void);
2014extern void si_meminfo(struct sysinfo * val);
2015extern void si_meminfo_node(struct sysinfo *val, int nid);
2016#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
2017extern unsigned long arch_reserved_kernel_pages(void);
2018#endif
2019
2020extern __printf(3, 4)
2021void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
2022
2023extern void setup_per_cpu_pageset(void);
2024
2025extern void zone_pcp_update(struct zone *zone);
2026extern void zone_pcp_reset(struct zone *zone);
2027
2028/* page_alloc.c */
2029extern int min_free_kbytes;
2030extern int watermark_scale_factor;
2031
2032/* nommu.c */
2033extern atomic_long_t mmap_pages_allocated;
2034extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
2035
2036/* interval_tree.c */
2037void vma_interval_tree_insert(struct vm_area_struct *node,
2038                              struct rb_root_cached *root);
2039void vma_interval_tree_insert_after(struct vm_area_struct *node,
2040                                    struct vm_area_struct *prev,
2041                                    struct rb_root_cached *root);
2042void vma_interval_tree_remove(struct vm_area_struct *node,
2043                              struct rb_root_cached *root);
2044struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
2045                                unsigned long start, unsigned long last);
2046struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
2047                                unsigned long start, unsigned long last);
2048
2049#define vma_interval_tree_foreach(vma, root, start, last)               \
2050        for (vma = vma_interval_tree_iter_first(root, start, last);     \
2051             vma; vma = vma_interval_tree_iter_next(vma, start, last))
2052
2053void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
2054                                   struct rb_root_cached *root);
2055void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
2056                                   struct rb_root_cached *root);
2057struct anon_vma_chain *
2058anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
2059                                  unsigned long start, unsigned long last);
2060struct anon_vma_chain *anon_vma_interval_tree_iter_next(
2061        struct anon_vma_chain *node, unsigned long start, unsigned long last);
2062#ifdef CONFIG_DEBUG_VM_RB
2063void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
2064#endif
2065
2066#define anon_vma_interval_tree_foreach(avc, root, start, last)           \
2067        for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
2068             avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
2069
2070/* mmap.c */
2071extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
2072extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
2073        unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
2074        struct vm_area_struct *expand);
2075static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
2076        unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
2077{
2078        return __vma_adjust(vma, start, end, pgoff, insert, NULL);
2079}
2080extern struct vm_area_struct *vma_merge(struct mm_struct *,
2081        struct vm_area_struct *prev, unsigned long addr, unsigned long end,
2082        unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
2083        struct mempolicy *, struct vm_userfaultfd_ctx);
2084extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
2085extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
2086        unsigned long addr, int new_below);
2087extern int split_vma(struct mm_struct *, struct vm_area_struct *,
2088        unsigned long addr, int new_below);
2089extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
2090extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
2091        struct rb_node **, struct rb_node *);
2092extern void unlink_file_vma(struct vm_area_struct *);
2093extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
2094        unsigned long addr, unsigned long len, pgoff_t pgoff,
2095        bool *need_rmap_locks);
2096extern void exit_mmap(struct mm_struct *);
2097
2098static inline int check_data_rlimit(unsigned long rlim,
2099                                    unsigned long new,
2100                                    unsigned long start,
2101                                    unsigned long end_data,
2102                                    unsigned long start_data)
2103{
2104        if (rlim < RLIM_INFINITY) {
2105                if (((new - start) + (end_data - start_data)) > rlim)
2106                        return -ENOSPC;
2107        }
2108
2109        return 0;
2110}
2111
2112extern int mm_take_all_locks(struct mm_struct *mm);
2113extern void mm_drop_all_locks(struct mm_struct *mm);
2114
2115extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
2116extern struct file *get_mm_exe_file(struct mm_struct *mm);
2117extern struct file *get_task_exe_file(struct task_struct *task);
2118
2119extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
2120extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
2121
2122extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
2123                                   const struct vm_special_mapping *sm);
2124extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
2125                                   unsigned long addr, unsigned long len,
2126                                   unsigned long flags,
2127                                   const struct vm_special_mapping *spec);
2128/* This is an obsolete alternative to _install_special_mapping. */
2129extern int install_special_mapping(struct mm_struct *mm,
2130                                   unsigned long addr, unsigned long len,
2131                                   unsigned long flags, struct page **pages);
2132
2133extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
2134
2135extern unsigned long mmap_region(struct file *file, unsigned long addr,
2136        unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2137        struct list_head *uf);
2138extern unsigned long do_mmap(struct file *file, unsigned long addr,
2139        unsigned long len, unsigned long prot, unsigned long flags,
2140        vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
2141        struct list_head *uf);
2142extern int do_munmap(struct mm_struct *, unsigned long, size_t,
2143                     struct list_head *uf);
2144
2145static inline unsigned long
2146do_mmap_pgoff(struct file *file, unsigned long addr,
2147        unsigned long len, unsigned long prot, unsigned long flags,
2148        unsigned long pgoff, unsigned long *populate,
2149        struct list_head *uf)
2150{
2151        return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
2152}
2153
2154#ifdef CONFIG_MMU
2155extern int __mm_populate(unsigned long addr, unsigned long len,
2156                         int ignore_errors);
2157static inline void mm_populate(unsigned long addr, unsigned long len)
2158{
2159        /* Ignore errors */
2160        (void) __mm_populate(addr, len, 1);
2161}
2162#else
2163static inline void mm_populate(unsigned long addr, unsigned long len) {}
2164#endif
2165
2166/* These take the mm semaphore themselves */
2167extern int __must_check vm_brk(unsigned long, unsigned long);
2168extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
2169extern int vm_munmap(unsigned long, size_t);
2170extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
2171        unsigned long, unsigned long,
2172        unsigned long, unsigned long);
2173
2174struct vm_unmapped_area_info {
2175#define VM_UNMAPPED_AREA_TOPDOWN 1
2176        unsigned long flags;
2177        unsigned long length;
2178        unsigned long low_limit;
2179        unsigned long high_limit;
2180        unsigned long align_mask;
2181        unsigned long align_offset;
2182};
2183
2184extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
2185extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
2186
2187/*
2188 * Search for an unmapped address range.
2189 *
2190 * We are looking for a range that:
2191 * - does not intersect with any VMA;
2192 * - is contained within the [low_limit, high_limit) interval;
2193 * - is at least the desired size.
2194 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
2195 */
2196static inline unsigned long
2197vm_unmapped_area(struct vm_unmapped_area_info *info)
2198{
2199        if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
2200                return unmapped_area_topdown(info);
2201        else
2202                return unmapped_area(info);
2203}
2204
2205/* truncate.c */
2206extern void truncate_inode_pages(struct address_space *, loff_t);
2207extern void truncate_inode_pages_range(struct address_space *,
2208                                       loff_t lstart, loff_t lend);
2209extern void truncate_inode_pages_final(struct address_space *);
2210
2211/* generic vm_area_ops exported for stackable file systems */
2212extern int filemap_fault(struct vm_fault *vmf);
2213extern void filemap_map_pages(struct vm_fault *vmf,
2214                pgoff_t start_pgoff, pgoff_t end_pgoff);
2215extern int filemap_page_mkwrite(struct vm_fault *vmf);
2216
2217/* mm/page-writeback.c */
2218int __must_check write_one_page(struct page *page);
2219void task_dirty_inc(struct task_struct *tsk);
2220
2221/* readahead.c */
2222#define VM_MAX_READAHEAD        128     /* kbytes */
2223#define VM_MIN_READAHEAD        16      /* kbytes (includes current page) */
2224
2225int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
2226                        pgoff_t offset, unsigned long nr_to_read);
2227
2228void page_cache_sync_readahead(struct address_space *mapping,
2229                               struct file_ra_state *ra,
2230                               struct file *filp,
2231                               pgoff_t offset,
2232                               unsigned long size);
2233
2234void page_cache_async_readahead(struct address_space *mapping,
2235                                struct file_ra_state *ra,
2236                                struct file *filp,
2237                                struct page *pg,
2238                                pgoff_t offset,
2239                                unsigned long size);
2240
2241extern unsigned long stack_guard_gap;
2242/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
2243extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
2244
2245/* CONFIG_STACK_GROWSUP still needs to to grow downwards at some places */
2246extern int expand_downwards(struct vm_area_struct *vma,
2247                unsigned long address);
2248#if VM_GROWSUP
2249extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
2250#else
2251  #define expand_upwards(vma, address) (0)
2252#endif
2253
2254/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
2255extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
2256extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
2257                                             struct vm_area_struct **pprev);
2258
2259/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
2260   NULL if none.  Assume start_addr < end_addr. */
2261static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
2262{
2263        struct vm_area_struct * vma = find_vma(mm,start_addr);
2264
2265        if (vma && end_addr <= vma->vm_start)
2266                vma = NULL;
2267        return vma;
2268}
2269
2270static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
2271{
2272        unsigned long vm_start = vma->vm_start;
2273
2274        if (vma->vm_flags & VM_GROWSDOWN) {
2275                vm_start -= stack_guard_gap;
2276                if (vm_start > vma->vm_start)
2277                        vm_start = 0;
2278        }
2279        return vm_start;
2280}
2281
2282static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
2283{
2284        unsigned long vm_end = vma->vm_end;
2285
2286        if (vma->vm_flags & VM_GROWSUP) {
2287                vm_end += stack_guard_gap;
2288                if (vm_end < vma->vm_end)
2289                        vm_end = -PAGE_SIZE;
2290        }
2291        return vm_end;
2292}
2293
2294static inline unsigned long vma_pages(struct vm_area_struct *vma)
2295{
2296        return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
2297}
2298
2299/* Look up the first VMA which exactly match the interval vm_start ... vm_end */
2300static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
2301                                unsigned long vm_start, unsigned long vm_end)
2302{
2303        struct vm_area_struct *vma = find_vma(mm, vm_start);
2304
2305        if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
2306                vma = NULL;
2307
2308        return vma;
2309}
2310
2311#ifdef CONFIG_MMU
2312pgprot_t vm_get_page_prot(unsigned long vm_flags);
2313void vma_set_page_prot(struct vm_area_struct *vma);
2314#else
2315static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
2316{
2317        return __pgprot(0);
2318}
2319static inline void vma_set_page_prot(struct vm_area_struct *vma)
2320{
2321        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2322}
2323#endif
2324
2325#ifdef CONFIG_NUMA_BALANCING
2326unsigned long change_prot_numa(struct vm_area_struct *vma,
2327                        unsigned long start, unsigned long end);
2328#endif
2329
2330struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
2331int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
2332                        unsigned long pfn, unsigned long size, pgprot_t);
2333int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
2334int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2335                        unsigned long pfn);
2336int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2337                        unsigned long pfn, pgprot_t pgprot);
2338int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2339                        pfn_t pfn);
2340int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr,
2341                        pfn_t pfn);
2342int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
2343
2344
2345struct page *follow_page_mask(struct vm_area_struct *vma,
2346                              unsigned long address, unsigned int foll_flags,
2347                              unsigned int *page_mask);
2348
2349static inline struct page *follow_page(struct vm_area_struct *vma,
2350                unsigned long address, unsigned int foll_flags)
2351{
2352        unsigned int unused_page_mask;
2353        return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
2354}
2355
2356#define FOLL_WRITE      0x01    /* check pte is writable */
2357#define FOLL_TOUCH      0x02    /* mark page accessed */
2358#define FOLL_GET        0x04    /* do get_page on page */
2359#define FOLL_DUMP       0x08    /* give error on hole if it would be zero */
2360#define FOLL_FORCE      0x10    /* get_user_pages read/write w/o permission */
2361#define FOLL_NOWAIT     0x20    /* if a disk transfer is needed, start the IO
2362                                 * and return without waiting upon it */
2363#define FOLL_POPULATE   0x40    /* fault in page */
2364#define FOLL_SPLIT      0x80    /* don't return transhuge pages, split them */
2365#define FOLL_HWPOISON   0x100   /* check page is hwpoisoned */
2366#define FOLL_NUMA       0x200   /* force NUMA hinting page fault */
2367#define FOLL_MIGRATION  0x400   /* wait for page to replace migration entry */
2368#define FOLL_TRIED      0x800   /* a retry, previous pass started an IO */
2369#define FOLL_MLOCK      0x1000  /* lock present pages */
2370#define FOLL_REMOTE     0x2000  /* we are working on non-current tsk/mm */
2371#define FOLL_COW        0x4000  /* internal GUP flag */
2372
2373static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
2374{
2375        if (vm_fault & VM_FAULT_OOM)
2376                return -ENOMEM;
2377        if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
2378                return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
2379        if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
2380                return -EFAULT;
2381        return 0;
2382}
2383
2384typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
2385                        void *data);
2386extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
2387                               unsigned long size, pte_fn_t fn, void *data);
2388
2389
2390#ifdef CONFIG_PAGE_POISONING
2391extern bool page_poisoning_enabled(void);
2392extern void kernel_poison_pages(struct page *page, int numpages, int enable);
2393extern bool page_is_poisoned(struct page *page);
2394#else
2395static inline bool page_poisoning_enabled(void) { return false; }
2396static inline void kernel_poison_pages(struct page *page, int numpages,
2397                                        int enable) { }
2398static inline bool page_is_poisoned(struct page *page) { return false; }
2399#endif
2400
2401#ifdef CONFIG_DEBUG_PAGEALLOC
2402extern bool _debug_pagealloc_enabled;
2403extern void __kernel_map_pages(struct page *page, int numpages, int enable);
2404
2405static inline bool debug_pagealloc_enabled(void)
2406{
2407        return _debug_pagealloc_enabled;
2408}
2409
2410static inline void
2411kernel_map_pages(struct page *page, int numpages, int enable)
2412{
2413        if (!debug_pagealloc_enabled())
2414                return;
2415
2416        __kernel_map_pages(page, numpages, enable);
2417}
2418#ifdef CONFIG_HIBERNATION
2419extern bool kernel_page_present(struct page *page);
2420#endif  /* CONFIG_HIBERNATION */
2421#else   /* CONFIG_DEBUG_PAGEALLOC */
2422static inline void
2423kernel_map_pages(struct page *page, int numpages, int enable) {}
2424#ifdef CONFIG_HIBERNATION
2425static inline bool kernel_page_present(struct page *page) { return true; }
2426#endif  /* CONFIG_HIBERNATION */
2427static inline bool debug_pagealloc_enabled(void)
2428{
2429        return false;
2430}
2431#endif  /* CONFIG_DEBUG_PAGEALLOC */
2432
2433#ifdef __HAVE_ARCH_GATE_AREA
2434extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
2435extern int in_gate_area_no_mm(unsigned long addr);
2436extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
2437#else
2438static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
2439{
2440        return NULL;
2441}
2442static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
2443static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
2444{
2445        return 0;
2446}
2447#endif  /* __HAVE_ARCH_GATE_AREA */
2448
2449extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
2450
2451#ifdef CONFIG_SYSCTL
2452extern int sysctl_drop_caches;
2453int drop_caches_sysctl_handler(struct ctl_table *, int,
2454                                        void __user *, size_t *, loff_t *);
2455#endif
2456
2457void drop_slab(void);
2458void drop_slab_node(int nid);
2459
2460#ifndef CONFIG_MMU
2461#define randomize_va_space 0
2462#else
2463extern int randomize_va_space;
2464#endif
2465
2466const char * arch_vma_name(struct vm_area_struct *vma);
2467void print_vma_addr(char *prefix, unsigned long rip);
2468
2469void sparse_mem_maps_populate_node(struct page **map_map,
2470                                   unsigned long pnum_begin,
2471                                   unsigned long pnum_end,
2472                                   unsigned long map_count,
2473                                   int nodeid);
2474
2475struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
2476pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
2477p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
2478pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
2479pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
2480pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
2481void *vmemmap_alloc_block(unsigned long size, int node);
2482struct vmem_altmap;
2483void *__vmemmap_alloc_block_buf(unsigned long size, int node,
2484                struct vmem_altmap *altmap);
2485static inline void *vmemmap_alloc_block_buf(unsigned long size, int node)
2486{
2487        return __vmemmap_alloc_block_buf(size, node, NULL);
2488}
2489
2490void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
2491int vmemmap_populate_basepages(unsigned long start, unsigned long end,
2492                               int node);
2493int vmemmap_populate(unsigned long start, unsigned long end, int node);
2494void vmemmap_populate_print_last(void);
2495#ifdef CONFIG_MEMORY_HOTPLUG
2496void vmemmap_free(unsigned long start, unsigned long end);
2497#endif
2498void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
2499                                  unsigned long size);
2500
2501enum mf_flags {
2502        MF_COUNT_INCREASED = 1 << 0,
2503        MF_ACTION_REQUIRED = 1 << 1,
2504        MF_MUST_KILL = 1 << 2,
2505        MF_SOFT_OFFLINE = 1 << 3,
2506};
2507extern int memory_failure(unsigned long pfn, int trapno, int flags);
2508extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
2509extern int unpoison_memory(unsigned long pfn);
2510extern int get_hwpoison_page(struct page *page);
2511#define put_hwpoison_page(page) put_page(page)
2512extern int sysctl_memory_failure_early_kill;
2513extern int sysctl_memory_failure_recovery;
2514extern void shake_page(struct page *p, int access);
2515extern atomic_long_t num_poisoned_pages;
2516extern int soft_offline_page(struct page *page, int flags);
2517
2518
2519/*
2520 * Error handlers for various types of pages.
2521 */
2522enum mf_result {
2523        MF_IGNORED,     /* Error: cannot be handled */
2524        MF_FAILED,      /* Error: handling failed */
2525        MF_DELAYED,     /* Will be handled later */
2526        MF_RECOVERED,   /* Successfully recovered */
2527};
2528
2529enum mf_action_page_type {
2530        MF_MSG_KERNEL,
2531        MF_MSG_KERNEL_HIGH_ORDER,
2532        MF_MSG_SLAB,
2533        MF_MSG_DIFFERENT_COMPOUND,
2534        MF_MSG_POISONED_HUGE,
2535        MF_MSG_HUGE,
2536        MF_MSG_FREE_HUGE,
2537        MF_MSG_UNMAP_FAILED,
2538        MF_MSG_DIRTY_SWAPCACHE,
2539        MF_MSG_CLEAN_SWAPCACHE,
2540        MF_MSG_DIRTY_MLOCKED_LRU,
2541        MF_MSG_CLEAN_MLOCKED_LRU,
2542        MF_MSG_DIRTY_UNEVICTABLE_LRU,
2543        MF_MSG_CLEAN_UNEVICTABLE_LRU,
2544        MF_MSG_DIRTY_LRU,
2545        MF_MSG_CLEAN_LRU,
2546        MF_MSG_TRUNCATED_LRU,
2547        MF_MSG_BUDDY,
2548        MF_MSG_BUDDY_2ND,
2549        MF_MSG_UNKNOWN,
2550};
2551
2552#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
2553extern void clear_huge_page(struct page *page,
2554                            unsigned long addr_hint,
2555                            unsigned int pages_per_huge_page);
2556extern void copy_user_huge_page(struct page *dst, struct page *src,
2557                                unsigned long addr, struct vm_area_struct *vma,
2558                                unsigned int pages_per_huge_page);
2559extern long copy_huge_page_from_user(struct page *dst_page,
2560                                const void __user *usr_src,
2561                                unsigned int pages_per_huge_page,
2562                                bool allow_pagefault);
2563#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
2564
2565extern struct page_ext_operations debug_guardpage_ops;
2566
2567#ifdef CONFIG_DEBUG_PAGEALLOC
2568extern unsigned int _debug_guardpage_minorder;
2569extern bool _debug_guardpage_enabled;
2570
2571static inline unsigned int debug_guardpage_minorder(void)
2572{
2573        return _debug_guardpage_minorder;
2574}
2575
2576static inline bool debug_guardpage_enabled(void)
2577{
2578        return _debug_guardpage_enabled;
2579}
2580
2581static inline bool page_is_guard(struct page *page)
2582{
2583        struct page_ext *page_ext;
2584
2585        if (!debug_guardpage_enabled())
2586                return false;
2587
2588        page_ext = lookup_page_ext(page);
2589        if (unlikely(!page_ext))
2590                return false;
2591
2592        return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
2593}
2594#else
2595static inline unsigned int debug_guardpage_minorder(void) { return 0; }
2596static inline bool debug_guardpage_enabled(void) { return false; }
2597static inline bool page_is_guard(struct page *page) { return false; }
2598#endif /* CONFIG_DEBUG_PAGEALLOC */
2599
2600#if MAX_NUMNODES > 1
2601void __init setup_nr_node_ids(void);
2602#else
2603static inline void setup_nr_node_ids(void) {}
2604#endif
2605
2606#endif /* __KERNEL__ */
2607#endif /* _LINUX_MM_H */
2608