linux/include/linux/mm.h
   1#ifndef _LINUX_MM_H
   2#define _LINUX_MM_H
   3
   4#include <linux/errno.h>
   5
   6#ifdef __KERNEL__
   7
   8#include <linux/mmdebug.h>
   9#include <linux/gfp.h>
  10#include <linux/bug.h>
  11#include <linux/list.h>
  12#include <linux/mmzone.h>
  13#include <linux/rbtree.h>
  14#include <linux/atomic.h>
  15#include <linux/debug_locks.h>
  16#include <linux/mm_types.h>
  17#include <linux/range.h>
  18#include <linux/pfn.h>
  19#include <linux/percpu-refcount.h>
  20#include <linux/bit_spinlock.h>
  21#include <linux/shrinker.h>
  22#include <linux/resource.h>
  23#include <linux/err.h>
  24#include <linux/page_ref.h>
  25#include <linux/page_ext.h>
  26
  27struct mempolicy;
  28struct anon_vma;
  29struct anon_vma_chain;
  30struct file_ra_state;
  31struct user_struct;
  32struct writeback_control;
  33
  34#ifndef CONFIG_DISCONTIGMEM          /* Don't use mapnrs, do it properly */
  35extern unsigned long max_mapnr;
  36#endif
  37
  38extern unsigned long num_physpages;
  39extern unsigned long totalram_pages;
  40extern unsigned long totalcma_pages;
  41extern void * high_memory;
  42extern int page_cluster;
  43
  44#ifdef CONFIG_SYSCTL
  45extern int sysctl_legacy_va_layout;
  46#else
  47#define sysctl_legacy_va_layout 0
  48#endif
  49
  50#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
  51extern const int mmap_rnd_bits_min;
  52extern const int mmap_rnd_bits_max;
  53extern int mmap_rnd_bits __read_mostly;
  54#endif
  55#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
  56extern const int mmap_rnd_compat_bits_min;
  57extern const int mmap_rnd_compat_bits_max;
  58extern int mmap_rnd_compat_bits __read_mostly;
  59#endif
  60
  61#include <asm/page.h>
  62#include <asm/pgtable.h>
  63#include <asm/processor.h>
  64
  65#ifndef __pa_symbol
  66#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
  67#endif
  68
  69extern unsigned long sysctl_user_reserve_kbytes;
  70extern unsigned long sysctl_admin_reserve_kbytes;
  71
  72extern int sysctl_overcommit_memory;
  73extern int sysctl_overcommit_ratio;
  74extern unsigned long sysctl_overcommit_kbytes;
  75
  76extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
  77                                    size_t *, loff_t *);
  78extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
  79                                    size_t *, loff_t *);
  80
  81#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
  82
  83/* to align the pointer to the (next) page boundary */
  84#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
  85
  86/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
  87#define PAGE_ALIGNED(addr)      IS_ALIGNED((unsigned long)addr, PAGE_SIZE)
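
/*
 * Editorial example (not part of the original header): a minimal sketch of
 * how PAGE_ALIGN() is typically used -- rounding a byte length up to whole
 * pages.  The helper name is illustrative only.
 */
static inline unsigned long example_bytes_to_pages(unsigned long len)
{
        /* PAGE_ALIGN() rounds up to the next page boundary. */
        return PAGE_ALIGN(len) >> PAGE_SHIFT;
}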
  88
  89/*
  90 * Linux kernel virtual memory manager primitives.
  91 * The idea being to have a "virtual" mm in the same way
  92 * we have a virtual fs - giving a cleaner interface to the
  93 * mm details, and allowing different kinds of memory mappings
  94 * (from shared memory to executable loading to arbitrary
  95 * mmap() functions).
  96 */
  97
  98extern struct kmem_cache *vm_area_cachep;
  99
 100#ifndef CONFIG_MMU
 101extern struct rb_root nommu_region_tree;
 102extern struct rw_semaphore nommu_region_sem;
 103
 104extern unsigned int kobjsize(const void *objp);
 105#endif
 106
 107/*
 108 * vm_flags in vm_area_struct, see mm_types.h.
 109 */
 110#define VM_NONE         0x00000000
 111
 112#define VM_READ         0x00000001      /* currently active flags */
 113#define VM_WRITE        0x00000002
 114#define VM_EXEC         0x00000004
 115#define VM_SHARED       0x00000008
 116
 117/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
 118#define VM_MAYREAD      0x00000010      /* limits for mprotect() etc */
 119#define VM_MAYWRITE     0x00000020
 120#define VM_MAYEXEC      0x00000040
 121#define VM_MAYSHARE     0x00000080
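
/*
 * Editorial example (not part of the original header): because of the fixed
 * layout noted above, the "currently active" r/w/x/shared bits can be derived
 * from the VM_MAY* limits with a single shift, which is what mprotect()
 * relies on.  Illustrative helper only.
 */
static inline unsigned long example_may_to_active(unsigned long vm_flags)
{
        /* VM_MAYREAD >> 4 == VM_READ, and likewise for write/exec/share. */
        return (vm_flags & (VM_MAYREAD | VM_MAYWRITE |
                            VM_MAYEXEC | VM_MAYSHARE)) >> 4;
}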
 122
 123#define VM_GROWSDOWN    0x00000100      /* general info on the segment */
 124#define VM_UFFD_MISSING 0x00000200      /* missing pages tracking */
 125#define VM_PFNMAP       0x00000400      /* Page-ranges managed without "struct page", just pure PFN */
 126#define VM_DENYWRITE    0x00000800      /* ETXTBSY on write attempts.. */
 127#define VM_UFFD_WP      0x00001000      /* wrprotect pages tracking */
 128
 129#define VM_LOCKED       0x00002000
 130#define VM_IO           0x00004000      /* Memory mapped I/O or similar */
 131
 132                                        /* Used by sys_madvise() */
 133#define VM_SEQ_READ     0x00008000      /* App will access data sequentially */
 134#define VM_RAND_READ    0x00010000      /* App will not benefit from clustered reads */
 135
 136#define VM_DONTCOPY     0x00020000      /* Do not copy this vma on fork */
 137#define VM_DONTEXPAND   0x00040000      /* Cannot expand with mremap() */
  138#define VM_FOP_EXTEND   0x00080000      /* RH usage only, not for external module use */
 139
 140#define VM_ACCOUNT      0x00100000      /* Is a VM accounted object */
 141#define VM_NORESERVE    0x00200000      /* should the VM suppress accounting */
 142#define VM_HUGETLB      0x00400000      /* Huge TLB Page VM */
 143#define VM_NONLINEAR    0x00800000      /* Is non-linear (remap_file_pages) */
 144#define VM_ARCH_1       0x01000000      /* Architecture-specific flag */
 145#define VM_ARCH_2       0x02000000
 146#define VM_DONTDUMP     0x04000000      /* Do not include in the core dump */
 147
 148#ifdef CONFIG_MEM_SOFT_DIRTY
 149# define VM_SOFTDIRTY   0x08000000      /* Not soft dirty clean area */
 150#else
 151# define VM_SOFTDIRTY   0
 152#endif
 153
 154#define VM_MIXEDMAP     0x10000000      /* Can contain "struct page" and pure PFN pages */
 155#define VM_HUGEPAGE     0x20000000      /* MADV_HUGEPAGE marked this vma */
 156#define VM_NOHUGEPAGE   0x40000000      /* MADV_NOHUGEPAGE marked this vma */
 157#define VM_MERGEABLE    0x80000000      /* KSM may merge identical pages */
 158
 159#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
 160#define VM_HIGH_ARCH_BIT_0      32      /* bit only usable on 64-bit architectures */
 161#define VM_HIGH_ARCH_BIT_1      33      /* bit only usable on 64-bit architectures */
 162#define VM_HIGH_ARCH_BIT_2      34      /* bit only usable on 64-bit architectures */
 163#define VM_HIGH_ARCH_BIT_3      35      /* bit only usable on 64-bit architectures */
 164#define VM_HIGH_ARCH_0  BIT(VM_HIGH_ARCH_BIT_0)
 165#define VM_HIGH_ARCH_1  BIT(VM_HIGH_ARCH_BIT_1)
 166#define VM_HIGH_ARCH_2  BIT(VM_HIGH_ARCH_BIT_2)
 167#define VM_HIGH_ARCH_3  BIT(VM_HIGH_ARCH_BIT_3)
 168#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
 169
 170/*
 171 * vm_flags2 in vm_area_struct, see mm_types.h.
 172 */
 173#define VM_PFN_MKWRITE  0x00000001      /* vm_operations_struct includes pfn_mkwrite */
 174#define VM_HUGE_FAULT   0x00000002      /* vm_operations_struct includes huge_fault */
 175#define VM_SPLIT        0x00000004      /* vm_operations_struct includes split */
 176
 177#if defined(CONFIG_X86)
 178# define VM_PAT         VM_ARCH_1       /* PAT reserves whole VMA at once (x86) */
 179#if defined (CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
 180# define VM_PKEY_SHIFT  VM_HIGH_ARCH_BIT_0
 181# define VM_PKEY_BIT0   VM_HIGH_ARCH_0  /* A protection key is a 4-bit value */
 182# define VM_PKEY_BIT1   VM_HIGH_ARCH_1
 183# define VM_PKEY_BIT2   VM_HIGH_ARCH_2
 184# define VM_PKEY_BIT3   VM_HIGH_ARCH_3
 185#endif
 186#elif defined(CONFIG_PPC)
 187# define VM_SAO         VM_ARCH_1       /* Strong Access Ordering (powerpc) */
 188#elif defined(CONFIG_PARISC)
 189# define VM_GROWSUP     VM_ARCH_1
 190#elif defined(CONFIG_METAG)
 191# define VM_GROWSUP     VM_ARCH_1
 192#elif defined(CONFIG_IA64)
 193# define VM_GROWSUP     VM_ARCH_1
 194#elif !defined(CONFIG_MMU)
 195# define VM_MAPPED_COPY VM_ARCH_1       /* T if mapped copy of data (nommu mmap) */
 196#endif
 197
 198#if defined(CONFIG_X86)
 199/* MPX specific bounds table or bounds directory */
 200# define VM_MPX         VM_ARCH_2
 201#endif
 202
 203#ifndef VM_GROWSUP
 204# define VM_GROWSUP     VM_NONE
 205#endif
 206
 207/* Bits set in the VMA until the stack is in its final location */
 208#define VM_STACK_INCOMPLETE_SETUP       (VM_RAND_READ | VM_SEQ_READ)
 209
 210#ifndef VM_STACK_DEFAULT_FLAGS          /* arch can override this */
 211#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
 212#endif
 213
 214#ifdef CONFIG_STACK_GROWSUP
 215#define VM_STACK_FLAGS  (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
 216#else
 217#define VM_STACK_FLAGS  (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
 218#endif
 219
 220#define VM_READHINTMASK                 (VM_SEQ_READ | VM_RAND_READ)
 221#define VM_ClearReadHint(v)             (v)->vm_flags &= ~VM_READHINTMASK
 222#define VM_NormalReadHint(v)            (!((v)->vm_flags & VM_READHINTMASK))
 223#define VM_SequentialReadHint(v)        ((v)->vm_flags & VM_SEQ_READ)
 224#define VM_RandomReadHint(v)            ((v)->vm_flags & VM_RAND_READ)
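
/*
 * Editorial example (not part of the original header): madvise(MADV_SEQUENTIAL)
 * and madvise(MADV_RANDOM) set VM_SEQ_READ and VM_RAND_READ respectively, and
 * readahead-style code can branch on the helpers above.  The helper and the
 * scaling factor below are illustrative assumptions, not kernel policy.
 */
static inline unsigned long example_ra_window(struct vm_area_struct *vma,
                                              unsigned long ra_pages)
{
        if (VM_RandomReadHint(vma))
                return 0;               /* readahead would be wasted */
        if (VM_SequentialReadHint(vma))
                return ra_pages * 2;    /* read further ahead */
        return ra_pages;
}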
 225
 226/*
  227 * Special vmas that are non-mergeable, non-mlock()able.
 228 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 229 */
 230#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)
 231
  232/* This mask defines which mm->def_flags a process can inherit from its parent */
 233#define VM_INIT_DEF_MASK        VM_NOHUGEPAGE
 234
 235/*
 236 * mapping from the currently active vm_flags protection bits (the
 237 * low four bits) to a page protection mask..
 238 */
 239extern pgprot_t protection_map[16];
 240
 241#define FAULT_FLAG_WRITE        0x01    /* Fault was a write access */
 242#define FAULT_FLAG_NONLINEAR    0x02    /* Fault was via a nonlinear mapping */
 243#define FAULT_FLAG_MKWRITE      0x04    /* Fault was mkwrite of existing pte */
 244#define FAULT_FLAG_ALLOW_RETRY  0x08    /* Retry fault if blocking */
 245#define FAULT_FLAG_RETRY_NOWAIT 0x10    /* Don't drop mmap_sem and wait when retrying */
 246#define FAULT_FLAG_KILLABLE     0x20    /* The fault task is in SIGKILL killable region */
 247#define FAULT_FLAG_TRIED        0x40    /* second try */
 248#define FAULT_FLAG_USER         0x80    /* The fault originated in userspace */
 249#define FAULT_FLAG_REMOTE       0x100   /* faulting for non current tsk/mm */
 250#define FAULT_FLAG_INSTRUCTION  0x200   /* The fault was during an instruction fetch */
 251
 252#define FAULT_FLAG_TRACE \
 253        { FAULT_FLAG_WRITE,             "WRITE" }, \
 254        { FAULT_FLAG_MKWRITE,           "MKWRITE" }, \
 255        { FAULT_FLAG_ALLOW_RETRY,       "ALLOW_RETRY" }, \
 256        { FAULT_FLAG_RETRY_NOWAIT,      "RETRY_NOWAIT" }, \
 257        { FAULT_FLAG_KILLABLE,          "KILLABLE" }, \
 258        { FAULT_FLAG_TRIED,             "TRIED" }, \
 259        { FAULT_FLAG_USER,              "USER" }, \
 260        { FAULT_FLAG_REMOTE,            "REMOTE" }, \
 261        { FAULT_FLAG_INSTRUCTION,       "INSTRUCTION" }
 262
 263/*
  264 * vm_fault is filled by the pagefault handler and passed to the vma's
 265 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 266 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 267 *
 268 * pgoff should be used in favour of virtual_address, if possible. If pgoff
 269 * is used, one may implement ->remap_pages to get nonlinear mapping support.
 270 *
 271 * MM layer fills up gfp_mask for page allocations but fault handler might
 272 * alter it if its implementation requires a different allocation context.
 273 */
 274struct vm_fault {
 275        unsigned int flags;             /* FAULT_FLAG_xxx flags */
 276        pgoff_t pgoff;                  /* Logical page offset based on vma */
 277        void __user *virtual_address;   /* Faulting virtual address */
 278
 279        struct page *page;              /* ->fault handlers should return a
 280                                         * page here, unless VM_FAULT_NOPAGE
 281                                         * is set (which is also implied by
 282                                         * VM_FAULT_ERROR).
 283                                         */
 284        RH_KABI_EXTEND(struct page *cow_page)   /* Handler may choose to COW */
 285        RH_KABI_EXTEND(pte_t orig_pte)  /* Value of PTE at the time of fault */
 286        RH_KABI_EXTEND(pmd_t *pmd)      /* Pointer to pmd entry matching
 287                                         * the 'virtual_address'
 288                                         */
 289        RH_KABI_EXTEND(struct vm_area_struct *vma)      /* Target VMA */
 290        RH_KABI_EXTEND(gfp_t gfp_mask)  /* gfp mask to be used for allocations */
 291        RH_KABI_EXTEND(pte_t *pte)
 292        RH_KABI_EXTEND(pud_t *pud)              /* Pointer to pud entry matching
 293                                                 * the address
 294                                                 */
 295};
 296
 297/* page entry size for vm->huge_fault() */
 298enum page_entry_size {
 299        PE_SIZE_PTE = 0,
 300        PE_SIZE_PMD,
 301        PE_SIZE_PUD,
 302};
 303
 304/*
 305 * These are the virtual MM functions - opening of an area, closing and
  306 * unmapping it (needed to keep files on disk up-to-date etc), and pointers
  307 * to the functions called when a no-page or a wp-page exception occurs.
 308 */
 309struct vm_operations_struct {
 310        void (*open)(struct vm_area_struct * area);
 311        void (*close)(struct vm_area_struct * area);
 312        int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
 313
 314        /* notification that a previously read-only page is about to become
  315         * writable; if an error is returned it will cause a SIGBUS */
 316        int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
 317
 318        /* called by access_process_vm when get_user_pages() fails, typically
 319         * for use by special VMAs that can switch between memory and hardware
 320         */
 321        int (*access)(struct vm_area_struct *vma, unsigned long addr,
 322                      void *buf, int len, int write);
 323#ifdef CONFIG_NUMA
 324        /*
 325         * set_policy() op must add a reference to any non-NULL @new mempolicy
 326         * to hold the policy upon return.  Caller should pass NULL @new to
 327         * remove a policy and fall back to surrounding context--i.e. do not
 328         * install a MPOL_DEFAULT policy, nor the task or system default
 329         * mempolicy.
 330         */
 331        int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
 332
 333        /*
 334         * get_policy() op must add reference [mpol_get()] to any policy at
 335         * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
 336         * in mm/mempolicy.c will do this automatically.
 337         * get_policy() must NOT add a ref if the policy at (vma,addr) is not
 338         * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
 339         * If no [shared/vma] mempolicy exists at the addr, get_policy() op
 340         * must return NULL--i.e., do not "fallback" to task or system default
 341         * policy.
 342         */
 343        struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
 344                                        unsigned long addr);
 345        int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
 346                const nodemask_t *to, unsigned long flags);
 347#endif
 348        /* called by sys_remap_file_pages() to populate non-linear mapping */
 349        int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
 350                           unsigned long size, pgoff_t pgoff);
 351
 352        /* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
 353        RH_KABI_EXTEND(int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf))
 354        RH_KABI_EXTEND(int (*huge_fault)(struct vm_fault *vmf,
 355                                         enum page_entry_size pe_size))
 356        RH_KABI_EXTEND(int (*split)(struct vm_area_struct *area,
 357                                    unsigned long addr))
 358};
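
/*
 * Editorial example (not part of the original header): a minimal ->fault
 * implementation for a driver that backs a mapping with pages it already
 * owns.  example_dev_page() is a hypothetical lookup; a real handler must
 * return the page with a reference held (or set VM_FAULT_NOPAGE instead).
 */
#if 0   /* illustration only */
static int example_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page = example_dev_page(vma->vm_private_data, vmf->pgoff);

        if (!page)
                return VM_FAULT_SIGBUS;

        get_page(page);
        vmf->page = page;
        return 0;
}

static const struct vm_operations_struct example_vm_ops = {
        .fault = example_vm_fault,
};
#endif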
 359
 360struct mmu_gather;
 361struct inode;
 362
 363#define page_private(page)              ((page)->private)
 364#define set_page_private(page, v)       ((page)->private = (v))
 365
  366/* Valid only while the page is in the free path or on a free_list */
 367static inline void set_freepage_migratetype(struct page *page, int migratetype)
 368{
 369        page->index = migratetype;
 370}
 371
  372/* Valid only while the page is in the free path or on a free_list */
 373static inline int get_freepage_migratetype(struct page *page)
 374{
 375        return page->index;
 376}
 377
 378#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
 379static inline int pmd_devmap(pmd_t pmd)
 380{
 381        return 0;
 382}
 383static inline int pud_devmap(pud_t pud)
 384{
 385        return 0;
 386}
 387#endif
 388
 389/*
 390 * FIXME: take this include out, include page-flags.h in
 391 * files which need it (119 of them)
 392 */
 393#include <linux/page-flags.h>
 394#include <linux/huge_mm.h>
 395
 396/*
 397 * Methods to modify the page usage count.
 398 *
 399 * What counts for a page usage:
 400 * - cache mapping   (page->mapping)
 401 * - private data    (page->private)
 402 * - page mapped in a task's page tables, each mapping
 403 *   is counted separately
 404 *
 405 * Also, many kernel routines increase the page count before a critical
 406 * routine so they can be sure the page doesn't go away from under them.
 407 */
 408
 409/*
 410 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 411 */
 412static inline int put_page_testzero(struct page *page)
 413{
 414        VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
 415        return page_ref_dec_and_test(page);
 416}
 417
 418/*
 419 * Try to grab a ref unless the page has a refcount of zero, return false if
 420 * that is the case.
 421 */
 422static inline int get_page_unless_zero(struct page *page)
 423{
 424        return page_ref_add_unless(page, 1, 0);
 425}
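
/*
 * Editorial example (not part of the original header): the usual speculative
 * reference pattern built on get_page_unless_zero().  A lockless lookup can
 * race with the page being freed, so the reference is taken conditionally and
 * the caller is expected to re-validate the page afterwards.
 */
static inline struct page *example_try_pin(struct page *page)
{
        if (!page || !get_page_unless_zero(page))
                return NULL;    /* page is (being) freed; caller must retry */
        return page;            /* caller now owns one reference */
}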
 426
 427extern int page_is_ram(unsigned long pfn);
 428
 429enum {
 430        REGION_INTERSECTS,
 431        REGION_DISJOINT,
 432        REGION_MIXED,
 433};
 434
 435int region_intersects(resource_size_t offset, size_t size, const char *type,
 436                        unsigned long flags);
 437int region_intersects_ram(resource_size_t offset, size_t size);
 438int region_intersects_pmem(resource_size_t offset, size_t size);
 439
 440/* Support for virtually mapped pages */
 441struct page *vmalloc_to_page(const void *addr);
 442unsigned long vmalloc_to_pfn(const void *addr);
 443
 444/*
 445 * Determine if an address is within the vmalloc range
 446 *
 447 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 448 * is no special casing required.
 449 */
 450static inline int is_vmalloc_addr(const void *x)
 451{
 452#ifdef CONFIG_MMU
 453        unsigned long addr = (unsigned long)x;
 454
 455        return addr >= VMALLOC_START && addr < VMALLOC_END;
 456#else
 457        return 0;
 458#endif
 459}
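
/*
 * Editorial example (not part of the original header): walking the pages that
 * back a vmalloc() area, e.g. to build a scatterlist.  Only meaningful for
 * addresses where is_vmalloc_addr() is true; the virtual range is contiguous
 * in PAGE_SIZE steps even though the backing pages are not.
 */
static inline void example_for_each_vmalloc_page(void *buf, unsigned long len,
                                                 void (*fn)(struct page *))
{
        char *p;

        for (p = buf; p < (char *)buf + len; p += PAGE_SIZE)
                fn(vmalloc_to_page(p));
}
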
 460#ifdef CONFIG_MMU
 461extern int is_vmalloc_or_module_addr(const void *x);
 462#else
 463static inline int is_vmalloc_or_module_addr(const void *x)
 464{
 465        return 0;
 466}
 467#endif
 468
 469extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
 470static inline void *kvmalloc(size_t size, gfp_t flags)
 471{
 472        return kvmalloc_node(size, flags, NUMA_NO_NODE);
 473}
 474static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
 475{
 476        return kvmalloc_node(size, flags | __GFP_ZERO, node);
 477}
 478static inline void *kvzalloc(size_t size, gfp_t flags)
 479{
 480        return kvmalloc(size, flags | __GFP_ZERO);
 481}
 482
 483static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
 484{
 485        if (size != 0 && n > SIZE_MAX / size)
 486                return NULL;
 487
 488        return kvmalloc(n * size, flags);
 489}
 490
 491extern void kvfree(const void *addr);
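
/*
 * Editorial example (not part of the original header): the usual kvmalloc()
 * pattern for a table whose size depends on the caller -- try kmalloc first,
 * fall back to vmalloc for large sizes, and always release with kvfree()
 * because the caller cannot tell which allocator was used.  struct
 * example_table is a hypothetical type used only for illustration.
 */
#if 0   /* illustration only */
static int example_alloc_slots(struct example_table *t, size_t nr)
{
        t->slots = kvmalloc_array(nr, sizeof(*t->slots), GFP_KERNEL);
        if (!t->slots)
                return -ENOMEM;
        t->nr_slots = nr;
        return 0;
}

static void example_free_slots(struct example_table *t)
{
        kvfree(t->slots);
        t->slots = NULL;
}
#endif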
 492
 493static inline void compound_lock(struct page *page)
 494{
 495#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 496        VM_BUG_ON_PAGE(PageSlab(page), page);
 497        bit_spin_lock(PG_compound_lock, &page->flags);
 498#endif
 499}
 500
 501static inline void compound_unlock(struct page *page)
 502{
 503#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 504        VM_BUG_ON_PAGE(PageSlab(page), page);
 505        bit_spin_unlock(PG_compound_lock, &page->flags);
 506#endif
 507}
 508
 509static inline unsigned long compound_lock_irqsave(struct page *page)
 510{
 511        unsigned long uninitialized_var(flags);
 512#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 513        local_irq_save(flags);
 514        compound_lock(page);
 515#endif
 516        return flags;
 517}
 518
 519static inline void compound_unlock_irqrestore(struct page *page,
 520                                              unsigned long flags)
 521{
 522#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 523        compound_unlock(page);
 524        local_irq_restore(flags);
 525#endif
 526}
 527
 528/*
  529 * The atomic page->_mapcount starts from -1, so that transitions
 530 * both from it and to it can be tracked, using atomic_inc_and_test
 531 * and atomic_add_negative(-1).
 532 */
 533static inline void page_mapcount_reset(struct page *page)
 534{
 535        atomic_set(&(page)->_mapcount, -1);
 536}
 537
 538static inline int page_mapcount(struct page *page)
 539{
 540        return atomic_read(&(page)->_mapcount) + 1;
 541}
 542
 543#ifdef CONFIG_HUGETLB_PAGE
 544extern int PageHeadHuge(struct page *page_head);
 545#else /* CONFIG_HUGETLB_PAGE */
 546static inline int PageHeadHuge(struct page *page_head)
 547{
 548        return 0;
 549}
 550#endif /* CONFIG_HUGETLB_PAGE */
 551
 552static inline bool __compound_tail_refcounted(struct page *page)
 553{
 554        return !PageSlab(page) && !PageHeadHuge(page);
 555}
 556
 557/*
  558 * This takes a head page as its parameter and tells whether the
 559 * tail page reference counting can be skipped.
 560 *
 561 * For this to be safe, PageSlab and PageHeadHuge must remain true on
 562 * any given page where they return true here, until all tail pins
 563 * have been released.
 564 */
 565static inline bool compound_tail_refcounted(struct page *page)
 566{
 567        VM_BUG_ON_PAGE(!PageHead(page), page);
 568        return __compound_tail_refcounted(page);
 569}
 570
 571static inline void get_huge_page_tail(struct page *page)
 572{
 573        /*
 574         * __split_huge_page_refcount() cannot run from under us.
 575         */
 576        VM_BUG_ON_PAGE(!PageTail(page), page);
 577        VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
 578        VM_BUG_ON_PAGE(page_ref_count(page) != 0, page);
 579        if (compound_tail_refcounted(page->first_page))
 580                atomic_inc(&page->_mapcount);
 581}
 582
 583static inline struct page *virt_to_head_page(const void *x)
 584{
 585        struct page *page = virt_to_page(x);
 586        return compound_head(page);
 587}
 588
 589/*
  590 * PageBuddy() indicates that the page is free and in the buddy system
 591 * (see mm/page_alloc.c).
 592 *
 593 * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
 594 * -2 so that an underflow of the page_mapcount() won't be mistaken
 595 * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
 596 * efficiently by most CPU architectures.
 597 */
 598#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
 599
 600static inline int PageBuddy(struct page *page)
 601{
 602        return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
 603}
 604
 605static inline void __SetPageBuddy(struct page *page)
 606{
 607        VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
 608        atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
 609}
 610
 611static inline void __ClearPageBuddy(struct page *page)
 612{
 613        VM_BUG_ON_PAGE(!PageBuddy(page), page);
 614        atomic_set(&page->_mapcount, -1);
 615}
 616
 617#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
 618
 619static inline int PageBalloon(struct page *page)
 620{
 621        return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
 622}
 623
 624static inline void __SetPageBalloon(struct page *page)
 625{
 626        VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
 627        atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
 628}
 629
 630static inline void __ClearPageBalloon(struct page *page)
 631{
 632        VM_BUG_ON_PAGE(!PageBalloon(page), page);
 633        atomic_set(&page->_mapcount, -1);
 634}
 635
 636void put_pages_list(struct list_head *pages);
 637
 638void split_page(struct page *page, unsigned int order);
 639int split_free_page(struct page *page);
 640
 641/*
 642 * Compound pages have a destructor function.  Provide a
 643 * prototype for that function and accessor functions.
 644 * These are _only_ valid on the head of a PG_compound page.
 645 */
 646typedef void compound_page_dtor(struct page *);
 647
 648static inline void set_compound_page_dtor(struct page *page,
 649                                                compound_page_dtor *dtor)
 650{
 651        page[1].lru.next = (void *)dtor;
 652}
 653
 654static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
 655{
 656        return (compound_page_dtor *)page[1].lru.next;
 657}
 658
 659static inline int compound_order(struct page *page)
 660{
 661        if (!PageHead(page))
 662                return 0;
 663        return (unsigned long)page[1].lru.prev;
 664}
 665
 666static inline void set_compound_order(struct page *page, unsigned long order)
 667{
 668        page[1].lru.prev = (void *)order;
 669}
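
/*
 * Editorial example (not part of the original header): compound_order() is 0
 * for ordinary pages, so this yields PAGE_SIZE for them and PAGE_SIZE << order
 * for the head page of a compound page.  Illustrative helper only.
 */
static inline unsigned long example_compound_bytes(struct page *page)
{
        return PAGE_SIZE << compound_order(page);
}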
 670
 671#ifdef CONFIG_MMU
 672/*
 673 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
  674 * servicing faults for write access.  In the normal case, we always want
 675 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 676 * that do not have writing enabled, when used by access_process_vm.
 677 */
 678static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 679{
 680        if (likely(vma->vm_flags & VM_WRITE))
 681                pte = pte_mkwrite(pte);
 682        return pte;
 683}
 684int finish_fault(struct vm_fault *vmf);
 685int finish_mkwrite_fault(struct vm_fault *vmf);
 686#endif
 687
 688/*
 689 * Multiple processes may "see" the same page. E.g. for untouched
 690 * mappings of /dev/null, all processes see the same page full of
 691 * zeroes, and text pages of executables and shared libraries have
 692 * only one copy in memory, at most, normally.
 693 *
 694 * For the non-reserved pages, page_count(page) denotes a reference count.
 695 *   page_count() == 0 means the page is free. page->lru is then used for
 696 *   freelist management in the buddy allocator.
 697 *   page_count() > 0  means the page has been allocated.
 698 *
 699 * Pages are allocated by the slab allocator in order to provide memory
 700 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 701 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 702 * unless a particular usage is carefully commented. (the responsibility of
 703 * freeing the kmalloc memory is the caller's, of course).
 704 *
 705 * A page may be used by anyone else who does a __get_free_page().
 706 * In this case, page_count still tracks the references, and should only
 707 * be used through the normal accessor functions. The top bits of page->flags
 708 * and page->virtual store page management information, but all other fields
 709 * are unused and could be used privately, carefully. The management of this
 710 * page is the responsibility of the one who allocated it, and those who have
 711 * subsequently been given references to it.
 712 *
 713 * The other pages (we may call them "pagecache pages") are completely
 714 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 715 * The following discussion applies only to them.
 716 *
 717 * A pagecache page contains an opaque `private' member, which belongs to the
 718 * page's address_space. Usually, this is the address of a circular list of
 719 * the page's disk buffers. PG_private must be set to tell the VM to call
 720 * into the filesystem to release these pages.
 721 *
 722 * A page may belong to an inode's memory mapping. In this case, page->mapping
 723 * is the pointer to the inode, and page->index is the file offset of the page,
 724 * in units of PAGE_CACHE_SIZE.
 725 *
 726 * If pagecache pages are not associated with an inode, they are said to be
 727 * anonymous pages. These may become associated with the swapcache, and in that
 728 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 729 *
 730 * In either case (swapcache or inode backed), the pagecache itself holds one
 731 * reference to the page. Setting PG_private should also increment the
  732 * refcount. Each user mapping also has a reference to the page.
 733 *
 734 * The pagecache pages are stored in a per-mapping radix tree, which is
 735 * rooted at mapping->page_tree, and indexed by offset.
 736 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 737 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 738 *
 739 * All pagecache pages may be subject to I/O:
 740 * - inode pages may need to be read from disk,
 741 * - inode pages which have been modified and are MAP_SHARED may need
 742 *   to be written back to the inode on disk,
 743 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 744 *   modified may need to be swapped out to swap space and (later) to be read
 745 *   back into memory.
 746 */
 747
 748/*
 749 * The zone field is never updated after free_area_init_core()
 750 * sets it, so none of the operations on it need to be atomic.
 751 */
 752
 753/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
 754#define SECTIONS_PGOFF          ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
 755#define NODES_PGOFF             (SECTIONS_PGOFF - NODES_WIDTH)
 756#define ZONES_PGOFF             (NODES_PGOFF - ZONES_WIDTH)
 757#define LAST_CPUPID_PGOFF       (ZONES_PGOFF - LAST_CPUPID_WIDTH)
 758#define ZONE_DEVICE_PGOFF       (LAST_CPUPID_PGOFF - ZONE_DEVICE_WIDTH)
 759
 760/*
 761 * Define the bit shifts to access each section.  For non-existent
 762 * sections we define the shift as 0; that plus a 0 mask ensures
 763 * the compiler will optimise away reference to them.
 764 */
 765#define SECTIONS_PGSHIFT        (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
 766#define NODES_PGSHIFT           (NODES_PGOFF * (NODES_WIDTH != 0))
 767#define ZONES_PGSHIFT           (ZONES_PGOFF * (ZONES_WIDTH != 0))
 768#define LAST_CPUPID_PGSHIFT     (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
 769#define ZONE_DEVICE_PGSHIFT     (ZONE_DEVICE_PGOFF * (ZONE_DEVICE_WIDTH != 0))
 770
 771/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
 772#ifdef NODE_NOT_IN_PAGE_FLAGS
 773#define ZONEID_SHIFT            (SECTIONS_SHIFT + ZONES_SHIFT)
 774#define ZONEID_PGOFF            ((SECTIONS_PGOFF < ZONES_PGOFF)? \
 775                                                SECTIONS_PGOFF : ZONES_PGOFF)
 776#else
 777#define ZONEID_SHIFT            (NODES_SHIFT + ZONES_SHIFT)
 778#define ZONEID_PGOFF            ((NODES_PGOFF < ZONES_PGOFF)? \
 779                                                NODES_PGOFF : ZONES_PGOFF)
 780#endif
 781
 782#define ZONEID_PGSHIFT          (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
 783
 784#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
 785#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
 786#endif
 787
 788#define ZONES_MASK              ((1UL << ZONES_WIDTH) - 1)
 789#define NODES_MASK              ((1UL << NODES_WIDTH) - 1)
 790#define SECTIONS_MASK           ((1UL << SECTIONS_WIDTH) - 1)
 791#define LAST_CPUPID_MASK        ((1UL << LAST_CPUPID_WIDTH) - 1)
 792#define ZONEID_MASK             ((1UL << ZONEID_SHIFT) - 1)
 793
 794#define ZONE_DEVICE_FLAG        (1UL << ZONE_DEVICE_PGSHIFT)
 795
 796static inline enum zone_type page_zonenum(const struct page *page)
 797{
 798#ifdef CONFIG_ZONE_DEVICE
 799        if (page->flags & ZONE_DEVICE_FLAG)
 800                return ZONE_DEVICE;
 801#endif
 802        return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 803}
 804
 805#ifdef CONFIG_ZONE_DEVICE
 806void get_zone_device_page(struct page *page);
 807void put_zone_device_page(struct page *page);
 808static inline bool is_zone_device_page(const struct page *page)
 809{
 810        return page_zonenum(page) == ZONE_DEVICE;
 811}
 812#else
 813static inline void get_zone_device_page(struct page *page)
 814{
 815}
 816static inline void put_zone_device_page(struct page *page)
 817{
 818}
 819static inline bool is_zone_device_page(const struct page *page)
 820{
 821        return false;
 822}
 823#endif
 824
 825extern bool __get_page_tail(struct page *page);
 826
 827static inline void get_page(struct page *page)
 828{
 829        if (unlikely(PageTail(page)))
 830                if (likely(__get_page_tail(page)))
 831                        return;
 832        /*
 833         * Getting a normal page or the head of a compound page
  834         * requires an already elevated page->_count.
 835         */
 836        VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
 837
 838        page_ref_inc(page);
 839
 840        if (unlikely(is_zone_device_page(page)))
 841                get_zone_device_page(page);
 842}
 843
 844void put_page(struct page *page);
 845
 846#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 847#define SECTION_IN_PAGE_FLAGS
 848#endif
 849
 850/*
 851 * The identification function is only used by the buddy allocator for
 852 * determining if two pages could be buddies. We are not really
  853 * identifying the zone since we could be using the section number
  854 * id if we do not have a node id available in the page flags.
 855 * We guarantee only that it will return the same value for two
 856 * combinable pages in a zone.
 857 */
 858static inline int page_zone_id(struct page *page)
 859{
 860        return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
 861}
 862
 863static inline int zone_to_nid(struct zone *zone)
 864{
 865#ifdef CONFIG_NUMA
 866        return zone->node;
 867#else
 868        return 0;
 869#endif
 870}
 871
 872#ifdef NODE_NOT_IN_PAGE_FLAGS
 873extern int page_to_nid(const struct page *page);
 874#else
 875static inline int page_to_nid(const struct page *page)
 876{
 877        return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
 878}
 879#endif
 880
 881#ifdef CONFIG_NUMA_BALANCING
 882static inline int cpu_pid_to_cpupid(int cpu, int pid)
 883{
 884        return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
 885}
 886
 887static inline int cpupid_to_pid(int cpupid)
 888{
 889        return cpupid & LAST__PID_MASK;
 890}
 891
 892static inline int cpupid_to_cpu(int cpupid)
 893{
 894        return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
 895}
 896
 897static inline int cpupid_to_nid(int cpupid)
 898{
 899        return cpu_to_node(cpupid_to_cpu(cpupid));
 900}
 901
 902static inline bool cpupid_pid_unset(int cpupid)
 903{
 904        return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
 905}
 906
 907static inline bool cpupid_cpu_unset(int cpupid)
 908{
 909        return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
 910}
 911
 912static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
 913{
 914        return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
 915}
 916
 917#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
 918#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
 919static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
 920{
 921        return xchg(&page->_last_cpupid, cpupid);
 922}
 923
 924static inline int page_cpupid_last(struct page *page)
 925{
 926        return page->_last_cpupid;
 927}
 928static inline void page_cpupid_reset_last(struct page *page)
 929{
 930        page->_last_cpupid = -1;
 931}
 932#else
 933static inline int page_cpupid_last(struct page *page)
 934{
 935        return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
 936}
 937
 938extern int page_cpupid_xchg_last(struct page *page, int cpupid);
 939
 940static inline void page_cpupid_reset_last(struct page *page)
 941{
 942        int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;
 943
 944        page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
 945        page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
 946}
 947#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
 948#else /* !CONFIG_NUMA_BALANCING */
 949static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
 950{
 951        return page_to_nid(page); /* XXX */
 952}
 953
 954static inline int page_cpupid_last(struct page *page)
 955{
 956        return page_to_nid(page); /* XXX */
 957}
 958
 959static inline int cpupid_to_nid(int cpupid)
 960{
 961        return -1;
 962}
 963
 964static inline int cpupid_to_pid(int cpupid)
 965{
 966        return -1;
 967}
 968
 969static inline int cpupid_to_cpu(int cpupid)
 970{
 971        return -1;
 972}
 973
 974static inline int cpu_pid_to_cpupid(int nid, int pid)
 975{
 976        return -1;
 977}
 978
 979static inline bool cpupid_pid_unset(int cpupid)
 980{
 981        return 1;
 982}
 983
 984static inline void page_cpupid_reset_last(struct page *page)
 985{
 986}
 987
 988static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
 989{
 990        return false;
 991}
 992#endif /* CONFIG_NUMA_BALANCING */
 993
 994static inline struct zone *page_zone(const struct page *page)
 995{
 996#ifdef CONFIG_ZONE_DEVICE
 997        if (page->flags & ZONE_DEVICE_FLAG)
 998                return NODE_DATA(page_to_nid(page))->zone_device;
 999#endif
1000        return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
1001}
1002
1003#ifdef SECTION_IN_PAGE_FLAGS
1004static inline void set_page_section(struct page *page, unsigned long section)
1005{
1006        page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
1007        page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
1008}
1009
1010static inline unsigned long page_to_section(const struct page *page)
1011{
1012        return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
1013}
1014#endif
1015
1016static inline void set_page_zone(struct page *page, enum zone_type zone)
1017{
1018        page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
1019#ifdef CONFIG_ZONE_DEVICE
1020        page->flags &= ~ZONE_DEVICE_FLAG;
1021        if (zone == ZONE_DEVICE)
1022                page->flags |= ZONE_DEVICE_FLAG;
1023        else
1024#endif
1025                page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
1026}
1027
1028static inline void set_page_node(struct page *page, unsigned long node)
1029{
1030        page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
1031        page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
1032}
1033
1034static inline void set_page_links(struct page *page, enum zone_type zone,
1035        unsigned long node, unsigned long pfn)
1036{
1037        set_page_zone(page, zone);
1038        set_page_node(page, node);
1039#ifdef SECTION_IN_PAGE_FLAGS
1040        set_page_section(page, pfn_to_section_nr(pfn));
1041#endif
1042}
1043
1044/*
1045 * Some inline functions in vmstat.h depend on page_zone()
1046 */
1047#include <linux/vmstat.h>
1048
1049static __always_inline void *lowmem_page_address(const struct page *page)
1050{
1051        return __va(PFN_PHYS(page_to_pfn(page)));
1052}
1053
1054#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
1055#define HASHED_PAGE_VIRTUAL
1056#endif
1057
1058#if defined(WANT_PAGE_VIRTUAL)
1059#define page_address(page) ((page)->virtual)
1060#define set_page_address(page, address)                 \
1061        do {                                            \
1062                (page)->virtual = (address);            \
1063        } while(0)
1064#define page_address_init()  do { } while(0)
1065#endif
1066
1067#if defined(HASHED_PAGE_VIRTUAL)
1068void *page_address(const struct page *page);
1069void set_page_address(struct page *page, void *virtual);
1070void page_address_init(void);
1071#endif
1072
1073#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
1074#define page_address(page) lowmem_page_address(page)
1075#define set_page_address(page, address)  do { } while(0)
1076#define page_address_init()  do { } while(0)
1077#endif
1078
1079/*
1080 * On an anonymous page mapped into a user virtual memory area,
1081 * page->mapping points to its anon_vma, not to a struct address_space;
1082 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
1083 *
1084 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
1085 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
1086 * and then page->mapping points, not to an anon_vma, but to a private
1087 * structure which KSM associates with that merged page.  See ksm.h.
1088 *
1089 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
1090 *
1091 * Please note that, confusingly, "page_mapping" refers to the inode
1092 * address_space which maps the page from disk; whereas "page_mapped"
1093 * refers to user virtual address space into which the page is mapped.
1094 */
1095#define PAGE_MAPPING_ANON       1
1096#define PAGE_MAPPING_KSM        2
1097#define PAGE_MAPPING_FLAGS      (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
1098
1099extern struct address_space *page_mapping(struct page *page);
1100
1101/* Neutral page->mapping pointer to address_space or anon_vma or other */
1102static inline void *page_rmapping(struct page *page)
1103{
1104        return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
1105}
1106
1107extern struct address_space *__page_file_mapping(struct page *);
1108
1109static inline
1110struct address_space *page_file_mapping(struct page *page)
1111{
1112        if (unlikely(PageSwapCache(page)))
1113                return __page_file_mapping(page);
1114
1115        return page->mapping;
1116}
1117
1118static inline int PageAnon(struct page *page)
1119{
1120        return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
1121}
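
/*
 * Editorial example (not part of the original header): classifying a page by
 * its tagged ->mapping pointer.  PageAnon() tests the PAGE_MAPPING_ANON bit;
 * for pagecache pages, page_mapping() returns the backing address_space with
 * the flag bits stripped.  Illustrative helper only.
 */
static inline bool example_page_is_file_backed(struct page *page)
{
        return !PageAnon(page) && page_mapping(page) != NULL;
}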
1122
1123/*
1124 * Return the pagecache index of the passed page.  Regular pagecache pages
1125 * use ->index whereas swapcache pages use ->private
1126 */
1127static inline pgoff_t page_index(struct page *page)
1128{
1129        if (unlikely(PageSwapCache(page)))
1130                return page_private(page);
1131        return page->index;
1132}
1133
1134extern pgoff_t __page_file_index(struct page *page);
1135
1136/*
1137 * Return the file index of the page. Regular pagecache pages use ->index
1138 * whereas swapcache pages use swp_offset(->private)
1139 */
1140static inline pgoff_t page_file_index(struct page *page)
1141{
1142        if (unlikely(PageSwapCache(page)))
1143                return __page_file_index(page);
1144
1145        return page->index;
1146}
1147
1148/*
1149 * Return true if this page is mapped into pagetables.
1150 */
1151static inline int page_mapped(struct page *page)
1152{
1153        return atomic_read(&(page)->_mapcount) >= 0;
1154}
1155
1156/*
1157 * Return true only if the page has been allocated with
1158 * ALLOC_NO_WATERMARKS and the low watermark was not
 1159 * met, implying that the system is under some pressure.
1160 */
1161static inline bool page_is_pfmemalloc(struct page *page)
1162{
1163        /*
1164         * Page index cannot be this large so this must be
1165         * a pfmemalloc page.
1166         */
1167        return page->index == -1UL;
1168}
1169
1170/*
1171 * Only to be called by the page allocator on a freshly allocated
1172 * page.
1173 */
1174static inline void set_page_pfmemalloc(struct page *page)
1175{
1176        page->index = -1UL;
1177}
1178
1179static inline void clear_page_pfmemalloc(struct page *page)
1180{
1181        page->index = 0;
1182}
1183
1184/*
1185 * Different kinds of faults, as returned by handle_mm_fault().
1186 * Used to decide whether a process gets delivered SIGBUS or
1187 * just gets major/minor fault counters bumped up.
1188 */
1189
1190#define VM_FAULT_MINOR  0 /* For backwards compat. Remove me quickly. */
1191
1192#define VM_FAULT_OOM    0x0001
1193#define VM_FAULT_SIGBUS 0x0002
1194#define VM_FAULT_MAJOR  0x0004
1195#define VM_FAULT_WRITE  0x0008  /* Special case for get_user_pages */
1196#define VM_FAULT_HWPOISON 0x0010        /* Hit poisoned small page */
1197#define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
1198#define VM_FAULT_SIGSEGV 0x0040
1199
1200#define VM_FAULT_NOPAGE 0x0100  /* ->fault installed the pte, not return page */
1201#define VM_FAULT_LOCKED 0x0200  /* ->fault locked the returned page */
1202#define VM_FAULT_RETRY  0x0400  /* ->fault blocked, must retry */
1203#define VM_FAULT_FALLBACK 0x0800        /* huge page fault failed, fall back to small */
1204#define VM_FAULT_DONE_COW   0x1000      /* ->fault has fully handled COW */
1205
1206#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
1207
1208#define VM_FAULT_ERROR  (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
1209                         VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
1210                         VM_FAULT_FALLBACK)
1211
1212#define VM_FAULT_RESULT_TRACE \
1213        { VM_FAULT_OOM,                 "OOM" }, \
1214        { VM_FAULT_SIGBUS,              "SIGBUS" }, \
1215        { VM_FAULT_MAJOR,               "MAJOR" }, \
1216        { VM_FAULT_WRITE,               "WRITE" }, \
1217        { VM_FAULT_HWPOISON,            "HWPOISON" }, \
1218        { VM_FAULT_HWPOISON_LARGE,      "HWPOISON_LARGE" }, \
1219        { VM_FAULT_SIGSEGV,             "SIGSEGV" }, \
1220        { VM_FAULT_NOPAGE,              "NOPAGE" }, \
1221        { VM_FAULT_LOCKED,              "LOCKED" }, \
1222        { VM_FAULT_RETRY,               "RETRY" }, \
1223        { VM_FAULT_FALLBACK,            "FALLBACK" }, \
1224        { VM_FAULT_DONE_COW,            "DONE_COW" }
1225
1226/* Encode hstate index for a hwpoisoned large page */
1227#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
1228#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
1229
1230/*
1231 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
1232 */
1233extern void pagefault_out_of_memory(void);
1234
1235#define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)
1236
1237/*
1238 * Flags passed to show_mem() and show_free_areas() to suppress output in
1239 * various contexts.
1240 */
1241#define SHOW_MEM_FILTER_NODES           (0x0001u)       /* disallowed nodes */
1242#define SHOW_MEM_FILTER_PAGE_COUNT      (0x0002u)       /* page type count */
1243
1244extern void show_free_areas(unsigned int flags);
1245extern bool skip_free_areas_node(unsigned int flags, int nid);
1246
1247int shmem_zero_setup(struct vm_area_struct *);
1248#ifdef CONFIG_SHMEM
1249bool shmem_mapping(struct address_space *mapping);
1250#else
1251static inline bool shmem_mapping(struct address_space *mapping)
1252{
1253        return false;
1254}
1255#endif
1256
1257extern int can_do_mlock(void);
1258extern int user_shm_lock(size_t, struct user_struct *);
1259extern void user_shm_unlock(size_t, struct user_struct *);
1260
1261/*
1262 * Parameter block passed down to zap_pte_range in exceptional cases.
1263 */
1264struct zap_details {
1265        struct vm_area_struct *nonlinear_vma;   /* Check page->index if set */
1266        struct address_space *check_mapping;    /* Check page->mapping if set */
1267        pgoff_t first_index;                    /* Lowest page->index to unmap */
1268        pgoff_t last_index;                     /* Highest page->index to unmap */
1269};
1270
1271struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1272                pte_t pte);
1273
1274int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1275                unsigned long size);
1276void zap_page_range(struct vm_area_struct *vma, unsigned long address,
1277                unsigned long size, struct zap_details *);
1278void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
1279                unsigned long start, unsigned long end);
1280
1281/**
1282 * mm_walk - callbacks for walk_page_range
1283 * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
1284 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
1285 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
1286 *             this handler is required to be able to handle
1287 *             pmd_trans_huge() pmds.  They may simply choose to
1288 *             split_huge_page() instead of handling it explicitly.
1289 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
1290 * @pte_hole: if set, called for each hole at all levels
1291 * @hugetlb_entry: if set, called for each hugetlb entry
 1292 *                 *Caution*: The caller must hold mmap_sem if @hugetlb_entry
1293 *                            is used.
1294 *
1295 * (see walk_page_range for more details)
1296 */
1297struct mm_walk {
1298        int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
1299                         unsigned long next, struct mm_walk *walk);
1300        int (*pud_entry)(pud_t *pud, unsigned long addr,
1301                         unsigned long next, struct mm_walk *walk);
1302        int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
1303                         unsigned long next, struct mm_walk *walk);
1304        int (*pte_entry)(pte_t *pte, unsigned long addr,
1305                         unsigned long next, struct mm_walk *walk);
1306        int (*pte_hole)(unsigned long addr, unsigned long next,
1307                        struct mm_walk *walk);
1308        int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
1309                             unsigned long addr, unsigned long next,
1310                             struct mm_walk *walk);
1311        struct mm_struct *mm;
1312        void *private;
1313};
1314
1315int walk_page_range(unsigned long addr, unsigned long end,
1316                struct mm_walk *walk);
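
/*
 * Editorial example (not part of the original header): a minimal page-table
 * walk that counts present PTEs in a range.  The caller must hold mmap_sem
 * for the mm being walked; all names below are illustrative only.
 */
#if 0   /* illustration only */
static int example_pte_entry(pte_t *pte, unsigned long addr,
                             unsigned long next, struct mm_walk *walk)
{
        unsigned long *count = walk->private;

        if (pte_present(*pte))
                (*count)++;
        return 0;
}

static unsigned long example_count_present(struct mm_struct *mm,
                                           unsigned long start,
                                           unsigned long end)
{
        unsigned long count = 0;
        struct mm_walk walk = {
                .pte_entry      = example_pte_entry,
                .mm             = mm,
                .private        = &count,
        };

        walk_page_range(start, end, &walk);
        return count;
}
#endif
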
1317void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
1318                unsigned long end, unsigned long floor, unsigned long ceiling);
1319int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
1320                        struct vm_area_struct *vma);
1321void unmap_mapping_range(struct address_space *mapping,
1322                loff_t const holebegin, loff_t const holelen, int even_cows);
1323int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
1324                             pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
1325int follow_pfn(struct vm_area_struct *vma, unsigned long address,
1326        unsigned long *pfn);
1327int follow_phys(struct vm_area_struct *vma, unsigned long address,
1328                unsigned int flags, unsigned long *prot, resource_size_t *phys);
1329int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
1330                        void *buf, int len, int write);
1331
1332static inline void unmap_shared_mapping_range(struct address_space *mapping,
1333                loff_t const holebegin, loff_t const holelen)
1334{
1335        unmap_mapping_range(mapping, holebegin, holelen, 0);
1336}
1337
1338extern void truncate_pagecache(struct inode *inode, loff_t new);
1339extern void truncate_setsize(struct inode *inode, loff_t newsize);
1340void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
1341void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
1342int truncate_inode_page(struct address_space *mapping, struct page *page);
1343int generic_error_remove_page(struct address_space *mapping, struct page *page);
1344int invalidate_inode_page(struct page *page);
1345
1346#ifdef CONFIG_MMU
1347extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
1348                unsigned int flags);
1349extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
1350                            unsigned long address, unsigned int fault_flags);
1351#else
1352static inline int handle_mm_fault(struct vm_area_struct *vma,
1353                unsigned long address, unsigned int flags)
1354{
1355        /* should never happen if there's no MMU */
1356        BUG();
1357        return VM_FAULT_SIGBUS;
1358}
1359static inline int fixup_user_fault(struct task_struct *tsk,
1360                struct mm_struct *mm, unsigned long address,
1361                unsigned int fault_flags)
1362{
1363        /* should never happen if there's no MMU */
1364        BUG();
1365        return -EFAULT;
1366}
1367#endif
1368
1369extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
1370extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1371                void *buf, int len, int write);
1372
1373long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1374                      unsigned long start, unsigned long nr_pages,
1375                      unsigned int foll_flags, struct page **pages,
1376                      struct vm_area_struct **vmas, int *nonblocking);
1377long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
1378                            unsigned long start, unsigned long nr_pages,
1379                            int write, int force, struct page **pages,
1380                            struct vm_area_struct **vmas);
1381long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1382                    unsigned long start, unsigned long nr_pages,
1383                    int write, int force, struct page **pages,
1384                    struct vm_area_struct **vmas);
1385long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
1386                    unsigned long start, unsigned long nr_pages,
1387                    int write, int force, struct page **pages,
1388                    int *locked);
1389long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
1390                               unsigned long start, unsigned long nr_pages,
1391                               int write, int force, struct page **pages,
1392                               unsigned int gup_flags);
1393long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
1394                    unsigned long start, unsigned long nr_pages,
1395                    int write, int force, struct page **pages);
1396int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1397                        struct page **pages);
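
/*
 * Editorial example (not part of the original header): pinning a user buffer
 * with get_user_pages_fast() and dropping the references afterwards with
 * put_page().  Error handling is abbreviated; names are illustrative only.
 */
#if 0   /* illustration only */
static int example_pin_user_buffer(unsigned long uaddr, int nr_pages,
                                   struct page **pages)
{
        int pinned;

        /* write=1: the caller intends to dirty the pages */
        pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
        if (pinned < nr_pages) {
                while (pinned-- > 0)
                        put_page(pages[pinned]);
                return -EFAULT;
        }
        return 0;
}
#endif
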
1398struct kvec;
1399int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
1400                        struct page **pages);
1401int get_kernel_page(unsigned long start, int write, struct page **pages);
1402struct page *get_dump_page(unsigned long addr);
1403
1404extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
1405extern void do_invalidatepage(struct page *page, unsigned long offset);
1406extern void do_invalidatepage_range(struct page *page, unsigned int offset,
1407                                    unsigned int length);
1408
1409int __set_page_dirty_nobuffers(struct page *page);
1410int __set_page_dirty_no_writeback(struct page *page);
1411int redirty_page_for_writepage(struct writeback_control *wbc,
1412                                struct page *page);
1413void account_page_dirtied(struct page *page, struct address_space *mapping);
1414void account_page_writeback(struct page *page);
1415int set_page_dirty(struct page *page);
1416int set_page_dirty_lock(struct page *page);
1417int clear_page_dirty_for_io(struct page *page);
1418int get_cmdline(struct task_struct *task, char *buffer, int buflen);
1419
1420static inline bool vma_is_anonymous(struct vm_area_struct *vma)
1421{
1422        return !vma->vm_ops;
1423}
1424
1425#ifdef CONFIG_SHMEM
1426/*
 1427 * vma_is_shmem() is not inline because it is used only by slow
1428 * paths in userfault.
1429 */
1430bool vma_is_shmem(struct vm_area_struct *vma);
1431#else
1432static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
1433#endif
1434
1435int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
1436
1437extern unsigned long move_page_tables(struct vm_area_struct *vma,
1438                unsigned long old_addr, struct vm_area_struct *new_vma,
1439                unsigned long new_addr, unsigned long len,
1440                bool need_rmap_locks);
1441extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
1442                              unsigned long end, pgprot_t newprot,
1443                              int dirty_accountable, int prot_numa);
1444extern int mprotect_fixup(struct vm_area_struct *vma,
1445                          struct vm_area_struct **pprev, unsigned long start,
1446                          unsigned long end, unsigned long newflags);
1447
1448/*
1449 * Like get_user_pages_fast() but never faults; it may therefore return short.
1450 */
1451int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1452                          struct page **pages);
1453/*
1454 * per-process (per-mm_struct) statistics.
1455 */
1456static inline atomic_long_t *__get_mm_counter(struct mm_struct *mm, int member)
1457{
1458        if (member == MM_SHMEMPAGES)
1459                return &mm->mm_shmempages;
1460        else
1461                return &mm->rss_stat.count[member];
1462}
1463
1464static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
1465{
1466        long val = atomic_long_read(__get_mm_counter(mm, member));
1467
1468#ifdef SPLIT_RSS_COUNTING
1469        /*
1470         * The counter is updated asynchronously and may transiently go
1471         * negative, which is never a value users expect, so clamp it to 0.
1472         */
1473        if (val < 0)
1474                val = 0;
1475#endif
1476        return (unsigned long)val;
1477}
1478
1479static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1480{
1481        atomic_long_add(value, __get_mm_counter(mm, member));
1482}
1483
1484static inline void inc_mm_counter(struct mm_struct *mm, int member)
1485{
1486        atomic_long_inc(__get_mm_counter(mm, member));
1487}
1488
1489static inline void dec_mm_counter(struct mm_struct *mm, int member)
1490{
1491        atomic_long_dec(__get_mm_counter(mm, member));
1492}
1493
1494/* Optimized variant when page is already known not to be PageAnon */
1495static inline int mm_counter_file(struct page *page)
1496{
1497        if (PageSwapBacked(page))
1498                return MM_SHMEMPAGES;
1499        return MM_FILEPAGES;
1500}
1501
1502static inline int mm_counter(struct page *page)
1503{
1504        if (PageAnon(page))
1505                return MM_ANONPAGES;
1506        return mm_counter_file(page);
1507}
1508
1509static inline unsigned long get_mm_rss(struct mm_struct *mm)
1510{
1511        return get_mm_counter(mm, MM_FILEPAGES) +
1512                get_mm_counter(mm, MM_ANONPAGES) +
1513                get_mm_counter(mm, MM_SHMEMPAGES);
1514}
1515
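/*
 * Illustrative sketch (not taken from kernel sources): the fault and rmap
 * paths pick the right counter for a page with mm_counter() and adjust it,
 * while readers such as the proc code sum the counters via get_mm_rss():
 *
 *	inc_mm_counter(mm, mm_counter(page));		on mapping "page"
 *	dec_mm_counter(mm, mm_counter(page));		on unmapping it
 *	rss_kib = get_mm_rss(mm) << (PAGE_SHIFT - 10);	report RSS in KiB
 */
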
1516static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
1517{
1518        return max(mm->hiwater_rss, get_mm_rss(mm));
1519}
1520
1521static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
1522{
1523        return max(mm->hiwater_vm, mm->total_vm);
1524}
1525
1526static inline void update_hiwater_rss(struct mm_struct *mm)
1527{
1528        unsigned long _rss = get_mm_rss(mm);
1529
1530        if ((mm)->hiwater_rss < _rss)
1531                (mm)->hiwater_rss = _rss;
1532}
1533
1534static inline void update_hiwater_vm(struct mm_struct *mm)
1535{
1536        if (mm->hiwater_vm < mm->total_vm)
1537                mm->hiwater_vm = mm->total_vm;
1538}
1539
1540static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
1541                                         struct mm_struct *mm)
1542{
1543        unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
1544
1545        if (*maxrss < hiwater_rss)
1546                *maxrss = hiwater_rss;
1547}
1548
1549#if defined(SPLIT_RSS_COUNTING)
1550void sync_mm_rss(struct mm_struct *mm);
1551#else
1552static inline void sync_mm_rss(struct mm_struct *mm)
1553{
1554}
1555#endif
1556
1557#ifndef __HAVE_ARCH_PTE_DEVMAP
1558static inline int pte_devmap(pte_t pte)
1559{
1560        return 0;
1561}
1562#endif
1563
1564int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
1565
1566extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1567                               spinlock_t **ptl);
1568static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
1569                                    spinlock_t **ptl)
1570{
1571        pte_t *ptep;
1572        __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
1573        return ptep;
1574}
1575
1576#ifdef __PAGETABLE_PUD_FOLDED
1577static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
1578                                                unsigned long address)
1579{
1580        return 0;
1581}
1582#else
1583int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
1584#endif
1585
1586#ifdef __PAGETABLE_PMD_FOLDED
1587static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
1588                                                unsigned long address)
1589{
1590        return 0;
1591}
1592#else
1593int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
1594#endif
1595
1596int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
1597                pmd_t *pmd, unsigned long address);
1598int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
1599
1600/*
1601 * The following ifdef is needed to get the 4level-fixup.h header to work.
1602 * Remove it when 4level-fixup.h has been removed.
1603 */
1604#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
1605static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
1606{
1607        return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
1608                NULL: pud_offset(pgd, address);
1609}
1610
1611static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
1612{
1613        return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
1614                NULL: pmd_offset(pud, address);
1615}
1616#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
1617
1618#if USE_SPLIT_PTE_PTLOCKS
1619#if BLOATED_SPINLOCKS
1620void __init ptlock_cache_init(void);
1621extern bool ptlock_alloc(struct page *page);
1622extern void ptlock_free(struct page *page);
1623
1624static inline spinlock_t *ptlock_ptr(struct page *page)
1625{
1626        return page->ptl;
1627}
1628#else /* BLOATED_SPINLOCKS */
1629static inline void ptlock_cache_init(void) {}
1630static inline bool ptlock_alloc(struct page *page)
1631{
1632        return true;
1633}
1634
1635static inline void ptlock_free(struct page *page)
1636{
1637}
1638
1639static inline spinlock_t *ptlock_ptr(struct page *page)
1640{
1641        return &page->ptl;
1642}
1643#endif /* BLOATED_SPINLOCKS */
1644
1645static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1646{
1647        return ptlock_ptr(pmd_page(*pmd));
1648}
1649
1650static inline bool ptlock_init(struct page *page)
1651{
1652        /*
1653         * prep_new_page() initializes page->private (and therefore page->ptl)
1654         * to 0.  Make sure nobody has taken it into use in the meantime.
1655         *
1656         * That can happen if an arch tries to use slab for page table
1657         * allocation: slab code uses page->slab_cache and page->first_page
1658         * (for tail pages), which share storage with page->ptl.
1659         */
1660        VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
1661        if (!ptlock_alloc(page))
1662                return false;
1663        spin_lock_init(ptlock_ptr(page));
1664        return true;
1665}
1666
1667/* Reset page->mapping so free_pages_check won't complain. */
1668static inline void pte_lock_deinit(struct page *page)
1669{
1670        page->mapping = NULL;
1671        ptlock_free(page);
1672}
1673
1674#else   /* !USE_SPLIT_PTE_PTLOCKS */
1675/*
1676 * We use mm->page_table_lock to guard all pagetable pages of the mm.
1677 */
1678static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1679{
1680        return &mm->page_table_lock;
1681}
1682static inline void ptlock_cache_init(void) {}
1683static inline bool ptlock_init(struct page *page) { return true; }
1684static inline void pte_lock_deinit(struct page *page) {}
1685#endif /* USE_SPLIT_PTE_PTLOCKS */
1686
1687static inline void pgtable_init(void)
1688{
1689        ptlock_cache_init();
1690        pgtable_cache_init();
1691}
1692
1693static inline bool pgtable_page_ctor(struct page *page)
1694{
1695        inc_zone_page_state(page, NR_PAGETABLE);
1696        return ptlock_init(page);
1697}
1698
1699static inline void pgtable_page_dtor(struct page *page)
1700{
1701        pte_lock_deinit(page);
1702        dec_zone_page_state(page, NR_PAGETABLE);
1703}
1704
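/*
 * Illustrative sketch (function name made up, not taken from kernel
 * sources): an architecture's pte page allocator in asm/pgalloc.h is
 * expected to pair the constructor/destructor above as follows:
 *
 *	static struct page *example_pte_alloc_one(struct mm_struct *mm)
 *	{
 *		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 *
 *		if (!page)
 *			return NULL;
 *		if (!pgtable_page_ctor(page)) {
 *			__free_page(page);
 *			return NULL;
 *		}
 *		return page;
 *	}
 *
 * and the matching free path calls pgtable_page_dtor(page) before handing
 * the page back with __free_page(page).
 */
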
1705#define pte_offset_map_lock(mm, pmd, address, ptlp)     \
1706({                                                      \
1707        spinlock_t *__ptl = pte_lockptr(mm, pmd);       \
1708        pte_t *__pte = pte_offset_map(pmd, address);    \
1709        *(ptlp) = __ptl;                                \
1710        spin_lock(__ptl);                               \
1711        __pte;                                          \
1712})
1713
1714#define pte_unmap_unlock(pte, ptl)      do {            \
1715        spin_unlock(ptl);                               \
1716        pte_unmap(pte);                                 \
1717} while (0)
1718
1719#define pte_alloc_map(mm, vma, pmd, address)                            \
1720        ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,    \
1721                                                        pmd, address))? \
1722         NULL: pte_offset_map(pmd, address))
1723
1724#define pte_alloc_map_lock(mm, pmd, address, ptlp)      \
1725        ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,   \
1726                                                        pmd, address))? \
1727                NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
1728
1729#define pte_alloc_kernel(pmd, address)                  \
1730        ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
1731                NULL: pte_offset_kernel(pmd, address))
1732
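/*
 * Illustrative sketch of the canonical locked pte walk: map the pte for
 * "address" with its page-table lock held, inspect or modify it, then drop
 * the mapping and the lock together:
 *
 *	spinlock_t *ptl;
 *	pte_t *pte;
 *
 *	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 *	if (pte_present(*pte))
 *		...;
 *	pte_unmap_unlock(pte, ptl);
 *
 * pte_alloc_map_lock() follows the same pattern but first allocates the
 * pte page if the pmd is still empty, and may therefore return NULL.
 */
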
1733#if USE_SPLIT_PMD_PTLOCKS
1734
1735static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1736{
1737        return ptlock_ptr(virt_to_page(pmd));
1738}
1739
1740static inline bool pgtable_pmd_page_ctor(struct page *page)
1741{
1742#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1743        page->pmd_huge_pte = NULL;
1744#endif
1745        return ptlock_init(page);
1746}
1747
1748static inline void pgtable_pmd_page_dtor(struct page *page)
1749{
1750#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1751        VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
1752#endif
1753        ptlock_free(page);
1754}
1755
1756#define pmd_huge_pte(mm, pmd) (virt_to_page(pmd)->pmd_huge_pte)
1757
1758#else
1759
1760static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1761{
1762        return &mm->page_table_lock;
1763}
1764
1765static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
1766static inline void pgtable_pmd_page_dtor(struct page *page) {}
1767
1768#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
1769
1770#endif
1771
1772static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
1773{
1774        spinlock_t *ptl = pmd_lockptr(mm, pmd);
1775        spin_lock(ptl);
1776        return ptl;
1777}
1778
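/*
 * Illustrative sketch: pmd-level users take and release the (possibly
 * split) lock explicitly, since there is no unmap step to pair it with:
 *
 *	spinlock_t *ptl = pmd_lock(mm, pmd);
 *	...operate on *pmd...
 *	spin_unlock(ptl);
 */
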
1779/*
1780 * No scalability reason to split PUD locks yet, but follow the same pattern
1781 * as the PMD locks to make it easier if we decide to.  The VM should not be
1782 * considered ready to switch to split PUD locks yet; there may be places
1783 * which need to be converted from page_table_lock.
1784 */
1785static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
1786{
1787        return &mm->page_table_lock;
1788}
1789
1790static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
1791{
1792        spinlock_t *ptl = pud_lockptr(mm, pud);
1793
1794        spin_lock(ptl);
1795        return ptl;
1796}
1797
1798extern void free_area_init(unsigned long * zones_size);
1799extern void free_area_init_node(int nid, unsigned long * zones_size,
1800                unsigned long zone_start_pfn, unsigned long *zholes_size);
1801extern void free_initmem(void);
1802
1803/*
1804 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
1805 * into the buddy system.  The freed pages will be poisoned with the
1806 * pattern "poison" if it is non-zero.
1807 * Returns the number of pages freed into the buddy system.
1808 */
1809extern unsigned long free_reserved_area(unsigned long start, unsigned long end,
1810                                        int poison, char *s);
1811
1812#ifdef  CONFIG_HIGHMEM
1813/*
1814 * Free a highmem page into the buddy system, adjusting totalhigh_pages
1815 * and totalram_pages.
1816 */
1817extern void free_highmem_page(struct page *page);
1818#endif
1819
1820extern void adjust_managed_page_count(struct page *page, long count);
1821extern void mem_init_print_info(const char *str);
1822
1823extern void reserve_bootmem_region(unsigned long start, unsigned long end);
1824
1825/* Free the reserved page into the buddy system, so it gets managed. */
1826static inline void __free_reserved_page(struct page *page)
1827{
1828        ClearPageReserved(page);
1829        init_page_count(page);
1830        __free_page(page);
1831}
1832
1833static inline void free_reserved_page(struct page *page)
1834{
1835        __free_reserved_page(page);
1836        adjust_managed_page_count(page, 1);
1837}
1838
1839static inline void mark_page_reserved(struct page *page)
1840{
1841        SetPageReserved(page);
1842        adjust_managed_page_count(page, -1);
1843}
1844
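/*
 * Illustrative sketch (not taken from kernel sources): an architecture
 * that finds at runtime that a boot-time reserved range is not needed can
 * return it to the buddy allocator page by page, keeping the accounting
 * consistent:
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++)
 *		free_reserved_page(pfn_to_page(pfn));
 *
 * mark_page_reserved() is the reverse operation for taking a managed page
 * permanently out of the allocator.
 */
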
1845/*
1846 * Default method to free all the __init memory into the buddy system.
1847 * The freed pages will be poisoned with the pattern "poison" if it is
1848 * non-zero.  Returns the number of pages freed into the buddy system.
1849 */
1850static inline unsigned long free_initmem_default(int poison)
1851{
1852        extern char __init_begin[], __init_end[];
1853
1854        return free_reserved_area(PAGE_ALIGN((unsigned long)&__init_begin) ,
1855                                  ((unsigned long)&__init_end) & PAGE_MASK,
1856                                  poison, "unused kernel");
1857}
1858
1859static inline unsigned long get_num_physpages(void)
1860{
1861        int nid;
1862        unsigned long phys_pages = 0;
1863
1864        for_each_online_node(nid)
1865                phys_pages += node_present_pages(nid);
1866
1867        return phys_pages;
1868}
1869
1870#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1871/*
1872 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
1873 * zones, allocate the backing mem_map and account for memory holes in a more
1874 * architecture independent manner. This is a substitute for creating the
1875 * zone_sizes[] and zholes_size[] arrays and passing them to
1876 * free_area_init_node()
1877 *
1878 * An architecture is expected to register the ranges of page frames backed
1879 * by physical memory with memblock_add[_node]() before calling
1880 * free_area_init_nodes(), passing in the PFN each zone ends at.  For basic
1881 * usage, an architecture is expected to do something like:
1882 *
1883 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
1884 *                                                       max_highmem_pfn};
1885 * for_each_valid_physical_page_range()
1886 *      memblock_add_node(base, size, nid)
1887 * free_area_init_nodes(max_zone_pfns);
1888 *
1889 * free_bootmem_with_active_regions() calls free_bootmem_node() for each
1890 * registered physical page range.  Similarly
1891 * sparse_memory_present_with_active_regions() calls memory_present() for
1892 * each range when SPARSEMEM is enabled.
1893 *
1894 * See mm/page_alloc.c for more information on each function exposed by
1895 * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
1896 */
1897extern void free_area_init_nodes(unsigned long *max_zone_pfn);
1898unsigned long node_map_pfn_alignment(void);
1899unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
1900                                                unsigned long end_pfn);
1901extern unsigned long absent_pages_in_range(unsigned long start_pfn,
1902                                                unsigned long end_pfn);
1903extern void get_pfn_range_for_nid(unsigned int nid,
1904                        unsigned long *start_pfn, unsigned long *end_pfn);
1905extern unsigned long find_min_pfn_with_active_regions(void);
1906extern void free_bootmem_with_active_regions(int nid,
1907                                                unsigned long max_low_pfn);
1908extern void sparse_memory_present_with_active_regions(int nid);
1909
1910#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
1911
1912#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
1913    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
1914static inline int __early_pfn_to_nid(unsigned long pfn,
1915                                        struct mminit_pfnnid_cache *state)
1916{
1917        return 0;
1918}
1919#else
1920/* please see mm/page_alloc.c */
1921extern int __meminit early_pfn_to_nid(unsigned long pfn);
1922/* there is a per-arch backend function. */
1923extern int __meminit __early_pfn_to_nid(unsigned long pfn,
1924                                        struct mminit_pfnnid_cache *state);
1925#endif
1926
1927extern void set_dma_reserve(unsigned long new_dma_reserve);
1928extern void memmap_init_zone(unsigned long, int, unsigned long,
1929                                unsigned long, enum memmap_context);
1930extern void setup_per_zone_wmarks(void);
1931extern int __meminit init_per_zone_wmark_min(void);
1932extern void mem_init(void);
1933extern void __init mmap_init(void);
1934extern void show_mem(unsigned int flags);
1935extern long si_mem_available(void);
1936extern void si_meminfo(struct sysinfo * val);
1937extern void si_meminfo_node(struct sysinfo *val, int nid);
1938
1939extern __printf(3, 4)
1940void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
1941
1942extern void setup_per_cpu_pageset(void);
1943
1944extern void zone_pcp_update(struct zone *zone);
1945extern void zone_pcp_reset(struct zone *zone);
1946
1947/* page_alloc.c */
1948extern int min_free_kbytes;
1949
1950/* nommu.c */
1951extern atomic_long_t mmap_pages_allocated;
1952extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
1953
1954/* interval_tree.c */
1955void vma_interval_tree_insert(struct vm_area_struct *node,
1956                              struct rb_root *root);
1957void vma_interval_tree_insert_after(struct vm_area_struct *node,
1958                                    struct vm_area_struct *prev,
1959                                    struct rb_root *root);
1960void vma_interval_tree_remove(struct vm_area_struct *node,
1961                              struct rb_root *root);
1962struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
1963                                unsigned long start, unsigned long last);
1964struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
1965                                unsigned long start, unsigned long last);
1966
1967#define vma_interval_tree_foreach(vma, root, start, last)               \
1968        for (vma = vma_interval_tree_iter_first(root, start, last);     \
1969             vma; vma = vma_interval_tree_iter_next(vma, start, last))
1970
1971static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
1972                                        struct list_head *list)
1973{
1974        list_add_tail(&vma->shared.nonlinear, list);
1975}
1976
1977void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
1978                                   struct rb_root *root);
1979void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
1980                                   struct rb_root *root);
1981struct anon_vma_chain *anon_vma_interval_tree_iter_first(
1982        struct rb_root *root, unsigned long start, unsigned long last);
1983struct anon_vma_chain *anon_vma_interval_tree_iter_next(
1984        struct anon_vma_chain *node, unsigned long start, unsigned long last);
1985#ifdef CONFIG_DEBUG_VM_RB
1986void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
1987#endif
1988
1989#define anon_vma_interval_tree_foreach(avc, root, start, last)           \
1990        for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
1991             avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
1992
1993/* mmap.c */
1994extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
1995extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
1996        unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
1997        struct vm_area_struct *expand);
1998static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
1999        unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
2000{
2001        return __vma_adjust(vma, start, end, pgoff, insert, NULL);
2002}
2003extern struct vm_area_struct *vma_merge(struct mm_struct *,
2004        struct vm_area_struct *prev, unsigned long addr, unsigned long end,
2005        unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
2006        struct mempolicy *, struct vm_userfaultfd_ctx);
2007extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
2008extern int split_vma(struct mm_struct *,
2009        struct vm_area_struct *, unsigned long addr, int new_below);
2010extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
2011extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
2012        struct rb_node **, struct rb_node *);
2013extern void unlink_file_vma(struct vm_area_struct *);
2014extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
2015        unsigned long addr, unsigned long len, pgoff_t pgoff,
2016        bool *need_rmap_locks);
2017extern void exit_mmap(struct mm_struct *);
2018
2019static inline int check_data_rlimit(unsigned long rlim,
2020                                    unsigned long new,
2021                                    unsigned long start,
2022                                    unsigned long end_data,
2023                                    unsigned long start_data)
2024{
2025        if (rlim < RLIM_INFINITY) {
2026                if (((new - start) + (end_data - start_data)) > rlim)
2027                        return -ENOSPC;
2028        }
2029
2030        return 0;
2031}
2032
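/*
 * Illustrative sketch of the intended caller, brk(2): before moving the
 * break to "newbrk" (a made-up local here) it checks that the data segment
 * plus heap still fit within RLIMIT_DATA:
 *
 *	if (check_data_rlimit(rlimit(RLIMIT_DATA), newbrk, mm->start_brk,
 *			      mm->end_data, mm->start_data))
 *		goto out;	refuse the expansion
 */
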
2033extern int mm_take_all_locks(struct mm_struct *mm);
2034extern void mm_drop_all_locks(struct mm_struct *mm);
2035
2036extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
2037extern struct file *get_mm_exe_file(struct mm_struct *mm);
2038extern struct file *get_task_exe_file(struct task_struct *task);
2039
2040extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
2041extern int install_special_mapping(struct mm_struct *mm,
2042                                   unsigned long addr, unsigned long len,
2043                                   unsigned long flags, struct page **pages);
2044
2045extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
2046
2047extern unsigned long mmap_region(struct file *file, unsigned long addr,
2048        unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2049         struct list_head *uf);
2050extern unsigned long do_mmap(struct file *file, unsigned long addr,
2051        unsigned long len, unsigned long prot, unsigned long flags,
2052        vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
2053        struct list_head *uf);
2054extern int do_munmap(struct mm_struct *, unsigned long, size_t,
2055                     struct list_head *uf);
2056
2057static inline unsigned long
2058do_mmap_pgoff(struct file *file, unsigned long addr,
2059        unsigned long len, unsigned long prot, unsigned long flags,
2060        unsigned long pgoff, unsigned long *populate, struct list_head *uf)
2061{
2062        return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
2063}
2064
2065#ifdef CONFIG_MMU
2066extern int __mm_populate(unsigned long addr, unsigned long len,
2067                         int ignore_errors);
2068static inline void mm_populate(unsigned long addr, unsigned long len)
2069{
2070        /* Ignore errors */
2071        (void) __mm_populate(addr, len, 1);
2072}
2073#else
2074static inline void mm_populate(unsigned long addr, unsigned long len) {}
2075#endif
2076
2077/* These take the mm semaphore themselves */
2078extern unsigned long vm_brk(unsigned long, unsigned long);
2079extern unsigned long vm_brk_flags(unsigned long, unsigned long, unsigned long);
2080extern int vm_munmap(unsigned long, size_t);
2081extern unsigned long vm_mmap(struct file *, unsigned long,
2082        unsigned long, unsigned long,
2083        unsigned long, unsigned long);
2084
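/*
 * Illustrative sketch (not taken from kernel sources): in-kernel users that
 * need a temporary userspace mapping of a file go through these wrappers so
 * the mmap_sem handling stays in one place.  vm_mmap() takes the file, an
 * address hint, length, prot, flags and the byte offset:
 *
 *	addr = vm_mmap(file, 0, len, PROT_READ, MAP_PRIVATE, 0);
 *	if (IS_ERR_VALUE(addr))
 *		return (long)addr;	the negative errno
 *	...
 *	vm_munmap(addr, len);
 */
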
2085struct vm_unmapped_area_info {
2086#define VM_UNMAPPED_AREA_TOPDOWN 1
2087        unsigned long flags;
2088        unsigned long length;
2089        unsigned long low_limit;
2090        unsigned long high_limit;
2091        unsigned long align_mask;
2092        unsigned long align_offset;
2093};
2094
2095extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
2096extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
2097
2098/*
2099 * Search for an unmapped address range.
2100 *
2101 * We are looking for a range that:
2102 * - does not intersect with any VMA;
2103 * - is contained within the [low_limit, high_limit) interval;
2104 * - is at least the desired size;
2105 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
2106 */
2107static inline unsigned long
2108vm_unmapped_area(struct vm_unmapped_area_info *info)
2109{
2110        if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
2111                return unmapped_area(info);
2112        else
2113                return unmapped_area_topdown(info);
2114}
2115
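/*
 * Illustrative sketch of an arch_get_unmapped_area() style caller: fill in
 * the constraints and let the common code find a hole.  This searches
 * bottom-up; set VM_UNMAPPED_AREA_TOPDOWN in flags to search downwards from
 * high_limit instead:
 *
 *	struct vm_unmapped_area_info info;
 *
 *	info.flags = 0;
 *	info.length = len;
 *	info.low_limit = mm->mmap_base;
 *	info.high_limit = TASK_SIZE;
 *	info.align_mask = 0;
 *	info.align_offset = 0;
 *	return vm_unmapped_area(&info);
 */
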
2116/* truncate.c */
2117extern void truncate_inode_pages(struct address_space *, loff_t);
2118extern void truncate_inode_pages_range(struct address_space *,
2119                                       loff_t lstart, loff_t lend);
2120extern void truncate_inode_pages_final(struct address_space *);
2121
2122/* generic vm_area_ops exported for stackable file systems */
2123extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
2124extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
2125
2126/* mm/page-writeback.c */
2127int write_one_page(struct page *page, int wait);
2128void task_dirty_inc(struct task_struct *tsk);
2129
2130/* readahead.c */
2131#define VM_MAX_READAHEAD        128     /* kbytes */
2132#define VM_MIN_READAHEAD        16      /* kbytes (includes current page) */
2133
2134int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
2135                        pgoff_t offset, unsigned long nr_to_read);
2136
2137void page_cache_sync_readahead(struct address_space *mapping,
2138                               struct file_ra_state *ra,
2139                               struct file *filp,
2140                               pgoff_t offset,
2141                               unsigned long size);
2142
2143void page_cache_async_readahead(struct address_space *mapping,
2144                                struct file_ra_state *ra,
2145                                struct file *filp,
2146                                struct page *pg,
2147                                pgoff_t offset,
2148                                unsigned long size);
2149
2150unsigned long ra_submit(struct file_ra_state *ra,
2151                        struct address_space *mapping,
2152                        struct file *filp);
2153
2154extern unsigned long stack_guard_gap;
2155
2156/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
2157extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
2158
2159/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
2160extern int expand_downwards(struct vm_area_struct *vma,
2161                unsigned long address);
2162#if VM_GROWSUP
2163extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
2164#else
2165  #define expand_upwards(vma, address) do { } while (0)
2166#endif
2167
2168/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
2169extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
2170extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
2171                                             struct vm_area_struct **pprev);
2172
2173/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
2174   NULL if none.  Assume start_addr < end_addr. */
2175static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
2176{
2177        struct vm_area_struct *vma = find_vma(mm, start_addr);
2178
2179        if (vma && end_addr <= vma->vm_start)
2180                vma = NULL;
2181        return vma;
2182}
2183
2184static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
2185{
2186        unsigned long vm_start = vma->vm_start;
2187
2188        if (vma->vm_flags & VM_GROWSDOWN) {
2189                vm_start -= stack_guard_gap;
2190                if (vm_start > vma->vm_start)
2191                        vm_start = 0;
2192        }
2193        return vm_start;
2194}
2195
2196static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
2197{
2198        unsigned long vm_end = vma->vm_end;
2199
2200        if (vma->vm_flags & VM_GROWSUP) {
2201                vm_end += stack_guard_gap;
2202                if (vm_end < vma->vm_end)
2203                        vm_end = -PAGE_SIZE;
2204        }
2205        return vm_end;
2206}
2207
2208static inline unsigned long vma_pages(struct vm_area_struct *vma)
2209{
2210        return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
2211}
2212
2213/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
2214static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
2215                                unsigned long vm_start, unsigned long vm_end)
2216{
2217        struct vm_area_struct *vma = find_vma(mm, vm_start);
2218
2219        if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
2220                vma = NULL;
2221
2222        return vma;
2223}
2224
2225#ifdef CONFIG_MMU
2226pgprot_t vm_get_page_prot(unsigned long vm_flags);
2227void vma_set_page_prot(struct vm_area_struct *vma);
2228#else
2229static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
2230{
2231        return __pgprot(0);
2232}
2233static inline void vma_set_page_prot(struct vm_area_struct *vma)
2234{
2235        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2236}
2237#endif
2238
2239#ifdef CONFIG_NUMA_BALANCING
2240unsigned long change_prot_numa(struct vm_area_struct *vma,
2241                        unsigned long start, unsigned long end);
2242#endif
2243
2244struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
2245int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
2246                        unsigned long pfn, unsigned long size, pgprot_t);
2247int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
2248int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2249                        unsigned long pfn);
2250int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2251                        pfn_t pfn);
2252int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
2253
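/*
 * Illustrative sketch (names "example_mmap" and "example_phys_base" are
 * made up): the classic way for a driver's ->mmap() handler to expose a
 * physical range to userspace is a single remap_pfn_range() call covering
 * the whole VMA:
 *
 *	static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       example_phys_base >> PAGE_SHIFT,
 *				       size, vma->vm_page_prot);
 *	}
 *
 * vm_iomap_memory() is the convenience form of the same thing, taking a
 * physical base and length and doing the size/offset checks itself.
 */
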
2254
2255struct page *follow_page_mask(struct vm_area_struct *vma,
2256                              unsigned long address, unsigned int foll_flags,
2257                              unsigned int *page_mask);
2258
2259static inline struct page *follow_page(struct vm_area_struct *vma,
2260                unsigned long address, unsigned int foll_flags)
2261{
2262        unsigned int unused_page_mask;
2263        return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
2264}
2265
2266#define FOLL_WRITE      0x01    /* check pte is writable */
2267#define FOLL_TOUCH      0x02    /* mark page accessed */
2268#define FOLL_GET        0x04    /* do get_page on page */
2269#define FOLL_DUMP       0x08    /* give error on hole if it would be zero */
2270#define FOLL_FORCE      0x10    /* get_user_pages read/write w/o permission */
2271#define FOLL_NOWAIT     0x20    /* if a disk transfer is needed, start the IO
2272                                 * and return without waiting upon it */
2273#define FOLL_MLOCK      0x40    /* mark page as mlocked */
2274#define FOLL_SPLIT      0x80    /* don't return transhuge pages, split them */
2275#define FOLL_HWPOISON   0x100   /* check page is hwpoisoned */
2276#define FOLL_NUMA       0x200   /* force NUMA hinting page fault */
2277#define FOLL_MIGRATION  0x400   /* wait for page to replace migration entry */
2278#define FOLL_TRIED      0x800   /* a retry, previous pass started an IO */
2279#define FOLL_REMOTE     0x2000  /* we are working on non-current tsk/mm */
2280#define FOLL_COW        0x4000  /* internal GUP flag */
2281
2282typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
2283                        void *data);
2284extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
2285                               unsigned long size, pte_fn_t fn, void *data);
2286
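/*
 * Illustrative sketch (callback name made up): apply_to_page_range()
 * allocates the intermediate page tables as needed and invokes the callback
 * once per pte with the pte lock held; returning non-zero from the callback
 * aborts the walk.  A user counting present ptes could look like:
 *
 *	static int example_count_present(pte_t *pte, pgtable_t token,
 *					 unsigned long addr, void *data)
 *	{
 *		if (pte_present(*pte))
 *			(*(unsigned long *)data)++;
 *		return 0;
 *	}
 *
 *	err = apply_to_page_range(mm, start, size, example_count_present,
 *				  &count);
 */
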
2287#ifdef CONFIG_PROC_FS
2288void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
2289#else
2290static inline void vm_stat_account(struct mm_struct *mm,
2291                        unsigned long flags, struct file *file, long pages)
2292{
2293        mm->total_vm += pages;
2294}
2295#endif /* CONFIG_PROC_FS */
2296
2297#ifdef CONFIG_DEBUG_PAGEALLOC
2298extern bool _debug_pagealloc_enabled;
2299extern void __kernel_map_pages(struct page *page, int numpages, int enable);
2300
2301static inline bool debug_pagealloc_enabled(void)
2302{
2303        return _debug_pagealloc_enabled;
2304}
2305
2306static inline void
2307kernel_map_pages(struct page *page, int numpages, int enable)
2308{
2309        if (!debug_pagealloc_enabled())
2310                return;
2311
2312        __kernel_map_pages(page, numpages, enable);
2313}
2314#ifdef CONFIG_HIBERNATION
2315extern bool kernel_page_present(struct page *page);
2316#endif  /* CONFIG_HIBERNATION */
2317#else   /* CONFIG_DEBUG_PAGEALLOC */
2318static inline void
2319kernel_map_pages(struct page *page, int numpages, int enable) {}
2320#ifdef CONFIG_HIBERNATION
2321static inline bool kernel_page_present(struct page *page) { return true; }
2322#endif  /* CONFIG_HIBERNATION */
2323static inline bool debug_pagealloc_enabled(void)
2324{
2325        return false;
2326}
2327#endif  /* CONFIG_DEBUG_PAGEALLOC */
2328
2329extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
2330#ifdef  __HAVE_ARCH_GATE_AREA
2331int in_gate_area_no_mm(unsigned long addr);
2332int in_gate_area(struct mm_struct *mm, unsigned long addr);
2333#else
2334int in_gate_area_no_mm(unsigned long addr);
2335#define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);})
2336#endif  /* __HAVE_ARCH_GATE_AREA */
2337
2338#ifdef CONFIG_SYSCTL
2339extern int sysctl_drop_caches;
2340int drop_caches_sysctl_handler(struct ctl_table *, int,
2341                                        void __user *, size_t *, loff_t *);
2342#endif
2343
2344unsigned long shrink_slab(struct shrink_control *shrink,
2345                          unsigned long nr_pages_scanned,
2346                          unsigned long lru_pages);
2347
2348#ifndef CONFIG_MMU
2349#define randomize_va_space 0
2350#else
2351extern int randomize_va_space;
2352#endif
2353
2354const char * arch_vma_name(struct vm_area_struct *vma);
2355void print_vma_addr(char *prefix, unsigned long rip);
2356
2357void sparse_mem_maps_populate_node(struct page **map_map,
2358                                   unsigned long pnum_begin,
2359                                   unsigned long pnum_end,
2360                                   unsigned long map_count,
2361                                   int nodeid);
2362
2363struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
2364pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
2365pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
2366pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
2367pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
2368void *vmemmap_alloc_block(unsigned long size, int node);
2369struct vmem_altmap;
2370void *__vmemmap_alloc_block_buf(unsigned long size, int node,
2371                struct vmem_altmap *altmap);
2372static inline void *vmemmap_alloc_block_buf(unsigned long size, int node)
2373{
2374        return __vmemmap_alloc_block_buf(size, node, NULL);
2375}
2376
2377void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
2378int vmemmap_populate_basepages(unsigned long start, unsigned long end,
2379                               int node);
2380int vmemmap_populate(unsigned long start, unsigned long end, int node);
2381void vmemmap_populate_print_last(void);
2382#ifdef CONFIG_MEMORY_HOTPLUG
2383void vmemmap_free(unsigned long start, unsigned long end);
2384#endif
2385void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
2386                                  unsigned long size);
2387
2388enum mf_flags {
2389        MF_COUNT_INCREASED = 1 << 0,
2390        MF_ACTION_REQUIRED = 1 << 1,
2391        MF_MUST_KILL = 1 << 2,
2392        MF_SOFT_OFFLINE = 1 << 3,
2393};
2394extern int memory_failure(unsigned long pfn, int trapno, int flags);
2395extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
2396extern int unpoison_memory(unsigned long pfn);
2397extern int get_hwpoison_page(struct page *page);
2398extern int sysctl_memory_failure_early_kill;
2399extern int sysctl_memory_failure_recovery;
2400extern void shake_page(struct page *p, int access);
2401extern atomic_long_t num_poisoned_pages;
2402extern int soft_offline_page(struct page *page, int flags);
2403
2404#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
2405extern void clear_huge_page(struct page *page,
2406                            unsigned long addr,
2407                            unsigned int pages_per_huge_page);
2408extern void copy_user_huge_page(struct page *dst, struct page *src,
2409                                unsigned long addr, struct vm_area_struct *vma,
2410                                unsigned int pages_per_huge_page);
2411extern long copy_huge_page_from_user(struct page *dst_page,
2412                                const void __user *usr_src,
2413                                unsigned int pages_per_huge_page,
2414                                bool allow_pagefault);
2415#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
2416
2417extern struct page_ext_operations debug_guardpage_ops;
2418extern struct page_ext_operations page_poisoning_ops;
2419
2420#ifdef CONFIG_DEBUG_PAGEALLOC
2421extern unsigned int _debug_guardpage_minorder;
2422extern bool _debug_guardpage_enabled;
2423
2424static inline unsigned int debug_guardpage_minorder(void)
2425{
2426        return _debug_guardpage_minorder;
2427}
2428
2429static inline bool debug_guardpage_enabled(void)
2430{
2431        return _debug_guardpage_enabled;
2432}
2433
2434static inline bool page_is_guard(struct page *page)
2435{
2436        struct page_ext *page_ext;
2437
2438        if (!debug_guardpage_enabled())
2439                return false;
2440
2441        page_ext = lookup_page_ext(page);
2442        return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
2443}
2444#else
2445static inline unsigned int debug_guardpage_minorder(void) { return 0; }
2446static inline bool debug_guardpage_enabled(void) { return false; }
2447static inline bool page_is_guard(struct page *page) { return false; }
2448#endif /* CONFIG_DEBUG_PAGEALLOC */
2449
2450#if MAX_NUMNODES > 1
2451void __init setup_nr_node_ids(void);
2452#else
2453static inline void setup_nr_node_ids(void) {}
2454#endif
2455
2456#endif /* __KERNEL__ */
2457#endif /* _LINUX_MM_H */
2458