linux/include/linux/mm_types.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/mm_types_task.h>

#include <linux/auxvec.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>

#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))


struct address_space;
struct mem_cgroup;
struct hmm;

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * If you allocate the page using alloc_pages(), you can use some of the
 * space in struct page for your own purposes.  The five words in the main
 * union are available, except for bit 0 of the first word which must be
 * kept clear.  Many users use this word to store a pointer to an object
 * which is guaranteed to be aligned.  If you use the same storage as
 * page->mapping, you must restore it to NULL before freeing the page.
 *
 * If your page will not be mapped to userspace, you can also use the four
 * bytes in the mapcount union, but you must call page_mapcount_reset()
 * before freeing it.
 *
 * If you want to use the refcount field, it must be used in such a way
 * that other CPUs temporarily incrementing and then decrementing the
 * refcount does not cause problems.  On receiving the page from
 * alloc_pages(), the refcount will be positive.
 *
 * If you allocate pages of order > 0, you can use some of the fields
 * in each subpage, but you may need to restore some of their values
 * afterwards.
 *
 * SLUB uses cmpxchg_double() to atomically update its freelist and
 * counters.  That requires that freelist & counters be adjacent and
 * double-word aligned.  We align all struct pages to double-word
 * boundaries, and ensure that 'freelist' is aligned within the
 * struct.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment  __aligned(2 * sizeof(unsigned long))
#else
#define _struct_page_alignment
#endif
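
/*
 * Illustrative sketch (not part of this header): one way a caller of
 * alloc_pages() might borrow the union words per the rules above.  The
 * my_*() names are hypothetical; page_private() and set_page_private()
 * are defined later in this file.
 */
#if 0   /* example only, never compiled */
static struct page *my_alloc_tagged(unsigned long tag)
{
        struct page *page = alloc_pages(GFP_KERNEL, 0);

        if (!page)
                return NULL;
        /* ->private is the last of the five union words, so it may hold
         * an arbitrary value; only the first word must keep bit 0 clear
         * (it doubles as the PageTail() marker). */
        set_page_private(page, tag);
        return page;
}

static void my_free_tagged(struct page *page)
{
        set_page_private(page, 0);
        /* Had ->mapping been reused, it would have to be reset to NULL
         * here, before the page goes back to the allocator. */
        __free_pages(page, 0);
}
#endif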

struct page {
        unsigned long flags;            /* Atomic flags, some possibly
                                         * updated asynchronously */
        /*
         * Five words (20/40 bytes) are available in this union.
         * WARNING: bit 0 of the first word is used for PageTail(). That
         * means the other users of this union MUST NOT use the bit to
         * avoid collision and false-positive PageTail().
         */
        union {
                struct {        /* Page cache and anonymous pages */
                        /**
                         * @lru: Pageout list, eg. active_list protected by
                         * pgdat->lru_lock.  Sometimes used as a generic list
                         * by the page owner.
                         */
                        struct list_head lru;
                        /* See page-flags.h for PAGE_MAPPING_FLAGS */
                        struct address_space *mapping;
                        pgoff_t index;          /* Our offset within mapping. */
                        /**
                         * @private: Mapping-private opaque data.
                         * Usually used for buffer_heads if PagePrivate.
                         * Used for swp_entry_t if PageSwapCache.
                         * Indicates order in the buddy system if PageBuddy.
                         */
                        unsigned long private;
                };
                struct {        /* page_pool used by netstack */
                        /**
                         * @dma_addr: might require a 64-bit value even on
                         * 32-bit architectures.
                         */
                        dma_addr_t dma_addr;
                };
                struct {        /* slab, slob and slub */
                        union {
                                struct list_head slab_list;
                                struct {        /* Partial pages */
                                        struct page *next;
#ifdef CONFIG_64BIT
                                        int pages;      /* Nr of pages left */
                                        int pobjects;   /* Approximate count */
#else
                                        short int pages;
                                        short int pobjects;
#endif
                                };
                        };
                        struct kmem_cache *slab_cache; /* not slob */
                        /* Double-word boundary */
                        void *freelist;         /* first free object */
                        union {
                                void *s_mem;    /* slab: first object */
                                unsigned long counters;         /* SLUB */
                                struct {                        /* SLUB */
                                        unsigned inuse:16;
                                        unsigned objects:15;
                                        unsigned frozen:1;
                                };
                        };
                };
                struct {        /* Tail pages of compound page */
                        unsigned long compound_head;    /* Bit zero is set */

                        /* First tail page only */
                        unsigned char compound_dtor;
                        unsigned char compound_order;
                        atomic_t compound_mapcount;
                };
                struct {        /* Second tail page of compound page */
                        unsigned long _compound_pad_1;  /* compound_head */
                        unsigned long _compound_pad_2;
                        struct list_head deferred_list;
                };
                struct {        /* Page table pages */
                        unsigned long _pt_pad_1;        /* compound_head */
                        pgtable_t pmd_huge_pte; /* protected by page->ptl */
                        unsigned long _pt_pad_2;        /* mapping */
                        union {
                                struct mm_struct *pt_mm; /* x86 pgds only */
                                atomic_t pt_frag_refcount; /* powerpc */
                        };
#if ALLOC_SPLIT_PTLOCKS
                        spinlock_t *ptl;
#else
                        spinlock_t ptl;
#endif
                };
                struct {        /* ZONE_DEVICE pages */
                        /** @pgmap: Points to the hosting device page map. */
                        struct dev_pagemap *pgmap;
                        unsigned long hmm_data;
                        unsigned long _zd_pad_1;        /* uses mapping */
                };

                /** @rcu_head: You can use this to free a page by RCU. */
                struct rcu_head rcu_head;
        };

        union {         /* This union is 4 bytes in size. */
                /*
                 * If the page can be mapped to userspace, encodes the number
                 * of times this page is referenced by a page table.
                 */
                atomic_t _mapcount;

                /*
                 * If the page is neither PageSlab nor mappable to userspace,
                 * the value stored here may help determine what this page
                 * is used for.  See page-flags.h for a list of page types
                 * which are currently stored here.
                 */
                unsigned int page_type;

                unsigned int active;            /* SLAB */
                int units;                      /* SLOB */
        };

        /* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
        atomic_t _refcount;

#ifdef CONFIG_MEMCG
        struct mem_cgroup *mem_cgroup;
#endif

        /*
         * On machines where all RAM is mapped into kernel address space,
         * we can simply calculate the virtual address. On machines with
         * highmem some memory is mapped into kernel virtual memory
         * dynamically, so we need a place to store that address.
         * Note that this field could be 16 bits on x86 ... ;)
         *
         * Architectures with slow multiplication can define
         * WANT_PAGE_VIRTUAL in asm/page.h
         */
#if defined(WANT_PAGE_VIRTUAL)
        void *virtual;                  /* Kernel virtual address (NULL if
                                           not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
        int _last_cpupid;
#endif
} _struct_page_alignment;

/*
 * Used for sizing the vmemmap region on some architectures
 */
#define STRUCT_PAGE_MAX_SHIFT   (order_base_2(sizeof(struct page)))

#define PAGE_FRAG_CACHE_MAX_SIZE        __ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER       get_order(PAGE_FRAG_CACHE_MAX_SIZE)

#define page_private(page)              ((page)->private)
#define set_page_private(page, v)       ((page)->private = (v))

struct page_frag_cache {
        void * va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
        __u16 offset;
        __u16 size;
#else
        __u32 offset;
#endif
        /* we maintain a pagecount bias, so that we don't dirty the cache
         * line containing page->_refcount every time we allocate a fragment.
         */
        unsigned int            pagecnt_bias;
        bool pfmemalloc;
};
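
/*
 * Illustrative sketch (not part of this header): the pagecnt_bias above
 * is what lets page_frag_alloc() (declared in <linux/gfp.h>) carve many
 * small fragments out of one page while only rarely touching the cache
 * line holding page->_refcount.  The my_*() names are hypothetical.
 */
#if 0   /* example only, never compiled */
static struct page_frag_cache my_frag_cache;    /* typically per-CPU */

static void *my_get_frag(unsigned int len)
{
        /* Refills the cached page (and re-arms pagecnt_bias) only when
         * the current one is exhausted. */
        return page_frag_alloc(&my_frag_cache, len, GFP_ATOMIC);
}

static void my_put_frag(void *buf)
{
        page_frag_free(buf);    /* drops one reference on the backing page */
}
#endif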

typedef unsigned long vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
        struct rb_node  vm_rb;          /* link in global region tree */
        vm_flags_t      vm_flags;       /* VMA vm_flags */
        unsigned long   vm_start;       /* start address of region */
        unsigned long   vm_end;         /* region initialised to here */
        unsigned long   vm_top;         /* region allocated to here */
        unsigned long   vm_pgoff;       /* the offset in vm_file corresponding to vm_start */
        struct file     *vm_file;       /* the backing file or NULL */

        int             vm_usage;       /* region usage count (access under nommu_region_sem) */
        bool            vm_icache_flushed : 1; /* true if the icache has been flushed for
                                                * this region */
};

#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
        struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */

/*
 * This struct describes a virtual memory area. There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
        /* The first cache line has the info for VMA tree walking. */

        unsigned long vm_start;         /* Our start address within vm_mm. */
        unsigned long vm_end;           /* The first byte after our end address
                                           within vm_mm. */

        /* linked list of VM areas per task, sorted by address */
        struct vm_area_struct *vm_next, *vm_prev;

        struct rb_node vm_rb;

        /*
         * Largest free memory gap in bytes to the left of this VMA.
         * Either between this VMA and vma->vm_prev, or between one of the
         * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
         * get_unmapped_area find a free area of the right size.
         */
        unsigned long rb_subtree_gap;

        /* Second cache line starts here. */

        struct mm_struct *vm_mm;        /* The address space we belong to. */
        pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
        unsigned long vm_flags;         /* Flags, see mm.h. */

        /*
         * For areas with an address space and backing store,
         * linkage into the address_space->i_mmap interval tree.
         */
        struct {
                struct rb_node rb;
                unsigned long rb_subtree_last;
        } shared;

        /*
         * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
         * list, after a COW of one of the file pages.  A MAP_SHARED vma
         * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
         * or brk vma (with NULL file) can only be in an anon_vma list.
         */
        struct list_head anon_vma_chain; /* Serialized by mmap_sem &
                                          * page_table_lock */
        struct anon_vma *anon_vma;      /* Serialized by page_table_lock */

        /* Function pointers to deal with this struct. */
        const struct vm_operations_struct *vm_ops;

        /* Information about our backing store: */
        unsigned long vm_pgoff;         /* Offset (within vm_file) in PAGE_SIZE
                                           units */
        struct file * vm_file;          /* File we map to (can be NULL). */
        void * vm_private_data;         /* was vm_pte (shared mem) */

        atomic_long_t swap_readahead_info;
#ifndef CONFIG_MMU
        struct vm_region *vm_region;    /* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
        struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
#endif
        struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
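
/*
 * Illustrative sketch (not part of this header): the vm_next list and
 * vm_rb tree above are stabilized by mmap_sem, so a walk over a task's
 * VMAs looks roughly like this (my_dump_vmas() is hypothetical):
 */
#if 0   /* example only, never compiled */
static void my_dump_vmas(struct mm_struct *mm)
{
        struct vm_area_struct *vma;

        down_read(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next)
                pr_info("%08lx-%08lx flags=%lx\n",
                        vma->vm_start, vma->vm_end, vma->vm_flags);
        up_read(&mm->mmap_sem);
}
#endif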

struct core_thread {
        struct task_struct *task;
        struct core_thread *next;
};

struct core_state {
        atomic_t nr_threads;
        struct core_thread dumper;
        struct completion startup;
};

struct kioctx_table;
struct mm_struct {
        struct {
                struct vm_area_struct *mmap;            /* list of VMAs */
                struct rb_root mm_rb;
                u64 vmacache_seqnum;                   /* per-thread vmacache */
#ifdef CONFIG_MMU
                unsigned long (*get_unmapped_area) (struct file *filp,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags);
#endif
                unsigned long mmap_base;        /* base of mmap area */
                unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
                /* Base addresses for compatible mmap() */
                unsigned long mmap_compat_base;
                unsigned long mmap_compat_legacy_base;
#endif
                unsigned long task_size;        /* size of task vm space */
                unsigned long highest_vm_end;   /* highest vma end address */
                pgd_t * pgd;

                /**
                 * @mm_users: The number of users including userspace.
                 *
                 * Use mmget()/mmget_not_zero()/mmput() to modify. When this
                 * drops to 0 (i.e. when the task exits and there are no other
                 * temporary reference holders), we also release a reference on
                 * @mm_count (which may then free the &struct mm_struct if
                 * @mm_count also drops to 0).
                 */
                atomic_t mm_users;

                /**
                 * @mm_count: The number of references to &struct mm_struct
                 * (@mm_users count as 1).
                 *
                 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
                 * &struct mm_struct is freed.
                 */
                atomic_t mm_count;

#ifdef CONFIG_MMU
                atomic_long_t pgtables_bytes;   /* PTE page table pages */
#endif
                int map_count;                  /* number of VMAs */

                spinlock_t page_table_lock; /* Protects page tables and some
                                             * counters
                                             */
                struct rw_semaphore mmap_sem;

                struct list_head mmlist; /* List of maybe swapped mm's. These
                                          * are globally strung together off
                                          * init_mm.mmlist, and are protected
                                          * by mmlist_lock
                                          */

                unsigned long hiwater_rss; /* High-watermark of RSS usage */
                unsigned long hiwater_vm;  /* High-water virtual memory usage */

                unsigned long total_vm;    /* Total pages mapped */
                unsigned long locked_vm;   /* Pages that have PG_mlocked set */
                atomic64_t    pinned_vm;   /* Refcount permanently increased */
                unsigned long data_vm;     /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
                unsigned long exec_vm;     /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
                unsigned long stack_vm;    /* VM_STACK */
                unsigned long def_flags;

                spinlock_t arg_lock; /* protect the below fields */
                unsigned long start_code, end_code, start_data, end_data;
                unsigned long start_brk, brk, start_stack;
                unsigned long arg_start, arg_end, env_start, env_end;

                unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

                /*
                 * Special counters, in some configurations protected by the
                 * page_table_lock, in other configurations by being atomic.
                 */
                struct mm_rss_stat rss_stat;

                struct linux_binfmt *binfmt;

                /* Architecture-specific MM context */
                mm_context_t context;

                unsigned long flags; /* Must use atomic bitops to access */

                struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_MEMBARRIER
                atomic_t membarrier_state;
#endif
#ifdef CONFIG_AIO
                spinlock_t                      ioctx_lock;
                struct kioctx_table __rcu       *ioctx_table;
#endif
#ifdef CONFIG_MEMCG
                /*
                 * "owner" points to a task that is regarded as the canonical
                 * user/owner of this mm. All of the following must be true in
                 * order for it to be changed:
                 *
                 * current == mm->owner
                 * current->mm != mm
                 * new_owner->mm == mm
                 * new_owner->alloc_lock is held
                 */
                struct task_struct __rcu *owner;
#endif
                struct user_namespace *user_ns;

                /* store ref to file /proc/<pid>/exe symlink points to */
                struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
                struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
                pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
                /*
                 * numa_next_scan is the next time that the PTEs will be marked
                 * pte_numa. NUMA hinting faults will gather statistics and
                 * migrate pages to new nodes if necessary.
                 */
                unsigned long numa_next_scan;

                /* Restart point for scanning and setting pte_numa */
                unsigned long numa_scan_offset;

                /* numa_scan_seq prevents two threads setting pte_numa */
                int numa_scan_seq;
#endif
                /*
                 * An operation with batched TLB flushing is going on. Anything
                 * that can move process memory needs to flush the TLB when
                 * moving a PROT_NONE or PROT_NUMA mapped page.
                 */
                atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
                /* See flush_tlb_batched_pending() */
                bool tlb_flush_batched;
#endif
                struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
                atomic_long_t hugetlb_usage;
#endif
                struct work_struct async_put_work;

#if IS_ENABLED(CONFIG_HMM)
                /* HMM needs to track a few things per mm */
                struct hmm *hmm;
#endif
        } __randomize_layout;

        /*
         * The mm_cpumask needs to be at the end of mm_struct, because it
         * is dynamically sized based on nr_cpu_ids.
         */
        unsigned long cpu_bitmap[];
};
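
/*
 * Illustrative sketch (not part of this header): how the two refcounts
 * above differ in practice.  mmget_not_zero()/mmput() and
 * mmgrab()/mmdrop() live in <linux/sched/mm.h>; the my_*() functions
 * are hypothetical.
 */
#if 0   /* example only, never compiled */
static void my_touch_address_space(struct mm_struct *mm)
{
        if (!mmget_not_zero(mm))        /* mm_users: pins VMAs/page tables */
                return;
        /* ... safe to operate on mm's mappings here ... */
        mmput(mm);
}

static void my_remember_mm(struct mm_struct *mm)
{
        mmgrab(mm);     /* mm_count: keeps only the struct mm_struct alive */
        /* ... mm may exit meanwhile; its mappings are NOT pinned ... */
        mmdrop(mm);
}
#endif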

extern struct mm_struct init_mm;

/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
        unsigned long cpu_bitmap = (unsigned long)mm;

        cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
        cpumask_clear((struct cpumask *)cpu_bitmap);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
        return (struct cpumask *)&mm->cpu_bitmap;
}
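
/*
 * Illustrative sketch (not part of this header): arch TLB-shootdown code
 * typically consults mm_cpumask() to IPI only the CPUs that have run
 * this mm.  my_flush_one() and my_flush_others() are hypothetical.
 */
#if 0   /* example only, never compiled */
static void my_flush_one(void *info)
{
        /* per-CPU TLB invalidation for the mm passed in 'info' */
}

static void my_flush_others(struct mm_struct *mm)
{
        smp_call_function_many(mm_cpumask(mm), my_flush_one, mm, 1);
}
#endif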

struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                                unsigned long start, unsigned long end);
extern void tlb_finish_mmu(struct mmu_gather *tlb,
                                unsigned long start, unsigned long end);

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
        atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
        atomic_inc(&mm->tlb_flush_pending);
        /*
         * The only time this value is relevant is when there are indeed pages
         * to flush. And we'll only flush pages after changing them, which
         * requires the PTL.
         *
         * So the ordering here is:
         *
         *      atomic_inc(&mm->tlb_flush_pending);
         *      spin_lock(&ptl);
         *      ...
         *      set_pte_at();
         *      spin_unlock(&ptl);
         *
         *                              spin_lock(&ptl)
         *                              mm_tlb_flush_pending();
         *                              ....
         *                              spin_unlock(&ptl);
         *
         *      flush_tlb_range();
         *      atomic_dec(&mm->tlb_flush_pending);
         *
         * Because the increment is ordered before the PTL unlock, it is
         * guaranteed to be visible whenever the PTE modification is
         * visible. After all, if there is no PTE modification, nobody cares
         * about TLB flushes either.
         *
         * This very much relies on users (mm_tlb_flush_pending() and
         * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
         * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
         * locks (PPC) the unlock of one doesn't order against the lock of
         * another PTL.
         *
         * The decrement is ordered by the flush_tlb_range(), such that
         * mm_tlb_flush_pending() will not return false unless all flushes have
         * completed.
         */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
        /*
         * See inc_tlb_flush_pending().
         *
         * This cannot be smp_mb__before_atomic() because smp_mb() simply does
         * not order against TLB invalidate completion, which is what we need.
         *
         * Therefore we must rely on tlb_flush_*() to guarantee order.
         */
        atomic_dec(&mm->tlb_flush_pending);
}
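
/*
 * Illustrative sketch (not part of this header): the shape of a
 * batched-unmap path obeying the ordering rules documented in
 * inc_tlb_flush_pending() above.  Variable names are illustrative.
 */
#if 0   /* example only, never compiled */
        inc_tlb_flush_pending(mm);
        spin_lock(ptl);
        /* ... clear or modify the PTEs covered by this PTL ... */
        spin_unlock(ptl);
        flush_tlb_range(vma, start, end);       /* completes the invalidate */
        dec_tlb_flush_pending(mm);              /* ordered after the flush */
#endif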

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
        /*
         * Must be called after having acquired the PTL; orders against that
         * PTL's release and therefore ensures that if we observe the modified
         * PTE we must also observe the increment from inc_tlb_flush_pending().
         *
         * That is, it only guarantees to return true if there is a flush
         * pending for _this_ PTL.
         */
        return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
        /*
         * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
         * for which there is a TLB flush pending in order to guarantee
         * we've seen both that PTE modification and the increment.
         *
         * (no requirement on actually still holding the PTL, that is irrelevant)
         */
        return atomic_read(&mm->tlb_flush_pending) > 1;
}

struct vm_fault;

/**
 * typedef vm_fault_t - Return type for page fault handlers.
 *
 * Page fault handlers return a bitmask of %VM_FAULT values.
 */
typedef __bitwise unsigned int vm_fault_t;

/**
 * enum vm_fault_reason - Page fault handlers return a bitmask of
 * these values to tell the core VM what happened when handling the
 * fault. Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 *
 * @VM_FAULT_OOM:               Out Of Memory
 * @VM_FAULT_SIGBUS:            Bad access
 * @VM_FAULT_MAJOR:             Page read from storage
 * @VM_FAULT_WRITE:             Special case for get_user_pages
 * @VM_FAULT_HWPOISON:          Hit poisoned small page
 * @VM_FAULT_HWPOISON_LARGE:    Hit poisoned large page. Index encoded
 *                              in upper bits
 * @VM_FAULT_SIGSEGV:           segmentation fault
 * @VM_FAULT_NOPAGE:            ->fault installed the pte, did not return a page
 * @VM_FAULT_LOCKED:            ->fault locked the returned page
 * @VM_FAULT_RETRY:             ->fault blocked, must retry
 * @VM_FAULT_FALLBACK:          huge page fault failed, fall back to small
 * @VM_FAULT_DONE_COW:          ->fault has fully handled COW
 * @VM_FAULT_NEEDDSYNC:         ->fault did not modify page tables and needs
 *                              fsync() to complete (for synchronous page faults
 *                              in DAX)
 * @VM_FAULT_HINDEX_MASK:       mask HINDEX value
 */
enum vm_fault_reason {
        VM_FAULT_OOM            = (__force vm_fault_t)0x000001,
        VM_FAULT_SIGBUS         = (__force vm_fault_t)0x000002,
        VM_FAULT_MAJOR          = (__force vm_fault_t)0x000004,
        VM_FAULT_WRITE          = (__force vm_fault_t)0x000008,
        VM_FAULT_HWPOISON       = (__force vm_fault_t)0x000010,
        VM_FAULT_HWPOISON_LARGE = (__force vm_fault_t)0x000020,
        VM_FAULT_SIGSEGV        = (__force vm_fault_t)0x000040,
        VM_FAULT_NOPAGE         = (__force vm_fault_t)0x000100,
        VM_FAULT_LOCKED         = (__force vm_fault_t)0x000200,
        VM_FAULT_RETRY          = (__force vm_fault_t)0x000400,
        VM_FAULT_FALLBACK       = (__force vm_fault_t)0x000800,
        VM_FAULT_DONE_COW       = (__force vm_fault_t)0x001000,
        VM_FAULT_NEEDDSYNC      = (__force vm_fault_t)0x002000,
        VM_FAULT_HINDEX_MASK    = (__force vm_fault_t)0x0f0000,
};
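
/*
 * Illustrative sketch (not part of this header): a ->fault handler (see
 * struct vm_operations_struct in <linux/mm.h>) reports its outcome by
 * returning a mask of the bits above.  my_fault()/my_lookup_page() are
 * hypothetical; struct vm_fault is only forward-declared in this file.
 */
#if 0   /* example only, never compiled */
static vm_fault_t my_fault(struct vm_fault *vmf)
{
        struct page *page = my_lookup_page(vmf);

        if (!page)
                return VM_FAULT_SIGBUS;         /* bad access */
        if (PageHWPoison(page))
                return VM_FAULT_HWPOISON;
        vmf->page = page;
        return VM_FAULT_LOCKED;                 /* returned page is locked */
}
#endif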

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
#define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)

#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS |        \
                        VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON |  \
                        VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)

#define VM_FAULT_RESULT_TRACE \
        { VM_FAULT_OOM,                 "OOM" },        \
        { VM_FAULT_SIGBUS,              "SIGBUS" },     \
        { VM_FAULT_MAJOR,               "MAJOR" },      \
        { VM_FAULT_WRITE,               "WRITE" },      \
        { VM_FAULT_HWPOISON,            "HWPOISON" },   \
        { VM_FAULT_HWPOISON_LARGE,      "HWPOISON_LARGE" },     \
        { VM_FAULT_SIGSEGV,             "SIGSEGV" },    \
        { VM_FAULT_NOPAGE,              "NOPAGE" },     \
        { VM_FAULT_LOCKED,              "LOCKED" },     \
        { VM_FAULT_RETRY,               "RETRY" },      \
        { VM_FAULT_FALLBACK,            "FALLBACK" },   \
        { VM_FAULT_DONE_COW,            "DONE_COW" },   \
        { VM_FAULT_NEEDDSYNC,           "NEEDDSYNC" }

struct vm_special_mapping {
        const char *name;       /* The name, e.g. "[vdso]". */

        /*
         * If .fault is not provided, this points to a
         * NULL-terminated array of pages that back the special mapping.
         *
         * This must not be NULL unless .fault is provided.
         */
        struct page **pages;

        /*
         * If non-NULL, then this is called to resolve page faults
         * on the special mapping.  If used, .pages is not checked.
         */
        vm_fault_t (*fault)(const struct vm_special_mapping *sm,
                                struct vm_area_struct *vma,
                                struct vm_fault *vmf);

        int (*mremap)(const struct vm_special_mapping *sm,
                     struct vm_area_struct *new_vma);
};
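
/*
 * Illustrative sketch (not part of this header): installing a vdso-style
 * special mapping.  _install_special_mapping() is declared in
 * <linux/mm.h>; the my_* names are hypothetical.
 */
#if 0   /* example only, never compiled */
static struct page *my_pages[2];        /* NULL-terminated by the [1] slot */

static const struct vm_special_mapping my_mapping = {
        .name   = "[my_vdso]",
        .pages  = my_pages,             /* used because .fault is NULL */
};

/* From arch setup code, with mm's mmap_sem held for write: */
vma = _install_special_mapping(mm, addr, PAGE_SIZE,
                               VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
                               &my_mapping);
#endif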

enum tlb_flush_reason {
        TLB_FLUSH_ON_TASK_SWITCH,
        TLB_REMOTE_SHOOTDOWN,
        TLB_LOCAL_SHOOTDOWN,
        TLB_LOCAL_MM_SHOOTDOWN,
        TLB_REMOTE_SEND_IPI,
        NR_TLB_FLUSH_REASONS,
};

/*
 * A swap entry has to fit into a "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */
typedef struct {
        unsigned long val;
} swp_entry_t;

#endif /* _LINUX_MM_TYPES_H */