linux/include/linux/mm_types.h
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _LINUX_MM_TYPES_H
   3#define _LINUX_MM_TYPES_H
   4
   5#include <linux/mm_types_task.h>
   6
   7#include <linux/auxvec.h>
   8#include <linux/list.h>
   9#include <linux/spinlock.h>
  10#include <linux/rbtree.h>
  11#include <linux/rwsem.h>
  12#include <linux/completion.h>
  13#include <linux/cpumask.h>
  14#include <linux/uprobes.h>
  15#include <linux/page-flags-layout.h>
  16#include <linux/workqueue.h>
  17#include <linux/seqlock.h>
  18
  19#include <asm/mmu.h>
  20
  21#ifndef AT_VECTOR_SIZE_ARCH
  22#define AT_VECTOR_SIZE_ARCH 0
  23#endif
  24#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
  25
  26#define INIT_PASID      0
  27
  28struct address_space;
  29struct mem_cgroup;
  30
  31/*
  32 * Each physical page in the system has a struct page associated with
  33 * it to keep track of whatever it is we are using the page for at the
  34 * moment. Note that we have no way to track which tasks are using
  35 * a page, though if it is a pagecache page, rmap structures can tell us
  36 * who is mapping it.
  37 *
  38 * If you allocate the page using alloc_pages(), you can use some of the
  39 * space in struct page for your own purposes.  The five words in the main
  40 * union are available, except for bit 0 of the first word which must be
  41 * kept clear.  Many users use this word to store a pointer to an object
  42 * which is guaranteed to be aligned.  If you use the same storage as
  43 * page->mapping, you must restore it to NULL before freeing the page.
  44 *
  45 * If your page will not be mapped to userspace, you can also use the four
  46 * bytes in the mapcount union, but you must call page_mapcount_reset()
  47 * before freeing it.
  48 *
  49 * If you want to use the refcount field, it must be used in such a way
  50 * that other CPUs temporarily incrementing and then decrementing the
  51 * refcount does not cause problems.  On receiving the page from
  52 * alloc_pages(), the refcount will be positive.
  53 *
  54 * If you allocate pages of order > 0, you can use some of the fields
  55 * in each subpage, but you may need to restore some of their values
  56 * afterwards.
  57 *
  58 * SLUB uses cmpxchg_double() to atomically update its freelist and
  59 * counters.  That requires that freelist & counters be adjacent and
  60 * double-word aligned.  We align all struct pages to double-word
  61 * boundaries, and ensure that 'freelist' is aligned within the
  62 * struct.
  63 */
  64#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
  65#define _struct_page_alignment  __aligned(2 * sizeof(unsigned long))
  66#else
  67#define _struct_page_alignment
  68#endif
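/*
 * Illustrative sketch (not part of the original header): the usage pattern
 * the comment above describes for an alloc_pages() user that borrows the
 * first union word (the ->mapping slot) for its own aligned object.
 * my_object()/my_insert() are hypothetical helpers; the hard rules are
 * that bit 0 of the word stays clear (any aligned pointer satisfies this)
 * and that ->mapping is put back to NULL before the page is freed.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *
 *	page->mapping = (struct address_space *)my_object();
 *	my_insert(page);
 *	...
 *	page->mapping = NULL;
 *	__free_pages(page, 0);
 */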
  69
  70struct page {
  71        unsigned long flags;            /* Atomic flags, some possibly
  72                                         * updated asynchronously */
  73        /*
  74         * Five words (20/40 bytes) are available in this union.
  75         * WARNING: bit 0 of the first word is used for PageTail(). That
  76         * means the other users of this union MUST NOT use the bit to
  77         * avoid collision and false-positive PageTail().
  78         */
  79        union {
  80                struct {        /* Page cache and anonymous pages */
  81                        /**
   82                         * @lru: Pageout list, e.g. active_list protected by
  83                         * lruvec->lru_lock.  Sometimes used as a generic list
  84                         * by the page owner.
  85                         */
  86                        struct list_head lru;
  87                        /* See page-flags.h for PAGE_MAPPING_FLAGS */
  88                        struct address_space *mapping;
  89                        pgoff_t index;          /* Our offset within mapping. */
  90                        /**
  91                         * @private: Mapping-private opaque data.
  92                         * Usually used for buffer_heads if PagePrivate.
  93                         * Used for swp_entry_t if PageSwapCache.
  94                         * Indicates order in the buddy system if PageBuddy.
  95                         */
  96                        unsigned long private;
  97                };
  98                struct {        /* page_pool used by netstack */
  99                        /**
 100                         * @dma_addr: might require a 64-bit value on
 101                         * 32-bit architectures.
 102                         */
 103                        unsigned long dma_addr[2];
 104                };
 105                struct {        /* slab, slob and slub */
 106                        union {
 107                                struct list_head slab_list;
 108                                struct {        /* Partial pages */
 109                                        struct page *next;
 110#ifdef CONFIG_64BIT
 111                                        int pages;      /* Nr of pages left */
 112                                        int pobjects;   /* Approximate count */
 113#else
 114                                        short int pages;
 115                                        short int pobjects;
 116#endif
 117                                };
 118                        };
 119                        struct kmem_cache *slab_cache; /* not slob */
 120                        /* Double-word boundary */
 121                        void *freelist;         /* first free object */
 122                        union {
 123                                void *s_mem;    /* slab: first object */
 124                                unsigned long counters;         /* SLUB */
 125                                struct {                        /* SLUB */
 126                                        unsigned inuse:16;
 127                                        unsigned objects:15;
 128                                        unsigned frozen:1;
 129                                };
 130                        };
 131                };
 132                struct {        /* Tail pages of compound page */
 133                        unsigned long compound_head;    /* Bit zero is set */
 134
 135                        /* First tail page only */
 136                        unsigned char compound_dtor;
 137                        unsigned char compound_order;
 138                        atomic_t compound_mapcount;
 139                        unsigned int compound_nr; /* 1 << compound_order */
 140                };
 141                struct {        /* Second tail page of compound page */
 142                        unsigned long _compound_pad_1;  /* compound_head */
 143                        atomic_t hpage_pinned_refcount;
 144                        /* For both global and memcg */
 145                        struct list_head deferred_list;
 146                };
 147                struct {        /* Page table pages */
 148                        unsigned long _pt_pad_1;        /* compound_head */
 149                        pgtable_t pmd_huge_pte; /* protected by page->ptl */
 150                        unsigned long _pt_pad_2;        /* mapping */
 151                        union {
 152                                struct mm_struct *pt_mm; /* x86 pgds only */
 153                                atomic_t pt_frag_refcount; /* powerpc */
 154                        };
 155#if ALLOC_SPLIT_PTLOCKS
 156                        spinlock_t *ptl;
 157#else
 158                        spinlock_t ptl;
 159#endif
 160                };
 161                struct {        /* ZONE_DEVICE pages */
 162                        /** @pgmap: Points to the hosting device page map. */
 163                        struct dev_pagemap *pgmap;
 164                        void *zone_device_data;
 165                        /*
 166                         * ZONE_DEVICE private pages are counted as being
 167                         * mapped so the next 3 words hold the mapping, index,
 168                         * and private fields from the source anonymous or
 169                         * page cache page while the page is migrated to device
 170                         * private memory.
 171                         * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
 172                         * use the mapping, index, and private fields when
 173                         * pmem backed DAX files are mapped.
 174                         */
 175                };
 176
 177                /** @rcu_head: You can use this to free a page by RCU. */
 178                struct rcu_head rcu_head;
 179        };
 180
 181        union {         /* This union is 4 bytes in size. */
 182                /*
 183                 * If the page can be mapped to userspace, encodes the number
 184                 * of times this page is referenced by a page table.
 185                 */
 186                atomic_t _mapcount;
 187
 188                /*
 189                 * If the page is neither PageSlab nor mappable to userspace,
 190                 * the value stored here may help determine what this page
 191                 * is used for.  See page-flags.h for a list of page types
 192                 * which are currently stored here.
 193                 */
 194                unsigned int page_type;
 195
 196                unsigned int active;            /* SLAB */
 197                int units;                      /* SLOB */
 198        };
 199
 200        /* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
 201        atomic_t _refcount;
 202
 203#ifdef CONFIG_MEMCG
 204        unsigned long memcg_data;
 205#endif
 206
 207        /*
 208         * On machines where all RAM is mapped into kernel address space,
 209         * we can simply calculate the virtual address. On machines with
 210         * highmem some memory is mapped into kernel virtual memory
 211         * dynamically, so we need a place to store that address.
 212         * Note that this field could be 16 bits on x86 ... ;)
 213         *
 214         * Architectures with slow multiplication can define
 215         * WANT_PAGE_VIRTUAL in asm/page.h
 216         */
 217#if defined(WANT_PAGE_VIRTUAL)
 218        void *virtual;                  /* Kernel virtual address (NULL if
 219                                           not kmapped, ie. highmem) */
 220#endif /* WANT_PAGE_VIRTUAL */
 221
 222#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
 223        int _last_cpupid;
 224#endif
 225} _struct_page_alignment;
 226
 227static inline atomic_t *compound_mapcount_ptr(struct page *page)
 228{
 229        return &page[1].compound_mapcount;
 230}
 231
 232static inline atomic_t *compound_pincount_ptr(struct page *page)
 233{
 234        return &page[2].hpage_pinned_refcount;
 235}
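/*
 * Illustrative sketch (assumption, not part of the original header): both
 * helpers above index into the tail pages of a compound page, so they must
 * only be used on compound pages.  The stored compound mapcount is biased
 * by -1 (see compound_mapcount() in <linux/mm.h>), hence the +1 below:
 *
 *	struct page *head = compound_head(page);
 *	int mapcount = atomic_read(compound_mapcount_ptr(head)) + 1;
 *	int pincount = atomic_read(compound_pincount_ptr(head));
 */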
 236
 237/*
 238 * Used for sizing the vmemmap region on some architectures
 239 */
 240#define STRUCT_PAGE_MAX_SHIFT   (order_base_2(sizeof(struct page)))
 241
 242#define PAGE_FRAG_CACHE_MAX_SIZE        __ALIGN_MASK(32768, ~PAGE_MASK)
 243#define PAGE_FRAG_CACHE_MAX_ORDER       get_order(PAGE_FRAG_CACHE_MAX_SIZE)
 244
 245#define page_private(page)              ((page)->private)
 246
 247static inline void set_page_private(struct page *page, unsigned long private)
 248{
 249        page->private = private;
 250}
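/*
 * Illustrative sketch (not part of the original header): page_private() and
 * set_page_private() are the usual way to stash per-page data for pages you
 * own; my_ctx is a hypothetical driver context:
 *
 *	set_page_private(page, (unsigned long)my_ctx);
 *	...
 *	my_ctx = (struct my_ctx *)page_private(page);
 *	set_page_private(page, 0);
 *
 * The core VM makes the same use of ->private for buffer_heads, swap
 * entries and buddy order, as documented in struct page above.
 */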
 251
 252struct page_frag_cache {
 253        void * va;
 254#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
 255        __u16 offset;
 256        __u16 size;
 257#else
 258        __u32 offset;
 259#endif
  260        /* we maintain a pagecount bias, so that we don't dirty the cache line
 261         * containing page->_refcount every time we allocate a fragment.
 262         */
 263        unsigned int            pagecnt_bias;
 264        bool pfmemalloc;
 265};
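/*
 * Illustrative sketch of the pagecnt_bias idea (an assumption about the
 * allocator's behaviour, not part of this header): on refill the cache takes
 * a large batch of page references once, records the batch size in
 * @pagecnt_bias, and then hands out fragments by decrementing the bias
 * locally, so page->_refcount (and its cacheline) is untouched on the fast
 * path.  BATCH is a stand-in for whatever bias the implementation picks.
 *
 *	page_ref_add(page, BATCH - 1);		refill: one atomic op
 *	nc->pagecnt_bias = BATCH;
 *	...
 *	nc->pagecnt_bias--;			per fragment: no atomics
 */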
 266
 267typedef unsigned long vm_flags_t;
 268
 269/*
 270 * A region containing a mapping of a non-memory backed file under NOMMU
 271 * conditions.  These are held in a global tree and are pinned by the VMAs that
 272 * map parts of them.
 273 */
 274struct vm_region {
 275        struct rb_node  vm_rb;          /* link in global region tree */
 276        vm_flags_t      vm_flags;       /* VMA vm_flags */
 277        unsigned long   vm_start;       /* start address of region */
 278        unsigned long   vm_end;         /* region initialised to here */
 279        unsigned long   vm_top;         /* region allocated to here */
 280        unsigned long   vm_pgoff;       /* the offset in vm_file corresponding to vm_start */
 281        struct file     *vm_file;       /* the backing file or NULL */
 282
 283        int             vm_usage;       /* region usage count (access under nommu_region_sem) */
 284        bool            vm_icache_flushed : 1; /* true if the icache has been flushed for
 285                                                * this region */
 286};
 287
 288#ifdef CONFIG_USERFAULTFD
 289#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
 290struct vm_userfaultfd_ctx {
 291        struct userfaultfd_ctx *ctx;
 292};
 293#else /* CONFIG_USERFAULTFD */
 294#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
 295struct vm_userfaultfd_ctx {};
 296#endif /* CONFIG_USERFAULTFD */
 297
 298/*
 299 * This struct describes a virtual memory area. There is one of these
 300 * per VM-area/task. A VM area is any part of the process virtual memory
 301 * space that has a special rule for the page-fault handlers (ie a shared
 302 * library, the executable area etc).
 303 */
 304struct vm_area_struct {
 305        /* The first cache line has the info for VMA tree walking. */
 306
 307        unsigned long vm_start;         /* Our start address within vm_mm. */
 308        unsigned long vm_end;           /* The first byte after our end address
 309                                           within vm_mm. */
 310
 311        /* linked list of VM areas per task, sorted by address */
 312        struct vm_area_struct *vm_next, *vm_prev;
 313
 314        struct rb_node vm_rb;
 315
 316        /*
 317         * Largest free memory gap in bytes to the left of this VMA.
 318         * Either between this VMA and vma->vm_prev, or between one of the
 319         * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
 320         * get_unmapped_area find a free area of the right size.
 321         */
 322        unsigned long rb_subtree_gap;
 323
 324        /* Second cache line starts here. */
 325
 326        struct mm_struct *vm_mm;        /* The address space we belong to. */
 327
 328        /*
 329         * Access permissions of this VMA.
 330         * See vmf_insert_mixed_prot() for discussion.
 331         */
 332        pgprot_t vm_page_prot;
 333        unsigned long vm_flags;         /* Flags, see mm.h. */
 334
 335        /*
 336         * For areas with an address space and backing store,
 337         * linkage into the address_space->i_mmap interval tree.
 338         */
 339        struct {
 340                struct rb_node rb;
 341                unsigned long rb_subtree_last;
 342        } shared;
 343
 344        /*
 345         * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
 346         * list, after a COW of one of the file pages.  A MAP_SHARED vma
 347         * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
 348         * or brk vma (with NULL file) can only be in an anon_vma list.
 349         */
 350        struct list_head anon_vma_chain; /* Serialized by mmap_lock &
 351                                          * page_table_lock */
 352        struct anon_vma *anon_vma;      /* Serialized by page_table_lock */
 353
 354        /* Function pointers to deal with this struct. */
 355        const struct vm_operations_struct *vm_ops;
 356
 357        /* Information about our backing store: */
 358        unsigned long vm_pgoff;         /* Offset (within vm_file) in PAGE_SIZE
 359                                           units */
 360        struct file * vm_file;          /* File we map to (can be NULL). */
 361        void * vm_private_data;         /* was vm_pte (shared mem) */
 362
 363#ifdef CONFIG_SWAP
 364        atomic_long_t swap_readahead_info;
 365#endif
 366#ifndef CONFIG_MMU
 367        struct vm_region *vm_region;    /* NOMMU mapping region */
 368#endif
 369#ifdef CONFIG_NUMA
 370        struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
 371#endif
 372        struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
 373} __randomize_layout;
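/*
 * Illustrative sketch (not part of the original header): VMAs hang off their
 * mm both as an rbtree (mm->mm_rb) and as the address-sorted
 * vm_next/vm_prev list, which is the easiest way to walk them.  The walk
 * must be done under the mmap_lock; mmap_read_lock()/mmap_read_unlock()
 * are declared in <linux/mmap_lock.h>.
 *
 *	struct vm_area_struct *vma;
 *
 *	mmap_read_lock(mm);
 *	for (vma = mm->mmap; vma; vma = vma->vm_next)
 *		pr_info("%lx-%lx %lx\n", vma->vm_start, vma->vm_end,
 *			vma->vm_flags);
 *	mmap_read_unlock(mm);
 */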
 374
 375struct core_thread {
 376        struct task_struct *task;
 377        struct core_thread *next;
 378};
 379
 380struct core_state {
 381        atomic_t nr_threads;
 382        struct core_thread dumper;
 383        struct completion startup;
 384};
 385
 386struct kioctx_table;
 387struct mm_struct {
 388        struct {
 389                struct vm_area_struct *mmap;            /* list of VMAs */
 390                struct rb_root mm_rb;
 391                u64 vmacache_seqnum;                   /* per-thread vmacache */
 392#ifdef CONFIG_MMU
 393                unsigned long (*get_unmapped_area) (struct file *filp,
 394                                unsigned long addr, unsigned long len,
 395                                unsigned long pgoff, unsigned long flags);
 396#endif
 397                unsigned long mmap_base;        /* base of mmap area */
 398                unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
 399#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
  400                /* Base addresses for compatible mmap() */
 401                unsigned long mmap_compat_base;
 402                unsigned long mmap_compat_legacy_base;
 403#endif
 404                unsigned long task_size;        /* size of task vm space */
 405                unsigned long highest_vm_end;   /* highest vma end address */
 406                pgd_t * pgd;
 407
 408#ifdef CONFIG_MEMBARRIER
 409                /**
 410                 * @membarrier_state: Flags controlling membarrier behavior.
 411                 *
 412                 * This field is close to @pgd to hopefully fit in the same
 413                 * cache-line, which needs to be touched by switch_mm().
 414                 */
 415                atomic_t membarrier_state;
 416#endif
 417
 418                /**
 419                 * @mm_users: The number of users including userspace.
 420                 *
 421                 * Use mmget()/mmget_not_zero()/mmput() to modify. When this
 422                 * drops to 0 (i.e. when the task exits and there are no other
 423                 * temporary reference holders), we also release a reference on
 424                 * @mm_count (which may then free the &struct mm_struct if
 425                 * @mm_count also drops to 0).
 426                 */
 427                atomic_t mm_users;
 428
 429                /**
 430                 * @mm_count: The number of references to &struct mm_struct
 431                 * (@mm_users count as 1).
 432                 *
 433                 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
 434                 * &struct mm_struct is freed.
 435                 */
 436                atomic_t mm_count;
 437
 438                /**
 439                 * @has_pinned: Whether this mm has pinned any pages.  This can
 440                 * be either replaced in the future by @pinned_vm when it
 441                 * becomes stable, or grow into a counter on its own. We're
  442                 * aggressive on this bit now - even if the pinned pages were
 443                 * unpinned later on, we'll still keep this bit set for the
 444                 * lifecycle of this mm just for simplicity.
 445                 */
 446                atomic_t has_pinned;
 447
 448#ifdef CONFIG_MMU
 449                atomic_long_t pgtables_bytes;   /* PTE page table pages */
 450#endif
 451                int map_count;                  /* number of VMAs */
 452
 453                spinlock_t page_table_lock; /* Protects page tables and some
 454                                             * counters
 455                                             */
 456                /*
  457                 * With some kernel configs, mmap_lock currently ends up at
  458                 * offset 0x120 inside 'mm_struct', which is close to optimal:
  459                 * its two hot fields 'count' and 'owner' sit in two different
  460                 * cachelines, so when mmap_lock is highly contended and both
  461                 * fields are accessed frequently, the current layout helps to
  462                 * reduce cache bouncing.
  463                 *
  464                 * So please be careful when adding new fields before
  465                 * mmap_lock, which can easily push the two fields into one
 466                 * cacheline.
 467                 */
 468                struct rw_semaphore mmap_lock;
 469
 470                struct list_head mmlist; /* List of maybe swapped mm's. These
 471                                          * are globally strung together off
 472                                          * init_mm.mmlist, and are protected
 473                                          * by mmlist_lock
 474                                          */
 475
 476
 477                unsigned long hiwater_rss; /* High-watermark of RSS usage */
 478                unsigned long hiwater_vm;  /* High-water virtual memory usage */
 479
 480                unsigned long total_vm;    /* Total pages mapped */
 481                unsigned long locked_vm;   /* Pages that have PG_mlocked set */
 482                atomic64_t    pinned_vm;   /* Refcount permanently increased */
 483                unsigned long data_vm;     /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
 484                unsigned long exec_vm;     /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
 485                unsigned long stack_vm;    /* VM_STACK */
 486                unsigned long def_flags;
 487
 488                /**
 489                 * @write_protect_seq: Locked when any thread is write
 490                 * protecting pages mapped by this mm to enforce a later COW,
 491                 * for instance during page table copying for fork().
 492                 */
 493                seqcount_t write_protect_seq;
 494
 495                spinlock_t arg_lock; /* protect the below fields */
 496
 497                unsigned long start_code, end_code, start_data, end_data;
 498                unsigned long start_brk, brk, start_stack;
 499                unsigned long arg_start, arg_end, env_start, env_end;
 500
 501                unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
 502
 503                /*
 504                 * Special counters, in some configurations protected by the
 505                 * page_table_lock, in other configurations by being atomic.
 506                 */
 507                struct mm_rss_stat rss_stat;
 508
 509                struct linux_binfmt *binfmt;
 510
 511                /* Architecture-specific MM context */
 512                mm_context_t context;
 513
 514                unsigned long flags; /* Must use atomic bitops to access */
 515
 516                struct core_state *core_state; /* coredumping support */
 517
 518#ifdef CONFIG_AIO
 519                spinlock_t                      ioctx_lock;
 520                struct kioctx_table __rcu       *ioctx_table;
 521#endif
 522#ifdef CONFIG_MEMCG
 523                /*
 524                 * "owner" points to a task that is regarded as the canonical
 525                 * user/owner of this mm. All of the following must be true in
 526                 * order for it to be changed:
 527                 *
 528                 * current == mm->owner
 529                 * current->mm != mm
 530                 * new_owner->mm == mm
 531                 * new_owner->alloc_lock is held
 532                 */
 533                struct task_struct __rcu *owner;
 534#endif
 535                struct user_namespace *user_ns;
 536
 537                /* store ref to file /proc/<pid>/exe symlink points to */
 538                struct file __rcu *exe_file;
 539#ifdef CONFIG_MMU_NOTIFIER
 540                struct mmu_notifier_subscriptions *notifier_subscriptions;
 541#endif
 542#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
 543                pgtable_t pmd_huge_pte; /* protected by page_table_lock */
 544#endif
 545#ifdef CONFIG_NUMA_BALANCING
 546                /*
 547                 * numa_next_scan is the next time that the PTEs will be marked
 548                 * pte_numa. NUMA hinting faults will gather statistics and
 549                 * migrate pages to new nodes if necessary.
 550                 */
 551                unsigned long numa_next_scan;
 552
 553                /* Restart point for scanning and setting pte_numa */
 554                unsigned long numa_scan_offset;
 555
 556                /* numa_scan_seq prevents two threads setting pte_numa */
 557                int numa_scan_seq;
 558#endif
 559                /*
 560                 * An operation with batched TLB flushing is going on. Anything
 561                 * that can move process memory needs to flush the TLB when
 562                 * moving a PROT_NONE or PROT_NUMA mapped page.
 563                 */
 564                atomic_t tlb_flush_pending;
 565#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 566                /* See flush_tlb_batched_pending() */
 567                bool tlb_flush_batched;
 568#endif
 569                struct uprobes_state uprobes_state;
 570#ifdef CONFIG_HUGETLB_PAGE
 571                atomic_long_t hugetlb_usage;
 572#endif
 573                struct work_struct async_put_work;
 574
 575#ifdef CONFIG_IOMMU_SUPPORT
 576                u32 pasid;
 577#endif
 578        } __randomize_layout;
 579
 580        /*
 581         * The mm_cpumask needs to be at the end of mm_struct, because it
 582         * is dynamically sized based on nr_cpu_ids.
 583         */
 584        unsigned long cpu_bitmap[];
 585};
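/*
 * Illustrative sketch (not part of the original header) of the
 * @mm_users/@mm_count split documented above: mmget()/mmput() pin the
 * address space itself (VMAs, page tables), while mmgrab()/mmdrop() only
 * pin the struct mm_struct allocation.  The helpers live in
 * <linux/sched/mm.h>.
 *
 *	if (mmget_not_zero(mm)) {
 *		...safe to walk mm->mmap, fault in pages, etc...
 *		mmput(mm);
 *	}
 *
 *	mmgrab(mm);	e.g. a lazy-TLB or debugger reference
 *	...
 *	mmdrop(mm);	may free the mm_struct if this was the last reference
 */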
 586
 587extern struct mm_struct init_mm;
 588
 589/* Pointer magic because the dynamic array size confuses some compilers. */
 590static inline void mm_init_cpumask(struct mm_struct *mm)
 591{
 592        unsigned long cpu_bitmap = (unsigned long)mm;
 593
 594        cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
 595        cpumask_clear((struct cpumask *)cpu_bitmap);
 596}
 597
 598/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
 599static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
 600{
 601        return (struct cpumask *)&mm->cpu_bitmap;
 602}
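/*
 * Illustrative sketch (not part of the original header): mm_cpumask() is
 * mostly used by arch code to track which CPUs may hold stale TLB entries
 * for this mm and to target shootdowns accordingly, e.g.
 *
 *	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
 *	...
 *	for_each_cpu(cpu, mm_cpumask(mm))
 *		...queue or send a TLB flush for that CPU...
 */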
 603
 604struct mmu_gather;
 605extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
 606extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
 607extern void tlb_finish_mmu(struct mmu_gather *tlb);
 608
 609static inline void init_tlb_flush_pending(struct mm_struct *mm)
 610{
 611        atomic_set(&mm->tlb_flush_pending, 0);
 612}
 613
 614static inline void inc_tlb_flush_pending(struct mm_struct *mm)
 615{
 616        atomic_inc(&mm->tlb_flush_pending);
 617        /*
 618         * The only time this value is relevant is when there are indeed pages
 619         * to flush. And we'll only flush pages after changing them, which
 620         * requires the PTL.
 621         *
 622         * So the ordering here is:
 623         *
 624         *      atomic_inc(&mm->tlb_flush_pending);
 625         *      spin_lock(&ptl);
 626         *      ...
 627         *      set_pte_at();
 628         *      spin_unlock(&ptl);
 629         *
 630         *                              spin_lock(&ptl)
 631         *                              mm_tlb_flush_pending();
 632         *                              ....
 633         *                              spin_unlock(&ptl);
 634         *
 635         *      flush_tlb_range();
 636         *      atomic_dec(&mm->tlb_flush_pending);
 637         *
  638         * Where the increment is constrained by the PTL unlock, it thus
 639         * ensures that the increment is visible if the PTE modification is
 640         * visible. After all, if there is no PTE modification, nobody cares
 641         * about TLB flushes either.
 642         *
 643         * This very much relies on users (mm_tlb_flush_pending() and
 644         * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
 645         * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
 646         * locks (PPC) the unlock of one doesn't order against the lock of
 647         * another PTL.
 648         *
 649         * The decrement is ordered by the flush_tlb_range(), such that
 650         * mm_tlb_flush_pending() will not return false unless all flushes have
 651         * completed.
 652         */
 653}
 654
 655static inline void dec_tlb_flush_pending(struct mm_struct *mm)
 656{
 657        /*
 658         * See inc_tlb_flush_pending().
 659         *
 660         * This cannot be smp_mb__before_atomic() because smp_mb() simply does
 661         * not order against TLB invalidate completion, which is what we need.
 662         *
 663         * Therefore we must rely on tlb_flush_*() to guarantee order.
 664         */
 665        atomic_dec(&mm->tlb_flush_pending);
 666}
 667
 668static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
 669{
 670        /*
 671         * Must be called after having acquired the PTL; orders against that
 672         * PTLs release and therefore ensures that if we observe the modified
 673         * PTE we must also observe the increment from inc_tlb_flush_pending().
 674         *
 675         * That is, it only guarantees to return true if there is a flush
 676         * pending for _this_ PTL.
 677         */
 678        return atomic_read(&mm->tlb_flush_pending);
 679}
 680
 681static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
 682{
 683        /*
 684         * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
 685         * for which there is a TLB flush pending in order to guarantee
 686         * we've seen both that PTE modification and the increment.
 687         *
 688         * (no requirement on actually still holding the PTL, that is irrelevant)
 689         */
 690        return atomic_read(&mm->tlb_flush_pending) > 1;
 691}
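/*
 * Illustrative sketch (not part of the original header) of how the
 * tlb_flush_pending helpers above are meant to bracket a batched PTE
 * update, roughly the pattern used by munmap/mprotect style code:
 *
 *	inc_tlb_flush_pending(mm);
 *	...take the ptl, clear or write-protect PTEs, drop the ptl...
 *	flush_tlb_range(vma, start, end);
 *	dec_tlb_flush_pending(mm);
 *
 * A racing thread that holds the same ptl can then consult
 * mm_tlb_flush_pending(mm) to decide whether it must flush on its own
 * before relying on the old TLB contents.
 */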
 692
 693struct vm_fault;
 694
 695/**
 696 * typedef vm_fault_t - Return type for page fault handlers.
 697 *
 698 * Page fault handlers return a bitmask of %VM_FAULT values.
 699 */
 700typedef __bitwise unsigned int vm_fault_t;
 701
 702/**
 703 * enum vm_fault_reason - Page fault handlers return a bitmask of
 704 * these values to tell the core VM what happened when handling the
 705 * fault. Used to decide whether a process gets delivered SIGBUS or
 706 * just gets major/minor fault counters bumped up.
 707 *
 708 * @VM_FAULT_OOM:               Out Of Memory
 709 * @VM_FAULT_SIGBUS:            Bad access
 710 * @VM_FAULT_MAJOR:             Page read from storage
 711 * @VM_FAULT_WRITE:             Special case for get_user_pages
 712 * @VM_FAULT_HWPOISON:          Hit poisoned small page
 713 * @VM_FAULT_HWPOISON_LARGE:    Hit poisoned large page. Index encoded
 714 *                              in upper bits
 715 * @VM_FAULT_SIGSEGV:           segmentation fault
  716 * @VM_FAULT_NOPAGE:            ->fault installed the pte, did not return a page
 717 * @VM_FAULT_LOCKED:            ->fault locked the returned page
 718 * @VM_FAULT_RETRY:             ->fault blocked, must retry
 719 * @VM_FAULT_FALLBACK:          huge page fault failed, fall back to small
 720 * @VM_FAULT_DONE_COW:          ->fault has fully handled COW
 721 * @VM_FAULT_NEEDDSYNC:         ->fault did not modify page tables and needs
 722 *                              fsync() to complete (for synchronous page faults
 723 *                              in DAX)
 724 * @VM_FAULT_HINDEX_MASK:       mask HINDEX value
 725 *
 726 */
 727enum vm_fault_reason {
 728        VM_FAULT_OOM            = (__force vm_fault_t)0x000001,
 729        VM_FAULT_SIGBUS         = (__force vm_fault_t)0x000002,
 730        VM_FAULT_MAJOR          = (__force vm_fault_t)0x000004,
 731        VM_FAULT_WRITE          = (__force vm_fault_t)0x000008,
 732        VM_FAULT_HWPOISON       = (__force vm_fault_t)0x000010,
 733        VM_FAULT_HWPOISON_LARGE = (__force vm_fault_t)0x000020,
 734        VM_FAULT_SIGSEGV        = (__force vm_fault_t)0x000040,
 735        VM_FAULT_NOPAGE         = (__force vm_fault_t)0x000100,
 736        VM_FAULT_LOCKED         = (__force vm_fault_t)0x000200,
 737        VM_FAULT_RETRY          = (__force vm_fault_t)0x000400,
 738        VM_FAULT_FALLBACK       = (__force vm_fault_t)0x000800,
 739        VM_FAULT_DONE_COW       = (__force vm_fault_t)0x001000,
 740        VM_FAULT_NEEDDSYNC      = (__force vm_fault_t)0x002000,
 741        VM_FAULT_HINDEX_MASK    = (__force vm_fault_t)0x0f0000,
 742};
 743
 744/* Encode hstate index for a hwpoisoned large page */
 745#define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
 746#define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)
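/*
 * Illustrative sketch (assumption, not part of the original header): a
 * hugetlb fault handler reporting a poisoned large page would encode the
 * hstate index so that the signal path can recover the mapping size:
 *
 *	return VM_FAULT_HWPOISON_LARGE |
 *	       VM_FAULT_SET_HINDEX(hstate_index(h));
 *
 * with the consumer using VM_FAULT_GET_HINDEX() to read the index back.
 * hstate_index() is declared in <linux/hugetlb.h>.
 */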
 747
 748#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS |        \
 749                        VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON |  \
 750                        VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)
 751
 752#define VM_FAULT_RESULT_TRACE \
 753        { VM_FAULT_OOM,                 "OOM" },        \
 754        { VM_FAULT_SIGBUS,              "SIGBUS" },     \
 755        { VM_FAULT_MAJOR,               "MAJOR" },      \
 756        { VM_FAULT_WRITE,               "WRITE" },      \
 757        { VM_FAULT_HWPOISON,            "HWPOISON" },   \
 758        { VM_FAULT_HWPOISON_LARGE,      "HWPOISON_LARGE" },     \
 759        { VM_FAULT_SIGSEGV,             "SIGSEGV" },    \
 760        { VM_FAULT_NOPAGE,              "NOPAGE" },     \
 761        { VM_FAULT_LOCKED,              "LOCKED" },     \
 762        { VM_FAULT_RETRY,               "RETRY" },      \
 763        { VM_FAULT_FALLBACK,            "FALLBACK" },   \
 764        { VM_FAULT_DONE_COW,            "DONE_COW" },   \
 765        { VM_FAULT_NEEDDSYNC,           "NEEDDSYNC" }
 766
 767struct vm_special_mapping {
 768        const char *name;       /* The name, e.g. "[vdso]". */
 769
 770        /*
 771         * If .fault is not provided, this points to a
 772         * NULL-terminated array of pages that back the special mapping.
 773         *
 774         * This must not be NULL unless .fault is provided.
 775         */
 776        struct page **pages;
 777
 778        /*
 779         * If non-NULL, then this is called to resolve page faults
 780         * on the special mapping.  If used, .pages is not checked.
 781         */
 782        vm_fault_t (*fault)(const struct vm_special_mapping *sm,
 783                                struct vm_area_struct *vma,
 784                                struct vm_fault *vmf);
 785
 786        int (*mremap)(const struct vm_special_mapping *sm,
 787                     struct vm_area_struct *new_vma);
 788};
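/*
 * Illustrative sketch (not part of the original header): a minimal special
 * mapping backed by a static, NULL-terminated page array, in the style of
 * the vdso/vvar mappings.  It would typically be installed with
 * _install_special_mapping(), declared in <linux/mm.h>.
 *
 *	static struct page *my_pages[2];	filled at init time, [1] stays NULL
 *
 *	static const struct vm_special_mapping my_mapping = {
 *		.name  = "[my_special]",
 *		.pages = my_pages,
 *	};
 */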
 789
 790enum tlb_flush_reason {
 791        TLB_FLUSH_ON_TASK_SWITCH,
 792        TLB_REMOTE_SHOOTDOWN,
 793        TLB_LOCAL_SHOOTDOWN,
 794        TLB_LOCAL_MM_SHOOTDOWN,
 795        TLB_REMOTE_SEND_IPI,
 796        NR_TLB_FLUSH_REASONS,
 797};
 798
 799 /*
 800  * A swap entry has to fit into a "unsigned long", as the entry is hidden
 801  * in the "index" field of the swapper address space.
 802  */
 803typedef struct {
 804        unsigned long val;
 805} swp_entry_t;
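/*
 * Illustrative sketch (not part of the original header): swp_entry_t values
 * are built and unpacked with the helpers in <linux/swapops.h>, and
 * converted to and from the arch pte encoding when stored in page tables:
 *
 *	swp_entry_t entry = swp_entry(type, offset);
 *	...
 *	type = swp_type(entry);
 *	offset = swp_offset(entry);
 *	pte = swp_entry_to_pte(entry);
 *	entry = pte_to_swp_entry(pte);
 */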
 806
 807#endif /* _LINUX_MM_TYPES_H */
 808