linux/include/linux/mm_types.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/mm_types_task.h>

#include <linux/auxvec.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
#include <linux/seqlock.h>

#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

#define INIT_PASID	0

struct address_space;
struct mem_cgroup;

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * If you allocate the page using alloc_pages(), you can use some of the
 * space in struct page for your own purposes.  The five words in the main
 * union are available, except for bit 0 of the first word which must be
 * kept clear.  Many users use this word to store a pointer to an object
 * which is guaranteed to be aligned.  If you use the same storage as
 * page->mapping, you must restore it to NULL before freeing the page.
 *
 * If your page will not be mapped to userspace, you can also use the four
 * bytes in the mapcount union, but you must call page_mapcount_reset()
 * before freeing it.
 *
 * If you want to use the refcount field, it must be used in such a way
 * that other CPUs temporarily incrementing and then decrementing the
 * refcount does not cause problems.  On receiving the page from
 * alloc_pages(), the refcount will be positive.
 *
 * If you allocate pages of order > 0, you can use some of the fields
 * in each subpage, but you may need to restore some of their values
 * afterwards.
 *
 * SLUB uses cmpxchg_double() to atomically update its freelist and
 * counters.  That requires that freelist & counters be adjacent and
 * double-word aligned.  We align all struct pages to double-word
 * boundaries, and ensure that 'freelist' is aligned within the
 * struct.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment	__aligned(2 * sizeof(unsigned long))
#else
#define _struct_page_alignment
#endif

struct page {
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	/*
	 * Five words (20/40 bytes) are available in this union.
	 * WARNING: bit 0 of the first word is used for PageTail(). That
	 * means the other users of this union MUST NOT use that bit, to
	 * avoid collisions and a false-positive PageTail().
	 */
	union {
		struct {	/* Page cache and anonymous pages */
			/**
			 * @lru: Pageout list, eg. active_list protected by
			 * lruvec->lru_lock.  Sometimes used as a generic list
			 * by the page owner.
			 */
			struct list_head lru;
			/* See page-flags.h for PAGE_MAPPING_FLAGS */
			struct address_space *mapping;
			pgoff_t index;		/* Our offset within mapping. */
			/**
			 * @private: Mapping-private opaque data.
			 * Usually used for buffer_heads if PagePrivate.
			 * Used for swp_entry_t if PageSwapCache.
			 * Indicates order in the buddy system if PageBuddy.
			 */
			unsigned long private;
		};
		struct {	/* page_pool used by netstack */
			/**
			 * @pp_magic: magic value to avoid recycling non
			 * page_pool allocated pages.
			 */
			unsigned long pp_magic;
			struct page_pool *pp;
			unsigned long _pp_mapping_pad;
			unsigned long dma_addr;
			union {
				/**
				 * dma_addr_upper: might require a 64-bit
				 * value on 32-bit architectures.
				 */
				unsigned long dma_addr_upper;
				/**
				 * For frag page support; not supported on
				 * 32-bit architectures with 64-bit DMA.
				 */
				atomic_long_t pp_frag_count;
			};
		};
		struct {	/* slab, slob and slub */
			union {
				struct list_head slab_list;
				struct {	/* Partial pages */
					struct page *next;
#ifdef CONFIG_64BIT
					int pages;	/* Nr of pages left */
					int pobjects;	/* Approximate count */
#else
					short int pages;
					short int pobjects;
#endif
				};
			};
			struct kmem_cache *slab_cache; /* not slob */
			/* Double-word boundary */
			void *freelist;		/* first free object */
			union {
				void *s_mem;	/* slab: first object */
				unsigned long counters;		/* SLUB */
				struct {			/* SLUB */
					unsigned inuse:16;
					unsigned objects:15;
					unsigned frozen:1;
				};
			};
		};
		struct {	/* Tail pages of compound page */
			unsigned long compound_head;	/* Bit zero is set */

			/* First tail page only */
			unsigned char compound_dtor;
			unsigned char compound_order;
			atomic_t compound_mapcount;
			unsigned int compound_nr; /* 1 << compound_order */
		};
		struct {	/* Second tail page of compound page */
			unsigned long _compound_pad_1;	/* compound_head */
			atomic_t hpage_pinned_refcount;
			/* For both global and memcg */
			struct list_head deferred_list;
		};
		struct {	/* Page table pages */
			unsigned long _pt_pad_1;	/* compound_head */
			pgtable_t pmd_huge_pte; /* protected by page->ptl */
			unsigned long _pt_pad_2;	/* mapping */
			union {
				struct mm_struct *pt_mm; /* x86 pgds only */
				atomic_t pt_frag_refcount; /* powerpc */
			};
#if ALLOC_SPLIT_PTLOCKS
			spinlock_t *ptl;
#else
			spinlock_t ptl;
#endif
		};
		struct {	/* ZONE_DEVICE pages */
			/** @pgmap: Points to the hosting device page map. */
			struct dev_pagemap *pgmap;
			void *zone_device_data;
			/*
			 * ZONE_DEVICE private pages are counted as being
			 * mapped so the next 3 words hold the mapping, index,
			 * and private fields from the source anonymous or
			 * page cache page while the page is migrated to device
			 * private memory.
			 * ZONE_DEVICE MEMORY_DEVICE_FS_DAX pages also
			 * use the mapping, index, and private fields when
			 * pmem backed DAX files are mapped.
			 */
		};

		/** @rcu_head: You can use this to free a page by RCU. */
		struct rcu_head rcu_head;
	};

	union {		/* This union is 4 bytes in size. */
		/*
		 * If the page can be mapped to userspace, encodes the number
		 * of times this page is referenced by a page table.
		 */
		atomic_t _mapcount;

		/*
		 * If the page is neither PageSlab nor mappable to userspace,
		 * the value stored here may help determine what this page
		 * is used for.  See page-flags.h for a list of page types
		 * which are currently stored here.
		 */
		unsigned int page_type;

		unsigned int active;		/* SLAB */
		int units;			/* SLOB */
	};

	/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
	atomic_t _refcount;

#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif
} _struct_page_alignment;
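
/*
 * Illustrative sketch (not part of this header's API): a caller that owns
 * pages fresh from alloc_pages() may reuse the five-word union above, for
 * example chaining its pages on a private list via @lru.  Pointers to
 * list_head are aligned, so bit 0 of the first word stays clear; remember
 * to restore page->mapping to NULL before freeing if you touch it.
 */
static inline void example_stash_free_page(struct page *page,
					   struct list_head *list)
{
	list_add(&page->lru, list);	/* reuse the pageout-list linkage */
}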

static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
	return &page[1].compound_mapcount;
}

static inline atomic_t *compound_pincount_ptr(struct page *page)
{
	return &page[2].hpage_pinned_refcount;
}
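
/*
 * Sketch of how these helpers are consumed (see compound_mapcount() in
 * <linux/mm.h>): the count is stored offset by -1, like _mapcount, so a
 * reader of a compound head page does roughly:
 *
 *	mapcount = atomic_read(compound_mapcount_ptr(head)) + 1;
 */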

/*
 * Used for sizing the vmemmap region on some architectures
 */
#define STRUCT_PAGE_MAX_SHIFT	(order_base_2(sizeof(struct page)))

#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)

#define page_private(page)		((page)->private)

static inline void set_page_private(struct page *page, unsigned long private)
{
	page->private = private;
}
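
/*
 * Illustrative pairing of the accessors above (hypothetical caller):
 * stash an opaque cookie on a page you own, and clear it again before
 * the page is handed back:
 *
 *	set_page_private(page, (unsigned long)cookie);
 *	...
 *	cookie = (void *)page_private(page);
 *	set_page_private(page, 0);
 */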

struct page_frag_cache {
	void * va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	__u16 offset;
	__u16 size;
#else
	__u32 offset;
#endif
	/* we maintain a pagecount bias, so that we don't dirty the cache
	 * line containing page->_refcount every time we allocate a fragment.
	 */
	unsigned int		pagecnt_bias;
	bool pfmemalloc;
};

typedef unsigned long vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
	struct rb_node	vm_rb;		/* link in global region tree */
	vm_flags_t	vm_flags;	/* VMA vm_flags */
	unsigned long	vm_start;	/* start address of region */
	unsigned long	vm_end;		/* region initialised to here */
	unsigned long	vm_top;		/* region allocated to here */
	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
	struct file	*vm_file;	/* the backing file or NULL */

	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
						* this region */
};

#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
	struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */

/*
 * This struct describes a virtual memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	struct rb_node vm_rb;

	/*
	 * Largest free memory gap in bytes to the left of this VMA.
	 * Either between this VMA and vma->vm_prev, or between one of the
	 * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
	 * get_unmapped_area find a free area of the right size.
	 */
	unsigned long rb_subtree_gap;

	/* Second cache line starts here. */

	struct mm_struct *vm_mm;	/* The address space we belong to. */

	/*
	 * Access permissions of this VMA.
	 * See vmf_insert_mixed_prot() for discussion.
	 */
	pgprot_t vm_page_prot;
	unsigned long vm_flags;		/* Flags, see mm.h. */

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.  A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_lock &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file * vm_file;		/* File we map to (can be NULL). */
	void * vm_private_data;		/* was vm_pte (shared mem) */

#ifdef CONFIG_SWAP
	atomic_long_t swap_readahead_info;
#endif
#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
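
/*
 * Illustrative sketch (not a kernel API): because @vm_end is the first
 * byte *after* the area, [vm_start, vm_end) is a half-open interval and
 * a containment test looks like this:
 */
static inline bool example_vma_contains(const struct vm_area_struct *vma,
					unsigned long addr)
{
	return addr >= vma->vm_start && addr < vma->vm_end;
}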

struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};

struct kioctx_table;
struct mm_struct {
	struct {
		struct vm_area_struct *mmap;		/* list of VMAs */
		struct rb_root mm_rb;
		u64 vmacache_seqnum;			/* per-thread vmacache */
#ifdef CONFIG_MMU
		unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
		unsigned long mmap_base;	/* base of mmap area */
		unsigned long mmap_legacy_base;	/* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
		/* Base addresses for compatible mmap() */
		unsigned long mmap_compat_base;
		unsigned long mmap_compat_legacy_base;
#endif
		unsigned long task_size;	/* size of task vm space */
		unsigned long highest_vm_end;	/* highest vma end address */
		pgd_t * pgd;

#ifdef CONFIG_MEMBARRIER
		/**
		 * @membarrier_state: Flags controlling membarrier behavior.
		 *
		 * This field is close to @pgd to hopefully fit in the same
		 * cache-line, which needs to be touched by switch_mm().
		 */
		atomic_t membarrier_state;
#endif

		/**
		 * @mm_users: The number of users including userspace.
		 *
		 * Use mmget()/mmget_not_zero()/mmput() to modify. When this
		 * drops to 0 (i.e. when the task exits and there are no other
		 * temporary reference holders), we also release a reference on
		 * @mm_count (which may then free the &struct mm_struct if
		 * @mm_count also drops to 0).
		 */
		atomic_t mm_users;

		/**
		 * @mm_count: The number of references to &struct mm_struct
		 * (@mm_users count as 1).
		 *
		 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
		 * &struct mm_struct is freed.
		 */
		atomic_t mm_count;

#ifdef CONFIG_MMU
		atomic_long_t pgtables_bytes;	/* PTE page table pages */
#endif
		int map_count;			/* number of VMAs */

		spinlock_t page_table_lock; /* Protects page tables and some
					     * counters
					     */
		/*
		 * With some kernel configs, the current mmap_lock's offset
		 * inside 'mm_struct' is 0x120, which is optimal: its two hot
		 * fields 'count' and 'owner' then sit in two different
		 * cachelines. When mmap_lock is highly contended, both fields
		 * are accessed frequently, so the current layout helps reduce
		 * cache bouncing.
		 *
		 * So please be careful with adding new fields before
		 * mmap_lock, which can easily push the two fields into one
		 * cacheline.
		 */
		struct rw_semaphore mmap_lock;

		struct list_head mmlist; /* List of maybe swapped mm's. These
					  * are globally strung together off
					  * init_mm.mmlist, and are protected
					  * by mmlist_lock
					  */

		unsigned long hiwater_rss; /* High-watermark of RSS usage */
		unsigned long hiwater_vm;  /* High-water virtual memory usage */

		unsigned long total_vm;	   /* Total pages mapped */
		unsigned long locked_vm;   /* Pages that have PG_mlocked set */
		atomic64_t    pinned_vm;   /* Refcount permanently increased */
		unsigned long data_vm;	   /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
		unsigned long exec_vm;	   /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
		unsigned long stack_vm;	   /* VM_STACK */
		unsigned long def_flags;

		/**
		 * @write_protect_seq: Locked when any thread is write
		 * protecting pages mapped by this mm to enforce a later COW,
		 * for instance during page table copying for fork().
		 */
		seqcount_t write_protect_seq;

		spinlock_t arg_lock; /* protect the below fields */

		unsigned long start_code, end_code, start_data, end_data;
		unsigned long start_brk, brk, start_stack;
		unsigned long arg_start, arg_end, env_start, env_end;

		unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

		/*
		 * Special counters, in some configurations protected by the
		 * page_table_lock, in other configurations by being atomic.
		 */
		struct mm_rss_stat rss_stat;

		struct linux_binfmt *binfmt;

		/* Architecture-specific MM context */
		mm_context_t context;

		unsigned long flags; /* Must use atomic bitops to access */

		struct core_state *core_state; /* coredumping support */

#ifdef CONFIG_AIO
		spinlock_t			ioctx_lock;
		struct kioctx_table __rcu	*ioctx_table;
#endif
#ifdef CONFIG_MEMCG
		/*
		 * "owner" points to a task that is regarded as the canonical
		 * user/owner of this mm. All of the following must be true in
		 * order for it to be changed:
		 *
		 * current == mm->owner
		 * current->mm != mm
		 * new_owner->mm == mm
		 * new_owner->alloc_lock is held
		 */
		struct task_struct __rcu *owner;
#endif
		struct user_namespace *user_ns;

		/* store ref to file /proc/<pid>/exe symlink points to */
		struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
		struct mmu_notifier_subscriptions *notifier_subscriptions;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
		pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
		/*
		 * numa_next_scan is the next time that the PTEs will be marked
		 * pte_numa. NUMA hinting faults will gather statistics and
		 * migrate pages to new nodes if necessary.
		 */
		unsigned long numa_next_scan;

		/* Restart point for scanning and setting pte_numa */
		unsigned long numa_scan_offset;

		/* numa_scan_seq prevents two threads setting pte_numa */
		int numa_scan_seq;
#endif
		/*
		 * An operation with batched TLB flushing is going on. Anything
		 * that can move process memory needs to flush the TLB when
		 * moving a PROT_NONE or PROT_NUMA mapped page.
		 */
		atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
		/* See flush_tlb_batched_pending() */
		bool tlb_flush_batched;
#endif
		struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
		atomic_long_t hugetlb_usage;
#endif
		struct work_struct async_put_work;

#ifdef CONFIG_IOMMU_SUPPORT
		u32 pasid;
#endif
	} __randomize_layout;

	/*
	 * The mm_cpumask needs to be at the end of mm_struct, because it
	 * is dynamically sized based on nr_cpu_ids.
	 */
	unsigned long cpu_bitmap[];
};
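
/*
 * Sketch of the two-counter discipline documented at @mm_users/@mm_count
 * above (the helpers live in <linux/sched/mm.h>): a walker holding only a
 * bare pointer to a foreign mm would do roughly:
 *
 *	mmgrab(mm);			// pin the struct itself (mm_count)
 *	if (mmget_not_zero(mm)) {	// pin the address space (mm_users)
 *		...walk VMAs / page tables...
 *		mmput(mm);
 *	}
 *	mmdrop(mm);
 */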

extern struct mm_struct init_mm;

/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
	unsigned long cpu_bitmap = (unsigned long)mm;

	cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
	cpumask_clear((struct cpumask *)cpu_bitmap);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return (struct cpumask *)&mm->cpu_bitmap;
}
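
/*
 * Sketch (assumption: the caller has <linux/smp.h>): a typical consumer
 * limits TLB-shootdown IPIs to CPUs that have actually run this mm, e.g.:
 *
 *	on_each_cpu_mask(mm_cpumask(mm), flush_fn, info, true);
 */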

struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_finish_mmu(struct mmu_gather *tlb);

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Where the increment is constrained by the PTL unlock, it thus
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes have
	 * completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * Therefore we must rely on tlb_flush_*() to guarantee order.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee
	 * we've seen both that PTE modification and the increment.
	 *
	 * (no requirement on actually still holding the PTL, that is irrelevant)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}
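
/*
 * Sketch of the pattern documented in inc_tlb_flush_pending() (names like
 * 'ptl', 'start' and 'end' are placeholders): an unmap path brackets its
 * PTE changes and the eventual flush with the pending counter:
 *
 *	inc_tlb_flush_pending(mm);
 *	spin_lock(ptl);
 *	... clear PTEs ...
 *	spin_unlock(ptl);
 *	flush_tlb_range(vma, start, end);
 *	dec_tlb_flush_pending(mm);
 */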

struct vm_fault;

/**
 * typedef vm_fault_t - Return type for page fault handlers.
 *
 * Page fault handlers return a bitmask of %VM_FAULT values.
 */
typedef __bitwise unsigned int vm_fault_t;

/**
 * enum vm_fault_reason - Page fault handlers return a bitmask of
 * these values to tell the core VM what happened when handling the
 * fault. Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 *
 * @VM_FAULT_OOM:		Out Of Memory
 * @VM_FAULT_SIGBUS:		Bad access
 * @VM_FAULT_MAJOR:		Page read from storage
 * @VM_FAULT_WRITE:		Special case for get_user_pages
 * @VM_FAULT_HWPOISON:		Hit poisoned small page
 * @VM_FAULT_HWPOISON_LARGE:	Hit poisoned large page. Index encoded
 *				in upper bits
 * @VM_FAULT_SIGSEGV:		segmentation fault
 * @VM_FAULT_NOPAGE:		->fault installed the pte; no page is returned
 * @VM_FAULT_LOCKED:		->fault locked the returned page
 * @VM_FAULT_RETRY:		->fault blocked, must retry
 * @VM_FAULT_FALLBACK:		huge page fault failed, fall back to small
 * @VM_FAULT_DONE_COW:		->fault has fully handled COW
 * @VM_FAULT_NEEDDSYNC:		->fault did not modify page tables and needs
 *				fsync() to complete (for synchronous page faults
 *				in DAX)
 * @VM_FAULT_HINDEX_MASK:	mask HINDEX value
 *
 */
enum vm_fault_reason {
	VM_FAULT_OOM            = (__force vm_fault_t)0x000001,
	VM_FAULT_SIGBUS         = (__force vm_fault_t)0x000002,
	VM_FAULT_MAJOR          = (__force vm_fault_t)0x000004,
	VM_FAULT_WRITE          = (__force vm_fault_t)0x000008,
	VM_FAULT_HWPOISON       = (__force vm_fault_t)0x000010,
	VM_FAULT_HWPOISON_LARGE = (__force vm_fault_t)0x000020,
	VM_FAULT_SIGSEGV        = (__force vm_fault_t)0x000040,
	VM_FAULT_NOPAGE         = (__force vm_fault_t)0x000100,
	VM_FAULT_LOCKED         = (__force vm_fault_t)0x000200,
	VM_FAULT_RETRY          = (__force vm_fault_t)0x000400,
	VM_FAULT_FALLBACK       = (__force vm_fault_t)0x000800,
	VM_FAULT_DONE_COW       = (__force vm_fault_t)0x001000,
	VM_FAULT_NEEDDSYNC      = (__force vm_fault_t)0x002000,
	VM_FAULT_HINDEX_MASK    = (__force vm_fault_t)0x0f0000,
};

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
#define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)
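
/*
 * Sketch (hypothetical handler; hstate_index() lives in <linux/hugetlb.h>):
 * a hugetlb fault hitting a poisoned page encodes which hstate was
 * involved, and the core VM decodes it again:
 *
 *	return VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h));
 *	...
 *	idx = VM_FAULT_GET_HINDEX(ret);
 */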

#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS |	\
			VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON |	\
			VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK)

#define VM_FAULT_RESULT_TRACE \
	{ VM_FAULT_OOM,			"OOM" },	\
	{ VM_FAULT_SIGBUS,		"SIGBUS" },	\
	{ VM_FAULT_MAJOR,		"MAJOR" },	\
	{ VM_FAULT_WRITE,		"WRITE" },	\
	{ VM_FAULT_HWPOISON,		"HWPOISON" },	\
	{ VM_FAULT_HWPOISON_LARGE,	"HWPOISON_LARGE" },	\
	{ VM_FAULT_SIGSEGV,		"SIGSEGV" },	\
	{ VM_FAULT_NOPAGE,		"NOPAGE" },	\
	{ VM_FAULT_LOCKED,		"LOCKED" },	\
	{ VM_FAULT_RETRY,		"RETRY" },	\
	{ VM_FAULT_FALLBACK,		"FALLBACK" },	\
	{ VM_FAULT_DONE_COW,		"DONE_COW" },	\
	{ VM_FAULT_NEEDDSYNC,		"NEEDDSYNC" }

struct vm_special_mapping {
	const char *name;	/* The name, e.g. "[vdso]". */

	/*
	 * If .fault is not provided, this points to a
	 * NULL-terminated array of pages that back the special mapping.
	 *
	 * This must not be NULL unless .fault is provided.
	 */
	struct page **pages;

	/*
	 * If non-NULL, then this is called to resolve page faults
	 * on the special mapping.  If used, .pages is not checked.
	 */
	vm_fault_t (*fault)(const struct vm_special_mapping *sm,
				struct vm_area_struct *vma,
				struct vm_fault *vmf);

	int (*mremap)(const struct vm_special_mapping *sm,
		     struct vm_area_struct *new_vma);
};
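
/*
 * Sketch of a vDSO-style user (names are placeholders; the installer,
 * _install_special_mapping(), lives in mm/mmap.c): a static,
 * NULL-terminated page array backs the mapping when no .fault handler
 * is given:
 *
 *	static struct page *vdso_pages[2];	// one page + NULL, filled at init
 *	static const struct vm_special_mapping vdso_mapping = {
 *		.name  = "[vdso]",
 *		.pages = vdso_pages,
 *	};
 */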

enum tlb_flush_reason {
	TLB_FLUSH_ON_TASK_SWITCH,
	TLB_REMOTE_SHOOTDOWN,
	TLB_LOCAL_SHOOTDOWN,
	TLB_LOCAL_MM_SHOOTDOWN,
	TLB_REMOTE_SEND_IPI,
	NR_TLB_FLUSH_REASONS,
};

 /*
  * A swap entry has to fit into an "unsigned long", as the entry is hidden
  * in the "index" field of the swapper address space.
  */
typedef struct {
	unsigned long val;
} swp_entry_t;
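
/*
 * Sketch (the real encoders/decoders are in <linux/swapops.h>): a swap
 * entry packs a type and an offset into @val, roughly:
 *
 *	swp_entry_t entry = swp_entry(type, offset);
 *	type   = swp_type(entry);
 *	offset = swp_offset(entry);
 */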

#endif /* _LINUX_MM_TYPES_H */