linux/include/linux/mm_types.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/mm_types_task.h>

#include <linux/auxvec.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>

#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

typedef int vm_fault_t;

struct address_space;
struct mem_cgroup;
struct hmm;

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * If you allocate the page using alloc_pages(), you can use some of the
 * space in struct page for your own purposes.  The five words in the main
 * union are available, except for bit 0 of the first word which must be
 * kept clear.  Many users use this word to store a pointer to an object
 * which is guaranteed to be aligned.  If you use the same storage as
 * page->mapping, you must restore it to NULL before freeing the page.
 *
 * If your page will not be mapped to userspace, you can also use the four
 * bytes in the mapcount union, but you must call page_mapcount_reset()
 * before freeing it.
 *
 * If you want to use the refcount field, it must be used in such a way
 * that other CPUs temporarily incrementing and then decrementing the
 * refcount does not cause problems.  On receiving the page from
 * alloc_pages(), the refcount will be positive.
 *
 * If you allocate pages of order > 0, you can use some of the fields
 * in each subpage, but you may need to restore some of their values
 * afterwards.
 *
 * SLUB uses cmpxchg_double() to atomically update its freelist and
 * counters.  That requires that freelist & counters be adjacent and
 * double-word aligned.  We align all struct pages to double-word
 * boundaries, and ensure that 'freelist' is aligned within the
 * struct.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment  __aligned(2 * sizeof(unsigned long))
#else
#define _struct_page_alignment
#endif

struct page {
        unsigned long flags;            /* Atomic flags, some possibly
                                         * updated asynchronously */
        /*
         * Five words (20/40 bytes) are available in this union.
         * WARNING: bit 0 of the first word is used for PageTail(). That
         * means the other users of this union MUST NOT use the bit to
         * avoid collision and false-positive PageTail().
         */
        union {
                struct {        /* Page cache and anonymous pages */
                        /**
                         * @lru: Pageout list, eg. active_list protected by
                         * zone_lru_lock.  Sometimes used as a generic list
                         * by the page owner.
                         */
                        struct list_head lru;
                        /* See page-flags.h for PAGE_MAPPING_FLAGS */
                        struct address_space *mapping;
                        pgoff_t index;          /* Our offset within mapping. */
                        /**
                         * @private: Mapping-private opaque data.
                         * Usually used for buffer_heads if PagePrivate.
                         * Used for swp_entry_t if PageSwapCache.
                         * Indicates order in the buddy system if PageBuddy.
                         */
                        unsigned long private;
                };
                struct {        /* slab, slob and slub */
                        union {
                                struct list_head slab_list;     /* uses lru */
                                struct {        /* Partial pages */
                                        struct page *next;
#ifdef CONFIG_64BIT
                                        int pages;      /* Nr of pages left */
                                        int pobjects;   /* Approximate count */
#else
                                        short int pages;
                                        short int pobjects;
#endif
                                };
                        };
                        struct kmem_cache *slab_cache; /* not slob */
                        /* Double-word boundary */
                        void *freelist;         /* first free object */
                        union {
                                void *s_mem;    /* slab: first object */
                                unsigned long counters;         /* SLUB */
                                struct {                        /* SLUB */
                                        unsigned inuse:16;
                                        unsigned objects:15;
                                        unsigned frozen:1;
                                };
                        };
                };
                struct {        /* Tail pages of compound page */
                        unsigned long compound_head;    /* Bit zero is set */

                        /* First tail page only */
                        unsigned char compound_dtor;
                        unsigned char compound_order;
                        atomic_t compound_mapcount;
                };
                struct {        /* Second tail page of compound page */
                        unsigned long _compound_pad_1;  /* compound_head */
                        unsigned long _compound_pad_2;
                        struct list_head deferred_list;
                };
                struct {        /* Page table pages */
                        unsigned long _pt_pad_1;        /* compound_head */
                        pgtable_t pmd_huge_pte; /* protected by page->ptl */
                        unsigned long _pt_pad_2;        /* mapping */
                        struct mm_struct *pt_mm;        /* x86 pgds only */
#if ALLOC_SPLIT_PTLOCKS
                        spinlock_t *ptl;
#else
                        spinlock_t ptl;
#endif
                };
                struct {        /* ZONE_DEVICE pages */
                        /** @pgmap: Points to the hosting device page map. */
                        struct dev_pagemap *pgmap;
                        unsigned long hmm_data;
                        unsigned long _zd_pad_1;        /* uses mapping */
                };

                /** @rcu_head: You can use this to free a page by RCU. */
                struct rcu_head rcu_head;
        };

        union {         /* This union is 4 bytes in size. */
                /*
                 * If the page can be mapped to userspace, encodes the number
                 * of times this page is referenced by a page table.
                 */
                atomic_t _mapcount;

                /*
                 * If the page is neither PageSlab nor mappable to userspace,
                 * the value stored here may help determine what this page
                 * is used for.  See page-flags.h for a list of page types
                 * which are currently stored here.
                 */
                unsigned int page_type;

                unsigned int active;            /* SLAB */
                int units;                      /* SLOB */
        };

        /* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
        atomic_t _refcount;

#ifdef CONFIG_MEMCG
        struct mem_cgroup *mem_cgroup;
#endif

        /*
         * On machines where all RAM is mapped into kernel address space,
         * we can simply calculate the virtual address. On machines with
         * highmem some memory is mapped into kernel virtual memory
         * dynamically, so we need a place to store that address.
         * Note that this field could be 16 bits on x86 ... ;)
         *
         * Architectures with slow multiplication can define
         * WANT_PAGE_VIRTUAL in asm/page.h
         */
#if defined(WANT_PAGE_VIRTUAL)
        void *virtual;                  /* Kernel virtual address (NULL if
                                           not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
        int _last_cpupid;
#endif
} _struct_page_alignment;
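
/*
 * Illustrative sketch, not part of the upstream header: one way an
 * alloc_pages() user might follow the rules documented above when borrowing
 * struct page fields.  "my_cookie" is a made-up value; set_page_private(),
 * page_mapcount_reset() and __free_pages() are the usual helpers.
 *
 *      struct page *page = alloc_pages(GFP_KERNEL, 0);
 *
 *      if (page) {
 *              set_page_private(page, (unsigned long)my_cookie);
 *              ...
 *              set_page_private(page, 0);
 *              page->mapping = NULL;           // only if mapping's storage was reused
 *              page_mapcount_reset(page);      // only if the mapcount word was reused
 *              __free_pages(page, 0);
 *      }
 */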

#define PAGE_FRAG_CACHE_MAX_SIZE        __ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER       get_order(PAGE_FRAG_CACHE_MAX_SIZE)

struct page_frag_cache {
        void *va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
        __u16 offset;
        __u16 size;
#else
        __u32 offset;
#endif
        /* We maintain a pagecount bias, so that we don't dirty the cache line
         * containing page->_refcount every time we allocate a fragment.
         */
        unsigned int            pagecnt_bias;
        bool pfmemalloc;
};
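
/*
 * Illustrative sketch, not part of the upstream header: typical use of a
 * page_frag_cache through the page_frag_alloc()/page_frag_free() helpers
 * declared alongside the page allocator (<linux/gfp.h> in this era).  The
 * cache hands out sub-page fragments and, thanks to pagecnt_bias, only
 * touches page->_refcount occasionally.
 *
 *      static struct page_frag_cache frag_cache;      // example cache
 *
 *      void *buf = page_frag_alloc(&frag_cache, 256, GFP_ATOMIC);
 *      if (buf) {
 *              ...                     // use the 256-byte fragment
 *              page_frag_free(buf);
 *      }
 */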

typedef unsigned long vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
        struct rb_node  vm_rb;          /* link in global region tree */
        vm_flags_t      vm_flags;       /* VMA vm_flags */
        unsigned long   vm_start;       /* start address of region */
        unsigned long   vm_end;         /* region initialised to here */
        unsigned long   vm_top;         /* region allocated to here */
        unsigned long   vm_pgoff;       /* the offset in vm_file corresponding to vm_start */
        struct file     *vm_file;       /* the backing file or NULL */

        int             vm_usage;       /* region usage count (access under nommu_region_sem) */
        bool            vm_icache_flushed : 1; /* true if the icache has been flushed for
                                                * this region */
};

#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
        struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */

/*
 * This struct describes a virtual memory area. There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (i.e. a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
        /* The first cache line has the info for VMA tree walking. */

        unsigned long vm_start;         /* Our start address within vm_mm. */
        unsigned long vm_end;           /* The first byte after our end address
                                           within vm_mm. */

        /* linked list of VM areas per task, sorted by address */
        struct vm_area_struct *vm_next, *vm_prev;

        struct rb_node vm_rb;

        /*
         * Largest free memory gap in bytes to the left of this VMA.
         * Either between this VMA and vma->vm_prev, or between one of the
         * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
         * get_unmapped_area find a free area of the right size.
         */
        unsigned long rb_subtree_gap;

        /* Second cache line starts here. */

        struct mm_struct *vm_mm;        /* The address space we belong to. */
        pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
        unsigned long vm_flags;         /* Flags, see mm.h. */

        /*
         * For areas with an address space and backing store,
         * linkage into the address_space->i_mmap interval tree.
         */
        struct {
                struct rb_node rb;
                unsigned long rb_subtree_last;
        } shared;

        /*
         * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
         * list, after a COW of one of the file pages.  A MAP_SHARED vma
         * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
         * or brk vma (with NULL file) can only be in an anon_vma list.
         */
        struct list_head anon_vma_chain; /* Serialized by mmap_sem &
                                          * page_table_lock */
        struct anon_vma *anon_vma;      /* Serialized by page_table_lock */

        /* Function pointers to deal with this struct. */
        const struct vm_operations_struct *vm_ops;

        /* Information about our backing store: */
        unsigned long vm_pgoff;         /* Offset (within vm_file) in PAGE_SIZE
                                           units */
        struct file *vm_file;           /* File we map to (can be NULL). */
        void *vm_private_data;          /* was vm_pte (shared mem) */

        atomic_long_t swap_readahead_info;
#ifndef CONFIG_MMU
        struct vm_region *vm_region;    /* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
        struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
#endif
        struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
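
/*
 * Illustrative sketch, not part of the upstream header: the VMA list and
 * rbtree rooted in mm_struct (below) are walked under mmap_sem.  A minimal
 * read-side walk, using only fields defined above:
 *
 *      struct vm_area_struct *vma;
 *
 *      down_read(&mm->mmap_sem);
 *      for (vma = mm->mmap; vma; vma = vma->vm_next)
 *              pr_debug("vma %lx-%lx flags %lx\n",
 *                       vma->vm_start, vma->vm_end, vma->vm_flags);
 *      up_read(&mm->mmap_sem);
 *
 * find_vma() performs the same kind of lookup by address through the rbtree
 * instead of the linear list.
 */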

struct core_thread {
        struct task_struct *task;
        struct core_thread *next;
};

struct core_state {
        atomic_t nr_threads;
        struct core_thread dumper;
        struct completion startup;
};

struct kioctx_table;
struct mm_struct {
        struct vm_area_struct *mmap;            /* list of VMAs */
        struct rb_root mm_rb;
        u32 vmacache_seqnum;                   /* per-thread vmacache */
#ifdef CONFIG_MMU
        unsigned long (*get_unmapped_area) (struct file *filp,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags);
#endif
        unsigned long mmap_base;                /* base of mmap area */
        unsigned long mmap_legacy_base;         /* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
        /* Base addresses for compatible mmap() */
        unsigned long mmap_compat_base;
        unsigned long mmap_compat_legacy_base;
#endif
        unsigned long task_size;                /* size of task vm space */
        unsigned long highest_vm_end;           /* highest vma end address */
        pgd_t *pgd;

        /**
         * @mm_users: The number of users including userspace.
         *
         * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops
         * to 0 (i.e. when the task exits and there are no other temporary
         * reference holders), we also release a reference on @mm_count
         * (which may then free the &struct mm_struct if @mm_count also
         * drops to 0).
         */
        atomic_t mm_users;

        /**
         * @mm_count: The number of references to &struct mm_struct
         * (@mm_users count as 1).
         *
         * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
         * &struct mm_struct is freed.
         */
        atomic_t mm_count;

#ifdef CONFIG_MMU
        atomic_long_t pgtables_bytes;           /* PTE page table pages */
#endif
        int map_count;                          /* number of VMAs */

        spinlock_t page_table_lock;             /* Protects page tables and some counters */
        struct rw_semaphore mmap_sem;

        struct list_head mmlist;                /* List of maybe swapped mm's.  These are globally strung
                                                 * together off init_mm.mmlist, and are protected
                                                 * by mmlist_lock
                                                 */


        unsigned long hiwater_rss;      /* High-watermark of RSS usage */
        unsigned long hiwater_vm;       /* High-water virtual memory usage */

        unsigned long total_vm;         /* Total pages mapped */
        unsigned long locked_vm;        /* Pages that have PG_mlocked set */
        unsigned long pinned_vm;        /* Refcount permanently increased */
        unsigned long data_vm;          /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
        unsigned long exec_vm;          /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
        unsigned long stack_vm;         /* VM_STACK */
        unsigned long def_flags;

        spinlock_t arg_lock; /* protect the below fields */
        unsigned long start_code, end_code, start_data, end_data;
        unsigned long start_brk, brk, start_stack;
        unsigned long arg_start, arg_end, env_start, env_end;

        unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

        /*
         * Special counters, in some configurations protected by the
         * page_table_lock, in other configurations by being atomic.
         */
        struct mm_rss_stat rss_stat;

        struct linux_binfmt *binfmt;

        cpumask_var_t cpu_vm_mask_var;

        /* Architecture-specific MM context */
        mm_context_t context;

        unsigned long flags; /* Must use atomic bitops to access the bits */

        struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_MEMBARRIER
        atomic_t membarrier_state;
#endif
#ifdef CONFIG_AIO
        spinlock_t                      ioctx_lock;
        struct kioctx_table __rcu       *ioctx_table;
#endif
#ifdef CONFIG_MEMCG
        /*
         * "owner" points to a task that is regarded as the canonical
         * user/owner of this mm. All of the following must be true in
         * order for it to be changed:
         *
         * current == mm->owner
         * current->mm != mm
         * new_owner->mm == mm
         * new_owner->alloc_lock is held
         */
        struct task_struct __rcu *owner;
#endif
        struct user_namespace *user_ns;

        /* store ref to file /proc/<pid>/exe symlink points to */
        struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
        struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
        pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
        struct cpumask cpumask_allocation;
#endif
#ifdef CONFIG_NUMA_BALANCING
        /*
         * numa_next_scan is the next time that the PTEs will be marked
         * pte_numa. NUMA hinting faults will gather statistics and migrate
         * pages to new nodes if necessary.
         */
        unsigned long numa_next_scan;

        /* Restart point for scanning and setting pte_numa */
        unsigned long numa_scan_offset;

        /* numa_scan_seq prevents two threads setting pte_numa */
        int numa_scan_seq;
#endif
        /*
         * An operation with batched TLB flushing is going on. Anything that
         * can move process memory needs to flush the TLB when moving a
         * PROT_NONE or PROT_NUMA mapped page.
         */
        atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
        /* See flush_tlb_batched_pending() */
        bool tlb_flush_batched;
#endif
        struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
        atomic_long_t hugetlb_usage;
#endif
        struct work_struct async_put_work;

#if IS_ENABLED(CONFIG_HMM)
        /* HMM needs to track a few things per mm */
        struct hmm *hmm;
#endif
} __randomize_layout;
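
/*
 * Illustrative sketch, not part of the upstream header: the two refcounts
 * above have different lifetimes.  mm_users keeps the address space (page
 * tables, VMAs) alive; mm_count only keeps the mm_struct itself allocated.
 * The helpers live in <linux/sched/mm.h>:
 *
 *      if (mmget_not_zero(mm)) {       // pin the whole address space
 *              ...                     // safe to walk VMAs, fault in pages
 *              mmput(mm);
 *      }
 *
 *      mmgrab(mm);                     // pin only the mm_struct
 *      ...                             // the address space may already be torn down
 *      mmdrop(mm);
 */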

extern struct mm_struct init_mm;

static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
        mm->cpu_vm_mask_var = &mm->cpumask_allocation;
#endif
        cpumask_clear(mm->cpu_vm_mask_var);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
        return mm->cpu_vm_mask_var;
}

struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                                unsigned long start, unsigned long end);
extern void tlb_finish_mmu(struct mmu_gather *tlb,
                                unsigned long start, unsigned long end);

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
        atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
        atomic_inc(&mm->tlb_flush_pending);
        /*
         * The only time this value is relevant is when there are indeed pages
         * to flush. And we'll only flush pages after changing them, which
         * requires the PTL.
         *
         * So the ordering here is:
         *
         *      atomic_inc(&mm->tlb_flush_pending);
         *      spin_lock(&ptl);
         *      ...
         *      set_pte_at();
         *      spin_unlock(&ptl);
         *
         *                              spin_lock(&ptl)
         *                              mm_tlb_flush_pending();
         *                              ....
         *                              spin_unlock(&ptl);
         *
         *      flush_tlb_range();
         *      atomic_dec(&mm->tlb_flush_pending);
         *
         * Since the increment is ordered before the PTL unlock, it thus
         * ensures that the increment is visible if the PTE modification is
         * visible. After all, if there is no PTE modification, nobody cares
         * about TLB flushes either.
         *
         * This very much relies on users (mm_tlb_flush_pending() and
         * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
         * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
         * locks (PPC) the unlock of one doesn't order against the lock of
         * another PTL.
         *
         * The decrement is ordered by the flush_tlb_range(), such that
         * mm_tlb_flush_pending() will not return false unless all flushes have
         * completed.
         */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
        /*
         * See inc_tlb_flush_pending().
         *
         * This cannot be smp_mb__before_atomic() because smp_mb() simply does
         * not order against TLB invalidate completion, which is what we need.
         *
         * Therefore we must rely on tlb_flush_*() to guarantee order.
         */
        atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
        /*
         * Must be called after having acquired the PTL; orders against that
         * PTL's release and therefore ensures that if we observe the modified
         * PTE we must also observe the increment from inc_tlb_flush_pending().
         *
         * That is, it only guarantees to return true if there is a flush
         * pending for _this_ PTL.
         */
        return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
        /*
         * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
         * for which there is a TLB flush pending in order to guarantee
         * we've seen both that PTE modification and the increment.
         *
         * (no requirement on actually still holding the PTL, that is irrelevant)
         */
        return atomic_read(&mm->tlb_flush_pending) > 1;
}
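
/*
 * Illustrative sketch, not part of the upstream header: how a range operation
 * brackets its PTE changes with the pending counter, matching the ordering
 * comment in inc_tlb_flush_pending().  Names other than the helpers above are
 * placeholders.
 *
 *      inc_tlb_flush_pending(mm);
 *      spin_lock(ptl);
 *      set_pte_at(mm, addr, ptep, newpte);     // modify PTEs under the PTL
 *      spin_unlock(ptl);
 *      flush_tlb_range(vma, start, end);
 *      dec_tlb_flush_pending(mm);
 *
 * A racing lookup that takes the same PTL and observes the new PTE is then
 * guaranteed to see mm_tlb_flush_pending() return true until the flush has
 * completed.
 */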

struct vm_fault;

struct vm_special_mapping {
        const char *name;       /* The name, e.g. "[vdso]". */

        /*
         * If .fault is not provided, this points to a
         * NULL-terminated array of pages that back the special mapping.
         *
         * This must not be NULL unless .fault is provided.
         */
        struct page **pages;

        /*
         * If non-NULL, then this is called to resolve page faults
         * on the special mapping.  If used, .pages is not checked.
         */
        vm_fault_t (*fault)(const struct vm_special_mapping *sm,
                                struct vm_area_struct *vma,
                                struct vm_fault *vmf);

        int (*mremap)(const struct vm_special_mapping *sm,
                     struct vm_area_struct *new_vma);
};
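
/*
 * Illustrative sketch, not part of the upstream header: roughly how vDSO-like
 * code uses this structure together with _install_special_mapping() (declared
 * in <linux/mm.h>).  The page array, flags and address below are placeholders.
 *
 *      static struct page *vdso_pages[2];      // [0] = backing page, [1] = NULL
 *
 *      static const struct vm_special_mapping vdso_mapping = {
 *              .name   = "[vdso]",
 *              .pages  = vdso_pages,
 *      };
 *
 *      vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *                                     VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
 *                                     &vdso_mapping);
 */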

enum tlb_flush_reason {
        TLB_FLUSH_ON_TASK_SWITCH,
        TLB_REMOTE_SHOOTDOWN,
        TLB_LOCAL_SHOOTDOWN,
        TLB_LOCAL_MM_SHOOTDOWN,
        TLB_REMOTE_SEND_IPI,
        NR_TLB_FLUSH_REASONS,
};

 /*
  * A swap entry has to fit into a "unsigned long", as the entry is hidden
  * in the "index" field of the swapper address space.
  */
typedef struct {
        unsigned long val;
} swp_entry_t;
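
/*
 * Illustrative sketch, not part of the upstream header: swp_entry_t is only
 * ever built and taken apart with the helpers in <linux/swapops.h>, which
 * pack a swap device "type" and a page offset into the single word:
 *
 *      swp_entry_t entry = swp_entry(type, offset);
 *
 *      unsigned type  = swp_type(entry);
 *      pgoff_t offset = swp_offset(entry);
 */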

#endif /* _LINUX_MM_TYPES_H */