linux/include/linux/mm_types.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/mm_types_task.h>

#include <linux/auxvec.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>

#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

typedef int vm_fault_t;

struct address_space;
struct mem_cgroup;
struct hmm;

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * If you allocate the page using alloc_pages(), you can use some of the
 * space in struct page for your own purposes.  The five words in the main
 * union are available, except for bit 0 of the first word which must be
 * kept clear.  Many users use this word to store a pointer to an object
 * which is guaranteed to be aligned.  If you use the same storage as
 * page->mapping, you must restore it to NULL before freeing the page.
 *
 * If your page will not be mapped to userspace, you can also use the four
 * bytes in the mapcount union, but you must call page_mapcount_reset()
 * before freeing it.
 *
 * If you want to use the refcount field, it must be used in such a way
 * that other CPUs temporarily incrementing and then decrementing the
 * refcount does not cause problems.  On receiving the page from
 * alloc_pages(), the refcount will be positive.
 *
 * If you allocate pages of order > 0, you can use some of the fields
 * in each subpage, but you may need to restore some of their values
 * afterwards.
 *
 * SLUB uses cmpxchg_double() to atomically update its freelist and
 * counters.  That requires that freelist & counters be adjacent and
 * double-word aligned.  We align all struct pages to double-word
 * boundaries, and ensure that 'freelist' is aligned within the
 * struct.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
#define _struct_page_alignment  __aligned(2 * sizeof(unsigned long))
#else
#define _struct_page_alignment
#endif

struct page {
        unsigned long flags;            /* Atomic flags, some possibly
                                         * updated asynchronously */
        /*
         * Five words (20/40 bytes) are available in this union.
         * WARNING: bit 0 of the first word is used for PageTail(). That
         * means the other users of this union MUST NOT use the bit to
         * avoid collision and false-positive PageTail().
         */
        union {
                struct {        /* Page cache and anonymous pages */
                        /**
                         * @lru: Pageout list, e.g. active_list protected by
                         * zone_lru_lock.  Sometimes used as a generic list
                         * by the page owner.
                         */
                        struct list_head lru;
                        /* See page-flags.h for PAGE_MAPPING_FLAGS */
                        struct address_space *mapping;
                        pgoff_t index;          /* Our offset within mapping. */
                        /**
                         * @private: Mapping-private opaque data.
                         * Usually used for buffer_heads if PagePrivate.
                         * Used for swp_entry_t if PageSwapCache.
                         * Indicates order in the buddy system if PageBuddy.
                         */
                        unsigned long private;
                };
                struct {        /* slab, slob and slub */
                        union {
                                struct list_head slab_list;     /* uses lru */
                                struct {        /* Partial pages */
                                        struct page *next;
#ifdef CONFIG_64BIT
                                        int pages;      /* Nr of pages left */
                                        int pobjects;   /* Approximate count */
#else
                                        short int pages;
                                        short int pobjects;
#endif
                                };
                        };
                        struct kmem_cache *slab_cache; /* not slob */
                        /* Double-word boundary */
                        void *freelist;         /* first free object */
                        union {
                                void *s_mem;    /* slab: first object */
                                unsigned long counters;         /* SLUB */
                                struct {                        /* SLUB */
                                        unsigned inuse:16;
                                        unsigned objects:15;
                                        unsigned frozen:1;
                                };
                        };
                };
                struct {        /* Tail pages of compound page */
                        unsigned long compound_head;    /* Bit zero is set */

                        /* First tail page only */
                        unsigned char compound_dtor;
                        unsigned char compound_order;
                        atomic_t compound_mapcount;
                };
                struct {        /* Second tail page of compound page */
                        unsigned long _compound_pad_1;  /* compound_head */
                        unsigned long _compound_pad_2;
                        struct list_head deferred_list;
                };
                struct {        /* Page table pages */
                        unsigned long _pt_pad_1;        /* compound_head */
                        pgtable_t pmd_huge_pte; /* protected by page->ptl */
                        unsigned long _pt_pad_2;        /* mapping */
                        union {
                                struct mm_struct *pt_mm; /* x86 pgds only */
                                atomic_t pt_frag_refcount; /* powerpc */
                        };
#if ALLOC_SPLIT_PTLOCKS
                        spinlock_t *ptl;
#else
                        spinlock_t ptl;
#endif
                };
                struct {        /* ZONE_DEVICE pages */
                        /** @pgmap: Points to the hosting device page map. */
                        struct dev_pagemap *pgmap;
                        unsigned long hmm_data;
                        unsigned long _zd_pad_1;        /* uses mapping */
                };

                /** @rcu_head: You can use this to free a page by RCU. */
                struct rcu_head rcu_head;
        };

        union {         /* This union is 4 bytes in size. */
                /*
                 * If the page can be mapped to userspace, encodes the number
                 * of times this page is referenced by a page table.
                 */
                atomic_t _mapcount;

                /*
                 * If the page is neither PageSlab nor mappable to userspace,
                 * the value stored here may help determine what this page
                 * is used for.  See page-flags.h for a list of page types
                 * which are currently stored here.
                 */
                unsigned int page_type;

                unsigned int active;            /* SLAB */
                int units;                      /* SLOB */
        };

        /* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
        atomic_t _refcount;

#ifdef CONFIG_MEMCG
        struct mem_cgroup *mem_cgroup;
#endif

        /*
         * On machines where all RAM is mapped into kernel address space,
         * we can simply calculate the virtual address. On machines with
         * highmem some memory is mapped into kernel virtual memory
         * dynamically, so we need a place to store that address.
         * Note that this field could be 16 bits on x86 ... ;)
         *
         * Architectures with slow multiplication can define
         * WANT_PAGE_VIRTUAL in asm/page.h
         */
#if defined(WANT_PAGE_VIRTUAL)
        void *virtual;                  /* Kernel virtual address (NULL if
                                           not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
        int _last_cpupid;
#endif
} _struct_page_alignment;
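
/*
 * Illustrative sketch (not part of the kernel source): a driver that
 * allocates its own pages may reuse the first union word as documented
 * above, provided bit 0 stays clear and page->mapping is reset before
 * the page is freed.  "my_obj" is a hypothetical, pointer-aligned object:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 0);
 *
 *	if (page) {
 *		page->mapping = (struct address_space *)my_obj;
 *		...
 *		page->mapping = NULL;	// mandatory before freeing
 *		__free_pages(page, 0);
 *	}
 */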

/*
 * Used for sizing the vmemmap region on some architectures
 */
#define STRUCT_PAGE_MAX_SHIFT   (order_base_2(sizeof(struct page)))

#define PAGE_FRAG_CACHE_MAX_SIZE        __ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER       get_order(PAGE_FRAG_CACHE_MAX_SIZE)

struct page_frag_cache {
        void * va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
        __u16 offset;
        __u16 size;
#else
        __u32 offset;
#endif
        /* We maintain a pagecount bias, so that we don't dirty the cache
         * line containing page->_refcount every time we allocate a
         * fragment.
         */
        unsigned int            pagecnt_bias;
        bool pfmemalloc;
};
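
/*
 * Illustrative sketch (not part of the kernel source): networking-style
 * code carves small fragments out of this cache with page_frag_alloc()
 * and releases them with page_frag_free(), e.g.:
 *
 *	static struct page_frag_cache frag_cache;
 *
 *	void *buf = page_frag_alloc(&frag_cache, 256, GFP_ATOMIC);
 *	if (buf) {
 *		...
 *		page_frag_free(buf);
 *	}
 */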

typedef unsigned long vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
        struct rb_node  vm_rb;          /* link in global region tree */
        vm_flags_t      vm_flags;       /* VMA vm_flags */
        unsigned long   vm_start;       /* start address of region */
        unsigned long   vm_end;         /* region initialised to here */
        unsigned long   vm_top;         /* region allocated to here */
        unsigned long   vm_pgoff;       /* the offset in vm_file corresponding to vm_start */
        struct file     *vm_file;       /* the backing file or NULL */

        int             vm_usage;       /* region usage count (access under nommu_region_sem) */
        bool            vm_icache_flushed : 1; /* true if the icache has been flushed for
                                                * this region */
};

#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
        struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */

/*
 * This struct describes a virtual memory area. There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (i.e. a shared
 * library, the executable area etc.).
 */
struct vm_area_struct {
        /* The first cache line has the info for VMA tree walking. */

        unsigned long vm_start;         /* Our start address within vm_mm. */
        unsigned long vm_end;           /* The first byte after our end address
                                           within vm_mm. */

        /* linked list of VM areas per task, sorted by address */
        struct vm_area_struct *vm_next, *vm_prev;

        struct rb_node vm_rb;

        /*
         * Largest free memory gap in bytes to the left of this VMA.
         * Either between this VMA and vma->vm_prev, or between one of the
         * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
         * get_unmapped_area find a free area of the right size.
         */
        unsigned long rb_subtree_gap;

        /* Second cache line starts here. */

        struct mm_struct *vm_mm;        /* The address space we belong to. */
        pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
        unsigned long vm_flags;         /* Flags, see mm.h. */

        /*
         * For areas with an address space and backing store,
         * linkage into the address_space->i_mmap interval tree.
         */
        struct {
                struct rb_node rb;
                unsigned long rb_subtree_last;
        } shared;

        /*
         * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
         * list, after a COW of one of the file pages.  A MAP_SHARED vma
         * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
         * or brk vma (with NULL file) can only be in an anon_vma list.
         */
        struct list_head anon_vma_chain; /* Serialized by mmap_sem &
                                          * page_table_lock */
        struct anon_vma *anon_vma;      /* Serialized by page_table_lock */

        /* Function pointers to deal with this struct. */
        const struct vm_operations_struct *vm_ops;

        /* Information about our backing store: */
        unsigned long vm_pgoff;         /* Offset (within vm_file) in PAGE_SIZE
                                           units */
        struct file * vm_file;          /* File we map to (can be NULL). */
        void * vm_private_data;         /* was vm_pte (shared mem) */

        atomic_long_t swap_readahead_info;
#ifndef CONFIG_MMU
        struct vm_region *vm_region;    /* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
        struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
#endif
        struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
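
/*
 * Illustrative sketch (not part of the kernel source): the per-mm VMA
 * list is walked via vm_next with mmap_sem held at least for reading:
 *
 *	struct vm_area_struct *vma;
 *
 *	down_read(&mm->mmap_sem);
 *	for (vma = mm->mmap; vma; vma = vma->vm_next)
 *		pr_debug("%lx-%lx\n", vma->vm_start, vma->vm_end);
 *	up_read(&mm->mmap_sem);
 */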

struct core_thread {
        struct task_struct *task;
        struct core_thread *next;
};

struct core_state {
        atomic_t nr_threads;
        struct core_thread dumper;
        struct completion startup;
};

struct kioctx_table;
struct mm_struct {
        struct {
                struct vm_area_struct *mmap;            /* list of VMAs */
                struct rb_root mm_rb;
                u64 vmacache_seqnum;                   /* per-thread vmacache */
#ifdef CONFIG_MMU
                unsigned long (*get_unmapped_area) (struct file *filp,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags);
#endif
                unsigned long mmap_base;        /* base of mmap area */
                unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
                /* Base addresses for compatible mmap() */
                unsigned long mmap_compat_base;
                unsigned long mmap_compat_legacy_base;
#endif
                unsigned long task_size;        /* size of task vm space */
                unsigned long highest_vm_end;   /* highest vma end address */
                pgd_t * pgd;

                /**
                 * @mm_users: The number of users including userspace.
                 *
                 * Use mmget()/mmget_not_zero()/mmput() to modify. When this
                 * drops to 0 (i.e. when the task exits and there are no other
                 * temporary reference holders), we also release a reference on
                 * @mm_count (which may then free the &struct mm_struct if
                 * @mm_count also drops to 0).
                 */
                atomic_t mm_users;

                /**
                 * @mm_count: The number of references to &struct mm_struct
                 * (@mm_users count as 1).
                 *
                 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
                 * &struct mm_struct is freed.
                 */
                atomic_t mm_count;
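
                /*
                 * Illustrative sketch (not part of the kernel source):
                 * a worker that only needs the mm_struct itself pins
                 * @mm_count, while access to the address space pins
                 * @mm_users:
                 *
                 *	mmgrab(mm);			// mm_count reference
                 *	if (mmget_not_zero(mm)) {	// mm_users reference
                 *		... access the address space ...
                 *		mmput(mm);
                 *	}
                 *	mmdrop(mm);			// may free the mm_struct
                 */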

#ifdef CONFIG_MMU
                atomic_long_t pgtables_bytes;   /* PTE page table pages */
#endif
                int map_count;                  /* number of VMAs */

                spinlock_t page_table_lock; /* Protects page tables and some
                                             * counters
                                             */
                struct rw_semaphore mmap_sem;

                struct list_head mmlist; /* List of maybe swapped mm's. These
                                          * are globally strung together off
                                          * init_mm.mmlist, and are protected
                                          * by mmlist_lock
                                          */


                unsigned long hiwater_rss; /* High-watermark of RSS usage */
                unsigned long hiwater_vm;  /* High-water virtual memory usage */

                unsigned long total_vm;    /* Total pages mapped */
                unsigned long locked_vm;   /* Pages that have PG_mlocked set */
                unsigned long pinned_vm;   /* Refcount permanently increased */
                unsigned long data_vm;     /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
                unsigned long exec_vm;     /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
                unsigned long stack_vm;    /* VM_STACK */
                unsigned long def_flags;

                spinlock_t arg_lock; /* protect the below fields */
                unsigned long start_code, end_code, start_data, end_data;
                unsigned long start_brk, brk, start_stack;
                unsigned long arg_start, arg_end, env_start, env_end;

                unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

                /*
                 * Special counters, in some configurations protected by the
                 * page_table_lock, in other configurations by being atomic.
                 */
                struct mm_rss_stat rss_stat;

                struct linux_binfmt *binfmt;

                /* Architecture-specific MM context */
                mm_context_t context;

                unsigned long flags; /* Must use atomic bitops to access */

                struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_MEMBARRIER
                atomic_t membarrier_state;
#endif
#ifdef CONFIG_AIO
                spinlock_t                      ioctx_lock;
                struct kioctx_table __rcu       *ioctx_table;
#endif
#ifdef CONFIG_MEMCG
                /*
                 * "owner" points to a task that is regarded as the canonical
                 * user/owner of this mm. All of the following must be true in
                 * order for it to be changed:
                 *
                 * current == mm->owner
                 * current->mm != mm
                 * new_owner->mm == mm
                 * new_owner->alloc_lock is held
                 */
                struct task_struct __rcu *owner;
#endif
                struct user_namespace *user_ns;

                /* store ref to file /proc/<pid>/exe symlink points to */
                struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
                struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
                pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
                /*
                 * numa_next_scan is the next time that the PTEs will be marked
                 * pte_numa. NUMA hinting faults will gather statistics and
                 * migrate pages to new nodes if necessary.
                 */
                unsigned long numa_next_scan;

                /* Restart point for scanning and setting pte_numa */
                unsigned long numa_scan_offset;

                /* numa_scan_seq prevents two threads setting pte_numa */
                int numa_scan_seq;
#endif
                /*
                 * An operation with batched TLB flushing is going on. Anything
                 * that can move process memory needs to flush the TLB when
                 * moving a PROT_NONE or PROT_NUMA mapped page.
                 */
                atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
                /* See flush_tlb_batched_pending() */
                bool tlb_flush_batched;
#endif
                struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
                atomic_long_t hugetlb_usage;
#endif
                struct work_struct async_put_work;

#if IS_ENABLED(CONFIG_HMM)
                /* HMM needs to track a few things per mm */
                struct hmm *hmm;
#endif
        } __randomize_layout;

        /*
         * The mm_cpumask needs to be at the end of mm_struct, because it
         * is dynamically sized based on nr_cpu_ids.
         */
        unsigned long cpu_bitmap[];
};

extern struct mm_struct init_mm;

/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
        unsigned long cpu_bitmap = (unsigned long)mm;

        cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
        cpumask_clear((struct cpumask *)cpu_bitmap);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
        return (struct cpumask *)&mm->cpu_bitmap;
}
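
/*
 * Illustrative sketch (not part of the kernel source): TLB shootdown
 * code typically uses the accessor above to consider only CPUs that
 * have run this mm, e.g.:
 *
 *	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
 *		local_flush_tlb();	// hypothetical arch-local flush
 */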

struct mmu_gather;
extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
                                unsigned long start, unsigned long end);
extern void tlb_finish_mmu(struct mmu_gather *tlb,
                                unsigned long start, unsigned long end);

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
        atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
        atomic_inc(&mm->tlb_flush_pending);
        /*
         * The only time this value is relevant is when there are indeed pages
         * to flush. And we'll only flush pages after changing them, which
         * requires the PTL.
         *
         * So the ordering here is:
         *
         *      atomic_inc(&mm->tlb_flush_pending);
         *      spin_lock(&ptl);
         *      ...
         *      set_pte_at();
         *      spin_unlock(&ptl);
         *
         *                              spin_lock(&ptl)
         *                              mm_tlb_flush_pending();
         *                              ....
         *                              spin_unlock(&ptl);
         *
         *      flush_tlb_range();
         *      atomic_dec(&mm->tlb_flush_pending);
         *
         * Where the increment is constrained by the PTL unlock, it thus
         * ensures that the increment is visible if the PTE modification is
         * visible. After all, if there is no PTE modification, nobody cares
         * about TLB flushes either.
         *
         * This very much relies on users (mm_tlb_flush_pending() and
         * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
         * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
         * locks (PPC) the unlock of one doesn't order against the lock of
         * another PTL.
         *
         * The decrement is ordered by the flush_tlb_range(), such that
         * mm_tlb_flush_pending() will not return false unless all flushes have
         * completed.
         */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
        /*
         * See inc_tlb_flush_pending().
         *
         * This cannot be smp_mb__before_atomic() because smp_mb() simply does
         * not order against TLB invalidate completion, which is what we need.
         *
         * Therefore we must rely on tlb_flush_*() to guarantee order.
         */
        atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
        /*
         * Must be called after having acquired the PTL; orders against that
         * PTL's release and therefore ensures that if we observe the modified
         * PTE we must also observe the increment from inc_tlb_flush_pending().
         *
         * That is, it only guarantees to return true if there is a flush
         * pending for _this_ PTL.
         */
        return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
        /*
         * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
         * for which there is a TLB flush pending in order to guarantee
         * we've seen both that PTE modification and the increment.
         *
         * (no requirement on actually still holding the PTL, that is irrelevant)
         */
        return atomic_read(&mm->tlb_flush_pending) > 1;
}
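
/*
 * Illustrative sketch (not part of the kernel source): a reader,
 * modelled on the fault path, checks for pending flushes while holding
 * the PTL so it doesn't trust a possibly-stale TLB entry:
 *
 *	spinlock_t *ptl = pte_lockptr(mm, pmd);
 *
 *	spin_lock(ptl);
 *	if (mm_tlb_flush_pending(mm))
 *		flush_tlb_range(vma, start, end);
 *	spin_unlock(ptl);
 */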

struct vm_fault;

struct vm_special_mapping {
        const char *name;       /* The name, e.g. "[vdso]". */

        /*
         * If .fault is not provided, this points to a
         * NULL-terminated array of pages that back the special mapping.
         *
         * This must not be NULL unless .fault is provided.
         */
        struct page **pages;

        /*
         * If non-NULL, then this is called to resolve page faults
         * on the special mapping.  If used, .pages is not checked.
         */
        vm_fault_t (*fault)(const struct vm_special_mapping *sm,
                                struct vm_area_struct *vma,
                                struct vm_fault *vmf);

        int (*mremap)(const struct vm_special_mapping *sm,
                     struct vm_area_struct *new_vma);
};
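
/*
 * Illustrative sketch (not part of the kernel source): vdso-style code
 * installs one of these with _install_special_mapping().  "vdso_pages"
 * is a hypothetical NULL-terminated page array:
 *
 *	static const struct vm_special_mapping vdso_mapping = {
 *		.name  = "[vdso]",
 *		.pages = vdso_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, len,
 *				       VM_READ | VM_EXEC | VM_MAYREAD,
 *				       &vdso_mapping);
 */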

enum tlb_flush_reason {
        TLB_FLUSH_ON_TASK_SWITCH,
        TLB_REMOTE_SHOOTDOWN,
        TLB_LOCAL_SHOOTDOWN,
        TLB_LOCAL_MM_SHOOTDOWN,
        TLB_REMOTE_SEND_IPI,
        NR_TLB_FLUSH_REASONS,
};

/*
 * A swap entry has to fit into an "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */
typedef struct {
        unsigned long val;
} swp_entry_t;
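
/*
 * Illustrative sketch (not part of the kernel source): the helpers in
 * <linux/swapops.h> pack and unpack these values, e.g.:
 *
 *	swp_entry_t entry = swp_entry(type, offset);
 *
 *	type   = swp_type(entry);
 *	offset = swp_offset(entry);
 */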

#endif /* _LINUX_MM_TYPES_H */