/* linux/include/linux/mm_types.h */
#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/mm_types_task.h>

#include <linux/auxvec.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>

#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
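
/*
 * Each auxiliary vector entry is an (id, value) pair of unsigned longs and
 * the vector is terminated by an AT_NULL pair, hence the 2 * (... + 1)
 * above.  As a rough worked example (assuming AT_VECTOR_SIZE_BASE is 20 and
 * an architecture that adds no extra entries), this gives
 * 2 * (0 + 20 + 1) = 42 unsigned longs, i.e. 21 (id, value) pairs including
 * the terminator.
 */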

struct address_space;
struct mem_cgroup;

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * The objects in struct page are organized in double word blocks in
 * order to allow us to use atomic double word operations on portions
 * of struct page. That is currently only used by slub, but the
 * arrangement also allows atomic double word operations on the
 * flags/mapping and lru list pointers.
 */
struct page {
        /* First double word block */
        unsigned long flags;            /* Atomic flags, some possibly
                                         * updated asynchronously */
        union {
                struct address_space *mapping;  /* If low bit clear, points to
                                                 * inode address_space, or NULL.
                                                 * If page mapped as anonymous
                                                 * memory, low bit is set, and
                                                 * it points to anon_vma object:
                                                 * see PAGE_MAPPING_ANON below.
                                                 */
                void *s_mem;                    /* slab first object */
                atomic_t compound_mapcount;     /* first tail page */
                /* page_deferred_list().next     -- second tail page */
        };

        /* Second double word */
        union {
                pgoff_t index;          /* Our offset within mapping. */
                void *freelist;         /* sl[aou]b first free object */
                /* page_deferred_list().prev    -- second tail page */
        };

        union {
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
        defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
                /* Used for cmpxchg_double in slub */
                unsigned long counters;
#else
                /*
                 * Keep _refcount separate from slub cmpxchg_double data,
                 * as the rest of the double word is protected by slab_lock
                 * but _refcount is not.
                 */
                unsigned counters;
#endif
                struct {

                        union {
                                /*
                                 * Count of ptes mapped in mms, to show when
                                 * page is mapped & limit reverse map searches.
                                 *
                                 * Extra information about page type may be
                                 * stored here for pages that are never mapped,
                                 * in which case the value MUST BE <= -2.
                                 * See page-flags.h for more details.
                                 */
                                atomic_t _mapcount;

                                unsigned int active;            /* SLAB */
                                struct {                        /* SLUB */
                                        unsigned inuse:16;
                                        unsigned objects:15;
                                        unsigned frozen:1;
                                };
                                int units;                      /* SLOB */
                        };
                        /*
                         * Usage count. *USE WRAPPER FUNCTIONS* for any
                         * manual accounting; see page_ref.h and the
                         * sketch below.
                         */
                        atomic_t _refcount;
                };
        };
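
        /*
         * Illustrative sketch, not part of this header: _refcount and
         * _mapcount should only be touched through the wrappers in
         * page_ref.h and mm.h, for example:
         *
         *	if (get_page_unless_zero(page)) {
         *		...			the page cannot be freed
         *		put_page(page);		while the reference is held
         *	}
         *
         * rather than by operating on the atomics directly, which would
         * bypass the page_ref tracking hooks.
         */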

        /*
         * Third double word block
         *
         * WARNING: bit 0 of the first word encodes PageTail(). That means
         * the other users of this storage space MUST NOT set that bit, to
         * avoid collisions and false-positive PageTail() results (see the
         * example after this structure).
         */
        union {
                struct list_head lru;   /* Pageout list, e.g. active_list,
                                         * protected by zone_lru_lock!
                                         * Can be used as a generic list
                                         * by the page owner.
                                         */
                struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on an
                                            * lru or handled by a slab
                                            * allocator, this points to the
                                            * hosting device page map.
                                            */
                struct {                /* slub per cpu partial pages */
                        struct page *next;      /* Next partial slab */
#ifdef CONFIG_64BIT
                        int pages;      /* Nr of partial slabs left */
                        int pobjects;   /* Approximate # of objects */
#else
                        short int pages;
                        short int pobjects;
#endif
                };

                struct rcu_head rcu_head;       /* Used by SLAB
                                                 * when destroying via RCU
                                                 */
                /* Tail pages of compound page */
                struct {
                        unsigned long compound_head; /* If bit zero is set */

                        /* First tail page only */
#ifdef CONFIG_64BIT
                        /*
                         * On 64-bit systems we have enough space in struct page
                         * to encode compound_dtor and compound_order with
                         * unsigned int. That can help the compiler generate
                         * better or smaller code on some architectures.
                         */
                        unsigned int compound_dtor;
                        unsigned int compound_order;
#else
                        unsigned short int compound_dtor;
                        unsigned short int compound_order;
#endif
                };

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
                struct {
                        unsigned long __pad;    /* do not overlay pmd_huge_pte
                                                 * with compound_head to avoid
                                                 * possible bit 0 collision.
                                                 */
                        pgtable_t pmd_huge_pte; /* protected by page->ptl */
                };
#endif
        };

        /* Remainder is not double word aligned */
        union {
                unsigned long private;          /* Mapping-private opaque data:
                                                 * usually used for buffer_heads
                                                 * if PagePrivate set; used for
                                                 * swp_entry_t if PageSwapCache;
                                                 * indicates order in the buddy
                                                 * system if PG_buddy is set.
                                                 */
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
                spinlock_t *ptl;
#else
                spinlock_t ptl;
#endif
#endif
                struct kmem_cache *slab_cache;  /* SL[AU]B: Pointer to slab */
        };

#ifdef CONFIG_MEMCG
        struct mem_cgroup *mem_cgroup;
#endif

        /*
         * On machines where all RAM is mapped into kernel address space,
         * we can simply calculate the virtual address. On machines with
         * highmem some memory is mapped into kernel virtual memory
         * dynamically, so we need a place to store that address.
         * Note that this field could be 16 bits on x86 ... ;)
         *
         * Architectures with slow multiplication can define
         * WANT_PAGE_VIRTUAL in asm/page.h
         */
#if defined(WANT_PAGE_VIRTUAL)
        void *virtual;                  /* Kernel virtual address (NULL if
                                           not kmapped, i.e. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef CONFIG_KMEMCHECK
        /*
         * kmemcheck wants to track the status of each byte in a page; this
         * is a pointer to such a status block. NULL if not tracked.
         */
        void *shadow;
#endif

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
        int _last_cpupid;
#endif
}
/*
 * The struct page can be forced to be double word aligned so that atomic ops
 * on double words work. The SLUB allocator can make use of such a feature.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
        __aligned(2 * sizeof(unsigned long))
#endif
;
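
/*
 * Illustrative sketch, not part of this header: because bit 0 of the third
 * double word encodes PageTail(), tail pages are normally reached only via
 * the helpers in page-flags.h, which hide that encoding, for example:
 *
 *	struct page *head = compound_head(page);	follows the encoded
 *							head pointer, or returns
 *							the page itself
 *	if (PageTail(page))
 *		...					page is a tail page
 *	order = compound_order(head);			0 for non-compound pages
 */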

#define PAGE_FRAG_CACHE_MAX_SIZE        __ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER       get_order(PAGE_FRAG_CACHE_MAX_SIZE)

struct page_frag_cache {
        void *va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
        __u16 offset;
        __u16 size;
#else
        __u32 offset;
#endif
        /* we maintain a pagecount bias, so that we don't dirty the cache
         * line containing page->_refcount every time we allocate a fragment.
         */
        unsigned int            pagecnt_bias;
        bool pfmemalloc;
};
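
/*
 * Illustrative sketch, not part of this header: a page_frag_cache is meant
 * to be consumed through the page fragment allocator (assuming the
 * page_frag_alloc()/page_frag_free() helpers declared in linux/gfp.h),
 * for example:
 *
 *	struct page_frag_cache nc = { .va = NULL };
 *	void *buf;
 *
 *	buf = page_frag_alloc(&nc, 256, GFP_ATOMIC);	carve 256 bytes out of
 *	if (buf)					the currently cached page
 *		page_frag_free(buf);
 *
 * The pagecnt_bias trick lets many fragment allocations share a single
 * update of page->_refcount, which is why the comment above mentions
 * dirtying that cache line.
 */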

typedef unsigned long vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
        struct rb_node  vm_rb;          /* link in global region tree */
        vm_flags_t      vm_flags;       /* VMA vm_flags */
        unsigned long   vm_start;       /* start address of region */
        unsigned long   vm_end;         /* region initialised to here */
        unsigned long   vm_top;         /* region allocated to here */
        unsigned long   vm_pgoff;       /* the offset in vm_file corresponding to vm_start */
        struct file     *vm_file;       /* the backing file or NULL */

        int             vm_usage;       /* region usage count (access under nommu_region_sem) */
        bool            vm_icache_flushed : 1; /* true if the icache has been flushed for
                                                * this region */
};

#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
        struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */

/*
 * This struct defines a virtual memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (i.e. a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
        /* The first cache line has the info for VMA tree walking. */

        unsigned long vm_start;         /* Our start address within vm_mm. */
        unsigned long vm_end;           /* The first byte after our end address
                                           within vm_mm. */

        /* linked list of VM areas per task, sorted by address */
        struct vm_area_struct *vm_next, *vm_prev;

        struct rb_node vm_rb;

        /*
         * Largest free memory gap in bytes to the left of this VMA.
         * Either between this VMA and vma->vm_prev, or between one of the
         * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
         * get_unmapped_area find a free area of the right size.
         */
        unsigned long rb_subtree_gap;

        /* Second cache line starts here. */

        struct mm_struct *vm_mm;        /* The address space we belong to. */
        pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
        unsigned long vm_flags;         /* Flags, see mm.h. */

        /*
         * For areas with an address space and backing store,
         * linkage into the address_space->i_mmap interval tree.
         */
        struct {
                struct rb_node rb;
                unsigned long rb_subtree_last;
        } shared;

        /*
         * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
         * list, after a COW of one of the file pages.  A MAP_SHARED vma
         * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
         * or brk vma (with NULL file) can only be in an anon_vma list.
         */
        struct list_head anon_vma_chain; /* Serialized by mmap_sem &
                                          * page_table_lock */
        struct anon_vma *anon_vma;      /* Serialized by page_table_lock */

        /* Function pointers to deal with this struct. */
        const struct vm_operations_struct *vm_ops;

        /* Information about our backing store: */
        unsigned long vm_pgoff;         /* Offset (within vm_file) in PAGE_SIZE
                                           units */
        struct file *vm_file;           /* File we map to (can be NULL). */
        void *vm_private_data;          /* was vm_pte (shared mem) */

#ifndef CONFIG_MMU
        struct vm_region *vm_region;    /* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
        struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
#endif
        struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
};
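
/*
 * Illustrative sketch, not part of this header: VMAs are looked up and
 * walked under mmap_sem, through either the rbtree or the linked list,
 * for example:
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);		first VMA with vm_end > addr
 *	if (vma && vma->vm_start <= addr)
 *		...				addr really falls inside vma
 *	for (vma = mm->mmap; vma; vma = vma->vm_next)
 *		...				address-ordered walk of all VMAs
 *	up_read(&mm->mmap_sem);
 */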

struct core_thread {
        struct task_struct *task;
        struct core_thread *next;
};

struct core_state {
        atomic_t nr_threads;
        struct core_thread dumper;
        struct completion startup;
};

struct kioctx_table;
struct mm_struct {
        struct vm_area_struct *mmap;            /* list of VMAs */
        struct rb_root mm_rb;
        u32 vmacache_seqnum;                    /* per-thread vmacache */
#ifdef CONFIG_MMU
        unsigned long (*get_unmapped_area) (struct file *filp,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags);
#endif
        unsigned long mmap_base;                /* base of mmap area */
        unsigned long mmap_legacy_base;         /* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
        /* Base addresses for compatible mmap() */
        unsigned long mmap_compat_base;
        unsigned long mmap_compat_legacy_base;
#endif
        unsigned long task_size;                /* size of task vm space */
        unsigned long highest_vm_end;           /* highest vma end address */
        pgd_t *pgd;

        /**
         * @mm_users: The number of users including userspace.
         *
         * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops
         * to 0 (i.e. when the task exits and there are no other temporary
         * reference holders), we also release a reference on @mm_count
         * (which may then free the &struct mm_struct if @mm_count also
         * drops to 0). See the example after this structure.
         */
        atomic_t mm_users;

        /**
         * @mm_count: The number of references to &struct mm_struct
         * (@mm_users counts as 1).
         *
         * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
         * &struct mm_struct is freed.
         */
        atomic_t mm_count;

        atomic_long_t nr_ptes;                  /* PTE page table pages */
#if CONFIG_PGTABLE_LEVELS > 2
        atomic_long_t nr_pmds;                  /* PMD page table pages */
#endif
        int map_count;                          /* number of VMAs */

        spinlock_t page_table_lock;             /* Protects page tables and some counters */
        struct rw_semaphore mmap_sem;

        struct list_head mmlist;                /* List of maybe swapped mm's.  These are globally strung
                                                 * together off init_mm.mmlist, and are protected
                                                 * by mmlist_lock
                                                 */

        unsigned long hiwater_rss;      /* High-watermark of RSS usage */
        unsigned long hiwater_vm;       /* High-water virtual memory usage */

        unsigned long total_vm;         /* Total pages mapped */
        unsigned long locked_vm;        /* Pages that have PG_mlocked set */
        unsigned long pinned_vm;        /* Refcount permanently increased */
        unsigned long data_vm;          /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
        unsigned long exec_vm;          /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
        unsigned long stack_vm;         /* VM_STACK */
        unsigned long def_flags;
        unsigned long start_code, end_code, start_data, end_data;
        unsigned long start_brk, brk, start_stack;
        unsigned long arg_start, arg_end, env_start, env_end;

        unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

        /*
         * Special counters, in some configurations protected by the
         * page_table_lock, in other configurations by being atomic.
         */
        struct mm_rss_stat rss_stat;

        struct linux_binfmt *binfmt;

        cpumask_var_t cpu_vm_mask_var;

        /* Architecture-specific MM context */
        mm_context_t context;

        unsigned long flags; /* Must use atomic bitops to access the bits */

        struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_AIO
        spinlock_t                      ioctx_lock;
        struct kioctx_table __rcu       *ioctx_table;
#endif
#ifdef CONFIG_MEMCG
        /*
         * "owner" points to a task that is regarded as the canonical
         * user/owner of this mm. All of the following must be true in
         * order for it to be changed:
         *
         * current == mm->owner
         * current->mm != mm
         * new_owner->mm == mm
         * new_owner->alloc_lock is held
         */
        struct task_struct __rcu *owner;
#endif
        struct user_namespace *user_ns;

        /* store ref to file /proc/<pid>/exe symlink points to */
        struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
        struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
        pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
        struct cpumask cpumask_allocation;
#endif
#ifdef CONFIG_NUMA_BALANCING
        /*
         * numa_next_scan is the next time that the PTEs will be marked
         * pte_numa. NUMA hinting faults will gather statistics and migrate
         * pages to new nodes if necessary.
         */
        unsigned long numa_next_scan;

        /* Restart point for scanning and setting pte_numa */
        unsigned long numa_scan_offset;

        /* numa_scan_seq prevents two threads setting pte_numa */
        int numa_scan_seq;
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
        /*
         * An operation with batched TLB flushing is going on. Anything that
         * can move process memory needs to flush the TLB when moving a
         * PROT_NONE or PROT_NUMA mapped page.
         */
        bool tlb_flush_pending;
#endif
        struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
        atomic_long_t hugetlb_usage;
#endif
        struct work_struct async_put_work;
};
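
/*
 * Illustrative sketch, not part of this header: the two reference counts in
 * struct mm_struct are only manipulated through their wrappers, for example:
 *
 *	if (mmget_not_zero(mm)) {	pin the address space (mm_users), so
 *		...			page tables and VMAs stay alive
 *		mmput(mm);
 *	}
 *
 *	mmgrab(mm);			pin only the structure (mm_count);
 *	...				the mm may lose its last user meanwhile
 *	mmdrop(mm);
 *
 * A lazy-TLB kernel thread, for instance, holds only an mm_count reference,
 * while a task actually running user code contributes to mm_users.
 */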

extern struct mm_struct init_mm;

static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
        mm->cpu_vm_mask_var = &mm->cpumask_allocation;
#endif
        cpumask_clear(mm->cpu_vm_mask_var);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
        return mm->cpu_vm_mask_var;
}
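
/*
 * Illustrative sketch, not part of this header: mm_cpumask() tracks which
 * CPUs may hold TLB entries for this mm; architectures typically set the
 * bit at context-switch time and consult the mask when deciding whether a
 * remote TLB shootdown is needed, roughly:
 *
 *	cpumask_set_cpu(cpu, mm_cpumask(mm));			on switch-in
 *	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
 *		...			at least one other CPU may need an IPI
 */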

#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
/*
 * Memory barriers to keep this state in sync are graciously provided by
 * the page table locks, outside of which no page table modifications happen.
 * The barriers below prevent the compiler from re-ordering the instructions
 * around the memory barriers that are already present in the code.
 */
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
        barrier();
        return mm->tlb_flush_pending;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)
{
        mm->tlb_flush_pending = true;

        /*
         * Guarantee that the tlb_flush_pending store does not leak into the
         * critical section updating the page tables
         */
        smp_mb__before_spinlock();
}
/* Clearing is done after a TLB flush, which also provides a barrier. */
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
{
        barrier();
        mm->tlb_flush_pending = false;
}
#else
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
        return false;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)
{
}
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
{
}
#endif
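
/*
 * Illustrative sketch, not part of this header: callers that batch their TLB
 * flushing bracket the page table update with the helpers above, roughly:
 *
 *	set_tlb_flush_pending(mm);
 *	...					clear or downgrade a range of PTEs
 *	flush_tlb_range(vma, start, end);
 *	clear_tlb_flush_pending(mm);
 *
 * so that code which moves process memory (e.g. NUMA balancing, compaction)
 * can use mm_tlb_flush_pending() to tell that stale TLB entries may still
 * exist for the range being changed.
 */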

struct vm_fault;

struct vm_special_mapping {
        const char *name;       /* The name, e.g. "[vdso]". */

        /*
         * If .fault is not provided, this points to a
         * NULL-terminated array of pages that back the special mapping.
         *
         * This must not be NULL unless .fault is provided.
         */
        struct page **pages;

        /*
         * If non-NULL, then this is called to resolve page faults
         * on the special mapping.  If used, .pages is not checked.
         */
        int (*fault)(const struct vm_special_mapping *sm,
                     struct vm_area_struct *vma,
                     struct vm_fault *vmf);

        int (*mremap)(const struct vm_special_mapping *sm,
                     struct vm_area_struct *new_vma);
};
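
/*
 * Illustrative sketch, not part of this header: a vm_special_mapping is
 * typically installed the way architectures map their vDSO (assuming the
 * _install_special_mapping() helper from mm/mmap.c), for example:
 *
 *	static const struct vm_special_mapping vdso_mapping = {
 *		.name  = "[vdso]",
 *		.pages = vdso_pages,		NULL-terminated page array
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, len,
 *				       VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
 *				       &vdso_mapping);
 *
 * where vdso_pages, addr and len stand in for the architecture's own data.
 */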

enum tlb_flush_reason {
        TLB_FLUSH_ON_TASK_SWITCH,
        TLB_REMOTE_SHOOTDOWN,
        TLB_LOCAL_SHOOTDOWN,
        TLB_LOCAL_MM_SHOOTDOWN,
        TLB_REMOTE_SEND_IPI,
        NR_TLB_FLUSH_REASONS,
};

 /*
  * A swap entry has to fit into an "unsigned long", as the entry is hidden
  * in the "index" field of the swapper address space.
  */
typedef struct {
        unsigned long val;
} swp_entry_t;
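
/*
 * Illustrative sketch, not part of this header: swap entries are packed and
 * unpacked with the helpers in linux/swapops.h, for example:
 *
 *	swp_entry_t entry = swp_entry(type, offset);	pack type + offset
 *	unsigned int t    = swp_type(entry);		swap area number
 *	pgoff_t off       = swp_offset(entry);		slot within that area
 *
 * The same packed value is what ends up in page->private for PageSwapCache
 * pages and, in an arch-specific encoding, in swap PTEs.
 */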

#endif /* _LINUX_MM_TYPES_H */