#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/auxvec.h>
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/prio_tree.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <asm/page.h>
#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

struct address_space;

#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
typedef atomic_long_t mm_counter_t;
#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
typedef unsigned long mm_counter_t;
#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 */
struct page {
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	atomic_t _count;		/* Usage count, see below. */
	union {
		atomic_t _mapcount;	/* Count of ptes mapped in mms,
					 * to show when page is mapped
					 * & limit reverse map searches.
					 */
		unsigned int inuse;	/* SLUB: Nr of objects */
	};
	union {
		struct {
			unsigned long private;		/* Mapping-private opaque data:
							 * usually used for buffer_heads
							 * if PagePrivate set; used for
							 * swp_entry_t if PageSwapCache;
							 * indicates order in the buddy
							 * system if PG_buddy is set.
							 */
			struct address_space *mapping;	/* If low bit clear, points to
							 * inode address_space, or NULL.
							 * If page mapped as anonymous
							 * memory, low bit is set, and
							 * it points to anon_vma object:
							 * see PAGE_MAPPING_ANON below.
							 */
		};
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
		spinlock_t ptl;
#endif
		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
		struct page *first_page;	/* Compound tail pages */
	};
	union {
		pgoff_t index;		/* Our offset within mapping. */
		void *freelist;		/* SLUB: freelist req. slab lock */
	};
	struct list_head lru;		/* Pageout list, eg. active_list
					 * protected by zone->lru_lock !
					 */
	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
};
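
/*
 * Illustrative sketch of the page->mapping low-bit convention described
 * above: anonymous pages set bit 0 of page->mapping and store an
 * anon_vma pointer there, while pagecache pages store the inode's
 * address_space pointer directly.  This is a minimal example, not
 * kernel API -- the real PAGE_MAPPING_ANON constant and its helpers
 * live in <linux/mm.h>, and the example_* names here are hypothetical.
 */
struct anon_vma;

#define EXAMPLE_PAGE_MAPPING_ANON	1

static inline struct anon_vma *example_page_anon_vma(struct page *page)
{
	unsigned long mapping = (unsigned long)page->mapping;

	if (!(mapping & EXAMPLE_PAGE_MAPPING_ANON))
		return NULL;	/* pagecache (or unmapped) page */
	return (struct anon_vma *)
		(mapping & ~(unsigned long)EXAMPLE_PAGE_MAPPING_ANON);
}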

/*
 * This struct defines a VMM memory area. There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	struct mm_struct *vm_mm;	/* The address space we belong to. */
	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next;

	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, listed below. */

	struct rb_node vm_rb;

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap prio tree, or
	 * linkage to the list of like vmas hanging off its node, or
	 * linkage of vma in the address_space->i_mmap_nonlinear list.
	 */
	union {
		struct {
			struct list_head list;
			void *parent;	/* aligns with prio_tree_node parent */
			struct vm_area_struct *head;
		} vm_set;

		struct raw_prio_tree_node prio_tree_node;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.  A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_node;	/* Serialized by anon_vma->lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units, *not* PAGE_CACHE_SIZE */
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */
	unsigned long vm_truncate_count;/* truncate_count or restart_addr */

#ifndef CONFIG_MMU
	atomic_t vm_usage;		/* refcount (VMAs shared if !MMU) */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
};
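
/*
 * Illustrative sketch of the [vm_start, vm_end) convention above: an
 * address belongs to a VMA when vm_start <= addr < vm_end, and vm_next
 * chains a task's VMAs in ascending address order.  A minimal linear
 * lookup is shown below; the kernel's real find_vma() in mm/mmap.c uses
 * the mm_rb red-black tree instead, and returns the first VMA ending
 * above addr rather than requiring strict containment.  The example_*
 * name is hypothetical.
 */
static inline struct vm_area_struct *
example_find_vma_linear(struct vm_area_struct *vma, unsigned long addr)
{
	/* walk the address-sorted list from any starting VMA */
	for (; vma; vma = vma->vm_next) {
		if (addr < vma->vm_start)
			break;		/* sorted list: addr lies in a hole */
		if (addr < vma->vm_end)
			return vma;	/* vm_start <= addr < vm_end */
	}
	return NULL;
}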

struct mm_struct {
	struct vm_area_struct *mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	struct vm_area_struct *mmap_cache;	/* last find_vma result */
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long task_size;		/* size of task vm space */
	unsigned long cached_hole_size;		/* if non-zero, the largest hole below free_area_cache */
	unsigned long free_area_cache;		/* first hole of size cached_hole_size or larger */
	pgd_t *pgd;
	atomic_t mm_users;			/* How many users with user space? */
	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
	int map_count;				/* number of VMAs */
	struct rw_semaphore mmap_sem;
	spinlock_t page_table_lock;		/* Protects page tables and some counters */

	struct list_head mmlist;		/* List of maybe swapped mm's.  These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */

	/*
	 * Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	mm_counter_t _file_rss;
	mm_counter_t _anon_rss;

	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm, locked_vm, shared_vm, exec_vm;
	unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE];	/* for /proc/PID/auxv */

	cpumask_t cpu_vm_mask;

	/* Architecture-specific MM context */
	mm_context_t context;

	/* Swap token stuff */
	/*
	 * Last value of global fault stamp as seen by this process.
	 * In other words, this value gives an indication of how long
	 * it has been since this task got the token.
	 * Look at mm/thrash.c
	 */
	unsigned int faultstamp;
	unsigned int token_priority;
	unsigned int last_interval;

	unsigned long flags;		/* Must use atomic bitops to access the bits */

	/* coredumping support */
	int core_waiters;
	struct completion *core_startup_done, core_done;

	/* aio bits */
	rwlock_t ioctx_list_lock;
	struct kioctx *ioctx_list;
};

#endif /* _LINUX_MM_TYPES_H */
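
/*
 * Illustrative sketch of the mm_users/mm_count split documented above:
 * tasks actually using the address space each hold an mm_users
 * reference, and all of them together pin "struct mm_struct" with a
 * single mm_count reference.  Contexts that only need the struct to
 * stay around (e.g. lazy-TLB kernel threads) bump mm_count alone, so
 * the VMAs and page tables can be freed on the last mmput() while the
 * struct itself survives until the last mmdrop().  The example_*
 * helpers below are hypothetical and assume <asm/atomic.h> is
 * available; the real acquire/release pairs (mmput(), mmdrop()) are
 * declared in <linux/sched.h>.
 */
static inline void example_pin_address_space(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);	/* we will touch user mappings */
}

static inline void example_pin_mm_struct(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);	/* keep the struct, not the pages */
}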