1#ifndef _LINUX_MM_TYPES_H
2#define _LINUX_MM_TYPES_H
3
4#include <linux/auxvec.h>
5#include <linux/types.h>
6#include <linux/threads.h>
7#include <linux/list.h>
8#include <linux/spinlock.h>
9#include <linux/prio_tree.h>
10#include <linux/rbtree.h>
11#include <linux/rwsem.h>
12#include <linux/completion.h>
13#include <linux/cpumask.h>
14#include <linux/page-debug-flags.h>
15#include <linux/uprobes.h>
16#include <asm/page.h>
17#include <asm/mmu.h>
18
#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0	/* architectures may override in asm/auxvec.h */
#endif
/*
 * Size of mm_struct::saved_auxv[]: each ELF auxiliary-vector entry is an
 * (id, value) pair, hence the factor of 2; +1 for the AT_NULL terminator.
 */
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

struct address_space;

/*
 * Use a per-page split page-table lock (page::ptl) instead of the single
 * mm-wide page_table_lock once there are enough CPUs for it to contend.
 */
#define USE_SPLIT_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
27
28
29
30
31
32
33
34
35
36
37
38
39
40
/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment.  Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * The members are grouped in double-word blocks so that portions of the
 * struct can be updated with atomic double-word operations (currently
 * used by SLUB's cmpxchg_double on freelist/counters).
 */
struct page {
	/* First double word block */
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	struct address_space *mapping;	/* If low bit clear, points to
					 * inode address_space, or NULL.
					 * If page mapped as anonymous
					 * memory, low bit is set, and
					 * it points to anon_vma object.
					 */
	/* Second double word */
	struct {
		union {
			pgoff_t index;		/* Our offset within mapping. */
			void *freelist;		/* slub/slob first free object */
		};

		union {
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
	defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
			/* Used for cmpxchg_double in slub */
			unsigned long counters;
#else
			/*
			 * Keep _count separate from slub cmpxchg_double
			 * data.  The rest of the double word is protected
			 * by slab_lock but _count is not.
			 */
			unsigned counters;
#endif

			struct {

				union {
					/*
					 * Count of ptes mapped in mms, to
					 * show when page is mapped & limit
					 * reverse map searches.
					 *
					 * Also used for tail-page
					 * refcounting instead of _count:
					 * tail pages cannot be mapped, and
					 * keeping the tail page _count zero
					 * at all times guarantees
					 * get_page_unless_zero() will never
					 * succeed on tail pages.
					 */
					atomic_t _mapcount;

					struct { /* SLUB per-slab state */
						unsigned inuse:16;
						unsigned objects:15;
						unsigned frozen:1;
					};
				};
				atomic_t _count;	/* Usage count. */
			};
		};
	};

	/* Third double word block */
	union {
		struct list_head lru;	/* Pageout list, eg. active_list
					 * protected by zone->lru_lock !
					 */
		struct {		/* slub per-cpu partial pages */
			struct page *next;	/* Next partial slab */
#ifdef CONFIG_64BIT
			int pages;	/* Nr of partial slabs left */
			int pobjects;	/* Approximate # of objects */
#else
			short int pages;
			short int pobjects;
#endif
		};
	};

	/* Remainder is not double word aligned */
	union {
		unsigned long private;	/* Mapping-private opaque data:
					 * usually used for buffer_heads if
					 * PagePrivate set; used for
					 * swp_entry_t if PageSwapCache;
					 * indicates order in the buddy
					 * system if PG_buddy is set.
					 */
#if USE_SPLIT_PTLOCKS
		spinlock_t ptl;		/* page-table lock when this page
					 * holds a page table */
#endif
		struct kmem_cache *slab;	/* SLUB: pointer to slab */
		struct page *first_page;	/* Compound tail pages */
	};

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address.  On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h.
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
	unsigned long debug_flags;	/* Use atomic bitops on this */
#endif

#ifdef CONFIG_KMEMCHECK
	/*
	 * kmemcheck wants to track the status of each byte in a page; this
	 * is a pointer to such a status block.  NULL if not tracked.
	 */
	void *shadow;
#endif
}
/*
 * The struct page can be forced to be double-word aligned so that atomic
 * ops on double words work.  The SLUB allocator can make use of such a
 * feature.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
	__aligned(2 * sizeof(unsigned long))
#endif
;
172
/*
 * A page fragment: a sub-range of a page, described by an offset and size.
 * The narrow 16-bit fields are used where they are guaranteed to be able
 * to address any byte of a page (32-bit longs with pages < 64KiB).
 */
struct page_frag {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 offset;
	__u32 size;
#else
	__u16 offset;
	__u16 size;
#endif
};
183
/* VMA flag bits; __nocast makes sparse flag implicit conversions. */
typedef unsigned long __nocast vm_flags_t;
185
186
187
188
189
190
/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs
 * that map parts of them.
 */
struct vm_region {
	struct rb_node vm_rb;		/* link in global region tree */
	vm_flags_t vm_flags;		/* VMA vm_flags */
	unsigned long vm_start;		/* start address of region */
	unsigned long vm_end;		/* region initialised to here */
	unsigned long vm_top;		/* region allocated to here */
	unsigned long vm_pgoff;		/* the offset in vm_file corresponding
					 * to vm_start */
	struct file *vm_file;		/* the backing file or NULL */

	int vm_usage;			/* region usage count */
	bool vm_icache_flushed : 1;	/* true if the icache has been flushed
					 * for this region */
};
204
205
206
207
208
209
210
/*
 * This struct defines a memory VMM memory area.  There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	struct mm_struct * vm_mm;	/* The address space we belong to. */
	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					 * within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, see mm.h. */

	struct rb_node vm_rb;		/* link in the per-mm rbtree */

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap prio tree, or
	 * linkage to the list of like vmas hanging off its node, or
	 * linkage of vma in the address_space->i_mmap_nonlinear list.
	 */
	union {
		struct {
			struct list_head list;
			void *parent;	/* aligns with prio_tree_node parent */
			struct vm_area_struct *head;
		} vm_set;

		struct raw_prio_tree_node prio_tree_node;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.  A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_sem &
					  * page_table_lock */

	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					 * units */
	struct file * vm_file;		/* File we map to (can be NULL). */
	void * vm_private_data;		/* was vm_pte (shared mem) */

#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
};
267
/* One node in the singly linked list of threads joining a core dump. */
struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};
272
/* Per-mm state used to coordinate threads during a core dump. */
struct core_state {
	atomic_t nr_threads;		/* threads still to check in */
	struct core_thread dumper;	/* head of the waiting-thread list;
					 * .task is the dumping thread */
	struct completion startup;	/* signalled once all threads joined */
};
278
/* Indices into the per-mm RSS counter arrays below. */
enum {
	MM_FILEPAGES,	/* resident file-backed pages */
	MM_ANONPAGES,	/* resident anonymous pages */
	MM_SWAPENTS,	/* anonymous swap entries */
	NR_MM_COUNTERS
};
285
#if USE_SPLIT_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/* Per-task cached RSS deltas, flushed into mm_rss_stat periodically. */
struct task_rss_stat {
	int events;	/* updates since last flush (synchronization threshold) */
	int count[NR_MM_COUNTERS];
};
#endif /* USE_SPLIT_PTLOCKS */
294
/* Per-mm RSS counters; atomic since they may be updated concurrently. */
struct mm_rss_stat {
	atomic_long_t count[NR_MM_COUNTERS];
};
298
/* Per-address-space state: one per process (shared by its threads). */
struct mm_struct {
	struct vm_area_struct * mmap;		/* list of VMAs */
	struct rb_root mm_rb;			/* rbtree of VMAs */
	struct vm_area_struct * mmap_cache;	/* last find_vma result */
#ifdef CONFIG_MMU
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
	void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
#endif
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long task_size;		/* size of task vm space */
	unsigned long cached_hole_size;		/* if non-zero, the largest hole
						 * below free_area_cache */
	unsigned long free_area_cache;		/* first hole of size
						 * cached_hole_size or larger */
	pgd_t * pgd;
	atomic_t mm_users;			/* How many users with user
						 * space? */
	atomic_t mm_count;			/* How many references to
						 * "struct mm_struct" (users
						 * count as 1) */
	int map_count;				/* number of VMAs */

	spinlock_t page_table_lock;		/* Protects page tables and
						 * some counters */
	struct rw_semaphore mmap_sem;

	struct list_head mmlist;		/* List of maybe swapped mm's.
						 * These are globally strung
						 * together off init_mm.mmlist,
						 * and are protected by
						 * mmlist_lock */


	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long pinned_vm;	/* Refcount permanently increased */
	unsigned long shared_vm;	/* Shared pages (files) */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE */
	unsigned long stack_vm;		/* VM_GROWSUP/DOWN */
	unsigned long reserved_vm;	/* VM_RESERVED|VM_IO pages */
	unsigned long def_flags;
	unsigned long nr_ptes;		/* Page table pages */
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	/*
	 * Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	struct mm_rss_stat rss_stat;

	struct linux_binfmt *binfmt;

	cpumask_var_t cpu_vm_mask_var;	/* see mm_cpumask()/mm_init_cpumask() */

	/* Architecture-specific MM context */
	mm_context_t context;

	unsigned long flags;		/* Must use atomic bitops to access */

	struct core_state *core_state;	/* coredumping support */
#ifdef CONFIG_AIO
	spinlock_t ioctx_lock;
	struct hlist_head ioctx_list;
#endif
#ifdef CONFIG_MM_OWNER
	/*
	 * "owner" points to a task that is regarded as the canonical
	 * user/owner of this mm.  All of the following must be true in
	 * order for it to be changed:
	 *
	 * current == mm->owner
	 * current->mm != mm
	 * new_owner->mm == mm
	 * new_owner->alloc_lock is held
	 */
	struct task_struct __rcu *owner;
#endif

	/* store ref to file /proc/<pid>/exe symlink points to */
	struct file *exe_file;
	unsigned long num_exe_file_vmas;
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	pgtable_t pmd_huge_pte;		/* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	struct cpumask cpumask_allocation;	/* backing storage for
						 * cpu_vm_mask_var */
#endif
	struct uprobes_state uprobes_state;
};
393
/*
 * Point mm->cpu_vm_mask_var at its backing storage.  With
 * CONFIG_CPUMASK_OFFSTACK the cpumask is the cpumask_allocation member
 * embedded in the mm_struct itself; otherwise cpumask_var_t already
 * provides inline storage and no setup is needed.
 */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	mm->cpu_vm_mask_var = &mm->cpumask_allocation;
#endif
}
400
401
402static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
403{
404 return mm->cpu_vm_mask_var;
405}
406
407#endif
408