#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/auxvec.h>
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/page-debug-flags.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <asm/page.h>
#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

struct address_space;

#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)
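
/*
 * For example, with NR_CPUS == 64 and CONFIG_SPLIT_PTLOCK_CPUS == 4,
 * USE_SPLIT_PTE_PTLOCKS evaluates to 1 and each page-table page carries
 * its own lock (see page->ptl below) instead of serializing every PTE
 * update on mm->page_table_lock. ALLOC_SPLIT_PTLOCKS then decides how the
 * lock is stored: if spinlock_t has grown past one word (e.g. with lockdep
 * enabled), page->ptl is a pointer to a separately allocated lock;
 * otherwise the lock is embedded in struct page itself.
 */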

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * The objects in struct page are organized in double word blocks in
 * order to allow us to use atomic double word operations on portions
 * of struct page. That is currently only used by slub but the arrangement
 * allows the use of atomic double word operations on the flags/mapping
 * and lru list pointers also.
 */
struct page {
	/* First double word block */
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	union {
		struct address_space *mapping;	/* If low bit clear, points to
						 * inode address_space, or NULL.
						 * If page mapped as anonymous
						 * memory, low bit is set, and
						 * it points to anon_vma object:
						 * see PAGE_MAPPING_ANON below.
						 */
		void *s_mem;			/* slab first object */
	};

	/* Second double word */
	struct {
		union {
			pgoff_t index;		/* Our offset within mapping. */
			void *freelist;		/* sl[aou]b first free object */
			bool pfmemalloc;	/* If set by the page allocator,
						 * ALLOC_NO_WATERMARKS was set
						 * and the low watermark was not
						 * met implying that the system
						 * is under some pressure. The
						 * caller should try to ensure
						 * this page is only used to
						 * free other pages.
						 */
		};
74
75 union {
76#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
77 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
78
79 unsigned long counters;
80#else
81
82
83
84
85
86 unsigned counters;
87#endif

			struct {

				union {
					/*
					 * Count of ptes mapped in
					 * mms, to show when page is
					 * mapped & limit reverse map
					 * searches.
					 *
					 * Used also for tail pages
					 * refcounting instead of
					 * _count. Tail pages cannot
					 * be mapped and keeping the
					 * tail page _count zero at
					 * all times guarantees
					 * get_page_unless_zero() will
					 * never succeed on tail
					 * pages.
					 */
					atomic_t _mapcount;

					struct { /* SLUB */
						unsigned inuse:16;
						unsigned objects:15;
						unsigned frozen:1;
					};
					int units;	/* SLOB */
				};
				atomic_t _count;	/* Usage count, see below. */
			};
			unsigned int active;	/* SLAB */
		};
	};

	/* Third double word block */
	union {
		struct list_head lru;	/* Pageout list, eg. active_list
					 * protected by zone->lru_lock !
					 * Can be used as a generic list
					 * by the page owner.
					 */
		struct {		/* slub per cpu partial pages */
			struct page *next;	/* Next partial slab */
#ifdef CONFIG_64BIT
			int pages;	/* Nr of partial slabs left */
			int pobjects;	/* Approximate # of objects */
#else
			short int pages;
			short int pobjects;
#endif
		};

		struct slab *slab_page; /* slab fields */
		struct rcu_head rcu_head;	/* Used by SLAB
						 * when destroying via RCU
						 */
		/* First tail page of compound page */
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
		pgtable_t pmd_huge_pte; /* protected by page->ptl */
#endif
	};

	/* Remainder is not double word aligned */
	union {
		unsigned long private;		/* Mapping-private opaque data:
						 * usually used for buffer_heads
						 * if PagePrivate set; used for
						 * swp_entry_t if PageSwapCache;
						 * indicates order in the buddy
						 * system if PG_buddy is set.
						 */
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
		spinlock_t *ptl;
#else
		spinlock_t ptl;
#endif
#endif
		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
		struct page *first_page;	/* Compound tail pages */
	};

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
	unsigned long debug_flags;	/* Use atomic bitops on this */
#endif

#ifdef CONFIG_KMEMCHECK
	/*
	 * kmemcheck wants to track the status of each byte in a page; this
	 * is a pointer to such a status block. NULL if not tracked.
	 */
	void *shadow;
#endif

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif
}
/*
 * The struct page can be forced to be double word aligned so that atomic ops
 * on double words work. The SLUB allocator can make use of such a feature.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
	__aligned(2 * sizeof(unsigned long))
#endif
;
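
/*
 * The double word alignment is what lets SLUB update a page's freelist and
 * counters as a single atomic unit, roughly:
 *
 *	cmpxchg_double(&page->freelist, &page->counters,
 *		       old_freelist, old_counters,
 *		       new_freelist, new_counters);
 *
 * cmpxchg_double() requires its two targets to be adjacent and naturally
 * aligned to 2 * sizeof(unsigned long), hence the __aligned() above.
 */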

struct page_frag {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 offset;
	__u32 size;
#else
	__u16 offset;
	__u16 size;
#endif
};

typedef unsigned long __nocast vm_flags_t;

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions. These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
	struct rb_node	vm_rb;		/* link in global region tree */
	vm_flags_t	vm_flags;	/* VMA vm_flags */
	unsigned long	vm_start;	/* start address of region */
	unsigned long	vm_end;		/* region initialised to here */
	unsigned long	vm_top;		/* region allocated to here */
	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
	struct file	*vm_file;	/* the backing file or NULL */

	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
						* this region */
};

/*
 * This struct defines a memory VMM memory area. There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	struct rb_node vm_rb;

	/*
	 * Largest free memory gap in bytes to the left of this VMA.
	 * Either between this VMA and vma->vm_prev, or between one of the
	 * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
	 * get_unmapped_area find a free area of the right size.
	 */
	unsigned long rb_subtree_gap;

	/* Second cache line starts here. */

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, see mm.h. */

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree, or
	 * linkage of vma in the address_space->i_mmap_nonlinear list.
	 */
	union {
		struct {
			struct rb_node rb;
			unsigned long rb_subtree_last;
		} linear;
		struct list_head nonlinear;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.  A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_sem &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units, *not* PAGE_CACHE_SIZE */
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */

#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
};
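
/*
 * Illustrative use: VMAs hang off their mm both as an address-sorted list
 * and as an rbtree, so a full walk (with mmap_sem held) looks like
 *
 *	struct vm_area_struct *vma;
 *
 *	for (vma = mm->mmap; vma; vma = vma->vm_next)
 *		...;
 *
 * while find_vma() uses mm->mm_rb for O(log n) lookup of a single address.
 */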

struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};

enum {
	MM_FILEPAGES,
	MM_ANONPAGES,
	MM_SWAPENTS,
	NR_MM_COUNTERS
};

#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/* per-thread cached information */
struct task_rss_stat {
	int events;	/* for synchronization threshold */
	int count[NR_MM_COUNTERS];
};
#endif /* USE_SPLIT_PTE_PTLOCKS */

struct mm_rss_stat {
	atomic_long_t count[NR_MM_COUNTERS];
};
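
/*
 * A counter is read with a plain atomic load, e.g.
 *
 *	atomic_long_read(&mm->rss_stat.count[MM_ANONPAGES]);
 *
 * Under SPLIT_RSS_COUNTING, updates are first batched in the per-task
 * task_rss_stat and only folded into mm->rss_stat once the events
 * threshold is reached, so readers may observe slightly stale values.
 */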

struct kioctx_table;
struct mm_struct {
	struct vm_area_struct *mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	u32 vmacache_seqnum;			/* per-thread vmacache */
#ifdef CONFIG_MMU
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long mmap_legacy_base;		/* base of mmap area in bottom-up allocations */
	unsigned long task_size;		/* size of task vm space */
	unsigned long highest_vm_end;		/* highest vma end address */
	pgd_t *pgd;
	atomic_t mm_users;			/* How many users with user space? */
	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
	atomic_long_t nr_ptes;			/* Page table pages */
	int map_count;				/* number of VMAs */

	spinlock_t page_table_lock;		/* Protects page tables and some counters */
	struct rw_semaphore mmap_sem;

	struct list_head mmlist;		/* List of maybe swapped mm's.	These
						 * are globally strung together off
						 * init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */

	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long pinned_vm;	/* Refcount permanently increased */
	unsigned long shared_vm;	/* Shared pages (files) */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE */
	unsigned long stack_vm;		/* VM_GROWSUP/DOWN */
	unsigned long def_flags;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	/*
	 * Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	struct mm_rss_stat rss_stat;

	struct linux_binfmt *binfmt;

	cpumask_var_t cpu_vm_mask_var;

	/* Architecture-specific MM context */
	mm_context_t context;

	unsigned long flags;		/* Must use atomic bitops to access the bits */

	struct core_state *core_state;	/* coredumping support */
#ifdef CONFIG_AIO
	spinlock_t			ioctx_lock;
	struct kioctx_table __rcu	*ioctx_table;
#endif
#ifdef CONFIG_MM_OWNER
	/*
	 * "owner" points to a task that is regarded as the canonical
	 * user/owner of this mm. All of the following must be true in
	 * order for it to be changed:
	 *
	 * current == mm->owner
	 * current->mm != mm
	 * new_owner->mm == mm
	 * new_owner->alloc_lock is held
	 */
	struct task_struct __rcu *owner;
#endif

	/* store ref to file /proc/<pid>/exe symlink points to */
	struct file *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	pgtable_t pmd_huge_pte;		/* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	struct cpumask cpumask_allocation;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/*
	 * numa_next_scan is the next time that the PTEs will be marked
	 * pte_numa. NUMA hinting faults will gather statistics and migrate
	 * pages to new nodes if necessary.
	 */
	unsigned long numa_next_scan;

	/* Restart point for scanning and setting pte_numa */
	unsigned long numa_scan_offset;

	/* numa_scan_seq prevents two threads setting pte_numa */
	int numa_scan_seq;
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
	/*
	 * An operation with batched TLB flushing is going on. Anything that
	 * can move process memory needs to flush the TLB when moving a
	 * PROT_NONE or PROT_NUMA mapped page.
	 */
	bool tlb_flush_pending;
#endif
	struct uprobes_state uprobes_state;
};
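
/*
 * The two reference counts above work together: mm_users counts users of
 * the address space itself, and all of them collectively hold a single
 * mm_count reference that pins the structure. Something like:
 *
 *	atomic_inc(&mm->mm_users);	// e.g. a new thread sharing the mm
 *	...
 *	mmput(mm);			// drops mm_users; the final drop
 *					// tears down the mappings and then
 *					// drops the shared mm_count
 *					// reference, freeing the struct
 */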

static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	mm->cpu_vm_mask_var = &mm->cpumask_allocation;
#endif
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return mm->cpu_vm_mask_var;
}
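
/*
 * mm_cpumask() tracks which CPUs may hold live TLB entries for this mm;
 * arch TLB-shootdown code typically does something like
 *
 *	smp_call_function_many(mm_cpumask(mm), flush_fn, mm, 1);
 *
 * where flush_fn stands for the arch's handler that invalidates the local
 * TLB on each CPU in the mask.
 */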

#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
/*
 * Memory barriers to keep this state in sync are graciously provided by
 * the page table locks, outside of which no page table modifications happen.
 * The barriers below prevent the compiler from re-ordering the instructions
 * around the memory barriers that are already present in the code.
 */
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	barrier();
	return mm->tlb_flush_pending;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)
{
	mm->tlb_flush_pending = true;

	/*
	 * Guarantee that the tlb_flush_pending store does not leak into the
	 * critical section updating the page tables.
	 */
	smp_mb__before_spinlock();
}
/* Clearing is done after a TLB flush, which also provides a barrier. */
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
{
	barrier();
	mm->tlb_flush_pending = false;
}
#else
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	return false;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)
{
}
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
{
}
#endif
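
/*
 * The intended protocol, as in change_protection_range(), is roughly:
 *
 *	set_tlb_flush_pending(mm);
 *	... change PTEs under the page table lock ...
 *	flush_tlb_range(vma, start, end);
 *	clear_tlb_flush_pending(mm);
 *
 * Code that can race with the flush (e.g. NUMA hinting faults) checks
 * mm_tlb_flush_pending() to learn whether a flush may still be in flight.
 */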

#endif /* _LINUX_MM_TYPES_H */