#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/mm_types_task.h>

#include <linux/auxvec.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>

#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

struct address_space;
struct mem_cgroup;
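/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * The objects in struct page are organized in double word blocks in
 * order to allow the use of atomic double word operations on portions
 * of struct page. That is currently only used by slub, but the
 * arrangement also allows atomic double word operations on the
 * flags/mapping and lru list pointers.
 */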
struct page {
	/* First double word block */
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	union {
		struct address_space *mapping;	/* If low bit clear, points to
						 * inode address_space, or NULL.
						 * If page mapped as anonymous
						 * memory, low bit is set, and
						 * it points to an anon_vma
						 * object.
						 */
		void *s_mem;			/* slab first object */
		atomic_t compound_mapcount;	/* first tail page */
	};

	/* Second double word */
	union {
		pgoff_t index;		/* Our offset within mapping. */
		void *freelist;		/* sl[aou]b first free object */
	};

	union {
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
	defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
		/* Used for cmpxchg_double in slub */
		unsigned long counters;
#else
		/*
		 * Keep _refcount separate from slub cmpxchg_double data.
		 * The rest of the double word is protected by slab_lock,
		 * but _refcount is not.
		 */
		unsigned counters;
#endif
		struct {
			union {
				/*
				 * Count of ptes mapped in mms, to show
				 * when the page is mapped and to limit
				 * reverse map searches.
				 */
				atomic_t _mapcount;

				unsigned int active;	/* SLAB */
				struct {		/* SLUB */
					unsigned inuse:16;
					unsigned objects:15;
					unsigned frozen:1;
				};
				int units;		/* SLOB */
			};

			/*
			 * Usage count; use the page_ref_*() wrappers in
			 * page_ref.h rather than accessing it directly.
			 */
			atomic_t _refcount;
		};
	};
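	/*
	 * Third double word block
	 *
	 * WARNING: bit 0 of the first word encodes PageTail(). That means
	 * the other users of this storage space MUST NOT use bit 0 to
	 * avoid collisions and false-positive PageTail().
	 */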
	union {
		struct list_head lru;	/* Pageout list, eg. active_list,
					 * protected by zone_lru_lock!
					 * Can be used as a generic list
					 * by the page owner.
					 */
		struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on
					    * an lru or handled by a slab
					    * allocator; this points to the
					    * hosting device page map.
					    */
		struct {		/* slub per-cpu partial pages */
			struct page *next;	/* Next partial slab */
#ifdef CONFIG_64BIT
			int pages;	/* Nr of partial slabs left */
			int pobjects;	/* Approximate # of objects */
#else
			short int pages;
			short int pobjects;
#endif
		};

		struct rcu_head rcu_head;	/* Used by SLAB
						 * when destroying via RCU
						 */
		/* Tail pages of compound page */
		struct {
			unsigned long compound_head; /* If bit zero is set */

			/* First tail page only */
#ifdef CONFIG_64BIT
			/*
			 * On 64 bit systems there is enough space in
			 * struct page to encode compound_dtor and
			 * compound_order as unsigned int, which can help
			 * the compiler generate better or smaller code on
			 * some architectures.
			 */
			unsigned int compound_dtor;
			unsigned int compound_order;
#else
			unsigned short int compound_dtor;
			unsigned short int compound_order;
#endif
		};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
		struct {
			unsigned long __pad;	/* do not overlay pmd_huge_pte
						 * with compound_head to avoid
						 * a possible bit 0 collision
						 */
			pgtable_t pmd_huge_pte; /* protected by page->ptl */
		};
#endif
	};

	/* Remainder is not double word aligned */
	union {
		unsigned long private;		/* Mapping-private opaque data:
						 * usually used for buffer_heads
						 * if PagePrivate set; used for
						 * swp_entry_t if PageSwapCache;
						 * indicates order in the buddy
						 * system if PG_buddy is set.
						 */
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
		spinlock_t *ptl;
#else
		spinlock_t ptl;
#endif
#endif
		struct kmem_cache *slab_cache;	/* SL[AU]B: pointer to slab */
	};

#ifdef CONFIG_MEMCG
	struct mem_cgroup *mem_cgroup;
#endif
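	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h.
	 */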
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef CONFIG_KMEMCHECK
	/*
	 * kmemcheck wants to track the status of each byte in a page; this
	 * is a pointer to such a status block. NULL if not tracked.
	 */
	void *shadow;
#endif

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif
}
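/*
 * struct page can be forced to be double word aligned so that atomic ops
 * on double words work. The SLUB allocator can make use of such a feature.
 */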
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
	__aligned(2 * sizeof(unsigned long))
#endif
;

#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)

struct page_frag_cache {
	void *va;
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	__u16 offset;
	__u16 size;
#else
	__u32 offset;
#endif
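	/*
	 * We maintain a pagecount bias, so that we don't dirty the cache
	 * line containing page->_refcount every time we allocate a
	 * fragment.
	 */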
	unsigned int pagecnt_bias;
	bool pfmemalloc;
};

typedef unsigned long vm_flags_t;

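/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions. These are held in a global tree and are pinned by the VMAs
 * that map parts of them.
 */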
struct vm_region {
	struct rb_node	vm_rb;		/* link in global region tree */
	vm_flags_t	vm_flags;	/* VMA vm_flags */
	unsigned long	vm_start;	/* start address of region */
	unsigned long	vm_end;		/* region initialised to here */
	unsigned long	vm_top;		/* region allocated to here */
	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
	struct file	*vm_file;	/* the backing file or NULL */

	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
						* this region */
};

#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
	struct userfaultfd_ctx *ctx;
};
#else
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif

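/*
 * This struct defines a memory VMM memory area. There is one of these
 * per VM-area/task. A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */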
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	struct rb_node vm_rb;
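	/*
	 * Largest free memory gap in bytes to the left of this VMA.
	 * Either between this VMA and vma->vm_prev, or between one of the
	 * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
	 * get_unmapped_area find a free area of the right size.
	 */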
	unsigned long rb_subtree_gap;

	/* Second cache line starts here. */

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, see mm.h. */

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;
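	/*
	 * A file's MAP_PRIVATE vma can be in both the i_mmap tree and the
	 * anon_vma list, after a COW of one of the file pages. A MAP_SHARED
	 * vma can only be in the i_mmap tree. An anonymous MAP_PRIVATE,
	 * stack or brk vma (with NULL file) can only be in an anon_vma list.
	 */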
	struct list_head anon_vma_chain; /* Serialized by mmap_sem &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file *vm_file;		/* File we map to (can be NULL). */
	void *vm_private_data;		/* was vm_pte (shared mem) */

#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
};

struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};

struct kioctx_table;
struct mm_struct {
	struct vm_area_struct *mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	u32 vmacache_seqnum;			/* per-thread vmacache */
#ifdef CONFIG_MMU
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long mmap_legacy_base;		/* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
	/* Base addresses for compatible mmap() */
	unsigned long mmap_compat_base;
	unsigned long mmap_compat_legacy_base;
#endif
	unsigned long task_size;		/* size of task vm space */
	unsigned long highest_vm_end;		/* highest vma end address */
	pgd_t *pgd;
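
	/**
	 * @mm_users: The number of users including userspace.
	 *
	 * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops
	 * to 0 (i.e. when the task exits and there are no other temporary
	 * reference holders), we also release a reference on @exe_file
	 * and the mmu notifiers are initialized (->mmu_notifier_mm is NULL).
	 */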
	atomic_t mm_users;
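
	/**
	 * @mm_count: The number of references to &struct mm_struct
	 * (@mm_users count as 1).
	 *
	 * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
	 * &struct mm_struct is freed.
	 */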
	atomic_t mm_count;

	atomic_long_t nr_ptes;			/* PTE page table pages */
#if CONFIG_PGTABLE_LEVELS > 2
	atomic_long_t nr_pmds;			/* PMD page table pages */
#endif
	int map_count;				/* number of VMAs */

	spinlock_t page_table_lock;		/* Protects page tables and some counters */
	struct rw_semaphore mmap_sem;

	struct list_head mmlist;		/* List of maybe swapped mm's. These
						 * are globally strung together off
						 * init_mm.mmlist, and are protected
						 * by mmlist_lock.
						 */

	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long pinned_vm;	/* Refcount permanently increased */
	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;		/* VM_STACK */
	unsigned long def_flags;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	/*
	 * Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	struct mm_rss_stat rss_stat;

	struct linux_binfmt *binfmt;

	cpumask_var_t cpu_vm_mask_var;

	/* Architecture-specific MM context */
	mm_context_t context;

	unsigned long flags; /* Must use atomic bitops to access */

	struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_AIO
	spinlock_t ioctx_lock;
	struct kioctx_table __rcu *ioctx_table;
#endif
#ifdef CONFIG_MEMCG
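	/*
	 * "owner" points to a task that is regarded as the canonical
	 * user/owner of this mm. All of the following must be true in
	 * order for it to be changed:
	 *
	 * current == mm->owner
	 * current->mm != mm
	 * new_owner->mm == mm
	 * new_owner->alloc_lock is held
	 */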
	struct task_struct __rcu *owner;
#endif
	struct user_namespace *user_ns;

	/* store ref to file /proc/<pid>/exe symlink points to */
	struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	struct cpumask cpumask_allocation;
#endif
#ifdef CONFIG_NUMA_BALANCING
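	/*
	 * numa_next_scan is the next time that the PTEs will be marked
	 * pte_numa. NUMA hinting faults will gather statistics and
	 * migrate pages to new nodes if necessary.
	 */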
	unsigned long numa_next_scan;

	/* Restart point for scanning and setting pte_numa */
	unsigned long numa_scan_offset;

	/* numa_scan_seq prevents two threads setting pte_numa */
	int numa_scan_seq;
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
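	/*
	 * An operation with batched TLB flushing is going on. Anything
	 * that can move process memory needs to flush the TLB when
	 * moving a PROT_NONE or PROT_NUMA mapped page.
	 */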
	bool tlb_flush_pending;
#endif
	struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
	atomic_long_t hugetlb_usage;
#endif
	struct work_struct async_put_work;
};

extern struct mm_struct init_mm;

static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	mm->cpu_vm_mask_var = &mm->cpumask_allocation;
#endif
	cpumask_clear(mm->cpu_vm_mask_var);
}

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
	return mm->cpu_vm_mask_var;
}

#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
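/*
 * Memory barriers to keep this state in sync are graciously provided by
 * the page table locks, outside of which no page table modifications
 * happen. The barriers below only prevent the compiler from re-ordering
 * the instructions around the memory barriers that are already present
 * in the code.
 */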
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	barrier();
	return mm->tlb_flush_pending;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)
{
	mm->tlb_flush_pending = true;
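	/*
	 * Guarantee that the tlb_flush_pending store does not leak into the
	 * critical section updating the page tables.
	 */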
	smp_mb__before_spinlock();
}

/* Clearing is done after a TLB flush, which also provides a barrier. */
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
{
	barrier();
	mm->tlb_flush_pending = false;
}
#else
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	return false;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)
{
}
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
{
}
#endif

struct vm_fault;

struct vm_special_mapping {
	const char *name;	/* The name, e.g. "[vdso]". */
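	/*
	 * If .fault is not provided, this points to a
	 * NULL-terminated array of pages that back the special mapping.
	 *
	 * This must not be NULL unless .fault is provided.
	 */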
	struct page **pages;
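	/*
	 * If non-NULL, then this is called to resolve page faults
	 * on the special mapping. If used, .pages is not checked.
	 */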
	int (*fault)(const struct vm_special_mapping *sm,
		     struct vm_area_struct *vma,
		     struct vm_fault *vmf);

	int (*mremap)(const struct vm_special_mapping *sm,
		     struct vm_area_struct *new_vma);
};

enum tlb_flush_reason {
	TLB_FLUSH_ON_TASK_SWITCH,
	TLB_REMOTE_SHOOTDOWN,
	TLB_LOCAL_SHOOTDOWN,
	TLB_LOCAL_MM_SHOOTDOWN,
	TLB_REMOTE_SEND_IPI,
	NR_TLB_FLUSH_REASONS,
};
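/*
 * A swap entry has to fit into a "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */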
typedef struct {
	unsigned long val;
} swp_entry_t;

#endif /* _LINUX_MM_TYPES_H */