1#ifndef _LINUX_MM_TYPES_H
2#define _LINUX_MM_TYPES_H
3
4#include <linux/auxvec.h>
5#include <linux/types.h>
6#include <linux/threads.h>
7#include <linux/list.h>
8#include <linux/spinlock.h>
9#include <linux/rbtree.h>
10#include <linux/rwsem.h>
11#include <linux/completion.h>
12#include <linux/cpumask.h>
13#include <linux/uprobes.h>
14#include <linux/page-flags-layout.h>
15#include <asm/page.h>
16#include <asm/mmu.h>
17
#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0	/* architectures may add extra AT_* entries */
#endif
/*
 * Size of the saved auxiliary vector: each entry is an (id, value)
 * pair — hence the factor of two — and we need room for the base
 * entries, any arch-specific extras, and the terminating AT_NULL.
 */
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))

struct address_space;
struct mem_cgroup;

/* Use split page-table locks only when there are enough CPUs to benefit. */
#define USE_SPLIT_PTE_PTLOCKS	(NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS	(USE_SPLIT_PTE_PTLOCKS && \
		IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
/* Allocate the spinlock separately when it is too big to embed in struct page. */
#define ALLOC_SPLIT_PTLOCKS	(SPINLOCK_SIZE > BITS_PER_LONG/8)
30
31
32
33
34
35
36
37
38
39
40
41
42
43
/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment.  Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 *
 * The objects in struct page are organized in double word blocks in
 * order to allow us to use atomic double word operations on portions
 * of struct page.  That is currently only used by slub but the
 * arrangement allows the use of atomic double word operations on the
 * flags/mapping and lru list pointers also.
 */
struct page {
	/* First double word block */
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	union {
		struct address_space *mapping;	/* If low bit clear, points to
						 * inode address_space, or NULL.
						 * If page mapped as anonymous
						 * memory, low bit is set, and
						 * it points to anon_vma object.
						 */
		void *s_mem;			/* slab first object */
		atomic_t compound_mapcount;	/* first tail page */
		/* page_deferred_list().next	 -- second tail page */
	};

	/* Second double word */
	struct {
		union {
			pgoff_t index;		/* Our offset within mapping. */
			void *freelist;		/* sl[aou]b first free object */
			/* page_deferred_list().prev	-- second tail page */
		};

		union {
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
	defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
			/* Used for cmpxchg_double in slub */
			unsigned long counters;
#else
			/*
			 * Keep _count separate from slub cmpxchg_double
			 * data: the rest of the double word is protected by
			 * slab_lock but _count is not.
			 */
			unsigned counters;
#endif

			struct {

				union {
					/*
					 * Count of ptes mapped in mms, to show
					 * when page is mapped & limit reverse
					 * map searches.
					 */
					atomic_t _mapcount;

					struct {	/* SLUB */
						unsigned inuse:16;
						unsigned objects:15;
						unsigned frozen:1;
					};
					int units;	/* SLOB */
				};
				atomic_t _count;	/* Usage count, see below. */
			};
			unsigned int active;	/* SLAB */
		};
	};

	/*
	 * Third double word block
	 *
	 * WARNING: bit 0 of the first word encodes PageTail(). That
	 * means the other users of this union MUST NOT use the bit to
	 * avoid collision and false-positive PageTail().
	 */
	union {
		struct list_head lru;	/* Pageout list, eg. active_list
					 * protected by zone->lru_lock !
					 * Can be used as a generic list
					 * by the page owner.
					 */
		struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on
					    * an lru or handled by a slab
					    * allocator, this points to the
					    * hosting device page map.
					    */
		struct {		/* slub per cpu partial pages */
			struct page *next;	/* Next partial slab */
#ifdef CONFIG_64BIT
			int pages;	/* Nr of partial slabs left */
			int pobjects;	/* Approximate # of objects */
#else
			short int pages;
			short int pobjects;
#endif
		};

		struct rcu_head rcu_head;	/* Used by SLAB
						 * when destroying via RCU
						 */
		/* Tail pages of compound page */
		struct {
			unsigned long compound_head; /* If bit zero is set */

			/* First tail page only */
#ifdef CONFIG_64BIT
			/*
			 * On 64 bit system we have enough space in struct page
			 * to encode compound_dtor and compound_order with
			 * unsigned int. It can help compiler generate better or
			 * smaller code on some archtectures.
			 */
			unsigned int compound_dtor;
			unsigned int compound_order;
#else
			unsigned short int compound_dtor;
			unsigned short int compound_order;
#endif
		};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
		struct {
			unsigned long __pad;	/* do not overlay pmd_huge_pte
						 * with compound_head to avoid
						 * possible bit 0 collision.
						 */
			pgtable_t pmd_huge_pte; /* protected by page->ptl */
		};
#endif
	};

	/* Remainder is not double word aligned */
	union {
		unsigned long private;		/* Mapping-private opaque data:
					 	 * usually used for buffer_heads
						 * if PagePrivate set; used for
						 * swp_entry_t if PageSwapCache;
						 * indicates order in the buddy
						 * system if PG_buddy is set.
						 */
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
		spinlock_t *ptl;
#else
		spinlock_t ptl;
#endif
#endif
		struct kmem_cache *slab_cache;	/* SL[AU]B: Pointer to slab */
	};

#ifdef CONFIG_MEMCG
	struct mem_cgroup *mem_cgroup;
#endif

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef CONFIG_KMEMCHECK
	/*
	 * kmemcheck wants to track the status of each byte in a page; this
	 * is a pointer to such a status block. NULL if not tracked.
	 */
	void *shadow;
#endif

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif
}
/*
 * The struct page can be forced to be double word aligned so that atomic ops
 * on double words work. The SLUB allocator can make use of such a feature.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
	__aligned(2 * sizeof(unsigned long))
#endif
;
227
/*
 * A reference to a fragment of a page, described by a byte offset and
 * size within that page.  The field widths are chosen so the struct
 * stays compact: 16-bit offset/size suffice when both the word size is
 * 32 bits and pages are smaller than 64KiB.
 */
struct page_frag {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 offset;
	__u32 size;
#else
	__u16 offset;
	__u16 size;
#endif
};
238
/* Largest backing allocation for a page_frag_cache: 32KiB, rounded up to
 * a whole number of pages; the matching allocation order follows. */
#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)
241
/*
 * Per-caller cache used to carve small fragments out of one larger
 * page allocation (see page_frag_alloc and friends).
 */
struct page_frag_cache {
	void * va;			/* kernel mapping of the backing pages */
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	__u16 offset;			/* current carve-out position */
	__u16 size;			/* size of the backing allocation */
#else
	__u32 offset;			/* single page: size is implicit */
#endif
	/* we maintain a pagecount bias, so that we dont dirty cache line
	 * containing page->_count every time we allocate a fragment.
	 */
	unsigned int		pagecnt_bias;
	bool pfmemalloc;		/* backing page came from reserves */
};
256
257typedef unsigned long vm_flags_t;
258
259
260
261
262
263
/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs
 * that map parts of them.
 */
struct vm_region {
	struct rb_node	vm_rb;		/* link in global region tree */
	vm_flags_t	vm_flags;	/* VMA vm_flags */
	unsigned long	vm_start;	/* start address of region */
	unsigned long	vm_end;		/* region initialised to here */
	unsigned long	vm_top;		/* region allocated to here */
	unsigned long	vm_pgoff;	/* the offset in vm_file corresponding to vm_start */
	struct file	*vm_file;	/* the backing file or NULL */

	int		vm_usage;	/* region usage count (access under nommu_region_sem) */
	bool		vm_icache_flushed : 1; /* true if the icache has been flushed for
						* this region */
};
277
#ifdef CONFIG_USERFAULTFD
/* Initializer for a VMA with no userfaultfd context attached. */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
	struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
/* Empty placeholder so VMA code need not be littered with #ifdefs. */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */
287
288
289
290
291
292
293
/*
 * This struct defines a memory VMM memory area. There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	struct rb_node vm_rb;

	/*
	 * Largest free memory gap in bytes to the left of this VMA.
	 * Either between this VMA and vma->vm_prev, or between one of the
	 * VMAs below us in the VMA rbtree and its ->vm_prev. This helps
	 * get_unmapped_area find a free area of the right size.
	 */
	unsigned long rb_subtree_gap;

	/* Second cache line starts here. */

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, see mm.h. */

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.	A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_chain; /* Serialized by mmap_sem &
					  * page_table_lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units */
	struct file * vm_file;		/* File we map to (can be NULL). */
	void * vm_private_data;		/* was vm_pte (shared mem) */

#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
};
356
/* One node in the list of threads being dumped during a coredump. */
struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

/* Coordination state for a multi-threaded coredump. */
struct core_state {
	atomic_t nr_threads;		/* threads still to check in */
	struct core_thread dumper;	/* list head; dumper.task is the dumping thread */
	struct completion startup;	/* signalled when all threads have arrived */
};
367
/* Indices into the per-mm RSS counter arrays (rss_stat / task_rss_stat). */
enum {
	MM_FILEPAGES,	/* Resident file mapping pages */
	MM_ANONPAGES,	/* Resident anonymous pages */
	MM_SWAPENTS,	/* Anonymous swap entries */
	MM_SHMEMPAGES,	/* Resident shared memory pages */
	NR_MM_COUNTERS
};
375
#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
/*
 * With split ptlocks the page_table_lock no longer serializes RSS
 * updates, so counts are batched per task and flushed into the mm's
 * atomic counters periodically.
 */
#define SPLIT_RSS_COUNTING
/* per-thread cached information */
struct task_rss_stat {
	int events;	/* for synchronization threshold */
	int count[NR_MM_COUNTERS];
};
#endif /* USE_SPLIT_PTE_PTLOCKS */
384
/* Per-mm authoritative RSS counters; see the MM_* enum for indices. */
struct mm_rss_stat {
	atomic_long_t count[NR_MM_COUNTERS];
};
388
struct kioctx_table;
/*
 * Describes a whole address space: the VMA list/tree, page tables,
 * RSS accounting, and assorted per-address-space state.  Shared by all
 * threads of a process (task->mm).
 */
struct mm_struct {
	struct vm_area_struct *mmap;		/* list of VMAs */
	struct rb_root mm_rb;
	u32 vmacache_seqnum;                   /* per-thread vmacache */
#ifdef CONFIG_MMU
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long mmap_legacy_base;         /* base of mmap area in bottom-up allocations */
	unsigned long task_size;		/* size of task vm space */
	unsigned long highest_vm_end;		/* highest vma end address */
	pgd_t * pgd;
	atomic_t mm_users;			/* How many users with user space? */
	atomic_t mm_count;			/* How many references to "struct mm_struct" (users count as 1) */
	atomic_long_t nr_ptes;			/* PTE page table pages */
#if CONFIG_PGTABLE_LEVELS > 2
	atomic_long_t nr_pmds;			/* PMD page table pages */
#endif
	int map_count;				/* number of VMAs */

	spinlock_t page_table_lock;		/* Protects page tables and some counters */
	struct rw_semaphore mmap_sem;

	struct list_head mmlist;		/* List of maybe swapped mm's.	These are globally strung
						 * together off init_mm.mmlist, and are protected
						 * by mmlist_lock
						 */


	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long pinned_vm;	/* Refcount permanently increased */
	unsigned long data_vm;		/* VM_WRITE & ~VM_SHARED & ~VM_STACK */
	unsigned long exec_vm;		/* VM_EXEC & ~VM_WRITE & ~VM_STACK */
	unsigned long stack_vm;		/* VM_STACK */
	unsigned long def_flags;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	/*
	 * Special counters, in some configurations protected by the
	 * page_table_lock, in other configurations by being atomic.
	 */
	struct mm_rss_stat rss_stat;

	struct linux_binfmt *binfmt;

	cpumask_var_t cpu_vm_mask_var;

	/* Architecture-specific MM context */
	mm_context_t context;

	unsigned long flags; /* Must use atomic bitops to access the bits */

	struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_AIO
	spinlock_t			ioctx_lock;
	struct kioctx_table __rcu	*ioctx_table;
#endif
#ifdef CONFIG_MEMCG
	/*
	 * "owner" points to a task that is regarded as the canonical
	 * user/owner of this mm. All of the following must be true in
	 * order for it to be changed:
	 *
	 * current == mm->owner
	 * current->mm != mm
	 * new_owner->mm == mm
	 * new_owner->alloc_lock is held
	 */
	struct task_struct __rcu *owner;
#endif

	/* store ref to file /proc/<pid>/exe symlink points to */
	struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	struct cpumask cpumask_allocation;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/*
	 * numa_next_scan is the next time that the PTEs will be marked
	 * pte_numa. NUMA hinting faults will gather statistics and migrate
	 * pages to new nodes if necessary.
	 */
	unsigned long numa_next_scan;

	/* Restart point for scanning and setting pte_numa */
	unsigned long numa_scan_offset;

	/* numa_scan_seq prevents two threads setting pte_numa */
	int numa_scan_seq;
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
	/*
	 * An operation with batched TLB flushing is going on. Anything that
	 * can move process memory needs to flush the TLB when moving a
	 * PROT_NONE or PROT_NUMA mapped page.
	 */
	bool tlb_flush_pending;
#endif
	struct uprobes_state uprobes_state;
#ifdef CONFIG_X86_INTEL_MPX
	/* address of the bounds directory */
	void __user *bd_addr;
#endif
#ifdef CONFIG_HUGETLB_PAGE
	atomic_long_t hugetlb_usage;
#endif
};
513
/* Initialize an mm's CPU mask to empty; when cpumasks are off-stack the
 * mask storage embedded in the mm itself is used as the backing store. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	mm->cpu_vm_mask_var = &mm->cpumask_allocation;
#endif
	cpumask_clear(mm->cpu_vm_mask_var);
}
521
522
523static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
524{
525 return mm->cpu_vm_mask_var;
526}
527
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
/*
 * Memory barriers to keep this state in sync are graciously provided by
 * the page table locks, outside of which no page table modifications happen.
 * The barriers below prevent the compiler from re-ordering the instructions
 * around the memory barriers that are already present in the code.
 */
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	barrier();
	return mm->tlb_flush_pending;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)
{
	mm->tlb_flush_pending = true;

	/*
	 * Guarantee that the tlb_flush_pending store does not leak into the
	 * critical section updating the page tables
	 */
	smp_mb__before_spinlock();
}
/* Clearing is done after a TLB flush, which also provides a barrier. */
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
{
	barrier();
	mm->tlb_flush_pending = false;
}
#else
/* No batched flushing without NUMA balancing or compaction: no-op stubs. */
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	return false;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)
{
}
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
{
}
#endif
568
/* A named, kernel-installed special mapping (e.g. "[vdso]") and the
 * pages backing it. */
struct vm_special_mapping
{
	const char *name;
	struct page **pages;
};
574
/* Reasons a TLB flush was performed; used for tracing/statistics. */
enum tlb_flush_reason {
	TLB_FLUSH_ON_TASK_SWITCH,
	TLB_REMOTE_SHOOTDOWN,
	TLB_LOCAL_SHOOTDOWN,
	TLB_LOCAL_MM_SHOOTDOWN,
	TLB_REMOTE_SEND_IPI,
	NR_TLB_FLUSH_REASONS,
};
583
584
585
586
587
/*
 * A swap entry has to fit into a "unsigned long", as the entry is hidden
 * in the "index" field of the swapper address space.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;
591
592#endif
593