1#ifndef _LINUX_MM_TYPES_H
2#define _LINUX_MM_TYPES_H
3
4#include <linux/auxvec.h>
5#include <linux/types.h>
6#include <linux/threads.h>
7#include <linux/list.h>
8#include <linux/spinlock.h>
9#include <linux/rbtree.h>
10#include <linux/rwsem.h>
11#include <linux/completion.h>
12#include <linux/cpumask.h>
13#include <linux/uprobes.h>
14#include <linux/page-flags-layout.h>
15#include <linux/workqueue.h>
16#include <asm/page.h>
17#include <asm/mmu.h>
18
19#ifndef AT_VECTOR_SIZE_ARCH
20#define AT_VECTOR_SIZE_ARCH 0
21#endif
22#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
23
24struct address_space;
25struct mem_cgroup;
26
27#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
28#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
29 IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
30#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
31
32
33
34
35
36
37
38
39
40
41
42
43
44
/*
 * Each physical page frame in the system has a struct page associated with
 * it to keep track of whatever the page is currently being used for.  The
 * structure is heavily unionised: which member of each union is valid
 * depends on the page's current user (page cache, anon memory, slab
 * allocator, compound-page metadata, page-table page, ...).
 *
 * NOTE(review): the layout is size- and alignment-sensitive.  The counters
 * union below is updated with cmpxchg_double() on some configurations,
 * which requires the two adjacent words to be double-word aligned -- see
 * the conditional __aligned() attached after the closing brace.  Do not
 * reorder fields without auditing all users.
 */
struct page {
	/* First double word block */
	unsigned long flags;		/* Atomic page flags; some bits also
					 * encode zone/node/section info, see
					 * page-flags-layout.h */
	union {
		struct address_space *mapping;	/* Page-cache / anon mapping.
						 * Presumably the low bit tags
						 * anon vs file mappings --
						 * verify against the
						 * PAGE_MAPPING_* users. */
		void *s_mem;			/* slab first object */
		atomic_t compound_mapcount;	/* mapcount of a compound
						 * page (first tail page) */
	};

	/* Second double word */
	union {
		pgoff_t index;		/* Our offset within mapping. */
		void *freelist;		/* sl[aou]b first free object */
	};

	union {
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
	defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
		/* Used for cmpxchg_double in slub */
		unsigned long counters;
#else
		/*
		 * Keep _refcount separate from slub cmpxchg_double data.
		 * As the rest of the double word is protected by slab_lock
		 * but _refcount is not.
		 */
		unsigned counters;
#endif
		struct {

			union {
				/*
				 * Count of ptes mapped in mms, to show when
				 * page is mapped & limit reverse map
				 * searches.  NOTE(review): values <= -2 are
				 * presumably special page-type markers --
				 * confirm against page-flags.h.
				 */
				atomic_t _mapcount;

				unsigned int active;		/* SLAB */
				struct {			/* SLUB */
					unsigned inuse:16;
					unsigned objects:15;
					unsigned frozen:1;
				};
				int units;			/* SLOB */
			};
			/*
			 * Usage count.  Use the page_ref wrapper functions
			 * rather than touching this directly.
			 */
			atomic_t _refcount;
		};
	};

	/*
	 * Third double word block.
	 *
	 * WARNING: bit 0 of the first word (compound_head) encodes a tail
	 * page; other users of this storage must not set that bit (see the
	 * __pad member below, which exists for exactly this reason).
	 */
	union {
		struct list_head lru;	/* Pageout list (e.g. active list),
					 * or a generic list for the page
					 * owner. */
		struct dev_pagemap *pgmap; /* ZONE_DEVICE pages: points to the
					    * hosting device's page map;
					    * such pages are never on an LRU
					    * nor owned by slab. */
		struct {		/* slub per-cpu partial pages */
			struct page *next;	/* Next partial slab */
#ifdef CONFIG_64BIT
			int pages;	/* Nr of partial slabs left */
			int pobjects;	/* Approximate nr of objects */
#else
			short int pages;
			short int pobjects;
#endif
		};

		struct rcu_head rcu_head;	/* Used by SLAB when
						 * destroying via RCU */

		/* Tail pages of a compound page */
		struct {
			unsigned long compound_head; /* Bit zero is set */

			/* First tail page only */
#ifdef CONFIG_64BIT
			/*
			 * On 64-bit there is enough space to encode
			 * compound_dtor and compound_order as unsigned int,
			 * which can generate better code on some
			 * architectures.
			 */
			unsigned int compound_dtor;
			unsigned int compound_order;
#else
			unsigned short int compound_dtor;
			unsigned short int compound_order;
#endif
		};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
		struct {
			unsigned long __pad;	/* do not overlay pmd_huge_pte
						 * with compound_head to avoid
						 * possible bit 0 collision */
			pgtable_t pmd_huge_pte; /* protected by page->ptl */
		};
#endif
	};

	/* Remainder is not double word aligned */
	union {
		unsigned long private;		/* Mapping-private opaque data;
						 * meaning depends on page
						 * state (buffer_heads, swap
						 * entry, buddy order, ...) */
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
		spinlock_t *ptl;
#else
		spinlock_t ptl;
#endif
#endif
		struct kmem_cache *slab_cache;	/* SL[AU]B: pointer to slab */
	};

#ifdef CONFIG_MEMCG
	struct mem_cgroup *mem_cgroup;
#endif

	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * the virtual address can be calculated.  With highmem, some memory
	 * is mapped dynamically and the address must be stored here.
	 * Architectures wanting this define WANT_PAGE_VIRTUAL in asm/page.h.
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, i.e. highmem) */
#endif /* WANT_PAGE_VIRTUAL */

#ifdef CONFIG_KMEMCHECK
	/*
	 * kmemcheck tracks the status of each byte in a page; this points
	 * to such a status block.  NULL if not tracked.
	 */
	void *shadow;
#endif

#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	int _last_cpupid;
#endif
}
/*
 * struct page can be forced to double-word alignment so that atomic double
 * word operations (cmpxchg_double in SLUB) work on it.
 */
#ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
	__aligned(2 * sizeof(unsigned long))
#endif
;
233
/*
 * A page fragment: a sub-page region described by its backing page, an
 * offset into that page, and the fragment size.  The offset/size fields
 * shrink to 16 bits when a page can never exceed 64KiB.
 */
struct page_frag {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 offset;
	__u32 size;
#else
	__u16 offset;
	__u16 size;
#endif
};
244
/* Maximum fragment-cache backing size: 32KiB rounded up to a page multiple. */
#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)

/*
 * Per-caller cache for carving small fragments out of a higher-order page.
 * When PAGE_SIZE already covers the max cache size, only an offset is
 * needed; otherwise the mapped size is tracked too.
 */
struct page_frag_cache {
	void * va;			/* kernel address of the cached page(s) */
#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	__u16 offset;
	__u16 size;
#else
	__u32 offset;
#endif
	/*
	 * A pagecount bias is maintained so the cache line containing
	 * page->_refcount is not dirtied on every fragment allocation.
	 */
	unsigned int pagecnt_bias;
	bool pfmemalloc;		/* page came from a pfmemalloc reserve */
};
262
typedef unsigned long vm_flags_t;

/*
 * A region containing a mapping of a non-memory-backed file under NOMMU
 * conditions.  Regions are kept in a global tree and pinned by the VMAs
 * that map parts of them.
 */
struct vm_region {
	struct rb_node vm_rb;		/* link in global region tree */
	vm_flags_t vm_flags;		/* VMA vm_flags */
	unsigned long vm_start;		/* start address of region */
	unsigned long vm_end;		/* region initialised to here */
	unsigned long vm_top;		/* region allocated to here */
	unsigned long vm_pgoff;		/* the same as @vm_start, in pages */
	struct file *vm_file;		/* the backing file or NULL */

	int vm_usage;			/* region usage count */
	bool vm_icache_flushed : 1;	/* true if the icache has been
					 * flushed for this region */
};
283
/*
 * Per-VMA userfaultfd context.  Compiles to an empty struct (and an empty
 * initializer) when userfaultfd support is configured out, so VMAs carry
 * no overhead in that case.
 */
#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
	struct userfaultfd_ctx *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */
293
294
295
296
297
298
299
/*
 * This struct describes one VM memory area; there is one per VMA per task.
 * A VM area is any part of the process virtual address space that has a
 * special rule for the page-fault handlers (a shared library, the
 * executable image, etc).
 */
struct vm_area_struct {
	/* The first cache line has the info for VMA tree walking. */

	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end
					   address within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next, *vm_prev;

	struct rb_node vm_rb;

	/*
	 * Largest free memory gap in bytes to the left of this VMA,
	 * either between this VMA and vm_prev, or below us in the rbtree.
	 * NOTE(review): presumably maintained to let get_unmapped_area()
	 * find a free range quickly -- confirm against mm/mmap.c.
	 */
	unsigned long rb_subtree_gap;

	/* Second cache line starts here. */

	struct mm_struct *vm_mm;	/* The address space we belong to. */
	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, see mm.h. */

	/*
	 * For areas with an address space and backing store, linkage into
	 * the address_space->i_mmap interval tree.
	 */
	struct {
		struct rb_node rb;
		unsigned long rb_subtree_last;
	} shared;

	/*
	 * Anonymous-memory reverse-map linkage: a file's MAP_PRIVATE VMA
	 * can be in both the i_mmap tree and an anon_vma list after a COW;
	 * a purely anonymous VMA (NULL vm_file) is only on an anon_vma list.
	 */
	struct list_head anon_vma_chain;

	struct anon_vma *anon_vma;

	/* Function pointers to deal with this struct. */
	const struct vm_operations_struct *vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset within vm_file, in
					   PAGE_SIZE units */
	struct file * vm_file;		/* File we map to (can be NULL). */
	void * vm_private_data;		/* opaque, owner-defined */

#ifndef CONFIG_MMU
	struct vm_region *vm_region;	/* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
	struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
};
362
/* One node in the singly-linked list of threads involved in a core dump. */
struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};
367
/*
 * Coordination state for a core dump: a count of participating threads,
 * the dumping thread at the head of the list, and a completion the dumper
 * waits on until all threads have checked in.
 */
struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};
373
/* Indices into the per-mm RSS counter arrays (rss_stat / task_rss_stat). */
enum {
	MM_FILEPAGES,	/* Resident file-mapping pages */
	MM_ANONPAGES,	/* Resident anonymous pages */
	MM_SWAPENTS,	/* Anonymous swap entries */
	MM_SHMEMPAGES,	/* Resident shared-memory pages */
	NR_MM_COUNTERS
};
381
#if USE_SPLIT_PTE_PTLOCKS && defined(CONFIG_MMU)
#define SPLIT_RSS_COUNTING
/*
 * Per-task cached RSS deltas, periodically folded into mm->rss_stat.
 * Only used when split PTE locks make per-mm counting contended.
 */
struct task_rss_stat {
	int events;	/* events since last sync (sync threshold trigger) */
	int count[NR_MM_COUNTERS];
};
#endif /* USE_SPLIT_PTE_PTLOCKS */

/* Per-mm authoritative RSS counters, one atomic per counter type. */
struct mm_rss_stat {
	atomic_long_t count[NR_MM_COUNTERS];
};
394
struct kioctx_table;
/*
 * The top-level descriptor of a process address space: the VMA list/tree,
 * the page-table root, reference counts, accounting, and a grab-bag of
 * per-address-space state for optional subsystems (AIO, memcg, NUMA
 * balancing, uprobes, ...).
 */
struct mm_struct {
	struct vm_area_struct *mmap;		/* list of VMAs */
	struct rb_root mm_rb;			/* VMA rbtree */
	u32 vmacache_seqnum;			/* per-thread vmacache
						 * invalidation sequence */
#ifdef CONFIG_MMU
	unsigned long (*get_unmapped_area) (struct file *filp,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags);
#endif
	unsigned long mmap_base;		/* base of mmap area */
	unsigned long mmap_legacy_base;		/* base of mmap area in
						 * bottom-up allocations */
	unsigned long task_size;		/* size of task vm space */
	unsigned long highest_vm_end;		/* highest vma end address */
	pgd_t * pgd;				/* page-table root */
	atomic_t mm_users;			/* How many users with user
						 * space? */
	atomic_t mm_count;			/* How many references to this
						 * struct (users count as 1) */
	atomic_long_t nr_ptes;			/* PTE page-table pages */
#if CONFIG_PGTABLE_LEVELS > 2
	atomic_long_t nr_pmds;			/* PMD page-table pages */
#endif
	int map_count;				/* number of VMAs */

	spinlock_t page_table_lock;		/* Protects page tables and
						 * some counters */
	struct rw_semaphore mmap_sem;

	struct list_head mmlist;		/* List of maybe-swapped mm's,
						 * globally strung together off
						 * init_mm.mmlist -- presumably
						 * protected by mmlist_lock,
						 * confirm in mm/ */

	unsigned long hiwater_rss;	/* High-watermark of RSS usage */
	unsigned long hiwater_vm;	/* High-water virtual memory usage */

	unsigned long total_vm;		/* Total pages mapped */
	unsigned long locked_vm;	/* Pages that have PG_mlocked set */
	unsigned long pinned_vm;	/* Refcount permanently increased */
	unsigned long data_vm;		/* data area pages */
	unsigned long exec_vm;		/* executable area pages */
	unsigned long stack_vm;		/* stack area pages */
	unsigned long def_flags;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long start_brk, brk, start_stack;
	unsigned long arg_start, arg_end, env_start, env_end;

	unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

	/*
	 * RSS counters; depending on configuration these are updated
	 * atomically or batched via task_rss_stat (SPLIT_RSS_COUNTING).
	 */
	struct mm_rss_stat rss_stat;

	struct linux_binfmt *binfmt;

	cpumask_var_t cpu_vm_mask_var;	/* see mm_cpumask() */

	/* Architecture-specific MM context */
	mm_context_t context;

	unsigned long flags;		/* Must use atomic bitops to access */

	struct core_state *core_state;	/* core-dumping support */
#ifdef CONFIG_AIO
	spinlock_t ioctx_lock;
	struct kioctx_table __rcu *ioctx_table;
#endif
#ifdef CONFIG_MEMCG
	/*
	 * "owner" points to the task regarded as the canonical user/owner
	 * of this mm (used by memcg).  NOTE(review): changing it is subject
	 * to ownership rules enforced elsewhere -- see mm_update_next_owner.
	 */
	struct task_struct __rcu *owner;
#endif

	/* store ref to file /proc/<pid>/exe symlink points to */
	struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
	pgtable_t pmd_huge_pte;		/* protected by page_table_lock */
#endif
#ifdef CONFIG_CPUMASK_OFFSTACK
	struct cpumask cpumask_allocation;	/* backing storage that
						 * cpu_vm_mask_var points to,
						 * see mm_init_cpumask() */
#endif
#ifdef CONFIG_NUMA_BALANCING
	/*
	 * numa_next_scan is the next time the PTEs will be marked for NUMA
	 * hinting faults (which gather statistics and may migrate pages).
	 */
	unsigned long numa_next_scan;

	/* Restart point for the NUMA PTE scan */
	unsigned long numa_scan_offset;

	/* numa_scan_seq prevents two threads re-marking the same PTEs */
	int numa_scan_seq;
#endif
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
	/*
	 * Set while a batched TLB flush is in progress; read via
	 * mm_tlb_flush_pending() by code that moves process memory.
	 */
	bool tlb_flush_pending;
#endif
	struct uprobes_state uprobes_state;
#ifdef CONFIG_X86_INTEL_MPX
	/* address of the MPX bounds directory */
	void __user *bd_addr;
#endif
#ifdef CONFIG_HUGETLB_PAGE
	atomic_long_t hugetlb_usage;
#endif
	struct work_struct async_put_work;	/* deferred (async) mm put */
};
520
/*
 * Initialise mm's CPU mask to empty.  With CONFIG_CPUMASK_OFFSTACK the
 * mask pointer is first aimed at the storage embedded in the mm itself
 * (cpumask_allocation); it must be set before cpumask_clear() runs.
 */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	mm->cpu_vm_mask_var = &mm->cpumask_allocation;
#endif
	cpumask_clear(mm->cpu_vm_mask_var);
}
528
529
530static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
531{
532 return mm->cpu_vm_mask_var;
533}
534
#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
/*
 * tlb_flush_pending accessors.  The compiler barrier()s below only stop
 * the compiler from reordering these accesses; NOTE(review): the real
 * memory ordering is presumably provided by the page-table locks and the
 * smp_mb__before_spinlock() in the setter -- confirm against the callers
 * before changing anything here.
 */
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	barrier();
	return mm->tlb_flush_pending;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)
{
	mm->tlb_flush_pending = true;

	/*
	 * Guarantee that the tlb_flush_pending store does not leak into the
	 * page-table-update critical section that follows.
	 */
	smp_mb__before_spinlock();
}
/* Clearing is done after a TLB flush, which also provides a barrier. */
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
{
	barrier();
	mm->tlb_flush_pending = false;
}
#else
/* Stubs when neither NUMA balancing nor compaction is configured. */
static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	return false;
}
static inline void set_tlb_flush_pending(struct mm_struct *mm)
{
}
static inline void clear_tlb_flush_pending(struct mm_struct *mm)
{
}
#endif
575
struct vm_fault;

/*
 * Descriptor for a kernel-installed special mapping (e.g. the vDSO):
 * either a static page array or a fault callback backs the VMA.
 */
struct vm_special_mapping {
	const char *name;	/* The name, e.g. "[vdso]". */

	/*
	 * If .fault is not provided, this points to a NULL-terminated array
	 * of pages that back the special mapping.  Must not be NULL unless
	 * .fault is provided.
	 */
	struct page **pages;

	/*
	 * If non-NULL, this is called to resolve page faults on the
	 * mapping; in that case .pages is not consulted.
	 */
	int (*fault)(const struct vm_special_mapping *sm,
		     struct vm_area_struct *vma,
		     struct vm_fault *vmf);

	/* Called when the mapping is moved, with the new VMA. */
	int (*mremap)(const struct vm_special_mapping *sm,
		     struct vm_area_struct *new_vma);
};
600
/*
 * Reasons a TLB flush was performed.  NOTE(review): presumably consumed
 * by the TLB-flush tracepoints -- confirm against the trace event users.
 */
enum tlb_flush_reason {
	TLB_FLUSH_ON_TASK_SWITCH,
	TLB_REMOTE_SHOOTDOWN,
	TLB_LOCAL_SHOOTDOWN,
	TLB_LOCAL_MM_SHOOTDOWN,
	TLB_REMOTE_SEND_IPI,
	NR_TLB_FLUSH_REASONS,
};
609
610
611
612
613
/*
 * A swap entry has to fit into an unsigned long, as it is stored in
 * fields (e.g. page->private / pte bits) that are one word wide.
 */
typedef struct {
	unsigned long val;
} swp_entry_t;
617
618#endif
619