/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H

#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/swap.h>
#include <linux/string.h>

/**
 * folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
 * @folio: The folio to test.
 *
 * Returns 1 if @folio is a regular filesystem backed page cache folio
 * or a lazily freed anonymous folio (e.g. via MADV_FREE).  Returns 0 if
 * @folio is a normal anonymous folio, a tmpfs folio or otherwise ram or
 * swap backed folio.  Used by functions that manipulate the LRU lists,
 * to sort a folio onto the right LRU list.
 *
 * We would like to get this info without a page flag, but the state
 * needs to survive until the folio is last deleted from the LRU, which
 * could be as far down as __page_cache_release.
 */
static inline int folio_is_file_lru(struct folio *folio)
{
	return !folio_test_swapbacked(folio);
}
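
/*
 * A minimal sketch of how this plays out with MADV_FREE (illustration
 * only, not code from this header): a fresh anonymous folio has the
 * swapbacked flag set and sorts onto an anon LRU; madvise(MADV_FREE)
 * clears the flag, so the lazily freed folio moves to a file LRU where
 * reclaim can drop it without swap I/O:
 *
 *	madvise(addr, len, MADV_FREE);	// userspace side
 *	...
 *	folio_is_file_lru(folio);	// now returns true for the folio
 */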

static inline int page_is_file_lru(struct page *page)
{
	return folio_is_file_lru(page_folio(page));
}

static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				long nr_pages)
{
	struct pglist_data *pgdat = lruvec_pgdat(lruvec);

	__mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages);
	__mod_zone_page_state(&pgdat->node_zones[zid],
				NR_ZONE_LRU_BASE + lru, nr_pages);
#ifdef CONFIG_MEMCG
	mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}
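
/*
 * A minimal usage sketch (illustration only): adding a PMD-sized THP
 * folio (512 base pages on x86-64) to the active file list bumps all
 * three levels of bookkeeping at once -- the per-node vmstat counter,
 * the per-zone counter used by the watermark code and, under
 * CONFIG_MEMCG, the per-memcg per-zone LRU size:
 *
 *	update_lru_size(lruvec, LRU_ACTIVE_FILE, zid, 512);
 *	// NR_ACTIVE_FILE (node counter)  += 512
 *	// NR_ZONE_ACTIVE_FILE (zone)     += 512
 *	// memcg per-zone LRU size        += 512
 */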

/**
 * __folio_clear_lru_flags - Clear page lru flags before releasing a page.
 * @folio: The folio that was on lru and now has a zero reference.
 */
static __always_inline void __folio_clear_lru_flags(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio);

	__folio_clear_lru(folio);

	/* this shouldn't happen, so leave the flags to bad_page() */
	if (folio_test_active(folio) && folio_test_unevictable(folio))
		return;

	__folio_clear_active(folio);
	__folio_clear_unevictable(folio);
}

static __always_inline void __clear_page_lru_flags(struct page *page)
{
	__folio_clear_lru_flags(page_folio(page));
}

/**
 * folio_lru_list - Which LRU list should a folio be on?
 * @folio: The folio to test.
 *
 * Return: The LRU list a folio should be on, as an index
 * into the array of LRU lists.
 */
static __always_inline enum lru_list folio_lru_list(struct folio *folio)
{
	enum lru_list lru;

	VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio);

	if (folio_test_unevictable(folio))
		return LRU_UNEVICTABLE;

	lru = folio_is_file_lru(folio) ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON;
	if (folio_test_active(folio))
		lru += LRU_ACTIVE;

	return lru;
}
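
/*
 * The mapping implemented above, spelled out (illustration only):
 *
 *	unevictable			-> LRU_UNEVICTABLE
 *	 swapbacked, !active		-> LRU_INACTIVE_ANON
 *	 swapbacked,  active		-> LRU_ACTIVE_ANON
 *	!swapbacked, !active		-> LRU_INACTIVE_FILE
 *	!swapbacked,  active		-> LRU_ACTIVE_FILE
 *
 * active + unevictable is rejected by the VM_BUG_ON_FOLIO() above.
 */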

static __always_inline
void lruvec_add_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	if (lru != LRU_UNEVICTABLE)
		list_add(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_add_folio(lruvec, page_folio(page));
}

static __always_inline
void lruvec_add_folio_tail(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	update_lru_size(lruvec, lru, folio_zonenum(folio),
			folio_nr_pages(folio));
	/* This is not expected to be used on LRU_UNEVICTABLE */
	list_add_tail(&folio->lru, &lruvec->lists[lru]);
}

static __always_inline void add_page_to_lru_list_tail(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_add_folio_tail(lruvec, page_folio(page));
}

static __always_inline
void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
{
	enum lru_list lru = folio_lru_list(folio);

	if (lru != LRU_UNEVICTABLE)
		list_del(&folio->lru);
	update_lru_size(lruvec, lru, folio_zonenum(folio),
			-folio_nr_pages(folio));
}

static __always_inline void del_page_from_lru_list(struct page *page,
				struct lruvec *lruvec)
{
	lruvec_del_folio(lruvec, page_folio(page));
}
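
/*
 * A minimal usage sketch (illustration only; folio_lruvec_lock_irq()
 * and unlock_page_lruvec_irq() are assumed to be the locking helpers
 * available alongside this header): moving a folio between lists is a
 * delete, retag, re-add sequence under the lruvec lock, which keeps
 * the lists and the update_lru_size() counters in sync:
 *
 *	lruvec = folio_lruvec_lock_irq(folio);
 *	lruvec_del_folio(lruvec, folio);
 *	folio_set_active(folio);
 *	lruvec_add_folio(lruvec, folio);
 *	unlock_page_lruvec_irq(lruvec);
 */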

#ifdef CONFIG_ANON_VMA_NAME
/*
 * mmap_lock should be read-locked when calling anon_vma_name(). Caller
 * should either keep holding the lock while using the returned pointer or
 * raise the anon_vma_name refcount before releasing the lock.
 */
extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
extern void anon_vma_name_free(struct kref *kref);

/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_get(&anon_name->kref);
}

static inline void anon_vma_name_put(struct anon_vma_name *anon_name)
{
	if (anon_name)
		kref_put(&anon_name->kref, anon_vma_name_free);
}

static inline
struct anon_vma_name *anon_vma_name_reuse(struct anon_vma_name *anon_name)
{
	/* Prevent anon_name refcount saturation early on */
	if (kref_read(&anon_name->kref) < REFCOUNT_MAX) {
		anon_vma_name_get(anon_name);
		return anon_name;
	}
	return anon_vma_name_alloc(anon_name->name);
}

static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma)
{
	struct anon_vma_name *anon_name = anon_vma_name(orig_vma);

	if (anon_name)
		new_vma->anon_name = anon_vma_name_reuse(anon_name);
}

static inline void free_anon_vma_name(struct vm_area_struct *vma)
{
	/*
	 * Not using anon_vma_name() here because it generates a warning if
	 * mmap_lock is not held, which might be the case here.
	 */
	if (!vma->vm_file)
		anon_vma_name_put(vma->anon_name);
}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	if (anon_name1 == anon_name2)
		return true;

	return anon_name1 && anon_name2 &&
	       !strcmp(anon_name1->name, anon_name2->name);
}
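
/*
 * A minimal lifecycle sketch (illustration only; "myheap" is a made-up
 * name and the kref values assume anon_vma_name_alloc() kref_init()s
 * to 1): each additional VMA takes its own reference via
 * anon_vma_name_reuse(), and the last put triggers anon_vma_name_free():
 *
 *	name = anon_vma_name_alloc("myheap");	// kref == 1
 *	vma->anon_name = name;
 *	dup_anon_vma_name(vma, new_vma);	// kref == 2
 *	...
 *	free_anon_vma_name(vma);		// kref == 1
 *	free_anon_vma_name(new_vma);		// freed
 */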

#else
static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
{
	return NULL;
}

static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
				     struct vm_area_struct *new_vma) {}
static inline void free_anon_vma_name(struct vm_area_struct *vma) {}

static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
				    struct anon_vma_name *anon_name2)
{
	return true;
}

#endif	/* CONFIG_ANON_VMA_NAME */

static inline void init_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_set(&mm->tlb_flush_pending, 0);
}

static inline void inc_tlb_flush_pending(struct mm_struct *mm)
{
	atomic_inc(&mm->tlb_flush_pending);
	/*
	 * The only time this value is relevant is when there are indeed pages
	 * to flush. And we'll only flush pages after changing them, which
	 * requires the PTL.
	 *
	 * So the ordering here is:
	 *
	 *	atomic_inc(&mm->tlb_flush_pending);
	 *	spin_lock(&ptl);
	 *	...
	 *	set_pte_at();
	 *	spin_unlock(&ptl);
	 *
	 *				spin_lock(&ptl)
	 *				mm_tlb_flush_pending();
	 *				....
	 *				spin_unlock(&ptl);
	 *
	 *	flush_tlb_range();
	 *	atomic_dec(&mm->tlb_flush_pending);
	 *
	 * Where the increment is constrained by the PTL unlock, it thus
	 * ensures that the increment is visible if the PTE modification is
	 * visible. After all, if there is no PTE modification, nobody cares
	 * about TLB flushes either.
	 *
	 * This very much relies on users (mm_tlb_flush_pending() and
	 * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
	 * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
	 * locks (PPC) the unlock of one doesn't order against the lock of
	 * another PTL.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes
	 * have completed.
	 */
}

static inline void dec_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * See inc_tlb_flush_pending().
	 *
	 * This cannot be smp_mb__before_atomic() because smp_mb() simply does
	 * not order against TLB invalidate completion, which is what we need.
	 *
	 * The decrement is ordered by the flush_tlb_range(), such that
	 * mm_tlb_flush_pending() will not return false unless all flushes
	 * have completed.
	 */
	atomic_dec(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
{
	/*
	 * Must be called after having acquired the PTL; orders against that
	 * PTL's release and therefore ensures that if we observe the modified
	 * PTE we must also observe the increment from inc_tlb_flush_pending().
	 *
	 * That is, it only guarantees to return true if there is a flush
	 * pending for _this_ PTL.
	 */
	return atomic_read(&mm->tlb_flush_pending);
}

static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
{
	/*
	 * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
	 * for which there is a TLB flush pending in order to guarantee we've
	 * seen both that PTE modification and the increment.
	 *
	 * (There is no requirement to still be holding the PTL; having held
	 * it once is enough.)
	 */
	return atomic_read(&mm->tlb_flush_pending) > 1;
}
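
/*
 * A minimal sketch of the nested case (illustration only): with two
 * unmappers racing on the same mm, both increment tlb_flush_pending
 * before taking the PTL, so whichever thread holds the PTL can see
 * that a concurrent flush is in flight and act conservatively:
 *
 *	// thread A			// thread B
 *	inc_tlb_flush_pending(mm);
 *					inc_tlb_flush_pending(mm);
 *	spin_lock(ptl);
 *	mm_tlb_flush_nested(mm);	// true: counter is 2
 */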

#endif	/* LINUX_MM_INLINE_H */