/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

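/* Stats that can be updated by kernel. */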
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG
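/*
 * All "charge" functions with gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg does
 * not allocate memory itself but reclaims from all available zones, so the
 * "where do I want memory from" bits of gfp_mask have no meaning here and
 * any value of those bits is legal. If memcg ever starts allocating memory
 * on its own, such a check will become necessary.
 */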
extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
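/* for swap handling */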
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
		struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

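/* For coalescing uncharge operations and reducing memcg overhead */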
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				  struct mem_cgroup *memcg);
bool task_in_mem_cgroup(struct task_struct *task,
			const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);

static inline
bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	match = __mem_cgroup_same_or_subtree(memcg, task_memcg);
	rcu_read_unlock();
	return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
			     struct mem_cgroup **memcgp);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok);

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);

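/*
 * For memory reclaim.
 */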
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
					struct page *newpage);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
					 unsigned long *flags);

extern atomic_t memcg_moving;

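/*
 * mem_cgroup_begin_update_page_stat() and mem_cgroup_end_update_page_stat()
 * must bracket every page state change that is accounted through
 * mem_cgroup_update_page_stat(). The fast path only takes the RCU read
 * lock; the slow path, entered while charges are being moved between
 * cgroups (memcg_moving != 0), may additionally take a per-memcg lock,
 * which is reported back to the caller through *locked.
 *
 * A minimal usage sketch (locked and flags are owned by the caller):
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	... update the page's state, then account it, e.g.: ...
 *	mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */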
static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	rcu_read_lock();
	*locked = false;
	if (atomic_read(&memcg_moving))
		__mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
				unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	if (*locked)
		__mem_cgroup_end_update_page_stat(page, flags);
	rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);

void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
					     enum vm_event_item idx)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_count_vm_event(mm, idx);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_MEMCG */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct zone *zone)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
			     struct mem_cgroup **memcgp)
{
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
	return 1;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
			   int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
				struct page *newpage)
{
}
#endif /* CONFIG_MEMCG */

#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
	UNDER_LIMIT,
	SOFT_LIMIT,
	OVER_LIMIT,
};

struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;

extern int memcg_limited_groups_array_size;

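/*
 * Helper macro to loop through all memcg-specific caches. Callers must
 * still check that the cache is valid (it is either valid or NULL).
 * slab_mutex must be held when looping through those caches.
 */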
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
	return static_key_false(&memcg_kmem_enabled_key);
}

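/*
 * In general, we'll do everything in our power not to incur any overhead
 * for non-memcg users of the kmem functions: not even a function call, if
 * we can avoid it.
 *
 * Therefore, we'll inline all those functions so that in the best case we
 * see that kmemcg is off for everybody and proceed quickly. If it is on,
 * we still do most of the flag checking inline. We check a lot of
 * conditions, but because they are pretty simple, they are expected to be
 * fast.
 */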
bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
					int order);
void __memcg_kmem_commit_charge(struct page *page,
				struct mem_cgroup *memcg, int order);
void __memcg_kmem_uncharge_pages(struct page *page, int order);

int memcg_cache_id(struct mem_cgroup *memcg);
int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
			 struct kmem_cache *root_cache);
void memcg_release_cache(struct kmem_cache *cachep);
void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep);

int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
void memcg_update_array_size(int num_groups);

struct kmem_cache *
__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);

void mem_cgroup_destroy_cache(struct kmem_cache *cachep);
void kmem_cache_destroy_memcg_children(struct kmem_cache *s);

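/**
 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
 * @gfp: the gfp allocation flags.
 * @memcg: a pointer to the memcg this was charged against.
 * @order: allocation order.
 *
 * Returns true if the memcg where the current task belongs can hold this
 * allocation.
 *
 * We return true automatically if this allocation is not to be accounted
 * to any memcg.
 */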
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	if (!memcg_kmem_enabled())
		return true;

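	/*
	 * __GFP_NOFAIL allocations will move on even if charging is not
	 * possible, so we don't even try here and leave the allocation
	 * unaccounted. We could in theory charge it with
	 * res_counter_charge_nofail, but we hope those allocations are
	 * rare, and won't be worth the trouble.
	 */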
	if (!(gfp & __GFP_KMEMCG) || (gfp & __GFP_NOFAIL))
		return true;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return true;

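	/* If the task is dying, just let it go. */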
	if (unlikely(fatal_signal_pending(current)))
		return true;

	return __memcg_kmem_newpage_charge(gfp, memcg, order);
}

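/**
 * memcg_kmem_uncharge_pages: uncharge pages from memcg
 * @page: pointer to struct page being freed
 * @order: allocation order.
 *
 * There is no need to specify the memcg here, since it is embedded in the
 * page_cgroup.
 */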
static inline void
memcg_kmem_uncharge_pages(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_pages(page, order);
}

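/**
 * memcg_kmem_commit_charge: embeds correct memcg in a page
 * @page: pointer to struct page recently allocated
 * @memcg: the memcg structure we charged against
 * @order: allocation order.
 *
 * Needs to be called after memcg_kmem_newpage_charge, regardless of success
 * or failure of the allocation. If @page is NULL, this function will revert
 * the charges. Otherwise, it will commit @memcg to the corresponding
 * page_cgroup.
 *
 * A minimal sketch of the expected charge/commit sequence (illustrative
 * only; everything other than the memcg_kmem_* helpers is a placeholder):
 *
 *	struct mem_cgroup *memcg = NULL;
 *	struct page *page;
 *
 *	if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
 *		return NULL;
 *	page = alloc_pages(gfp, order);
 *	memcg_kmem_commit_charge(page, memcg, order);
 */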
static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
	if (memcg_kmem_enabled() && memcg)
		__memcg_kmem_commit_charge(page, memcg, order);
}

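/**
 * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 * @gfp: allocation flags.
 *
 * This function assumes that the task allocating, which determines the
 * memcg in the page allocator, belongs to the same cgroup throughout the
 * whole process. Charging can be misattributed if the task migrates to
 * another cgroup between this call and the allocation itself; this is
 * considered acceptable and should only happen upon task migration.
 */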
static __always_inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	if (!memcg_kmem_enabled())
		return cachep;
	if (gfp & __GFP_NOFAIL)
		return cachep;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return cachep;
	if (unlikely(fatal_signal_pending(current)))
		return cachep;

	return __memcg_kmem_get_cache(cachep, gfp);
}
#else
#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	return true;
}

static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
{
}

static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline int
memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
		     struct kmem_cache *root_cache)
{
	return 0;
}

static inline void memcg_release_cache(struct kmem_cache *cachep)
{
}

static inline void memcg_cache_list_add(struct mem_cgroup *memcg,
					struct kmem_cache *s)
{
}

static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	return cachep;
}

static inline void kmem_cache_destroy_memcg_children(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */