/* SPDX-License-Identifier: GPL-2.0 */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
	MEMCG_RSS,
	MEMCG_RSS_HUGE,
	MEMCG_SWAP,
	MEMCG_SOCK,
	/* XXX: why are these zone and not node counters? */
	MEMCG_KERNEL_STACK_KB,
	MEMCG_NR_STAT,
};

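/*
 * Memory pressure and limit events, surfaced via the memory.events and
 * memory.swap.events control files.
 */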
enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

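/* How strongly memory.min/memory.low protect a memcg from reclaim */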
enum mem_cgroup_protection {
	MEMCG_PROT_NONE,
	MEMCG_PROT_LOW,
	MEMCG_PROT_MIN,
};

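/* Per-reclaimer cookie for the shared hierarchy iterator in mem_cgroup_iter() */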
struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

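/*
 * Private memcg ID. The short ID is recycled quickly, so objects that can
 * outlive the cgroup itself (e.g. swap records) hold a reference via ->ref.
 */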
struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * The per-memcg event counter is incremented at every pagein/pageout; with
 * THP, it is incremented by the number of pages. It is used to trigger the
 * periodic per-memcg housekeeping targeted below, which is cheaper than
 * consulting jiffies or similar clocks on every event.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};

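/* Per-CPU stat and event deltas; folded into the atomic totals in batches */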
struct memcg_vmstats_percpu {
	long stat[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

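/* Per-CPU slot for one node's NR_VM_NODE_STAT_ITEMS counters */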
struct lruvec_stat {
	long count[NR_VM_NODE_STAT_ITEMS];
};

/*
 * Bitmap of shrinker ids that may have objects charged to this memcg.
 * Indexed by shrinker id; grown on demand by memcg_expand_shrinker_maps().
 */
struct memcg_shrinker_map {
	struct rcu_head rcu;
	unsigned long map[0];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec lruvec;

	/* Legacy local VM stats */
	struct lruvec_stat __percpu *lruvec_stat_local;

	/* Subtree VM stats (batched updates) */
	struct lruvec_stat __percpu *lruvec_stat_cpu;
	atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];

	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];

#ifdef CONFIG_MEMCG_KMEM
	struct memcg_shrinker_map __rcu *shrinker_map;
#endif
	struct rb_node tree_node;	/* RB tree node on the soft-limit tree */
	unsigned long usage_in_excess;	/* Set to the value by which
					 * the soft limit is exceeded
					 */
	bool on_tree;
	bool congested;		/* memcg has many dirty pages
				 * backed by a congested BDI
				 */

	struct mem_cgroup *memcg;	/* Back pointer, we cannot
					 * use container_of
					 */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;	/* eventfd to signal on crossing */
	unsigned long threshold;	/* usage threshold, in pages */
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

enum memcg_kmem_state {
	KMEM_NONE,
	KMEM_ALLOCATED,
	KMEM_ONLINE,
};

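/*
 * Zero-size spacer that pushes the next member onto its own cacheline
 * on SMP, to limit false sharing between hot and cold fields.
 */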
#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name;
#else
#define MEMCG_PADDING(name)
#endif

/*
 * The memory controller data structure. The memory controller controls
 * both page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for
 * clock-pro, to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;
	struct page_counter swap;

	/* Legacy consumer-oriented counters */
	struct page_counter memsw;
	struct page_counter kmem;
	struct page_counter tcpmem;

	/* Upper bound of normal memory consumption range */
	unsigned long high;

	/* Reclaims excess usage above ->high from a workqueue */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	/*
	 * Should the OOM killer kill all belonging tasks, had it kill one?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int swappiness;
	/* OOM-Killer disable */
	int oom_kill_disable;

	/* handle for "memory.events" */
	struct cgroup_file events_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t move_lock;
	unsigned long move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t moving_account;
	struct task_struct *move_lock_task;

	/* Legacy local VM stats and events */
	struct memcg_vmstats_percpu __percpu *vmstats_local;

	/* Subtree VM stats and events (batched updates) */
	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

	MEMCG_PADDING(_pad2_);

	atomic_long_t vmstats[MEMCG_NR_STAT];
	atomic_long_t vmevents[NR_VM_EVENT_ITEMS];

	/* memory.events */
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];

	unsigned long socket_pressure;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	/* Index in the kmem_cache->memcg_params.memcg_caches array */
	int kmemcg_id;
	enum memcg_kmem_state kmem_state;
	struct list_head kmem_caches;
#endif

	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t scan_nodes;
	atomic_t numainfo_events;
	atomic_t numainfo_updating;
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
#endif

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers in big irons.
 */
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

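/* Effective memory.min/memory.low protection of @memcg within @root's subtree */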
enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
						struct mem_cgroup *memcg);

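/*
 * Charging is a two-step protocol: try_charge() reserves the pages and
 * looks up the memcg; the caller then either binds the charge to the
 * page with commit_charge() or backs out with cancel_charge().
 */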
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound);
int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, struct mem_cgroup **memcgp,
				bool compound);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare, bool compound);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
			      bool compound);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);

static inline struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
	return memcg->nodeinfo[nid];
}

/**
 * mem_cgroup_lruvec - get the lru list vector for a node or a memcg
 * @pgdat: node of the wanted lruvec
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for a given @pgdat and a
 * given @memcg. This can be the node lruvec, if the memory controller
 * is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
				struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = node_lruvec(pgdat);
		goto out;
	}

	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

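/*
 * Hierarchy walk: feed the previous return value back in as the second
 * argument; NULL ends the walk. The iterator pins its return value, so
 * an early exit must go through mem_cgroup_iter_break(), e.g.:
 *
 *	struct mem_cgroup *iter = NULL;
 *
 *	while ((iter = mem_cgroup_iter(root, iter, NULL)))
 *		...;
 */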
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}

struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/*
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Walks up the page_counter hierarchy; returns NULL for the root memcg.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	if (!root->use_hierarchy)
		return false;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}


/*
 * For memory reclaim: pick the next NUMA node to scan in this memcg.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->lru_zone_size[zone_idx][lru];
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);

/*
 * idx can be of type enum memcg_stat_item or node_stat_item.
 * Keep in sync with memcg_exact_page_state().
 */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	long x = atomic_long_read(&memcg->vmstats[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
						   int idx)
{
	long x = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

/**
 * __mod_memcg_page_state - update page state statistics
 * @page: the page
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 *
 * The @page must be locked or the caller must use lock_page_memcg()
 * to prevent double accounting when the page is concurrently being
 * moved to another memcg:
 *
 *   lock_page(page) or lock_page_memcg(page)
 *   if (TestClearPageState(page))
 *     __mod_memcg_page_state(page, state, -1);
 *   unlock_page(page) or unlock_page_memcg(page)
 *
 * Kernel pages are an exception to this, since they'll never move.
 */
static inline void __mod_memcg_page_state(struct page *page,
					  int idx, int val)
{
	if (page->mem_cgroup)
		__mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	if (page->mem_cgroup)
		mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = atomic_long_read(&pn->lruvec_stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			int val);

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!page->mem_cgroup) {
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
	__mod_lruvec_state(lruvec, idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	if (page->mem_cgroup)
		count_memcg_events(page->mem_cgroup, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

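/*
 * Count a memory event and notify memory.events watchers. Events
 * propagate up the hierarchy unless the cgroup2 "memory_localevents"
 * mount option confines them to the originating memcg.
 */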
static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	do {
		atomic_long_inc(&memcg->memory_events[event]);
		cgroup_file_notify(&memcg->events_file);

		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

struct mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline enum mem_cgroup_protection mem_cgroup_protected(
	struct mem_cgroup *root, struct mem_cgroup *memcg)
{
	return MEMCG_PROT_NONE;
}

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask,
					struct mem_cgroup **memcgp,
					bool compound)
{
	*memcgp = NULL;
	return 0;
}

static inline int mem_cgroup_try_charge_delay(struct page *page,
					      struct mm_struct *mm,
					      gfp_t gfp_mask,
					      struct mem_cgroup **memcgp,
					      bool compound)
{
	*memcgp = NULL;
	return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool lrucare, bool compound)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool compound)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
				struct mem_cgroup *memcg)
{
	return node_lruvec(pgdat);
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *lock_page_memcg(struct page *page)
{
	return NULL;
}

static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
						   int idx)
{
	return 0;
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void __mod_memcg_page_state(struct page *page,
					  int idx,
					  int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx,
					int nr)
{
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, -1);
}

static inline void __inc_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, 1);
}

static inline void __dec_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, -1);
}

static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, -1);
}

static inline void inc_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, 1);
}

static inline void dec_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, -1);
}

static inline void inc_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

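/* Socket buffer accounting: memory counter on cgroup2, tcpmem on cgroup1 */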
struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, memcg->socket_pressure))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}
#endif

struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
void memcg_kmem_put_cache(struct kmem_cache *cachep);

#ifdef CONFIG_MEMCG_KMEM
int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge(struct page *page, int order);
int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
			      struct mem_cgroup *memcg);

extern struct static_key_false memcg_kmem_enabled_key;
extern struct workqueue_struct *memcg_kmem_cache_wq;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must
 * still check if the cache is valid (it is either valid or NULL).
 * the slab_mutex must be held when looping through those caches
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_unlikely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge(page, order);
}

static inline int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp,
					  int order, struct mem_cgroup *memcg)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge_memcg(page, gfp, order, memcg);
	return 0;
}

/*
 * helper for accessing a memcg's index. It will be used as an index in the
 * child cache array in kmem_cache, and also to derive its name. This
 * function will return -1 when this is not a kmem-limited memcg.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

extern int memcg_expand_shrinker_maps(int new_id);

extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
				   int nid, int shrinker_id);
#else

static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge(struct page *page, int order)
{
}

#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
					  int nid, int shrinker_id) { }
#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */