/* SPDX-License-Identifier: GPL-2.0 */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2008
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
        MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
        MEMCG_SOCK,
        MEMCG_PERCPU_B,
        MEMCG_NR_STAT,
};

enum memcg_memory_event {
        MEMCG_LOW,
        MEMCG_HIGH,
        MEMCG_MAX,
        MEMCG_OOM,
        MEMCG_OOM_KILL,
        MEMCG_SWAP_HIGH,
        MEMCG_SWAP_MAX,
        MEMCG_SWAP_FAIL,
        MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
        pg_data_t *pgdat;
        unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
        int id;
        refcount_t ref;
};

/*
 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg event.
 */
enum mem_cgroup_events_target {
        MEM_CGROUP_TARGET_THRESH,
        MEM_CGROUP_TARGET_SOFTLIMIT,
        MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu {
        long stat[MEMCG_NR_STAT];
        unsigned long events[NR_VM_EVENT_ITEMS];
        unsigned long nr_page_events;
        unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
        struct mem_cgroup *position;
        /* scan generation, increased every round-trip */
        unsigned int generation;
};

struct lruvec_stat {
        long count[NR_VM_NODE_STAT_ITEMS];
};

/*
 * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
 * which have elements charged to this memcg.
 */
struct memcg_shrinker_map {
        struct rcu_head rcu;
        unsigned long map[];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
        struct lruvec lruvec;

        /* Legacy local VM stats */
        struct lruvec_stat __percpu *lruvec_stat_local;

        /* Subtree VM stats (batched updates) */
        struct lruvec_stat __percpu *lruvec_stat_cpu;
        atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];

        unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

        struct mem_cgroup_reclaim_iter iter;

        struct memcg_shrinker_map __rcu *shrinker_map;

        struct rb_node tree_node;       /* RB tree node */
        unsigned long usage_in_excess;  /* Set to the value by which
                                         * the soft limit is exceeded */
        bool on_tree;
        struct mem_cgroup *memcg;       /* Back pointer, we cannot
                                         * use container_of */
};

struct mem_cgroup_threshold {
        struct eventfd_ctx *eventfd;
        unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
        /* An array index points to threshold just below or equal to usage. */
        int current_threshold;
        /* Size of entries[] */
        unsigned int size;
        /* Array of thresholds */
        struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
        /* Primary thresholds array */
        struct mem_cgroup_threshold_ary *primary;
        /*
         * Spare threshold array.
         * This is needed to make mem_cgroup_unregister_event() "never fail".
         * It must be able to store at least primary->size - 1 entries.
         */
        struct mem_cgroup_threshold_ary *spare;
};

enum memcg_kmem_state {
        KMEM_NONE,
        KMEM_ALLOCATED,
        KMEM_ONLINE,
};

#if defined(CONFIG_SMP)
struct memcg_padding {
        char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name;
#else
#define MEMCG_PADDING(name)
#endif

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
 * a foreign inode, the existing records are mostly stale anyway and it's
 * easy to flush them.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
        u64 bdi_id;                     /* bdi->id of the foreign inode */
        int memcg_id;                   /* memcg->css.id of foreign inode */
        u64 at;                         /* jiffies_64 at the time of dirtying */
        struct wb_completion done;      /* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
        struct percpu_ref refcnt;
        struct mem_cgroup *memcg;
        atomic_t nr_charged_bytes;
        union {
                struct list_head list;
                struct rcu_head rcu;
        };
};

/*
 * The memory controller data structure. The memory controller controls
 * both page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for
 * clock-pro, to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
        struct cgroup_subsys_state css;

        /* Private memcg ID. Used to ID objects that outlive the cgroup */
        struct mem_cgroup_id id;

        /* Accounted resources */
        struct page_counter memory;             /* Both v1 & v2 */

        union {
                struct page_counter swap;       /* v2 only */
                struct page_counter memsw;      /* v1 only */
        };

        /* Legacy consumer-oriented counters */
        struct page_counter kmem;               /* v1 only */
        struct page_counter tcpmem;             /* v1 only */

        /* Range enforcement for interrupt charges */
        struct work_struct high_work;

        unsigned long soft_limit;

        /* vmpressure notifications */
        struct vmpressure vmpressure;

        /*
         * Should the accounting and control be hierarchical, per subtree?
         */
        bool use_hierarchy;

        /*
         * Should the OOM killer kill all belonging tasks, had it kill one?
         */
        bool oom_group;

        /* protected by memcg_oom_lock */
        bool oom_lock;
        int under_oom;

        int swappiness;
        /* OOM-Killer disable */
        int oom_kill_disable;

        /* memory.events and memory.events.local */
        struct cgroup_file events_file;
        struct cgroup_file events_local_file;

        /* handle for "memory.swap.events" */
        struct cgroup_file swap_events_file;

        /* protect arrays of thresholds */
        struct mutex thresholds_lock;

        /* thresholds for memory usage. RCU-protected */
        struct mem_cgroup_thresholds thresholds;

        /* thresholds for mem+swap usage. RCU-protected */
        struct mem_cgroup_thresholds memsw_thresholds;

        /* For oom notifier event fd */
        struct list_head oom_notify;

        /*
         * Should we move charges of a task when a task is moved into
         * this mem_cgroup? And what type of charges should we move?
         */
        unsigned long move_charge_at_immigrate;
        /* taken only while moving_account > 0 */
        spinlock_t move_lock;
        unsigned long move_lock_flags;

        MEMCG_PADDING(_pad1_);

        atomic_long_t vmstats[MEMCG_NR_STAT];
        atomic_long_t vmevents[NR_VM_EVENT_ITEMS];

        /* memory.events */
        atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
        atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

        unsigned long socket_pressure;

        /* Legacy tcp memory accounting */
        bool tcpmem_active;
        int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
        /* per-memcg kmem ID; indexes memcg-aware list_lrus and shrinker maps */
        int kmemcg_id;
        enum memcg_kmem_state kmem_state;
        struct obj_cgroup __rcu *objcg;
        struct list_head objcg_list;    /* list of inherited objcgs */
#endif

        MEMCG_PADDING(_pad2_);

        /*
         * set > 0 if pages under this cgroup are moving to other cgroup.
         */
        atomic_t moving_account;
        struct task_struct *move_lock_task;

        /* Legacy local VM stats and events */
        struct memcg_vmstats_percpu __percpu *vmstats_local;

        /* Subtree VM stats and events (batched updates) */
        struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head cgwb_list;
        struct wb_domain cgwb_domain;
        struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

        /* List of events which userspace want to receive */
        struct list_head event_list;
        spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        struct deferred_split deferred_split_queue;
#endif

        struct mem_cgroup_per_node *nodeinfo[0];
        /* WARNING: nodeinfo must be the last member here */
};

/*
 * size of first charge trial. "32" comes from vmscan.c's SWAP_CLUSTER_MAX
 * TODO: maybe necessary to use big numbers in big irons.
 */
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

static __always_inline bool memcg_stat_item_in_bytes(int idx)
{
        if (idx == MEMCG_PERCPU_B)
                return true;
        return vmstat_item_in_bytes(idx);
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
        return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
        return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
                                                  struct mem_cgroup *memcg,
                                                  bool in_low_reclaim)
{
        if (mem_cgroup_disabled())
                return 0;

        /*
         * There is no reclaim protection applied to a targeted reclaim.
         * We are special casing this specific case here because the
         * effective protection calculation is not robust enough to keep
         * the protection invariant for calculated effective values for
         * parallel reclaimers with different reclaim targets. This is
         * especially a problem for tail memcgs (as they have pages on
         * LRU), which would want effective values of 0 for targeted
         * reclaim but a different value for external reclaim.
         *
         * The effective values (emin/elow) are computed relative to one
         * particular reclaim root and are only meaningful for reclaims
         * using that same root, so the reclaim root itself must never be
         * considered protected here.
         */
        if (root == memcg)
                return 0;

        if (in_low_reclaim)
                return READ_ONCE(memcg->memory.emin);

        return max(READ_ONCE(memcg->memory.emin),
                   READ_ONCE(memcg->memory.elow));
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
                                     struct mem_cgroup *memcg);

static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
{
        /*
         * The root memcg doesn't account charges, and doesn't support
         * protection.
         */
        return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
        if (!mem_cgroup_supports_protection(memcg))
                return false;

        return READ_ONCE(memcg->memory.elow) >=
                page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
        if (!mem_cgroup_supports_protection(memcg))
                return false;

        return READ_ONCE(memcg->memory.emin) >=
                page_counter_read(&memcg->memory);
}
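
/*
 * Illustrative sketch (not an API contract defined here): reclaim code is
 * expected to refresh the effective protection first and then honor it,
 * roughly:
 *
 *	mem_cgroup_calculate_protection(target_memcg, memcg);
 *	if (mem_cgroup_below_min(memcg))
 *		;	// hard protection: skip this memcg entirely
 *	else if (mem_cgroup_below_low(memcg))
 *		;	// soft protection: reclaim only under real pressure
 *
 * The actual call sites live in mm/vmscan.c.
 */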

int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);

void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
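
/*
 * Usage sketch (illustrative only): a newly allocated page is charged
 * before it becomes reachable, and uncharged on the release path, e.g.:
 *
 *	if (mem_cgroup_charge(page, mm, GFP_KERNEL))
 *		goto err;	// over limit and reclaim did not help
 *	...
 *	mem_cgroup_uncharge(page);
 */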

static inline struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
        return memcg->nodeinfo[nid];
}

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
                                               struct pglist_data *pgdat)
{
        struct mem_cgroup_per_node *mz;
        struct lruvec *lruvec;

        if (mem_cgroup_disabled()) {
                lruvec = &pgdat->__lruvec;
                goto out;
        }

        if (!memcg)
                memcg = root_mem_cgroup;

        mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
        lruvec = &mz->lruvec;
out:
        /*
         * Since a node can be onlined after the mem_cgroup was created,
         * we have to be prepared to initialize lruvec->pgdat here;
         * and if offlined then reonlined, we need to reinitialize it.
         */
        if (unlikely(lruvec->pgdat != pgdat))
                lruvec->pgdat = pgdat;
        return lruvec;
}
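
/*
 * Example (illustrative): resolving the lruvec for a stat update when the
 * memcg and node are already known:
 *
 *	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
 *
 *	mod_lruvec_state(lruvec, NR_FILE_PAGES, nr_pages);
 */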

struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
        return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
        percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
        percpu_ref_put(&objcg->refcnt);
}
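
/*
 * Reference-counting sketch (illustrative): a pointer obtained under RCU
 * must be pinned with obj_cgroup_tryget() before the read lock is dropped:
 *
 *	rcu_read_lock();
 *	objcg = rcu_dereference(memcg->objcg);
 *	if (objcg && !obj_cgroup_tryget(objcg))
 *		objcg = NULL;	// cgroup is going away
 *	rcu_read_unlock();
 *	...
 *	obj_cgroup_put(objcg);	// drop the pin when done
 */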

/*
 * After the initialization objcg->memcg is always pointing at
 * a valid memcg, but can be atomically swapped to the parent memcg.
 *
 * The caller keeps the responsibility to ensure that the returned
 * memcg can't be released: e.g. acquire the rcu_read_lock or
 * css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
        return READ_ONCE(objcg->memcg);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
        if (memcg)
                css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
        container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
                                   struct mem_cgroup *,
                                   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
                          int (*)(struct task_struct *, void *), void *);
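
/*
 * Canonical iteration pattern (mirrors the one documented in
 * mm/memcontrol.c): walk the subtree below @root; references are dropped
 * automatically unless the loop exits early, in which case
 * mem_cgroup_iter_break() must be used:
 *
 *	for (memcg = mem_cgroup_iter(root, NULL, NULL);
 *	     memcg;
 *	     memcg = mem_cgroup_iter(root, memcg, NULL)) {
 *		if (done) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *	}
 */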

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
        if (mem_cgroup_disabled())
                return 0;

        return memcg->id.id;
}

struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
        return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
        struct mem_cgroup_per_node *mz;

        if (mem_cgroup_disabled())
                return NULL;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
        if (!memcg->memory.parent)
                return NULL;
        return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
                                            struct mem_cgroup *root)
{
        if (root == memcg)
                return true;
        if (!root->use_hierarchy)
                return false;
        return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                                   struct mem_cgroup *memcg)
{
        struct mem_cgroup *task_memcg;
        bool match = false;

        rcu_read_lock();
        task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (task_memcg)
                match = mem_cgroup_is_descendant(task_memcg, memcg);
        rcu_read_unlock();
        return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
        if (mem_cgroup_disabled())
                return true;
        return !!(memcg->css.flags & CSS_ONLINE);
}

/*
 * For memory reclaim.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                                int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
                                           enum lru_list lru, int zone_idx)
{
        struct mem_cgroup_per_node *mz;

        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
                                  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
        WARN_ON(current->in_user_fault);
        current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
        WARN_ON(!current->in_user_fault);
        current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
                                            struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern bool cgroup_memory_noswap;
#endif

struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);
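
/*
 * Locking sketch (illustrative): pin the page->mem_cgroup binding so the
 * page cannot be moved to another cgroup while memcg-specific page state
 * is being updated:
 *
 *	lock_page_memcg(page);
 *	if (TestClearPageDirty(page))
 *		dec_lruvec_page_state(page, NR_FILE_DIRTY);
 *	unlock_page_memcg(page);
 */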

/*
 * idx can be of type enum memcg_stat_item or node_stat_item.
 * Keep in sync with memcg_exact_page_state().
 */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
        long x = atomic_long_read(&memcg->vmstats[idx]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

/*
 * idx can be of type enum memcg_stat_item or node_stat_item.
 * Keep in sync with memcg_exact_page().
 */
static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
                                                   int idx)
{
        long x = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
                                   int idx, int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_memcg_state(memcg, idx, val);
        local_irq_restore(flags);
}

/**
 * __mod_memcg_page_state - update page state statistics
 * @page: the page
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 *
 * The @page must be locked or the caller must use lock_page_memcg()
 * to prevent double accounting when the page is concurrently being
 * moved to another memcg:
 *
 *   lock_page(page) or lock_page_memcg(page)
 *   if (TestClearPageState(page))
 *     __mod_memcg_page_state(page, state, -1);
 *   unlock_page(page) or unlock_page_memcg(page)
 *
 * Kernel pages are an exception to this, since they'll never move.
 */
static inline void __mod_memcg_page_state(struct page *page,
                                          int idx, int val)
{
        if (page->mem_cgroup)
                __mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline void mod_memcg_page_state(struct page *page,
                                        int idx, int val)
{
        if (page->mem_cgroup)
                mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
                                              enum node_stat_item idx)
{
        struct mem_cgroup_per_node *pn;
        long x;

        if (mem_cgroup_disabled())
                return node_page_state(lruvec_pgdat(lruvec), idx);

        pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        x = atomic_long_read(&pn->lruvec_stat[idx]);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
                                                    enum node_stat_item idx)
{
        struct mem_cgroup_per_node *pn;
        long x = 0;
        int cpu;

        if (mem_cgroup_disabled())
                return node_page_state(lruvec_pgdat(lruvec), idx);

        pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
        for_each_possible_cpu(cpu)
                x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
#ifdef CONFIG_SMP
        if (x < 0)
                x = 0;
#endif
        return x;
}

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                              int val);
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                        int val);
void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);

void mod_memcg_obj_state(void *p, int idx, int val);

static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
                                         int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_lruvec_slab_state(p, idx, val);
        local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
                                          enum node_stat_item idx, int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_memcg_lruvec_state(lruvec, idx, val);
        local_irq_restore(flags);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
                                    enum node_stat_item idx, int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_lruvec_state(lruvec, idx, val);
        local_irq_restore(flags);
}

static inline void __mod_lruvec_page_state(struct page *page,
                                           enum node_stat_item idx, int val)
{
        struct page *head = compound_head(page); /* rmap on tail pages */
        pg_data_t *pgdat = page_pgdat(page);
        struct lruvec *lruvec;

        /* Untracked pages have no memcg, no lruvec. Update pgdat */
        if (!head->mem_cgroup) {
                __mod_node_page_state(pgdat, idx, val);
                return;
        }

        lruvec = mem_cgroup_lruvec(head->mem_cgroup, pgdat);
        __mod_lruvec_state(lruvec, idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
                                         enum node_stat_item idx, int val)
{
        unsigned long flags;

        local_irq_save(flags);
        __mod_lruvec_page_state(page, idx, val);
        local_irq_restore(flags);
}
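
/*
 * Note on the two flavors above (sketch): the __mod_*() variants assume
 * interrupts are already disabled; the plain variants are safe in any
 * context because they wrap the update in local_irq_save()/restore():
 *
 *	__mod_lruvec_page_state(page, NR_WRITEBACK, 1);	// irqs off already
 *	mod_lruvec_page_state(page, NR_WRITEBACK, -1);	// any context
 */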

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned);

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
                          unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
                                      enum vm_event_item idx,
                                      unsigned long count)
{
        unsigned long flags;

        local_irq_save(flags);
        __count_memcg_events(memcg, idx, count);
        local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
                                          enum vm_event_item idx)
{
        if (page->mem_cgroup)
                count_memcg_events(page->mem_cgroup, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
                                        enum vm_event_item idx)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return;

        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (likely(memcg))
                count_memcg_events(memcg, idx, 1);
        rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
                                      enum memcg_memory_event event)
{
        bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
                          event == MEMCG_SWAP_FAIL;

        atomic_long_inc(&memcg->memory_events_local[event]);
        if (!swap_event)
                cgroup_file_notify(&memcg->events_local_file);

        do {
                atomic_long_inc(&memcg->memory_events[event]);
                if (swap_event)
                        cgroup_file_notify(&memcg->swap_events_file);
                else
                        cgroup_file_notify(&memcg->events_file);

                if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
                        break;
                if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
                        break;
        } while ((memcg = parent_mem_cgroup(memcg)) &&
                 !mem_cgroup_is_root(memcg));
}
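
/*
 * Behavior sketch: memcg_memory_event() always counts the event locally,
 * then propagates it up the hierarchy on cgroup2 unless the
 * "memory_localevents" mount option is set, so e.g.:
 *
 *	memcg_memory_event(memcg, MEMCG_OOM);
 *
 * wakes up memory.events watchers on @memcg and each of its ancestors.
 */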

static inline void memcg_memory_event_mm(struct mm_struct *mm,
                                         enum memcg_memory_event event)
{
        struct mem_cgroup *memcg;

        if (mem_cgroup_disabled())
                return;

        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (likely(memcg))
                memcg_memory_event(memcg, event);
        rcu_read_unlock();
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

struct mem_cgroup;

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
        return true;
}

static inline bool mem_cgroup_disabled(void)
{
        return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
                                      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
                                         enum memcg_memory_event event)
{
}

static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
                                                  struct mem_cgroup *memcg,
                                                  bool in_low_reclaim)
{
        return 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
                                                   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
        return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
        return false;
}

static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
                                    gfp_t gfp_mask)
{
        return 0;
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
                                               struct pglist_data *pgdat)
{
        return &pgdat->__lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
                                                    struct pglist_data *pgdat)
{
        return &pgdat->__lruvec;
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
        return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                                   struct mem_cgroup *memcg)
{
        return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
        return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
        return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
                struct mem_cgroup *prev,
                struct mem_cgroup_reclaim_cookie *reclaim)
{
        return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
                                         struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
                int (*fn)(struct task_struct *, void *), void *arg)
{
        return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
        return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
        WARN_ON_ONCE(id);
        /* XXX: This should always return root_mem_cgroup */
        return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
        return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
        return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
        return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
                                           enum lru_list lru, int zone_idx)
{
        return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
        return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
        return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *lock_page_memcg(struct page *page)
{
        return NULL;
}

static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
        return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
        return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
        struct task_struct *victim, struct mem_cgroup *oom_domain)
{
        return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
        return 0;
}

static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
                                                   int idx)
{
        return 0;
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
                                     int idx,
                                     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
                                   int idx,
                                   int nr)
{
}

static inline void __mod_memcg_page_state(struct page *page,
                                          int idx,
                                          int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
                                        int idx,
                                        int nr)
{
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
                                              enum node_stat_item idx)
{
        return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
                                                    enum node_stat_item idx)
{
        return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
                                            enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
                                      enum node_stat_item idx, int val)
{
        __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
                                    enum node_stat_item idx, int val)
{
        mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
                                           enum node_stat_item idx, int val)
{
        __mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
                                         enum node_stat_item idx, int val)
{
        mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
                                           int val)
{
        struct page *page = virt_to_head_page(p);

        __mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
                                         int val)
{
        struct page *page = virt_to_head_page(p);

        mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_memcg_obj_state(void *p, int idx, int val)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned)
{
        return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
                                      enum vm_event_item idx,
                                      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
                                        enum vm_event_item idx,
                                        unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
                                          int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_state(struct mem_cgroup *memcg,
                                     int idx)
{
        __mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_state(struct mem_cgroup *memcg,
                                     int idx)
{
        __mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_page_state(struct page *page,
                                          int idx)
{
        __mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_page_state(struct page *page,
                                          int idx)
{
        __mod_memcg_page_state(page, idx, -1);
}

static inline void __inc_lruvec_state(struct lruvec *lruvec,
                                      enum node_stat_item idx)
{
        __mod_lruvec_state(lruvec, idx, 1);
}

static inline void __dec_lruvec_state(struct lruvec *lruvec,
                                      enum node_stat_item idx)
{
        __mod_lruvec_state(lruvec, idx, -1);
}

static inline void __inc_lruvec_page_state(struct page *page,
                                           enum node_stat_item idx)
{
        __mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
                                           enum node_stat_item idx)
{
        __mod_lruvec_page_state(page, idx, -1);
}

static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
{
        __mod_lruvec_slab_state(p, idx, 1);
}

static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
{
        __mod_lruvec_slab_state(p, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_state(struct mem_cgroup *memcg,
                                   int idx)
{
        mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_state(struct mem_cgroup *memcg,
                                   int idx)
{
        mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_page_state(struct page *page,
                                        int idx)
{
        mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_page_state(struct page *page,
                                        int idx)
{
        mod_memcg_page_state(page, idx, -1);
}

static inline void inc_lruvec_state(struct lruvec *lruvec,
                                    enum node_stat_item idx)
{
        mod_lruvec_state(lruvec, idx, 1);
}

static inline void dec_lruvec_state(struct lruvec *lruvec,
                                    enum node_stat_item idx)
{
        mod_lruvec_state(lruvec, idx, -1);
}

static inline void inc_lruvec_page_state(struct page *page,
                                         enum node_stat_item idx)
{
        mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
                                         enum node_stat_item idx)
{
        mod_lruvec_page_state(page, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
        struct mem_cgroup *memcg;

        memcg = lruvec_memcg(lruvec);
        if (!memcg)
                return NULL;
        memcg = parent_mem_cgroup(memcg);
        if (!memcg)
                return NULL;
        return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}
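
/*
 * Example (illustrative): walking lruvecs from a leaf towards the root,
 * as reclaim heuristics do when propagating LRU cost:
 *
 *	for (; lruvec; lruvec = parent_lruvec(lruvec))
 *		...
 */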

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
                         unsigned long *pheadroom, unsigned long *pdirty,
                         unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
                                             struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
                                                  struct bdi_writeback *wb)
{
        if (mem_cgroup_disabled())
                return;

        if (unlikely(&page->mem_cgroup->css != wb->memcg_css))
                mem_cgroup_track_foreign_dirty_slowpath(page, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else /* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
        return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
                                       unsigned long *pfilepages,
                                       unsigned long *pheadroom,
                                       unsigned long *pdirty,
                                       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
                                                  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif /* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
        if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
                return true;
        do {
                if (time_before(jiffies, memcg->socket_pressure))
                        return true;
        } while ((memcg = parent_mem_cgroup(memcg)));
        return false;
}
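
/*
 * Usage sketch (illustrative): the network stack checks this behind the
 * mem_cgroup_sockets_enabled static branch to decide whether a socket
 * should behave as if under protocol memory pressure:
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    mem_cgroup_under_socket_pressure(sk->sk_memcg))
 *		return true;	// treat as under memory pressure
 */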

extern int memcg_expand_shrinker_maps(int new_id);

extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
                                   int nid, int shrinker_id);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
        return false;
}

static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
                                          int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
                        unsigned int nr_pages);
void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_kmem_enabled_key;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * the slab_mutex must be held when looping through those caches
 */
#define for_each_memcg_cache_index(_idx)	\
        for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)
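
/*
 * Example (illustrative):
 *
 *	int idx;
 *
 *	for_each_memcg_cache_index(idx) {
 *		// idx ranges over [0, memcg_nr_cache_ids)
 *	}
 */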

static inline bool memcg_kmem_enabled(void)
{
        return static_branch_likely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
                                         int order)
{
        if (memcg_kmem_enabled())
                return __memcg_kmem_charge_page(page, gfp, order);
        return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
        if (memcg_kmem_enabled())
                __memcg_kmem_uncharge_page(page, order);
}
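
/*
 * Usage sketch (illustrative): the page allocator charges kernel pages
 * for __GFP_ACCOUNT allocations through these hooks, conceptually:
 *
 *	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT) &&
 *	    memcg_kmem_charge_page(page, gfp, order))
 *		...	// charge failed, fail the allocation
 *	...
 *	memcg_kmem_uncharge_page(page, order);	// on free
 */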

static inline int memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
                                    unsigned int nr_pages)
{
        if (memcg_kmem_enabled())
                return __memcg_kmem_charge(memcg, gfp, nr_pages);
        return 0;
}

static inline void memcg_kmem_uncharge(struct mem_cgroup *memcg,
                                       unsigned int nr_pages)
{
        if (memcg_kmem_enabled())
                __memcg_kmem_uncharge(memcg, nr_pages);
}

/*
 * helper for accessing a memcg's index. It will be used as an index in the
 * child cache array in kmem_cache, and also to derive its name. This function
 * will return -1 when this is not a kmem-limited memcg.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
        return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);

#else

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
                                         int order)
{
        return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
                                           int order)
{
        return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

#define for_each_memcg_cache_index(_idx)	\
        for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
        return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
        return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
        return NULL;
}

#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */