/* SPDX-License-Identifier: GPL-2.0 */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2008
 * Copyright Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg event.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu {
	/* Local (CPU and cgroup) page state & events */
	long state[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[MEMCG_NR_STAT];
	unsigned long events_prev[NR_VM_EVENT_ITEMS];

	/* Cgroup1: threshold notifications & softlimit tree updates */
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long state[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[MEMCG_NR_STAT];
	unsigned long events_pending[NR_VM_EVENT_ITEMS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to this memcg.
 */
struct shrinker_info {
	struct rcu_head rcu;
	atomic_long_t *nr_deferred;
	unsigned long *map;
};

struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_VM_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_VM_NODE_STAT_ITEMS];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec lruvec;

	struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
	struct lruvec_stats lruvec_stats;

	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter iter;

	struct shrinker_info __rcu *shrinker_info;

	struct rb_node tree_node;	/* RB tree node */
	unsigned long usage_in_excess;	/* Set to the value by which */
					/* the soft limit is exceeded */
	bool on_tree;
	struct mem_cgroup *memcg;	/* Back pointer, we cannot */
					/* use container_of */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

/* Pad hot mem_cgroup fields out to internode cacheline boundaries on SMP */
#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name
#else
#define MEMCG_PADDING(name)
#endif

/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup. Inode sharing is expected to be uncommon and, even if we miss
 * one, an inode that keeps getting foreign-dirtied will be caught on a
 * later pass, so a small fixed-size history is sufficient.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign bdi */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list;
		struct rcu_head rcu;
	};
};

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all belonging tasks, had it kill one?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int swappiness;
	/* OOM-Killer disable */
	int oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into
	 * this mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t move_lock;
	unsigned long move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/* memory.stat */
	struct memcg_vmstats vmstats;

	/* memory.events */
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	unsigned long socket_pressure;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	int kmemcg_id;
	struct obj_cgroup __rcu *objcg;
	struct list_head objcg_list;	/* list of inherited objcgs */
#endif

	MEMCG_PADDING(_pad2_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t moving_account;
	struct task_struct *move_lock_task;

	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	struct mem_cgroup_per_node *nodeinfo[];
};

/*
 * size of first charge trial. "32" comes from vmscan.c's SWAP_CLUSTER_MAX
 * TODO: maybe necessary to use big numbers in big irons.
 */
#define MEMCG_CHARGE_BATCH	32U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to an objcgs vector */
	MEMCG_DATA_OBJCGS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

static inline bool folio_memcg_kmem(struct folio *folio);

/*
 * After the initialization objcg->memcg is always pointing at
 * a valid memcg, but can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}
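
/*
 * A minimal usage sketch (illustrative, not a verbatim kernel call
 * site): the returned memcg may only be dereferenced while the caller
 * prevents it from being released, e.g. under the RCU read lock:
 *
 *	rcu_read_lock();
 *	memcg = obj_cgroup_memcg(objcg);
 *	... use memcg; it cannot be freed until rcu_read_unlock() ...
 *	rcu_read_unlock();
 */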

/*
 * __folio_memcg - Get the memory cgroup associated with a non-kmem folio
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some type of folios, e.g. slab folios or ex-slab folios or
 * kmem folios.
 */
static inline struct mem_cgroup *__folio_memcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio);

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * __folio_objcg - get the object cgroup associated with a kmem folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the object cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some type of folios, e.g. slab folios or ex-slab folios or
 * LRU folios.
 */
static inline struct obj_cgroup *__folio_objcg(struct folio *folio)
{
	unsigned long memcg_data = folio->memcg_data;

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio);
	VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio);

	return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * folio_memcg - Get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * Returns a pointer to the memory cgroup associated with the folio,
 * or NULL. This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some type of folios, e.g. slab folios or ex-slab folios.
 *
 * For a non-kmem folio any of the following ensures folio and memcg binding
 * stability:
 *
 * - the folio lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem folio a caller should hold an rcu read lock to protect memcg
 * associated with a kmem folio from being released.
 */
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	if (folio_memcg_kmem(folio))
		return obj_cgroup_memcg(__folio_objcg(folio));
	return __folio_memcg(folio);
}
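
/*
 * A minimal usage sketch (illustrative, not a verbatim kernel call
 * site): with the folio lock held the folio<->memcg binding is stable,
 * so the returned memcg can be used without further synchronization:
 *
 *	folio_lock(folio);
 *	memcg = folio_memcg(folio);
 *	... memcg is stable until folio_unlock(folio) ...
 *	folio_unlock(folio);
 */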

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return folio_memcg(page_folio(page));
}

/**
 * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio.
 * @folio: Pointer to the folio.
 *
 * This function assumes that the folio is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some type of folios, e.g. slab folios or ex-slab folios.
 *
 * Return: A pointer to the memory cgroup associated with the folio,
 * or NULL.
 */
static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	unsigned long memcg_data = READ_ONCE(folio->memcg_data);

	VM_BUG_ON_FOLIO(folio_test_slab(folio), folio);
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_memcg_check - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function unlike page_memcg() can take any page
 * as an argument. It has to be used in cases when it's not known if a page
 * has an associated memory cgroup pointer or an object cgroups vector or
 * an object cgroup.
 *
 * For a non-kmem page any of the following ensures page and memcg binding
 * stability:
 *
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem page a caller should hold an rcu read lock to protect memcg
 * associated with a kmem page from being released.
 */
static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	/*
	 * Because page->memcg_data might be changed asynchronously
	 * for slab pages, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJCGS)
		return NULL;

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * folio_memcg_kmem - Check if the folio has the memcg_kmem flag set.
 * @folio: Pointer to the folio.
 *
 * Checks if the folio has MemcgKmem flag set. The caller must ensure
 * that the folio has an associated memory cgroup. It's not safe to call
 * this function against some types of folios, e.g. slab folios.
 */
static inline bool folio_memcg_kmem(struct folio *folio)
{
	VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page);
	VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio);
	return folio->memcg_data & MEMCG_DATA_KMEM;
}

/*
 * page_objcgs - get the object cgroups vector associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the object cgroups vector associated with the page,
 * or NULL. This function assumes that the page is known to have an
 * associated object cgroups vector. It's not safe to call this function
 * against pages, which might have an associated memory cgroup: e.g.
 * kernel stack pages.
 */
static inline struct obj_cgroup **page_objcgs(struct page *page)
{
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS), page);
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_objcgs_check - get the object cgroups vector associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the object cgroups vector associated with the page,
 * or NULL. This function is safe to use if the page can be directly associated
 * with a memory cgroup.
 */
static inline struct obj_cgroup **page_objcgs_check(struct page *page)
{
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (!memcg_data || !(memcg_data & MEMCG_DATA_OBJCGS))
		return NULL;

	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

#else
static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

static inline struct obj_cgroup **page_objcgs(struct page *page)
{
	return NULL;
}

static inline struct obj_cgroup **page_objcgs_check(struct page *page)
{
	return NULL;
}
#endif

static inline bool PageMemcgKmem(struct page *page)
{
	return folio_memcg_kmem(page_folio(page));
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;

	if (mem_cgroup_disabled())
		return;

	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We are special casing this specific case here because the
	 * effective protection calculation is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim targets. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
	 * which would want to have effective values 0 for targeted reclaim
	 * but a different value for external reclaim.
	 *
	 * Example
	 * Let's have global and A's reclaim in parallel:
	 *  |
	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
	 *  |\
	 *  | C (low = 1G, usage = 2.5G)
	 *  B (low = 1G, usage = 0.5G)
	 *
	 * For the global reclaim
	 * A.elow = A.low
	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
	 * C.elow = min(C.usage, C.low)
	 *
	 * With the effective values resetting we have A reclaim
	 * A.elow = 0
	 * B.elow = B.low
	 * C.elow = C.low
	 *
	 * If the global reclaim races with A's reclaim then
	 * B.elow = C.elow = 0 (because children_low_usage > A.elow)
	 * is possible and reclaiming B would be violating the protection.
	 */
	if (root == memcg)
		return;

	*min = READ_ONCE(memcg->memory.emin);
	*low = READ_ONCE(memcg->memory.elow);
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection.
	 */
	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}
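
/*
 * Illustrative reclaim-side pattern (a sketch of how vmscan-style code
 * consumes these helpers, not a verbatim kernel call site):
 *
 *	mem_cgroup_calculate_protection(target_memcg, memcg);
 *	if (mem_cgroup_below_min(memcg)) {
 *		// hard protection: skip this memcg entirely
 *	} else if (mem_cgroup_below_low(memcg)) {
 *		// soft protection: skip unless reclaim is desperate,
 *		// and record the breach as a MEMCG_LOW event
 *	}
 */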

int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);

/**
 * mem_cgroup_charge - Charge a newly allocated folio to a cgroup.
 * @folio: Folio to charge.
 * @mm: mm context of the allocating task.
 * @gfp: Reclaim mode.
 *
 * Try to charge @folio to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp if necessary. If @mm is NULL, try to
 * charge to the active memcg.
 *
 * Do not use this for folios allocated for swapin.
 *
 * Return: 0 on success. Otherwise, an error code is returned.
 */
static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
				    gfp_t gfp)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_charge(folio, mm, gfp);
}
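
/*
 * A minimal charge/uncharge pairing (illustrative sketch): a newly
 * allocated folio is charged once; mem_cgroup_uncharge(), declared
 * below, is the matching release on the freeing path:
 *
 *	if (mem_cgroup_charge(folio, mm, GFP_KERNEL))
 *		goto out_put_folio;	// charge failed, back out
 *	...
 *	mem_cgroup_uncharge(folio);	// when the folio is freed
 *
 * (out_put_folio is a hypothetical error label, not a kernel symbol.)
 */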

int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
				  gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);

void __mem_cgroup_uncharge(struct folio *folio);

/**
 * mem_cgroup_uncharge - Uncharge a folio.
 * @folio: Folio to uncharge.
 *
 * Uncharge a folio previously charged with mem_cgroup_charge().
 */
static inline void mem_cgroup_uncharge(struct folio *folio)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge(folio);
}

void __mem_cgroup_uncharge_list(struct list_head *page_list);
static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_list(page_list);
}

void mem_cgroup_migrate(struct folio *old, struct folio *new);

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = memcg->nodeinfo[pgdat->node_id];
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * folio_lruvec - return lruvec for isolating/putting an LRU folio
 * @folio: Pointer to the folio.
 *
 * This function relies on folio->mem_cgroup being stable.
 */
static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct mem_cgroup *memcg = folio_memcg(folio);

	VM_WARN_ON_ONCE_FOLIO(!memcg && !mem_cgroup_disabled(), folio);
	return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
					 unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio);
#else
static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
				       unsigned long nr)
{
	percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}

struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
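
/*
 * Illustrative ID round-trip (a sketch, not a verbatim kernel call
 * site): the 16-bit private ID can stand in for a memcg pointer, e.g.
 * in swap records; the lookup side must hold the RCU read lock:
 *
 *	unsigned short id = mem_cgroup_id(memcg);
 *	...
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);	// NULL if the memcg is gone
 *	rcu_read_unlock();
 */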

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern bool cgroup_memory_noswap;
#endif

void folio_memcg_lock(struct folio *folio);
void folio_memcg_unlock(struct folio *folio);
void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}
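
/*
 * Illustrative use (a sketch, not a verbatim kernel call site):
 * __mod_memcg_state() requires interrupts to be disabled already,
 * while mod_memcg_state() may be called from any context, e.g. when
 * (un)charging socket buffers:
 *
 *	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
 *	...
 *	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
 */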

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return READ_ONCE(memcg->vmstats.state[idx]);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(pn->lruvec_stats.state[idx]);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void mem_cgroup_flush_stats(void);

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_kmem_state(p, idx, val);
	local_irq_restore(flags);
}
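
/*
 * Illustrative use (a sketch, not a verbatim kernel call site): the
 * _kmem variants take a kernel pointer rather than a lruvec and
 * resolve the accounting target from its containing page, e.g. when
 * accounting slab-backed kernel stacks:
 *
 *	mod_lruvec_kmem_state(stack, NR_KERNEL_STACK_KB,
 *			      THREAD_SIZE / 1024);
 */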

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
					  enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (memcg)
		count_memcg_events(memcg, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (swap_event)
			cgroup_file_notify(&memcg->swap_events_file);
		else
			cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}
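
/*
 * Illustrative call sites (a sketch, not verbatim kernel code):
 * reclaim raises MEMCG_LOW when protection is breached, the OOM path
 * raises MEMCG_OOM, etc.:
 *
 *	memcg_memory_event(memcg, MEMCG_LOW);
 *	memcg_memory_event_mm(current->mm, MEMCG_OOM);
 */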

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

void split_page_memcg(struct page *head, unsigned int nr);

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	return NULL;
}

static inline bool folio_memcg_kmem(struct folio *folio)
{
	return false;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_charge(struct folio *folio,
				    struct mm_struct *mm, gfp_t gfp)
{
	return 0;
}

static inline int mem_cgroup_swapin_charge_page(struct page *page,
			struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}

static inline void mem_cgroup_uncharge(struct folio *folio)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);
	return &pgdat->__lruvec;
}

static inline
void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *folio_lruvec_lock(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
		unsigned long *flagsp)
{
	struct pglist_data *pgdat = folio_pgdat(folio);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void lock_page_memcg(struct page *page)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void folio_memcg_lock(struct folio *folio)
{
}

static inline void folio_memcg_unlock(struct folio *folio)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void mem_cgroup_flush_stats(void)
{
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void split_page_memcg(struct page *head, unsigned int nr)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}
#endif /* CONFIG_MEMCG */

static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, 1);
}

static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
	struct mem_cgroup *memcg;

	memcg = lruvec_memcg(lruvec);
	if (!memcg)
		return NULL;
	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
	spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
		unsigned long flags)
{
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Test requires a stable page->memcg binding, see page_memcg() */
static inline bool folio_matches_lruvec(struct folio *folio,
					struct lruvec *lruvec)
{
	return lruvec_pgdat(lruvec) == folio_pgdat(folio) &&
	       lruvec_memcg(lruvec) == folio_memcg(folio);
}

/* Don't lock again iff page's lruvec locked */
static inline struct lruvec *folio_lruvec_relock_irq(struct folio *folio,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return folio_lruvec_lock_irq(folio);
}

/* Don't lock again iff page's lruvec locked */
static inline struct lruvec *folio_lruvec_relock_irqsave(struct folio *folio,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	if (locked_lruvec) {
		if (folio_matches_lruvec(folio, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	}

	return folio_lruvec_lock_irqsave(folio, flags);
}
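
/*
 * Illustrative batching loop (a sketch, not a verbatim kernel call
 * site): the relock helpers let a walker keep one lru_lock held while
 * consecutive folios belong to the same lruvec:
 *
 *	struct lruvec *lruvec = NULL;
 *	unsigned long flags;
 *
 *	list_for_each_entry(folio, folio_list, lru) {
 *		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
 *		... operate on folio under lruvec->lru_lock ...
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */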

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
	if (mem_cgroup_disabled())
		return;

	if (unlikely(&folio_memcg(folio)->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else /* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif /* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
			     gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}

int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
				    int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_kmem_enabled_key;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * the slab_mutex must be held when looping through those caches
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_likely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_page(page, order);
}
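
/*
 * Illustrative pairing (a sketch, not a verbatim kernel call site):
 * __GFP_ACCOUNT page allocations are charged with
 * memcg_kmem_charge_page() and must be uncharged on the freeing path:
 *
 *	if (memcg_kmem_charge_page(page, gfp, order))
 *		return NULL;	// over the kmem limit
 *	...
 *	memcg_kmem_uncharge_page(page, order);	// at free time
 */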

/*
 * A helper for accessing memcg's kmem_id, used for getting
 * corresponding LRU lists.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);

#else
static inline bool mem_cgroup_kmem_disabled(void)
{
	return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */