#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

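/*
 * Cgroup-specific page state, on top of universal node page state.
 * The items defined here extend the enum node_stat_item range, so both
 * namespaces can be indexed by the same statistics machinery.
 */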
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

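/*
 * Cookie shared between successive calls to mem_cgroup_iter() so that a
 * hierarchical reclaim walk can resume where the previous round left off.
 */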
struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu {
	long stat[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	unsigned int generation;
};

struct lruvec_stat {
	long count[NR_VM_NODE_STAT_ITEMS];
};

struct batched_lruvec_stat {
	s32 count[NR_VM_NODE_STAT_ITEMS];
};

struct memcg_shrinker_map {
	struct rcu_head rcu;
	unsigned long map[];
};

struct mem_cgroup_per_node {
	struct lruvec lruvec;

	struct lruvec_stat __percpu *lruvec_stat_local;

	struct batched_lruvec_stat __percpu *lruvec_stat_cpu;
	atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];

	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter iter;

	struct memcg_shrinker_map __rcu *shrinker_map;

	struct rb_node tree_node;
	unsigned long usage_in_excess;
	bool on_tree;
	struct mem_cgroup *memcg;
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

struct mem_cgroup_threshold_ary {
	int current_threshold;
	unsigned int size;
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	struct mem_cgroup_threshold_ary *primary;
	struct mem_cgroup_threshold_ary *spare;
};

enum memcg_kmem_state {
	KMEM_NONE,
	KMEM_ALLOCATED,
	KMEM_ONLINE,
};

#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name) struct memcg_padding name;
#else
#define MEMCG_PADDING(name)
#endif

#define MEMCG_CGWB_FRN_CNT 4

struct memcg_cgwb_frn {
	u64 bdi_id;
	int memcg_id;
	u64 at;
	struct wb_completion done;
};

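/*
 * Bucket for arbitrarily byte-sized objects charged to a memory cgroup.
 * The bucket can be reparented in one piece when the cgroup is destroyed,
 * without having to round up the individual charges to page size.
 */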
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list;
		struct rcu_head rcu;
	};
};

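/*
 * The memory controller data structure. One instance exists per memory
 * cgroup; it holds the cgroup's page counters, thresholds, OOM and
 * writeback state, and the per-node LRU information in nodeinfo[].
 */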
struct mem_cgroup {
	struct cgroup_subsys_state css;

	struct mem_cgroup_id id;

	struct page_counter memory;

	union {
		struct page_counter swap;
		struct page_counter memsw;
	};

	struct page_counter kmem;
	struct page_counter tcpmem;

	struct work_struct high_work;

	unsigned long soft_limit;

	struct vmpressure vmpressure;

	bool oom_group;

	bool oom_lock;
	int under_oom;

	int swappiness;

	int oom_kill_disable;

	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	struct cgroup_file swap_events_file;

	struct mutex thresholds_lock;

	struct mem_cgroup_thresholds thresholds;

	struct mem_cgroup_thresholds memsw_thresholds;

	struct list_head oom_notify;

	unsigned long move_charge_at_immigrate;

	spinlock_t move_lock;
	unsigned long move_lock_flags;

	MEMCG_PADDING(_pad1_);

	atomic_long_t vmstats[MEMCG_NR_STAT];
	atomic_long_t vmevents[NR_VM_EVENT_ITEMS];

	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	unsigned long socket_pressure;

	bool tcpmem_active;
	int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	int kmemcg_id;
	enum memcg_kmem_state kmem_state;
	struct obj_cgroup __rcu *objcg;
	struct list_head objcg_list;
#endif

	MEMCG_PADDING(_pad2_);

	atomic_t moving_account;
	struct task_struct *move_lock_task;

	struct memcg_vmstats_percpu __percpu *vmstats_local;

	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	struct mem_cgroup_per_node *nodeinfo[0];
};

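/*
 * How many pages are charged in one batch before the charge path has to
 * go back to the page counters; a larger batch means fewer atomic
 * updates at the cost of some per-CPU imprecision.
 */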
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to an objcgs vector */
	MEMCG_DATA_OBJCGS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

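/*
 * page_memcg - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function assumes that the page is known to have a
 * proper memory cgroup pointer; it is not safe to call it on slab
 * pages, whose memcg_data carries an object cgroup vector instead.
 */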
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	unsigned long memcg_data = page->memcg_data;

	VM_BUG_ON_PAGE(PageSlab(page), page);
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page);

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

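/*
 * page_memcg_rcu - locklessly get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Like page_memcg(), but only requires the RCU read lock (checked with
 * WARN_ON_ONCE) rather than a stable page -> memcg binding.
 */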
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	VM_BUG_ON_PAGE(PageSlab(page), page);
	WARN_ON_ONCE(!rcu_read_lock_held());

	return (struct mem_cgroup *)(READ_ONCE(page->memcg_data) &
				     ~MEMCG_DATA_FLAGS_MASK);
}

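/*
 * page_memcg_check - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page, or
 * NULL. Unlike page_memcg(), it is safe on any page, including slab
 * pages: pages carrying an object cgroup vector simply return NULL.
 */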
static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	/*
	 * Because page->memcg_data might be changed asynchronously
	 * for slab pages, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJCGS)
		return NULL;

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

static inline bool PageMemcgKmem(struct page *page)
{
	VM_BUG_ON_PAGE(page->memcg_data & MEMCG_DATA_OBJCGS, page);
	return page->memcg_data & MEMCG_DATA_KMEM;
}

#ifdef CONFIG_MEMCG_KMEM

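/*
 * page_objcgs - get the object cgroups vector associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the object cgroups vector associated with the
 * page, or NULL. The page is expected to be a slab page whose
 * memcg_data has MEMCG_DATA_OBJCGS set; use page_objcgs_check() when
 * that is not guaranteed.
 */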
static inline struct obj_cgroup **page_objcgs(struct page *page)
{
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS), page);
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

static inline struct obj_cgroup **page_objcgs_check(struct page *page)
{
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (!memcg_data || !(memcg_data & MEMCG_DATA_OBJCGS))
		return NULL;

	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

#else
static inline struct obj_cgroup **page_objcgs(struct page *page)
{
	return NULL;
}

static inline struct obj_cgroup **page_objcgs_check(struct page *page)
{
	return NULL;
}
#endif

static __always_inline bool memcg_stat_item_in_bytes(int idx)
{
	if (idx == MEMCG_PERCPU_B)
		return true;
	return vmstat_item_in_bytes(idx);
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
						  struct mem_cgroup *memcg,
						  bool in_low_reclaim)
{
	if (mem_cgroup_disabled())
		return 0;

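	/*
	 * Effective protection is always calculated relative to the
	 * current reclaim root: the root itself gets no protection, and
	 * during low reclaim only the hard minimum (emin) is honoured.
	 */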
	if (root == memcg)
		return 0;

	if (in_low_reclaim)
		return READ_ONCE(memcg->memory.emin);

	return max(READ_ONCE(memcg->memory.emin),
		   READ_ONCE(memcg->memory.elow));
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection.
	 */
	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}

int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);

void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);

static inline struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
	return memcg->nodeinfo[nid];
}

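/*
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */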
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	lruvec = &mz->lruvec;
out:
	/*
	 * A node can be onlined after this memcg was created, so the
	 * lruvec's pgdat may still be unset or stale; (re)initialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

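/*
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @pgdat: pgdat of the page
 *
 * This function relies on the page's memcg binding being stable
 * (see page_memcg()).
 */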
static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct pglist_data *pgdat)
{
	struct mem_cgroup *memcg = page_memcg(page);

	VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page);
	return mem_cgroup_lruvec(memcg, pgdat);
}

static inline bool lruvec_holds_page_lru_lock(struct page *page,
					      struct lruvec *lruvec)
{
	pg_data_t *pgdat = page_pgdat(page);
	const struct mem_cgroup *memcg;
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return lruvec == &pgdat->__lruvec;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = page_memcg(page) ? : root_mem_cgroup;

	return lruvec->pgdat == pgdat && mz->memcg == memcg;
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *lock_page_lruvec(struct page *page);
struct lruvec *lock_page_lruvec_irq(struct page *page);
struct lruvec *lock_page_lruvec_irqsave(struct page *page,
					unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page);
#else
static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}

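/*
 * After initialization, objcg->memcg always points at a valid memcg, but
 * it can be atomically swapped to the parent when the original cgroup is
 * destroyed. The caller must therefore make sure the returned memcg
 * cannot be released, e.g. by holding the RCU read lock.
 */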
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);

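/*
 * A minimal sketch (not part of this header) of how the iterator above is
 * typically used to walk a memcg subtree; mem_cgroup_iter_break() is only
 * needed when the walk is abandoned early:
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL);
 *	     iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		if (should_stop(iter)) {	// hypothetical predicate
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */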
static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

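/*
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */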
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern bool cgroup_memory_noswap;
#endif

struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);

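/*
 * Cgroup-wide page state. @idx may be an enum memcg_stat_item or an enum
 * node_stat_item value. On SMP, values that are transiently negative due
 * to per-cpu drift are clamped to zero, so callers never see a negative
 * counter.
 */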
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	long x = atomic_long_read(&memcg->vmstats[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
						   int idx)
{
	long x = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = atomic_long_read(&pn->lruvec_stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_kmem_state(p, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
					  enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (memcg)
		count_memcg_events(memcg, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (swap_event)
			cgroup_file_notify(&memcg->swap_events_file);
		else
			cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

void split_page_memcg(struct page *head, unsigned int nr);

#else /* CONFIG_MEMCG */
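/*
 * CONFIG_MEMCG=n: no-op stubs so call sites don't need to be wrapped in
 * #ifdefs; most of these compile away entirely.
 */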
#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

struct mem_cgroup;

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	return NULL;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
						  struct mem_cgroup *memcg,
						  bool in_low_reclaim)
{
	return 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
				    gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline bool lruvec_holds_page_lru_lock(struct page *page,
					      struct lruvec *lruvec)
{
	pg_data_t *pgdat = page_pgdat(page);

	return lruvec == &pgdat->__lruvec;
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *lock_page_lruvec(struct page *page)
{
	struct pglist_data *pgdat = page_pgdat(page);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *lock_page_lruvec_irq(struct page *page)
{
	struct pglist_data *pgdat = page_pgdat(page);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *lock_page_lruvec_irqsave(struct page *page,
						      unsigned long *flagsp)
{
	struct pglist_data *pgdat = page_pgdat(page);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *lock_page_memcg(struct page *page)
{
	return NULL;
}

static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
						   int idx)
{
	return 0;
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void split_page_memcg(struct page *head, unsigned int nr)
{
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
{
}
#endif /* CONFIG_MEMCG */

static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, 1);
}

static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
	struct mem_cgroup *memcg;

	memcg = lruvec_memcg(lruvec);
	if (!memcg)
		return NULL;
	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
	spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
						 unsigned long flags)
{
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

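/*
 * Don't reacquire the lru lock if @locked_lruvec already covers @page;
 * otherwise drop the old lock and take the one for the page's lruvec.
 */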
static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (lruvec_holds_page_lru_lock(page, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return lock_page_lruvec_irq(page);
}

static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	if (locked_lruvec) {
		if (lruvec_holds_page_lru_lock(page, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	}

	return lock_page_lruvec_irqsave(page, flags);
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
						  struct bdi_writeback *wb)
{
	if (mem_cgroup_disabled())
		return;

	if (unlikely(&page_memcg(page)->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(page, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

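/*
 * Socket memory accounting. The mem_cgroup_sockets_enabled static branch
 * lets the networking fast paths skip these hooks entirely while no
 * cgroup has socket accounting active.
 */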
struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, memcg->socket_pressure))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}

extern int memcg_expand_shrinker_maps(int new_id);

extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
				   int nid, int shrinker_id);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
static inline void mem_cgroup_sk_free(struct sock *sk) { };
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
					  int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_kmem_enabled_key;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_likely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_page(page, order);
}
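
/*
 * A minimal usage sketch (not part of this header): a caller allocating
 * pages that should be accounted as kernel memory might pair the helpers
 * above roughly like this (gfp flags and error handling are the caller's
 * choice):
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, order);
 *
 *	if (page && memcg_kmem_charge_page(page, GFP_KERNEL, order)) {
 *		__free_pages(page, order);	// charge failed
 *		page = NULL;
 *	}
 *	...
 *	memcg_kmem_uncharge_page(page, order);	// undo before freeing
 *	__free_pages(page, order);
 */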

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);

#else

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */