/* SPDX-License-Identifier: GPL-2.0 */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2005
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */
#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

/*
 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg event.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu {
	/* Local (CPU and cgroup) page state & events */
	long state[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[MEMCG_NR_STAT];
	unsigned long events_prev[NR_VM_EVENT_ITEMS];

	/* Cgroup1: threshold notifications & softlimit tree updates */
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long state[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[MEMCG_NR_STAT];
	unsigned long events_pending[NR_VM_EVENT_ITEMS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

struct lruvec_stat {
	long count[NR_VM_NODE_STAT_ITEMS];
};

struct batched_lruvec_stat {
	s32 count[NR_VM_NODE_STAT_ITEMS];
};

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to this memcg.
 */
struct shrinker_info {
	struct rcu_head rcu;
	atomic_long_t *nr_deferred;
	unsigned long *map;
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec lruvec;

	/*
	 * Legacy local VM stats. These are read with full precision by
	 * the cgroup1 interfaces, so they cannot use the batched (s32,
	 * delta-based) representation used for the subtree counters below.
	 */
	struct lruvec_stat __percpu *lruvec_stat_local;

	/* Subtree VM stats (batched updates) */
	struct batched_lruvec_stat __percpu *lruvec_stat_cpu;
	atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];

	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter iter;

	struct shrinker_info __rcu *shrinker_info;

	struct rb_node tree_node;	/* RB tree node */
	unsigned long usage_in_excess;	/* Set to the value by which
					 * the soft limit is exceeded */
	bool on_tree;
	struct mem_cgroup *memcg;	/* Back pointer, we cannot
					 * use container_of */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

enum memcg_kmem_state {
	KMEM_NONE,
	KMEM_ALLOCATED,
	KMEM_ONLINE,
};

#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name
#else
#define MEMCG_PADDING(name)
#endif

/*
 * Remember four most recently used foreign writeback records with dirty
 * pages in this cgroup. Inode sharing is expected to be uncommon and,
 * even if we miss an incoming foreign writeback, the existing records
 * are cleaned out after the foreign inodes are written back, so this
 * should be mostly fine.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign bdi */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list;
		struct rcu_head rcu;
	};
};

/*
 * The memory controller data structure. The memory controller controls
 * both page cache and RSS per cgroup. We would eventually like to
 * provide statistics based on the statistics developed by Rik van Riel
 * for clock-pro, to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all belonging tasks, had it kill one?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int swappiness;
	/* OOM-Killer disable */
	int oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into
	 * this mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t move_lock;
	unsigned long move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/* memory.stat */
	struct memcg_vmstats vmstats;

	/* memory.events */
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	unsigned long socket_pressure;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	int kmemcg_id;
	enum memcg_kmem_state kmem_state;
	struct obj_cgroup __rcu *objcg;
	struct list_head objcg_list;	/* list of inherited objcgs */
#endif

	MEMCG_PADDING(_pad2_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t moving_account;
	struct task_struct *move_lock_task;

	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	struct mem_cgroup_per_node *nodeinfo[];
};

/*
 * size of first charge trial. "32" comes from vmscan.c's SWAP_CLUSTER_MAX,
 * TODO: maybe necessary to use big numbers in big irons.
 */
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to an objcgs vector */
	MEMCG_DATA_OBJCGS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

static inline bool PageMemcgKmem(struct page *page);

/*
 * After the initialization objcg->memcg is always pointing at
 * a valid memcg, but can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}

/*
 * __page_memcg - get the memory cgroup associated with a non-kmem page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function assumes that the page is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some type of pages, e.g. slab pages or ex-slab pages or
 * kmem pages.
 */
static inline struct mem_cgroup *__page_memcg(struct page *page)
{
	unsigned long memcg_data = page->memcg_data;

	VM_BUG_ON_PAGE(PageSlab(page), page);
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page);
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * __page_objcg - get the object cgroup associated with a kmem page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the object cgroup associated with the page,
 * or NULL. This function assumes that the page is known to have an
 * associated object cgroup. It's not safe to call this function
 * against some type of pages, e.g. slab pages, ex-slab pages or
 * LRU pages.
 */
static inline struct obj_cgroup *__page_objcg(struct page *page)
{
	unsigned long memcg_data = page->memcg_data;

	VM_BUG_ON_PAGE(PageSlab(page), page);
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page);
	VM_BUG_ON_PAGE(!(memcg_data & MEMCG_DATA_KMEM), page);

	return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_memcg - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function assumes that the page is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some type of pages, e.g. slab pages or ex-slab pages.
 *
 * For a non-kmem page any of the following ensures page and memcg binding
 * stability:
 *
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem page a caller should hold an rcu read lock to protect memcg
 * associated with a kmem page from being released.
 */
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	if (PageMemcgKmem(page))
		return obj_cgroup_memcg(__page_objcg(page));
	else
		return __page_memcg(page);
}

/*
 * page_memcg_rcu - locklessly get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function assumes that the page is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some type of pages, e.g. slab pages or ex-slab pages.
 */
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	VM_BUG_ON_PAGE(PageSlab(page), page);
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_memcg_check - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function, unlike page_memcg(), can take any page
 * as an argument. It has to be used in cases when it's not known if
 * a page has an associated memory cgroup pointer or an object cgroups
 * vector or an object cgroup.
 *
 * Any of the following ensures page and memcg binding stability:
 *
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 */
static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	/*
	 * Because page->memcg_data might be changed asynchronously
	 * for slab pages, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJCGS)
		return NULL;

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * PageMemcgKmem - check if the page has MemcgKmem flag set
 * @page: a pointer to the page struct
 *
 * Checks if the page has MemcgKmem flag set. The caller must ensure
 * that the page has an associated memory cgroup. It's not safe to call
 * this function against some types of pages, e.g. slab pages.
 */
static inline bool PageMemcgKmem(struct page *page)
{
	VM_BUG_ON_PAGE(page->memcg_data & MEMCG_DATA_OBJCGS, page);
	return page->memcg_data & MEMCG_DATA_KMEM;
}

/*
 * page_objcgs - get the object cgroups vector associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the object cgroups vector associated with the page,
 * or NULL. This function assumes that the page is known to have an
 * associated object cgroups vector. It's not safe to call this function
 * against pages, which might have an associated memory cgroup: e.g.
 * kernel stack pages.
 */
static inline struct obj_cgroup **page_objcgs(struct page *page)
{
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS), page);
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_objcgs_check - get the object cgroups vector associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the object cgroups vector associated with the page,
 * or NULL. Unlike page_objcgs(), it's safe to call against pages which
 * aren't guaranteed to have an associated object cgroups vector.
 */
static inline struct obj_cgroup **page_objcgs_check(struct page *page)
{
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (!memcg_data || !(memcg_data & MEMCG_DATA_OBJCGS))
		return NULL;

	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

#else
static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline struct obj_cgroup **page_objcgs(struct page *page)
{
	return NULL;
}

static inline struct obj_cgroup **page_objcgs_check(struct page *page)
{
	return NULL;
}
#endif

static __always_inline bool memcg_stat_item_in_bytes(int idx)
{
	if (idx == MEMCG_PERCPU_B)
		return true;
	return vmstat_item_in_bytes(idx);
}
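
/*
 * MEMCG_PERCPU_B is byte-counted, as are the node stat items for which
 * vmstat_item_in_bytes() returns true (the *_B items); most other
 * counters above are tracked in pages.
 */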

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;

	if (mem_cgroup_disabled())
		return;

	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We are special casing this specific case here because
	 * mem_cgroup_calculate_protection is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim targets. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
	 * which would want to have effective values 0 for targeted reclaim
	 * but a different value for external reclaim.
	 *
	 * Example
	 * Let's have global and A's reclaim in parallel:
	 *  |
	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
	 *  |\
	 *  | C (low = 1G, usage = 2.5G)
	 *  B (low = 1G, usage = 0.5G)
	 *
	 * For the global reclaim
	 * A.elow = A.low
	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
	 * C.elow = min(C.usage, C.low)
	 *
	 * When A is reclaimed as a target, B and C keep the effective
	 * protection calculated above, but A itself must see zero
	 * protection, otherwise its own limit-triggered reclaim could
	 * never make progress. Hence the root == memcg bail-out below.
	 */
	if (root == memcg)
		return;

	*min = READ_ONCE(memcg->memory.emin);
	*low = READ_ONCE(memcg->memory.elow);
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection.
	 */
	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}
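
/*
 * A sketch of how reclaim consumes the protection helpers (mm/vmscan.c
 * does roughly this for each memcg it visits; names are illustrative):
 *
 *	mem_cgroup_calculate_protection(target_memcg, memcg);
 *	if (mem_cgroup_below_min(memcg))
 *		continue;	// hard protection, skip
 *	else if (mem_cgroup_below_low(memcg))
 *		...		// soft protection, skip unless reclaim
 *				// cannot make progress otherwise
 */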

int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
				  gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);

void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = memcg->nodeinfo[pgdat->node_id];
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 *
 * This function relies on page's memcg being stable.
 */
static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page)
{
	pg_data_t *pgdat = page_pgdat(page);
	struct mem_cgroup *memcg = page_memcg(page);

	VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page);
	return mem_cgroup_lruvec(memcg, pgdat);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *lock_page_lruvec(struct page *page);
struct lruvec *lock_page_lruvec_irq(struct page *page);
struct lruvec *lock_page_lruvec_irqsave(struct page *page,
					unsigned long *flags);
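
/*
 * Usage sketch (illustrative): callers manipulate a page's LRU state
 * under the per-lruvec lock, e.g.
 *
 *	lruvec = lock_page_lruvec_irq(page);
 *	... add/remove page on lruvec's LRU lists ...
 *	unlock_page_lruvec_irq(lruvec);
 *
 * The matching unlock helpers are defined later in this header.
 */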

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page);
#else
static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
				       unsigned long nr)
{
	percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)
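
/*
 * A typical hierarchy walk with mem_cgroup_iter() looks like this
 * (a sketch; mm/memcontrol.c wraps it in for_each_mem_cgroup_tree()):
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL); iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		...
 *		if (done) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */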
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

/*
 * For memory reclaim.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern bool cgroup_memory_noswap;
#endif

void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = atomic_long_read(&pn->lruvec_stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_kmem_state(p, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
					  enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (memcg)
		count_memcg_events(memcg, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (swap_event)
			cgroup_file_notify(&memcg->swap_events_file);
		else
			cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

void split_page_memcg(struct page *head, unsigned int nr);

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	return NULL;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
				    gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_swapin_charge_page(struct page *page,
			struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page)
{
	pg_data_t *pgdat = page_pgdat(page);

	return &pgdat->__lruvec;
}

static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *lock_page_lruvec(struct page *page)
{
	struct pglist_data *pgdat = page_pgdat(page);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *lock_page_lruvec_irq(struct page *page)
{
	struct pglist_data *pgdat = page_pgdat(page);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *lock_page_lruvec_irqsave(struct page *page,
		unsigned long *flagsp)
{
	struct pglist_data *pgdat = page_pgdat(page);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void lock_page_memcg(struct page *page)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void split_page_memcg(struct page *head, unsigned int nr)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}
#endif /* CONFIG_MEMCG */

static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, 1);
}

static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
	struct mem_cgroup *memcg;

	memcg = lruvec_memcg(lruvec);
	if (!memcg)
		return NULL;
	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
	spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
		unsigned long flags)
{
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Test requires a stable page->memcg binding, see page_memcg() */
static inline bool page_matches_lruvec(struct page *page, struct lruvec *lruvec)
{
	return lruvec_pgdat(lruvec) == page_pgdat(page) &&
	       lruvec_memcg(lruvec) == page_memcg(page);
}

/* Don't lock again iff page's lruvec locked */
static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (page_matches_lruvec(page, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return lock_page_lruvec_irq(page);
}

/* Don't lock again iff page's lruvec locked */
static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	if (locked_lruvec) {
		if (page_matches_lruvec(page, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	}

	return lock_page_lruvec_irqsave(page, flags);
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
						  struct bdi_writeback *wb)
{
	if (mem_cgroup_disabled())
		return;

	if (unlikely(&page_memcg(page)->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(page, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, memcg->socket_pressure))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}

int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
				    int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
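
/*
 * A sketch of byte-sized charging with an obj_cgroup (illustrative only;
 * the slab allocator is the canonical user of this API):
 *
 *	struct obj_cgroup *objcg = get_obj_cgroup_from_current();
 *
 *	if (objcg) {
 *		if (!obj_cgroup_charge(objcg, GFP_KERNEL, size))
 *			... stash objcg with the object; uncharge and
 *			    obj_cgroup_put() when the object is freed ...
 *		else
 *			obj_cgroup_put(objcg);
 *	}
 */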

extern struct static_key_false memcg_kmem_enabled_key;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * the slab_mutex must be held when looping through those caches
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_likely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_page(page, order);
}
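
/*
 * Most callers never invoke the charge/uncharge pair directly: passing
 * __GFP_ACCOUNT makes the page allocator do it, e.g. (a sketch):
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, order);
 */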

/*
 * A helper for accessing memcg's kmem_id, used for getting
 * corresponding LRU lists.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);

#else /* CONFIG_MEMCG_KMEM */
static inline bool mem_cgroup_kmem_disabled(void)
{
	return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */