/* SPDX-License-Identifier: GPL-2.0 */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2005
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */
#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

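/* Cgroup-specific page state, on top of universal node page state */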
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_NR_STAT,
};

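/*
 * Events delivered via memory.events and memory.events.local; the
 * MEMCG_SWAP_* entries are delivered via memory.swap.events instead.
 */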
enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

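/* Shared iteration state for cgroup-aware reclaim round trips */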
struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

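/*
 * The per-memcg page event counter is incremented at every pagein/pageout
 * (by the number of pages for THP) and is used to rate-limit periodic
 * housekeeping such as threshold notifications and soft-limit tree
 * updates, which is simpler and cheaper than consulting jiffies.
 */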
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu {
	/* Local (CPU and cgroup) page state & events */
	long state[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[MEMCG_NR_STAT];
	unsigned long events_prev[NR_VM_EVENT_ITEMS];

	/* Cgroup1: threshold notifications & softlimit tree updates */
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct memcg_vmstats {
	/* Aggregated (CPU and subtree) page state & events */
	long state[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[MEMCG_NR_STAT];
	unsigned long events_pending[NR_VM_EVENT_ITEMS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};
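/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to this memcg.
 */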
struct shrinker_info {
	struct rcu_head rcu;
	atomic_long_t *nr_deferred;
	unsigned long *map;
};

struct lruvec_stats_percpu {
	/* Local (CPU and cgroup) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Delta calculation for lockless upward propagation */
	long state_prev[NR_VM_NODE_STAT_ITEMS];
};

struct lruvec_stats {
	/* Aggregated (CPU and subtree) state */
	long state[NR_VM_NODE_STAT_ITEMS];

	/* Pending child counts during tree propagation */
	long state_pending[NR_VM_NODE_STAT_ITEMS];
};
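/*
 * per-node information in memory controller.
 */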
struct mem_cgroup_per_node {
	struct lruvec lruvec;

	struct lruvec_stats_percpu __percpu *lruvec_stats_percpu;
	struct lruvec_stats lruvec_stats;

	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter iter;

	struct shrinker_info __rcu *shrinker_info;

	struct rb_node tree_node;	/* RB tree node */
	unsigned long usage_in_excess;	/* Set to the value by which
					 * the soft limit is exceeded */
	bool on_tree;
	struct mem_cgroup *memcg;	/* Back pointer, we cannot
					 * use container_of */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

enum memcg_kmem_state {
	KMEM_NONE,
	KMEM_ALLOCATED,
	KMEM_ONLINE,
};

#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name
#else
#define MEMCG_PADDING(name)
#endif

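/*
 * Remember the most recent foreign writebacks carrying dirty pages of
 * this cgroup, so that repeated foreign use of an inode can be detected
 * and the inode switched to the cgroup that dominates its dirty pages.
 */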
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list;
		struct rcu_head rcu;
	};
};

/*
 * The memory controller data structure. The memory controller controls
 * both page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for
 * clock-pro, to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all belonging tasks, had it kill one?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool oom_lock;
	int under_oom;

	int swappiness;
	/* OOM-Killer disable */
	int oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into
	 * this mem_cgroup? And what type of charges should we move?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t move_lock;
	unsigned long move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/* memory.stat */
	struct memcg_vmstats vmstats;

	/* memory.events */
	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	unsigned long socket_pressure;

	/* Legacy tcp memory accounting */
	bool tcpmem_active;
	int tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	int kmemcg_id;
	enum memcg_kmem_state kmem_state;
	struct obj_cgroup __rcu *objcg;
	struct list_head objcg_list;	/* list of inherited objcgs */
#endif

	MEMCG_PADDING(_pad2_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t moving_account;
	struct task_struct *move_lock_task;

	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	struct mem_cgroup_per_node *nodeinfo[];
};
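/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers in big irons.
 */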
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to an objcgs vector */
	MEMCG_DATA_OBJCGS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

static inline bool PageMemcgKmem(struct page *page);

/*
 * After the initialization objcg->memcg is always pointing at
 * a valid memcg, but can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}

/*
 * __page_memcg - get the memory cgroup associated with a non-kmem page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function assumes that the page is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some type of pages, e.g. slab pages or ex-slab pages or
 * kmem pages.
 */
static inline struct mem_cgroup *__page_memcg(struct page *page)
{
	unsigned long memcg_data = page->memcg_data;

	VM_BUG_ON_PAGE(PageSlab(page), page);
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page);
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * __page_objcg - get the object cgroup associated with a kmem page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the object cgroup associated with the page,
 * or NULL. This function assumes that the page is known to have a
 * proper object cgroup pointer. It's not safe to call this function
 * against some type of pages, e.g. slab pages or ex-slab pages or
 * LRU pages.
 */
static inline struct obj_cgroup *__page_objcg(struct page *page)
{
	unsigned long memcg_data = page->memcg_data;

	VM_BUG_ON_PAGE(PageSlab(page), page);
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page);
	VM_BUG_ON_PAGE(!(memcg_data & MEMCG_DATA_KMEM), page);

	return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_memcg - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function assumes that the page is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some type of pages, e.g. slab pages or ex-slab pages.
 *
 * For a non-kmem page any of the following ensures page and memcg binding
 * stability:
 *
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem page a caller should hold an rcu read lock to protect memcg
 * associated with a kmem page from being released.
 */
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	if (PageMemcgKmem(page))
		return obj_cgroup_memcg(__page_objcg(page));
	else
		return __page_memcg(page);
}

/*
 * page_memcg_rcu - locklessly get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function assumes that the page is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some type of pages, e.g. slab pages or ex-slab pages.
 */
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	VM_BUG_ON_PAGE(PageSlab(page), page);
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_memcg_check - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function unlike page_memcg() can take any page
 * as an argument. It has to be used in cases when it's not known if a page
 * has an associated memory cgroup pointer or an object cgroups vector or
 * an object cgroup.
 *
 * For a non-kmem page any of the following ensures page and memcg binding
 * stability:
 *
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 *
 * For a kmem page a caller should hold an rcu read lock to protect memcg
 * associated with a kmem page from being released.
 */
static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	/*
	 * Because page->memcg_data might be changed asynchronously
	 * for slab pages, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJCGS)
		return NULL;

	if (memcg_data & MEMCG_DATA_KMEM) {
		struct obj_cgroup *objcg;

		objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
		return obj_cgroup_memcg(objcg);
	}

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * PageMemcgKmem - check if the page has MemcgKmem flag set
 * @page: a pointer to the page struct
 *
 * Checks if the page has MemcgKmem flag set. The caller must ensure
 * that the page has an associated memory cgroup. It's not safe to call
 * this function against some types of pages, e.g. slab pages.
 */
static inline bool PageMemcgKmem(struct page *page)
{
	VM_BUG_ON_PAGE(page->memcg_data & MEMCG_DATA_OBJCGS, page);
	return page->memcg_data & MEMCG_DATA_KMEM;
}

/*
 * page_objcgs - get the object cgroups vector associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the object cgroups vector associated with the page,
 * or NULL. This function assumes that the page is known to have an
 * associated object cgroups vector. It's not safe to call this function
 * against pages, which might have an associated memory cgroup: e.g.
 * kernel stack pages.
 */
static inline struct obj_cgroup **page_objcgs(struct page *page)
{
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS), page);
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

/*
 * page_objcgs_check - get the object cgroups vector associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the object cgroups vector associated with the page,
 * or NULL. This function is safe to use if the page can be directly
 * associated with a memory cgroup.
 */
static inline struct obj_cgroup **page_objcgs_check(struct page *page)
{
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (!memcg_data || !(memcg_data & MEMCG_DATA_OBJCGS))
		return NULL;

	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

#else
static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline struct obj_cgroup **page_objcgs(struct page *page)
{
	return NULL;
}

static inline struct obj_cgroup **page_objcgs_check(struct page *page)
{
	return NULL;
}
#endif

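/*
 * Illustrative only: a lockless lookup of a page's memcg pairs
 * page_memcg_rcu() with an RCU read-side critical section:
 *
 *	rcu_read_lock();
 *	memcg = page_memcg_rcu(page);
 *	if (memcg)
 *		... use memcg without sleeping ...
 *	rcu_read_unlock();
 */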
static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;

	if (mem_cgroup_disabled())
		return;

	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We are special casing this specific case here because effective
	 * protection values are only well defined for a single reclaim
	 * target: parallel reclaimers with different reclaim roots would
	 * otherwise see inconsistent effective values. This is especially
	 * a problem for tail memcgs (as they have pages on the LRU) which
	 * would want effective values of 0 for targeted reclaim but a
	 * different value for external reclaim.
	 */
	if (root == memcg)
		return;

	*min = READ_ONCE(memcg->memory.emin);
	*low = READ_ONCE(memcg->memory.elow);
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection.
	 */
	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}

int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
			gfp_t gfp_mask);
static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
				    gfp_t gfp_mask)
{
	if (mem_cgroup_disabled())
		return 0;
	return __mem_cgroup_charge(page, mm, gfp_mask);
}

int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
				  gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);

void __mem_cgroup_uncharge(struct page *page);
static inline void mem_cgroup_uncharge(struct page *page)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge(page);
}

void __mem_cgroup_uncharge_list(struct list_head *page_list);
static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_uncharge_list(page_list);
}

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
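
/*
 * Illustrative only: a typical caller charges a freshly allocated page
 * right after allocation and relies on the release path to uncharge it:
 *
 *	if (mem_cgroup_charge(page, mm, GFP_KERNEL))
 *		goto err_put_page;
 *	...
 *	mem_cgroup_uncharge(page);	(on the free/release path)
 */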

/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = memcg->nodeinfo[pgdat->node_id];
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 *
 * This function relies on page->mem_cgroup being stable.
 */
static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page)
{
	pg_data_t *pgdat = page_pgdat(page);
	struct mem_cgroup *memcg = page_memcg(page);

	VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page);
	return mem_cgroup_lruvec(memcg, pgdat);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct lruvec *lock_page_lruvec(struct page *page);
struct lruvec *lock_page_lruvec_irq(struct page *page);
struct lruvec *lock_page_lruvec_irqsave(struct page *page,
					unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page);
#else
static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_get_many(struct obj_cgroup *objcg,
				       unsigned long nr)
{
	percpu_ref_get_many(&objcg->refcnt, nr);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/*
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern bool cgroup_memory_noswap;
#endif

void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return READ_ONCE(memcg->vmstats.state[idx]);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(pn->lruvec_stats.state[idx]);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void mem_cgroup_flush_stats(void);

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_kmem_state(p, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
					  enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (memcg)
		count_memcg_events(memcg, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

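/*
 * Count a memory event and wake up any pollers. Local events are only
 * counted in the originating memcg; other events are also propagated up
 * the hierarchy (on cgroup2, unless the memory_localevents mount option
 * suppresses that).
 */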
static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (swap_event)
			cgroup_file_notify(&memcg->swap_events_file);
		else
			cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

void split_page_memcg(struct page *head, unsigned int nr);

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	return NULL;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline void mem_cgroup_protection(struct mem_cgroup *root,
					 struct mem_cgroup *memcg,
					 unsigned long *min,
					 unsigned long *low)
{
	*min = *low = 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
				    gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_swapin_charge_page(struct page *page,
			struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
	return 0;
}

static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page)
{
	pg_data_t *pgdat = page_pgdat(page);

	return &pgdat->__lruvec;
}

static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
{
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *lock_page_lruvec(struct page *page)
{
	struct pglist_data *pgdat = page_pgdat(page);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *lock_page_lruvec_irq(struct page *page)
{
	struct pglist_data *pgdat = page_pgdat(page);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *lock_page_lruvec_irqsave(struct page *page,
		unsigned long *flagsp)
{
	struct pglist_data *pgdat = page_pgdat(page);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline void lock_page_memcg(struct page *page)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void mem_cgroup_flush_stats(void)
{
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void split_page_memcg(struct page *head, unsigned int nr)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}
#endif /* CONFIG_MEMCG */

static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, 1);
}

static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
{
	__mod_lruvec_kmem_state(p, idx, -1);
}

static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
{
	struct mem_cgroup *memcg;

	memcg = lruvec_memcg(lruvec);
	if (!memcg)
		return NULL;
	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
}

static inline void unlock_page_lruvec(struct lruvec *lruvec)
{
	spin_unlock(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
{
	spin_unlock_irq(&lruvec->lru_lock);
}

static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
		unsigned long flags)
{
	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
}

/* Test requires a stable page->memcg binding, see page_memcg() */
static inline bool page_matches_lruvec(struct page *page, struct lruvec *lruvec)
{
	return lruvec_pgdat(lruvec) == page_pgdat(page) &&
	       lruvec_memcg(lruvec) == page_memcg(page);
}

/* Don't lock again iff page's lruvec locked */
static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
		struct lruvec *locked_lruvec)
{
	if (locked_lruvec) {
		if (page_matches_lruvec(page, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irq(locked_lruvec);
	}

	return lock_page_lruvec_irq(page);
}

/* Don't lock again iff page's lruvec locked */
static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
		struct lruvec *locked_lruvec, unsigned long *flags)
{
	if (locked_lruvec) {
		if (page_matches_lruvec(page, locked_lruvec))
			return locked_lruvec;

		unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
	}

	return lock_page_lruvec_irqsave(page, flags);
}
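
/*
 * Illustrative only: the relock helpers let a caller walk a mixed batch
 * of pages while only dropping/retaking the lruvec lock on cgroup or
 * node boundaries, e.g.:
 *
 *	struct lruvec *lruvec = NULL;
 *
 *	list_for_each_entry(page, list, lru) {
 *		lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
 *		... operate on page under lruvec->lru_lock ...
 *	}
 *	if (lruvec)
 *		unlock_page_lruvec_irqrestore(lruvec, flags);
 */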

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
						  struct bdi_writeback *wb)
{
	if (mem_cgroup_disabled())
		return;

	if (unlikely(&page_memcg(page)->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(page, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
			     gfp_t gfp_mask);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
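/*
 * Returns true if the socket's memcg (or any ancestor) is under memory
 * pressure: on cgroup1 via the tcpmem pressure flag, on cgroup2 via the
 * vmpressure-driven socket_pressure window.
 */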
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, memcg->socket_pressure))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}

int alloc_shrinker_info(struct mem_cgroup *memcg);
void free_shrinker_info(struct mem_cgroup *memcg);
void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id);
void reparent_shrinker_deferred(struct mem_cgroup *memcg);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void set_shrinker_bit(struct mem_cgroup *memcg,
				    int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_kmem_enabled_key;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * the slab_mutex must be held when looping through those caches
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_likely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_page(page, order);
}

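/*
 * Illustrative only: a caller implementing __GFP_ACCOUNT semantics by
 * hand would pair the two helpers around the page allocation:
 *
 *	page = alloc_pages(gfp, order);
 *	if (page && memcg_kmem_charge_page(page, gfp, order)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *	...
 *	memcg_kmem_uncharge_page(page, order);
 *	__free_pages(page, order);
 */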

/*
 * A helper for accessing memcg's kmem_id, used for getting
 * corresponding LRU lists.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);

#else
static inline bool mem_cgroup_kmem_disabled(void)
{
	return true;
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */