/* SPDX-License-Identifier: GPL-2.0 */
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 */
#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct obj_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

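/* Cgroup-specific page state, on top of universal node page state */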
enum memcg_stat_item {
	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
	MEMCG_SOCK,
	MEMCG_PERCPU_B,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_SWAP_HIGH,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	refcount_t ref;
};

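/*
 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg event.
 */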
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_NTARGETS,
};

struct memcg_vmstats_percpu {
	long stat[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

struct lruvec_stat {
	long count[NR_VM_NODE_STAT_ITEMS];
};

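/*
 * Per-node bitmap of shrinkers that may have objects charged to this
 * memcg; lets memcg-aware slab reclaim skip shrinkers with nothing to
 * do. Freed via the embedded RCU head.
 */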
struct memcg_shrinker_map {
	struct rcu_head rcu;
	unsigned long map[];
};

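/*
 * per-node information in memory controller.
 */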
struct mem_cgroup_per_node {
	struct lruvec		lruvec;

	/* Legacy local VM stats */
	struct lruvec_stat __percpu *lruvec_stat_local;

	/* Subtree VM stats (batched updates) */
	struct lruvec_stat __percpu *lruvec_stat_cpu;
	atomic_long_t		lruvec_stat[NR_VM_NODE_STAT_ITEMS];

	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter	iter;

	struct memcg_shrinker_map __rcu	*shrinker_map;

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

enum memcg_kmem_state {
	KMEM_NONE,
	KMEM_ALLOCATED,
	KMEM_ONLINE,
};

#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name;
#else
#define MEMCG_PADDING(name)
#endif

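/*
 * Remember four most recent foreign writebacks with dirty pages in this
 * cgroup.  Inode sharing is expected to be uncommon and, even if we miss
 * one in a given round, we're likely to catch it later if it keeps
 * foreign-dirtying, so a fairly low count should be enough.
 *
 * See mem_cgroup_track_foreign_dirty_slowpath() for details.
 */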
#define MEMCG_CGWB_FRN_CNT	4

struct memcg_cgwb_frn {
	u64 bdi_id;			/* bdi->id of the foreign inode */
	int memcg_id;			/* memcg->css.id of the foreign inode */
	u64 at;				/* jiffies_64 at the time of dirtying */
	struct wb_completion done;	/* tracks in-flight foreign writebacks */
};

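/*
 * Bucket for arbitrarily byte-sized objects charged to a memory
 * cgroup. The bucket can be reparented in one piece when the cgroup
 * is destroyed, without having to round up the individual references
 * of all live memory objects in the wild.
 */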
struct obj_cgroup {
	struct percpu_ref refcnt;
	struct mem_cgroup *memcg;
	atomic_t nr_charged_bytes;
	union {
		struct list_head list;
		struct rcu_head rcu;
	};
};

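/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */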
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;		/* Both v1 & v2 */

	union {
		struct page_counter swap;	/* v2 only */
		struct page_counter memsw;	/* v1 only */
	};

	/* Legacy consumer-oriented counters */
	struct page_counter kmem;		/* v1 only */
	struct page_counter tcpmem;		/* v1 only */

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the OOM killer kill all belonging tasks, had it kill one?
	 */
	bool oom_group;

	/* protected by memcg_oom_lock */
	bool		oom_lock;
	int		under_oom;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* memory.events and memory.events.local */
	struct cgroup_file events_file;
	struct cgroup_file events_local_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t		move_lock;
	unsigned long		move_lock_flags;

	MEMCG_PADDING(_pad1_);

	atomic_long_t		vmstats[MEMCG_NR_STAT];
	atomic_long_t		vmevents[NR_VM_EVENT_ITEMS];

	/* memory.events */
	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];
	atomic_long_t		memory_events_local[MEMCG_NR_MEMORY_EVENTS];

	unsigned long		socket_pressure;

	/* Legacy tcp memory accounting */
	bool			tcpmem_active;
	int			tcpmem_pressure;

#ifdef CONFIG_MEMCG_KMEM
	int kmemcg_id;
	enum memcg_kmem_state kmem_state;
	struct obj_cgroup __rcu *objcg;
	struct list_head objcg_list;	/* list of inherited objcgs */
#endif

	MEMCG_PADDING(_pad2_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t		moving_account;
	struct task_struct	*move_lock_task;

	/* Legacy local VM stats and events */
	struct memcg_vmstats_percpu __percpu *vmstats_local;

	/* Subtree VM stats and events (batched updates) */
	struct memcg_vmstats_percpu __percpu *vmstats_percpu;

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
#endif

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

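/*
 * size of first charge trial. "32" comes from vmscan.c's SWAP_CLUSTER_MAX
 * TODO: maybe necessary to use big numbers in big irons.
 */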
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

enum page_memcg_data_flags {
	/* page->memcg_data is a pointer to an objcgs vector */
	MEMCG_DATA_OBJCGS = (1UL << 0),
	/* page has been accounted as a non-slab kernel page */
	MEMCG_DATA_KMEM = (1UL << 1),
	/* the next bit after the last actual flag */
	__NR_MEMCG_DATA_FLAGS = (1UL << 2),
};

#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1)

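/*
 * page_memcg - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function assumes that the page is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some type of pages, e.g. slab pages or ex-slab pages.
 *
 * Any of the following ensures page and memcg binding stability:
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 */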
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	unsigned long memcg_data = page->memcg_data;

	VM_BUG_ON_PAGE(PageSlab(page), page);
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_OBJCGS, page);

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

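/*
 * page_memcg_rcu - locklessly get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function assumes that the page is known to have a
 * proper memory cgroup pointer. It's not safe to call this function
 * against some type of pages, e.g. slab pages or ex-slab pages.
 */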
static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	VM_BUG_ON_PAGE(PageSlab(page), page);
	WARN_ON_ONCE(!rcu_read_lock_held());

	return (struct mem_cgroup *)(READ_ONCE(page->memcg_data) &
				     ~MEMCG_DATA_FLAGS_MASK);
}

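/*
 * page_memcg_check - get the memory cgroup associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the memory cgroup associated with the page,
 * or NULL. This function unlike page_memcg() can take any page
 * as an argument. It has to be used in cases when it's not known if a page
 * has an associated memory cgroup pointer or an object cgroups vector.
 *
 * Any of the following ensures page and memcg binding stability:
 * - the page lock
 * - LRU isolation
 * - lock_page_memcg()
 * - exclusive reference
 */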
static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	/*
	 * Because page->memcg_data might be changed asynchronously
	 * for slab pages, READ_ONCE() should be used here.
	 */
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (memcg_data & MEMCG_DATA_OBJCGS)
		return NULL;

	return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

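/*
 * PageMemcgKmem - check if the page has MemcgKmem flag set
 * @page: a pointer to the page struct
 *
 * Checks if the page has MemcgKmem flag set. The caller must ensure that
 * the page has an associated memory cgroup. It's not safe to call this
 * function against some types of pages, e.g. slab pages.
 */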
static inline bool PageMemcgKmem(struct page *page)
{
	VM_BUG_ON_PAGE(page->memcg_data & MEMCG_DATA_OBJCGS, page);
	return page->memcg_data & MEMCG_DATA_KMEM;
}

#ifdef CONFIG_MEMCG_KMEM

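/*
 * page_objcgs - get the object cgroups vector associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the object cgroups vector associated with the page,
 * or NULL. This function assumes that the page is known to have an
 * associated object cgroups vector. It's not safe to call this function
 * against pages, which might have an associated memory cgroup: e.g.
 * kernel stack pages.
 */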
static inline struct obj_cgroup **page_objcgs(struct page *page)
{
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS), page);
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

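/*
 * page_objcgs_check - get the object cgroups vector associated with a page
 * @page: a pointer to the page struct
 *
 * Returns a pointer to the object cgroups vector associated with the page,
 * or NULL. This function is safe to use if the page can be directly associated
 * with a memory cgroup.
 */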
static inline struct obj_cgroup **page_objcgs_check(struct page *page)
{
	unsigned long memcg_data = READ_ONCE(page->memcg_data);

	if (!memcg_data || !(memcg_data & MEMCG_DATA_OBJCGS))
		return NULL;

	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, page);

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

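/*
 * set_page_objcgs - associate a page with a object cgroups vector
 * @page: a pointer to the page struct
 * @objcgs: a pointer to the object cgroups vector
 *
 * Atomically associates a page with a vector of object cgroups.
 */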
static inline bool set_page_objcgs(struct page *page,
					struct obj_cgroup **objcgs)
{
	return !cmpxchg(&page->memcg_data, 0, (unsigned long)objcgs |
			MEMCG_DATA_OBJCGS);
}
#else
static inline struct obj_cgroup **page_objcgs(struct page *page)
{
	return NULL;
}

static inline struct obj_cgroup **page_objcgs_check(struct page *page)
{
	return NULL;
}

static inline bool set_page_objcgs(struct page *page,
					struct obj_cgroup **objcgs)
{
	return true;
}
#endif

static __always_inline bool memcg_stat_item_in_bytes(int idx)
{
	if (idx == MEMCG_PERCPU_B)
		return true;
	return vmstat_item_in_bytes(idx);
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
						  struct mem_cgroup *memcg,
						  bool in_low_reclaim)
{
	if (mem_cgroup_disabled())
		return 0;

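	/*
	 * There is no reclaim protection applied to a targeted reclaim.
	 * We are special casing this specific case here because
	 * mem_cgroup_protected calculation is not robust enough to keep
	 * the protection invariant for calculated effective values for
	 * parallel reclaimers with different reclaim target. This is
	 * especially a problem for tail memcgs (as they have pages on LRU)
	 * which would want to have effective values 0 for targeted reclaim
	 * but a different value for external reclaim.
	 *
	 * Example
	 * Let's have global and A's reclaim in parallel:
	 *  |
	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
	 *  |\
	 *  | C (low = 1G, usage = 2.5G)
	 *  B (low = 1G, usage = 0.5G)
	 *
	 * For the global reclaim
	 * A.elow = A.low
	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
	 * C.elow = min(C.usage, C.low)
	 *
	 * With the effective values resetting we have A reclaim
	 * A.elow = 0
	 * B.elow = B.low
	 * C.elow = C.low
	 *
	 * If the global reclaim races with A's reclaim then
	 * B.elow = C.elow = 0 because children_low_usage > A.elow
	 * is possible and reclaiming B would be violating the protection.
	 */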
	if (root == memcg)
		return 0;

	if (in_low_reclaim)
		return READ_ONCE(memcg->memory.emin);

	return max(READ_ONCE(memcg->memory.emin),
		   READ_ONCE(memcg->memory.elow));
}

void mem_cgroup_calculate_protection(struct mem_cgroup *root,
				     struct mem_cgroup *memcg);

static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
{
	/*
	 * The root memcg doesn't account charges, and doesn't support
	 * protection.
	 */
	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.elow) >=
		page_counter_read(&memcg->memory);
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	if (!mem_cgroup_supports_protection(memcg))
		return false;

	return READ_ONCE(memcg->memory.emin) >=
		page_counter_read(&memcg->memory);
}

int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);

void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);

static inline struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
	return memcg->nodeinfo[nid];
}

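/**
 * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 * @pgdat: pglist_data
 *
 * Returns the lru list vector holding pages for a given @memcg &
 * @pgdat combination. This can be the node lruvec, if the memory
 * controller is disabled.
 */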
static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->__lruvec;
		goto out;
	}

	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

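/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @pgdat: pgdat of the page
 *
 * This function relies on page->mem_cgroup being stable.
 */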
static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct pglist_data *pgdat)
{
	struct mem_cgroup *memcg = page_memcg(page);

	VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page);
	return mem_cgroup_lruvec(memcg, pgdat);
}

static inline bool lruvec_holds_page_lru_lock(struct page *page,
					      struct lruvec *lruvec)
{
	pg_data_t *pgdat = page_pgdat(page);
	const struct mem_cgroup *memcg;
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return lruvec == &pgdat->__lruvec;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	memcg = page_memcg(page) ? : root_mem_cgroup;

	return lruvec->pgdat == pgdat && mz->memcg == memcg;
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);

struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);

struct lruvec *lock_page_lruvec(struct page *page);
struct lruvec *lock_page_lruvec_irq(struct page *page);
struct lruvec *lock_page_lruvec_irqsave(struct page *page,
					unsigned long *flags);

#ifdef CONFIG_DEBUG_VM
void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page);
#else
static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
{
}
#endif

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
{
	return percpu_ref_tryget(&objcg->refcnt);
}

static inline void obj_cgroup_get(struct obj_cgroup *objcg)
{
	percpu_ref_get(&objcg->refcnt);
}

static inline void obj_cgroup_put(struct obj_cgroup *objcg)
{
	percpu_ref_put(&objcg->refcnt);
}

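/*
 * After the initialization objcg->memcg is always pointing at
 * a valid memcg, but can be atomically swapped to the parent memcg.
 *
 * The caller must ensure that the returned memcg won't be released:
 * e.g. acquire the rcu_read_lock or css_set_lock.
 */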
static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
{
	return READ_ONCE(objcg->memcg);
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
	if (memcg)
		css_put(&memcg->css);
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return mem_cgroup_from_css(seq_css(m));
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

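/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root.
 */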
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int zid, int nr_pages);

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
					   enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

unsigned long mem_cgroup_size(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
				  struct task_struct *p);

void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);

static inline void mem_cgroup_enter_user_fault(void)
{
	WARN_ON(current->in_user_fault);
	current->in_user_fault = 1;
}

static inline void mem_cgroup_exit_user_fault(void)
{
	WARN_ON(!current->in_user_fault);
	current->in_user_fault = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);
struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
					    struct mem_cgroup *oom_domain);
void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);

#ifdef CONFIG_MEMCG_SWAP
extern bool cgroup_memory_noswap;
#endif

struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);

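/*
 * idx can be of type enum memcg_stat_item or node_stat_item.
 * Keep in sync with memcg_exact_page_state().
 */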
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	long x = atomic_long_read(&memcg->vmstats[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
						   int idx)
{
	long x = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = atomic_long_read(&pn->lruvec_stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x = 0;
	int cpu;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for_each_possible_cpu(cpu)
		x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
			      int val);
void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val);

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_kmem_state(p, idx, val);
	local_irq_restore(flags);
}

static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
					  enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
			  unsigned long count);

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	struct mem_cgroup *memcg = page_memcg(page);

	if (memcg)
		count_memcg_events(memcg, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
			  event == MEMCG_SWAP_FAIL;

	atomic_long_inc(&memcg->memory_events_local[event]);
	if (!swap_event)
		cgroup_file_notify(&memcg->events_local_file);

	do {
		atomic_long_inc(&memcg->memory_events[event]);
		if (swap_event)
			cgroup_file_notify(&memcg->swap_events_file);
		else
			cgroup_file_notify(&memcg->events_file);

		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
			break;
		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
			break;
	} while ((memcg = parent_mem_cgroup(memcg)) &&
		 !mem_cgroup_is_root(memcg));
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

struct mem_cgroup;

static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return NULL;
}

static inline struct mem_cgroup *page_memcg_check(struct page *page)
{
	return NULL;
}

static inline bool PageMemcgKmem(struct page *page)
{
	return false;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return true;
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root,
						  struct mem_cgroup *memcg,
						  bool in_low_reclaim)
{
	return 0;
}

static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
						   struct mem_cgroup *memcg)
{
}

static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
{
	return false;
}

static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
{
	return false;
}

static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
				    gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
					       struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct pglist_data *pgdat)
{
	return &pgdat->__lruvec;
}

static inline bool lruvec_holds_page_lru_lock(struct page *page,
					      struct lruvec *lruvec)
{
	pg_data_t *pgdat = page_pgdat(page);

	return lruvec == &pgdat->__lruvec;
}

static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline void mem_cgroup_put(struct mem_cgroup *memcg)
{
}

static inline struct lruvec *lock_page_lruvec(struct page *page)
{
	struct pglist_data *pgdat = page_pgdat(page);

	spin_lock(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *lock_page_lruvec_irq(struct page *page)
{
	struct pglist_data *pgdat = page_pgdat(page);

	spin_lock_irq(&pgdat->__lruvec.lru_lock);
	return &pgdat->__lruvec;
}

static inline struct lruvec *lock_page_lruvec_irqsave(struct page *page,
						      unsigned long *flagsp)
{
	struct pglist_data *pgdat = page_pgdat(page);

	spin_lock_irqsave(&pgdat->__lruvec.lru_lock, *flagsp);
	return &pgdat->__lruvec;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);

	return NULL;
}

static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
{
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void
mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
{
}

static inline struct mem_cgroup *lock_page_memcg(struct page *page)
{
	return NULL;
}

static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_enter_user_fault(void)
{
}

static inline void mem_cgroup_exit_user_fault(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline struct mem_cgroup *mem_cgroup_get_oom_group(
	struct task_struct *victim, struct mem_cgroup *oom_domain)
{
	return NULL;
}

static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
{
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
	return 0;
}

static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
						   int idx)
{
	return 0;
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
						    enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
					    enum node_stat_item idx, int val)
{
}

static inline void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					   int val)
{
	struct page *page = virt_to_head_page(p);

	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_kmem_state(void *p, enum node_stat_item idx,
					 int val)
{
	struct page *page = virt_to_head_page(p);

	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
{
}
#endif /* CONFIG_MEMCG */
1449
1450static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx)
1451{
1452 __mod_lruvec_kmem_state(p, idx, 1);
1453}
1454
1455static inline void __dec_lruvec_kmem_state(void *p, enum node_stat_item idx)
1456{
1457 __mod_lruvec_kmem_state(p, idx, -1);
1458}
1459
1460static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
1461{
1462 struct mem_cgroup *memcg;
1463
1464 memcg = lruvec_memcg(lruvec);
1465 if (!memcg)
1466 return NULL;
1467 memcg = parent_mem_cgroup(memcg);
1468 if (!memcg)
1469 return NULL;
1470 return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
1471}
1472
1473static inline void unlock_page_lruvec(struct lruvec *lruvec)
1474{
1475 spin_unlock(&lruvec->lru_lock);
1476}
1477
1478static inline void unlock_page_lruvec_irq(struct lruvec *lruvec)
1479{
1480 spin_unlock_irq(&lruvec->lru_lock);
1481}
1482
1483static inline void unlock_page_lruvec_irqrestore(struct lruvec *lruvec,
1484 unsigned long flags)
1485{
1486 spin_unlock_irqrestore(&lruvec->lru_lock, flags);
1487}
1488
1489
1490static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
1491 struct lruvec *locked_lruvec)
1492{
1493 if (locked_lruvec) {
1494 if (lruvec_holds_page_lru_lock(page, locked_lruvec))
1495 return locked_lruvec;
1496
1497 unlock_page_lruvec_irq(locked_lruvec);
1498 }
1499
1500 return lock_page_lruvec_irq(page);
1501}
1502
1503
1504static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
1505 struct lruvec *locked_lruvec, unsigned long *flags)
1506{
1507 if (locked_lruvec) {
1508 if (lruvec_holds_page_lru_lock(page, locked_lruvec))
1509 return locked_lruvec;
1510
1511 unlock_page_lruvec_irqrestore(locked_lruvec, *flags);
1512 }
1513
1514 return lock_page_lruvec_irqsave(page, flags);
1515}
1516
#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
					     struct bdi_writeback *wb);

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
						  struct bdi_writeback *wb)
{
	if (mem_cgroup_disabled())
		return;

	if (unlikely(&page_memcg(page)->css != wb->memcg_css))
		mem_cgroup_track_foreign_dirty_slowpath(page, wb);
}

void mem_cgroup_flush_foreign(struct bdi_writeback *wb);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

static inline void mem_cgroup_track_foreign_dirty(struct page *page,
						  struct bdi_writeback *wb)
{
}

static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, memcg->socket_pressure))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}

extern int memcg_expand_shrinker_maps(int new_id);

extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
				   int nid, int shrinker_id);
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
static inline void mem_cgroup_sk_free(struct sock *sk) { };
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}

static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
					  int nid, int shrinker_id)
{
}
#endif

#ifdef CONFIG_MEMCG_KMEM
int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
			unsigned int nr_pages);
void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);

struct obj_cgroup *get_obj_cgroup_from_current(void);

int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);

extern struct static_key_false memcg_kmem_enabled_key;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

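/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * the slab_mutex must be held when looping through those caches
 */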
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_likely(&memcg_kmem_enabled_key);
}

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	if (memcg_kmem_enabled())
		return __memcg_kmem_charge_page(page, gfp, order);
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_page(page, order);
}

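/*
 * helper for accessing a memcg's index. It will be used as an index in the
 * child cache array in kmem_cache, and also to derive its name. This function
 * will return -1 when this is not a kmem-limited memcg.
 */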
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

struct mem_cgroup *mem_cgroup_from_obj(void *p);

#else

static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					 int order)
{
	return 0;
}

static inline void memcg_kmem_uncharge_page(struct page *page, int order)
{
}

static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
					   int order)
{
	return 0;
}

static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}

#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
{
	return NULL;
}

#endif /* CONFIG_MEMCG_KMEM */

#endif /* _LINUX_MEMCONTROL_H */