/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>
#include <linux/page_counter.h>
#include <linux/vmpressure.h>
#include <linux/eventfd.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>

struct mem_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Cgroup-specific page state, on top of universal node page state */
enum memcg_stat_item {
	MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
	MEMCG_RSS,
	MEMCG_RSS_HUGE,
	MEMCG_SWAP,
	MEMCG_SOCK,
	/* XXX: why are these zone and not node counters? */
	MEMCG_KERNEL_STACK_KB,
	MEMCG_NR_STAT,
};

enum memcg_memory_event {
	MEMCG_LOW,
	MEMCG_HIGH,
	MEMCG_MAX,
	MEMCG_OOM,
	MEMCG_OOM_KILL,
	MEMCG_SWAP_MAX,
	MEMCG_SWAP_FAIL,
	MEMCG_NR_MEMORY_EVENTS,
};

enum mem_cgroup_protection {
	MEMCG_PROT_NONE,
	MEMCG_PROT_LOW,
	MEMCG_PROT_MIN,
};

struct mem_cgroup_reclaim_cookie {
	pg_data_t *pgdat;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG

#define MEM_CGROUP_ID_SHIFT	16
#define MEM_CGROUP_ID_MAX	USHRT_MAX

struct mem_cgroup_id {
	int id;
	atomic_t ref;
};

/*
 * Per memcg event counter is incremented at every pagein/pageout. With THP,
 * it will be incremented by the number of pages. This counter is used
 * to trigger some periodic events. This is straightforward and better
 * than using jiffies etc. to handle periodic memcg event.
 */
enum mem_cgroup_events_target {
	MEM_CGROUP_TARGET_THRESH,
	MEM_CGROUP_TARGET_SOFTLIMIT,
	MEM_CGROUP_TARGET_NUMAINFO,
	MEM_CGROUP_NTARGETS,
};

struct mem_cgroup_stat_cpu {
	long count[MEMCG_NR_STAT];
	unsigned long events[NR_VM_EVENT_ITEMS];
	unsigned long nr_page_events;
	unsigned long targets[MEM_CGROUP_NTARGETS];
};

struct mem_cgroup_reclaim_iter {
	struct mem_cgroup *position;
	/* scan generation, increased every round-trip */
	unsigned int generation;
};

struct lruvec_stat {
	long count[NR_VM_NODE_STAT_ITEMS];
};

/*
 * per-node information in memory controller.
 */
struct mem_cgroup_per_node {
	struct lruvec		lruvec;

	struct lruvec_stat __percpu *lruvec_stat_cpu;
	atomic_long_t		lruvec_stat[NR_VM_NODE_STAT_ITEMS];

	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];

	struct mem_cgroup_reclaim_iter	iter[DEF_PRIORITY + 1];

	struct rb_node		tree_node;	/* RB tree node */
	unsigned long		usage_in_excess;/* Set to the value by which */
						/* the soft limit is exceeded*/
	bool			on_tree;
	bool			congested;	/* memcg has many dirty pages */
						/* backed by a congested BDI */

	struct mem_cgroup	*memcg;		/* Back pointer, we cannot */
						/* use container_of	   */
};

struct mem_cgroup_threshold {
	struct eventfd_ctx *eventfd;
	unsigned long threshold;
};

/* For threshold */
struct mem_cgroup_threshold_ary {
	/* An array index points to threshold just below or equal to usage. */
	int current_threshold;
	/* Size of entries[] */
	unsigned int size;
	/* Array of thresholds */
	struct mem_cgroup_threshold entries[0];
};

struct mem_cgroup_thresholds {
	/* Primary thresholds array */
	struct mem_cgroup_threshold_ary *primary;
	/*
	 * Spare threshold array.
	 * This is needed to make mem_cgroup_unregister_event() "never fail".
	 * It must be able to store at least primary->size - 1 entries.
	 */
	struct mem_cgroup_threshold_ary *spare;
};

enum memcg_kmem_state {
	KMEM_NONE,
	KMEM_ALLOCATED,
	KMEM_ONLINE,
};

#if defined(CONFIG_SMP)
struct memcg_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define MEMCG_PADDING(name)	struct memcg_padding name;
#else
#define MEMCG_PADDING(name)
#endif

/*
 * The memory controller data structure. The memory controller controls both
 * page cache and RSS per cgroup. We would eventually like to provide
 * statistics based on the statistics developed by Rik van Riel for clock-pro,
 * to help the administrator determine what knobs to tune.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;

	/* Private memcg ID. Used to ID objects that outlive the cgroup */
	struct mem_cgroup_id id;

	/* Accounted resources */
	struct page_counter memory;
	struct page_counter swap;

	/* Legacy consumer-oriented counters */
	struct page_counter memsw;
	struct page_counter kmem;
	struct page_counter tcpmem;

	/* Upper bound of normal memory consumption range */
	unsigned long high;

	/* Range enforcement for interrupt charges */
	struct work_struct high_work;

	unsigned long soft_limit;

	/* vmpressure notifications */
	struct vmpressure vmpressure;

	/*
	 * Should the accounting and control be hierarchical, per subtree?
	 */
	bool use_hierarchy;

	/* protected by memcg_oom_lock */
	bool		oom_lock;
	int		under_oom;

	int	swappiness;
	/* OOM-Killer disable */
	int		oom_kill_disable;

	/* memory.events */
	struct cgroup_file events_file;

	/* handle for "memory.swap.events" */
	struct cgroup_file swap_events_file;

	/* protect arrays of thresholds */
	struct mutex thresholds_lock;

	/* thresholds for memory usage. RCU-protected */
	struct mem_cgroup_thresholds thresholds;

	/* thresholds for mem+swap usage. RCU-protected */
	struct mem_cgroup_thresholds memsw_thresholds;

	/* For oom notifier event fd */
	struct list_head oom_notify;

	/*
	 * Should we move charges of a task when a task is moved into this
	 * mem_cgroup ? And what type of charges should we move ?
	 */
	unsigned long move_charge_at_immigrate;
	/* taken only while moving_account > 0 */
	spinlock_t		move_lock;
	unsigned long		move_lock_flags;

	MEMCG_PADDING(_pad1_);

	/*
	 * set > 0 if pages under this cgroup are moving to other cgroup.
	 */
	atomic_t		moving_account;
	struct task_struct	*move_lock_task;

	/* per-cpu statistics, flushed into the atomic counters below */
	struct mem_cgroup_stat_cpu __percpu *stat_cpu;

	MEMCG_PADDING(_pad2_);

	atomic_long_t		stat[MEMCG_NR_STAT];
	atomic_long_t		events[NR_VM_EVENT_ITEMS];
	atomic_long_t		memory_events[MEMCG_NR_MEMORY_EVENTS];

	unsigned long		socket_pressure;

	/* Legacy tcp memory accounting */
	bool			tcpmem_active;
	int			tcpmem_pressure;

#ifndef CONFIG_SLOB
	/* Index in the kmem_cache->memcg_params.memcg_caches array */
	int kmemcg_id;
	enum memcg_kmem_state kmem_state;
	struct list_head kmem_caches;
#endif

	int last_scanned_node;
#if MAX_NUMNODES > 1
	nodemask_t	scan_nodes;
	atomic_t	numainfo_events;
	atomic_t	numainfo_updating;
#endif

#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head cgwb_list;
	struct wb_domain cgwb_domain;
#endif

	/* List of events which userspace want to receive */
	struct list_head event_list;
	spinlock_t event_list_lock;

	struct mem_cgroup_per_node *nodeinfo[0];
	/* WARNING: nodeinfo must be the last member here */
};

/*
 * size of first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: maybe necessary to use big numbers in big irons.
 */
#define MEMCG_CHARGE_BATCH 32U

extern struct mem_cgroup *root_mem_cgroup;

static inline bool mem_cgroup_disabled(void)
{
	return !cgroup_subsys_enabled(memory_cgrp_subsys);
}

enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
						struct mem_cgroup *memcg);

int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare, bool compound);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
			      bool compound);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);
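
/*
 * Typical charge protocol for a newly allocated page, as an illustrative
 * sketch (the real call sites live in mm/, e.g. the page cache and anon
 * fault paths; the local names here are hypothetical):
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge(page, mm, gfp_mask, &memcg, false))
 *		return -ENOMEM;		// charge reserved, may have reclaimed
 *	...				// install page (page table/page cache)
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 *	// or, if installing the page failed:
 *	mem_cgroup_cancel_charge(page, memcg, false);
 */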

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);

static struct mem_cgroup_per_node *
mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
{
	return memcg->nodeinfo[nid];
}

/**
 * mem_cgroup_lruvec - get the lru list vector for a node or a memcg
 * @pgdat: node whose lruvec to look up
 * @memcg: memcg of the wanted lruvec
 *
 * Returns the lru list vector holding pages for a given @pgdat or a
 * given @memcg. This can be the node lruvec, if the memory controller
 * is disabled.
 */
static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
				struct mem_cgroup *memcg)
{
	struct mem_cgroup_per_node *mz;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = node_lruvec(pgdat);
		goto out;
	}

	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct mem_cgroup, css) : NULL;
}

#define mem_cgroup_from_counter(counter, member)	\
	container_of(counter, struct mem_cgroup, member)

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
int mem_cgroup_scan_tasks(struct mem_cgroup *,
			  int (*)(struct task_struct *, void *), void *);
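
/*
 * Illustrative hierarchy walk (this mirrors how reclaim drives the
 * iterator): mem_cgroup_iter() returns a referenced memcg and releases
 * the reference held on the previous position, so a completed walk
 * cleans up after itself; breaking out early must call
 * mem_cgroup_iter_break() to drop the last reference.
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL);
 *	     iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		if (should_stop) {	// hypothetical condition
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */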

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return 0;

	return memcg->id.id;
}
struct mem_cgroup *mem_cgroup_from_id(unsigned short id);

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	struct mem_cgroup_per_node *mz;

	if (mem_cgroup_disabled())
		return NULL;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->memcg;
}

/**
 * parent_mem_cgroup - find the accounting parent of a memcg
 * @memcg: memcg whose parent to find
 *
 * Returns the parent memcg, or NULL if this is the root or the memory
 * controller is in legacy no-hierarchy mode.
 */
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
	if (!memcg->memory.parent)
		return NULL;
	return mem_cgroup_from_counter(memcg->memory.parent, memory);
}

static inline bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
					    struct mem_cgroup *root)
{
	if (root == memcg)
		return true;
	if (!root->use_hierarchy)
		return false;
	return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup);
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match = false;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (task_memcg)
		match = mem_cgroup_is_descendant(task_memcg, memcg);
	rcu_read_unlock();
	return match;
}

struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page);
ino_t page_cgroup_ino(struct page *page);

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return true;
	return !!(memcg->css.flags & CSS_ONLINE);
}

/*
 * For memory reclaim.
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);

void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
		int zid, int nr_pages);

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask);

static inline
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	struct mem_cgroup_per_node *mz;
	unsigned long nr_pages = 0;
	int zid;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		nr_pages += mz->lru_zone_size[zid][lru];
	return nr_pages;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	struct mem_cgroup_per_node *mz;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	return mz->lru_zone_size[zone_idx][lru];
}

void mem_cgroup_handle_over_high(void);

unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);

void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
				struct task_struct *p);

static inline void mem_cgroup_oom_enable(void)
{
	WARN_ON(current->memcg_may_oom);
	current->memcg_may_oom = 1;
}

static inline void mem_cgroup_oom_disable(void)
{
	WARN_ON(!current->memcg_may_oom);
	current->memcg_may_oom = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_in_oom;
}

bool mem_cgroup_oom_synchronize(bool wait);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

struct mem_cgroup *lock_page_memcg(struct page *page);
void __unlock_page_memcg(struct mem_cgroup *memcg);
void unlock_page_memcg(struct page *page);

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
					     int idx)
{
	long x = atomic_long_read(&memcg->stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx, int val)
{
	long x;

	if (mem_cgroup_disabled())
		return;

	x = val + __this_cpu_read(memcg->stat_cpu->count[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &memcg->stat[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->stat_cpu->count[idx], x);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_memcg_state(memcg, idx, val);
	local_irq_restore(flags);
}
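
/*
 * Batching, by way of an illustrative example: with MEMCG_CHARGE_BATCH
 * at 32, a CPU repeatedly calling
 *
 *	mod_memcg_state(memcg, MEMCG_RSS, 1);
 *
 * accumulates the updates in the per-cpu counter and only touches the
 * shared atomic in memcg->stat[] once the pending delta exceeds the
 * batch, i.e. roughly once every 33rd call. Readers of memcg_page_state()
 * can therefore lag the truth by up to MEMCG_CHARGE_BATCH pages per CPU.
 */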

/**
 * __mod_memcg_page_state - update page state statistics
 * @page: the page
 * @idx: page state item to account
 * @val: number of pages (positive or negative)
 *
 * The @page must be locked or the caller must use lock_page_memcg()
 * to prevent double accounting when the page is concurrently being
 * moved to another memcg:
 *
 *	lock_page(page) or lock_page_memcg(page)
 *	if (TestClearPageState(page))
 *		__mod_memcg_page_state(page, state, -1);
 *	unlock_page(page) or unlock_page_memcg(page)
 */
static inline void __mod_memcg_page_state(struct page *page,
					  int idx, int val)
{
	if (page->mem_cgroup)
		__mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline void mod_memcg_page_state(struct page *page,
					int idx, int val)
{
	if (page->mem_cgroup)
		mod_memcg_state(page->mem_cgroup, idx, val);
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	struct mem_cgroup_per_node *pn;
	long x;

	if (mem_cgroup_disabled())
		return node_page_state(lruvec_pgdat(lruvec), idx);

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	x = atomic_long_read(&pn->lruvec_stat[idx]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	struct mem_cgroup_per_node *pn;
	long x;

	/* Update node */
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);

	if (mem_cgroup_disabled())
		return;

	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);

	/* Update memcg */
	__mod_memcg_state(pn->memcg, idx, val);

	/* Update lruvec */
	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &pn->lruvec_stat[idx]);
		x = 0;
	}
	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_state(lruvec, idx, val);
	local_irq_restore(flags);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	pg_data_t *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	/* Untracked pages have no memcg, no lruvec. Update only the node */
	if (!page->mem_cgroup) {
		__mod_node_page_state(pgdat, idx, val);
		return;
	}

	lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
	__mod_lruvec_state(lruvec, idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_lruvec_page_state(page, idx, val);
	local_irq_restore(flags);
}
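
/*
 * Illustrative (hypothetical) call site: account one page entering
 * writeback against both its node and, if the page is tracked, its
 * memcg's lruvec; the inc/dec wrappers further below reduce to this
 * with a val of +1/-1:
 *
 *	mod_lruvec_page_state(page, NR_WRITEBACK, 1);
 */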

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);

static inline void __count_memcg_events(struct mem_cgroup *memcg,
					enum vm_event_item idx,
					unsigned long count)
{
	unsigned long x;

	if (mem_cgroup_disabled())
		return;

	x = count + __this_cpu_read(memcg->stat_cpu->events[idx]);
	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
		atomic_long_add(x, &memcg->events[idx]);
		x = 0;
	}
	__this_cpu_write(memcg->stat_cpu->events[idx], x);
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
	unsigned long flags;

	local_irq_save(flags);
	__count_memcg_events(memcg, idx, count);
	local_irq_restore(flags);
}

static inline void count_memcg_page_event(struct page *page,
					  enum vm_event_item idx)
{
	if (page->mem_cgroup)
		count_memcg_events(page->mem_cgroup, idx, 1);
}

static inline void count_memcg_event_mm(struct mm_struct *mm,
					enum vm_event_item idx)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		count_memcg_events(memcg, idx, 1);
	rcu_read_unlock();
}
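
/*
 * Illustrative use, modeled on the page fault path (a sketch, not a
 * verbatim call site): alongside the global vm_event counter, charge
 * the event to the faulting mm's memcg:
 *
 *	count_vm_event(PGFAULT);
 *	count_memcg_event_mm(vma->vm_mm, PGFAULT);
 */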

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
	atomic_long_inc(&memcg->memory_events[event]);
	cgroup_file_notify(&memcg->events_file);
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
	struct mem_cgroup *memcg;

	if (mem_cgroup_disabled())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	if (likely(memcg))
		memcg_memory_event(memcg, event);
	rcu_read_unlock();
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#else /* CONFIG_MEMCG */

#define MEM_CGROUP_ID_SHIFT	0
#define MEM_CGROUP_ID_MAX	0

struct mem_cgroup;

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline void memcg_memory_event(struct mem_cgroup *memcg,
				      enum memcg_memory_event event)
{
}

static inline void memcg_memory_event_mm(struct mm_struct *mm,
					 enum memcg_memory_event event)
{
}

static inline enum mem_cgroup_protection mem_cgroup_protected(
	struct mem_cgroup *root, struct mem_cgroup *memcg)
{
	return MEMCG_PROT_NONE;
}

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask,
					struct mem_cgroup **memcgp,
					bool compound)
{
	*memcgp = NULL;
	return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool lrucare, bool compound)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool compound)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}

static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
				struct mem_cgroup *memcg)
{
	return node_lruvec(pgdat);
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
		int (*fn)(struct task_struct *, void *), void *arg)
{
	return 0;
}

static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
{
	return 0;
}

static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
{
	WARN_ON_ONCE(id);
	/* XXX: This should always return root_mem_cgroup */
	return NULL;
}

static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
{
	return NULL;
}

static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
	return true;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline
unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
		enum lru_list lru, int zone_idx)
{
	return 0;
}

static inline unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
			     int nid, unsigned int lru_mask)
{
	return 0;
}

static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline struct mem_cgroup *lock_page_memcg(struct page *page)
{
	return NULL;
}

static inline void __unlock_page_memcg(struct mem_cgroup *memcg)
{
}

static inline void unlock_page_memcg(struct page *page)
{
}

static inline void mem_cgroup_handle_over_high(void)
{
}

static inline void mem_cgroup_oom_enable(void)
{
}

static inline void mem_cgroup_oom_disable(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
					     int idx)
{
	return 0;
}

static inline void __mod_memcg_state(struct mem_cgroup *memcg,
				     int idx,
				     int nr)
{
}

static inline void mod_memcg_state(struct mem_cgroup *memcg,
				   int idx,
				   int nr)
{
}

static inline void __mod_memcg_page_state(struct page *page,
					  int idx,
					  int nr)
{
}

static inline void mod_memcg_page_state(struct page *page,
					int idx,
					int nr)
{
}

static inline unsigned long lruvec_page_state(struct lruvec *lruvec,
					      enum node_stat_item idx)
{
	return node_page_state(lruvec_pgdat(lruvec), idx);
}

static inline void __mod_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx, int val)
{
	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void mod_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx, int val)
{
	mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
}

static inline void __mod_lruvec_page_state(struct page *page,
					   enum node_stat_item idx, int val)
{
	__mod_node_page_state(page_pgdat(page), idx, val);
}

static inline void mod_lruvec_page_state(struct page *page,
					 enum node_stat_item idx, int val)
{
	mod_node_page_state(page_pgdat(page), idx, val);
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline void count_memcg_events(struct mem_cgroup *memcg,
				      enum vm_event_item idx,
				      unsigned long count)
{
}

static inline void count_memcg_page_event(struct page *page,
					  int idx)
{
}

static inline
void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_state(struct mem_cgroup *memcg,
				     int idx)
{
	__mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __inc_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void __dec_memcg_page_state(struct page *page,
					  int idx)
{
	__mod_memcg_page_state(page, idx, -1);
}

static inline void __inc_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, 1);
}

static inline void __dec_lruvec_state(struct lruvec *lruvec,
				      enum node_stat_item idx)
{
	__mod_lruvec_state(lruvec, idx, -1);
}

static inline void __inc_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, 1);
}

static inline void __dec_lruvec_page_state(struct page *page,
					   enum node_stat_item idx)
{
	__mod_lruvec_page_state(page, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_state(struct mem_cgroup *memcg,
				   int idx)
{
	mod_memcg_state(memcg, idx, -1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void inc_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, 1);
}

/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void dec_memcg_page_state(struct page *page,
					int idx)
{
	mod_memcg_page_state(page, idx, -1);
}

static inline void inc_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, 1);
}

static inline void dec_lruvec_state(struct lruvec *lruvec,
				    enum node_stat_item idx)
{
	mod_lruvec_state(lruvec, idx, -1);
}

static inline void inc_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, 1);
}

static inline void dec_lruvec_page_state(struct page *page,
					 enum node_stat_item idx)
{
	mod_lruvec_page_state(page, idx, -1);
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
			 unsigned long *pheadroom, unsigned long *pdirty,
			 unsigned long *pwriteback);

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
{
	return NULL;
}

static inline void mem_cgroup_wb_stats(struct bdi_writeback *wb,
				       unsigned long *pfilepages,
				       unsigned long *pheadroom,
				       unsigned long *pdirty,
				       unsigned long *pwriteback)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

struct sock;
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
		return true;
	do {
		if (time_before(jiffies, memcg->socket_pressure))
			return true;
	} while ((memcg = parent_mem_cgroup(memcg)));
	return false;
}
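
/*
 * Illustrative caller, loosely modeled on the core socket code (a
 * sketch, not a verbatim call site; "amt" is a hypothetical page count):
 *
 *	if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, amt))
 *		goto suppress_allocation;
 */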
#else
#define mem_cgroup_sockets_enabled 0
static inline void mem_cgroup_sk_alloc(struct sock *sk) { }
static inline void mem_cgroup_sk_free(struct sock *sk) { }
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
	return false;
}
#endif

struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
void memcg_kmem_put_cache(struct kmem_cache *cachep);
int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
			    struct mem_cgroup *memcg);
int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void memcg_kmem_uncharge(struct page *page, int order);

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
extern struct static_key_false memcg_kmem_enabled_key;
extern struct workqueue_struct *memcg_kmem_cache_wq;

extern int memcg_nr_cache_ids;
void memcg_get_cache_ids(void);
void memcg_put_cache_ids(void);

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL).
 * the slab_mutex must be held when looping through those caches
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_nr_cache_ids; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
	return static_branch_unlikely(&memcg_kmem_enabled_key);
}
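
/*
 * Illustrative guard (a sketch of how callers use the static branch):
 * keep kmem accounting off the allocation hot path unless at least one
 * memcg has kmem accounting enabled.
 *
 *	if (memcg_kmem_enabled() && (gfp & __GFP_ACCOUNT)) {
 *		if (memcg_kmem_charge(page, gfp, order))
 *			goto fail;	// hypothetical error label
 *	}
 */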

/*
 * helper for accessing a memcg's index. It will be used as an index in the
 * child cache array in kmem_cache, and also to derive its name. This function
 * will return -1 when this is not a kmem-limited memcg.
 */
static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return memcg ? memcg->kmemcg_id : -1;
}

#else
#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline void memcg_get_cache_ids(void)
{
}

static inline void memcg_put_cache_ids(void)
{
}

#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

#endif /* _LINUX_MEMCONTROL_H */