/* memcontrol.c - Memory Controller */
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/limits.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmpressure.h>
#include <linux/mm_inline.h>
#include <linux/swap_cgroup.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
#include <linux/tracehook.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"

#include <asm/uaccess.h>

#include <trace/events/vmscan.h>

struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;

#define MEM_CGROUP_RECLAIM_RETRIES	5

/* Socket memory accounting disabled? */
static bool cgroup_memory_nosocket;

/* Kernel memory accounting disabled? */
static bool cgroup_memory_nokmem;

/* Whether the swap controller is active */
#ifdef CONFIG_MEMCG_SWAP
int do_swap_account __read_mostly;
#else
#define do_swap_account		0
#endif

/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
	return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && do_swap_account;
}

static const char * const mem_cgroup_stat_names[] = {
	"cache",
	"rss",
	"rss_huge",
	"mapped_file",
	"dirty",
	"writeback",
	"swap",
};

static const char * const mem_cgroup_events_names[] = {
	"pgpgin",
	"pgpgout",
	"pgfault",
	"pgmajfault",
};

static const char * const mem_cgroup_lru_names[] = {
	"inactive_anon",
	"active_anon",
	"inactive_file",
	"active_file",
	"unevictable",
};

#define THRESHOLDS_EVENTS_TARGET	128
#define SOFTLIMIT_EVENTS_TARGET		1024
#define NUMAINFO_EVENTS_TARGET		1024

/*
 * Cgroups above their limits are maintained in a RB-Tree, independent of
 * their hierarchy representation
 */

struct mem_cgroup_tree_per_node {
	struct rb_root rb_root;
	spinlock_t lock;
};

struct mem_cgroup_tree {
	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
};

static struct mem_cgroup_tree soft_limit_tree __read_mostly;

/* for OOM */
struct mem_cgroup_eventfd_list {
	struct list_head list;
	struct eventfd_ctx *eventfd;
};

/*
 * cgroup_event represents events that userspace wants to receive.
 */
struct mem_cgroup_event {
	/*
	 * memcg which the event belongs to.
	 */
	struct mem_cgroup *memcg;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * register_event() callback will be used to add a new userspace
	 * waiter for changes related to this event.  Use eventfd_signal()
	 * on the eventfd to send a notification to userspace.
	 */
	int (*register_event)(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args);
	/*
	 * unregister_event() callback will be called when userspace closes
	 * the eventfd or on cgroup removal.  This callback must be set if
	 * you want to provide notification functionality.
	 */
	void (*unregister_event)(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd);
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

static void mem_cgroup_threshold(struct mem_cgroup *memcg);
static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);

/* State for moving charges at task migration. */
/*
 * Types of charges to be moved.
 */
#define MOVE_ANON	0x1U
#define MOVE_FILE	0x2U
#define MOVE_MASK	(MOVE_ANON | MOVE_FILE)

/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
	spinlock_t	  lock; /* for from, to */
	struct mm_struct  *mm;
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	unsigned long flags;
	unsigned long precharge;
	unsigned long moved_charge;
	unsigned long moved_swap;
	struct task_struct *moving_task;	/* a task moving charges */
	wait_queue_head_t waitq;		/* a waitq for other context */
} mc = {
	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
};

/*
 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
 * limit reclaim to prevent infinite loops, if they ever occur.
 */
#define	MEM_CGROUP_MAX_RECLAIM_LOOPS		100
#define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	2

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_ANON,
	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
	NR_CHARGE_TYPE,
};

/* for encoding cft->private value on file */
enum res_type {
	_MEM,
	_MEMSWAP,
	_OOM_TYPE,
	_KMEM,
	_TCP,
};

#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
#define MEMFILE_ATTR(val)	((val) & 0xffff)
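
/*
 * Illustrative example (not part of the original comments): a cftype
 * such as memory.limit_in_bytes would store MEMFILE_PRIVATE(_MEM,
 * RES_LIMIT) in its ->private field; the read/write handlers further
 * down then recover the counter type with MEMFILE_TYPE() and the
 * attribute with MEMFILE_ATTR().
 */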
/* Used for OOM notifier */
#define OOM_CONTROL		(0)

/* Some nice accessors for the vmpressure. */
struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
{
	if (!memcg)
		memcg = root_mem_cgroup;
	return &memcg->vmpressure;
}

struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
{
	return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
}

static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
{
	return (memcg == root_mem_cgroup);
}

#ifndef CONFIG_SLOB
/*
 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
 * The cgroup ID is not reused for this because it behaves poorly in sparse
 * setups: with many memcgs of which only a few are kmem-limited, an array
 * indexed by cgroup ID would be mostly wasted space.
 *
 * The current size of the caches array is stored in memcg_nr_cache_ids.
 * It is doubled each time it has to grow.
 */
static DEFINE_IDA(memcg_cache_ida);
int memcg_nr_cache_ids;

/* Protects memcg_nr_cache_ids */
static DECLARE_RWSEM(memcg_cache_ids_sem);

void memcg_get_cache_ids(void)
{
	down_read(&memcg_cache_ids_sem);
}

void memcg_put_cache_ids(void)
{
	up_read(&memcg_cache_ids_sem);
}

/*
 * MIN_SIZE is bigger than 1 to avoid growing the caches arrays on every
 * single id allocation; four kmem-limited cgroups is a reasonable guess
 * for a small machine.  MAX_SIZE is bounded by the maximum number of
 * cgroup IDs, which is the most the caches arrays could ever have to
 * index.
 */
#define MEMCG_CACHES_MIN_SIZE 4
#define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX

/*
 * Many of the slab allocation entry points are expected to be inlined,
 * and the calls into memcg_kmem_get_cache() are guarded by this static
 * branch, so the key must be visible to modules that do
 * kmem_cache_alloc() and friends.
 */
DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
EXPORT_SYMBOL(memcg_kmem_enabled_key);

#endif /* !CONFIG_SLOB */

/**
 * mem_cgroup_css_from_page - css of the memcg associated with a page
 * @page: page of interest
 *
 * If memcg is bound to the default hierarchy, css of the memcg associated
 * with @page is returned.  The returned css remains associated with @page
 * until it is released.
 *
 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
 * is returned.
 */
struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = page->mem_cgroup;

	if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		memcg = root_mem_cgroup;

	return &memcg->css;
}

/**
 * page_cgroup_ino - return inode number of the memcg a page is charged to
 * @page: the page
 *
 * Look up the closest online ancestor of the memory cgroup @page is charged
 * to and return its inode number, or 0 if @page is not charged to any
 * cgroup.  It is safe to call this function without holding a reference to
 * @page.
 *
 * Note, this function is inherently racy, because there is nothing to
 * prevent the cgroup inode from getting torn down and potentially
 * reallocated a moment after page_cgroup_ino() returns, so it only should
 * be used by callers that do not care (such as procfs interfaces).
 */
ino_t page_cgroup_ino(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long ino = 0;

	rcu_read_lock();
	memcg = READ_ONCE(page->mem_cgroup);
	while (memcg && !(memcg->css.flags & CSS_ONLINE))
		memcg = parent_mem_cgroup(memcg);
	if (memcg)
		ino = cgroup_ino(memcg->css.cgroup);
	rcu_read_unlock();
	return ino;
}

static struct mem_cgroup_per_node *
mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
{
	int nid = page_to_nid(page);

	return memcg->nodeinfo[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_node(int nid)
{
	return soft_limit_tree.rb_tree_per_node[nid];
}

static struct mem_cgroup_tree_per_node *
soft_limit_tree_from_page(struct page *page)
{
	int nid = page_to_nid(page);

	return soft_limit_tree.rb_tree_per_node[nid];
}

static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz,
					 unsigned long new_usage_in_excess)
{
	struct rb_node **p = &mctz->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct mem_cgroup_per_node *mz_node;

	if (mz->on_tree)
		return;

	mz->usage_in_excess = new_usage_in_excess;
	if (!mz->usage_in_excess)
		return;
	while (*p) {
		parent = *p;
		mz_node = rb_entry(parent, struct mem_cgroup_per_node,
					tree_node);
		if (mz->usage_in_excess < mz_node->usage_in_excess)
			p = &(*p)->rb_left;
		/*
		 * We can't avoid mem cgroups that are over their soft
		 * limit by the same amount
		 */
		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
			p = &(*p)->rb_right;
	}
	rb_link_node(&mz->tree_node, parent, p);
	rb_insert_color(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = true;
}

static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
					 struct mem_cgroup_tree_per_node *mctz)
{
	if (!mz->on_tree)
		return;
	rb_erase(&mz->tree_node, &mctz->rb_root);
	mz->on_tree = false;
}

static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
				       struct mem_cgroup_tree_per_node *mctz)
{
	unsigned long flags;

	spin_lock_irqsave(&mctz->lock, flags);
	__mem_cgroup_remove_exceeded(mz, mctz);
	spin_unlock_irqrestore(&mctz->lock, flags);
}

static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
{
	unsigned long nr_pages = page_counter_read(&memcg->memory);
	unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
	unsigned long excess = 0;

	if (nr_pages > soft_limit)
		excess = nr_pages - soft_limit;

	return excess;
}
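
/*
 * Worked example (illustrative): with usage at 600 pages and
 * memcg->soft_limit at 512, soft_limit_excess() returns 88; at or below
 * the soft limit it returns 0, and __mem_cgroup_insert_exceeded() above
 * then keeps the memcg off the per-node soft limit tree.
 */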
static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
{
	unsigned long excess;
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup_tree_per_node *mctz;

	mctz = soft_limit_tree_from_page(page);
	/*
	 * Necessary to update all ancestors when hierarchy is used,
	 * because their event counters are not touched.
	 */
	for (; memcg; memcg = parent_mem_cgroup(memcg)) {
		mz = mem_cgroup_page_nodeinfo(memcg, page);
		excess = soft_limit_excess(memcg);
		/*
		 * We have to update the tree if mz is on RB-tree or
		 * mem is over its softlimit.
		 */
		if (excess || mz->on_tree) {
			unsigned long flags;

			spin_lock_irqsave(&mctz->lock, flags);
			/* if on-tree, remove it */
			if (mz->on_tree)
				__mem_cgroup_remove_exceeded(mz, mctz);
			/*
			 * Insert again. mz->usage_in_excess will be updated.
			 * If excess is 0, no tree ops.
			 */
			__mem_cgroup_insert_exceeded(mz, mctz, excess);
			spin_unlock_irqrestore(&mctz->lock, flags);
		}
	}
}

static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
{
	struct mem_cgroup_tree_per_node *mctz;
	struct mem_cgroup_per_node *mz;
	int nid;

	for_each_node(nid) {
		mz = mem_cgroup_nodeinfo(memcg, nid);
		mctz = soft_limit_tree_node(nid);
		mem_cgroup_remove_exceeded(mz, mctz);
	}
}

static struct mem_cgroup_per_node *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct rb_node *rightmost = NULL;
	struct mem_cgroup_per_node *mz;

retry:
	mz = NULL;
	rightmost = rb_last(&mctz->rb_root);
	if (!rightmost)
		goto done;		/* nothing to reclaim from */

	mz = rb_entry(rightmost, struct mem_cgroup_per_node, tree_node);
	/*
	 * Remove the node now but someone else can add it back,
	 * we will add it back at the end of reclaim to its correct
	 * position in the tree.
	 */
	__mem_cgroup_remove_exceeded(mz, mctz);
	if (!soft_limit_excess(mz->memcg) ||
	    !css_tryget_online(&mz->memcg->css))
		goto retry;
done:
	return mz;
}

static struct mem_cgroup_per_node *
mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
{
	struct mem_cgroup_per_node *mz;

	spin_lock_irq(&mctz->lock);
	mz = __mem_cgroup_largest_soft_limit_node(mctz);
	spin_unlock_irq(&mctz->lock);
	return mz;
}

/*
 * Return the page count of a single (non-recursive) @memcg statistic.
 *
 * Implementation note: the per-cpu counters are deliberately not
 * synchronized periodically the way vmstat[] is.  This read function
 * backs the user interface, and userspace accounts memory by memcg and
 * always wants an exact value, so the sum would have to visit all online
 * CPUs anyway; a quick-and-fuzzy read would buy nothing.  If
 * kernel-internal users ever need a cheap approximate value and the
 * full-CPU sum becomes a bottleneck, vmstat[]-style thresholds and
 * synchronization could be added.
 */
static unsigned long
mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
{
	long val = 0;
	int cpu;

	/* Per-cpu values can be negative, use a signed accumulator */
	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->count[idx], cpu);
	/*
	 * Summing races with updates, so val may be negative.  Avoid
	 * exposing transient negative values.
	 */
	if (val < 0)
		val = 0;
	return val;
}

static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
					    enum mem_cgroup_events_index idx)
{
	unsigned long val = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		val += per_cpu(memcg->stat->events[idx], cpu);
	return val;
}

static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
					 struct page *page,
					 bool compound, int nr_pages)
{
	/*
	 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
	 * counted as CACHE even if it's on ANON LRU.
	 */
	if (PageAnon(page))
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
				nr_pages);
	else
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
				nr_pages);

	if (compound) {
		VM_BUG_ON_PAGE(!PageTransHuge(page), page);
		__this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
				nr_pages);
	}

	/* pagein of a big page is an event. So, ignore page size */
	if (nr_pages > 0)
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
	else {
		__this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
		nr_pages = -nr_pages; /* for event */
	}

	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
}

unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
					   int nid, unsigned int lru_mask)
{
	unsigned long nr = 0;
	struct mem_cgroup_per_node *mz;
	enum lru_list lru;

	VM_BUG_ON((unsigned)nid >= nr_node_ids);

	for_each_lru(lru) {
		if (!(BIT(lru) & lru_mask))
			continue;
		mz = mem_cgroup_nodeinfo(memcg, nid);
		nr += mz->lru_size[lru];
	}
	return nr;
}

static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
					     unsigned int lru_mask)
{
	unsigned long nr = 0;
	int nid;

	for_each_node_state(nid, N_MEMORY)
		nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
	return nr;
}

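/*
 * Example of the rate limiting below (illustrative): with
 * THRESHOLDS_EVENTS_TARGET at 128, memcg_check_events() runs the
 * threshold notifiers at most once per 128 page events on a given CPU;
 * soft limit tree updates and NUMA info refreshes use the coarser
 * 1024-event targets defined above.
 */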
static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
				       enum mem_cgroup_events_target target)
{
	unsigned long val, next;

	val = __this_cpu_read(memcg->stat->nr_page_events);
	next = __this_cpu_read(memcg->stat->targets[target]);
	/* from time_after() in jiffies.h */
	if ((long)next - (long)val < 0) {
		switch (target) {
		case MEM_CGROUP_TARGET_THRESH:
			next = val + THRESHOLDS_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_SOFTLIMIT:
			next = val + SOFTLIMIT_EVENTS_TARGET;
			break;
		case MEM_CGROUP_TARGET_NUMAINFO:
			next = val + NUMAINFO_EVENTS_TARGET;
			break;
		default:
			break;
		}
		__this_cpu_write(memcg->stat->targets[target], next);
		return true;
	}
	return false;
}

/*
 * Check events in order.
 */
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
{
	/* threshold event is triggered in finer grain than soft limit */
	if (unlikely(mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_THRESH))) {
		bool do_softlimit;
		bool do_numainfo __maybe_unused;

		do_softlimit = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_SOFTLIMIT);
#if MAX_NUMNODES > 1
		do_numainfo = mem_cgroup_event_ratelimit(memcg,
						MEM_CGROUP_TARGET_NUMAINFO);
#endif
		mem_cgroup_threshold(memcg);
		if (unlikely(do_softlimit))
			mem_cgroup_update_tree(memcg, page);
#if MAX_NUMNODES > 1
		if (unlikely(do_numainfo))
			atomic_inc(&memcg->numainfo_events);
#endif
	}
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	/*
	 * mm_update_next_owner() may clear mm->owner to NULL
	 * if it races with swapoff, page migration, etc.
	 * So this can be called with p == NULL.
	 */
	if (unlikely(!p))
		return NULL;

	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
}
EXPORT_SYMBOL(mem_cgroup_from_task);

static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	struct mem_cgroup *memcg = NULL;

	rcu_read_lock();
	do {
		/*
		 * Page cache insertions can happen without an
		 * actual mm context, e.g. during disk probing
		 * on boot, loopback IO, acct() writes etc.
		 */
		if (unlikely(!mm))
			memcg = root_mem_cgroup;
		else {
			memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
			if (unlikely(!memcg))
				memcg = root_mem_cgroup;
		}
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;
}

/**
 * mem_cgroup_iter - iterate over memory cgroup hierarchy
 * @root: hierarchy root
 * @prev: previously returned memcg, NULL on first invocation
 * @reclaim: cookie for shared reclaim walks, NULL for full walks
 *
 * Returns references to children of the hierarchy below @root, or
 * @root itself, or %NULL after a full round-trip.
 *
 * Caller must pass the return value in @prev on subsequent
 * invocations for reference counting, or use mem_cgroup_iter_break()
 * to cancel a hierarchy walk before the round-trip is complete.
 *
 * Reclaimers can specify a node and a priority level in @reclaim to
 * divide up the memcgs in the hierarchy among all concurrent
 * reclaimers operating on the same node and priority.
 */
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
				   struct mem_cgroup *prev,
				   struct mem_cgroup_reclaim_cookie *reclaim)
{
	struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
	struct cgroup_subsys_state *css = NULL;
	struct mem_cgroup *memcg = NULL;
	struct mem_cgroup *pos = NULL;

	if (mem_cgroup_disabled())
		return NULL;

	if (!root)
		root = root_mem_cgroup;

	if (prev && !reclaim)
		pos = prev;

	if (!root->use_hierarchy && root != root_mem_cgroup) {
		if (prev)
			goto out;
		return root;
	}

	rcu_read_lock();

	if (reclaim) {
		struct mem_cgroup_per_node *mz;

		mz = mem_cgroup_nodeinfo(root, reclaim->pgdat->node_id);
		iter = &mz->iter[reclaim->priority];

		if (prev && reclaim->generation != iter->generation)
			goto out_unlock;

		while (1) {
			pos = READ_ONCE(iter->position);
			if (!pos || css_tryget(&pos->css))
				break;
			/*
			 * The css reference reached zero, so iter->position
			 * will be cleared by ->css_released.  However, we
			 * should not rely on this happening soon, because
			 * ->css_released is called from a work queue, and
			 * by busy-waiting we might block it.  So we clear
			 * iter->position right away.
			 */
			(void)cmpxchg(&iter->position, pos, NULL);
		}
	}

	if (pos)
		css = &pos->css;

	for (;;) {
		css = css_next_descendant_pre(css, &root->css);
		if (!css) {
			/*
			 * Reclaimers share the hierarchy walk, and a
			 * new one might jump in right at the end of
			 * the hierarchy - make sure they see at least
			 * one group and restart from the beginning.
			 */
			if (!prev)
				continue;
			break;
		}

		/*
		 * Verify the css and acquire a reference.  The root
		 * is provided by the caller, so we know it's alive
		 * and kicking, and don't take an extra reference.
		 */
		memcg = mem_cgroup_from_css(css);

		if (css == &root->css)
			break;

		if (css_tryget(css))
			break;

		memcg = NULL;
	}

	if (reclaim) {
		/*
		 * The position could have already been updated by a
		 * competing thread, so check that the value hasn't changed
		 * since we read it to avoid reclaiming from the same cgroup
		 * twice.
		 */
		(void)cmpxchg(&iter->position, pos, memcg);

		if (pos)
			css_put(&pos->css);

		if (!memcg)
			iter->generation++;
		else if (!prev)
			reclaim->generation = iter->generation;
	}

out_unlock:
	rcu_read_unlock();
out:
	if (prev && prev != root)
		css_put(&prev->css);

	return memcg;
}

/**
 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
 * @root: hierarchy root
 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
 */
void mem_cgroup_iter_break(struct mem_cgroup *root,
			   struct mem_cgroup *prev)
{
	if (!root)
		root = root_mem_cgroup;
	if (prev && prev != root)
		css_put(&prev->css);
}

static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
{
	struct mem_cgroup *memcg = dead_memcg;
	struct mem_cgroup_reclaim_iter *iter;
	struct mem_cgroup_per_node *mz;
	int nid;
	int i;

	while ((memcg = parent_mem_cgroup(memcg))) {
		for_each_node(nid) {
			mz = mem_cgroup_nodeinfo(memcg, nid);
			for (i = 0; i <= DEF_PRIORITY; i++) {
				iter = &mz->iter[i];
				cmpxchg(&iter->position,
					dead_memcg, NULL);
			}
		}
	}
}

/*
 * Iteration constructs for visiting all cgroups (under a tree).  If
 * loops are exited prematurely (break), mem_cgroup_iter_break() must
 * be used for cleanup.
 */
#define for_each_mem_cgroup_tree(iter, root)		\
	for (iter = mem_cgroup_iter(root, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(root, iter, NULL))

#define for_each_mem_cgroup(iter)			\
	for (iter = mem_cgroup_iter(NULL, NULL, NULL);	\
	     iter != NULL;				\
	     iter = mem_cgroup_iter(NULL, iter, NULL))

/**
 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
 * @memcg: hierarchy root
 * @fn: function to call for each task
 * @arg: argument passed to @fn
 *
 * This function iterates over tasks attached to @memcg or to any of its
 * descendants and calls @fn for each task.  If @fn returns a non-zero
 * value, the function breaks the iteration loop and returns the value.
 * Otherwise, it will iterate over all tasks and return 0.
 *
 * This function must not be called for the root memory cgroup.
 */
int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
			  int (*fn)(struct task_struct *, void *), void *arg)
{
	struct mem_cgroup *iter;
	int ret = 0;

	BUG_ON(memcg == root_mem_cgroup);

	for_each_mem_cgroup_tree(iter, memcg) {
		struct css_task_iter it;
		struct task_struct *task;

		css_task_iter_start(&iter->css, &it);
		while (!ret && (task = css_task_iter_next(&it)))
			ret = fn(task, arg);
		css_task_iter_end(&it);
		if (ret) {
			mem_cgroup_iter_break(memcg, iter);
			break;
		}
	}
	return ret;
}

/**
 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
 * @page: the page
 * @pgdat: pgdat of the page
 *
 * This function is only safe when following the LRU page isolation
 * and putback protocol: the LRU lock must be held, and the page must
 * either be PageLRU() or the caller must have isolated/allocated it.
 */
struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct pglist_data *pgdat)
{
	struct mem_cgroup_per_node *mz;
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	if (mem_cgroup_disabled()) {
		lruvec = &pgdat->lruvec;
		goto out;
	}

	memcg = page->mem_cgroup;
	/*
	 * Swapcache readahead pages are added to the LRU - and
	 * possibly migrated - before they are charged.
	 */
	if (!memcg)
		memcg = root_mem_cgroup;

	mz = mem_cgroup_page_nodeinfo(memcg, page);
	lruvec = &mz->lruvec;
out:
	/*
	 * Since a node can be onlined after the mem_cgroup was created,
	 * we have to be prepared to initialize lruvec->pgdat here;
	 * and if offlined then reonlined, we need to reinitialize it.
	 */
	if (unlikely(lruvec->pgdat != pgdat))
		lruvec->pgdat = pgdat;
	return lruvec;
}

/**
 * mem_cgroup_update_lru_size - account for adding or removing an lru page
 * @lruvec: mem_cgroup per zone lru vector
 * @lru: index of lru list the page is sitting on
 * @nr_pages: positive when adding or negative when removing
 *
 * This function must be called under lru_lock, just before a page is added
 * to or just after a page is removed from an lru list (that ordering being
 * so as to allow it to check that lru_size 0 is consistent with list_empty).
 */
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
				int nr_pages)
{
	struct mem_cgroup_per_node *mz;
	unsigned long *lru_size;
	long size;
	bool empty;

	if (mem_cgroup_disabled())
		return;

	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
	lru_size = mz->lru_size + lru;
	empty = list_empty(lruvec->lists + lru);

	if (nr_pages < 0)
		*lru_size += nr_pages;

	size = *lru_size;
	if (WARN_ONCE(size < 0 || empty != !size,
		"%s(%p, %d, %d): lru_size %ld but %sempty\n",
		__func__, lruvec, lru, nr_pages, size, empty ? "" : "not ")) {
		VM_BUG_ON(1);
		*lru_size = 0;
	}

	if (nr_pages > 0)
		*lru_size += nr_pages;
}

bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	struct task_struct *p;
	bool ret;

	p = find_lock_task_mm(task);
	if (p) {
		task_memcg = get_mem_cgroup_from_mm(p->mm);
		task_unlock(p);
	} else {
		/*
		 * All threads may have already detached their mm's, but the
		 * oom killer still needs to detect if they have already been
		 * oom killed to prevent needlessly killing additional tasks.
		 */
		rcu_read_lock();
		task_memcg = mem_cgroup_from_task(task);
		css_get(&task_memcg->css);
		rcu_read_unlock();
	}
	ret = mem_cgroup_is_descendant(task_memcg, memcg);
	css_put(&task_memcg->css);
	return ret;
}

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
 * @memcg: the memory cgroup
 *
 * Returns the maximum amount of memory @memcg can be charged with, in
 * pages.
 */
static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
{
	unsigned long margin = 0;
	unsigned long count;
	unsigned long limit;

	count = page_counter_read(&memcg->memory);
	limit = READ_ONCE(memcg->memory.limit);
	if (count < limit)
		margin = limit - count;

	if (do_memsw_account()) {
		count = page_counter_read(&memcg->memsw);
		limit = READ_ONCE(memcg->memsw.limit);
		if (count <= limit)
			margin = min(margin, limit - count);
		else
			margin = 0;
	}

	return margin;
}

/*
 * Check whether @memcg is involved in an ongoing move_account(), i.e.
 * whether it is mc.from, mc.to, or in the hierarchy of either.  Used
 * for waiting at high memory pressure caused by charge moving.
 */
static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
{
	struct mem_cgroup *from;
	struct mem_cgroup *to;
	bool ret = false;
	/*
	 * Unlike the task move routines, we access mc.to and mc.from not
	 * under mutual exclusion by cgroup_mutex; take the spinlock instead.
	 */
	spin_lock(&mc.lock);
	from = mc.from;
	to = mc.to;
	if (!from)
		goto unlock;

	ret = mem_cgroup_is_descendant(from, memcg) ||
		mem_cgroup_is_descendant(to, memcg);
unlock:
	spin_unlock(&mc.lock);
	return ret;
}

static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
{
	if (mc.moving_task && current != mc.moving_task) {
		if (mem_cgroup_under_move(memcg)) {
			DEFINE_WAIT(wait);
			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
			/* moving charge context might have finished. */
			if (mc.moving_task)
				schedule();
			finish_wait(&mc.waitq, &wait);
			return true;
		}
	}
	return false;
}

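/* Convert a page count to kilobytes; e.g. with 4K pages, K(3) == 12. */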
#define K(x) ((x) << (PAGE_SHIFT-10))
/**
 * mem_cgroup_print_oom_info: Print OOM information relevant to memory
 * controller.
 * @memcg: The memory cgroup that went over limit
 * @p: Task that is going to be killed
 *
 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
 * enabled
 */
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
	struct mem_cgroup *iter;
	unsigned int i;

	rcu_read_lock();

	if (p) {
		pr_info("Task in ");
		pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
		pr_cont(" killed as a result of limit of ");
	} else {
		pr_info("Memory limit reached of cgroup ");
	}

	pr_cont_cgroup_path(memcg->css.cgroup);
	pr_cont("\n");

	rcu_read_unlock();

	pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memory)),
		K((u64)memcg->memory.limit), memcg->memory.failcnt);
	pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->memsw)),
		K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
	pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
		K((u64)page_counter_read(&memcg->kmem)),
		K((u64)memcg->kmem.limit), memcg->kmem.failcnt);

	for_each_mem_cgroup_tree(iter, memcg) {
		pr_info("Memory cgroup stats for ");
		pr_cont_cgroup_path(iter->css.cgroup);
		pr_cont(":");

		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
			if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
				continue;
			pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
				K(mem_cgroup_read_stat(iter, i)));
		}

		for (i = 0; i < NR_LRU_LISTS; i++)
			pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
				K(mem_cgroup_nr_lru_pages(iter, BIT(i))));

		pr_cont("\n");
	}
}

/*
 * Return the number of memcgs in @memcg's hierarchy tree, including
 * @memcg itself, so at least 1 (the self count) if it has no children.
 */
static int mem_cgroup_count_children(struct mem_cgroup *memcg)
{
	int num = 0;
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, memcg)
		num++;
	return num;
}

/*
 * Return the memory (and swap, if configured) limit for a memcg.
 */
unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	unsigned long limit;

	limit = memcg->memory.limit;
	if (mem_cgroup_swappiness(memcg)) {
		unsigned long memsw_limit;
		unsigned long swap_limit;

		memsw_limit = memcg->memsw.limit;
		swap_limit = memcg->swap.limit;
		swap_limit = min(swap_limit, (unsigned long)total_swap_pages);
		limit = min(limit + swap_limit, memsw_limit);
	}
	return limit;
}

static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order)
{
	struct oom_control oc = {
		.zonelist = NULL,
		.nodemask = NULL,
		.memcg = memcg,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	bool ret;

	mutex_lock(&oom_lock);
	ret = out_of_memory(&oc);
	mutex_unlock(&oom_lock);
	return ret;
}

#if MAX_NUMNODES > 1

/**
 * test_mem_cgroup_node_reclaimable
 * @memcg: the target memcg
 * @nid: the node ID to be checked.
 * @noswap: specify true here if the user wants file only information.
 *
 * This function returns whether the specified memcg contains any
 * reclaimable pages on a node.  Returns true if there are any reclaimable
 * pages in the node.
 */
static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
		int nid, bool noswap)
{
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
		return true;
	if (noswap || !total_swap_pages)
		return false;
	if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
		return true;
	return false;
}

/*
 * Always updating the nodemask is not very good - even if we have an empty
 * list or the wrong list here, we can start from some node and traverse all
 * nodes based on the zonelist.  So update the list loosely once per 10 secs.
 */
static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
{
	int nid;
	/*
	 * numainfo_events > 0 means there were at least
	 * NUMAINFO_EVENTS_TARGET pagein/pageouts since the last update.
	 */
	if (!atomic_read(&memcg->numainfo_events))
		return;
	if (atomic_inc_return(&memcg->numainfo_updating) > 1)
		return;

	/* make a nodemask where this memcg uses memory from */
	memcg->scan_nodes = node_states[N_MEMORY];

	for_each_node_mask(nid, node_states[N_MEMORY]) {
		if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
			node_clear(nid, memcg->scan_nodes);
	}

	atomic_set(&memcg->numainfo_events, 0);
	atomic_set(&memcg->numainfo_updating, 0);
}

/*
 * Select a node to start reclaim from.  Since the goal is only to reduce
 * the usage counter, starting anywhere is acceptable, so nodes are simply
 * visited round-robin.  Reclaiming from the current node has both pros
 * and cons: it frees memory on a node we are likely to use again, which
 * can hurt the local LRU, and concurrent reclaimers expanding the LRU can
 * leave scan_nodes stale.  A global lock to compute scan_nodes would be
 * too expensive, so the nodemask is only updated loosely and periodically
 * (see above).
 */
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	int node;

	mem_cgroup_may_update_nodemask(memcg);
	node = memcg->last_scanned_node;

	node = next_node_in(node, memcg->scan_nodes);
	/*
	 * mem_cgroup_may_update_nodemask might have seen no reclaimable
	 * pages last time it really checked all the LRUs due to rate
	 * limiting.  Fall back to the current node in that case for
	 * simplicity.
	 */
	if (unlikely(node == MAX_NUMNODES))
		node = numa_node_id();

	memcg->last_scanned_node = node;
	return node;
}
#else
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
{
	return 0;
}
#endif

static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
				   pg_data_t *pgdat,
				   gfp_t gfp_mask,
				   unsigned long *total_scanned)
{
	struct mem_cgroup *victim = NULL;
	int total = 0;
	int loop = 0;
	unsigned long excess;
	unsigned long nr_scanned;
	struct mem_cgroup_reclaim_cookie reclaim = {
		.pgdat = pgdat,
		.priority = 0,
	};

	excess = soft_limit_excess(root_memcg);

	while (1) {
		victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
		if (!victim) {
			loop++;
			if (loop >= 2) {
				/*
				 * If we have not been able to reclaim
				 * anything, it might be because there are
				 * no reclaimable pages under this hierarchy.
				 */
				if (!total)
					break;
				/*
				 * We want to do more targeted reclaim.
				 * excess >> 2 is not too excessive so as to
				 * reclaim too much, nor so little that we
				 * keep coming back to reclaim from this
				 * cgroup.
				 */
				if (total >= (excess >> 2) ||
					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
					break;
			}
			continue;
		}
		total += mem_cgroup_shrink_node(victim, gfp_mask, false,
						pgdat, &nr_scanned);
		*total_scanned += nr_scanned;
		if (!soft_limit_excess(root_memcg))
			break;
	}
	mem_cgroup_iter_break(root_memcg, victim);
	return total;
}

#ifdef CONFIG_LOCKDEP
static struct lockdep_map memcg_oom_lock_dep_map = {
	.name = "memcg_oom_lock",
};
#endif

static DEFINE_SPINLOCK(memcg_oom_lock);

/*
 * Check whether the OOM killer is already running under our hierarchy.
 * If someone is running, return false.
 */
static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter, *failed = NULL;

	spin_lock(&memcg_oom_lock);

	for_each_mem_cgroup_tree(iter, memcg) {
		if (iter->oom_lock) {
			/*
			 * this subtree of our hierarchy is already locked
			 * so we cannot give a lock.
			 */
			failed = iter;
			mem_cgroup_iter_break(memcg, iter);
			break;
		} else
			iter->oom_lock = true;
	}

	if (failed) {
		/*
		 * OK, we failed to lock the whole subtree so we have
		 * to clean up what we set up to the failing subtree
		 */
		for_each_mem_cgroup_tree(iter, memcg) {
			if (iter == failed) {
				mem_cgroup_iter_break(memcg, iter);
				break;
			}
			iter->oom_lock = false;
		}
	} else
		mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);

	spin_unlock(&memcg_oom_lock);

	return !failed;
}

static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->oom_lock = false;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		iter->under_oom++;
	spin_unlock(&memcg_oom_lock);
}

static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
{
	struct mem_cgroup *iter;

	/*
	 * Be careful about under_oom underflows, because a child memcg
	 * could have been added after mem_cgroup_mark_under_oom().
	 */
	spin_lock(&memcg_oom_lock);
	for_each_mem_cgroup_tree(iter, memcg)
		if (iter->under_oom > 0)
			iter->under_oom--;
	spin_unlock(&memcg_oom_lock);
}

static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);

struct oom_wait_info {
	struct mem_cgroup *memcg;
	wait_queue_t	wait;
};

static int memcg_oom_wake_function(wait_queue_t *wait,
				   unsigned mode, int sync, void *arg)
{
	struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
	struct mem_cgroup *oom_wait_memcg;
	struct oom_wait_info *oom_wait_info;

	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
	oom_wait_memcg = oom_wait_info->memcg;

	if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
	    !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
		return 0;
	return autoremove_wake_function(wait, mode, sync, arg);
}

static void memcg_oom_recover(struct mem_cgroup *memcg)
{
	/*
	 * For the following lockless ->under_oom test, the only required
	 * guarantee is that it must see the state asserted by an OOM when
	 * this function is called as a result of userland actions
	 * triggered by the notification of the OOM.  This is trivially
	 * achieved by invoking mem_cgroup_mark_under_oom() before
	 * triggering notification.
	 */
	if (memcg && memcg->under_oom)
		__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
}

static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
{
	if (!current->memcg_may_oom)
		return;
	/*
	 * We are in the middle of the charge context here, so we
	 * don't want to block when potentially sitting on a callstack
	 * that holds all kinds of filesystem and mm locks.
	 *
	 * Also, the caller may handle a failed allocation gracefully
	 * (like optional page cache readahead) and so an OOM killer
	 * invocation might not even be necessary.
	 *
	 * That's why we don't do anything here except remember the
	 * OOM context and then deal with it at the end of the page
	 * fault when the stack is unwound, the locks are released,
	 * and when we know whether the fault was overall successful.
	 */
	css_get(&memcg->css);
	current->memcg_in_oom = memcg;
	current->memcg_oom_gfp_mask = mask;
	current->memcg_oom_order = order;
}

/**
 * mem_cgroup_oom_synchronize - complete memcg OOM handling
 * @handle: actually kill/wait or just clean up the OOM state
 *
 * This has to be called at the end of a page fault if the memcg OOM
 * handler was enabled.
 *
 * Memcg supports userspace OOM handling where failed allocations must
 * sleep on a waitqueue until the userspace task resolves the
 * situation.  Sleeping directly in the charge context with all kinds
 * of locks held is not a good idea, instead we remember an OOM state
 * in the task and mem_cgroup_oom_synchronize() has to be called at
 * the end of the page fault to complete the OOM handling.
 *
 * Returns %true if an ongoing memcg OOM situation was detected and
 * completed, %false otherwise.
 */
bool mem_cgroup_oom_synchronize(bool handle)
{
	struct mem_cgroup *memcg = current->memcg_in_oom;
	struct oom_wait_info owait;
	bool locked;

	/* OOM is global, do not handle */
	if (!memcg)
		return false;

	if (!handle)
		goto cleanup;

	owait.memcg = memcg;
	owait.wait.flags = 0;
	owait.wait.func = memcg_oom_wake_function;
	owait.wait.private = current;
	INIT_LIST_HEAD(&owait.wait.task_list);

	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
	mem_cgroup_mark_under_oom(memcg);

	locked = mem_cgroup_oom_trylock(memcg);

	if (locked)
		mem_cgroup_oom_notify(memcg);

	if (locked && !memcg->oom_kill_disable) {
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
		mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
					 current->memcg_oom_order);
	} else {
		schedule();
		mem_cgroup_unmark_under_oom(memcg);
		finish_wait(&memcg_oom_waitq, &owait.wait);
	}

	if (locked) {
		mem_cgroup_oom_unlock(memcg);
		/*
		 * There is no guarantee that an OOM-lock contender
		 * sees the wakeups triggered by the OOM kill
		 * uncharges.  Wake any sleepers explicitly.
		 */
		memcg_oom_recover(memcg);
	}
cleanup:
	current->memcg_in_oom = NULL;
	css_put(&memcg->css);
	return true;
}

/**
 * lock_page_memcg - lock a page->mem_cgroup binding
 * @page: the page
 *
 * This function protects unlocked LRU pages from being moved to
 * another cgroup and stabilizes their page->mem_cgroup binding.
 */
void lock_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;

	/*
	 * The RCU lock is held throughout the transaction.  The fast
	 * path can get away without acquiring the memcg->move_lock
	 * because page moving starts with an RCU grace period.
	 */
	rcu_read_lock();

	if (mem_cgroup_disabled())
		return;
again:
	memcg = page->mem_cgroup;
	if (unlikely(!memcg))
		return;

	if (atomic_read(&memcg->moving_account) <= 0)
		return;

	spin_lock_irqsave(&memcg->move_lock, flags);
	if (memcg != page->mem_cgroup) {
		spin_unlock_irqrestore(&memcg->move_lock, flags);
		goto again;
	}

	/*
	 * When charge migration first begins, we can have locked and
	 * unlocked page stat updates happening concurrently.  Track
	 * the task who has the lock for unlock_page_memcg().
	 */
	memcg->move_lock_task = current;
	memcg->move_lock_flags = flags;

	return;
}
EXPORT_SYMBOL(lock_page_memcg);

/**
 * unlock_page_memcg - unlock a page->mem_cgroup binding
 * @page: the page
 */
void unlock_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg = page->mem_cgroup;

	if (memcg && memcg->move_lock_task == current) {
		unsigned long flags = memcg->move_lock_flags;

		memcg->move_lock_task = NULL;
		memcg->move_lock_flags = 0;

		spin_unlock_irqrestore(&memcg->move_lock, flags);
	}

	rcu_read_unlock();
}
EXPORT_SYMBOL(unlock_page_memcg);

/*
 * Size of the first charge trial.  "32" comes from vmscan.c's
 * SWAP_CLUSTER_MAX; bigger machines may eventually want a bigger batch.
 */
#define CHARGE_BATCH	32U
struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* this is never the root cgroup */
	unsigned int nr_pages;
	struct work_struct work;
	unsigned long flags;
#define FLUSHING_CACHED_CHARGE	0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static DEFINE_MUTEX(percpu_charge_mutex);
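
/*
 * Illustrative flow of the stock machinery: try_charge() below charges
 * CHARGE_BATCH pages to the page counters even when the caller asked
 * for fewer, and parks the surplus in this CPU's stock via
 * refill_stock(); the next charge to the same memcg on this CPU is then
 * served by consume_stock() without touching the shared page counters.
 */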
/**
 * consume_stock: Try to consume stocked charge on this cpu.
 * @memcg: memcg to consume from.
 * @nr_pages: how many pages to charge.
 *
 * The charges will only happen if @memcg matches the current cpu's memcg
 * stock, and at least @nr_pages are available in that stock.  Failure to
 * service an allocation will refill the stock.
 *
 * returns true if successful, false otherwise.
 */
static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;
	bool ret = false;

	if (nr_pages > CHARGE_BATCH)
		return ret;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
		stock->nr_pages -= nr_pages;
		ret = true;
	}

	local_irq_restore(flags);

	return ret;
}

/*
 * Return the charges cached in the per-cpu stock to the page counters
 * and reset the cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (stock->nr_pages) {
		page_counter_uncharge(&old->memory, stock->nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&old->memsw, stock->nr_pages);
		css_put_many(&old->css, stock->nr_pages);
		stock->nr_pages = 0;
	}
	stock->cached = NULL;
}

static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	drain_stock(stock);
	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);

	local_irq_restore(flags);
}

/*
 * Cache @nr_pages charges to the local per-cpu stock, to be consumed by
 * consume_stock() later.
 */
static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock;
	unsigned long flags;

	local_irq_save(flags);

	stock = this_cpu_ptr(&memcg_stock);
	if (stock->cached != memcg) { /* reset if necessary */
		drain_stock(stock);
		stock->cached = memcg;
	}
	stock->nr_pages += nr_pages;

	local_irq_restore(flags);
}

/*
 * Drain all per-CPU charge caches for the subtree of the hierarchy
 * rooted at @root_memcg.
 */
static void drain_all_stock(struct mem_cgroup *root_memcg)
{
	int cpu, curcpu;

	/* If someone's already draining, avoid adding more workers. */
	if (!mutex_trylock(&percpu_charge_mutex))
		return;
	/* Notify other cpus that system-wide "drain" is running */
	get_online_cpus();
	curcpu = get_cpu();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;

		memcg = stock->cached;
		if (!memcg || !stock->nr_pages)
			continue;
		if (!mem_cgroup_is_descendant(memcg, root_memcg))
			continue;
		if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
			if (cpu == curcpu)
				drain_local_stock(&stock->work);
			else
				schedule_work_on(cpu, &stock->work);
		}
	}
	put_cpu();
	put_online_cpus();
	mutex_unlock(&percpu_charge_mutex);
}

static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
				      unsigned long action,
				      void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct memcg_stock_pcp *stock;

	if (action == CPU_ONLINE)
		return NOTIFY_OK;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);
	return NOTIFY_OK;
}

static void reclaim_high(struct mem_cgroup *memcg,
			 unsigned int nr_pages,
			 gfp_t gfp_mask)
{
	do {
		if (page_counter_read(&memcg->memory) <= memcg->high)
			continue;
		mem_cgroup_events(memcg, MEMCG_HIGH, 1);
		try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
	} while ((memcg = parent_mem_cgroup(memcg)));
}

static void high_work_func(struct work_struct *work)
{
	struct mem_cgroup *memcg;

	memcg = container_of(work, struct mem_cgroup, high_work);
	reclaim_high(memcg, CHARGE_BATCH, GFP_KERNEL);
}

/*
 * Scheduled by try_charge() to be executed from the userland return path
 * and reclaims memory over the high limit.
 */
void mem_cgroup_handle_over_high(void)
{
	unsigned int nr_pages = current->memcg_nr_pages_over_high;
	struct mem_cgroup *memcg;

	if (likely(!nr_pages))
		return;

	memcg = get_mem_cgroup_from_mm(current->mm);
	reclaim_high(memcg, nr_pages, GFP_KERNEL);
	css_put(&memcg->css);
	current->memcg_nr_pages_over_high = 0;
}

static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
		      unsigned int nr_pages)
{
	unsigned int batch = max(CHARGE_BATCH, nr_pages);
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup *mem_over_limit;
	struct page_counter *counter;
	unsigned long nr_reclaimed;
	bool may_swap = true;
	bool drained = false;

	if (mem_cgroup_is_root(memcg))
		return 0;
retry:
	if (consume_stock(memcg, nr_pages))
		return 0;

	if (!do_memsw_account() ||
	    page_counter_try_charge(&memcg->memsw, batch, &counter)) {
		if (page_counter_try_charge(&memcg->memory, batch, &counter))
			goto done_restock;
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, batch);
		mem_over_limit = mem_cgroup_from_counter(counter, memory);
	} else {
		mem_over_limit = mem_cgroup_from_counter(counter, memsw);
		may_swap = false;
	}

	if (batch > nr_pages) {
		batch = nr_pages;
		goto retry;
	}

	/*
	 * Unlike in global OOM situations, memcg is not in a physical
	 * memory shortage.  Allow dying and OOM-killed tasks to
	 * bypass the last charges so that they can exit quickly and
	 * free their memory.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE) ||
		     fatal_signal_pending(current) ||
		     current->flags & PF_EXITING))
		goto force;

	/*
	 * Prevent unbounded recursion when reclaim operations need to
	 * allocate memory.  This might exceed the limits temporarily,
	 * but we prefer facilitating memory reclaim and getting back
	 * under the limit over triggering OOM kills in these cases.
	 */
	if (unlikely(current->flags & PF_MEMALLOC))
		goto force;

	if (unlikely(task_in_memcg_oom(current)))
		goto nomem;

	if (!gfpflags_allow_blocking(gfp_mask))
		goto nomem;

	mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);

	nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
						    gfp_mask, may_swap);

	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		goto retry;

	if (!drained) {
		drain_all_stock(mem_over_limit);
		drained = true;
		goto retry;
	}

	if (gfp_mask & __GFP_NORETRY)
		goto nomem;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages.  Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
		goto retry;
	/*
	 * At task move, charge accounts can be doubly counted.  So, it's
	 * better to wait until the end of task_move if something is going on.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		goto retry;

	if (nr_retries--)
		goto retry;

	if (gfp_mask & __GFP_NOFAIL)
		goto force;

	if (fatal_signal_pending(current))
		goto force;

	mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);

	mem_cgroup_oom(mem_over_limit, gfp_mask,
		       get_order(nr_pages * PAGE_SIZE));
nomem:
	if (!(gfp_mask & __GFP_NOFAIL))
		return -ENOMEM;
force:
	/*
	 * The allocation either can't fail or will lead to more memory
	 * being freed very soon.  Allow memory usage to go over the limit
	 * temporarily by force charging it.
	 */
	page_counter_charge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_charge(&memcg->memsw, nr_pages);
	css_get_many(&memcg->css, nr_pages);

	return 0;

done_restock:
	css_get_many(&memcg->css, batch);
	if (batch > nr_pages)
		refill_stock(memcg, batch - nr_pages);

	/*
	 * If the hierarchy is above the normal consumption range, schedule
	 * reclaim on returning to userland.  We can perform reclaim here
	 * if __GFP_RECLAIM but let's always punt for simplicity and so that
	 * GFP_KERNEL can consistently be used during reclaim.  @memcg is
	 * not recorded as it most likely matches current's and won't
	 * change in the meantime.  As high limit is checked again before
	 * reclaim, the cost of mismatch is negligible.
	 */
	do {
		if (page_counter_read(&memcg->memory) > memcg->high) {
			/* Don't bother a random interrupted task */
			if (in_interrupt()) {
				schedule_work(&memcg->high_work);
				break;
			}
			current->memcg_nr_pages_over_high += batch;
			set_notify_resume(current);
			break;
		}
	} while ((memcg = parent_mem_cgroup(memcg)));

	return 0;
}

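/*
 * Charging protocol, for orientation (a summary, not original text):
 * callers reserve pages with try_charge() above, then bind the
 * reservation to a page with commit_charge(); if the page ends up
 * unused, cancel_charge() below returns the reservation to the counters.
 */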
static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (mem_cgroup_is_root(memcg))
		return;

	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);

	css_put_many(&memcg->css, nr_pages);
}

static void lock_page_lru(struct page *page, int *isolated)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(zone_lru_lock(zone));
	if (PageLRU(page)) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
		ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_lru(page));
		*isolated = 1;
	} else
		*isolated = 0;
}

static void unlock_page_lru(struct page *page, int isolated)
{
	struct zone *zone = page_zone(page);

	if (isolated) {
		struct lruvec *lruvec;

		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
		VM_BUG_ON_PAGE(PageLRU(page), page);
		SetPageLRU(page);
		add_page_to_lru_list(page, lruvec, page_lru(page));
	}
	spin_unlock_irq(zone_lru_lock(zone));
}

static void commit_charge(struct page *page, struct mem_cgroup *memcg,
			  bool lrucare)
{
	int isolated;

	VM_BUG_ON_PAGE(page->mem_cgroup, page);

	/*
	 * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
	 * may already be on some other mem_cgroup's LRU.  Take care of it.
	 */
	if (lrucare)
		lock_page_lru(page, &isolated);

	/*
	 * Nobody should be changing or seriously looking at
	 * page->mem_cgroup at this point:
	 *
	 * - the page is uncharged
	 *
	 * - the page is off-LRU
	 *
	 * - an anonymous fault has exclusive page access, except for
	 *   a locked page table
	 *
	 * - a page cache insertion, a swapin fault, or a migration
	 *   have the page locked
	 */
	page->mem_cgroup = memcg;

	if (lrucare)
		unlock_page_lru(page, isolated);
}

#ifndef CONFIG_SLOB
static int memcg_alloc_cache_id(void)
{
	int id, size;
	int err;

	id = ida_simple_get(&memcg_cache_ida,
			    0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
	if (id < 0)
		return id;

	if (id < memcg_nr_cache_ids)
		return id;

	/*
	 * There's no space for the new id in memcg_caches arrays,
	 * so we have to grow them.
	 */
	down_write(&memcg_cache_ids_sem);

	size = 2 * (id + 1);
	if (size < MEMCG_CACHES_MIN_SIZE)
		size = MEMCG_CACHES_MIN_SIZE;
	else if (size > MEMCG_CACHES_MAX_SIZE)
		size = MEMCG_CACHES_MAX_SIZE;

	err = memcg_update_all_caches(size);
	if (!err)
		err = memcg_update_all_list_lrus(size);
	if (!err)
		memcg_nr_cache_ids = size;

	up_write(&memcg_cache_ids_sem);

	if (err) {
		ida_simple_remove(&memcg_cache_ida, id);
		return err;
	}
	return id;
}

static void memcg_free_cache_id(int id)
{
	ida_simple_remove(&memcg_cache_ida, id);
}

struct memcg_kmem_cache_create_work {
	struct mem_cgroup *memcg;
	struct kmem_cache *cachep;
	struct work_struct work;
};

static void memcg_kmem_cache_create_func(struct work_struct *w)
{
	struct memcg_kmem_cache_create_work *cw =
		container_of(w, struct memcg_kmem_cache_create_work, work);
	struct mem_cgroup *memcg = cw->memcg;
	struct kmem_cache *cachep = cw->cachep;

	memcg_create_kmem_cache(memcg, cachep);

	css_put(&memcg->css);
	kfree(cw);
}

/*
 * Enqueue the creation of a per-memcg kmem_cache.
 */
static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
					       struct kmem_cache *cachep)
{
	struct memcg_kmem_cache_create_work *cw;

	cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
	if (!cw)
		return;

	css_get(&memcg->css);

	cw->memcg = memcg;
	cw->cachep = cachep;
	INIT_WORK(&cw->work, memcg_kmem_cache_create_func);

	schedule_work(&cw->work);
}

static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
					     struct kmem_cache *cachep)
{
	/*
	 * We need to stop accounting when we kmalloc, because if the
	 * corresponding kmalloc cache is not yet created, the first
	 * allocation in __memcg_schedule_kmem_cache_create() will recurse.
	 *
	 * However, it is better to enclose the whole function.  Depending
	 * on the debugging options enabled, INIT_WORK(), for instance, can
	 * trigger an allocation.  This too would make us recurse.  Because
	 * at this point we can't allow ourselves back into
	 * memcg_kmem_get_cache(), the safest choice is to wrap the whole
	 * function.
	 */
	current->memcg_kmem_skip_account = 1;
	__memcg_schedule_kmem_cache_create(memcg, cachep);
	current->memcg_kmem_skip_account = 0;
}

static inline bool memcg_kmem_bypass(void)
{
	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
		return true;
	return false;
}

/**
 * memcg_kmem_get_cache: select the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 *
 * Return the kmem_cache we're supposed to use for a slab allocation.
 * We try to use the current memcg's version of the cache.
 *
 * If the cache does not exist yet, if we are the first user of it, we
 * create it asynchronously in a workqueue and let the current allocation
 * go through with the original cache.
 *
 * Can't be called in interrupt context or from kernel threads.
 * This function needs to be called with rcu_read_lock() held.
 */
struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
{
	struct mem_cgroup *memcg;
	struct kmem_cache *memcg_cachep;
	int kmemcg_id;

	VM_BUG_ON(!is_root_cache(cachep));

	if (memcg_kmem_bypass())
		return cachep;

	if (current->memcg_kmem_skip_account)
		return cachep;

	memcg = get_mem_cgroup_from_mm(current->mm);
	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
	if (kmemcg_id < 0)
		goto out;

	memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
	if (likely(memcg_cachep))
		return memcg_cachep;

	/*
	 * The per-memcg cache doesn't exist yet.  Creating it right here
	 * would guarantee the allocation lands in the new cache, but cache
	 * creation takes locks that can clash with the allocation context,
	 * so schedule the creation asynchronously instead and serve this
	 * allocation from the original (root) cache.
	 */
	memcg_schedule_kmem_cache_create(memcg, cachep);
out:
	css_put(&memcg->css);
	return cachep;
}

/**
 * memcg_kmem_put_cache: drop the reference taken by memcg_kmem_get_cache
 * @cachep: the cache returned by memcg_kmem_get_cache
 */
void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
	if (!is_root_cache(cachep))
		css_put(&cachep->memcg_params.memcg->css);
}

/**
 * memcg_kmem_charge_memcg: charge a kmem page
 * @page: page to charge
 * @gfp: reclaim mode
 * @order: allocation order
 * @memcg: memory cgroup to charge
 *
 * Returns 0 on success, an error code on failure.
 */
int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
			    struct mem_cgroup *memcg)
{
	unsigned int nr_pages = 1 << order;
	struct page_counter *counter;
	int ret;

	ret = try_charge(memcg, gfp, nr_pages);
	if (ret)
		return ret;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
	    !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
		cancel_charge(memcg, nr_pages);
		return -ENOMEM;
	}

	page->mem_cgroup = memcg;

	return 0;
}

/**
 * memcg_kmem_charge: charge a kmem page to the current memory cgroup
 * @page: page to charge
 * @gfp: reclaim mode
 * @order: allocation order
 *
 * Returns 0 on success, an error code on failure.
 */
int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
{
	struct mem_cgroup *memcg;
	int ret = 0;

	if (memcg_kmem_bypass())
		return 0;

	memcg = get_mem_cgroup_from_mm(current->mm);
	if (!mem_cgroup_is_root(memcg)) {
		ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
		if (!ret)
			__SetPageKmemcg(page);
	}
	css_put(&memcg->css);
	return ret;
}

/**
 * memcg_kmem_uncharge: uncharge a kmem page
 * @page: page to uncharge
 * @order: allocation order
 */
void memcg_kmem_uncharge(struct page *page, int order)
{
	struct mem_cgroup *memcg = page->mem_cgroup;
	unsigned int nr_pages = 1 << order;

	if (!memcg)
		return;

	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		page_counter_uncharge(&memcg->kmem, nr_pages);

	page_counter_uncharge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_uncharge(&memcg->memsw, nr_pages);

	page->mem_cgroup = NULL;

	/* slab pages do not have PageKmemcg flag set */
	if (PageKmemcg(page))
		__ClearPageKmemcg(page);

	css_put_many(&memcg->css, nr_pages);
}
#endif /* !CONFIG_SLOB */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * Because tail pages are not marked as "used", set them.  We're under
 * zone_lru_lock and migration entries are set up in all page mappings.
 */
void mem_cgroup_split_huge_fixup(struct page *head)
{
	int i;

	if (mem_cgroup_disabled())
		return;

	for (i = 1; i < HPAGE_PMD_NR; i++)
		head[i].mem_cgroup = head->mem_cgroup;

	__this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
		       HPAGE_PMD_NR);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_MEMCG_SWAP
static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
				       bool charge)
{
	int val = (charge) ? 1 : -1;
	this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
}

/**
 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
 * @entry: swap entry to be moved
 * @from:  mem_cgroup which the entry is moved from
 * @to:  mem_cgroup which the entry is moved to
 *
 * It succeeds only when the swap_cgroup's record for this entry is the
 * same as the mem_cgroup's id of @from.
 *
 * Returns 0 on success, -EINVAL on failure.
 *
 * The caller must have charged to @to, IOW, called page_counter_charge()
 * about both res and memsw, and called css_get().
 */
static int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	unsigned short old_id, new_id;

	old_id = mem_cgroup_id(from);
	new_id = mem_cgroup_id(to);

	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
		mem_cgroup_swap_statistics(from, false);
		mem_cgroup_swap_statistics(to, true);
		return 0;
	}
	return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
				struct mem_cgroup *from, struct mem_cgroup *to)
{
	return -EINVAL;
}
#endif

static DEFINE_MUTEX(memcg_limit_mutex);

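/*
 * Note on the two resize paths below: memory.limit may not be raised
 * above memsw.limit, and memsw.limit may not be lowered below
 * memory.limit, since the memsw counter accounts memory plus swap and
 * must always cover the memory counter.
 */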
static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
				   unsigned long limit)
{
	unsigned long curusage;
	unsigned long oldusage;
	bool enlarge = false;
	int retry_count;
	int ret;

	/*
	 * For keeping hierarchical reclaim simple, how long we should retry
	 * depends on the caller.  We set our retry count to be a function
	 * of the number of children we have to visit in this loop.
	 */
	retry_count = MEM_CGROUP_RECLAIM_RETRIES *
		      mem_cgroup_count_children(memcg);

	oldusage = page_counter_read(&memcg->memory);

	do {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		mutex_lock(&memcg_limit_mutex);
		if (limit > memcg->memsw.limit) {
			mutex_unlock(&memcg_limit_mutex);
			ret = -EINVAL;
			break;
		}
		if (limit > memcg->memory.limit)
			enlarge = true;
		ret = page_counter_limit(&memcg->memory, limit);
		mutex_unlock(&memcg_limit_mutex);

		if (!ret)
			break;

		try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);

		curusage = page_counter_read(&memcg->memory);
		/* Usage is reduced ? */
		if (curusage >= oldusage)
			retry_count--;
		else
			oldusage = curusage;
	} while (retry_count);

	if (!ret && enlarge)
		memcg_oom_recover(memcg);

	return ret;
}

static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
					 unsigned long limit)
{
	unsigned long curusage;
	unsigned long oldusage;
	bool enlarge = false;
	int retry_count;
	int ret;

	/* see mem_cgroup_resize_limit */
	retry_count = MEM_CGROUP_RECLAIM_RETRIES *
		      mem_cgroup_count_children(memcg);

	oldusage = page_counter_read(&memcg->memsw);

	do {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		mutex_lock(&memcg_limit_mutex);
		if (limit < memcg->memory.limit) {
			mutex_unlock(&memcg_limit_mutex);
			ret = -EINVAL;
			break;
		}
		if (limit > memcg->memsw.limit)
			enlarge = true;
		ret = page_counter_limit(&memcg->memsw, limit);
		mutex_unlock(&memcg_limit_mutex);

		if (!ret)
			break;

		try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);

		curusage = page_counter_read(&memcg->memsw);
		/* Usage is reduced ? */
		if (curusage >= oldusage)
			retry_count--;
		else
			oldusage = curusage;
	} while (retry_count);

	if (!ret && enlarge)
		memcg_oom_recover(memcg);

	return ret;
}

unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	unsigned long nr_reclaimed = 0;
	struct mem_cgroup_per_node *mz, *next_mz = NULL;
	unsigned long reclaimed;
	int loop = 0;
	struct mem_cgroup_tree_per_node *mctz;
	unsigned long excess;
	unsigned long nr_scanned;

	if (order > 0)
		return 0;

	mctz = soft_limit_tree_node(pgdat->node_id);

	/*
	 * Do not even bother to check the largest node if the root
	 * is empty.  Do it lockless to prevent lock bouncing.  Might
	 * be a bit racy but we do not need exact numbers.
	 */
	if (RB_EMPTY_ROOT(&mctz->rb_root))
		return 0;

	/*
	 * This loop can run a while, especially if mem_cgroups continuously
	 * keep exceeding their soft limit and putting the system under
	 * pressure.
	 */
	do {
		if (next_mz)
			mz = next_mz;
		else
			mz = mem_cgroup_largest_soft_limit_node(mctz);
		if (!mz)
			break;

		nr_scanned = 0;
		reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
						    gfp_mask, &nr_scanned);
		nr_reclaimed += reclaimed;
		*total_scanned += nr_scanned;
		spin_lock_irq(&mctz->lock);
		__mem_cgroup_remove_exceeded(mz, mctz);

		/*
		 * If we failed to reclaim anything from this memory cgroup
		 * it is time to move on to the next cgroup
		 */
		next_mz = NULL;
		if (!reclaimed)
			next_mz = __mem_cgroup_largest_soft_limit_node(mctz);

		excess = soft_limit_excess(mz->memcg);
		/*
		 * One school of thought says that we should not add
		 * back the node to the tree if reclaim returns 0.
		 * But our reclaim could return 0, simply because due
		 * to priority we are exposing a smaller subset of
		 * memory to reclaim from.  Consider this as a longer
		 * term TODO.
		 */
		/* If excess == 0, no tree ops */
		__mem_cgroup_insert_exceeded(mz, mctz, excess);
		spin_unlock_irq(&mctz->lock);
		css_put(&mz->memcg->css);
		loop++;
		/*
		 * Could not reclaim anything and there are no more
		 * mem cgroups to try or we seem to be looping without
		 * reclaiming anything.
		 */
		if (!nr_reclaimed &&
			(next_mz == NULL ||
			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
			break;
	} while (!nr_reclaimed);
	if (next_mz)
		css_put(&next_mz->memcg->css);
	return nr_reclaimed;
}

/*
 * Test whether @memcg has children, dead or alive.  Note that this
 * function doesn't care whether @memcg has use_hierarchy enabled and
 * returns %true if there are child csses according to the cgroup
 * hierarchy.  Testing use_hierarchy is the caller's responsibility.
 */
static inline bool memcg_has_children(struct mem_cgroup *memcg)
{
	bool ret;

	rcu_read_lock();
	ret = css_next_child(NULL, &memcg->css);
	rcu_read_unlock();
	return ret;
}

/*
 * Reclaims as many pages from the given memcg as possible.
 *
 * Caller is responsible for holding a css reference for the memcg.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
{
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;

	/* we call try-to-free pages to make this cgroup empty */
	lru_add_drain_all();
	/* try to free all pages in this cgroup */
	while (nr_retries && page_counter_read(&memcg->memory)) {
		int progress;

		if (signal_pending(current))
			return -EINTR;

		progress = try_to_free_mem_cgroup_pages(memcg, 1,
							GFP_KERNEL, true);
		if (!progress) {
			nr_retries--;
			/* maybe some writeback is necessary */
			congestion_wait(BLK_RW_ASYNC, HZ/10);
		}
	}

	return 0;
}

static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
					    char *buf, size_t nbytes,
					    loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));

	if (mem_cgroup_is_root(memcg))
		return -EINVAL;
	return mem_cgroup_force_empty(memcg) ?: nbytes;
}

static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
				     struct cftype *cft)
{
	return mem_cgroup_from_css(css)->use_hierarchy;
}

static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
				      struct cftype *cft, u64 val)
{
	int retval = 0;
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);

	if (memcg->use_hierarchy == val)
		return 0;

	/*
	 * If the parent's use_hierarchy is set, we can't make any
	 * modifications in the child subtrees.  If it is unset, then the
	 * change can occur, provided the current cgroup has no children.
	 *
	 * For the root cgroup, parent_memcg is NULL; we allow the value to
	 * be set if there are no children.
	 */
	if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
	    (val == 1 || val == 0)) {
		if (!memcg_has_children(memcg))
			memcg->use_hierarchy = val;
		else
			retval = -EBUSY;
	} else
		retval = -EINVAL;

	return retval;
}

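/*
 * Hierarchical totals (a summary, not original text): the two helpers
 * below sum a statistic over @memcg and all of its descendants.
 * mem_cgroup_usage() relies on the same walk for the root cgroup, whose
 * usage is not charged to its own page counters (try_charge() skips the
 * root) and must therefore be summed from the hierarchy's statistics.
 */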
static void tree_stat(struct mem_cgroup *memcg, unsigned long *stat)
{
	struct mem_cgroup *iter;
	int i;

	memset(stat, 0, sizeof(*stat) * MEMCG_NR_STAT);

	for_each_mem_cgroup_tree(iter, memcg) {
		for (i = 0; i < MEMCG_NR_STAT; i++)
			stat[i] += mem_cgroup_read_stat(iter, i);
	}
}

static void tree_events(struct mem_cgroup *memcg, unsigned long *events)
{
	struct mem_cgroup *iter;
	int i;

	memset(events, 0, sizeof(*events) * MEMCG_NR_EVENTS);

	for_each_mem_cgroup_tree(iter, memcg) {
		for (i = 0; i < MEMCG_NR_EVENTS; i++)
			events[i] += mem_cgroup_read_events(iter, i);
	}
}

static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
{
	unsigned long val = 0;

	if (mem_cgroup_is_root(memcg)) {
		struct mem_cgroup *iter;

		for_each_mem_cgroup_tree(iter, memcg) {
			val += mem_cgroup_read_stat(iter,
					MEM_CGROUP_STAT_CACHE);
			val += mem_cgroup_read_stat(iter,
					MEM_CGROUP_STAT_RSS);
			if (swap)
				val += mem_cgroup_read_stat(iter,
						MEM_CGROUP_STAT_SWAP);
		}
	} else {
		if (!swap)
			val = page_counter_read(&memcg->memory);
		else
			val = page_counter_read(&memcg->memsw);
	}
	return val;
}

enum {
	RES_USAGE,
	RES_LIMIT,
	RES_MAX_USAGE,
	RES_FAILCNT,
	RES_SOFT_LIMIT,
};

static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
			       struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	struct page_counter *counter;

	switch (MEMFILE_TYPE(cft->private)) {
	case _MEM:
		counter = &memcg->memory;
		break;
	case _MEMSWAP:
		counter = &memcg->memsw;
		break;
	case _KMEM:
		counter = &memcg->kmem;
		break;
	case _TCP:
		counter = &memcg->tcpmem;
		break;
	default:
		BUG();
	}

	switch (MEMFILE_ATTR(cft->private)) {
	case RES_USAGE:
		if (counter == &memcg->memory)
			return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
		if (counter == &memcg->memsw)
			return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
		return (u64)page_counter_read(counter) * PAGE_SIZE;
	case RES_LIMIT:
		return (u64)counter->limit * PAGE_SIZE;
	case RES_MAX_USAGE:
		return (u64)counter->watermark * PAGE_SIZE;
	case RES_FAILCNT:
		return counter->failcnt;
	case RES_SOFT_LIMIT:
		return (u64)memcg->soft_limit * PAGE_SIZE;
	default:
		BUG();
	}
}

2825#ifndef CONFIG_SLOB
2826static int memcg_online_kmem(struct mem_cgroup *memcg)
2827{
2828 int memcg_id;
2829
2830 if (cgroup_memory_nokmem)
2831 return 0;
2832
2833 BUG_ON(memcg->kmemcg_id >= 0);
2834 BUG_ON(memcg->kmem_state);
2835
2836 memcg_id = memcg_alloc_cache_id();
2837 if (memcg_id < 0)
2838 return memcg_id;
2839
2840 static_branch_inc(&memcg_kmem_enabled_key);
2841
	/*
	 * A memory cgroup is considered kmem-online as soon as it gets
	 * kmemcg_id. Setting the id after enabling static branching will
	 * guarantee no one starts accounting before all call sites are
	 * patched.
	 */
2847 memcg->kmemcg_id = memcg_id;
2848 memcg->kmem_state = KMEM_ONLINE;
2849
2850 return 0;
2851}
2852
2853static void memcg_offline_kmem(struct mem_cgroup *memcg)
2854{
2855 struct cgroup_subsys_state *css;
2856 struct mem_cgroup *parent, *child;
2857 int kmemcg_id;
2858
2859 if (memcg->kmem_state != KMEM_ONLINE)
2860 return;
2861
	/*
	 * Flip the state away from KMEM_ONLINE first, so that no new kmem
	 * accounting can start against this cgroup while its caches are
	 * deactivated and its list_lrus are drained below.
	 */
2867 memcg->kmem_state = KMEM_ALLOCATED;
2868
2869 memcg_deactivate_kmem_caches(memcg);
2870
2871 kmemcg_id = memcg->kmemcg_id;
2872 BUG_ON(kmemcg_id < 0);
2873
2874 parent = parent_mem_cgroup(memcg);
2875 if (!parent)
2876 parent = root_mem_cgroup;
2877
	/*
	 * Change kmemcg_id of this cgroup and all its descendants to the
	 * parent's id, and then move all entries from this cgroup's list_lrus
	 * to ones of the parent. After we have finished, all list_lrus
	 * corresponding to this cgroup are guaranteed to remain empty. The
	 * ordering is imposed by list_lru_node->lock taken by
	 * memcg_drain_all_list_lrus().
	 */
2886 rcu_read_lock();
2887 css_for_each_descendant_pre(css, &memcg->css) {
2888 child = mem_cgroup_from_css(css);
2889 BUG_ON(child->kmemcg_id != kmemcg_id);
2890 child->kmemcg_id = parent->kmemcg_id;
2891 if (!memcg->use_hierarchy)
2892 break;
2893 }
2894 rcu_read_unlock();
2895
2896 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
2897
2898 memcg_free_cache_id(kmemcg_id);
2899}
2900
2901static void memcg_free_kmem(struct mem_cgroup *memcg)
2902{
	/* css_alloc() failed, offlining didn't happen */
2904 if (unlikely(memcg->kmem_state == KMEM_ONLINE))
2905 memcg_offline_kmem(memcg);
2906
2907 if (memcg->kmem_state == KMEM_ALLOCATED) {
2908 memcg_destroy_kmem_caches(memcg);
2909 static_branch_dec(&memcg_kmem_enabled_key);
2910 WARN_ON(page_counter_read(&memcg->kmem));
2911 }
2912}
2913#else
2914static int memcg_online_kmem(struct mem_cgroup *memcg)
2915{
2916 return 0;
2917}
2918static void memcg_offline_kmem(struct mem_cgroup *memcg)
2919{
2920}
2921static void memcg_free_kmem(struct mem_cgroup *memcg)
2922{
2923}
2924#endif
2925
2926static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
2927 unsigned long limit)
2928{
2929 int ret;
2930
2931 mutex_lock(&memcg_limit_mutex);
2932 ret = page_counter_limit(&memcg->kmem, limit);
2933 mutex_unlock(&memcg_limit_mutex);
2934 return ret;
2935}
2936
2937static int memcg_update_tcp_limit(struct mem_cgroup *memcg, unsigned long limit)
2938{
2939 int ret;
2940
2941 mutex_lock(&memcg_limit_mutex);
2942
2943 ret = page_counter_limit(&memcg->tcpmem, limit);
2944 if (ret)
2945 goto out;
2946
2947 if (!memcg->tcpmem_active) {
		/*
		 * The active flag needs to be written after the static_key
		 * update. This is what guarantees that the socket activation
		 * function is the last one to run. See the socket accounting
		 * code for details, and note that we don't mark any socket
		 * as belonging to this memcg until that flag is up.
		 *
		 * We need to do this, because static_keys will span multiple
		 * sites, but we can't control their order. If we mark a socket
		 * as accounted, but the accounting functions are not patched
		 * in yet, we'll lose accounting.
		 *
		 * We never race with the readers of tcpmem_active, because
		 * when this value changes, the code to process it is not
		 * patched in yet.
		 */
2964 static_branch_inc(&memcg_sockets_enabled_key);
2965 memcg->tcpmem_active = true;
2966 }
2967out:
2968 mutex_unlock(&memcg_limit_mutex);
2969 return ret;
2970}
2971
/*
 * Common write handler for the legacy limit and soft limit files.  The
 * value is parsed with page_counter_memparse(), so plain byte counts,
 * k/m/g suffixes and "-1" (no limit) are all accepted.
 */
2976static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
2977 char *buf, size_t nbytes, loff_t off)
2978{
2979 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2980 unsigned long nr_pages;
2981 int ret;
2982
2983 buf = strstrip(buf);
2984 ret = page_counter_memparse(buf, "-1", &nr_pages);
2985 if (ret)
2986 return ret;
2987
2988 switch (MEMFILE_ATTR(of_cft(of)->private)) {
2989 case RES_LIMIT:
2990 if (mem_cgroup_is_root(memcg)) {
2991 ret = -EINVAL;
2992 break;
2993 }
2994 switch (MEMFILE_TYPE(of_cft(of)->private)) {
2995 case _MEM:
2996 ret = mem_cgroup_resize_limit(memcg, nr_pages);
2997 break;
2998 case _MEMSWAP:
2999 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
3000 break;
3001 case _KMEM:
3002 ret = memcg_update_kmem_limit(memcg, nr_pages);
3003 break;
3004 case _TCP:
3005 ret = memcg_update_tcp_limit(memcg, nr_pages);
3006 break;
3007 }
3008 break;
3009 case RES_SOFT_LIMIT:
3010 memcg->soft_limit = nr_pages;
3011 ret = 0;
3012 break;
3013 }
3014 return ret ?: nbytes;
3015}
3016
3017static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3018 size_t nbytes, loff_t off)
3019{
3020 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3021 struct page_counter *counter;
3022
3023 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3024 case _MEM:
3025 counter = &memcg->memory;
3026 break;
3027 case _MEMSWAP:
3028 counter = &memcg->memsw;
3029 break;
3030 case _KMEM:
3031 counter = &memcg->kmem;
3032 break;
3033 case _TCP:
3034 counter = &memcg->tcpmem;
3035 break;
3036 default:
3037 BUG();
3038 }
3039
3040 switch (MEMFILE_ATTR(of_cft(of)->private)) {
3041 case RES_MAX_USAGE:
3042 page_counter_reset_watermark(counter);
3043 break;
3044 case RES_FAILCNT:
3045 counter->failcnt = 0;
3046 break;
3047 default:
3048 BUG();
3049 }
3050
3051 return nbytes;
3052}
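/*
 * Example (illustrative): the reset files above ignore the written
 * value and clear the counter on any write, e.g.
 *
 *	echo 0 > memory.max_usage_in_bytes
 *	echo 0 > memory.failcnt
 */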
3053
3054static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3055 struct cftype *cft)
3056{
3057 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3058}
3059
3060#ifdef CONFIG_MMU
3061static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3062 struct cftype *cft, u64 val)
3063{
3064 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3065
3066 if (val & ~MOVE_MASK)
3067 return -EINVAL;
3068
	/*
	 * No kind of locking is needed in here, because ->can_attach() will
	 * check this value once in the beginning of the process, and then
	 * carry on with stale data. This means that changes to this value
	 * will only affect task migrations starting after the change.
	 */
3075 memcg->move_charge_at_immigrate = val;
3076 return 0;
3077}
3078#else
3079static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3080 struct cftype *cft, u64 val)
3081{
3082 return -ENOSYS;
3083}
3084#endif
3085
3086#ifdef CONFIG_NUMA
3087static int memcg_numa_stat_show(struct seq_file *m, void *v)
3088{
3089 struct numa_stat {
3090 const char *name;
3091 unsigned int lru_mask;
3092 };
3093
3094 static const struct numa_stat stats[] = {
3095 { "total", LRU_ALL },
3096 { "file", LRU_ALL_FILE },
3097 { "anon", LRU_ALL_ANON },
3098 { "unevictable", BIT(LRU_UNEVICTABLE) },
3099 };
3100 const struct numa_stat *stat;
3101 int nid;
3102 unsigned long nr;
3103 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3104
3105 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3106 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3107 seq_printf(m, "%s=%lu", stat->name, nr);
3108 for_each_node_state(nid, N_MEMORY) {
3109 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3110 stat->lru_mask);
3111 seq_printf(m, " N%d=%lu", nid, nr);
3112 }
3113 seq_putc(m, '\n');
3114 }
3115
3116 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3117 struct mem_cgroup *iter;
3118
3119 nr = 0;
3120 for_each_mem_cgroup_tree(iter, memcg)
3121 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3122 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3123 for_each_node_state(nid, N_MEMORY) {
3124 nr = 0;
3125 for_each_mem_cgroup_tree(iter, memcg)
3126 nr += mem_cgroup_node_nr_lru_pages(
3127 iter, nid, stat->lru_mask);
3128 seq_printf(m, " N%d=%lu", nid, nr);
3129 }
3130 seq_putc(m, '\n');
3131 }
3132
3133 return 0;
3134}
3135#endif
3136
3137static int memcg_stat_show(struct seq_file *m, void *v)
3138{
3139 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3140 unsigned long memory, memsw;
3141 struct mem_cgroup *mi;
3142 unsigned int i;
3143
3144 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
3145 MEM_CGROUP_STAT_NSTATS);
3146 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
3147 MEM_CGROUP_EVENTS_NSTATS);
3148 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3149
3150 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3151 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
3152 continue;
3153 seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
3154 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
3155 }
3156
3157 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
3158 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
3159 mem_cgroup_read_events(memcg, i));
3160
3161 for (i = 0; i < NR_LRU_LISTS; i++)
3162 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3163 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
3164
	/* Hierarchical information */
3166 memory = memsw = PAGE_COUNTER_MAX;
3167 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3168 memory = min(memory, mi->memory.limit);
3169 memsw = min(memsw, mi->memsw.limit);
3170 }
3171 seq_printf(m, "hierarchical_memory_limit %llu\n",
3172 (u64)memory * PAGE_SIZE);
3173 if (do_memsw_account())
3174 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3175 (u64)memsw * PAGE_SIZE);
3176
3177 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3178 unsigned long long val = 0;
3179
3180 if (i == MEM_CGROUP_STAT_SWAP && !do_memsw_account())
3181 continue;
3182 for_each_mem_cgroup_tree(mi, memcg)
3183 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
3184 seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
3185 }
3186
3187 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
3188 unsigned long long val = 0;
3189
3190 for_each_mem_cgroup_tree(mi, memcg)
3191 val += mem_cgroup_read_events(mi, i);
3192 seq_printf(m, "total_%s %llu\n",
3193 mem_cgroup_events_names[i], val);
3194 }
3195
3196 for (i = 0; i < NR_LRU_LISTS; i++) {
3197 unsigned long long val = 0;
3198
3199 for_each_mem_cgroup_tree(mi, memcg)
3200 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3201 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
3202 }
3203
3204#ifdef CONFIG_DEBUG_VM
3205 {
3206 pg_data_t *pgdat;
3207 struct mem_cgroup_per_node *mz;
3208 struct zone_reclaim_stat *rstat;
3209 unsigned long recent_rotated[2] = {0, 0};
3210 unsigned long recent_scanned[2] = {0, 0};
3211
3212 for_each_online_pgdat(pgdat) {
3213 mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
3214 rstat = &mz->lruvec.reclaim_stat;
3215
3216 recent_rotated[0] += rstat->recent_rotated[0];
3217 recent_rotated[1] += rstat->recent_rotated[1];
3218 recent_scanned[0] += rstat->recent_scanned[0];
3219 recent_scanned[1] += rstat->recent_scanned[1];
3220 }
3221 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3222 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3223 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3224 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
3225 }
3226#endif
3227
3228 return 0;
3229}
3230
3231static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3232 struct cftype *cft)
3233{
3234 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3235
3236 return mem_cgroup_swappiness(memcg);
3237}
3238
3239static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3240 struct cftype *cft, u64 val)
3241{
3242 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3243
3244 if (val > 100)
3245 return -EINVAL;
3246
3247 if (css->parent)
3248 memcg->swappiness = val;
3249 else
3250 vm_swappiness = val;
3251
3252 return 0;
3253}
3254
3255static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3256{
3257 struct mem_cgroup_threshold_ary *t;
3258 unsigned long usage;
3259 int i;
3260
3261 rcu_read_lock();
3262 if (!swap)
3263 t = rcu_dereference(memcg->thresholds.primary);
3264 else
3265 t = rcu_dereference(memcg->memsw_thresholds.primary);
3266
3267 if (!t)
3268 goto unlock;
3269
3270 usage = mem_cgroup_usage(memcg, swap);
3271
	/*
	 * current_threshold points to threshold just below or equal to usage.
	 * If it's not true, a threshold was crossed after the last
	 * call of __mem_cgroup_threshold().
	 */
3277 i = t->current_threshold;
3278
	/*
	 * Iterate backward over the array of thresholds starting from
	 * current_threshold and check if a threshold is crossed.
	 * If none of the thresholds below usage is crossed, we read
	 * only one element of the array here.
	 */
3285 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3286 eventfd_signal(t->entries[i].eventfd, 1);
3287
	/* i = current_threshold + 1 */
3289 i++;
3290
	/*
	 * Iterate forward over the array of thresholds starting from
	 * current_threshold+1 and check if a threshold is crossed.
	 * If none of the thresholds above usage is crossed, we read
	 * only one element of the array here.
	 */
3297 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3298 eventfd_signal(t->entries[i].eventfd, 1);
3299
	/* Update current_threshold */
3301 t->current_threshold = i - 1;
3302unlock:
3303 rcu_read_unlock();
3304}
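/*
 * Worked example (hypothetical numbers): with thresholds {4M, 8M, 16M}
 * and current_threshold at index 1 (8M), a usage drop to 3M makes the
 * backward loop signal the 8M and 4M eventfds and leaves
 * current_threshold at -1; a later rise to 20M makes the forward loop
 * signal 4M, 8M and 16M and leaves current_threshold at index 2.
 */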
3305
3306static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3307{
3308 while (memcg) {
3309 __mem_cgroup_threshold(memcg, false);
3310 if (do_memsw_account())
3311 __mem_cgroup_threshold(memcg, true);
3312
3313 memcg = parent_mem_cgroup(memcg);
3314 }
3315}
3316
3317static int compare_thresholds(const void *a, const void *b)
3318{
3319 const struct mem_cgroup_threshold *_a = a;
3320 const struct mem_cgroup_threshold *_b = b;
3321
3322 if (_a->threshold > _b->threshold)
3323 return 1;
3324
3325 if (_a->threshold < _b->threshold)
3326 return -1;
3327
3328 return 0;
3329}
3330
3331static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
3332{
3333 struct mem_cgroup_eventfd_list *ev;
3334
3335 spin_lock(&memcg_oom_lock);
3336
3337 list_for_each_entry(ev, &memcg->oom_notify, list)
3338 eventfd_signal(ev->eventfd, 1);
3339
3340 spin_unlock(&memcg_oom_lock);
3341 return 0;
3342}
3343
3344static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
3345{
3346 struct mem_cgroup *iter;
3347
3348 for_each_mem_cgroup_tree(iter, memcg)
3349 mem_cgroup_oom_notify_cb(iter);
3350}
3351
3352static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3353 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
3354{
3355 struct mem_cgroup_thresholds *thresholds;
3356 struct mem_cgroup_threshold_ary *new;
3357 unsigned long threshold;
3358 unsigned long usage;
3359 int i, size, ret;
3360
3361 ret = page_counter_memparse(args, "-1", &threshold);
3362 if (ret)
3363 return ret;
3364
3365 mutex_lock(&memcg->thresholds_lock);
3366
3367 if (type == _MEM) {
3368 thresholds = &memcg->thresholds;
3369 usage = mem_cgroup_usage(memcg, false);
3370 } else if (type == _MEMSWAP) {
3371 thresholds = &memcg->memsw_thresholds;
3372 usage = mem_cgroup_usage(memcg, true);
3373 } else
3374 BUG();
3375
	/* Check if a threshold crossed before adding a new one */
3377 if (thresholds->primary)
3378 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3379
3380 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3381
	/* Allocate memory for new array of thresholds */
3383 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3384 GFP_KERNEL);
3385 if (!new) {
3386 ret = -ENOMEM;
3387 goto unlock;
3388 }
3389 new->size = size;
3390
	/* Copy thresholds (if any) to new array */
3392 if (thresholds->primary) {
3393 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3394 sizeof(struct mem_cgroup_threshold));
3395 }
3396
	/* Add new threshold */
3398 new->entries[size - 1].eventfd = eventfd;
3399 new->entries[size - 1].threshold = threshold;
3400
	/* Sort thresholds. Registering of new threshold isn't time-critical */
3402 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3403 compare_thresholds, NULL);
3404
	/* Find current threshold */
3406 new->current_threshold = -1;
3407 for (i = 0; i < size; i++) {
3408 if (new->entries[i].threshold <= usage) {
			/*
			 * new->current_threshold will not be used until
			 * rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
3414 ++new->current_threshold;
3415 } else
3416 break;
3417 }
3418
	/* Free old spare buffer and save old primary buffer as spare */
3420 kfree(thresholds->spare);
3421 thresholds->spare = thresholds->primary;
3422
3423 rcu_assign_pointer(thresholds->primary, new);
3424
	/* To be sure that nobody uses thresholds */
3426 synchronize_rcu();
3427
3428unlock:
3429 mutex_unlock(&memcg->thresholds_lock);
3430
3431 return ret;
3432}
3433
3434static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3435 struct eventfd_ctx *eventfd, const char *args)
3436{
3437 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
3438}
3439
3440static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
3441 struct eventfd_ctx *eventfd, const char *args)
3442{
3443 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
3444}
3445
3446static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3447 struct eventfd_ctx *eventfd, enum res_type type)
3448{
3449 struct mem_cgroup_thresholds *thresholds;
3450 struct mem_cgroup_threshold_ary *new;
3451 unsigned long usage;
3452 int i, j, size;
3453
3454 mutex_lock(&memcg->thresholds_lock);
3455
3456 if (type == _MEM) {
3457 thresholds = &memcg->thresholds;
3458 usage = mem_cgroup_usage(memcg, false);
3459 } else if (type == _MEMSWAP) {
3460 thresholds = &memcg->memsw_thresholds;
3461 usage = mem_cgroup_usage(memcg, true);
3462 } else
3463 BUG();
3464
3465 if (!thresholds->primary)
3466 goto unlock;
3467
	/* Check if a threshold crossed before removing */
3469 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3470
	/* Calculate new number of thresholds */
3472 size = 0;
3473 for (i = 0; i < thresholds->primary->size; i++) {
3474 if (thresholds->primary->entries[i].eventfd != eventfd)
3475 size++;
3476 }
3477
3478 new = thresholds->spare;
3479
	/* Set thresholds array to NULL if we don't have thresholds */
3481 if (!size) {
3482 kfree(new);
3483 new = NULL;
3484 goto swap_buffers;
3485 }
3486
3487 new->size = size;
3488
	/* Copy thresholds and find current threshold */
3490 new->current_threshold = -1;
3491 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3492 if (thresholds->primary->entries[i].eventfd == eventfd)
3493 continue;
3494
3495 new->entries[j] = thresholds->primary->entries[i];
3496 if (new->entries[j].threshold <= usage) {
			/*
			 * new->current_threshold will not be used until
			 * rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
3502 ++new->current_threshold;
3503 }
3504 j++;
3505 }
3506
3507swap_buffers:
	/* Swap primary and spare array */
3509 thresholds->spare = thresholds->primary;
3510
3511 rcu_assign_pointer(thresholds->primary, new);
3512
	/* To be sure that nobody uses thresholds */
3514 synchronize_rcu();
3515
	/* If all events are unregistered, free the spare array */
3517 if (!new) {
3518 kfree(thresholds->spare);
3519 thresholds->spare = NULL;
3520 }
3521unlock:
3522 mutex_unlock(&memcg->thresholds_lock);
3523}
3524
3525static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3526 struct eventfd_ctx *eventfd)
3527{
3528 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
3529}
3530
3531static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3532 struct eventfd_ctx *eventfd)
3533{
3534 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
3535}
3536
3537static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
3538 struct eventfd_ctx *eventfd, const char *args)
3539{
3540 struct mem_cgroup_eventfd_list *event;
3541
3542 event = kmalloc(sizeof(*event), GFP_KERNEL);
3543 if (!event)
3544 return -ENOMEM;
3545
3546 spin_lock(&memcg_oom_lock);
3547
3548 event->eventfd = eventfd;
3549 list_add(&event->list, &memcg->oom_notify);
3550
	/* already in OOM? */
3552 if (memcg->under_oom)
3553 eventfd_signal(eventfd, 1);
3554 spin_unlock(&memcg_oom_lock);
3555
3556 return 0;
3557}
3558
3559static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
3560 struct eventfd_ctx *eventfd)
3561{
3562 struct mem_cgroup_eventfd_list *ev, *tmp;
3563
3564 spin_lock(&memcg_oom_lock);
3565
3566 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
3567 if (ev->eventfd == eventfd) {
3568 list_del(&ev->list);
3569 kfree(ev);
3570 }
3571 }
3572
3573 spin_unlock(&memcg_oom_lock);
3574}
3575
3576static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3577{
3578 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3579
3580 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
3581 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
3582 return 0;
3583}
3584
3585static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3586 struct cftype *cft, u64 val)
3587{
3588 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3589
	/* cannot set to root cgroup and only 0 and 1 are allowed */
3591 if (!css->parent || !((val == 0) || (val == 1)))
3592 return -EINVAL;
3593
3594 memcg->oom_kill_disable = val;
3595 if (!val)
3596 memcg_oom_recover(memcg);
3597
3598 return 0;
3599}
3600
3601#ifdef CONFIG_CGROUP_WRITEBACK
3602
3603struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
3604{
3605 return &memcg->cgwb_list;
3606}
3607
3608static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3609{
3610 return wb_domain_init(&memcg->cgwb_domain, gfp);
3611}
3612
3613static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3614{
3615 wb_domain_exit(&memcg->cgwb_domain);
3616}
3617
3618static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3619{
3620 wb_domain_size_changed(&memcg->cgwb_domain);
3621}
3622
3623struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3624{
3625 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3626
3627 if (!memcg->css.parent)
3628 return NULL;
3629
3630 return &memcg->cgwb_domain;
3631}
3632
/**
 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
 * @wb: bdi_writeback in question
 * @pfilepages: out parameter for number of file pages
 * @pheadroom: out parameter for number of allocatable pages according to memcg
 * @pdirty: out parameter for number of dirty pages
 * @pwriteback: out parameter for number of pages under writeback
 *
 * Determine the numbers of file, headroom, dirty, and writeback pages in
 * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
 * is a bit more involved.
 *
 * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
 * headroom is calculated as the lowest headroom of itself and the
 * ancestors.  Note that this doesn't consider the actual amount of
 * available memory in the system.  The caller should further cap
 * *@pheadroom accordingly.
 */
3651void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3652 unsigned long *pheadroom, unsigned long *pdirty,
3653 unsigned long *pwriteback)
3654{
3655 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3656 struct mem_cgroup *parent;
3657
3658 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
3659
	/* this should eventually include NR_UNSTABLE_NFS */
3661 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
3662 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3663 (1 << LRU_ACTIVE_FILE));
3664 *pheadroom = PAGE_COUNTER_MAX;
3665
3666 while ((parent = parent_mem_cgroup(memcg))) {
3667 unsigned long ceiling = min(memcg->memory.limit, memcg->high);
3668 unsigned long used = page_counter_read(&memcg->memory);
3669
3670 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
3671 memcg = parent;
3672 }
3673}
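/*
 * Worked example for the headroom calculation above (hypothetical
 * numbers): a memcg with limit 100M, high 80M and 50M in use
 * contributes min(100M, 80M) - 50M = 30M; if an ancestor only has 20M
 * of headroom, *pheadroom ends up as 20M, since the minimum over all
 * ancestors wins.
 */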
3674
3675#else
3676
3677static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3678{
3679 return 0;
3680}
3681
3682static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3683{
3684}
3685
3686static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3687{
3688}
3689
3690#endif
3691
/*
 * DO NOT USE IN NEW FILES.
 *
 * "cgroup.event_control" implementation.
 *
 * This is way over-engineered.  It tries to support fully configurable
 * events for each user.  Such level of flexibility is completely
 * unnecessary especially in the light of the planned unified hierarchy.
 *
 * Please deprecate this and replace with something simpler.  Auto-removal
 * of the subtree should suffice.
 */

/*
 * Unregister event and free resources.
 *
 * Gets called from workqueue.
 */
3710static void memcg_event_remove(struct work_struct *work)
3711{
3712 struct mem_cgroup_event *event =
3713 container_of(work, struct mem_cgroup_event, remove);
3714 struct mem_cgroup *memcg = event->memcg;
3715
3716 remove_wait_queue(event->wqh, &event->wait);
3717
3718 event->unregister_event(memcg, event->eventfd);
3719
	/* Notify userspace the event is going away. */
3721 eventfd_signal(event->eventfd, 1);
3722
3723 eventfd_ctx_put(event->eventfd);
3724 kfree(event);
3725 css_put(&memcg->css);
3726}
3727
/*
 * Gets called on POLLHUP on eventfd when user closes it.
 *
 * Called with wqh->lock held and interrupts disabled.
 */
3733static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
3734 int sync, void *key)
3735{
3736 struct mem_cgroup_event *event =
3737 container_of(wait, struct mem_cgroup_event, wait);
3738 struct mem_cgroup *memcg = event->memcg;
3739 unsigned long flags = (unsigned long)key;
3740
3741 if (flags & POLLHUP) {
		/*
		 * If the event has been detached at cgroup removal, we
		 * can simply return knowing the other side will clean up
		 * for us.
		 *
		 * We can't race against event freeing since the other
		 * side will require wqh->lock via remove_wait_queue(),
		 * which we hold.
		 */
3751 spin_lock(&memcg->event_list_lock);
3752 if (!list_empty(&event->list)) {
3753 list_del_init(&event->list);
			/*
			 * We are in atomic context, but the unregister
			 * callbacks may sleep, so punt to a workqueue.
			 */
3758 schedule_work(&event->remove);
3759 }
3760 spin_unlock(&memcg->event_list_lock);
3761 }
3762
3763 return 0;
3764}
3765
3766static void memcg_event_ptable_queue_proc(struct file *file,
3767 wait_queue_head_t *wqh, poll_table *pt)
3768{
3769 struct mem_cgroup_event *event =
3770 container_of(pt, struct mem_cgroup_event, pt);
3771
3772 event->wqh = wqh;
3773 add_wait_queue(wqh, &event->wait);
3774}
3775
/*
 * DO NOT USE IN NEW FILES.
 *
 * Parse input and register new cgroup event handler.
 *
 * Input must be in format '<event_fd> <control_fd> <args>'.
 * Interpretation of args is defined by the control file implementation.
 */
3784static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
3785 char *buf, size_t nbytes, loff_t off)
3786{
3787 struct cgroup_subsys_state *css = of_css(of);
3788 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3789 struct mem_cgroup_event *event;
3790 struct cgroup_subsys_state *cfile_css;
3791 unsigned int efd, cfd;
3792 struct fd efile;
3793 struct fd cfile;
3794 const char *name;
3795 char *endp;
3796 int ret;
3797
3798 buf = strstrip(buf);
3799
3800 efd = simple_strtoul(buf, &endp, 10);
3801 if (*endp != ' ')
3802 return -EINVAL;
3803 buf = endp + 1;
3804
3805 cfd = simple_strtoul(buf, &endp, 10);
3806 if ((*endp != ' ') && (*endp != '\0'))
3807 return -EINVAL;
3808 buf = endp + 1;
3809
3810 event = kzalloc(sizeof(*event), GFP_KERNEL);
3811 if (!event)
3812 return -ENOMEM;
3813
3814 event->memcg = memcg;
3815 INIT_LIST_HEAD(&event->list);
3816 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
3817 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
3818 INIT_WORK(&event->remove, memcg_event_remove);
3819
3820 efile = fdget(efd);
3821 if (!efile.file) {
3822 ret = -EBADF;
3823 goto out_kfree;
3824 }
3825
3826 event->eventfd = eventfd_ctx_fileget(efile.file);
3827 if (IS_ERR(event->eventfd)) {
3828 ret = PTR_ERR(event->eventfd);
3829 goto out_put_efile;
3830 }
3831
3832 cfile = fdget(cfd);
3833 if (!cfile.file) {
3834 ret = -EBADF;
3835 goto out_put_eventfd;
3836 }
3837
	/* the process needs read permission on the control file */
	/* AV: shouldn't we check that it's been opened for read instead? */
3840 ret = inode_permission(file_inode(cfile.file), MAY_READ);
3841 if (ret < 0)
3842 goto out_put_cfile;
3843
	/*
	 * Determine the event callbacks and set them in @event.  This used
	 * to be done via struct cftype but cgroup core no longer knows
	 * about these events.  The following is crude but the whole thing
	 * is for compatibility anyway.
	 *
	 * DO NOT ADD NEW FILES.
	 */
3852 name = cfile.file->f_path.dentry->d_name.name;
3853
3854 if (!strcmp(name, "memory.usage_in_bytes")) {
3855 event->register_event = mem_cgroup_usage_register_event;
3856 event->unregister_event = mem_cgroup_usage_unregister_event;
3857 } else if (!strcmp(name, "memory.oom_control")) {
3858 event->register_event = mem_cgroup_oom_register_event;
3859 event->unregister_event = mem_cgroup_oom_unregister_event;
3860 } else if (!strcmp(name, "memory.pressure_level")) {
3861 event->register_event = vmpressure_register_event;
3862 event->unregister_event = vmpressure_unregister_event;
3863 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
3864 event->register_event = memsw_cgroup_usage_register_event;
3865 event->unregister_event = memsw_cgroup_usage_unregister_event;
3866 } else {
3867 ret = -EINVAL;
3868 goto out_put_cfile;
3869 }
3870
	/*
	 * Verify @cfile should belong to @css.  Also, remaining events are
	 * automatically removed on cgroup destruction but the removal is
	 * asynchronous, so take an extra ref on @css.
	 */
3876 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
3877 &memory_cgrp_subsys);
3878 ret = -EINVAL;
3879 if (IS_ERR(cfile_css))
3880 goto out_put_cfile;
3881 if (cfile_css != css) {
3882 css_put(cfile_css);
3883 goto out_put_cfile;
3884 }
3885
3886 ret = event->register_event(memcg, event->eventfd, buf);
3887 if (ret)
3888 goto out_put_css;
3889
3890 efile.file->f_op->poll(efile.file, &event->pt);
3891
3892 spin_lock(&memcg->event_list_lock);
3893 list_add(&event->list, &memcg->event_list);
3894 spin_unlock(&memcg->event_list_lock);
3895
3896 fdput(cfile);
3897 fdput(efile);
3898
3899 return nbytes;
3900
3901out_put_css:
3902 css_put(css);
3903out_put_cfile:
3904 fdput(cfile);
3905out_put_eventfd:
3906 eventfd_ctx_put(event->eventfd);
3907out_put_efile:
3908 fdput(efile);
3909out_kfree:
3910 kfree(event);
3911
3912 return ret;
3913}
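/*
 * Example (illustrative, not part of the kernel API): a userspace
 * program arms a usage threshold by writing "<event_fd> <control_fd>
 * <args>" to cgroup.event_control, e.g.:
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("memory.usage_in_bytes", O_RDONLY);
 *	int ecfd = open("cgroup.event_control", O_WRONLY);
 *	dprintf(ecfd, "%d %d 50M", efd, cfd);
 *
 * After this, every crossing of the 50M threshold can be observed by
 * read()ing efd.
 */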
3914
3915static struct cftype mem_cgroup_legacy_files[] = {
3916 {
3917 .name = "usage_in_bytes",
3918 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
3919 .read_u64 = mem_cgroup_read_u64,
3920 },
3921 {
3922 .name = "max_usage_in_bytes",
3923 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
3924 .write = mem_cgroup_reset,
3925 .read_u64 = mem_cgroup_read_u64,
3926 },
3927 {
3928 .name = "limit_in_bytes",
3929 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
3930 .write = mem_cgroup_write,
3931 .read_u64 = mem_cgroup_read_u64,
3932 },
3933 {
3934 .name = "soft_limit_in_bytes",
3935 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
3936 .write = mem_cgroup_write,
3937 .read_u64 = mem_cgroup_read_u64,
3938 },
3939 {
3940 .name = "failcnt",
3941 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
3942 .write = mem_cgroup_reset,
3943 .read_u64 = mem_cgroup_read_u64,
3944 },
3945 {
3946 .name = "stat",
3947 .seq_show = memcg_stat_show,
3948 },
3949 {
3950 .name = "force_empty",
3951 .write = mem_cgroup_force_empty_write,
3952 },
3953 {
3954 .name = "use_hierarchy",
3955 .write_u64 = mem_cgroup_hierarchy_write,
3956 .read_u64 = mem_cgroup_hierarchy_read,
3957 },
3958 {
3959 .name = "cgroup.event_control",
3960 .write = memcg_write_event_control,
3961 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
3962 },
3963 {
3964 .name = "swappiness",
3965 .read_u64 = mem_cgroup_swappiness_read,
3966 .write_u64 = mem_cgroup_swappiness_write,
3967 },
3968 {
3969 .name = "move_charge_at_immigrate",
3970 .read_u64 = mem_cgroup_move_charge_read,
3971 .write_u64 = mem_cgroup_move_charge_write,
3972 },
3973 {
3974 .name = "oom_control",
3975 .seq_show = mem_cgroup_oom_control_read,
3976 .write_u64 = mem_cgroup_oom_control_write,
3977 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
3978 },
3979 {
3980 .name = "pressure_level",
3981 },
3982#ifdef CONFIG_NUMA
3983 {
3984 .name = "numa_stat",
3985 .seq_show = memcg_numa_stat_show,
3986 },
3987#endif
3988 {
3989 .name = "kmem.limit_in_bytes",
3990 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
3991 .write = mem_cgroup_write,
3992 .read_u64 = mem_cgroup_read_u64,
3993 },
3994 {
3995 .name = "kmem.usage_in_bytes",
3996 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
3997 .read_u64 = mem_cgroup_read_u64,
3998 },
3999 {
4000 .name = "kmem.failcnt",
4001 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
4002 .write = mem_cgroup_reset,
4003 .read_u64 = mem_cgroup_read_u64,
4004 },
4005 {
4006 .name = "kmem.max_usage_in_bytes",
4007 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4008 .write = mem_cgroup_reset,
4009 .read_u64 = mem_cgroup_read_u64,
4010 },
4011#ifdef CONFIG_SLABINFO
4012 {
4013 .name = "kmem.slabinfo",
4014 .seq_start = slab_start,
4015 .seq_next = slab_next,
4016 .seq_stop = slab_stop,
4017 .seq_show = memcg_slab_show,
4018 },
4019#endif
4020 {
4021 .name = "kmem.tcp.limit_in_bytes",
4022 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4023 .write = mem_cgroup_write,
4024 .read_u64 = mem_cgroup_read_u64,
4025 },
4026 {
4027 .name = "kmem.tcp.usage_in_bytes",
4028 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4029 .read_u64 = mem_cgroup_read_u64,
4030 },
4031 {
4032 .name = "kmem.tcp.failcnt",
4033 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4034 .write = mem_cgroup_reset,
4035 .read_u64 = mem_cgroup_read_u64,
4036 },
4037 {
4038 .name = "kmem.tcp.max_usage_in_bytes",
4039 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4040 .write = mem_cgroup_reset,
4041 .read_u64 = mem_cgroup_read_u64,
4042 },
4043 { },
4044};
4045
/*
 * Private memory cgroup IDR
 *
 * Swap-out records and page cache shadow entries need to store memcg
 * references in constrained space, so we maintain an ID space that is
 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
 * memory-controlled cgroups to 64k.
 *
 * However, there usually are many references to the offline CSS after
 * the cgroup has been destroyed, such as page cache or reclaimable
 * slab objects, that don't need to hang on to the ID. We want to keep
 * those dead CSSes from occupying IDs, or we might quickly exhaust the
 * limited ID space and prevent successful charging of new pages.
 *
 * Therefore the ID is pinned by its own reference count (memcg->id.ref)
 * rather than by the CSS reference count: the ID is released by
 * mem_cgroup_id_put_many() as soon as the last swap or shadow record
 * referencing it is gone, while the ID in turn holds a single reference
 * on the CSS itself.
 */
4070static DEFINE_IDR(mem_cgroup_idr);
4071
4072static void mem_cgroup_id_get_many(struct mem_cgroup *memcg, unsigned int n)
4073{
4074 VM_BUG_ON(atomic_read(&memcg->id.ref) <= 0);
4075 atomic_add(n, &memcg->id.ref);
4076}
4077
4078static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
4079{
4080 VM_BUG_ON(atomic_read(&memcg->id.ref) < n);
4081 if (atomic_sub_and_test(n, &memcg->id.ref)) {
4082 idr_remove(&mem_cgroup_idr, memcg->id.id);
4083 memcg->id.id = 0;
4084
		/* Memcg ID pins CSS */
4086 css_put(&memcg->css);
4087 }
4088}
4089
4090static inline void mem_cgroup_id_get(struct mem_cgroup *memcg)
4091{
4092 mem_cgroup_id_get_many(memcg, 1);
4093}
4094
4095static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
4096{
4097 mem_cgroup_id_put_many(memcg, 1);
4098}
4099
/**
 * mem_cgroup_from_id - look up a memcg from a memcg id
 * @id: the memcg id to look up
 *
 * Caller must hold rcu_read_lock().
 */
4106struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
4107{
4108 WARN_ON_ONCE(!rcu_read_lock_held());
4109 return idr_find(&mem_cgroup_idr, id);
4110}
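/*
 * Example (sketch of the lookup pattern, mirroring the swap-in charge
 * path later in this file): the returned pointer is only stable under
 * RCU, so callers that need the memcg beyond the read-side critical
 * section must tryget the css:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */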
4111
4112static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4113{
4114 struct mem_cgroup_per_node *pn;
4115 int tmp = node;
4116
	/*
	 * This routine is called against possible nodes.
	 * But it's BUG to call kmalloc() against an offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use a memory hotplug
	 *       callback function.
	 */
4124 if (!node_state(node, N_NORMAL_MEMORY))
4125 tmp = -1;
4126 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4127 if (!pn)
4128 return 1;
4129
4130 lruvec_init(&pn->lruvec);
4131 pn->usage_in_excess = 0;
4132 pn->on_tree = false;
4133 pn->memcg = memcg;
4134
4135 memcg->nodeinfo[node] = pn;
4136 return 0;
4137}
4138
4139static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
4140{
4141 kfree(memcg->nodeinfo[node]);
4142}
4143
4144static void mem_cgroup_free(struct mem_cgroup *memcg)
4145{
4146 int node;
4147
4148 memcg_wb_domain_exit(memcg);
4149 for_each_node(node)
4150 free_mem_cgroup_per_node_info(memcg, node);
4151 free_percpu(memcg->stat);
4152 kfree(memcg);
4153}
4154
4155static struct mem_cgroup *mem_cgroup_alloc(void)
4156{
4157 struct mem_cgroup *memcg;
4158 size_t size;
4159 int node;
4160
4161 size = sizeof(struct mem_cgroup);
4162 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
4163
4164 memcg = kzalloc(size, GFP_KERNEL);
4165 if (!memcg)
4166 return NULL;
4167
4168 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
4169 1, MEM_CGROUP_ID_MAX,
4170 GFP_KERNEL);
4171 if (memcg->id.id < 0)
4172 goto fail;
4173
4174 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4175 if (!memcg->stat)
4176 goto fail;
4177
4178 for_each_node(node)
4179 if (alloc_mem_cgroup_per_node_info(memcg, node))
4180 goto fail;
4181
4182 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4183 goto fail;
4184
4185 INIT_WORK(&memcg->high_work, high_work_func);
4186 memcg->last_scanned_node = MAX_NUMNODES;
4187 INIT_LIST_HEAD(&memcg->oom_notify);
4188 mutex_init(&memcg->thresholds_lock);
4189 spin_lock_init(&memcg->move_lock);
4190 vmpressure_init(&memcg->vmpressure);
4191 INIT_LIST_HEAD(&memcg->event_list);
4192 spin_lock_init(&memcg->event_list_lock);
4193 memcg->socket_pressure = jiffies;
4194#ifndef CONFIG_SLOB
4195 memcg->kmemcg_id = -1;
4196#endif
4197#ifdef CONFIG_CGROUP_WRITEBACK
4198 INIT_LIST_HEAD(&memcg->cgwb_list);
4199#endif
4200 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
4201 return memcg;
4202fail:
4203 if (memcg->id.id > 0)
4204 idr_remove(&mem_cgroup_idr, memcg->id.id);
4205 mem_cgroup_free(memcg);
4206 return NULL;
4207}
4208
4209static struct cgroup_subsys_state * __ref
4210mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4211{
4212 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
4213 struct mem_cgroup *memcg;
4214 long error = -ENOMEM;
4215
4216 memcg = mem_cgroup_alloc();
4217 if (!memcg)
4218 return ERR_PTR(error);
4219
4220 memcg->high = PAGE_COUNTER_MAX;
4221 memcg->soft_limit = PAGE_COUNTER_MAX;
4222 if (parent) {
4223 memcg->swappiness = mem_cgroup_swappiness(parent);
4224 memcg->oom_kill_disable = parent->oom_kill_disable;
4225 }
4226 if (parent && parent->use_hierarchy) {
4227 memcg->use_hierarchy = true;
4228 page_counter_init(&memcg->memory, &parent->memory);
4229 page_counter_init(&memcg->swap, &parent->swap);
4230 page_counter_init(&memcg->memsw, &parent->memsw);
4231 page_counter_init(&memcg->kmem, &parent->kmem);
4232 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
4233 } else {
4234 page_counter_init(&memcg->memory, NULL);
4235 page_counter_init(&memcg->swap, NULL);
4236 page_counter_init(&memcg->memsw, NULL);
4237 page_counter_init(&memcg->kmem, NULL);
4238 page_counter_init(&memcg->tcpmem, NULL);
		/*
		 * A deeper hierarchy with use_hierarchy == false doesn't make
		 * much sense, so let the cgroup subsystem know about this
		 * unexpected state we have encountered here.
		 */
4244 if (parent != root_mem_cgroup)
4245 memory_cgrp_subsys.broken_hierarchy = true;
4246 }
4247
	/* The following stuff does not apply to the root */
4249 if (!parent) {
4250 root_mem_cgroup = memcg;
4251 return &memcg->css;
4252 }
4253
4254 error = memcg_online_kmem(memcg);
4255 if (error)
4256 goto fail;
4257
4258 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4259 static_branch_inc(&memcg_sockets_enabled_key);
4260
4261 return &memcg->css;
4262fail:
4263 mem_cgroup_free(memcg);
4264 return ERR_PTR(-ENOMEM);
4265}
4266
4267static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
4268{
4269 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4270
	/* Online state pins memcg ID, memcg ID pins CSS */
4272 atomic_set(&memcg->id.ref, 1);
4273 css_get(css);
4274 return 0;
4275}
4276
4277static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4278{
4279 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4280 struct mem_cgroup_event *event, *tmp;
4281
	/*
	 * Unregister events and notify userspace.
	 * Notify userspace about cgroup removal only after rmdir of the
	 * cgroup directory to avoid a race between userspace and kernel.
	 */
4287 spin_lock(&memcg->event_list_lock);
4288 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
4289 list_del_init(&event->list);
4290 schedule_work(&event->remove);
4291 }
4292 spin_unlock(&memcg->event_list_lock);
4293
4294 memcg_offline_kmem(memcg);
4295 wb_memcg_offline(memcg);
4296
4297 mem_cgroup_id_put(memcg);
4298}
4299
4300static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
4301{
4302 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4303
4304 invalidate_reclaim_iterators(memcg);
4305}
4306
4307static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
4308{
4309 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4310
4311 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
4312 static_branch_dec(&memcg_sockets_enabled_key);
4313
4314 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
4315 static_branch_dec(&memcg_sockets_enabled_key);
4316
4317 vmpressure_cleanup(&memcg->vmpressure);
4318 cancel_work_sync(&memcg->high_work);
4319 mem_cgroup_remove_from_trees(memcg);
4320 memcg_free_kmem(memcg);
4321 mem_cgroup_free(memcg);
4322}
4323
/**
 * mem_cgroup_css_reset - reset the states of a mem_cgroup
 * @css: the target css
 *
 * Reset the states of the mem_cgroup associated with @css.  This is
 * invoked when the userland requests disabling on the default hierarchy
 * but the memcg is pinned through dependency.  The memcg should stop
 * applying policies and should revert to the vanilla state as it may be
 * made visible again.
 *
 * The current implementation only resets the essential configurations.
 * This needs to be expanded to cover all the visible parts.
 */
4337static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4338{
4339 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4340
4341 page_counter_limit(&memcg->memory, PAGE_COUNTER_MAX);
4342 page_counter_limit(&memcg->swap, PAGE_COUNTER_MAX);
4343 page_counter_limit(&memcg->memsw, PAGE_COUNTER_MAX);
4344 page_counter_limit(&memcg->kmem, PAGE_COUNTER_MAX);
4345 page_counter_limit(&memcg->tcpmem, PAGE_COUNTER_MAX);
4346 memcg->low = 0;
4347 memcg->high = PAGE_COUNTER_MAX;
4348 memcg->soft_limit = PAGE_COUNTER_MAX;
4349 memcg_wb_domain_size_changed(memcg);
4350}
4351
4352#ifdef CONFIG_MMU
/* Handlers for move charge at task migration. */
4354static int mem_cgroup_do_precharge(unsigned long count)
4355{
4356 int ret;
4357
	/* Try a single bulk charge without reclaim first, kswapd may wake */
4359 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
4360 if (!ret) {
4361 mc.precharge += count;
4362 return ret;
4363 }
4364
	/* Try charges one by one with reclaim, but do not retry */
4366 while (count--) {
4367 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
4368 if (ret)
4369 return ret;
4370 mc.precharge++;
4371 cond_resched();
4372 }
4373 return 0;
4374}
4375
4376union mc_target {
4377 struct page *page;
4378 swp_entry_t ent;
4379};
4380
4381enum mc_target_type {
4382 MC_TARGET_NONE = 0,
4383 MC_TARGET_PAGE,
4384 MC_TARGET_SWAP,
4385};
4386
4387static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4388 unsigned long addr, pte_t ptent)
4389{
4390 struct page *page = vm_normal_page(vma, addr, ptent);
4391
4392 if (!page || !page_mapped(page))
4393 return NULL;
4394 if (PageAnon(page)) {
4395 if (!(mc.flags & MOVE_ANON))
4396 return NULL;
4397 } else {
4398 if (!(mc.flags & MOVE_FILE))
4399 return NULL;
4400 }
4401 if (!get_page_unless_zero(page))
4402 return NULL;
4403
4404 return page;
4405}
4406
4407#ifdef CONFIG_SWAP
4408static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4409 pte_t ptent, swp_entry_t *entry)
4410{
4411 struct page *page = NULL;
4412 swp_entry_t ent = pte_to_swp_entry(ptent);
4413
4414 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
4415 return NULL;
4416
	/*
	 * Because lookup_swap_cache() updates some statistics counters,
	 * we call find_get_page() with swapper_space directly.
	 */
4420 page = find_get_page(swap_address_space(ent), swp_offset(ent));
4421 if (do_memsw_account())
4422 entry->val = ent.val;
4423
4424 return page;
4425}
4426#else
4427static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4428 pte_t ptent, swp_entry_t *entry)
4429{
4430 return NULL;
4431}
4432#endif
4433
4434static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4435 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4436{
4437 struct page *page = NULL;
4438 struct address_space *mapping;
4439 pgoff_t pgoff;
4440
4441 if (!vma->vm_file)
4442 return NULL;
4443 if (!(mc.flags & MOVE_FILE))
4444 return NULL;
4445
4446 mapping = vma->vm_file->f_mapping;
4447 pgoff = linear_page_index(vma, addr);
4448
	/* page is moved even if it's not RSS of this task(page-faulted). */
4450#ifdef CONFIG_SWAP
	/* shmem/tmpfs may report page out on swap: account for that too. */
4452 if (shmem_mapping(mapping)) {
4453 page = find_get_entry(mapping, pgoff);
4454 if (radix_tree_exceptional_entry(page)) {
4455 swp_entry_t swp = radix_to_swp_entry(page);
4456 if (do_memsw_account())
4457 *entry = swp;
4458 page = find_get_page(swap_address_space(swp),
4459 swp_offset(swp));
4460 }
4461 } else
4462 page = find_get_page(mapping, pgoff);
4463#else
4464 page = find_get_page(mapping, pgoff);
4465#endif
4466 return page;
4467}
4468
/**
 * mem_cgroup_move_account - move account of the page
 * @page: the page
 * @compound: charge the page as compound or small page
 * @from: mem_cgroup which the page is moved from.
 * @to: mem_cgroup which the page is moved to. @from != @to.
 *
 * The caller must make sure the page is not on LRU (isolate_lru_page()
 * is useful when the page is not on LRU).
 *
 * This function doesn't do "charge" to the new cgroup and doesn't do
 * "uncharge" from the old cgroup.
 */
4481static int mem_cgroup_move_account(struct page *page,
4482 bool compound,
4483 struct mem_cgroup *from,
4484 struct mem_cgroup *to)
4485{
4486 unsigned long flags;
4487 unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
4488 int ret;
4489 bool anon;
4490
4491 VM_BUG_ON(from == to);
4492 VM_BUG_ON_PAGE(PageLRU(page), page);
4493 VM_BUG_ON(compound && !PageTransHuge(page));
4494
	/*
	 * Prevent mem_cgroup_migrate() from looking at
	 * page->mem_cgroup of its source page while we change it.
	 */
4499 ret = -EBUSY;
4500 if (!trylock_page(page))
4501 goto out;
4502
4503 ret = -EINVAL;
4504 if (page->mem_cgroup != from)
4505 goto out_unlock;
4506
4507 anon = PageAnon(page);
4508
4509 spin_lock_irqsave(&from->move_lock, flags);
4510
4511 if (!anon && page_mapped(page)) {
4512 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4513 nr_pages);
4514 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4515 nr_pages);
4516 }
4517
	/*
	 * move_lock grabbed above and caller set from->moving_account, so
	 * concurrent dirty accounting (done under lock_page_memcg()) is
	 * excluded while we transfer the counters, and the mapping should
	 * be stable for dirty pages.
	 */
4523 if (!anon && PageDirty(page)) {
4524 struct address_space *mapping = page_mapping(page);
4525
4526 if (mapping_cap_account_dirty(mapping)) {
4527 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
4528 nr_pages);
4529 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
4530 nr_pages);
4531 }
4532 }
4533
4534 if (PageWriteback(page)) {
4535 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4536 nr_pages);
4537 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4538 nr_pages);
4539 }
4540
	/*
	 * It is safe to change page->mem_cgroup here because the page
	 * is referenced, charged, and isolated - we can't race with
	 * uncharging, charging, migration, or LRU putback.
	 */

	/* caller should have done css_get */
4548 page->mem_cgroup = to;
4549 spin_unlock_irqrestore(&from->move_lock, flags);
4550
4551 ret = 0;
4552
4553 local_irq_disable();
4554 mem_cgroup_charge_statistics(to, page, compound, nr_pages);
4555 memcg_check_events(to, page);
4556 mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
4557 memcg_check_events(from, page);
4558 local_irq_enable();
4559out_unlock:
4560 unlock_page(page);
4561out:
4562 return ret;
4563}
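/*
 * Typical calling pattern (sketch based on the charge-moving page walk
 * below): the page is taken off the LRU around the move so that this
 * function never sees it on a list:
 *
 *	if (!isolate_lru_page(page)) {
 *		if (!mem_cgroup_move_account(page, false, mc.from, mc.to))
 *			mc.precharge--;
 *		putback_lru_page(page);
 *	}
 */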
4564
/**
 * get_mctgt_type - get target type of moving charge
 * @vma: the vma the pte to be checked belongs to
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: the pointer the target page or swap entry will be stored in
 *	(can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target
 *     for move charge. If @target is not NULL, the page is stored in
 *     target->page with an extra refcount taken (callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *     target for charge migration. If @target is not NULL, the entry is
 *     stored in target->ent.
 *
 * Called with pte lock held.
 */
4584static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
4585 unsigned long addr, pte_t ptent, union mc_target *target)
4586{
4587 struct page *page = NULL;
4588 enum mc_target_type ret = MC_TARGET_NONE;
4589 swp_entry_t ent = { .val = 0 };
4590
4591 if (pte_present(ptent))
4592 page = mc_handle_present_pte(vma, addr, ptent);
4593 else if (is_swap_pte(ptent))
4594 page = mc_handle_swap_pte(vma, ptent, &ent);
4595 else if (pte_none(ptent))
4596 page = mc_handle_file_pte(vma, addr, ptent, &ent);
4597
4598 if (!page && !ent.val)
4599 return ret;
4600 if (page) {
		/*
		 * Do only a loose check w/o serialization.
		 * mem_cgroup_move_account() checks whether the page is
		 * valid or not under LRU exclusion.
		 */
4606 if (page->mem_cgroup == mc.from) {
4607 ret = MC_TARGET_PAGE;
4608 if (target)
4609 target->page = page;
4610 }
4611 if (!ret || !target)
4612 put_page(page);
4613 }
4614
4615 if (ent.val && !ret &&
4616 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
4617 ret = MC_TARGET_SWAP;
4618 if (target)
4619 target->ent = ent;
4620 }
4621 return ret;
4622}
4623
4624#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * We don't consider PMD mapped swapping or file mapped pages because THP does
 * not support them for now.
 * Caller should make sure that pmd_trans_huge(pmd) is true.
 */
4630static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4631 unsigned long addr, pmd_t pmd, union mc_target *target)
4632{
4633 struct page *page = NULL;
4634 enum mc_target_type ret = MC_TARGET_NONE;
4635
4636 page = pmd_page(pmd);
4637 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
4638 if (!(mc.flags & MOVE_ANON))
4639 return ret;
4640 if (page->mem_cgroup == mc.from) {
4641 ret = MC_TARGET_PAGE;
4642 if (target) {
4643 get_page(page);
4644 target->page = page;
4645 }
4646 }
4647 return ret;
4648}
4649#else
4650static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4651 unsigned long addr, pmd_t pmd, union mc_target *target)
4652{
4653 return MC_TARGET_NONE;
4654}
4655#endif
4656
4657static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4658 unsigned long addr, unsigned long end,
4659 struct mm_walk *walk)
4660{
4661 struct vm_area_struct *vma = walk->vma;
4662 pte_t *pte;
4663 spinlock_t *ptl;
4664
4665 ptl = pmd_trans_huge_lock(pmd, vma);
4666 if (ptl) {
4667 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
4668 mc.precharge += HPAGE_PMD_NR;
4669 spin_unlock(ptl);
4670 return 0;
4671 }
4672
4673 if (pmd_trans_unstable(pmd))
4674 return 0;
4675 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4676 for (; addr != end; pte++, addr += PAGE_SIZE)
4677 if (get_mctgt_type(vma, addr, *pte, NULL))
4678 mc.precharge++;
4679 pte_unmap_unlock(pte - 1, ptl);
4680 cond_resched();
4681
4682 return 0;
4683}
4684
4685static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4686{
4687 unsigned long precharge;
4688
4689 struct mm_walk mem_cgroup_count_precharge_walk = {
4690 .pmd_entry = mem_cgroup_count_precharge_pte_range,
4691 .mm = mm,
4692 };
4693 down_read(&mm->mmap_sem);
4694 walk_page_range(0, mm->highest_vm_end,
4695 &mem_cgroup_count_precharge_walk);
4696 up_read(&mm->mmap_sem);
4697
4698 precharge = mc.precharge;
4699 mc.precharge = 0;
4700
4701 return precharge;
4702}
4703
4704static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4705{
4706 unsigned long precharge = mem_cgroup_count_precharge(mm);
4707
4708 VM_BUG_ON(mc.moving_task);
4709 mc.moving_task = current;
4710 return mem_cgroup_do_precharge(precharge);
4711}
4712
4713
4714static void __mem_cgroup_clear_mc(void)
4715{
4716 struct mem_cgroup *from = mc.from;
4717 struct mem_cgroup *to = mc.to;
4718
	/* we must uncharge all the leftover precharges from mc.to */
4720 if (mc.precharge) {
4721 cancel_charge(mc.to, mc.precharge);
4722 mc.precharge = 0;
4723 }
4724
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
4728 if (mc.moved_charge) {
4729 cancel_charge(mc.from, mc.moved_charge);
4730 mc.moved_charge = 0;
4731 }
4732
4733 if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
4735 if (!mem_cgroup_is_root(mc.from))
4736 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
4737
4738 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
4739
		/*
		 * we charged both to->memory and to->memsw, we should
		 * uncharge to->memory.
		 */
4744 if (!mem_cgroup_is_root(mc.to))
4745 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
4746
4747 mem_cgroup_id_get_many(mc.to, mc.moved_swap);
4748 css_put_many(&mc.to->css, mc.moved_swap);
4749
4750 mc.moved_swap = 0;
4751 }
4752 memcg_oom_recover(from);
4753 memcg_oom_recover(to);
4754 wake_up_all(&mc.waitq);
4755}
4756
4757static void mem_cgroup_clear_mc(void)
4758{
4759 struct mm_struct *mm = mc.mm;
4760
	/*
	 * we must clear moving_task before waking up waiters at the end of
	 * task migration.
	 */
4765 mc.moving_task = NULL;
4766 __mem_cgroup_clear_mc();
4767 spin_lock(&mc.lock);
4768 mc.from = NULL;
4769 mc.to = NULL;
4770 mc.mm = NULL;
4771 spin_unlock(&mc.lock);
4772
4773 mmput(mm);
4774}
4775
4776static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4777{
4778 struct cgroup_subsys_state *css;
4779 struct mem_cgroup *memcg = NULL;
4780 struct mem_cgroup *from;
4781 struct task_struct *leader, *p;
4782 struct mm_struct *mm;
4783 unsigned long move_flags;
4784 int ret = 0;
4785
	/* charge immigration isn't supported on the default hierarchy */
4787 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
4788 return 0;
4789
	/*
	 * Multi-process migrations only happen on the default hierarchy
	 * where charge immigration is not used.  Perform charge
	 * immigration if @tset contains a leader and whine if there are
	 * multiple.
	 */
4796 p = NULL;
4797 cgroup_taskset_for_each_leader(leader, css, tset) {
4798 WARN_ON_ONCE(p);
4799 p = leader;
4800 memcg = mem_cgroup_from_css(css);
4801 }
4802 if (!p)
4803 return 0;
4804
	/*
	 * We are now committed to this value whatever it is. Changes in this
	 * tunable will only affect upcoming migrations, not the current one.
	 * So we need to save it, and keep it going.
	 */
4810 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4811 if (!move_flags)
4812 return 0;
4813
4814 from = mem_cgroup_from_task(p);
4815
4816 VM_BUG_ON(from == memcg);
4817
4818 mm = get_task_mm(p);
4819 if (!mm)
4820 return 0;
4821
4822 if (mm->owner == p) {
4823 VM_BUG_ON(mc.from);
4824 VM_BUG_ON(mc.to);
4825 VM_BUG_ON(mc.precharge);
4826 VM_BUG_ON(mc.moved_charge);
4827 VM_BUG_ON(mc.moved_swap);
4828
4829 spin_lock(&mc.lock);
4830 mc.mm = mm;
4831 mc.from = from;
4832 mc.to = memcg;
4833 mc.flags = move_flags;
4834 spin_unlock(&mc.lock);
		/* We set mc.moving_task later */
4836
4837 ret = mem_cgroup_precharge_mc(mm);
4838 if (ret)
4839 mem_cgroup_clear_mc();
4840 } else {
4841 mmput(mm);
4842 }
4843 return ret;
4844}
4845
4846static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
4847{
4848 if (mc.to)
4849 mem_cgroup_clear_mc();
4850}
4851
4852static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4853 unsigned long addr, unsigned long end,
4854 struct mm_walk *walk)
4855{
4856 int ret = 0;
4857 struct vm_area_struct *vma = walk->vma;
4858 pte_t *pte;
4859 spinlock_t *ptl;
4860 enum mc_target_type target_type;
4861 union mc_target target;
4862 struct page *page;
4863
4864 ptl = pmd_trans_huge_lock(pmd, vma);
4865 if (ptl) {
4866 if (mc.precharge < HPAGE_PMD_NR) {
4867 spin_unlock(ptl);
4868 return 0;
4869 }
4870 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
4871 if (target_type == MC_TARGET_PAGE) {
4872 page = target.page;
4873 if (!isolate_lru_page(page)) {
4874 if (!mem_cgroup_move_account(page, true,
4875 mc.from, mc.to)) {
4876 mc.precharge -= HPAGE_PMD_NR;
4877 mc.moved_charge += HPAGE_PMD_NR;
4878 }
4879 putback_lru_page(page);
4880 }
4881 put_page(page);
4882 }
4883 spin_unlock(ptl);
4884 return 0;
4885 }
4886
4887 if (pmd_trans_unstable(pmd))
4888 return 0;
4889retry:
4890 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4891 for (; addr != end; addr += PAGE_SIZE) {
4892 pte_t ptent = *(pte++);
4893 swp_entry_t ent;
4894
4895 if (!mc.precharge)
4896 break;
4897
4898 switch (get_mctgt_type(vma, addr, ptent, &target)) {
4899 case MC_TARGET_PAGE:
4900 page = target.page;
			/*
			 * We can have a part of the split pmd here. Moving it
			 * can be done but it would be too convoluted so simply
			 * ignore such a partial THP and keep it in the original
			 * memcg. There should be somebody mapping the head.
			 */
4907 if (PageTransCompound(page))
4908 goto put;
4909 if (isolate_lru_page(page))
4910 goto put;
4911 if (!mem_cgroup_move_account(page, false,
4912 mc.from, mc.to)) {
4913 mc.precharge--;
				/* we uncharge from mc.from later. */
4915 mc.moved_charge++;
4916 }
4917 putback_lru_page(page);
4918put:
4919 put_page(page);
4920 break;
4921 case MC_TARGET_SWAP:
4922 ent = target.ent;
4923 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
4924 mc.precharge--;
				/* we fixup refcnts and charges later. */
4926 mc.moved_swap++;
4927 }
4928 break;
4929 default:
4930 break;
4931 }
4932 }
4933 pte_unmap_unlock(pte - 1, ptl);
4934 cond_resched();
4935
4936 if (addr != end) {
		/*
		 * We have consumed all precharges we got in can_attach().
		 * We try to charge one by one, but don't do any additional
		 * charges to mc.to if we have failed in a charge once in
		 * the attach() phase.
		 */
4943 ret = mem_cgroup_do_precharge(1);
4944 if (!ret)
4945 goto retry;
4946 }
4947
4948 return ret;
4949}
4950
4951static void mem_cgroup_move_charge(void)
4952{
4953 struct mm_walk mem_cgroup_move_charge_walk = {
4954 .pmd_entry = mem_cgroup_move_charge_pte_range,
4955 .mm = mc.mm,
4956 };
4957
4958 lru_add_drain_all();
4959
	/*
	 * Signal lock_page_memcg() to take the memcg's move_lock
	 * while we're moving its pages to another memcg. Then wait
	 * for already started RCU-only updates to finish.
	 */
4964 atomic_inc(&mc.from->moving_account);
4965 synchronize_rcu();
4966retry:
4967 if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
		/*
		 * Someone holding the mmap_sem might be waiting in the
		 * waitq. So we cancel all extra charges, wake up all waiters,
		 * and retry. Because we cancel precharges, we might not be
		 * able to move enough charges, but moving charge is a
		 * best-effort feature anyway, so it wouldn't be a big problem.
		 */
4975 __mem_cgroup_clear_mc();
4976 cond_resched();
4977 goto retry;
4978 }
4979
	/*
	 * When we have consumed all precharges and failed in doing
	 * additional charge, the page walk just aborts.
	 */
4983 walk_page_range(0, mc.mm->highest_vm_end, &mem_cgroup_move_charge_walk);
4984
4985 up_read(&mc.mm->mmap_sem);
4986 atomic_dec(&mc.from->moving_account);
4987}
4988
4989static void mem_cgroup_move_task(void)
4990{
4991 if (mc.to) {
4992 mem_cgroup_move_charge();
4993 mem_cgroup_clear_mc();
4994 }
4995}
4996#else
4997static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
4998{
4999 return 0;
5000}
5001static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5002{
5003}
5004static void mem_cgroup_move_task(void)
5005{
5006}
5007#endif
5008
5009
/*
 * Cgroup retains root cgroups across [un]mount cycles making it necessary
 * to reverify the hierarchy mode on each bind.
 */
5014static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
5015{
	/*
	 * use_hierarchy is forced on the default hierarchy.  cgroup core
	 * guarantees that @root doesn't have any children, so turning it
	 * on for the root memcg is enough.
	 */
5021 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5022 root_mem_cgroup->use_hierarchy = true;
5023 else
5024 root_mem_cgroup->use_hierarchy = false;
5025}
5026
5027static u64 memory_current_read(struct cgroup_subsys_state *css,
5028 struct cftype *cft)
5029{
5030 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5031
5032 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
5033}
5034
5035static int memory_low_show(struct seq_file *m, void *v)
5036{
5037 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5038 unsigned long low = READ_ONCE(memcg->low);
5039
5040 if (low == PAGE_COUNTER_MAX)
5041 seq_puts(m, "max\n");
5042 else
5043 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
5044
5045 return 0;
5046}
5047
5048static ssize_t memory_low_write(struct kernfs_open_file *of,
5049 char *buf, size_t nbytes, loff_t off)
5050{
5051 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5052 unsigned long low;
5053 int err;
5054
5055 buf = strstrip(buf);
5056 err = page_counter_memparse(buf, "max", &low);
5057 if (err)
5058 return err;
5059
5060 memcg->low = low;
5061
5062 return nbytes;
5063}
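/*
 * Example (illustrative): the knob above accepts a byte count or the
 * string "max", e.g.
 *
 *	echo 512M > memory.low
 *	echo max > memory.low
 *
 * page_counter_memparse() maps "max" to PAGE_COUNTER_MAX, which
 * memory_low_show() above prints back as "max".
 */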
5064
5065static int memory_high_show(struct seq_file *m, void *v)
5066{
5067 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5068 unsigned long high = READ_ONCE(memcg->high);
5069
5070 if (high == PAGE_COUNTER_MAX)
5071 seq_puts(m, "max\n");
5072 else
5073 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
5074
5075 return 0;
5076}
5077
5078static ssize_t memory_high_write(struct kernfs_open_file *of,
5079 char *buf, size_t nbytes, loff_t off)
5080{
5081 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5082 unsigned long nr_pages;
5083 unsigned long high;
5084 int err;
5085
5086 buf = strstrip(buf);
5087 err = page_counter_memparse(buf, "max", &high);
5088 if (err)
5089 return err;
5090
5091 memcg->high = high;
5092
5093 nr_pages = page_counter_read(&memcg->memory);
5094 if (nr_pages > high)
5095 try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
5096 GFP_KERNEL, true);
5097
5098 memcg_wb_domain_size_changed(memcg);
5099 return nbytes;
5100}
5101
5102static int memory_max_show(struct seq_file *m, void *v)
5103{
5104 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5105 unsigned long max = READ_ONCE(memcg->memory.limit);
5106
5107 if (max == PAGE_COUNTER_MAX)
5108 seq_puts(m, "max\n");
5109 else
5110 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5111
5112 return 0;
5113}
5114
5115static ssize_t memory_max_write(struct kernfs_open_file *of,
5116 char *buf, size_t nbytes, loff_t off)
5117{
5118 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5119 unsigned int nr_reclaims = MEM_CGROUP_RECLAIM_RETRIES;
5120 bool drained = false;
5121 unsigned long max;
5122 int err;
5123
5124 buf = strstrip(buf);
5125 err = page_counter_memparse(buf, "max", &max);
5126 if (err)
5127 return err;
5128
5129 xchg(&memcg->memory.limit, max);
5130
5131 for (;;) {
5132 unsigned long nr_pages = page_counter_read(&memcg->memory);
5133
5134 if (nr_pages <= max)
5135 break;
5136
5137 if (signal_pending(current)) {
5138 err = -EINTR;
5139 break;
5140 }
5141
5142 if (!drained) {
5143 drain_all_stock(memcg);
5144 drained = true;
5145 continue;
5146 }
5147
5148 if (nr_reclaims) {
5149 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
5150 GFP_KERNEL, true))
5151 nr_reclaims--;
5152 continue;
5153 }
5154
5155 mem_cgroup_events(memcg, MEMCG_OOM, 1);
5156 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
5157 break;
5158 }
5159
5160 memcg_wb_domain_size_changed(memcg);
5161 return nbytes;
5162}
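/*
 * Behavioral note on the loop above: lowering memory.max below current
 * usage first drains the per-cpu charge stock, then tries direct
 * reclaim up to MEM_CGROUP_RECLAIM_RETRIES times, and only as a last
 * resort raises MEMCG_OOM and invokes the OOM killer.  So, for
 * example, "echo 100M > memory.max" may kill tasks in the group if
 * usage cannot be reclaimed down to 100M.
 */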
5163
5164static int memory_events_show(struct seq_file *m, void *v)
5165{
5166 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5167
5168 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
5169 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
5170 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
5171 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
5172
5173 return 0;
5174}
5175
5176static int memory_stat_show(struct seq_file *m, void *v)
5177{
5178 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5179 unsigned long stat[MEMCG_NR_STAT];
5180 unsigned long events[MEMCG_NR_EVENTS];
5181 int i;
5182
	/*
	 * Provide statistics on the state of the memory subsystem as
	 * well as cumulative event counters that show past behavior.
	 *
	 * This list is ordered following a combination of these gradients:
	 * 1) generic big picture -> specifics and details
	 * 2) reflecting userspace activity -> reflecting kernel heuristics
	 *
	 * Current memory state:
	 */
5194 tree_stat(memcg, stat);
5195 tree_events(memcg, events);
5196
5197 seq_printf(m, "anon %llu\n",
5198 (u64)stat[MEM_CGROUP_STAT_RSS] * PAGE_SIZE);
5199 seq_printf(m, "file %llu\n",
5200 (u64)stat[MEM_CGROUP_STAT_CACHE] * PAGE_SIZE);
5201 seq_printf(m, "kernel_stack %llu\n",
5202 (u64)stat[MEMCG_KERNEL_STACK_KB] * 1024);
5203 seq_printf(m, "slab %llu\n",
5204 (u64)(stat[MEMCG_SLAB_RECLAIMABLE] +
5205 stat[MEMCG_SLAB_UNRECLAIMABLE]) * PAGE_SIZE);
5206 seq_printf(m, "sock %llu\n",
5207 (u64)stat[MEMCG_SOCK] * PAGE_SIZE);
5208
5209 seq_printf(m, "file_mapped %llu\n",
5210 (u64)stat[MEM_CGROUP_STAT_FILE_MAPPED] * PAGE_SIZE);
5211 seq_printf(m, "file_dirty %llu\n",
5212 (u64)stat[MEM_CGROUP_STAT_DIRTY] * PAGE_SIZE);
5213 seq_printf(m, "file_writeback %llu\n",
5214 (u64)stat[MEM_CGROUP_STAT_WRITEBACK] * PAGE_SIZE);
5215
5216 for (i = 0; i < NR_LRU_LISTS; i++) {
5217 struct mem_cgroup *mi;
5218 unsigned long val = 0;
5219
5220 for_each_mem_cgroup_tree(mi, memcg)
5221 val += mem_cgroup_nr_lru_pages(mi, BIT(i));
5222 seq_printf(m, "%s %llu\n",
5223 mem_cgroup_lru_names[i], (u64)val * PAGE_SIZE);
5224 }
5225
5226 seq_printf(m, "slab_reclaimable %llu\n",
5227 (u64)stat[MEMCG_SLAB_RECLAIMABLE] * PAGE_SIZE);
5228 seq_printf(m, "slab_unreclaimable %llu\n",
5229 (u64)stat[MEMCG_SLAB_UNRECLAIMABLE] * PAGE_SIZE);
5230
	/* Accumulated memory events */
5232
5233 seq_printf(m, "pgfault %lu\n",
5234 events[MEM_CGROUP_EVENTS_PGFAULT]);
5235 seq_printf(m, "pgmajfault %lu\n",
5236 events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
5237
5238 return 0;
5239}

static struct cftype memory_files[] = {
	{
		.name = "current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = memory_current_read,
	},
	{
		.name = "low",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_low_show,
		.write = memory_low_write,
	},
	{
		.name = "high",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_high_show,
		.write = memory_high_write,
	},
	{
		.name = "max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_max_show,
		.write = memory_max_write,
	},
	{
		.name = "events",
		.flags = CFTYPE_NOT_ON_ROOT,
		.file_offset = offsetof(struct mem_cgroup, events_file),
		.seq_show = memory_events_show,
	},
	{
		.name = "stat",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = memory_stat_show,
	},
	{ }	/* terminate */
};

struct cgroup_subsys memory_cgrp_subsys = {
	.css_alloc = mem_cgroup_css_alloc,
	.css_online = mem_cgroup_css_online,
	.css_offline = mem_cgroup_css_offline,
	.css_released = mem_cgroup_css_released,
	.css_free = mem_cgroup_css_free,
	.css_reset = mem_cgroup_css_reset,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.post_attach = mem_cgroup_move_task,
	.bind = mem_cgroup_bind,
	.dfl_cftypes = memory_files,
	.legacy_cftypes = mem_cgroup_legacy_files,
	.early_init = 0,
};

/**
 * mem_cgroup_low - check if memory consumption is below the normal range
 * @root: the highest ancestor to consider
 * @memcg: the memory cgroup to check
 *
 * Returns %true if memory consumption of @memcg, and that of all
 * ancestors up to (but not including) @root, is below the normal range.
 */
bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
{
	if (mem_cgroup_disabled())
		return false;

	/*
	 * The toplevel group doesn't have a configurable range, so
	 * it's never low when looked at directly, and it is not
	 * considered an ancestor when assessing the hierarchy.
	 */

	if (memcg == root_mem_cgroup)
		return false;

	if (page_counter_read(&memcg->memory) >= memcg->low)
		return false;

	while (memcg != root) {
		memcg = parent_mem_cgroup(memcg);

		if (memcg == root_mem_cgroup)
			break;

		if (page_counter_read(&memcg->memory) >= memcg->low)
			return false;
	}
	return true;
}
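/*
 * Worked example (illustrative): given the hierarchy
 *
 *	root -> A (low=2G, usage=1G) -> B (low=1G, usage=512M)
 *
 * mem_cgroup_low(root, B) returns true, since B and every ancestor up
 * to (but excluding) root sit below their low thresholds. If A's usage
 * grew past 2G, the walk above would return false for B as well, and
 * reclaim would no longer spare it.
 */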

/**
 * mem_cgroup_try_charge - try charging a page
 * @page: page to charge
 * @mm: mm context of the victim
 * @gfp_mask: reclaim mode
 * @memcgp: charged memcg return
 * @compound: charge the page as compound or small page
 *
 * Try to charge @page to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp_mask if necessary.
 *
 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
 * Otherwise, an error code is returned.
 *
 * After page->mapping has been set up, the caller must finalize the
 * charge with mem_cgroup_commit_charge().  Or abort the transaction
 * with mem_cgroup_cancel_charge() in case page instantiation fails.
 */
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound)
{
	struct mem_cgroup *memcg = NULL;
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
	int ret = 0;

	if (mem_cgroup_disabled())
		goto out;

	if (PageSwapCache(page)) {
		/*
		 * Every swap fault against a single page tries to charge the
		 * page, bail as early as possible.  shmem_unuse() encounters
		 * already charged pages, too.  The USED bit is protected by
		 * the page lock, which serializes swap cache removal, which
		 * in turn serializes uncharging.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		if (page->mem_cgroup)
			goto out;

		if (do_swap_account) {
			swp_entry_t ent = { .val = page_private(page), };
			unsigned short id = lookup_swap_cgroup_id(ent);

			rcu_read_lock();
			memcg = mem_cgroup_from_id(id);
			if (memcg && !css_tryget_online(&memcg->css))
				memcg = NULL;
			rcu_read_unlock();
		}
	}

	if (!memcg)
		memcg = get_mem_cgroup_from_mm(mm);

	ret = try_charge(memcg, gfp_mask, nr_pages);

	css_put(&memcg->css);
out:
	*memcgp = memcg;
	return ret;
}
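/*
 * Usage sketch (hypothetical caller, for illustration only): fault
 * handlers drive the charge as a transaction around page instantiation:
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg, false))
 *		goto fail;
 *	... set up page->mapping: rmap or page cache insertion ...
 *	if (instantiation failed) {
 *		mem_cgroup_cancel_charge(page, memcg, false);
 *		goto fail;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false, false);
 */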

/**
 * mem_cgroup_commit_charge - commit a page charge
 * @page: page to charge
 * @memcg: memcg to charge the page to
 * @lrucare: page might be on LRU already
 * @compound: charge the page as compound or small page
 *
 * Finalize a charge transaction started by mem_cgroup_try_charge(),
 * after page->mapping has been set up.  This must happen atomically
 * along with the page instantiation, e.g. page table mappings,
 * anonymous rmap, or page cache insertion of the page.
 *
 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
 */
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare, bool compound)
{
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;

	VM_BUG_ON_PAGE(!page->mapping, page);
	VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);

	if (mem_cgroup_disabled())
		return;
	/*
	 * Swap faults will attempt to charge the same page multiple
	 * times.  But reuse_swap_page() might have removed the page
	 * from swapcache already, so we can't check PageSwapCache().
	 */
	if (!memcg)
		return;

	commit_charge(page, memcg, lrucare);

	local_irq_disable();
	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
	memcg_check_events(memcg, page);
	local_irq_enable();

	if (do_memsw_account() && PageSwapCache(page)) {
		swp_entry_t entry = { .val = page_private(page) };
		/*
		 * The swap entry might not get freed for a long time,
		 * let's not wait for it.  The page already received a
		 * memory+swap charge, drop the swap entry duplicate.
		 */
		mem_cgroup_uncharge_swap(entry);
	}
}

/**
 * mem_cgroup_cancel_charge - cancel a page charge
 * @page: page to charge
 * @memcg: memcg to charge the page to
 * @compound: charge the page as compound or small page
 *
 * Cancel a charge transaction started by mem_cgroup_try_charge().
 */
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
			      bool compound)
{
	unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;

	if (mem_cgroup_disabled())
		return;
	/*
	 * Swap faults will attempt to charge the same page multiple
	 * times.  But reuse_swap_page() might have removed the page
	 * from swapcache already, so we can't check PageSwapCache().
	 */
	if (!memcg)
		return;

	cancel_charge(memcg, nr_pages);
}

static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
			   unsigned long nr_anon, unsigned long nr_file,
			   unsigned long nr_huge, unsigned long nr_kmem,
			   struct page *dummy_page)
{
	unsigned long nr_pages = nr_anon + nr_file + nr_kmem;
	unsigned long flags;

	if (!mem_cgroup_is_root(memcg)) {
		page_counter_uncharge(&memcg->memory, nr_pages);
		if (do_memsw_account())
			page_counter_uncharge(&memcg->memsw, nr_pages);
		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && nr_kmem)
			page_counter_uncharge(&memcg->kmem, nr_kmem);
		memcg_oom_recover(memcg);
	}

	local_irq_save(flags);
	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
	__this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
	__this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
	__this_cpu_add(memcg->stat->nr_page_events, nr_pages);
	memcg_check_events(memcg, dummy_page);
	local_irq_restore(flags);

	if (!mem_cgroup_is_root(memcg))
		css_put_many(&memcg->css, nr_pages);
}

static void uncharge_list(struct list_head *page_list)
{
	struct mem_cgroup *memcg = NULL;
	unsigned long nr_anon = 0;
	unsigned long nr_file = 0;
	unsigned long nr_huge = 0;
	unsigned long nr_kmem = 0;
	unsigned long pgpgout = 0;
	struct list_head *next;
	struct page *page;

	/*
	 * Note that the list can be a single page->lru; hence the
	 * do-while loop instead of a simple list_for_each_entry().
	 */
	next = page_list->next;
	do {
		page = list_entry(next, struct page, lru);
		next = page->lru.next;

		VM_BUG_ON_PAGE(PageLRU(page), page);
		VM_BUG_ON_PAGE(page_count(page), page);

		if (!page->mem_cgroup)
			continue;

		/*
		 * Nobody should be changing or seriously looking at
		 * page->mem_cgroup at this point, we have fully
		 * exclusive access to the page.
		 */
		if (memcg != page->mem_cgroup) {
			if (memcg) {
				uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
					       nr_huge, nr_kmem, page);
				pgpgout = nr_anon = nr_file =
					nr_huge = nr_kmem = 0;
			}
			memcg = page->mem_cgroup;
		}

		if (!PageKmemcg(page)) {
			unsigned int nr_pages = 1;

			if (PageTransHuge(page)) {
				nr_pages <<= compound_order(page);
				nr_huge += nr_pages;
			}
			if (PageAnon(page))
				nr_anon += nr_pages;
			else
				nr_file += nr_pages;
			pgpgout++;
		} else {
			nr_kmem += 1 << compound_order(page);
			__ClearPageKmemcg(page);
		}

		page->mem_cgroup = NULL;
	} while (next != page_list);

	if (memcg)
		uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
			       nr_huge, nr_kmem, page);
}

/**
 * mem_cgroup_uncharge - uncharge a page
 * @page: page to uncharge
 *
 * Uncharge a page previously charged with mem_cgroup_try_charge() and
 * mem_cgroup_commit_charge().
 */
void mem_cgroup_uncharge(struct page *page)
{
	if (mem_cgroup_disabled())
		return;

	/* Don't touch page->lru of any random page, pre-check: */
	if (!page->mem_cgroup)
		return;

	INIT_LIST_HEAD(&page->lru);
	uncharge_list(&page->lru);
}

/**
 * mem_cgroup_uncharge_list - uncharge a list of pages
 * @page_list: list of pages to uncharge
 *
 * Uncharge a list of pages previously charged with
 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
 */
void mem_cgroup_uncharge_list(struct list_head *page_list)
{
	if (mem_cgroup_disabled())
		return;

	if (!list_empty(page_list))
		uncharge_list(page_list);
}

/**
 * mem_cgroup_migrate - charge a page's replacement
 * @oldpage: currently circulating page
 * @newpage: replacement page
 *
 * Charge @newpage as a replacement page for @oldpage. @oldpage will
 * be uncharged upon free.
 *
 * Both pages must be locked, @newpage->mapping must be set up.
 */
void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
{
	struct mem_cgroup *memcg;
	unsigned int nr_pages;
	bool compound;
	unsigned long flags;

	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
		       newpage);

	if (mem_cgroup_disabled())
		return;

	/* Page cache replacement: new page already charged? */
	if (newpage->mem_cgroup)
		return;

	/* Swapcache readahead pages can get replaced before being charged */
	memcg = oldpage->mem_cgroup;
	if (!memcg)
		return;

	/* Force-charge the new page. The old one will be freed soon */
	compound = PageTransHuge(newpage);
	nr_pages = compound ? hpage_nr_pages(newpage) : 1;

	page_counter_charge(&memcg->memory, nr_pages);
	if (do_memsw_account())
		page_counter_charge(&memcg->memsw, nr_pages);
	css_get_many(&memcg->css, nr_pages);

	commit_charge(newpage, memcg, false);

	local_irq_save(flags);
	mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
	memcg_check_events(memcg, newpage);
	local_irq_restore(flags);
}
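/*
 * Illustrative call site (sketch, not a function defined here): page
 * replacement paths set up the new page's mapping first, then hand the
 * charge over:
 *
 *	newpage->mapping = oldpage->mapping;
 *	newpage->index = oldpage->index;
 *	mem_cgroup_migrate(oldpage, newpage);
 */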

DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
EXPORT_SYMBOL(memcg_sockets_enabled_key);

void mem_cgroup_sk_alloc(struct sock *sk)
{
	struct mem_cgroup *memcg;

	if (!mem_cgroup_sockets_enabled)
		return;

	/*
	 * Socket cloning can throw us here with sk_memcg already
	 * filled. It won't, however, necessarily happen from
	 * process context. So the test for root memcg given
	 * the current task's memcg won't help us in this case.
	 *
	 * Respecting the original socket's memcg is a better
	 * decision in this case.
	 */
	if (sk->sk_memcg) {
		BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
		css_get(&sk->sk_memcg->css);
		return;
	}

	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
	if (memcg == root_mem_cgroup)
		goto out;
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
		goto out;
	if (css_tryget_online(&memcg->css))
		sk->sk_memcg = memcg;
out:
	rcu_read_unlock();
}

void mem_cgroup_sk_free(struct sock *sk)
{
	if (sk->sk_memcg)
		css_put(&sk->sk_memcg->css);
}

/**
 * mem_cgroup_charge_skmem - charge socket memory
 * @memcg: memcg to charge
 * @nr_pages: number of pages to charge
 *
 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
 * @memcg's configured limit, %false if the charge had to be forced.
 */
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	gfp_t gfp_mask = GFP_KERNEL;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		struct page_counter *fail;

		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
			memcg->tcpmem_pressure = 0;
			return true;
		}
		page_counter_charge(&memcg->tcpmem, nr_pages);
		memcg->tcpmem_pressure = 1;
		return false;
	}

	/* Don't block in the packet receive path */
	if (in_softirq())
		gfp_mask = GFP_NOWAIT;

	this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages);

	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
		return true;

	/* The charge went over the limit: force it and report failure */
	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
	return false;
}
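/*
 * Illustrative use (sketch): network code charges buffer pages against
 * the socket's memcg and treats a forced charge as memory pressure:
 *
 *	if (sk->sk_memcg &&
 *	    !mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages)) {
 *		... charge was forced; enter protocol memory pressure ...
 *	}
 */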

/**
 * mem_cgroup_uncharge_skmem - uncharge socket memory
 * @memcg: memcg to uncharge
 * @nr_pages: number of pages to uncharge
 */
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		page_counter_uncharge(&memcg->tcpmem, nr_pages);
		return;
	}

	this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages);

	page_counter_uncharge(&memcg->memory, nr_pages);
	css_put_many(&memcg->css, nr_pages);
}

static int __init cgroup_memory(char *s)
{
	char *token;

	while ((token = strsep(&s, ",")) != NULL) {
		if (!*token)
			continue;
		if (!strcmp(token, "nosocket"))
			cgroup_memory_nosocket = true;
		if (!strcmp(token, "nokmem"))
			cgroup_memory_nokmem = true;
	}
	return 0;
}
__setup("cgroup.memory=", cgroup_memory);
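/*
 * Example (illustrative): booting with both socket and kernel memory
 * accounting disabled via the parser above:
 *
 *	cgroup.memory=nosocket,nokmem
 */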

/*
 * subsys_initcall() for memory controller.
 *
 * Some parts like hotcpu_notifier() have to be initialized from this context
 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
 * everything that doesn't depend on a specific mem_cgroup structure should
 * be initialized from here.
 */
static int __init mem_cgroup_init(void)
{
	int cpu, node;

	hotcpu_notifier(memcg_cpu_hotplug_callback, 0);

	for_each_possible_cpu(cpu)
		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
			  drain_local_stock);

	for_each_node(node) {
		struct mem_cgroup_tree_per_node *rtpn;

		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
				    node_online(node) ? node : NUMA_NO_NODE);

		rtpn->rb_root = RB_ROOT;
		spin_lock_init(&rtpn->lock);
		soft_limit_tree.rb_tree_per_node[node] = rtpn;
	}

	return 0;
}
subsys_initcall(mem_cgroup_init);

#ifdef CONFIG_MEMCG_SWAP
static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
{
	while (!atomic_inc_not_zero(&memcg->id.ref)) {
		/*
		 * The root cgroup cannot be destroyed, so its refcount must
		 * always be >= 1.
		 */
		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
			VM_BUG_ON(1);
			break;
		}
		memcg = parent_mem_cgroup(memcg);
		if (!memcg)
			memcg = root_mem_cgroup;
	}
	return memcg;
}

/**
 * mem_cgroup_swapout - transfer a memsw charge to swap
 * @page: page whose memsw charge to transfer
 * @entry: swap entry to move the charge to
 *
 * Transfer the memsw charge of @page to @entry.
 */
void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
	struct mem_cgroup *memcg, *swap_memcg;
	unsigned short oldid;

	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);

	if (!do_memsw_account())
		return;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return;

	/*
	 * In case the memcg owning these pages has been offlined and doesn't
	 * have an ID allocated to it anymore, charge the closest online
	 * ancestor for the swap instead and transfer the memory+swap charge.
	 */
	swap_memcg = mem_cgroup_id_get_online(memcg);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg));
	VM_BUG_ON_PAGE(oldid, page);
	mem_cgroup_swap_statistics(swap_memcg, true);

	page->mem_cgroup = NULL;

	if (!mem_cgroup_is_root(memcg))
		page_counter_uncharge(&memcg->memory, 1);

	if (memcg != swap_memcg) {
		if (!mem_cgroup_is_root(swap_memcg))
			page_counter_charge(&swap_memcg->memsw, 1);
		page_counter_uncharge(&memcg->memsw, 1);
	}

	/*
	 * Interrupts should be disabled here because the caller holds the
	 * mapping->tree_lock lock which is taken with interrupts-off. It is
	 * important here to have the interrupts disabled because it is the
	 * only synchronisation we have for updating the per-CPU variables.
	 */
	VM_BUG_ON(!irqs_disabled());
	mem_cgroup_charge_statistics(memcg, page, false, -1);
	memcg_check_events(memcg, page);

	if (!mem_cgroup_is_root(memcg))
		css_put(&memcg->css);
}

/**
 * mem_cgroup_try_charge_swap - try charging a swap entry
 * @page: page being added to swap
 * @entry: swap entry to charge
 *
 * Try to charge @entry to the memcg that @page belongs to.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
{
	struct mem_cgroup *memcg;
	struct page_counter *counter;
	unsigned short oldid;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) || !do_swap_account)
		return 0;

	memcg = page->mem_cgroup;

	/* Readahead page, never charged */
	if (!memcg)
		return 0;

	memcg = mem_cgroup_id_get_online(memcg);

	if (!mem_cgroup_is_root(memcg) &&
	    !page_counter_try_charge(&memcg->swap, 1, &counter)) {
		mem_cgroup_id_put(memcg);
		return -ENOMEM;
	}

	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
	VM_BUG_ON_PAGE(oldid, page);
	mem_cgroup_swap_statistics(memcg, true);

	return 0;
}

/**
 * mem_cgroup_uncharge_swap - uncharge a swap entry
 * @entry: swap entry to uncharge
 *
 * Drop the swap charge associated with @entry.
 */
void mem_cgroup_uncharge_swap(swp_entry_t entry)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	if (!do_swap_account)
		return;

	id = swap_cgroup_record(entry, 0);
	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg) {
		if (!mem_cgroup_is_root(memcg)) {
			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
				page_counter_uncharge(&memcg->swap, 1);
			else
				page_counter_uncharge(&memcg->memsw, 1);
		}
		mem_cgroup_swap_statistics(memcg, false);
		mem_cgroup_id_put(memcg);
	}
	rcu_read_unlock();
}

long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	long nr_swap_pages = get_nr_swap_pages();

	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return nr_swap_pages;
	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		nr_swap_pages = min_t(long, nr_swap_pages,
				      READ_ONCE(memcg->swap.limit) -
				      page_counter_read(&memcg->swap));
	return nr_swap_pages;
}
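/*
 * Worked example (illustrative): if the system has 4G of free swap but
 * an ancestor has memory.swap.max = 1G with 768M already charged, the
 * walk above clamps the result to 256M of effective swap headroom.
 */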

bool mem_cgroup_swap_full(struct page *page)
{
	struct mem_cgroup *memcg;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (vm_swap_full())
		return true;
	if (!do_swap_account || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return false;

	memcg = page->mem_cgroup;
	if (!memcg)
		return false;

	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		if (page_counter_read(&memcg->swap) * 2 >= memcg->swap.limit)
			return true;

	return false;
}
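/*
 * Worked example (illustrative): with memory.swap.max = 1G, a group is
 * treated as swap-full once its swap usage reaches 512M, since
 * usage * 2 >= limit then holds in the loop above.
 */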

/* Remember the boot option */
#ifdef CONFIG_MEMCG_SWAP_ENABLED
static int really_do_swap_account __initdata = 1;
#else
static int really_do_swap_account __initdata;
#endif

static int __init enable_swap_account(char *s)
{
	if (!strcmp(s, "1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount=", enable_swap_account);
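/*
 * Example (illustrative): swap accounting can be turned off at boot,
 * e.g. to avoid the swap_cgroup bookkeeping overhead:
 *
 *	swapaccount=0
 */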

static u64 swap_current_read(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
}

static int swap_max_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	unsigned long max = READ_ONCE(memcg->swap.limit);

	if (max == PAGE_COUNTER_MAX)
		seq_puts(m, "max\n");
	else
		seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);

	return 0;
}

static ssize_t swap_max_write(struct kernfs_open_file *of,
			      char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	mutex_lock(&memcg_limit_mutex);
	err = page_counter_limit(&memcg->swap, max);
	mutex_unlock(&memcg_limit_mutex);
	if (err)
		return err;

	return nbytes;
}
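/*
 * Illustrative usage (not kernel code): memory.swap.max accepts byte
 * values or the literal "max", e.g.:
 *
 *	echo 512M > /sys/fs/cgroup/<group>/memory.swap.max
 *	echo max > /sys/fs/cgroup/<group>/memory.swap.max
 */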

static struct cftype swap_files[] = {
	{
		.name = "swap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = swap_current_read,
	},
	{
		.name = "swap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_max_show,
		.write = swap_max_write,
	},
	{ }	/* terminate */
};

static struct cftype memsw_cgroup_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },	/* terminate */
};

static int __init mem_cgroup_swap_init(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account) {
		do_swap_account = 1;
		WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys,
					       swap_files));
		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
						  memsw_cgroup_files));
	}
	return 0;
}
subsys_initcall(mem_cgroup_swap_init);

#endif /* CONFIG_MEMCG_SWAP */